git.proxmox.com - mirror_ubuntu-zesty-kernel.git - blame: drivers/gpu/drm/i915/intel_dp.c
drm/i915: ignore link rate in TPS3 selection
/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"

#define DP_LINK_CHECK_TIMEOUT	(10 * 1000)

/* Compliance test status bits */
#define INTEL_DP_RESOLUTION_SHIFT_MASK	0
#define INTEL_DP_RESOLUTION_PREFERRED	(1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_STANDARD	(2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_FAILSAFE	(3 << INTEL_DP_RESOLUTION_SHIFT_MASK)

struct dp_link_dpll {
	int clock;
	struct dpll dpll;
};

static const struct dp_link_dpll gen4_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

static const struct dp_link_dpll pch_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};

static const struct dp_link_dpll vlv_dpll[] = {
	{ 162000,
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ 270000,
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};

/*
 * CHV supports eDP 1.4, which has more link rates.
 * Below provides only the fixed rates and excludes the variable rates.
 */
static const struct dp_link_dpll chv_dpll[] = {
	/*
	 * CHV requires programming fractional division for m2.
	 * m2 is stored in fixed point format using the formula below:
	 * (m2_int << 22) | m2_fraction
	 */
	{ 162000,	/* m2_int = 32, m2_fraction = 1677722 */
		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
	{ 270000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
	{ 540000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
};
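
/*
 * Editorial note (not part of the original source): as a worked example of
 * the fixed point encoding above, 0x819999a decodes to
 * m2_int = 0x819999a >> 22 = 32 and
 * m2_fraction = 0x819999a & 0x3fffff = 0x19999a = 1677722,
 * matching the annotation on the 162000 entry.
 */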

static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
				  324000, 432000, 540000 };
static const int skl_rates[] = { 162000, 216000, 270000,
				  324000, 432000, 540000 };
static const int default_rates[] = { 162000, 270000, 540000 };

/**
 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
 * @intel_dp: DP struct
 *
 * If a CPU or PCH DP output is attached to an eDP panel, this function
 * will return true, and false otherwise.
 */
static bool is_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
}

static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.base.dev;
}

static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
{
	return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
}

static void intel_dp_link_down(struct intel_dp *intel_dp);
static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
static void vlv_steal_power_sequencer(struct drm_device *dev,
				      enum pipe pipe);

static unsigned int intel_dp_unused_lane_mask(int lane_count)
{
	return ~((1 << lane_count) - 1) & 0xf;
}
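
/*
 * Editorial note (not part of the original source): for example,
 * intel_dp_unused_lane_mask(2) == ~0x3 & 0xf == 0xc, i.e. lanes 2 and 3
 * are reported as unused when only two lanes are driven.
 */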

static int
intel_dp_max_link_bw(struct intel_dp *intel_dp)
{
	int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];

	switch (max_link_bw) {
	case DP_LINK_BW_1_62:
	case DP_LINK_BW_2_7:
	case DP_LINK_BW_5_4:
		break;
	default:
		WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
		     max_link_bw);
		max_link_bw = DP_LINK_BW_1_62;
		break;
	}
	return max_link_bw;
}

static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	u8 source_max, sink_max;

	source_max = 4;
	if (HAS_DDI(dev) && intel_dig_port->port == PORT_A &&
	    (intel_dig_port->saved_port_bits & DDI_A_4_LANES) == 0)
		source_max = 2;

	sink_max = drm_dp_max_lane_count(intel_dp->dpcd);

	return min(source_max, sink_max);
}

/*
 * The units on the numbers in the next two are... bizarre.  Examples will
 * make it clearer; this one parallels an example in the eDP spec.
 *
 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
 *
 *     270000 * 1 * 8 / 10 == 216000
 *
 * The actual data capacity of that configuration is 2.16Gbit/s, so the
 * units are decakilobits.  ->clock in a drm_display_mode is in kilohertz -
 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
 * 119000.  At 18bpp that's 2142000 kilobits per second.
 *
 * Thus the strange-looking division by 10 in intel_dp_link_required, to
 * get the result in decakilobits instead of kilobits.
 */

static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	return (pixel_clock * bpp + 9) / 10;
}

static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	return (max_link_clock * max_lanes * 8) / 10;
}
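
/*
 * Editorial worked example (not part of the original source), applying the
 * two helpers above: a 148500 kHz mode at 24 bpp needs
 * intel_dp_link_required(148500, 24) == 356400, while two lanes at 270000
 * provide intel_dp_max_data_rate(270000, 2) == 432000, so the mode fits.
 */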

static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;

	if (is_edp(intel_dp) && fixed_mode) {
		if (mode->hdisplay > fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay > fixed_mode->vdisplay)
			return MODE_PANEL;

		target_clock = fixed_mode->clock;
	}

	max_link_clock = intel_dp_max_link_rate(intel_dp);
	max_lanes = intel_dp_max_lane_count(intel_dp);

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
	mode_rate = intel_dp_link_required(target_clock, 18);

	if (mode_rate > max_rate)
		return MODE_CLOCK_HIGH;

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	return MODE_OK;
}

uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
{
	int i;
	uint32_t v = 0;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= ((uint32_t) src[i]) << ((3-i) * 8);
	return v;
}

static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int i;
	if (dst_bytes > 4)
		dst_bytes = 4;
	for (i = 0; i < dst_bytes; i++)
		dst[i] = src >> ((3-i) * 8);
}
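
/*
 * Editorial note (not part of the original source): the two helpers above
 * are big-endian packers for the AUX data registers. For instance,
 * packing the two bytes 0x12, 0x34 yields 0x12340000, and unpacking
 * 0x12340000 into a two byte buffer recovers 0x12, 0x34.
 */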

static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
				    struct intel_dp *intel_dp);
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
					       struct intel_dp *intel_dp);

static void pps_lock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	/*
	 * See vlv_power_sequencer_reset() why we need
	 * a power domain reference here.
	 */
	power_domain = intel_display_port_power_domain(encoder);
	intel_display_power_get(dev_priv, power_domain);

	mutex_lock(&dev_priv->pps_mutex);
}

static void pps_unlock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;

	mutex_unlock(&dev_priv->pps_mutex);

	power_domain = intel_display_port_power_domain(encoder);
	intel_display_power_put(dev_priv, power_domain);
}

static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe = intel_dp->pps_pipe;
	bool pll_enabled, release_cl_override = false;
	enum dpio_phy phy = DPIO_PHY(pipe);
	enum dpio_channel ch = vlv_pipe_to_channel(pipe);
	uint32_t DP;

	if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
		 "skipping pipe %c power sequencer kick due to port %c being active\n",
		 pipe_name(pipe), port_name(intel_dig_port->port)))
		return;

	DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
		      pipe_name(pipe), port_name(intel_dig_port->port));

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	DP |= DP_PORT_WIDTH(1);
	DP |= DP_LINK_TRAIN_PAT_1;

	if (IS_CHERRYVIEW(dev))
		DP |= DP_PIPE_SELECT_CHV(pipe);
	else if (pipe == PIPE_B)
		DP |= DP_PIPEB_SELECT;

	pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;

	/*
	 * The DPLL for the pipe must be enabled for this to work.
	 * So enable it temporarily if it's not already enabled.
	 */
	if (!pll_enabled) {
		release_cl_override = IS_CHERRYVIEW(dev) &&
			!chv_phy_powergate_ch(dev_priv, phy, ch, true);

		vlv_force_pll_on(dev, pipe, IS_CHERRYVIEW(dev) ?
				 &chv_dpll[0].dpll : &vlv_dpll[0].dpll);
	}

	/*
	 * Similar magic as in intel_dp_enable_port().
	 * We _must_ do this port enable + disable trick
	 * to make this power sequencer lock onto the port.
	 * Otherwise even the VDD force bit won't work.
	 */
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	if (!pll_enabled) {
		vlv_force_pll_off(dev, pipe);

		if (release_cl_override)
			chv_phy_powergate_ch(dev_priv, phy, ch, false);
	}
}

static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
	enum pipe pipe;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	WARN_ON(!is_edp(intel_dp));

	if (intel_dp->pps_pipe != INVALID_PIPE)
		return intel_dp->pps_pipe;

	/*
	 * We don't have power sequencer currently.
	 * Pick one that's not used by other ports.
	 */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list,
			    base.head) {
		struct intel_dp *tmp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		tmp = enc_to_intel_dp(&encoder->base);

		if (tmp->pps_pipe != INVALID_PIPE)
			pipes &= ~(1 << tmp->pps_pipe);
	}

	/*
	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	 */
	if (WARN_ON(pipes == 0))
		pipe = PIPE_A;
	else
		pipe = ffs(pipes) - 1;

	vlv_steal_power_sequencer(dev, pipe);
	intel_dp->pps_pipe = pipe;

	DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe),
		      port_name(intel_dig_port->port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);

	/*
	 * Even vdd force doesn't work until we've made
	 * the power sequencer lock in on the port.
	 */
	vlv_power_sequencer_kick(intel_dp);

	return intel_dp->pps_pipe;
}

typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
			       enum pipe pipe);

static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	return I915_READ(VLV_PIPE_PP_STATUS(pipe)) & PP_ON;
}

static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
				enum pipe pipe)
{
	return I915_READ(VLV_PIPE_PP_CONTROL(pipe)) & EDP_FORCE_VDD;
}

static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
			 enum pipe pipe)
{
	return true;
}

static enum pipe
vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
		     enum port port,
		     vlv_pipe_check pipe_check)
{
	enum pipe pipe;

	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
		u32 port_sel = I915_READ(VLV_PIPE_PP_ON_DELAYS(pipe)) &
			PANEL_PORT_SELECT_MASK;

		if (port_sel != PANEL_PORT_SELECT_VLV(port))
			continue;

		if (!pipe_check(dev_priv, pipe))
			continue;

		return pipe;
	}

	return INVALID_PIPE;
}

static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = intel_dig_port->port;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* try to find a pipe with this port selected */
	/* first pick one where the panel is on */
	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
						  vlv_pipe_has_pp_on);
	/* didn't find one? pick one where vdd is on */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_has_vdd_on);
	/* didn't find one? pick one with just the correct port */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_any);

	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
	if (intel_dp->pps_pipe == INVALID_PIPE) {
		DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
			      port_name(port));
		return;
	}

	DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
		      port_name(port), pipe_name(intel_dp->pps_pipe));

	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
}

void vlv_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_encoder *encoder;

	if (WARN_ON(!IS_VALLEYVIEW(dev)))
		return;

	/*
	 * We can't grab pps_mutex here due to deadlock with power_domain
	 * mutex when power_domain functions are called while holding pps_mutex.
	 * That also means that in order to use pps_pipe the code needs to
	 * hold both a power domain reference and pps_mutex, and the power domain
	 * reference get/put must be done while _not_ holding pps_mutex.
	 * pps_{lock,unlock}() do these steps in the correct order, so one
	 * should use them always.
	 */

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
		struct intel_dp *intel_dp;

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);
		intel_dp->pps_pipe = INVALID_PIPE;
	}
}

static u32 _pp_ctrl_reg(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (IS_BROXTON(dev))
		return BXT_PP_CONTROL(0);
	else if (HAS_PCH_SPLIT(dev))
		return PCH_PP_CONTROL;
	else
		return VLV_PIPE_PP_CONTROL(vlv_power_sequencer_pipe(intel_dp));
}

static u32 _pp_stat_reg(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (IS_BROXTON(dev))
		return BXT_PP_STATUS(0);
	else if (HAS_PCH_SPLIT(dev))
		return PCH_PP_STATUS;
	else
		return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
}

/* Reboot notifier handler to shutdown panel power to guarantee T12 timing.
   This function is only applicable when the panel PM state is not to be tracked. */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
			      void *unused)
{
	struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
						 edp_notifier);
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_div;
	u32 pp_ctrl_reg, pp_div_reg;

	if (!is_edp(intel_dp) || code != SYS_RESTART)
		return 0;

	pps_lock(intel_dp);

	if (IS_VALLEYVIEW(dev)) {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);

		pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
		pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
		pp_div = I915_READ(pp_div_reg);
		pp_div &= PP_REFERENCE_DIVIDER_MASK;

		/* 0x1F write to PP_DIV_REG sets max cycle delay */
		I915_WRITE(pp_div_reg, pp_div | 0x1F);
		I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
		msleep(intel_dp->panel_power_cycle_delay);
	}

	pps_unlock(intel_dp);

	return 0;
}

static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (IS_VALLEYVIEW(dev) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
}

static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (IS_VALLEYVIEW(dev) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
}

static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!is_edp(intel_dp))
		return;

	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
		WARN(1, "eDP powered off while attempting aux channel communication.\n");
		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
			      I915_READ(_pp_stat_reg(intel_dp)),
			      I915_READ(_pp_ctrl_reg(intel_dp)));
	}
}

static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t status;
	bool done;

#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	if (has_aux_irq)
		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
					  msecs_to_jiffies_timeout(10));
	else
		done = wait_for_atomic(C, 10) == 0;
	if (!done)
		DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
			  has_aux_irq);
#undef C

	return status;
}

static uint32_t i9xx_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;

	/*
	 * The clock divider is based off the hrawclk, and would like to run at
	 * 2MHz.  So, take the hrawclk value and divide by 2 and use that
	 */
	return index ? 0 : intel_hrawclk(dev) / 2;
}

static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (index)
		return 0;

	if (intel_dig_port->port == PORT_A) {
		return DIV_ROUND_UP(dev_priv->cdclk_freq, 2000);

	} else {
		return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
	}
}

static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (intel_dig_port->port == PORT_A) {
		if (index)
			return 0;
		return DIV_ROUND_CLOSEST(dev_priv->cdclk_freq, 2000);
	} else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
		/* Workaround for non-ULT HSW */
		switch (index) {
		case 0: return 63;
		case 1: return 72;
		default: return 0;
		}
	} else {
		return index ? 0 : DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
	}
}

static uint32_t vlv_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	return index ? 0 : 100;
}

static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	/*
	 * SKL doesn't need us to program the AUX clock divider (Hardware will
	 * derive the clock from CDCLK automatically). We still implement the
	 * get_aux_clock_divider vfunc to plug-in into the existing code.
	 */
	return index ? 0 : 1;
}

static uint32_t i9xx_get_aux_send_ctl(struct intel_dp *intel_dp,
				      bool has_aux_irq,
				      int send_bytes,
				      uint32_t aux_clock_divider)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	uint32_t precharge, timeout;

	if (IS_GEN6(dev))
		precharge = 3;
	else
		precharge = 5;

	if (IS_BROADWELL(dev) && intel_dp->aux_ch_ctl_reg == DPA_AUX_CH_CTL)
		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
	else
		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;

	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       timeout |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
	       (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}

static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
				     bool has_aux_irq,
				     int send_bytes,
				     uint32_t unused)
{
	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       DP_AUX_CH_CTL_TIME_OUT_1600us |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
}

static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
		const uint8_t *send, int send_bytes,
		uint8_t *recv, int recv_size)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t ch_data = ch_ctl + 4;
	uint32_t aux_clock_divider;
	int i, ret, recv_bytes;
	uint32_t status;
	int try, clock = 0;
	bool has_aux_irq = HAS_AUX_IRQ(dev);
	bool vdd;

	pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = edp_panel_vdd_on(intel_dp);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	pm_qos_update_request(&dev_priv->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	intel_aux_display_runtime_get(dev_priv);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ_NOTRACE(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}

	if (try == 3) {
		static u32 last_status = -1;
		const u32 status = I915_READ(ch_ctl);

		if (status != last_status) {
			WARN(1, "dp_aux_ch not started status 0x%08x\n",
			     status);
			last_status = status;
		}

		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  has_aux_irq,
							  send_bytes,
							  aux_clock_divider);

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				I915_WRITE(ch_data + i,
					   intel_dp_pack_aux(send + i,
							     send_bytes - i));

			/* Send the command and wait for it to complete */
			I915_WRITE(ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);

			/* Clear done status and any errors */
			I915_WRITE(ch_ctl,
				   status |
				   DP_AUX_CH_CTL_DONE |
				   DP_AUX_CH_CTL_TIME_OUT_ERROR |
				   DP_AUX_CH_CTL_RECEIVE_ERROR);

			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
				continue;

			/* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
			 *   400us delay required for errors and timeouts
			 *   Timeout errors from the HW already meet this
			 *   requirement so skip to next iteration
			 */
			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
				usleep_range(400, 500);
				continue;
			}
			if (status & DP_AUX_CH_CTL_DONE)
				goto done;
		}
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		ret = -EBUSY;
		goto out;
	}

done:
	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(I915_READ(ch_data + i),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
	intel_aux_display_runtime_put(dev_priv);

	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp);

	return ret;
}

#define BARE_ADDRESS_SIZE	3
#define HEADER_SIZE		(BARE_ADDRESS_SIZE + 1)
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	uint8_t txbuf[20], rxbuf[20];
	size_t txsize, rxsize;
	int ret;

	txbuf[0] = (msg->request << 4) |
		((msg->address >> 16) & 0xf);
	txbuf[1] = (msg->address >> 8) & 0xff;
	txbuf[2] = msg->address & 0xff;
	txbuf[3] = msg->size - 1;

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (WARN_ON(txsize > 20))
			return -E2BIG;

		memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;

			if (ret > 1) {
				/* Number of bytes written in a short write. */
				ret = clamp_t(int, rxbuf[1], 0, msg->size);
			} else {
				/* Return payload size. */
				ret = msg->size;
			}
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		rxsize = msg->size + 1;

		if (WARN_ON(rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static void
intel_dp_aux_init(struct intel_dp *intel_dp, struct intel_connector *connector)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;
	struct ddi_vbt_port_info *info = &dev_priv->vbt.ddi_port_info[port];
	const char *name = NULL;
	uint32_t porte_aux_ctl_reg = DPA_AUX_CH_CTL;
	int ret;

	/* On SKL we don't have Aux for port E so we rely on VBT to set
	 * a proper alternate aux channel.
	 */
	if (IS_SKYLAKE(dev) && port == PORT_E) {
		switch (info->alternate_aux_channel) {
		case DP_AUX_B:
			porte_aux_ctl_reg = DPB_AUX_CH_CTL;
			break;
		case DP_AUX_C:
			porte_aux_ctl_reg = DPC_AUX_CH_CTL;
			break;
		case DP_AUX_D:
			porte_aux_ctl_reg = DPD_AUX_CH_CTL;
			break;
		case DP_AUX_A:
		default:
			porte_aux_ctl_reg = DPA_AUX_CH_CTL;
		}
	}

	switch (port) {
	case PORT_A:
		intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
		name = "DPDDC-A";
		break;
	case PORT_B:
		intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
		name = "DPDDC-B";
		break;
	case PORT_C:
		intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
		name = "DPDDC-C";
		break;
	case PORT_D:
		intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
		name = "DPDDC-D";
		break;
	case PORT_E:
		intel_dp->aux_ch_ctl_reg = porte_aux_ctl_reg;
		name = "DPDDC-E";
		break;
	default:
		BUG();
	}

	/*
	 * The AUX_CTL register is usually DP_CTL + 0x10.
	 *
	 * On Haswell and Broadwell though:
	 *   - Both port A DDI_BUF_CTL and DDI_AUX_CTL are on the CPU
	 *   - Port B/C/D AUX channels are on the PCH, DDI_BUF_CTL on the CPU
	 *
	 * Skylake moves AUX_CTL back next to DDI_BUF_CTL, on the CPU.
	 */
	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev) && port != PORT_E)
		intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;

	intel_dp->aux.name = name;
	intel_dp->aux.dev = dev->dev;
	intel_dp->aux.transfer = intel_dp_aux_transfer;

	DRM_DEBUG_KMS("registering %s bus for %s\n", name,
		      connector->base.kdev->kobj.name);

	ret = drm_dp_aux_register(&intel_dp->aux);
	if (ret < 0) {
		DRM_ERROR("drm_dp_aux_register() for %s failed (%d)\n",
			  name, ret);
		return;
	}

	ret = sysfs_create_link(&connector->base.kdev->kobj,
				&intel_dp->aux.ddc.dev.kobj,
				intel_dp->aux.ddc.dev.kobj.name);
	if (ret < 0) {
		DRM_ERROR("sysfs_create_link() for %s failed (%d)\n", name, ret);
		drm_dp_aux_unregister(&intel_dp->aux);
	}
}

static void
intel_dp_connector_unregister(struct intel_connector *intel_connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(&intel_connector->base);

	if (!intel_connector->mst_port)
		sysfs_remove_link(&intel_connector->base.kdev->kobj,
				  intel_dp->aux.ddc.dev.kobj.name);
	intel_connector_unregister(intel_connector);
}

static void
skl_edp_set_pll_config(struct intel_crtc_state *pipe_config)
{
	u32 ctrl1;

	memset(&pipe_config->dpll_hw_state, 0,
	       sizeof(pipe_config->dpll_hw_state));

	pipe_config->ddi_pll_sel = SKL_DPLL0;
	pipe_config->dpll_hw_state.cfgcr1 = 0;
	pipe_config->dpll_hw_state.cfgcr2 = 0;

	ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
	switch (pipe_config->port_clock / 2) {
	case 81000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
					      SKL_DPLL0);
		break;
	case 135000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350,
					      SKL_DPLL0);
		break;
	case 270000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700,
					      SKL_DPLL0);
		break;
	case 162000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620,
					      SKL_DPLL0);
		break;
	/* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which
	   results in CDCLK change. Need to handle the change of CDCLK by
	   disabling pipes and re-enabling them */
	case 108000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
					      SKL_DPLL0);
		break;
	case 216000:
		ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160,
					      SKL_DPLL0);
		break;

	}
	pipe_config->dpll_hw_state.ctrl1 = ctrl1;
}

void
hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config)
{
	memset(&pipe_config->dpll_hw_state, 0,
	       sizeof(pipe_config->dpll_hw_state));

	switch (pipe_config->port_clock / 2) {
	case 81000:
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_810;
		break;
	case 135000:
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_1350;
		break;
	case 270000:
		pipe_config->ddi_pll_sel = PORT_CLK_SEL_LCPLL_2700;
		break;
	}
}

static int
intel_dp_sink_rates(struct intel_dp *intel_dp, const int **sink_rates)
{
	if (intel_dp->num_sink_rates) {
		*sink_rates = intel_dp->sink_rates;
		return intel_dp->num_sink_rates;
	}

	*sink_rates = default_rates;

	return (intel_dp_max_link_bw(intel_dp) >> 3) + 1;
}

static bool intel_dp_source_supports_hbr2(struct drm_device *dev)
{
	/* WaDisableHBR2:skl */
	if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0)
		return false;

	if ((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || IS_BROADWELL(dev) ||
	    (INTEL_INFO(dev)->gen >= 9))
		return true;
	else
		return false;
}

static int
intel_dp_source_rates(struct drm_device *dev, const int **source_rates)
{
	int size;

	if (IS_BROXTON(dev)) {
		*source_rates = bxt_rates;
		size = ARRAY_SIZE(bxt_rates);
	} else if (IS_SKYLAKE(dev)) {
		*source_rates = skl_rates;
		size = ARRAY_SIZE(skl_rates);
	} else {
		*source_rates = default_rates;
		size = ARRAY_SIZE(default_rates);
	}

	/* This depends on the fact that 5.4 is last value in the array */
	if (!intel_dp_source_supports_hbr2(dev))
		size--;

	return size;
}

static void
intel_dp_set_clock(struct intel_encoder *encoder,
		   struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = encoder->base.dev;
	const struct dp_link_dpll *divisor = NULL;
	int i, count = 0;

	if (IS_G4X(dev)) {
		divisor = gen4_dpll;
		count = ARRAY_SIZE(gen4_dpll);
	} else if (HAS_PCH_SPLIT(dev)) {
		divisor = pch_dpll;
		count = ARRAY_SIZE(pch_dpll);
	} else if (IS_CHERRYVIEW(dev)) {
		divisor = chv_dpll;
		count = ARRAY_SIZE(chv_dpll);
	} else if (IS_VALLEYVIEW(dev)) {
		divisor = vlv_dpll;
		count = ARRAY_SIZE(vlv_dpll);
	}

	if (divisor && count) {
		for (i = 0; i < count; i++) {
			if (pipe_config->port_clock == divisor[i].clock) {
				pipe_config->dpll = divisor[i].dpll;
				pipe_config->clock_set = true;
				break;
			}
		}
	}
}

static int intersect_rates(const int *source_rates, int source_len,
			   const int *sink_rates, int sink_len,
			   int *common_rates)
{
	int i = 0, j = 0, k = 0;

	while (i < source_len && j < sink_len) {
		if (source_rates[i] == sink_rates[j]) {
			if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
				return k;
			common_rates[k] = source_rates[i];
			++k;
			++i;
			++j;
		} else if (source_rates[i] < sink_rates[j]) {
			++i;
		} else {
			++j;
		}
	}
	return k;
}

static int intel_dp_common_rates(struct intel_dp *intel_dp,
				 int *common_rates)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	const int *source_rates, *sink_rates;
	int source_len, sink_len;

	sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
	source_len = intel_dp_source_rates(dev, &source_rates);

	return intersect_rates(source_rates, source_len,
			       sink_rates, sink_len,
			       common_rates);
}

static void snprintf_int_array(char *str, size_t len,
			       const int *array, int nelem)
{
	int i;

	str[0] = '\0';

	for (i = 0; i < nelem; i++) {
		int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
		if (r >= len)
			return;
		str += r;
		len -= r;
	}
}

static void intel_dp_print_rates(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	const int *source_rates, *sink_rates;
	int source_len, sink_len, common_len;
	int common_rates[DP_MAX_SUPPORTED_RATES];
	char str[128]; /* FIXME: too big for stack? */

	if ((drm_debug & DRM_UT_KMS) == 0)
		return;

	source_len = intel_dp_source_rates(dev, &source_rates);
	snprintf_int_array(str, sizeof(str), source_rates, source_len);
	DRM_DEBUG_KMS("source rates: %s\n", str);

	sink_len = intel_dp_sink_rates(intel_dp, &sink_rates);
	snprintf_int_array(str, sizeof(str), sink_rates, sink_len);
	DRM_DEBUG_KMS("sink rates: %s\n", str);

	common_len = intel_dp_common_rates(intel_dp, common_rates);
	snprintf_int_array(str, sizeof(str), common_rates, common_len);
	DRM_DEBUG_KMS("common rates: %s\n", str);
}

static int rate_to_index(int find, const int *rates)
{
	int i = 0;

	for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i)
		if (find == rates[i])
			break;

	return i;
}

int
intel_dp_max_link_rate(struct intel_dp *intel_dp)
{
	int rates[DP_MAX_SUPPORTED_RATES] = {};
	int len;

	len = intel_dp_common_rates(intel_dp, rates);
	if (WARN_ON(len <= 0))
		return 162000;

	return rates[rate_to_index(0, rates) - 1];
}

int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
{
	return rate_to_index(rate, intel_dp->sink_rates);
}

static void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
				  uint8_t *link_bw, uint8_t *rate_select)
{
	if (intel_dp->num_sink_rates) {
		*link_bw = 0;
		*rate_select =
			intel_dp_rate_select(intel_dp, port_clock);
	} else {
		*link_bw = drm_dp_link_rate_to_bw_code(port_clock);
		*rate_select = 0;
	}
}

bool
intel_dp_compute_config(struct intel_encoder *encoder,
			struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	int lane_count, clock;
	int min_lane_count = 1;
	int max_lane_count = intel_dp_max_lane_count(intel_dp);
	/* Conveniently, the link BW constants become indices with a shift...*/
	int min_clock = 0;
	int max_clock;
	int bpp, mode_rate;
	int link_avail, link_clock;
	int common_rates[DP_MAX_SUPPORTED_RATES] = {};
	int common_len;
	uint8_t link_bw, rate_select;

	common_len = intel_dp_common_rates(intel_dp, common_rates);

	/* No common link rates between source and sink */
	WARN_ON(common_len <= 0);

	max_clock = common_len - 1;

	if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
		pipe_config->has_pch_encoder = true;

	pipe_config->has_dp_encoder = true;
	pipe_config->has_drrs = false;
	pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;

	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
				       adjusted_mode);

		if (INTEL_INFO(dev)->gen >= 9) {
			int ret;
			ret = skl_update_scaler_crtc(pipe_config);
			if (ret)
				return ret;
		}

		if (!HAS_PCH_SPLIT(dev))
			intel_gmch_panel_fitting(intel_crtc, pipe_config,
						 intel_connector->panel.fitting_mode);
		else
			intel_pch_panel_fitting(intel_crtc, pipe_config,
						intel_connector->panel.fitting_mode);
	}

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
		return false;

	DRM_DEBUG_KMS("DP link computation with max lane count %i "
		      "max bw %d pixel clock %iKHz\n",
		      max_lane_count, common_rates[max_clock],
		      adjusted_mode->crtc_clock);

	/* Walk through all bpp values. Luckily they're all nicely spaced with 2
	 * bpc in between. */
	bpp = pipe_config->pipe_bpp;
	if (is_edp(intel_dp)) {

		/* Get bpp from vbt only for panels that don't have bpp in edid */
		if (intel_connector->base.display_info.bpc == 0 &&
		    (dev_priv->vbt.edp_bpp && dev_priv->vbt.edp_bpp < bpp)) {
			DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
				      dev_priv->vbt.edp_bpp);
			bpp = dev_priv->vbt.edp_bpp;
		}

		/*
		 * Use the maximum clock and number of lanes the eDP panel
		 * advertises being capable of. The panels are generally
		 * designed to support only a single clock and lane
		 * configuration, and typically these values correspond to the
		 * native resolution of the panel.
		 */
		min_lane_count = max_lane_count;
		min_clock = max_clock;
	}

	for (; bpp >= 6*3; bpp -= 2*3) {
		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
						   bpp);

		for (clock = min_clock; clock <= max_clock; clock++) {
			for (lane_count = min_lane_count;
			     lane_count <= max_lane_count;
			     lane_count <<= 1) {

				link_clock = common_rates[clock];
				link_avail = intel_dp_max_data_rate(link_clock,
								    lane_count);

				if (mode_rate <= link_avail) {
					goto found;
				}
			}
		}
	}

	return false;

found:
	if (intel_dp->color_range_auto) {
		/*
		 * See:
		 * CEA-861-E - 5.1 Default Encoding Parameters
		 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
		 */
		pipe_config->limited_color_range =
			bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1;
	} else {
		pipe_config->limited_color_range =
			intel_dp->limited_color_range;
	}

	pipe_config->lane_count = lane_count;

	pipe_config->pipe_bpp = bpp;
	pipe_config->port_clock = common_rates[clock];

	intel_dp_compute_rate(intel_dp, pipe_config->port_clock,
			      &link_bw, &rate_select);

	DRM_DEBUG_KMS("DP link bw %02x rate select %02x lane count %d clock %d bpp %d\n",
		      link_bw, rate_select, pipe_config->lane_count,
		      pipe_config->port_clock, bpp);
	DRM_DEBUG_KMS("DP link bw required %i available %i\n",
		      mode_rate, link_avail);

	intel_link_compute_m_n(bpp, lane_count,
			       adjusted_mode->crtc_clock,
			       pipe_config->port_clock,
			       &pipe_config->dp_m_n);

	if (intel_connector->panel.downclock_mode != NULL &&
	    dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
		pipe_config->has_drrs = true;
		intel_link_compute_m_n(bpp, lane_count,
				       intel_connector->panel.downclock_mode->clock,
				       pipe_config->port_clock,
				       &pipe_config->dp_m2_n2);
	}

	if (IS_SKYLAKE(dev) && is_edp(intel_dp))
		skl_edp_set_pll_config(pipe_config);
	else if (IS_BROXTON(dev))
		/* handled in ddi */;
	else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		hsw_dp_set_ddi_pll_sel(pipe_config);
	else
		intel_dp_set_clock(encoder, pipe_config);

	return true;
}

static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	DRM_DEBUG_KMS("eDP PLL enable for clock %d\n",
		      crtc->config->port_clock);
	dpa_ctl = I915_READ(DP_A);
	dpa_ctl &= ~DP_PLL_FREQ_MASK;

	if (crtc->config->port_clock == 162000) {
		/* For a long time we've carried around a ILK-DevA w/a for the
		 * 160MHz clock. If we're really unlucky, it's still required.
		 */
		DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
		dpa_ctl |= DP_PLL_FREQ_160MHZ;
		intel_dp->DP |= DP_PLL_FREQ_160MHZ;
	} else {
		dpa_ctl |= DP_PLL_FREQ_270MHZ;
		intel_dp->DP |= DP_PLL_FREQ_270MHZ;
	}

	I915_WRITE(DP_A, dpa_ctl);

	POSTING_READ(DP_A);
	udelay(500);
}

void intel_dp_set_link_params(struct intel_dp *intel_dp,
			      const struct intel_crtc_state *pipe_config)
{
	intel_dp->link_rate = pipe_config->port_clock;
	intel_dp->lane_count = pipe_config->lane_count;
}

static void intel_dp_prepare(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;

	intel_dp_set_link_params(intel_dp, crtc->config);

	/*
	 * There are four kinds of DP registers:
	 *
	 * 	IBX PCH
	 * 	SNB CPU
	 *	IVB CPU
	 * 	CPT PCH
	 *
	 * IBX PCH and CPU are the same for almost everything,
	 * except that the CPU DP PLL is configured in this
	 * register
	 *
	 * CPT PCH is quite different, having many bits moved
	 * to the TRANS_DP_CTL register instead. That
	 * configuration happens (oddly) in ironlake_pch_enable
	 */

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;

	/* Handle DP bits in common between all three register formats */
	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	intel_dp->DP |= DP_PORT_WIDTH(crtc->config->lane_count);

	if (crtc->config->has_audio)
		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;

	/* Split out the IBX/CPU vs CPT settings */

	if (IS_GEN7(dev) && port == PORT_A) {
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		intel_dp->DP |= crtc->pipe << 29;
	} else if (HAS_PCH_CPT(dev) && port != PORT_A) {
		u32 trans_dp;

		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			trans_dp |= TRANS_DP_ENH_FRAMING;
		else
			trans_dp &= ~TRANS_DP_ENH_FRAMING;
		I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
	} else {
		if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
		    crtc->config->limited_color_range)
			intel_dp->DP |= DP_COLOR_RANGE_16_235;

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		if (IS_CHERRYVIEW(dev))
			intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
		else if (crtc->pipe == PIPE_B)
			intel_dp->DP |= DP_PIPEB_SELECT;
	}
}
1666
ffd6749d
PZ
1667#define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
1668#define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
99ea7127 1669
1a5ef5b7
PZ
1670#define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
1671#define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
99ea7127 1672
ffd6749d
PZ
1673#define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1674#define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
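/*
 * Each MASK/VALUE pair above is consumed by wait_panel_status() below,
 * which polls PP_STATUS until (status & mask) == value or a 5 second
 * timeout expires. For example, wait_panel_on() waits until PP_ON is set,
 * no power sequence is in progress and the sequencer reports ON_IDLE.
 */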
99ea7127 1675
4be73780 1676static void wait_panel_status(struct intel_dp *intel_dp,
99ea7127
KP
1677 u32 mask,
1678 u32 value)
bd943159 1679{
30add22d 1680 struct drm_device *dev = intel_dp_to_dev(intel_dp);
99ea7127 1681 struct drm_i915_private *dev_priv = dev->dev_private;
453c5420
JB
1682 u32 pp_stat_reg, pp_ctrl_reg;
1683
e39b999a
VS
1684 lockdep_assert_held(&dev_priv->pps_mutex);
1685
bf13e81b
JN
1686 pp_stat_reg = _pp_stat_reg(intel_dp);
1687 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
32ce697c 1688
99ea7127 1689 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
453c5420
JB
1690 mask, value,
1691 I915_READ(pp_stat_reg),
1692 I915_READ(pp_ctrl_reg));
32ce697c 1693
453c5420 1694 if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
99ea7127 1695 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
453c5420
JB
1696 I915_READ(pp_stat_reg),
1697 I915_READ(pp_ctrl_reg));
32ce697c 1698 }
54c136d4
CW
1699
1700 DRM_DEBUG_KMS("Wait complete\n");
99ea7127 1701}
32ce697c 1702
4be73780 1703static void wait_panel_on(struct intel_dp *intel_dp)
99ea7127
KP
1704{
1705 DRM_DEBUG_KMS("Wait for panel power on\n");
4be73780 1706 wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
bd943159
KP
1707}
1708
4be73780 1709static void wait_panel_off(struct intel_dp *intel_dp)
99ea7127
KP
1710{
1711 DRM_DEBUG_KMS("Wait for panel power off time\n");
4be73780 1712 wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
99ea7127
KP
1713}
1714
4be73780 1715static void wait_panel_power_cycle(struct intel_dp *intel_dp)
99ea7127
KP
1716{
1717 DRM_DEBUG_KMS("Wait for panel power cycle\n");
dce56b3c
PZ
1718
 1719	/* When we disable the VDD override bit last, we have to do the
 1720	 * power cycle wait manually. */
1721 wait_remaining_ms_from_jiffies(intel_dp->last_power_cycle,
1722 intel_dp->panel_power_cycle_delay);
1723
4be73780 1724 wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
99ea7127
KP
1725}
1726
4be73780 1727static void wait_backlight_on(struct intel_dp *intel_dp)
dce56b3c
PZ
1728{
1729 wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
1730 intel_dp->backlight_on_delay);
1731}
1732
4be73780 1733static void edp_wait_backlight_off(struct intel_dp *intel_dp)
dce56b3c
PZ
1734{
1735 wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
1736 intel_dp->backlight_off_delay);
1737}
99ea7127 1738
832dd3c1
KP
1739/* Read the current pp_control value, unlocking the register if it
1740 * is locked
1741 */
1742
453c5420 1743static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
832dd3c1 1744{
453c5420
JB
1745 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1746 struct drm_i915_private *dev_priv = dev->dev_private;
1747 u32 control;
832dd3c1 1748
e39b999a
VS
1749 lockdep_assert_held(&dev_priv->pps_mutex);
1750
bf13e81b 1751 control = I915_READ(_pp_ctrl_reg(intel_dp));
b0a08bec
VK
1752 if (!IS_BROXTON(dev)) {
1753 control &= ~PANEL_UNLOCK_MASK;
1754 control |= PANEL_UNLOCK_REGS;
1755 }
832dd3c1 1756 return control;
bd943159
KP
1757}
1758
951468f3
VS
1759/*
1760 * Must be paired with edp_panel_vdd_off().
1761 * Must hold pps_mutex around the whole on/off sequence.
1762 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1763 */
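/*
 * A typical low-level sequence under the PPS lock, mirroring
 * intel_enable_dp() further down in this file (sketch only):
 *
 *	pps_lock(intel_dp);
 *	edp_panel_vdd_on(intel_dp);
 *	edp_panel_on(intel_dp);
 *	edp_panel_vdd_off(intel_dp, true);
 *	pps_unlock(intel_dp);
 */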
1e0560e0 1764static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
5d613501 1765{
30add22d 1766 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4e6e1a54
ID
1767 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1768 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5d613501 1769 struct drm_i915_private *dev_priv = dev->dev_private;
4e6e1a54 1770 enum intel_display_power_domain power_domain;
5d613501 1771 u32 pp;
453c5420 1772 u32 pp_stat_reg, pp_ctrl_reg;
adddaaf4 1773 bool need_to_disable = !intel_dp->want_panel_vdd;
5d613501 1774
e39b999a
VS
1775 lockdep_assert_held(&dev_priv->pps_mutex);
1776
97af61f5 1777 if (!is_edp(intel_dp))
adddaaf4 1778 return false;
bd943159 1779
2c623c11 1780 cancel_delayed_work(&intel_dp->panel_vdd_work);
bd943159 1781 intel_dp->want_panel_vdd = true;
99ea7127 1782
4be73780 1783 if (edp_have_panel_vdd(intel_dp))
adddaaf4 1784 return need_to_disable;
b0665d57 1785
4e6e1a54
ID
1786 power_domain = intel_display_port_power_domain(intel_encoder);
1787 intel_display_power_get(dev_priv, power_domain);
e9cb81a2 1788
3936fcf4
VS
1789 DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
1790 port_name(intel_dig_port->port));
bd943159 1791
4be73780
DV
1792 if (!edp_have_panel_power(intel_dp))
1793 wait_panel_power_cycle(intel_dp);
99ea7127 1794
453c5420 1795 pp = ironlake_get_pp_control(intel_dp);
5d613501 1796 pp |= EDP_FORCE_VDD;
ebf33b18 1797
bf13e81b
JN
1798 pp_stat_reg = _pp_stat_reg(intel_dp);
1799 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
1800
1801 I915_WRITE(pp_ctrl_reg, pp);
1802 POSTING_READ(pp_ctrl_reg);
1803 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1804 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
ebf33b18
KP
1805 /*
1806 * If the panel wasn't on, delay before accessing aux channel
1807 */
4be73780 1808 if (!edp_have_panel_power(intel_dp)) {
3936fcf4
VS
1809 DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
1810 port_name(intel_dig_port->port));
f01eca2e 1811 msleep(intel_dp->panel_power_up_delay);
f01eca2e 1812 }
adddaaf4
JN
1813
1814 return need_to_disable;
1815}
1816
951468f3
VS
1817/*
1818 * Must be paired with intel_edp_panel_vdd_off() or
1819 * intel_edp_panel_off().
1820 * Nested calls to these functions are not allowed since
1821 * we drop the lock. Caller must use some higher level
1822 * locking to prevent nested calls from other threads.
1823 */
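/*
 * A typical high-level pairing, as used by intel_disable_dp() further
 * down in this file (sketch only):
 *
 *	intel_edp_panel_vdd_on(intel_dp);
 *	intel_edp_backlight_off(intel_dp);
 *	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
 *	intel_edp_panel_off(intel_dp);
 */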
b80d6c78 1824void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
adddaaf4 1825{
c695b6b6 1826 bool vdd;
adddaaf4 1827
c695b6b6
VS
1828 if (!is_edp(intel_dp))
1829 return;
1830
773538e8 1831 pps_lock(intel_dp);
c695b6b6 1832 vdd = edp_panel_vdd_on(intel_dp);
773538e8 1833 pps_unlock(intel_dp);
c695b6b6 1834
e2c719b7 1835 I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
3936fcf4 1836 port_name(dp_to_dig_port(intel_dp)->port));
5d613501
JB
1837}
1838
4be73780 1839static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
5d613501 1840{
30add22d 1841 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5d613501 1842 struct drm_i915_private *dev_priv = dev->dev_private;
be2c9196
VS
1843 struct intel_digital_port *intel_dig_port =
1844 dp_to_dig_port(intel_dp);
1845 struct intel_encoder *intel_encoder = &intel_dig_port->base;
1846 enum intel_display_power_domain power_domain;
5d613501 1847 u32 pp;
453c5420 1848 u32 pp_stat_reg, pp_ctrl_reg;
5d613501 1849
e39b999a 1850 lockdep_assert_held(&dev_priv->pps_mutex);
a0e99e68 1851
15e899a0 1852 WARN_ON(intel_dp->want_panel_vdd);
4e6e1a54 1853
15e899a0 1854 if (!edp_have_panel_vdd(intel_dp))
be2c9196 1855 return;
b0665d57 1856
3936fcf4
VS
1857 DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
1858 port_name(intel_dig_port->port));
bd943159 1859
be2c9196
VS
1860 pp = ironlake_get_pp_control(intel_dp);
1861 pp &= ~EDP_FORCE_VDD;
453c5420 1862
be2c9196
VS
1863 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1864 pp_stat_reg = _pp_stat_reg(intel_dp);
99ea7127 1865
be2c9196
VS
1866 I915_WRITE(pp_ctrl_reg, pp);
1867 POSTING_READ(pp_ctrl_reg);
90791a5c 1868
be2c9196
VS
1869 /* Make sure sequencer is idle before allowing subsequent activity */
1870 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1871 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
e9cb81a2 1872
be2c9196
VS
1873 if ((pp & POWER_TARGET_ON) == 0)
1874 intel_dp->last_power_cycle = jiffies;
e9cb81a2 1875
be2c9196
VS
1876 power_domain = intel_display_port_power_domain(intel_encoder);
1877 intel_display_power_put(dev_priv, power_domain);
bd943159 1878}
5d613501 1879
4be73780 1880static void edp_panel_vdd_work(struct work_struct *__work)
bd943159
KP
1881{
1882 struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1883 struct intel_dp, panel_vdd_work);
bd943159 1884
773538e8 1885 pps_lock(intel_dp);
15e899a0
VS
1886 if (!intel_dp->want_panel_vdd)
1887 edp_panel_vdd_off_sync(intel_dp);
773538e8 1888 pps_unlock(intel_dp);
bd943159
KP
1889}
1890
aba86890
ID
1891static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
1892{
1893 unsigned long delay;
1894
1895 /*
 1896	 * Queue the timer to fire a long time from now (relative to the
 1897	 * panel power cycle delay) to keep the panel power up across a sequence of
1898 * operations.
1899 */
1900 delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
1901 schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
1902}
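/*
 * For example, with a panel_power_cycle_delay of, say, 500 ms (the actual
 * value varies per panel), the VDD off work is deferred by roughly
 * 2.5 seconds, keeping VDD up across a burst of back-to-back operations.
 */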
1903
951468f3
VS
1904/*
1905 * Must be paired with edp_panel_vdd_on().
1906 * Must hold pps_mutex around the whole on/off sequence.
1907 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
1908 */
4be73780 1909static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
bd943159 1910{
e39b999a
VS
1911 struct drm_i915_private *dev_priv =
1912 intel_dp_to_dev(intel_dp)->dev_private;
1913
1914 lockdep_assert_held(&dev_priv->pps_mutex);
1915
97af61f5
KP
1916 if (!is_edp(intel_dp))
1917 return;
5d613501 1918
e2c719b7 1919 I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
3936fcf4 1920 port_name(dp_to_dig_port(intel_dp)->port));
f2e8b18a 1921
bd943159
KP
1922 intel_dp->want_panel_vdd = false;
1923
aba86890 1924 if (sync)
4be73780 1925 edp_panel_vdd_off_sync(intel_dp);
aba86890
ID
1926 else
1927 edp_panel_vdd_schedule_off(intel_dp);
5d613501
JB
1928}
1929
9f0fb5be 1930static void edp_panel_on(struct intel_dp *intel_dp)
9934c132 1931{
30add22d 1932 struct drm_device *dev = intel_dp_to_dev(intel_dp);
9934c132 1933 struct drm_i915_private *dev_priv = dev->dev_private;
99ea7127 1934 u32 pp;
453c5420 1935 u32 pp_ctrl_reg;
9934c132 1936
9f0fb5be
VS
1937 lockdep_assert_held(&dev_priv->pps_mutex);
1938
97af61f5 1939 if (!is_edp(intel_dp))
bd943159 1940 return;
99ea7127 1941
3936fcf4
VS
1942 DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
1943 port_name(dp_to_dig_port(intel_dp)->port));
e39b999a 1944
e7a89ace
VS
1945 if (WARN(edp_have_panel_power(intel_dp),
1946 "eDP port %c panel power already on\n",
1947 port_name(dp_to_dig_port(intel_dp)->port)))
9f0fb5be 1948 return;
9934c132 1949
4be73780 1950 wait_panel_power_cycle(intel_dp);
37c6c9b0 1951
bf13e81b 1952 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420 1953 pp = ironlake_get_pp_control(intel_dp);
05ce1a49
KP
1954 if (IS_GEN5(dev)) {
1955 /* ILK workaround: disable reset around power sequence */
1956 pp &= ~PANEL_POWER_RESET;
bf13e81b
JN
1957 I915_WRITE(pp_ctrl_reg, pp);
1958 POSTING_READ(pp_ctrl_reg);
05ce1a49 1959 }
37c6c9b0 1960
1c0ae80a 1961 pp |= POWER_TARGET_ON;
99ea7127
KP
1962 if (!IS_GEN5(dev))
1963 pp |= PANEL_POWER_RESET;
1964
453c5420
JB
1965 I915_WRITE(pp_ctrl_reg, pp);
1966 POSTING_READ(pp_ctrl_reg);
9934c132 1967
4be73780 1968 wait_panel_on(intel_dp);
dce56b3c 1969 intel_dp->last_power_on = jiffies;
9934c132 1970
05ce1a49
KP
1971 if (IS_GEN5(dev)) {
1972 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
bf13e81b
JN
1973 I915_WRITE(pp_ctrl_reg, pp);
1974 POSTING_READ(pp_ctrl_reg);
05ce1a49 1975 }
9f0fb5be 1976}
e39b999a 1977
9f0fb5be
VS
1978void intel_edp_panel_on(struct intel_dp *intel_dp)
1979{
1980 if (!is_edp(intel_dp))
1981 return;
1982
1983 pps_lock(intel_dp);
1984 edp_panel_on(intel_dp);
773538e8 1985 pps_unlock(intel_dp);
9934c132
JB
1986}
1987
9f0fb5be
VS
1988
1989static void edp_panel_off(struct intel_dp *intel_dp)
9934c132 1990{
4e6e1a54
ID
1991 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1992 struct intel_encoder *intel_encoder = &intel_dig_port->base;
30add22d 1993 struct drm_device *dev = intel_dp_to_dev(intel_dp);
9934c132 1994 struct drm_i915_private *dev_priv = dev->dev_private;
4e6e1a54 1995 enum intel_display_power_domain power_domain;
99ea7127 1996 u32 pp;
453c5420 1997 u32 pp_ctrl_reg;
9934c132 1998
9f0fb5be
VS
1999 lockdep_assert_held(&dev_priv->pps_mutex);
2000
97af61f5
KP
2001 if (!is_edp(intel_dp))
2002 return;
37c6c9b0 2003
3936fcf4
VS
2004 DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
2005 port_name(dp_to_dig_port(intel_dp)->port));
37c6c9b0 2006
3936fcf4
VS
2007 WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
2008 port_name(dp_to_dig_port(intel_dp)->port));
24f3e092 2009
453c5420 2010 pp = ironlake_get_pp_control(intel_dp);
35a38556
DV
 2011	/* We need to switch off panel power _and_ force vdd; otherwise some
2012 * panels get very unhappy and cease to work. */
b3064154
PJ
2013 pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
2014 EDP_BLC_ENABLE);
453c5420 2015
bf13e81b 2016 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420 2017
849e39f5
PZ
2018 intel_dp->want_panel_vdd = false;
2019
453c5420
JB
2020 I915_WRITE(pp_ctrl_reg, pp);
2021 POSTING_READ(pp_ctrl_reg);
9934c132 2022
dce56b3c 2023 intel_dp->last_power_cycle = jiffies;
4be73780 2024 wait_panel_off(intel_dp);
849e39f5
PZ
2025
2026 /* We got a reference when we enabled the VDD. */
4e6e1a54
ID
2027 power_domain = intel_display_port_power_domain(intel_encoder);
2028 intel_display_power_put(dev_priv, power_domain);
9f0fb5be 2029}
e39b999a 2030
9f0fb5be
VS
2031void intel_edp_panel_off(struct intel_dp *intel_dp)
2032{
2033 if (!is_edp(intel_dp))
2034 return;
e39b999a 2035
9f0fb5be
VS
2036 pps_lock(intel_dp);
2037 edp_panel_off(intel_dp);
773538e8 2038 pps_unlock(intel_dp);
9934c132
JB
2039}
2040
1250d107
JN
2041/* Enable backlight in the panel power control. */
2042static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
32f9d658 2043{
da63a9f2
PZ
2044 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2045 struct drm_device *dev = intel_dig_port->base.base.dev;
32f9d658
ZW
2046 struct drm_i915_private *dev_priv = dev->dev_private;
2047 u32 pp;
453c5420 2048 u32 pp_ctrl_reg;
32f9d658 2049
01cb9ea6
JB
2050 /*
2051 * If we enable the backlight right away following a panel power
2052 * on, we may see slight flicker as the panel syncs with the eDP
2053 * link. So delay a bit to make sure the image is solid before
2054 * allowing it to appear.
2055 */
4be73780 2056 wait_backlight_on(intel_dp);
e39b999a 2057
773538e8 2058 pps_lock(intel_dp);
e39b999a 2059
453c5420 2060 pp = ironlake_get_pp_control(intel_dp);
32f9d658 2061 pp |= EDP_BLC_ENABLE;
453c5420 2062
bf13e81b 2063 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
2064
2065 I915_WRITE(pp_ctrl_reg, pp);
2066 POSTING_READ(pp_ctrl_reg);
e39b999a 2067
773538e8 2068 pps_unlock(intel_dp);
32f9d658
ZW
2069}
2070
1250d107
JN
2071/* Enable backlight PWM and backlight PP control. */
2072void intel_edp_backlight_on(struct intel_dp *intel_dp)
2073{
2074 if (!is_edp(intel_dp))
2075 return;
2076
2077 DRM_DEBUG_KMS("\n");
2078
2079 intel_panel_enable_backlight(intel_dp->attached_connector);
2080 _intel_edp_backlight_on(intel_dp);
2081}
2082
2083/* Disable backlight in the panel power control. */
2084static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
32f9d658 2085{
30add22d 2086 struct drm_device *dev = intel_dp_to_dev(intel_dp);
32f9d658
ZW
2087 struct drm_i915_private *dev_priv = dev->dev_private;
2088 u32 pp;
453c5420 2089 u32 pp_ctrl_reg;
32f9d658 2090
f01eca2e
KP
2091 if (!is_edp(intel_dp))
2092 return;
2093
773538e8 2094 pps_lock(intel_dp);
e39b999a 2095
453c5420 2096 pp = ironlake_get_pp_control(intel_dp);
32f9d658 2097 pp &= ~EDP_BLC_ENABLE;
453c5420 2098
bf13e81b 2099 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
453c5420
JB
2100
2101 I915_WRITE(pp_ctrl_reg, pp);
2102 POSTING_READ(pp_ctrl_reg);
f7d2323c 2103
773538e8 2104 pps_unlock(intel_dp);
e39b999a
VS
2105
2106 intel_dp->last_backlight_off = jiffies;
f7d2323c 2107 edp_wait_backlight_off(intel_dp);
1250d107 2108}
f7d2323c 2109
1250d107
JN
2110/* Disable backlight PP control and backlight PWM. */
2111void intel_edp_backlight_off(struct intel_dp *intel_dp)
2112{
2113 if (!is_edp(intel_dp))
2114 return;
2115
2116 DRM_DEBUG_KMS("\n");
f7d2323c 2117
1250d107 2118 _intel_edp_backlight_off(intel_dp);
f7d2323c 2119 intel_panel_disable_backlight(intel_dp->attached_connector);
32f9d658 2120}
a4fc5ed6 2121
73580fb7
JN
2122/*
2123 * Hook for controlling the panel power control backlight through the bl_power
2124 * sysfs attribute. Take care to handle multiple calls.
2125 */
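/*
 * Writing 0 (FB_BLANK_UNBLANK) to the registered backlight device's
 * bl_power attribute is expected to reach this hook with enable == true,
 * and any non-zero blank value with enable == false; the exact sysfs path
 * depends on how the backlight device was registered.
 */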
2126static void intel_edp_backlight_power(struct intel_connector *connector,
2127 bool enable)
2128{
2129 struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
e39b999a
VS
2130 bool is_enabled;
2131
773538e8 2132 pps_lock(intel_dp);
e39b999a 2133 is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
773538e8 2134 pps_unlock(intel_dp);
73580fb7
JN
2135
2136 if (is_enabled == enable)
2137 return;
2138
23ba9373
JN
2139 DRM_DEBUG_KMS("panel power control backlight %s\n",
2140 enable ? "enable" : "disable");
73580fb7
JN
2141
2142 if (enable)
2143 _intel_edp_backlight_on(intel_dp);
2144 else
2145 _intel_edp_backlight_off(intel_dp);
2146}
2147
2bd2ad64 2148static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
d240f20f 2149{
da63a9f2
PZ
2150 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2151 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2152 struct drm_device *dev = crtc->dev;
d240f20f
JB
2153 struct drm_i915_private *dev_priv = dev->dev_private;
2154 u32 dpa_ctl;
2155
2bd2ad64
DV
2156 assert_pipe_disabled(dev_priv,
2157 to_intel_crtc(crtc)->pipe);
2158
d240f20f
JB
2159 DRM_DEBUG_KMS("\n");
2160 dpa_ctl = I915_READ(DP_A);
0767935e
DV
2161 WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
2162 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2163
2164 /* We don't adjust intel_dp->DP while tearing down the link, to
2165 * facilitate link retraining (e.g. after hotplug). Hence clear all
2166 * enable bits here to ensure that we don't enable too much. */
2167 intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
2168 intel_dp->DP |= DP_PLL_ENABLE;
2169 I915_WRITE(DP_A, intel_dp->DP);
298b0b39
JB
2170 POSTING_READ(DP_A);
2171 udelay(200);
d240f20f
JB
2172}
2173
2bd2ad64 2174static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
d240f20f 2175{
da63a9f2
PZ
2176 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2177 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
2178 struct drm_device *dev = crtc->dev;
d240f20f
JB
2179 struct drm_i915_private *dev_priv = dev->dev_private;
2180 u32 dpa_ctl;
2181
2bd2ad64
DV
2182 assert_pipe_disabled(dev_priv,
2183 to_intel_crtc(crtc)->pipe);
2184
d240f20f 2185 dpa_ctl = I915_READ(DP_A);
0767935e
DV
2186 WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
2187 "dp pll off, should be on\n");
2188 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
2189
2190 /* We can't rely on the value tracked for the DP register in
2191 * intel_dp->DP because link_down must not change that (otherwise link
 2192	 * re-training will fail). */
298b0b39 2193 dpa_ctl &= ~DP_PLL_ENABLE;
d240f20f 2194 I915_WRITE(DP_A, dpa_ctl);
1af5fa1b 2195 POSTING_READ(DP_A);
d240f20f
JB
2196 udelay(200);
2197}
2198
c7ad3810 2199/* If the sink supports it, try to set the power state appropriately */
c19b0669 2200void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
c7ad3810
JB
2201{
2202 int ret, i;
2203
2204 /* Should have a valid DPCD by this point */
2205 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2206 return;
2207
2208 if (mode != DRM_MODE_DPMS_ON) {
9d1a1031
JN
2209 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2210 DP_SET_POWER_D3);
c7ad3810
JB
2211 } else {
2212 /*
 2213	 * When turning on, we need to retry a few times, sleeping 1 ms
 2214	 * between attempts, to give the sink time to wake up.
2215 */
2216 for (i = 0; i < 3; i++) {
9d1a1031
JN
2217 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2218 DP_SET_POWER_D0);
c7ad3810
JB
2219 if (ret == 1)
2220 break;
2221 msleep(1);
2222 }
2223 }
f9cac721
JN
2224
2225 if (ret != 1)
2226 DRM_DEBUG_KMS("failed to %s sink power state\n",
2227 mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
c7ad3810
JB
2228}
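/*
 * Callers bracket the link with this; e.g. intel_enable_dp() below does
 *
 *	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
 *	intel_dp_start_link_train(intel_dp);
 *
 * while intel_disable_dp() sets DRM_MODE_DPMS_OFF before powering the
 * panel off.
 */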
2229
19d8fe15
DV
2230static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2231 enum pipe *pipe)
d240f20f 2232{
19d8fe15 2233 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 2234 enum port port = dp_to_dig_port(intel_dp)->port;
19d8fe15
DV
2235 struct drm_device *dev = encoder->base.dev;
2236 struct drm_i915_private *dev_priv = dev->dev_private;
6d129bea
ID
2237 enum intel_display_power_domain power_domain;
2238 u32 tmp;
2239
2240 power_domain = intel_display_port_power_domain(encoder);
f458ebbc 2241 if (!intel_display_power_is_enabled(dev_priv, power_domain))
6d129bea
ID
2242 return false;
2243
2244 tmp = I915_READ(intel_dp->output_reg);
19d8fe15
DV
2245
2246 if (!(tmp & DP_PORT_EN))
2247 return false;
2248
39e5fa88 2249 if (IS_GEN7(dev) && port == PORT_A) {
19d8fe15 2250 *pipe = PORT_TO_PIPE_CPT(tmp);
39e5fa88 2251 } else if (HAS_PCH_CPT(dev) && port != PORT_A) {
adc289d7 2252 enum pipe p;
19d8fe15 2253
adc289d7
VS
2254 for_each_pipe(dev_priv, p) {
2255 u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
2256 if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
2257 *pipe = p;
19d8fe15
DV
2258 return true;
2259 }
2260 }
19d8fe15 2261
4a0833ec
DV
2262 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
2263 intel_dp->output_reg);
39e5fa88
VS
2264 } else if (IS_CHERRYVIEW(dev)) {
2265 *pipe = DP_PORT_TO_PIPE_CHV(tmp);
2266 } else {
2267 *pipe = PORT_TO_PIPE(tmp);
4a0833ec 2268 }
d240f20f 2269
19d8fe15
DV
2270 return true;
2271}
d240f20f 2272
045ac3b5 2273static void intel_dp_get_config(struct intel_encoder *encoder,
5cec258b 2274 struct intel_crtc_state *pipe_config)
045ac3b5
JB
2275{
2276 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
045ac3b5 2277 u32 tmp, flags = 0;
63000ef6
XZ
2278 struct drm_device *dev = encoder->base.dev;
2279 struct drm_i915_private *dev_priv = dev->dev_private;
2280 enum port port = dp_to_dig_port(intel_dp)->port;
2281 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
18442d08 2282 int dotclock;
045ac3b5 2283
9ed109a7 2284 tmp = I915_READ(intel_dp->output_reg);
9fcb1704
JN
2285
2286 pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
9ed109a7 2287
39e5fa88 2288 if (HAS_PCH_CPT(dev) && port != PORT_A) {
b81e34c2
VS
2289 u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
2290
2291 if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
63000ef6
XZ
2292 flags |= DRM_MODE_FLAG_PHSYNC;
2293 else
2294 flags |= DRM_MODE_FLAG_NHSYNC;
045ac3b5 2295
b81e34c2 2296 if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
63000ef6
XZ
2297 flags |= DRM_MODE_FLAG_PVSYNC;
2298 else
2299 flags |= DRM_MODE_FLAG_NVSYNC;
2300 } else {
39e5fa88 2301 if (tmp & DP_SYNC_HS_HIGH)
63000ef6
XZ
2302 flags |= DRM_MODE_FLAG_PHSYNC;
2303 else
2304 flags |= DRM_MODE_FLAG_NHSYNC;
045ac3b5 2305
39e5fa88 2306 if (tmp & DP_SYNC_VS_HIGH)
63000ef6
XZ
2307 flags |= DRM_MODE_FLAG_PVSYNC;
2308 else
2309 flags |= DRM_MODE_FLAG_NVSYNC;
2310 }
045ac3b5 2311
2d112de7 2312 pipe_config->base.adjusted_mode.flags |= flags;
f1f644dc 2313
8c875fca
VS
2314 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev) &&
2315 tmp & DP_COLOR_RANGE_16_235)
2316 pipe_config->limited_color_range = true;
2317
eb14cb74
VS
2318 pipe_config->has_dp_encoder = true;
2319
90a6b7b0
VS
2320 pipe_config->lane_count =
2321 ((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;
2322
eb14cb74
VS
2323 intel_dp_get_m_n(crtc, pipe_config);
2324
18442d08 2325 if (port == PORT_A) {
f1f644dc
JB
2326 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
2327 pipe_config->port_clock = 162000;
2328 else
2329 pipe_config->port_clock = 270000;
2330 }
18442d08
VS
2331
2332 dotclock = intel_dotclock_calculate(pipe_config->port_clock,
2333 &pipe_config->dp_m_n);
2334
2335 if (HAS_PCH_SPLIT(dev_priv->dev) && port != PORT_A)
2336 ironlake_check_encoder_dotclock(pipe_config, dotclock);
2337
2d112de7 2338 pipe_config->base.adjusted_mode.crtc_clock = dotclock;
7f16e5c1 2339
c6cd2ee2
JN
2340 if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp &&
2341 pipe_config->pipe_bpp > dev_priv->vbt.edp_bpp) {
2342 /*
2343 * This is a big fat ugly hack.
2344 *
2345 * Some machines in UEFI boot mode provide us a VBT that has 18
2346 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
2347 * unknown we fail to light up. Yet the same BIOS boots up with
2348 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
2349 * max, not what it tells us to use.
2350 *
2351 * Note: This will still be broken if the eDP panel is not lit
2352 * up by the BIOS, and thus we can't get the mode at module
2353 * load.
2354 */
2355 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
2356 pipe_config->pipe_bpp, dev_priv->vbt.edp_bpp);
2357 dev_priv->vbt.edp_bpp = pipe_config->pipe_bpp;
2358 }
045ac3b5
JB
2359}
2360
e8cb4558 2361static void intel_disable_dp(struct intel_encoder *encoder)
d240f20f 2362{
e8cb4558 2363 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
982a3866 2364 struct drm_device *dev = encoder->base.dev;
495a5bb8
JN
2365 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2366
6e3c9717 2367 if (crtc->config->has_audio)
495a5bb8 2368 intel_audio_codec_disable(encoder);
6cb49835 2369
b32c6f48
RV
2370 if (HAS_PSR(dev) && !HAS_DDI(dev))
2371 intel_psr_disable(intel_dp);
2372
6cb49835
DV
2373 /* Make sure the panel is off before trying to change the mode. But also
2374 * ensure that we have vdd while we switch off the panel. */
24f3e092 2375 intel_edp_panel_vdd_on(intel_dp);
4be73780 2376 intel_edp_backlight_off(intel_dp);
fdbc3b1f 2377 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
4be73780 2378 intel_edp_panel_off(intel_dp);
3739850b 2379
08aff3fe
VS
2380 /* disable the port before the pipe on g4x */
2381 if (INTEL_INFO(dev)->gen < 5)
3739850b 2382 intel_dp_link_down(intel_dp);
d240f20f
JB
2383}
2384
08aff3fe 2385static void ilk_post_disable_dp(struct intel_encoder *encoder)
d240f20f 2386{
2bd2ad64 2387 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
982a3866 2388 enum port port = dp_to_dig_port(intel_dp)->port;
2bd2ad64 2389
49277c31 2390 intel_dp_link_down(intel_dp);
08aff3fe
VS
2391 if (port == PORT_A)
2392 ironlake_edp_pll_off(intel_dp);
49277c31
VS
2393}
2394
2395static void vlv_post_disable_dp(struct intel_encoder *encoder)
2396{
2397 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2398
2399 intel_dp_link_down(intel_dp);
2bd2ad64
DV
2400}
2401
a8f327fb
VS
2402static void chv_data_lane_soft_reset(struct intel_encoder *encoder,
2403 bool reset)
580d3811 2404{
a8f327fb
VS
2405 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2406 enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
2407 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2408 enum pipe pipe = crtc->pipe;
2409 uint32_t val;
580d3811 2410
a8f327fb
VS
2411 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
2412 if (reset)
2413 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2414 else
2415 val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
2416 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
580d3811 2417
a8f327fb
VS
2418 if (crtc->config->lane_count > 2) {
2419 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
2420 if (reset)
2421 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
2422 else
2423 val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
2424 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
2425 }
580d3811 2426
97fd4d5c 2427 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
d2152b25 2428 val |= CHV_PCS_REQ_SOFTRESET_EN;
a8f327fb
VS
2429 if (reset)
2430 val &= ~DPIO_PCS_CLK_SOFT_RESET;
2431 else
2432 val |= DPIO_PCS_CLK_SOFT_RESET;
97fd4d5c 2433 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
d2152b25 2434
a8f327fb 2435 if (crtc->config->lane_count > 2) {
e0fce78f
VS
2436 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
2437 val |= CHV_PCS_REQ_SOFTRESET_EN;
a8f327fb
VS
2438 if (reset)
2439 val &= ~DPIO_PCS_CLK_SOFT_RESET;
2440 else
2441 val |= DPIO_PCS_CLK_SOFT_RESET;
e0fce78f
VS
2442 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
2443 }
a8f327fb 2444}
97fd4d5c 2445
a8f327fb
VS
2446static void chv_post_disable_dp(struct intel_encoder *encoder)
2447{
2448 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2449 struct drm_device *dev = encoder->base.dev;
2450 struct drm_i915_private *dev_priv = dev->dev_private;
97fd4d5c 2451
a8f327fb
VS
2452 intel_dp_link_down(intel_dp);
2453
2454 mutex_lock(&dev_priv->sb_lock);
2455
2456 /* Assert data lane reset */
2457 chv_data_lane_soft_reset(encoder, true);
580d3811 2458
a580516d 2459 mutex_unlock(&dev_priv->sb_lock);
580d3811
VS
2460}
2461
7b13b58a
VS
2462static void
2463_intel_dp_set_link_train(struct intel_dp *intel_dp,
2464 uint32_t *DP,
2465 uint8_t dp_train_pat)
2466{
2467 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2468 struct drm_device *dev = intel_dig_port->base.base.dev;
2469 struct drm_i915_private *dev_priv = dev->dev_private;
2470 enum port port = intel_dig_port->port;
2471
2472 if (HAS_DDI(dev)) {
2473 uint32_t temp = I915_READ(DP_TP_CTL(port));
2474
2475 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
2476 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
2477 else
2478 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
2479
2480 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2481 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2482 case DP_TRAINING_PATTERN_DISABLE:
2483 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
2484
2485 break;
2486 case DP_TRAINING_PATTERN_1:
2487 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
2488 break;
2489 case DP_TRAINING_PATTERN_2:
2490 temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
2491 break;
2492 case DP_TRAINING_PATTERN_3:
2493 temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
2494 break;
2495 }
2496 I915_WRITE(DP_TP_CTL(port), temp);
2497
39e5fa88
VS
2498 } else if ((IS_GEN7(dev) && port == PORT_A) ||
2499 (HAS_PCH_CPT(dev) && port != PORT_A)) {
7b13b58a
VS
2500 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
2501
2502 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2503 case DP_TRAINING_PATTERN_DISABLE:
2504 *DP |= DP_LINK_TRAIN_OFF_CPT;
2505 break;
2506 case DP_TRAINING_PATTERN_1:
2507 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
2508 break;
2509 case DP_TRAINING_PATTERN_2:
2510 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2511 break;
2512 case DP_TRAINING_PATTERN_3:
2513 DRM_ERROR("DP training pattern 3 not supported\n");
2514 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2515 break;
2516 }
2517
2518 } else {
2519 if (IS_CHERRYVIEW(dev))
2520 *DP &= ~DP_LINK_TRAIN_MASK_CHV;
2521 else
2522 *DP &= ~DP_LINK_TRAIN_MASK;
2523
2524 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2525 case DP_TRAINING_PATTERN_DISABLE:
2526 *DP |= DP_LINK_TRAIN_OFF;
2527 break;
2528 case DP_TRAINING_PATTERN_1:
2529 *DP |= DP_LINK_TRAIN_PAT_1;
2530 break;
2531 case DP_TRAINING_PATTERN_2:
2532 *DP |= DP_LINK_TRAIN_PAT_2;
2533 break;
2534 case DP_TRAINING_PATTERN_3:
2535 if (IS_CHERRYVIEW(dev)) {
2536 *DP |= DP_LINK_TRAIN_PAT_3_CHV;
2537 } else {
2538 DRM_ERROR("DP training pattern 3 not supported\n");
2539 *DP |= DP_LINK_TRAIN_PAT_2;
2540 }
2541 break;
2542 }
2543 }
2544}
2545
2546static void intel_dp_enable_port(struct intel_dp *intel_dp)
2547{
2548 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2549 struct drm_i915_private *dev_priv = dev->dev_private;
2550
7b13b58a
VS
2551 /* enable with pattern 1 (as per spec) */
2552 _intel_dp_set_link_train(intel_dp, &intel_dp->DP,
2553 DP_TRAINING_PATTERN_1);
2554
2555 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2556 POSTING_READ(intel_dp->output_reg);
7b713f50
VS
2557
2558 /*
2559 * Magic for VLV/CHV. We _must_ first set up the register
2560 * without actually enabling the port, and then do another
2561 * write to enable the port. Otherwise link training will
2562 * fail when the power sequencer is freshly used for this port.
2563 */
2564 intel_dp->DP |= DP_PORT_EN;
2565
2566 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2567 POSTING_READ(intel_dp->output_reg);
580d3811
VS
2568}
2569
e8cb4558 2570static void intel_enable_dp(struct intel_encoder *encoder)
d240f20f 2571{
e8cb4558
DV
2572 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2573 struct drm_device *dev = encoder->base.dev;
2574 struct drm_i915_private *dev_priv = dev->dev_private;
c1dec79a 2575 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
e8cb4558 2576 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
5d613501 2577
0c33d8d7
DV
2578 if (WARN_ON(dp_reg & DP_PORT_EN))
2579 return;
5d613501 2580
093e3f13
VS
2581 pps_lock(intel_dp);
2582
2583 if (IS_VALLEYVIEW(dev))
2584 vlv_init_panel_power_sequencer(intel_dp);
2585
7b13b58a 2586 intel_dp_enable_port(intel_dp);
093e3f13
VS
2587
2588 edp_panel_vdd_on(intel_dp);
2589 edp_panel_on(intel_dp);
2590 edp_panel_vdd_off(intel_dp, true);
2591
2592 pps_unlock(intel_dp);
2593
e0fce78f
VS
2594 if (IS_VALLEYVIEW(dev)) {
2595 unsigned int lane_mask = 0x0;
2596
2597 if (IS_CHERRYVIEW(dev))
2598 lane_mask = intel_dp_unused_lane_mask(crtc->config->lane_count);
2599
9b6de0a1
VS
2600 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
2601 lane_mask);
e0fce78f 2602 }
61234fa5 2603
f01eca2e 2604 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
33a34e4e 2605 intel_dp_start_link_train(intel_dp);
33a34e4e 2606 intel_dp_complete_link_train(intel_dp);
3ab9c637 2607 intel_dp_stop_link_train(intel_dp);
c1dec79a 2608
6e3c9717 2609 if (crtc->config->has_audio) {
c1dec79a
JN
2610 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
2611 pipe_name(crtc->pipe));
2612 intel_audio_codec_enable(encoder);
2613 }
ab1f90f9 2614}
89b667f8 2615
ecff4f3b
JN
2616static void g4x_enable_dp(struct intel_encoder *encoder)
2617{
828f5c6e
JN
2618 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2619
ecff4f3b 2620 intel_enable_dp(encoder);
4be73780 2621 intel_edp_backlight_on(intel_dp);
ab1f90f9 2622}
89b667f8 2623
ab1f90f9
JN
2624static void vlv_enable_dp(struct intel_encoder *encoder)
2625{
828f5c6e
JN
2626 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2627
4be73780 2628 intel_edp_backlight_on(intel_dp);
b32c6f48 2629 intel_psr_enable(intel_dp);
d240f20f
JB
2630}
2631
ecff4f3b 2632static void g4x_pre_enable_dp(struct intel_encoder *encoder)
ab1f90f9
JN
2633{
2634 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2635 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2636
8ac33ed3
DV
2637 intel_dp_prepare(encoder);
2638
d41f1efb
DV
2639 /* Only ilk+ has port A */
2640 if (dport->port == PORT_A) {
2641 ironlake_set_pll_cpu_edp(intel_dp);
ab1f90f9 2642 ironlake_edp_pll_on(intel_dp);
d41f1efb 2643 }
ab1f90f9
JN
2644}
2645
83b84597
VS
2646static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
2647{
2648 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2649 struct drm_i915_private *dev_priv = intel_dig_port->base.base.dev->dev_private;
2650 enum pipe pipe = intel_dp->pps_pipe;
2651 int pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
2652
2653 edp_panel_vdd_off_sync(intel_dp);
2654
2655 /*
 2656	 * VLV seems to get confused when multiple power sequencers
 2657	 * have the same port selected (even if only one has power/vdd
 2658	 * enabled). The failure manifests as vlv_wait_port_ready() failing.
 2659	 * CHV, on the other hand, doesn't seem to mind having the same port
 2660	 * selected in multiple power sequencers, but let's always clear the
 2661	 * port select when logically disconnecting a power sequencer
 2662	 * from a port.
2663 */
2664 DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
2665 pipe_name(pipe), port_name(intel_dig_port->port));
2666 I915_WRITE(pp_on_reg, 0);
2667 POSTING_READ(pp_on_reg);
2668
2669 intel_dp->pps_pipe = INVALID_PIPE;
2670}
2671
a4a5d2f8
VS
2672static void vlv_steal_power_sequencer(struct drm_device *dev,
2673 enum pipe pipe)
2674{
2675 struct drm_i915_private *dev_priv = dev->dev_private;
2676 struct intel_encoder *encoder;
2677
2678 lockdep_assert_held(&dev_priv->pps_mutex);
2679
ac3c12e4
VS
2680 if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
2681 return;
2682
a4a5d2f8
VS
2683 list_for_each_entry(encoder, &dev->mode_config.encoder_list,
2684 base.head) {
2685 struct intel_dp *intel_dp;
773538e8 2686 enum port port;
a4a5d2f8
VS
2687
2688 if (encoder->type != INTEL_OUTPUT_EDP)
2689 continue;
2690
2691 intel_dp = enc_to_intel_dp(&encoder->base);
773538e8 2692 port = dp_to_dig_port(intel_dp)->port;
a4a5d2f8
VS
2693
2694 if (intel_dp->pps_pipe != pipe)
2695 continue;
2696
2697 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
773538e8 2698 pipe_name(pipe), port_name(port));
a4a5d2f8 2699
e02f9a06 2700 WARN(encoder->base.crtc,
034e43c6
VS
2701 "stealing pipe %c power sequencer from active eDP port %c\n",
2702 pipe_name(pipe), port_name(port));
a4a5d2f8 2703
a4a5d2f8 2704 /* make sure vdd is off before we steal it */
83b84597 2705 vlv_detach_power_sequencer(intel_dp);
a4a5d2f8
VS
2706 }
2707}
2708
2709static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2710{
2711 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2712 struct intel_encoder *encoder = &intel_dig_port->base;
2713 struct drm_device *dev = encoder->base.dev;
2714 struct drm_i915_private *dev_priv = dev->dev_private;
2715 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
a4a5d2f8
VS
2716
2717 lockdep_assert_held(&dev_priv->pps_mutex);
2718
093e3f13
VS
2719 if (!is_edp(intel_dp))
2720 return;
2721
a4a5d2f8
VS
2722 if (intel_dp->pps_pipe == crtc->pipe)
2723 return;
2724
2725 /*
2726 * If another power sequencer was being used on this
 2727	 * port previously, make sure to turn off vdd there while
2728 * we still have control of it.
2729 */
2730 if (intel_dp->pps_pipe != INVALID_PIPE)
83b84597 2731 vlv_detach_power_sequencer(intel_dp);
a4a5d2f8
VS
2732
2733 /*
2734 * We may be stealing the power
2735 * sequencer from another port.
2736 */
2737 vlv_steal_power_sequencer(dev, crtc->pipe);
2738
2739 /* now it's all ours */
2740 intel_dp->pps_pipe = crtc->pipe;
2741
2742 DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
2743 pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
2744
2745 /* init power sequencer on this pipe and port */
36b5f425
VS
2746 intel_dp_init_panel_power_sequencer(dev, intel_dp);
2747 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
a4a5d2f8
VS
2748}
2749
ab1f90f9 2750static void vlv_pre_enable_dp(struct intel_encoder *encoder)
a4fc5ed6 2751{
2bd2ad64 2752 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 2753 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
b2634017 2754 struct drm_device *dev = encoder->base.dev;
89b667f8 2755 struct drm_i915_private *dev_priv = dev->dev_private;
ab1f90f9 2756 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
e4607fcf 2757 enum dpio_channel port = vlv_dport_to_channel(dport);
ab1f90f9
JN
2758 int pipe = intel_crtc->pipe;
2759 u32 val;
a4fc5ed6 2760
a580516d 2761 mutex_lock(&dev_priv->sb_lock);
89b667f8 2762
ab3c759a 2763 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
ab1f90f9
JN
2764 val = 0;
2765 if (pipe)
2766 val |= (1<<21);
2767 else
2768 val &= ~(1<<21);
2769 val |= 0x001000c4;
ab3c759a
CML
2770 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
2771 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
2772 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
89b667f8 2773
a580516d 2774 mutex_unlock(&dev_priv->sb_lock);
ab1f90f9
JN
2775
2776 intel_enable_dp(encoder);
89b667f8
JB
2777}
2778
ecff4f3b 2779static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder)
89b667f8
JB
2780{
2781 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2782 struct drm_device *dev = encoder->base.dev;
2783 struct drm_i915_private *dev_priv = dev->dev_private;
5e69f97f
CML
2784 struct intel_crtc *intel_crtc =
2785 to_intel_crtc(encoder->base.crtc);
e4607fcf 2786 enum dpio_channel port = vlv_dport_to_channel(dport);
5e69f97f 2787 int pipe = intel_crtc->pipe;
89b667f8 2788
8ac33ed3
DV
2789 intel_dp_prepare(encoder);
2790
89b667f8 2791 /* Program Tx lane resets to default */
a580516d 2792 mutex_lock(&dev_priv->sb_lock);
ab3c759a 2793 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
89b667f8
JB
2794 DPIO_PCS_TX_LANE2_RESET |
2795 DPIO_PCS_TX_LANE1_RESET);
ab3c759a 2796 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
89b667f8
JB
2797 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
2798 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
2799 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
2800 DPIO_PCS_CLK_SOFT_RESET);
2801
2802 /* Fix up inter-pair skew failure */
ab3c759a
CML
2803 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
2804 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
2805 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
a580516d 2806 mutex_unlock(&dev_priv->sb_lock);
a4fc5ed6
KP
2807}
2808
e4a1d846
CML
2809static void chv_pre_enable_dp(struct intel_encoder *encoder)
2810{
2811 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2812 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
2813 struct drm_device *dev = encoder->base.dev;
2814 struct drm_i915_private *dev_priv = dev->dev_private;
e4a1d846
CML
2815 struct intel_crtc *intel_crtc =
2816 to_intel_crtc(encoder->base.crtc);
2817 enum dpio_channel ch = vlv_dport_to_channel(dport);
2818 int pipe = intel_crtc->pipe;
2e523e98 2819 int data, i, stagger;
949c1d43 2820 u32 val;
e4a1d846 2821
a580516d 2822 mutex_lock(&dev_priv->sb_lock);
949c1d43 2823
570e2a74
VS
2824 /* allow hardware to manage TX FIFO reset source */
2825 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2826 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2827 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2828
e0fce78f
VS
2829 if (intel_crtc->config->lane_count > 2) {
2830 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2831 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
2832 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2833 }
570e2a74 2834
949c1d43 2835	/* Program Tx lane latency optimal setting */
e0fce78f 2836 for (i = 0; i < intel_crtc->config->lane_count; i++) {
e4a1d846 2837 /* Set the upar bit */
e0fce78f
VS
2838 if (intel_crtc->config->lane_count == 1)
2839 data = 0x0;
2840 else
2841 data = (i == 1) ? 0x0 : 0x1;
e4a1d846
CML
2842 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
2843 data << DPIO_UPAR_SHIFT);
2844 }
2845
2846 /* Data lane stagger programming */
2e523e98
VS
2847 if (intel_crtc->config->port_clock > 270000)
2848 stagger = 0x18;
2849 else if (intel_crtc->config->port_clock > 135000)
2850 stagger = 0xd;
2851 else if (intel_crtc->config->port_clock > 67500)
2852 stagger = 0x7;
2853 else if (intel_crtc->config->port_clock > 33750)
2854 stagger = 0x4;
2855 else
2856 stagger = 0x2;
2857
2858 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
2859 val |= DPIO_TX2_STAGGER_MASK(0x1f);
2860 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
2861
e0fce78f
VS
2862 if (intel_crtc->config->lane_count > 2) {
2863 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
2864 val |= DPIO_TX2_STAGGER_MASK(0x1f);
2865 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
2866 }
2e523e98
VS
2867
2868 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
2869 DPIO_LANESTAGGER_STRAP(stagger) |
2870 DPIO_LANESTAGGER_STRAP_OVRD |
2871 DPIO_TX1_STAGGER_MASK(0x1f) |
2872 DPIO_TX1_STAGGER_MULT(6) |
2873 DPIO_TX2_STAGGER_MULT(0));
2874
e0fce78f
VS
2875 if (intel_crtc->config->lane_count > 2) {
2876 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
2877 DPIO_LANESTAGGER_STRAP(stagger) |
2878 DPIO_LANESTAGGER_STRAP_OVRD |
2879 DPIO_TX1_STAGGER_MASK(0x1f) |
2880 DPIO_TX1_STAGGER_MULT(7) |
2881 DPIO_TX2_STAGGER_MULT(5));
2882 }
e4a1d846 2883
a8f327fb
VS
2884 /* Deassert data lane reset */
2885 chv_data_lane_soft_reset(encoder, false);
2886
a580516d 2887 mutex_unlock(&dev_priv->sb_lock);
e4a1d846 2888
e4a1d846 2889 intel_enable_dp(encoder);
b0b33846
VS
2890
2891 /* Second common lane will stay alive on its own now */
2892 if (dport->release_cl2_override) {
2893 chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
2894 dport->release_cl2_override = false;
2895 }
e4a1d846
CML
2896}
2897
9197c88b
VS
2898static void chv_dp_pre_pll_enable(struct intel_encoder *encoder)
2899{
2900 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
2901 struct drm_device *dev = encoder->base.dev;
2902 struct drm_i915_private *dev_priv = dev->dev_private;
2903 struct intel_crtc *intel_crtc =
2904 to_intel_crtc(encoder->base.crtc);
2905 enum dpio_channel ch = vlv_dport_to_channel(dport);
2906 enum pipe pipe = intel_crtc->pipe;
e0fce78f
VS
2907 unsigned int lane_mask =
2908 intel_dp_unused_lane_mask(intel_crtc->config->lane_count);
9197c88b
VS
2909 u32 val;
2910
625695f8
VS
2911 intel_dp_prepare(encoder);
2912
b0b33846
VS
2913 /*
2914 * Must trick the second common lane into life.
2915 * Otherwise we can't even access the PLL.
2916 */
2917 if (ch == DPIO_CH0 && pipe == PIPE_B)
2918 dport->release_cl2_override =
2919 !chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);
2920
e0fce78f
VS
2921 chv_phy_powergate_lanes(encoder, true, lane_mask);
2922
a580516d 2923 mutex_lock(&dev_priv->sb_lock);
9197c88b 2924
a8f327fb
VS
2925 /* Assert data lane reset */
2926 chv_data_lane_soft_reset(encoder, true);
2927
b9e5ac3c
VS
2928 /* program left/right clock distribution */
2929 if (pipe != PIPE_B) {
2930 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
2931 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
2932 if (ch == DPIO_CH0)
2933 val |= CHV_BUFLEFTENA1_FORCE;
2934 if (ch == DPIO_CH1)
2935 val |= CHV_BUFRIGHTENA1_FORCE;
2936 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
2937 } else {
2938 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
2939 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
2940 if (ch == DPIO_CH0)
2941 val |= CHV_BUFLEFTENA2_FORCE;
2942 if (ch == DPIO_CH1)
2943 val |= CHV_BUFRIGHTENA2_FORCE;
2944 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
2945 }
2946
9197c88b
VS
2947 /* program clock channel usage */
2948 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
2949 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2950 if (pipe != PIPE_B)
2951 val &= ~CHV_PCS_USEDCLKCHANNEL;
2952 else
2953 val |= CHV_PCS_USEDCLKCHANNEL;
2954 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
2955
e0fce78f
VS
2956 if (intel_crtc->config->lane_count > 2) {
2957 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
2958 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
2959 if (pipe != PIPE_B)
2960 val &= ~CHV_PCS_USEDCLKCHANNEL;
2961 else
2962 val |= CHV_PCS_USEDCLKCHANNEL;
2963 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
2964 }
9197c88b
VS
2965
2966 /*
2967 * This a a bit weird since generally CL
2968 * matches the pipe, but here we need to
2969 * pick the CL based on the port.
2970 */
2971 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
2972 if (pipe != PIPE_B)
2973 val &= ~CHV_CMN_USEDCLKCHANNEL;
2974 else
2975 val |= CHV_CMN_USEDCLKCHANNEL;
2976 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
2977
a580516d 2978 mutex_unlock(&dev_priv->sb_lock);
9197c88b
VS
2979}
2980
d6db995f
VS
2981static void chv_dp_post_pll_disable(struct intel_encoder *encoder)
2982{
2983 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2984 enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
2985 u32 val;
2986
2987 mutex_lock(&dev_priv->sb_lock);
2988
2989 /* disable left/right clock distribution */
2990 if (pipe != PIPE_B) {
2991 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
2992 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
2993 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
2994 } else {
2995 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
2996 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
2997 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
2998 }
2999
3000 mutex_unlock(&dev_priv->sb_lock);
e0fce78f 3001
b0b33846
VS
3002 /*
3003 * Leave the power down bit cleared for at least one
 3004	 * lane so that chv_phy_powergate_ch() will power
3005 * on something when the channel is otherwise unused.
3006 * When the port is off and the override is removed
3007 * the lanes power down anyway, so otherwise it doesn't
3008 * really matter what the state of power down bits is
3009 * after this.
3010 */
e0fce78f 3011 chv_phy_powergate_lanes(encoder, false, 0x0);
d6db995f
VS
3012}
3013
a4fc5ed6 3014/*
df0c237d
JB
3015 * Native read with retry for link status and receiver capability reads for
3016 * cases where the sink may still be asleep.
9d1a1031
JN
3017 *
3018 * Sinks are *supposed* to come up within 1ms from an off state, but we're also
3019 * supposed to retry 3 times per the spec.
a4fc5ed6 3020 */
9d1a1031
JN
3021static ssize_t
3022intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
3023 void *buffer, size_t size)
a4fc5ed6 3024{
9d1a1031
JN
3025 ssize_t ret;
3026 int i;
61da5fab 3027
f6a19066
VS
3028 /*
 3029	 * Sometimes we just get the same incorrect byte repeated
 3030	 * over the entire buffer. Doing just one throwaway read
3031 * initially seems to "solve" it.
3032 */
3033 drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
3034
61da5fab 3035 for (i = 0; i < 3; i++) {
9d1a1031
JN
3036 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
3037 if (ret == size)
3038 return ret;
61da5fab
JB
3039 msleep(1);
3040 }
a4fc5ed6 3041
9d1a1031 3042 return ret;
a4fc5ed6
KP
3043}
3044
3045/*
3046 * Fetch AUX CH registers 0x202 - 0x207 which contain
3047 * link status information
3048 */
3049static bool
93f62dad 3050intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
a4fc5ed6 3051{
9d1a1031
JN
3052 return intel_dp_dpcd_read_wake(&intel_dp->aux,
3053 DP_LANE0_1_STATUS,
3054 link_status,
3055 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
a4fc5ed6
KP
3056}
3057
1100244e 3058/* These are source-specific values. */
a4fc5ed6 3059static uint8_t
1a2eb460 3060intel_dp_voltage_max(struct intel_dp *intel_dp)
a4fc5ed6 3061{
30add22d 3062 struct drm_device *dev = intel_dp_to_dev(intel_dp);
7ad14a29 3063 struct drm_i915_private *dev_priv = dev->dev_private;
bc7d38a4 3064 enum port port = dp_to_dig_port(intel_dp)->port;
1a2eb460 3065
9314726b
VK
3066 if (IS_BROXTON(dev))
3067 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3068 else if (INTEL_INFO(dev)->gen >= 9) {
9e458034 3069 if (dev_priv->edp_low_vswing && port == PORT_A)
7ad14a29 3070 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
5a9d1f1a 3071 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
7ad14a29 3072 } else if (IS_VALLEYVIEW(dev))
bd60018a 3073 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
bc7d38a4 3074 else if (IS_GEN7(dev) && port == PORT_A)
bd60018a 3075 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
bc7d38a4 3076 else if (HAS_PCH_CPT(dev) && port != PORT_A)
bd60018a 3077 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
1a2eb460 3078 else
bd60018a 3079 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
1a2eb460
KP
3080}
3081
3082static uint8_t
3083intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
3084{
30add22d 3085 struct drm_device *dev = intel_dp_to_dev(intel_dp);
bc7d38a4 3086 enum port port = dp_to_dig_port(intel_dp)->port;
1a2eb460 3087
5a9d1f1a
DL
3088 if (INTEL_INFO(dev)->gen >= 9) {
3089 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3090 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3091 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3092 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3093 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3094 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3095 return DP_TRAIN_PRE_EMPH_LEVEL_1;
7ad14a29
SJ
3096 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3097 return DP_TRAIN_PRE_EMPH_LEVEL_0;
5a9d1f1a
DL
3098 default:
3099 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3100 }
3101 } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
d6c0d722 3102 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
3103 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3104 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3105 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3106 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3107 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3108 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3109 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
d6c0d722 3110 default:
bd60018a 3111 return DP_TRAIN_PRE_EMPH_LEVEL_0;
d6c0d722 3112 }
e2fa6fba
P
3113 } else if (IS_VALLEYVIEW(dev)) {
3114 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
3115 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3116 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3117 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3118 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3119 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3120 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3121 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e2fa6fba 3122 default:
bd60018a 3123 return DP_TRAIN_PRE_EMPH_LEVEL_0;
e2fa6fba 3124 }
bc7d38a4 3125 } else if (IS_GEN7(dev) && port == PORT_A) {
1a2eb460 3126 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
3127 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3128 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3129 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3130 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3131 return DP_TRAIN_PRE_EMPH_LEVEL_1;
1a2eb460 3132 default:
bd60018a 3133 return DP_TRAIN_PRE_EMPH_LEVEL_0;
1a2eb460
KP
3134 }
3135 } else {
3136 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a
SJ
3137 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3138 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3139 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3140 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3141 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3142 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3143 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
1a2eb460 3144 default:
bd60018a 3145 return DP_TRAIN_PRE_EMPH_LEVEL_0;
1a2eb460 3146 }
a4fc5ed6
KP
3147 }
3148}
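/*
 * Taken together, the two helpers above cap the training set: e.g. on VLV
 * the maximum voltage swing is level 3, a swing of level 2 is paired with
 * at most pre-emphasis level 1, and the maximum swing level 3 only ever
 * pairs with pre-emphasis level 0.
 */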
3149
5829975c 3150static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
e2fa6fba
P
3151{
3152 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3153 struct drm_i915_private *dev_priv = dev->dev_private;
3154 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
5e69f97f
CML
3155 struct intel_crtc *intel_crtc =
3156 to_intel_crtc(dport->base.base.crtc);
e2fa6fba
P
3157 unsigned long demph_reg_value, preemph_reg_value,
3158 uniqtranscale_reg_value;
3159 uint8_t train_set = intel_dp->train_set[0];
e4607fcf 3160 enum dpio_channel port = vlv_dport_to_channel(dport);
5e69f97f 3161 int pipe = intel_crtc->pipe;
e2fa6fba
P
3162
3163 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 3164 case DP_TRAIN_PRE_EMPH_LEVEL_0:
e2fa6fba
P
3165 preemph_reg_value = 0x0004000;
3166 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3167 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3168 demph_reg_value = 0x2B405555;
3169 uniqtranscale_reg_value = 0x552AB83A;
3170 break;
bd60018a 3171 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
3172 demph_reg_value = 0x2B404040;
3173 uniqtranscale_reg_value = 0x5548B83A;
3174 break;
bd60018a 3175 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e2fa6fba
P
3176 demph_reg_value = 0x2B245555;
3177 uniqtranscale_reg_value = 0x5560B83A;
3178 break;
bd60018a 3179 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e2fa6fba
P
3180 demph_reg_value = 0x2B405555;
3181 uniqtranscale_reg_value = 0x5598DA3A;
3182 break;
3183 default:
3184 return 0;
3185 }
3186 break;
bd60018a 3187 case DP_TRAIN_PRE_EMPH_LEVEL_1:
e2fa6fba
P
3188 preemph_reg_value = 0x0002000;
3189 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3190 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3191 demph_reg_value = 0x2B404040;
3192 uniqtranscale_reg_value = 0x5552B83A;
3193 break;
bd60018a 3194 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
3195 demph_reg_value = 0x2B404848;
3196 uniqtranscale_reg_value = 0x5580B83A;
3197 break;
bd60018a 3198 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e2fa6fba
P
3199 demph_reg_value = 0x2B404040;
3200 uniqtranscale_reg_value = 0x55ADDA3A;
3201 break;
3202 default:
3203 return 0;
3204 }
3205 break;
bd60018a 3206 case DP_TRAIN_PRE_EMPH_LEVEL_2:
e2fa6fba
P
3207 preemph_reg_value = 0x0000000;
3208 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3209 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3210 demph_reg_value = 0x2B305555;
3211 uniqtranscale_reg_value = 0x5570B83A;
3212 break;
bd60018a 3213 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
3214 demph_reg_value = 0x2B2B4040;
3215 uniqtranscale_reg_value = 0x55ADDA3A;
3216 break;
3217 default:
3218 return 0;
3219 }
3220 break;
bd60018a 3221 case DP_TRAIN_PRE_EMPH_LEVEL_3:
e2fa6fba
P
3222 preemph_reg_value = 0x0006000;
3223 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3224 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3225 demph_reg_value = 0x1B405555;
3226 uniqtranscale_reg_value = 0x55ADDA3A;
3227 break;
3228 default:
3229 return 0;
3230 }
3231 break;
3232 default:
3233 return 0;
3234 }
3235
a580516d 3236 mutex_lock(&dev_priv->sb_lock);
ab3c759a
CML
3237 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
3238 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
3239 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
e2fa6fba 3240 uniqtranscale_reg_value);
ab3c759a
CML
3241 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
3242 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
3243 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
3244 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x80000000);
a580516d 3245 mutex_unlock(&dev_priv->sb_lock);
e2fa6fba
P
3246
3247 return 0;
3248}
3249
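/*
 * Cherryview signal level programming. chv_need_uniq_trans_scale() is true
 * only for the maximum voltage swing (level 3) with no pre-emphasis;
 * chv_signal_levels() looks up the de-emphasis and margin values for the
 * requested train_set[0] levels and programs swing, de-emphasis and margin
 * per lane via DPIO before kicking off the swing calculation.
 */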
67fa24b4
VS
3250static bool chv_need_uniq_trans_scale(uint8_t train_set)
3251{
3252 return (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) == DP_TRAIN_PRE_EMPH_LEVEL_0 &&
3253 (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) == DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3254}
3255
5829975c 3256static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
e4a1d846
CML
3257{
3258 struct drm_device *dev = intel_dp_to_dev(intel_dp);
3259 struct drm_i915_private *dev_priv = dev->dev_private;
3260 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
3261 struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
f72df8db 3262 u32 deemph_reg_value, margin_reg_value, val;
e4a1d846
CML
3263 uint8_t train_set = intel_dp->train_set[0];
3264 enum dpio_channel ch = vlv_dport_to_channel(dport);
f72df8db
VS
3265 enum pipe pipe = intel_crtc->pipe;
3266 int i;
e4a1d846
CML
3267
3268 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 3269 case DP_TRAIN_PRE_EMPH_LEVEL_0:
e4a1d846 3270 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3271 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3272 deemph_reg_value = 128;
3273 margin_reg_value = 52;
3274 break;
bd60018a 3275 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3276 deemph_reg_value = 128;
3277 margin_reg_value = 77;
3278 break;
bd60018a 3279 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e4a1d846
CML
3280 deemph_reg_value = 128;
3281 margin_reg_value = 102;
3282 break;
bd60018a 3283 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e4a1d846
CML
3284 deemph_reg_value = 128;
3285 margin_reg_value = 154;
3286 /* FIXME extra to set for 1200 */
3287 break;
3288 default:
3289 return 0;
3290 }
3291 break;
bd60018a 3292 case DP_TRAIN_PRE_EMPH_LEVEL_1:
e4a1d846 3293 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3294 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3295 deemph_reg_value = 85;
3296 margin_reg_value = 78;
3297 break;
bd60018a 3298 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3299 deemph_reg_value = 85;
3300 margin_reg_value = 116;
3301 break;
bd60018a 3302 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e4a1d846
CML
3303 deemph_reg_value = 85;
3304 margin_reg_value = 154;
3305 break;
3306 default:
3307 return 0;
3308 }
3309 break;
bd60018a 3310 case DP_TRAIN_PRE_EMPH_LEVEL_2:
e4a1d846 3311 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3312 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3313 deemph_reg_value = 64;
3314 margin_reg_value = 104;
3315 break;
bd60018a 3316 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3317 deemph_reg_value = 64;
3318 margin_reg_value = 154;
3319 break;
3320 default:
3321 return 0;
3322 }
3323 break;
bd60018a 3324 case DP_TRAIN_PRE_EMPH_LEVEL_3:
e4a1d846 3325 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3326 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3327 deemph_reg_value = 43;
3328 margin_reg_value = 154;
3329 break;
3330 default:
3331 return 0;
3332 }
3333 break;
3334 default:
3335 return 0;
3336 }
3337
a580516d 3338 mutex_lock(&dev_priv->sb_lock);
e4a1d846
CML
3339
3340 /* Clear calc init */
1966e59e
VS
3341 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3342 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
a02ef3c7
VS
3343 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3344 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
1966e59e
VS
3345 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3346
e0fce78f
VS
3347 if (intel_crtc->config->lane_count > 2) {
3348 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3349 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
3350 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
3351 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
3352 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3353 }
e4a1d846 3354
a02ef3c7
VS
3355 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
3356 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3357 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3358 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
3359
e0fce78f
VS
3360 if (intel_crtc->config->lane_count > 2) {
3361 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
3362 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
3363 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
3364 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
3365 }
a02ef3c7 3366
e4a1d846 3367 /* Program swing deemph */
e0fce78f 3368 for (i = 0; i < intel_crtc->config->lane_count; i++) {
f72df8db
VS
3369 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
3370 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
3371 val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
3372 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
3373 }
e4a1d846
CML
3374
3375 /* Program swing margin */
e0fce78f 3376 for (i = 0; i < intel_crtc->config->lane_count; i++) {
f72df8db 3377 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
67fa24b4 3378
1fb44505
VS
3379 val &= ~DPIO_SWING_MARGIN000_MASK;
3380 val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
67fa24b4
VS
3381
3382 /*
3383 * Supposedly this value shouldn't matter when unique transition
3384 * scale is disabled, but in fact it does matter. Let's just
3385 * always program the same value and hope it's OK.
3386 */
3387 val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
3388 val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;
3389
f72df8db
VS
3390 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
3391 }
e4a1d846 3392
67fa24b4
VS
3393 /*
3394 * The document says that bit 27 should be set for ch0 and bit 26 for
3395 * ch1; that might be a typo in the doc.
3396 * For now, set bit 27 for both ch0 and ch1 for this unique transition
3397 * scale selection.
3398 */
e0fce78f 3399 for (i = 0; i < intel_crtc->config->lane_count; i++) {
f72df8db 3400 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
67fa24b4 3401 if (chv_need_uniq_trans_scale(train_set))
f72df8db 3402 val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
67fa24b4
VS
3403 else
3404 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
3405 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
e4a1d846
CML
3406 }
3407
3408 /* Start swing calculation */
1966e59e
VS
3409 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
3410 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3411 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
3412
e0fce78f
VS
3413 if (intel_crtc->config->lane_count > 2) {
3414 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
3415 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
3416 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
3417 }
e4a1d846
CML
3418
3419 /* LRC Bypass */
3420 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW30);
3421 val |= DPIO_LRC_BYPASS;
3422 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW30, val);
3423
a580516d 3424 mutex_unlock(&dev_priv->sb_lock);
e4a1d846
CML
3425
3426 return 0;
3427}
3428
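/*
 * Fold the per-lane voltage swing / pre-emphasis adjustment requests from
 * the sink's link status into a single worst-case (highest) pair, clamp it
 * to what the source supports, and replicate the result to all four
 * train_set entries, flagging MAX_SWING/MAX_PRE_EMPHASIS when the limits
 * are hit.
 */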
a4fc5ed6 3429static void
0301b3ac
JN
3430intel_get_adjust_train(struct intel_dp *intel_dp,
3431 const uint8_t link_status[DP_LINK_STATUS_SIZE])
a4fc5ed6
KP
3432{
3433 uint8_t v = 0;
3434 uint8_t p = 0;
3435 int lane;
1a2eb460
KP
3436 uint8_t voltage_max;
3437 uint8_t preemph_max;
a4fc5ed6 3438
901c2daf 3439 for (lane = 0; lane < intel_dp->lane_count; lane++) {
0f037bde
DV
3440 uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
3441 uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
a4fc5ed6
KP
3442
3443 if (this_v > v)
3444 v = this_v;
3445 if (this_p > p)
3446 p = this_p;
3447 }
3448
1a2eb460 3449 voltage_max = intel_dp_voltage_max(intel_dp);
417e822d
KP
3450 if (v >= voltage_max)
3451 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
a4fc5ed6 3452
1a2eb460
KP
3453 preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
3454 if (p >= preemph_max)
3455 p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
a4fc5ed6
KP
3456
3457 for (lane = 0; lane < 4; lane++)
33a34e4e 3458 intel_dp->train_set[lane] = v | p;
a4fc5ed6
KP
3459}
3460
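/*
 * Illustrative sketch, not part of the driver: decoding a train_set byte
 * (as built above) back into its components. The helper name is made up
 * for illustration; the macros are the standard DPCD definitions already
 * used throughout this file.
 */
static inline void example_decode_train_set(uint8_t train_set)
{
	uint8_t vswing = train_set & DP_TRAIN_VOLTAGE_SWING_MASK;
	uint8_t preemph = (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
			  DP_TRAIN_PRE_EMPHASIS_SHIFT;
	bool max_swing = train_set & DP_TRAIN_MAX_SWING_REACHED;

	/* Compare with the debug output in intel_dp_set_signal_levels() below. */
	DRM_DEBUG_KMS("vswing level %d%s, pre-emphasis level %d\n",
		      vswing, max_swing ? " (max)" : "", preemph);
}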
3461static uint32_t
5829975c 3462gen4_signal_levels(uint8_t train_set)
a4fc5ed6 3463{
3cf2efb1 3464 uint32_t signal_levels = 0;
a4fc5ed6 3465
3cf2efb1 3466 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3467 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
a4fc5ed6
KP
3468 default:
3469 signal_levels |= DP_VOLTAGE_0_4;
3470 break;
bd60018a 3471 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
a4fc5ed6
KP
3472 signal_levels |= DP_VOLTAGE_0_6;
3473 break;
bd60018a 3474 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
a4fc5ed6
KP
3475 signal_levels |= DP_VOLTAGE_0_8;
3476 break;
bd60018a 3477 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
a4fc5ed6
KP
3478 signal_levels |= DP_VOLTAGE_1_2;
3479 break;
3480 }
3cf2efb1 3481 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 3482 case DP_TRAIN_PRE_EMPH_LEVEL_0:
a4fc5ed6
KP
3483 default:
3484 signal_levels |= DP_PRE_EMPHASIS_0;
3485 break;
bd60018a 3486 case DP_TRAIN_PRE_EMPH_LEVEL_1:
a4fc5ed6
KP
3487 signal_levels |= DP_PRE_EMPHASIS_3_5;
3488 break;
bd60018a 3489 case DP_TRAIN_PRE_EMPH_LEVEL_2:
a4fc5ed6
KP
3490 signal_levels |= DP_PRE_EMPHASIS_6;
3491 break;
bd60018a 3492 case DP_TRAIN_PRE_EMPH_LEVEL_3:
a4fc5ed6
KP
3493 signal_levels |= DP_PRE_EMPHASIS_9_5;
3494 break;
3495 }
3496 return signal_levels;
3497}
3498
e3421a18
ZW
3499/* Gen6's DP voltage swing and pre-emphasis control */
3500static uint32_t
5829975c 3501gen6_edp_signal_levels(uint8_t train_set)
e3421a18 3502{
3c5a62b5
YL
3503 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3504 DP_TRAIN_PRE_EMPHASIS_MASK);
3505 switch (signal_levels) {
bd60018a
SJ
3506 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3507 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3c5a62b5 3508 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
bd60018a 3509 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3c5a62b5 3510 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
bd60018a
SJ
3511 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3512 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3c5a62b5 3513 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
bd60018a
SJ
3514 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3515 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3c5a62b5 3516 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
bd60018a
SJ
3517 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3518 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3c5a62b5 3519 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
e3421a18 3520 default:
3c5a62b5
YL
3521 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: "
3522 "0x%x\n", signal_levels);
3523 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
e3421a18
ZW
3524 }
3525}
3526
1a2eb460
KP
3527/* Gen7's DP voltage swing and pre-emphasis control */
3528static uint32_t
5829975c 3529gen7_edp_signal_levels(uint8_t train_set)
1a2eb460
KP
3530{
3531 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3532 DP_TRAIN_PRE_EMPHASIS_MASK);
3533 switch (signal_levels) {
bd60018a 3534 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3535 return EDP_LINK_TRAIN_400MV_0DB_IVB;
bd60018a 3536 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460 3537 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
bd60018a 3538 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
1a2eb460
KP
3539 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3540
bd60018a 3541 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3542 return EDP_LINK_TRAIN_600MV_0DB_IVB;
bd60018a 3543 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460
KP
3544 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3545
bd60018a 3546 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3547 return EDP_LINK_TRAIN_800MV_0DB_IVB;
bd60018a 3548 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460
KP
3549 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3550
3551 default:
3552 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level: "
3553 "0x%x\n", signal_levels);
3554 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3555 }
3556}
3557
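/*
 * Platform dispatch for signal levels: DDI platforms use ddi_signal_levels()
 * (signal_levels is forced to 0 on Broxton, so nothing is merged into the DP
 * register there), CHV/VLV program the PHY directly over DPIO and merge
 * nothing, while gen4-gen7 encode the levels into the DP port register
 * itself using the platform-specific masks.
 */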
f0a3424e
PZ
3558/* Properly updates "DP" with the correct signal levels. */
3559static void
3560intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
3561{
3562 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
bc7d38a4 3563 enum port port = intel_dig_port->port;
f0a3424e 3564 struct drm_device *dev = intel_dig_port->base.base.dev;
f8896f5d 3565 uint32_t signal_levels, mask = 0;
f0a3424e
PZ
3566 uint8_t train_set = intel_dp->train_set[0];
3567
f8896f5d
DW
3568 if (HAS_DDI(dev)) {
3569 signal_levels = ddi_signal_levels(intel_dp);
3570
3571 if (IS_BROXTON(dev))
3572 signal_levels = 0;
3573 else
3574 mask = DDI_BUF_EMP_MASK;
e4a1d846 3575 } else if (IS_CHERRYVIEW(dev)) {
5829975c 3576 signal_levels = chv_signal_levels(intel_dp);
e2fa6fba 3577 } else if (IS_VALLEYVIEW(dev)) {
5829975c 3578 signal_levels = vlv_signal_levels(intel_dp);
bc7d38a4 3579 } else if (IS_GEN7(dev) && port == PORT_A) {
5829975c 3580 signal_levels = gen7_edp_signal_levels(train_set);
f0a3424e 3581 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
bc7d38a4 3582 } else if (IS_GEN6(dev) && port == PORT_A) {
5829975c 3583 signal_levels = gen6_edp_signal_levels(train_set);
f0a3424e
PZ
3584 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3585 } else {
5829975c 3586 signal_levels = gen4_signal_levels(train_set);
f0a3424e
PZ
3587 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3588 }
3589
96fb9f9b
VK
3590 if (mask)
3591 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3592
3593 DRM_DEBUG_KMS("Using vswing level %d\n",
3594 train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
3595 DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
3596 (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
3597 DP_TRAIN_PRE_EMPHASIS_SHIFT);
f0a3424e
PZ
3598
3599 *DP = (*DP & ~mask) | signal_levels;
3600}
3601
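/*
 * intel_dp_set_link_train() writes the requested training pattern into the
 * source's DP port register and mirrors it to the sink: the pattern byte and
 * the per-lane drive settings are sent in a single AUX write starting at
 * DP_TRAINING_PATTERN_SET (the lane bytes are skipped when disabling
 * training).
 */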
a4fc5ed6 3602static bool
ea5b213a 3603intel_dp_set_link_train(struct intel_dp *intel_dp,
70aff66c 3604 uint32_t *DP,
58e10eb9 3605 uint8_t dp_train_pat)
a4fc5ed6 3606{
174edf1f 3607 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
90a6b7b0
VS
3608 struct drm_i915_private *dev_priv =
3609 to_i915(intel_dig_port->base.base.dev);
2cdfe6c8
JN
3610 uint8_t buf[sizeof(intel_dp->train_set) + 1];
3611 int ret, len;
a4fc5ed6 3612
7b13b58a 3613 _intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
47ea7542 3614
70aff66c 3615 I915_WRITE(intel_dp->output_reg, *DP);
ea5b213a 3616 POSTING_READ(intel_dp->output_reg);
a4fc5ed6 3617
2cdfe6c8
JN
3618 buf[0] = dp_train_pat;
3619 if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) ==
47ea7542 3620 DP_TRAINING_PATTERN_DISABLE) {
2cdfe6c8
JN
3621 /* don't write DP_TRAINING_LANEx_SET on disable */
3622 len = 1;
3623 } else {
3624 /* DP_TRAINING_LANEx_SET follow DP_TRAINING_PATTERN_SET */
901c2daf
VS
3625 memcpy(buf + 1, intel_dp->train_set, intel_dp->lane_count);
3626 len = intel_dp->lane_count + 1;
47ea7542 3627 }
a4fc5ed6 3628
9d1a1031
JN
3629 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_PATTERN_SET,
3630 buf, len);
2cdfe6c8
JN
3631
3632 return ret == len;
a4fc5ed6
KP
3633}
3634
70aff66c
JN
3635static bool
3636intel_dp_reset_link_train(struct intel_dp *intel_dp, uint32_t *DP,
3637 uint8_t dp_train_pat)
3638{
4e96c977
MK
3639 if (!intel_dp->train_set_valid)
3640 memset(intel_dp->train_set, 0, sizeof(intel_dp->train_set));
70aff66c
JN
3641 intel_dp_set_signal_levels(intel_dp, DP);
3642 return intel_dp_set_link_train(intel_dp, DP, dp_train_pat);
3643}
3644
3645static bool
3646intel_dp_update_link_train(struct intel_dp *intel_dp, uint32_t *DP,
0301b3ac 3647 const uint8_t link_status[DP_LINK_STATUS_SIZE])
70aff66c
JN
3648{
3649 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
90a6b7b0
VS
3650 struct drm_i915_private *dev_priv =
3651 to_i915(intel_dig_port->base.base.dev);
70aff66c
JN
3652 int ret;
3653
3654 intel_get_adjust_train(intel_dp, link_status);
3655 intel_dp_set_signal_levels(intel_dp, DP);
3656
3657 I915_WRITE(intel_dp->output_reg, *DP);
3658 POSTING_READ(intel_dp->output_reg);
3659
9d1a1031 3660 ret = drm_dp_dpcd_write(&intel_dp->aux, DP_TRAINING_LANE0_SET,
901c2daf 3661 intel_dp->train_set, intel_dp->lane_count);
70aff66c 3662
901c2daf 3663 return ret == intel_dp->lane_count;
70aff66c
JN
3664}
3665
3ab9c637
ID
3666static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3667{
3668 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3669 struct drm_device *dev = intel_dig_port->base.base.dev;
3670 struct drm_i915_private *dev_priv = dev->dev_private;
3671 enum port port = intel_dig_port->port;
3672 uint32_t val;
3673
3674 if (!HAS_DDI(dev))
3675 return;
3676
3677 val = I915_READ(DP_TP_CTL(port));
3678 val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3679 val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3680 I915_WRITE(DP_TP_CTL(port), val);
3681
3682 /*
3683 * On PORT_A we can have only eDP in SST mode. There the only reason
3684 * we need to set idle transmission mode is to work around a HW issue
3685 * where we enable the pipe while not in idle link-training mode.
3686 * In this case there is a requirement to wait for a minimum number of
3687 * idle patterns to be sent.
3688 */
3689 if (port == PORT_A)
3690 return;
3691
3692 if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
3693 1))
3694 DRM_ERROR("Timed out waiting for DP idle patterns\n");
3695}
3696
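/*
 * Clock recovery retry policy used below: previously trained values
 * (train_set_valid) are tried first and dropped on the first failure, the
 * same voltage swing is retried at most 5 times, and a full reset of the
 * training values is attempted at most 5 times before giving up.
 */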
33a34e4e 3697/* Enable corresponding port and start training pattern 1 */
c19b0669 3698void
33a34e4e 3699intel_dp_start_link_train(struct intel_dp *intel_dp)
a4fc5ed6 3700{
da63a9f2 3701 struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
c19b0669 3702 struct drm_device *dev = encoder->dev;
a4fc5ed6
KP
3703 int i;
3704 uint8_t voltage;
cdb0e95b 3705 int voltage_tries, loop_tries;
ea5b213a 3706 uint32_t DP = intel_dp->DP;
6aba5b6c 3707 uint8_t link_config[2];
04a60f9f 3708 uint8_t link_bw, rate_select;
a4fc5ed6 3709
affa9354 3710 if (HAS_DDI(dev))
c19b0669
PZ
3711 intel_ddi_prepare_link_retrain(encoder);
3712
901c2daf 3713 intel_dp_compute_rate(intel_dp, intel_dp->link_rate,
04a60f9f
VS
3714 &link_bw, &rate_select);
3715
3cf2efb1 3716 /* Write the link configuration data */
04a60f9f 3717 link_config[0] = link_bw;
901c2daf 3718 link_config[1] = intel_dp->lane_count;
6aba5b6c
JN
3719 if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
3720 link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
9d1a1031 3721 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2);
94ca719e 3722 if (intel_dp->num_sink_rates)
a8f3ef61 3723 drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET,
04a60f9f 3724 &rate_select, 1);
6aba5b6c
JN
3725
3726 link_config[0] = 0;
3727 link_config[1] = DP_SET_ANSI_8B10B;
9d1a1031 3728 drm_dp_dpcd_write(&intel_dp->aux, DP_DOWNSPREAD_CTRL, link_config, 2);
a4fc5ed6
KP
3729
3730 DP |= DP_PORT_EN;
1a2eb460 3731
70aff66c
JN
3732 /* clock recovery */
3733 if (!intel_dp_reset_link_train(intel_dp, &DP,
3734 DP_TRAINING_PATTERN_1 |
3735 DP_LINK_SCRAMBLING_DISABLE)) {
3736 DRM_ERROR("failed to enable link training\n");
3737 return;
3738 }
3739
a4fc5ed6 3740 voltage = 0xff;
cdb0e95b
KP
3741 voltage_tries = 0;
3742 loop_tries = 0;
a4fc5ed6 3743 for (;;) {
70aff66c 3744 uint8_t link_status[DP_LINK_STATUS_SIZE];
a4fc5ed6 3745
a7c9655f 3746 drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
93f62dad
KP
3747 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3748 DRM_ERROR("failed to get link status\n");
a4fc5ed6 3749 break;
93f62dad 3750 }
a4fc5ed6 3751
901c2daf 3752 if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
93f62dad 3753 DRM_DEBUG_KMS("clock recovery OK\n");
3cf2efb1
CW
3754 break;
3755 }
3756
4e96c977
MK
3757 /*
3758 * if we used previously trained voltage and pre-emphasis values
3759 * and we don't get clock recovery, reset link training values
3760 */
3761 if (intel_dp->train_set_valid) {
3762 DRM_DEBUG_KMS("clock recovery not ok, reset");
3763 /* clear the flag as we are not reusing train set */
3764 intel_dp->train_set_valid = false;
3765 if (!intel_dp_reset_link_train(intel_dp, &DP,
3766 DP_TRAINING_PATTERN_1 |
3767 DP_LINK_SCRAMBLING_DISABLE)) {
3768 DRM_ERROR("failed to enable link training\n");
3769 return;
3770 }
3771 continue;
3772 }
3773
3cf2efb1 3774 /* Check to see if we've tried the max voltage */
901c2daf 3775 for (i = 0; i < intel_dp->lane_count; i++)
3cf2efb1 3776 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
a4fc5ed6 3777 break;
901c2daf 3778 if (i == intel_dp->lane_count) {
b06fbda3
DV
3779 ++loop_tries;
3780 if (loop_tries == 5) {
3def84b3 3781 DRM_ERROR("too many full retries, give up\n");
cdb0e95b
KP
3782 break;
3783 }
70aff66c
JN
3784 intel_dp_reset_link_train(intel_dp, &DP,
3785 DP_TRAINING_PATTERN_1 |
3786 DP_LINK_SCRAMBLING_DISABLE);
cdb0e95b
KP
3787 voltage_tries = 0;
3788 continue;
3789 }
a4fc5ed6 3790
3cf2efb1 3791 /* Check to see if we've tried the same voltage 5 times */
b06fbda3 3792 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
24773670 3793 ++voltage_tries;
b06fbda3 3794 if (voltage_tries == 5) {
3def84b3 3795 DRM_ERROR("too many voltage retries, give up\n");
b06fbda3
DV
3796 break;
3797 }
3798 } else
3799 voltage_tries = 0;
3800 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
a4fc5ed6 3801
70aff66c
JN
3802 /* Update training set as requested by target */
3803 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3804 DRM_ERROR("failed to update link training\n");
3805 break;
3806 }
a4fc5ed6
KP
3807 }
3808
33a34e4e
JB
3809 intel_dp->DP = DP;
3810}
3811
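/*
 * Channel equalization: TPS3 is selected whenever both the source and the
 * sink support it (TPS2 otherwise). Equalization is retried a handful of
 * times, falling back to a full clock recovery restart when either clock
 * recovery is lost or the retries are exhausted, and the whole procedure is
 * aborted after too many such restarts.
 */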
c19b0669 3812void
33a34e4e
JB
3813intel_dp_complete_link_train(struct intel_dp *intel_dp)
3814{
bc5133d5
JN
3815 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3816 struct drm_device *dev = dig_port->base.base.dev;
33a34e4e 3817 bool channel_eq = false;
37f80975 3818 int tries, cr_tries;
33a34e4e 3819 uint32_t DP = intel_dp->DP;
06ea66b6
TP
3820 uint32_t training_pattern = DP_TRAINING_PATTERN_2;
3821
bc5133d5
JN
3822 /*
3823 * Use Training Pattern 3 (TPS3) for HBR2, or for DP 1.2 devices that support it.
3824 *
3825 * Intel platforms that support HBR2 also support TPS3. TPS3 support is
3826 * also mandatory for downstream devices that support HBR2.
3827 *
3828 * Due to WaDisableHBR2, SKL < B0 is the only exception where TPS3 is
3829 * supported but still not enabled.
3830 */
1da7d713
JN
3831 if (intel_dp_source_supports_hbr2(dev) &&
3832 drm_dp_tps3_supported(intel_dp->dpcd))
06ea66b6 3833 training_pattern = DP_TRAINING_PATTERN_3;
1da7d713
JN
3834 else if (intel_dp->link_rate == 540000)
3835 DRM_ERROR("5.4 Gbps link rate without HBR2/TPS3 support\n");
33a34e4e 3836
a4fc5ed6 3837 /* channel equalization */
70aff66c 3838 if (!intel_dp_set_link_train(intel_dp, &DP,
06ea66b6 3839 training_pattern |
70aff66c
JN
3840 DP_LINK_SCRAMBLING_DISABLE)) {
3841 DRM_ERROR("failed to start channel equalization\n");
3842 return;
3843 }
3844
a4fc5ed6 3845 tries = 0;
37f80975 3846 cr_tries = 0;
a4fc5ed6
KP
3847 channel_eq = false;
3848 for (;;) {
70aff66c 3849 uint8_t link_status[DP_LINK_STATUS_SIZE];
e3421a18 3850
37f80975
JB
3851 if (cr_tries > 5) {
3852 DRM_ERROR("failed to train DP, aborting\n");
37f80975
JB
3853 break;
3854 }
3855
a7c9655f 3856 drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
70aff66c
JN
3857 if (!intel_dp_get_link_status(intel_dp, link_status)) {
3858 DRM_ERROR("failed to get link status\n");
a4fc5ed6 3859 break;
70aff66c 3860 }
a4fc5ed6 3861
37f80975 3862 /* Make sure clock is still ok */
90a6b7b0 3863 if (!drm_dp_clock_recovery_ok(link_status,
901c2daf 3864 intel_dp->lane_count)) {
4e96c977 3865 intel_dp->train_set_valid = false;
37f80975 3866 intel_dp_start_link_train(intel_dp);
70aff66c 3867 intel_dp_set_link_train(intel_dp, &DP,
06ea66b6 3868 training_pattern |
70aff66c 3869 DP_LINK_SCRAMBLING_DISABLE);
37f80975
JB
3870 cr_tries++;
3871 continue;
3872 }
3873
90a6b7b0 3874 if (drm_dp_channel_eq_ok(link_status,
901c2daf 3875 intel_dp->lane_count)) {
3cf2efb1
CW
3876 channel_eq = true;
3877 break;
3878 }
a4fc5ed6 3879
37f80975
JB
3880 /* Try 5 times, then try clock recovery if that fails */
3881 if (tries > 5) {
4e96c977 3882 intel_dp->train_set_valid = false;
37f80975 3883 intel_dp_start_link_train(intel_dp);
70aff66c 3884 intel_dp_set_link_train(intel_dp, &DP,
06ea66b6 3885 training_pattern |
70aff66c 3886 DP_LINK_SCRAMBLING_DISABLE);
37f80975
JB
3887 tries = 0;
3888 cr_tries++;
3889 continue;
3890 }
a4fc5ed6 3891
70aff66c
JN
3892 /* Update training set as requested by target */
3893 if (!intel_dp_update_link_train(intel_dp, &DP, link_status)) {
3894 DRM_ERROR("failed to update link training\n");
3895 break;
3896 }
3cf2efb1 3897 ++tries;
869184a6 3898 }
3cf2efb1 3899
3ab9c637
ID
3900 intel_dp_set_idle_link_train(intel_dp);
3901
3902 intel_dp->DP = DP;
3903
4e96c977 3904 if (channel_eq) {
5fa836a9 3905 intel_dp->train_set_valid = true;
07f42258 3906 DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
4e96c977 3907 }
3ab9c637
ID
3908}
3909
3910void intel_dp_stop_link_train(struct intel_dp *intel_dp)
3911{
70aff66c 3912 intel_dp_set_link_train(intel_dp, &intel_dp->DP,
3ab9c637 3913 DP_TRAINING_PATTERN_DISABLE);
a4fc5ed6
KP
3914}
3915
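/*
 * intel_dp_link_down() idles and disables the DP port: it first switches the
 * port to the idle training pattern, then clears the port and audio enable
 * bits, with an extra dance on IBX to briefly re-enable the port on
 * transcoder A (see the workaround comment in the body).
 */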
3916static void
ea5b213a 3917intel_dp_link_down(struct intel_dp *intel_dp)
a4fc5ed6 3918{
da63a9f2 3919 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1612c8bd 3920 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
bc7d38a4 3921 enum port port = intel_dig_port->port;
da63a9f2 3922 struct drm_device *dev = intel_dig_port->base.base.dev;
a4fc5ed6 3923 struct drm_i915_private *dev_priv = dev->dev_private;
ea5b213a 3924 uint32_t DP = intel_dp->DP;
a4fc5ed6 3925
bc76e320 3926 if (WARN_ON(HAS_DDI(dev)))
c19b0669
PZ
3927 return;
3928
0c33d8d7 3929 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
1b39d6f3
CW
3930 return;
3931
28c97730 3932 DRM_DEBUG_KMS("\n");
32f9d658 3933
39e5fa88
VS
3934 if ((IS_GEN7(dev) && port == PORT_A) ||
3935 (HAS_PCH_CPT(dev) && port != PORT_A)) {
e3421a18 3936 DP &= ~DP_LINK_TRAIN_MASK_CPT;
1612c8bd 3937 DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
e3421a18 3938 } else {
aad3d14d
VS
3939 if (IS_CHERRYVIEW(dev))
3940 DP &= ~DP_LINK_TRAIN_MASK_CHV;
3941 else
3942 DP &= ~DP_LINK_TRAIN_MASK;
1612c8bd 3943 DP |= DP_LINK_TRAIN_PAT_IDLE;
e3421a18 3944 }
1612c8bd 3945 I915_WRITE(intel_dp->output_reg, DP);
fe255d00 3946 POSTING_READ(intel_dp->output_reg);
5eb08b69 3947
1612c8bd
VS
3948 DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
3949 I915_WRITE(intel_dp->output_reg, DP);
3950 POSTING_READ(intel_dp->output_reg);
3951
3952 /*
3953 * HW workaround for IBX, we need to move the port
3954 * to transcoder A after disabling it to allow the
3955 * matching HDMI port to be enabled on transcoder A.
3956 */
3957 if (HAS_PCH_IBX(dev) && crtc->pipe == PIPE_B && port != PORT_A) {
3958 /* always enable with pattern 1 (as per spec) */
3959 DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
3960 DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
3961 I915_WRITE(intel_dp->output_reg, DP);
3962 POSTING_READ(intel_dp->output_reg);
3963
3964 DP &= ~DP_PORT_EN;
5bddd17f 3965 I915_WRITE(intel_dp->output_reg, DP);
0ca09685 3966 POSTING_READ(intel_dp->output_reg);
5bddd17f
EA
3967 }
3968
f01eca2e 3969 msleep(intel_dp->panel_power_down_delay);
a4fc5ed6
KP
3970}
3971
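/*
 * intel_dp_get_dpcd() caches the sink's receiver capability block and, on
 * top of that, reads the eDP PSR/PSR2 capabilities, the eDP 1.4 supported
 * link rate table (converted to the driver's 10 kHz units) and, for branch
 * devices, the per-port downstream info.
 */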
26d61aad
KP
3972static bool
3973intel_dp_get_dpcd(struct intel_dp *intel_dp)
92fd8fd1 3974{
a031d709
RV
3975 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3976 struct drm_device *dev = dig_port->base.base.dev;
3977 struct drm_i915_private *dev_priv = dev->dev_private;
fc0f8e25 3978 uint8_t rev;
a031d709 3979
9d1a1031
JN
3980 if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd,
3981 sizeof(intel_dp->dpcd)) < 0)
edb39244 3982 return false; /* aux transfer failed */
92fd8fd1 3983
a8e98153 3984 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
577c7a50 3985
edb39244
AJ
3986 if (intel_dp->dpcd[DP_DPCD_REV] == 0)
3987 return false; /* DPCD not present */
3988
2293bb5c
SK
3989 /* Check if the panel supports PSR */
3990 memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
50003939 3991 if (is_edp(intel_dp)) {
9d1a1031
JN
3992 intel_dp_dpcd_read_wake(&intel_dp->aux, DP_PSR_SUPPORT,
3993 intel_dp->psr_dpcd,
3994 sizeof(intel_dp->psr_dpcd));
a031d709
RV
3995 if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
3996 dev_priv->psr.sink_support = true;
50003939 3997 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
a031d709 3998 }
474d1ec4
SJ
3999
4000 if (INTEL_INFO(dev)->gen >= 9 &&
4001 (intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
4002 uint8_t frame_sync_cap;
4003
4004 dev_priv->psr.sink_support = true;
4005 intel_dp_dpcd_read_wake(&intel_dp->aux,
4006 DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
4007 &frame_sync_cap, 1);
4008 dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
4009 /* PSR2 needs frame sync as well */
4010 dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
4011 DRM_DEBUG_KMS("PSR2 %s on sink",
4012 dev_priv->psr.psr2_support ? "supported" : "not supported");
4013 }
50003939
JN
4014 }
4015
bc5133d5
JN
4016 DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n",
4017 intel_dp_source_supports_hbr2(dev) ? "yes" : "no",
4018 drm_dp_tps3_supported(intel_dp->dpcd) ? "yes" : "no");
06ea66b6 4019
fc0f8e25
SJ
4020 /* Intermediate frequency support */
4021 if (is_edp(intel_dp) &&
4022 (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
4023 (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) &&
4024 (rev >= 0x03)) { /* eDP v1.4 or higher */
94ca719e 4025 __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
ea2d8a42
VS
4026 int i;
4027
fc0f8e25
SJ
4028 intel_dp_dpcd_read_wake(&intel_dp->aux,
4029 DP_SUPPORTED_LINK_RATES,
94ca719e
VS
4030 sink_rates,
4031 sizeof(sink_rates));
ea2d8a42 4032
94ca719e
VS
4033 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
4034 int val = le16_to_cpu(sink_rates[i]);
ea2d8a42
VS
4035
4036 if (val == 0)
4037 break;
4038
af77b974
SJ
4039 /* DPCD value is in 200 kHz units; convert to the 10 kHz units the driver uses */
4040 intel_dp->sink_rates[i] = (val * 200) / 10;
ea2d8a42 4041 }
94ca719e 4042 intel_dp->num_sink_rates = i;
fc0f8e25 4043 }
0336400e
VS
4044
4045 intel_dp_print_rates(intel_dp);
4046
edb39244
AJ
4047 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4048 DP_DWN_STRM_PORT_PRESENT))
4049 return true; /* native DP sink */
4050
4051 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
4052 return true; /* no per-port downstream info */
4053
9d1a1031
JN
4054 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
4055 intel_dp->downstream_ports,
4056 DP_MAX_DOWNSTREAM_PORTS) < 0)
edb39244
AJ
4057 return false; /* downstream port status fetch failed */
4058
4059 return true;
92fd8fd1
KP
4060}
4061
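/*
 * intel_dp_probe_oui() merely logs the sink and branch OUIs when the sink
 * advertises OUI support; intel_dp_probe_mst() checks for DP 1.2+ MST
 * capability and enables or disables the MST topology manager accordingly.
 */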
0d198328
AJ
4062static void
4063intel_dp_probe_oui(struct intel_dp *intel_dp)
4064{
4065 u8 buf[3];
4066
4067 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
4068 return;
4069
9d1a1031 4070 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_OUI, buf, 3) == 3)
0d198328
AJ
4071 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
4072 buf[0], buf[1], buf[2]);
4073
9d1a1031 4074 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_BRANCH_OUI, buf, 3) == 3)
0d198328
AJ
4075 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
4076 buf[0], buf[1], buf[2]);
4077}
4078
0e32b39c
DA
4079static bool
4080intel_dp_probe_mst(struct intel_dp *intel_dp)
4081{
4082 u8 buf[1];
4083
4084 if (!intel_dp->can_mst)
4085 return false;
4086
4087 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
4088 return false;
4089
0e32b39c
DA
4090 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_MSTM_CAP, buf, 1)) {
4091 if (buf[0] & DP_MST_CAP) {
4092 DRM_DEBUG_KMS("Sink is MST capable\n");
4093 intel_dp->is_mst = true;
4094 } else {
4095 DRM_DEBUG_KMS("Sink is not MST capable\n");
4096 intel_dp->is_mst = false;
4097 }
4098 }
0e32b39c
DA
4099
4100 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4101 return intel_dp->is_mst;
4102}
4103
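/*
 * Sink CRC test support: intel_dp_sink_crc_start()/_stop() toggle the sink's
 * CRC generation through DP_TEST_SINK (with IPS disabled on the source while
 * the test runs), and intel_dp_sink_crc() samples DP_TEST_SINK_MISC once per
 * vblank until a fresh CRC shows up, giving up after 6 attempts.
 */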
e5a1cab5 4104static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
d2e216d0 4105{
082dcc7c
RV
4106 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4107 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
ad9dc91b 4108 u8 buf;
e5a1cab5 4109 int ret = 0;
d2e216d0 4110
082dcc7c
RV
4111 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
4112 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
e5a1cab5
RV
4113 ret = -EIO;
4114 goto out;
4373f0f2
PZ
4115 }
4116
082dcc7c 4117 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
e5a1cab5 4118 buf & ~DP_TEST_SINK_START) < 0) {
082dcc7c 4119 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
e5a1cab5
RV
4120 ret = -EIO;
4121 goto out;
4122 }
d2e216d0 4123
621d4c76 4124 intel_dp->sink_crc.started = false;
e5a1cab5 4125 out:
082dcc7c 4126 hsw_enable_ips(intel_crtc);
e5a1cab5 4127 return ret;
082dcc7c
RV
4128}
4129
4130static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
4131{
4132 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4133 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4134 u8 buf;
e5a1cab5
RV
4135 int ret;
4136
621d4c76 4137 if (intel_dp->sink_crc.started) {
e5a1cab5
RV
4138 ret = intel_dp_sink_crc_stop(intel_dp);
4139 if (ret)
4140 return ret;
4141 }
082dcc7c
RV
4142
4143 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
4144 return -EIO;
4145
4146 if (!(buf & DP_TEST_CRC_SUPPORTED))
4147 return -ENOTTY;
4148
621d4c76
RV
4149 intel_dp->sink_crc.last_count = buf & DP_TEST_COUNT_MASK;
4150
082dcc7c
RV
4151 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
4152 return -EIO;
4153
4154 hsw_disable_ips(intel_crtc);
1dda5f93 4155
9d1a1031 4156 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
082dcc7c
RV
4157 buf | DP_TEST_SINK_START) < 0) {
4158 hsw_enable_ips(intel_crtc);
4159 return -EIO;
4373f0f2
PZ
4160 }
4161
621d4c76 4162 intel_dp->sink_crc.started = true;
082dcc7c
RV
4163 return 0;
4164}
4165
4166int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
4167{
4168 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
4169 struct drm_device *dev = dig_port->base.base.dev;
4170 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
4171 u8 buf;
621d4c76 4172 int count, ret;
082dcc7c 4173 int attempts = 6;
aabc95dc 4174 bool old_equal_new;
082dcc7c
RV
4175
4176 ret = intel_dp_sink_crc_start(intel_dp);
4177 if (ret)
4178 return ret;
4179
ad9dc91b 4180 do {
621d4c76
RV
4181 intel_wait_for_vblank(dev, intel_crtc->pipe);
4182
1dda5f93 4183 if (drm_dp_dpcd_readb(&intel_dp->aux,
4373f0f2
PZ
4184 DP_TEST_SINK_MISC, &buf) < 0) {
4185 ret = -EIO;
afe0d67e 4186 goto stop;
4373f0f2 4187 }
621d4c76 4188 count = buf & DP_TEST_COUNT_MASK;
aabc95dc 4189
621d4c76
RV
4190 /*
4191 * Count might be reset during the loop. In this case
4192 * last known count needs to be reset as well.
4193 */
4194 if (count == 0)
4195 intel_dp->sink_crc.last_count = 0;
4196
4197 if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
4198 ret = -EIO;
4199 goto stop;
4200 }
aabc95dc
RV
4201
4202 old_equal_new = (count == intel_dp->sink_crc.last_count &&
4203 !memcmp(intel_dp->sink_crc.last_crc, crc,
4204 6 * sizeof(u8)));
4205
4206 } while (--attempts && (count == 0 || old_equal_new));
621d4c76
RV
4207
4208 intel_dp->sink_crc.last_count = buf & DP_TEST_COUNT_MASK;
4209 memcpy(intel_dp->sink_crc.last_crc, crc, 6 * sizeof(u8));
ad9dc91b
RV
4210
4211 if (attempts == 0) {
aabc95dc
RV
4212 if (old_equal_new) {
4213 DRM_DEBUG_KMS("Unreliable Sink CRC counter: Current returned CRC is identical to the previous one\n");
4214 } else {
4215 DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n");
4216 ret = -ETIMEDOUT;
4217 goto stop;
4218 }
ad9dc91b 4219 }
d2e216d0 4220
afe0d67e 4221stop:
082dcc7c 4222 intel_dp_sink_crc_stop(intel_dp);
4373f0f2 4223 return ret;
d2e216d0
RV
4224}
4225
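/*
 * Hypothetical usage sketch, not part of this file: how a caller (e.g. a
 * debugfs hook) might fetch and log a single sink CRC sample using the
 * helper defined above. The function name is made up for illustration.
 */
static inline void example_log_sink_crc(struct intel_dp *intel_dp)
{
	u8 crc[6];

	if (intel_dp_sink_crc(intel_dp, crc) == 0)
		DRM_DEBUG_KMS("sink CRC: %02x%02x%02x%02x%02x%02x\n",
			      crc[0], crc[1], crc[2], crc[3], crc[4], crc[5]);
}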
a60f0e38
JB
4226static bool
4227intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4228{
9d1a1031
JN
4229 return intel_dp_dpcd_read_wake(&intel_dp->aux,
4230 DP_DEVICE_SERVICE_IRQ_VECTOR,
4231 sink_irq_vector, 1) == 1;
a60f0e38
JB
4232}
4233
0e32b39c
DA
4234static bool
4235intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4236{
4237 int ret;
4238
4239 ret = intel_dp_dpcd_read_wake(&intel_dp->aux,
4240 DP_SINK_COUNT_ESI,
4241 sink_irq_vector, 14);
4242 if (ret != 14)
4243 return false;
4244
4245 return true;
4246}
4247
c5d5ab7a
TP
4248static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
4249{
4250 uint8_t test_result = DP_TEST_ACK;
4251 return test_result;
4252}
4253
4254static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
4255{
4256 uint8_t test_result = DP_TEST_NAK;
4257 return test_result;
4258}
4259
4260static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
a60f0e38 4261{
c5d5ab7a 4262 uint8_t test_result = DP_TEST_NAK;
559be30c
TP
4263 struct intel_connector *intel_connector = intel_dp->attached_connector;
4264 struct drm_connector *connector = &intel_connector->base;
4265
4266 if (intel_connector->detect_edid == NULL ||
ac6f2e29 4267 connector->edid_corrupt ||
559be30c
TP
4268 intel_dp->aux.i2c_defer_count > 6) {
4269 /* Check EDID read for NACKs, DEFERs and corruption
4270 * (DP CTS 1.2 Core r1.1)
4271 * 4.2.2.4 : Failed EDID read, I2C_NAK
4272 * 4.2.2.5 : Failed EDID read, I2C_DEFER
4273 * 4.2.2.6 : EDID corruption detected
4274 * Use failsafe mode for all cases
4275 */
4276 if (intel_dp->aux.i2c_nack_count > 0 ||
4277 intel_dp->aux.i2c_defer_count > 0)
4278 DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
4279 intel_dp->aux.i2c_nack_count,
4280 intel_dp->aux.i2c_defer_count);
4281 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_FAILSAFE;
4282 } else {
f79b468e
TS
4283 struct edid *block = intel_connector->detect_edid;
4284
4285 /* We have to write the checksum
4286 * of the last block read
4287 */
4288 block += intel_connector->detect_edid->extensions;
4289
559be30c
TP
4290 if (!drm_dp_dpcd_write(&intel_dp->aux,
4291 DP_TEST_EDID_CHECKSUM,
f79b468e 4292 &block->checksum,
5a1cc655 4293 1))
559be30c
TP
4294 DRM_DEBUG_KMS("Failed to write EDID checksum\n");
4295
4296 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
4297 intel_dp->compliance_test_data = INTEL_DP_RESOLUTION_STANDARD;
4298 }
4299
4300 /* Set test active flag here so userspace doesn't interrupt things */
4301 intel_dp->compliance_test_active = 1;
4302
c5d5ab7a
TP
4303 return test_result;
4304}
4305
4306static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
a60f0e38 4307{
c5d5ab7a
TP
4308 uint8_t test_result = DP_TEST_NAK;
4309 return test_result;
4310}
4311
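/*
 * DP compliance test dispatch: read DP_TEST_REQUEST from the sink, run the
 * matching autotest handler and write the ACK/NAK response back to
 * DP_TEST_RESPONSE. Only the EDID test does real work at this point; the
 * link training, video pattern and PHY pattern handlers are stubs returning
 * a fixed ACK/NAK.
 */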
4312static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
4313{
4314 uint8_t response = DP_TEST_NAK;
4315 uint8_t rxdata = 0;
4316 int status = 0;
4317
559be30c 4318 intel_dp->compliance_test_active = 0;
c5d5ab7a 4319 intel_dp->compliance_test_type = 0;
559be30c
TP
4320 intel_dp->compliance_test_data = 0;
4321
c5d5ab7a
TP
4322 intel_dp->aux.i2c_nack_count = 0;
4323 intel_dp->aux.i2c_defer_count = 0;
4324
4325 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_REQUEST, &rxdata, 1);
4326 if (status <= 0) {
4327 DRM_DEBUG_KMS("Could not read test request from sink\n");
4328 goto update_status;
4329 }
4330
4331 switch (rxdata) {
4332 case DP_TEST_LINK_TRAINING:
4333 DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
4334 intel_dp->compliance_test_type = DP_TEST_LINK_TRAINING;
4335 response = intel_dp_autotest_link_training(intel_dp);
4336 break;
4337 case DP_TEST_LINK_VIDEO_PATTERN:
4338 DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
4339 intel_dp->compliance_test_type = DP_TEST_LINK_VIDEO_PATTERN;
4340 response = intel_dp_autotest_video_pattern(intel_dp);
4341 break;
4342 case DP_TEST_LINK_EDID_READ:
4343 DRM_DEBUG_KMS("EDID test requested\n");
4344 intel_dp->compliance_test_type = DP_TEST_LINK_EDID_READ;
4345 response = intel_dp_autotest_edid(intel_dp);
4346 break;
4347 case DP_TEST_LINK_PHY_TEST_PATTERN:
4348 DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
4349 intel_dp->compliance_test_type = DP_TEST_LINK_PHY_TEST_PATTERN;
4350 response = intel_dp_autotest_phy_pattern(intel_dp);
4351 break;
4352 default:
4353 DRM_DEBUG_KMS("Invalid test request '%02x'\n", rxdata);
4354 break;
4355 }
4356
4357update_status:
4358 status = drm_dp_dpcd_write(&intel_dp->aux,
4359 DP_TEST_RESPONSE,
4360 &response, 1);
4361 if (status <= 0)
4362 DRM_DEBUG_KMS("Could not write test response to sink\n");
a60f0e38
JB
4363}
4364
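/*
 * MST short-pulse handling: read the ESI block from the sink, retrain if
 * channel EQ has been lost on an active MST link, let the topology manager
 * process the event and ack the handled bits back to the sink. If the ESI
 * read fails, MST mode is abandoned and a hotplug event is generated.
 */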
0e32b39c
DA
4365static int
4366intel_dp_check_mst_status(struct intel_dp *intel_dp)
4367{
4368 bool bret;
4369
4370 if (intel_dp->is_mst) {
4371 u8 esi[16] = { 0 };
4372 int ret = 0;
4373 int retry;
4374 bool handled;
4375 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4376go_again:
4377 if (bret == true) {
4378
4379 /* check link status - esi[10] = 0x200c */
90a6b7b0 4380 if (intel_dp->active_mst_links &&
901c2daf 4381 !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
0e32b39c
DA
4382 DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
4383 intel_dp_start_link_train(intel_dp);
4384 intel_dp_complete_link_train(intel_dp);
4385 intel_dp_stop_link_train(intel_dp);
4386 }
4387
6f34cc39 4388 DRM_DEBUG_KMS("got esi %3ph\n", esi);
0e32b39c
DA
4389 ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
4390
4391 if (handled) {
4392 for (retry = 0; retry < 3; retry++) {
4393 int wret;
4394 wret = drm_dp_dpcd_write(&intel_dp->aux,
4395 DP_SINK_COUNT_ESI+1,
4396 &esi[1], 3);
4397 if (wret == 3) {
4398 break;
4399 }
4400 }
4401
4402 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4403 if (bret == true) {
6f34cc39 4404 DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
0e32b39c
DA
4405 goto go_again;
4406 }
4407 } else
4408 ret = 0;
4409
4410 return ret;
4411 } else {
4412 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4413 DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
4414 intel_dp->is_mst = false;
4415 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4416 /* send a hotplug event */
4417 drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
4418 }
4419 }
4420 return -EINVAL;
4421}
4422
a4fc5ed6
KP
4423/*
4424 * According to DP spec
4425 * 5.1.2:
4426 * 1. Read DPCD
4427 * 2. Configure link according to Receiver Capabilities
4428 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
4429 * 4. Check link status on receipt of hot-plug interrupt
4430 */
a5146200 4431static void
ea5b213a 4432intel_dp_check_link_status(struct intel_dp *intel_dp)
a4fc5ed6 4433{
5b215bcf 4434 struct drm_device *dev = intel_dp_to_dev(intel_dp);
da63a9f2 4435 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
a60f0e38 4436 u8 sink_irq_vector;
93f62dad 4437 u8 link_status[DP_LINK_STATUS_SIZE];
a60f0e38 4438
5b215bcf
DA
4439 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
4440
e02f9a06 4441 if (!intel_encoder->base.crtc)
a4fc5ed6
KP
4442 return;
4443
1a125d8a
ID
4444 if (!to_intel_crtc(intel_encoder->base.crtc)->active)
4445 return;
4446
92fd8fd1 4447 /* Try to read receiver status if the link appears to be up */
93f62dad 4448 if (!intel_dp_get_link_status(intel_dp, link_status)) {
a4fc5ed6
KP
4449 return;
4450 }
4451
92fd8fd1 4452 /* Now read the DPCD to see if it's actually running */
26d61aad 4453 if (!intel_dp_get_dpcd(intel_dp)) {
59cd09e1
JB
4454 return;
4455 }
4456
a60f0e38
JB
4457 /* Try to read the source of the interrupt */
4458 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4459 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4460 /* Clear interrupt source */
9d1a1031
JN
4461 drm_dp_dpcd_writeb(&intel_dp->aux,
4462 DP_DEVICE_SERVICE_IRQ_VECTOR,
4463 sink_irq_vector);
a60f0e38
JB
4464
4465 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
09b1eb13 4466 DRM_DEBUG_DRIVER("Test request in short pulse not handled\n");
a60f0e38
JB
4467 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4468 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4469 }
4470
901c2daf 4471 if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
92fd8fd1 4472 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
8e329a03 4473 intel_encoder->base.name);
33a34e4e
JB
4474 intel_dp_start_link_train(intel_dp);
4475 intel_dp_complete_link_train(intel_dp);
3ab9c637 4476 intel_dp_stop_link_train(intel_dp);
33a34e4e 4477 }
a4fc5ed6 4478}
a4fc5ed6 4479
caf9ab24 4480/* XXX this is probably wrong for multiple downstream ports */
71ba9000 4481static enum drm_connector_status
26d61aad 4482intel_dp_detect_dpcd(struct intel_dp *intel_dp)
71ba9000 4483{
caf9ab24 4484 uint8_t *dpcd = intel_dp->dpcd;
caf9ab24
AJ
4485 uint8_t type;
4486
4487 if (!intel_dp_get_dpcd(intel_dp))
4488 return connector_status_disconnected;
4489
4490 /* if there's no downstream port, we're done */
4491 if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
26d61aad 4492 return connector_status_connected;
caf9ab24
AJ
4493
4494 /* If we're HPD-aware, SINK_COUNT changes dynamically */
c9ff160b
JN
4495 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4496 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
23235177 4497 uint8_t reg;
9d1a1031
JN
4498
4499 if (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_SINK_COUNT,
4500 &reg, 1) < 0)
caf9ab24 4501 return connector_status_unknown;
9d1a1031 4502
23235177
AJ
4503 return DP_GET_SINK_COUNT(reg) ? connector_status_connected
4504 : connector_status_disconnected;
caf9ab24
AJ
4505 }
4506
4507 /* If no HPD, poke DDC gently */
0b99836f 4508 if (drm_probe_ddc(&intel_dp->aux.ddc))
26d61aad 4509 return connector_status_connected;
caf9ab24
AJ
4510
4511 /* Well we tried, say unknown for unreliable port types */
c9ff160b
JN
4512 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4513 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4514 if (type == DP_DS_PORT_TYPE_VGA ||
4515 type == DP_DS_PORT_TYPE_NON_EDID)
4516 return connector_status_unknown;
4517 } else {
4518 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4519 DP_DWN_STRM_PORT_TYPE_MASK;
4520 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4521 type == DP_DWN_STRM_PORT_TYPE_OTHER)
4522 return connector_status_unknown;
4523 }
caf9ab24
AJ
4524
4525 /* Anything else is out of spec, warn and ignore */
4526 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
26d61aad 4527 return connector_status_disconnected;
71ba9000
AJ
4528}
4529
d410b56d
CW
4530static enum drm_connector_status
4531edp_detect(struct intel_dp *intel_dp)
4532{
4533 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4534 enum drm_connector_status status;
4535
4536 status = intel_panel_detect(dev);
4537 if (status == connector_status_unknown)
4538 status = connector_status_connected;
4539
4540 return status;
4541}
4542
b93433cc
JN
4543static bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
4544 struct intel_digital_port *port)
5eb08b69 4545{
b93433cc 4546 u32 bit;
01cb9ea6 4547
0df53b77
JN
4548 switch (port->port) {
4549 case PORT_A:
4550 return true;
4551 case PORT_B:
4552 bit = SDE_PORTB_HOTPLUG;
4553 break;
4554 case PORT_C:
4555 bit = SDE_PORTC_HOTPLUG;
4556 break;
4557 case PORT_D:
4558 bit = SDE_PORTD_HOTPLUG;
4559 break;
4560 default:
4561 MISSING_CASE(port->port);
4562 return false;
4563 }
4564
4565 return I915_READ(SDEISR) & bit;
4566}
4567
4568static bool cpt_digital_port_connected(struct drm_i915_private *dev_priv,
4569 struct intel_digital_port *port)
4570{
4571 u32 bit;
4572
4573 switch (port->port) {
4574 case PORT_A:
4575 return true;
4576 case PORT_B:
4577 bit = SDE_PORTB_HOTPLUG_CPT;
4578 break;
4579 case PORT_C:
4580 bit = SDE_PORTC_HOTPLUG_CPT;
4581 break;
4582 case PORT_D:
4583 bit = SDE_PORTD_HOTPLUG_CPT;
4584 break;
4585 default:
4586 MISSING_CASE(port->port);
4587 return false;
b93433cc 4588 }
1b469639 4589
b93433cc 4590 return I915_READ(SDEISR) & bit;
5eb08b69
ZW
4591}
4592
7e66bcf2 4593static bool g4x_digital_port_connected(struct drm_i915_private *dev_priv,
1d245987 4594 struct intel_digital_port *port)
a4fc5ed6 4595{
9642c81c 4596 u32 bit;
5eb08b69 4597
9642c81c
JN
4598 switch (port->port) {
4599 case PORT_B:
4600 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4601 break;
4602 case PORT_C:
4603 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4604 break;
4605 case PORT_D:
4606 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4607 break;
4608 default:
4609 MISSING_CASE(port->port);
4610 return false;
4611 }
4612
4613 return I915_READ(PORT_HOTPLUG_STAT) & bit;
4614}
4615
4616static bool vlv_digital_port_connected(struct drm_i915_private *dev_priv,
4617 struct intel_digital_port *port)
4618{
4619 u32 bit;
4620
4621 switch (port->port) {
4622 case PORT_B:
4623 bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
4624 break;
4625 case PORT_C:
4626 bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
4627 break;
4628 case PORT_D:
4629 bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
4630 break;
4631 default:
4632 MISSING_CASE(port->port);
4633 return false;
a4fc5ed6
KP
4634 }
4635
1d245987 4636 return I915_READ(PORT_HOTPLUG_STAT) & bit;
2a592bec
DA
4637}
4638
e464bfde
JN
4639static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv,
4640 struct intel_digital_port *port)
4641{
4642 u32 bit;
4643
4644 switch (port->port) {
4645 case PORT_A:
4646 bit = BXT_DE_PORT_HP_DDIA;
4647 break;
4648 case PORT_B:
4649 bit = BXT_DE_PORT_HP_DDIB;
4650 break;
4651 case PORT_C:
4652 bit = BXT_DE_PORT_HP_DDIC;
4653 break;
4654 default:
4655 MISSING_CASE(port->port);
4656 return false;
4657 }
4658
4659 return I915_READ(GEN8_DE_PORT_ISR) & bit;
4660}
4661
7e66bcf2
JN
4662/*
4663 * intel_digital_port_connected - is the specified port connected?
4664 * @dev_priv: i915 private structure
4665 * @port: the port to test
4666 *
4667 * Return %true if @port is connected, %false otherwise.
4668 */
4669static bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
4670 struct intel_digital_port *port)
4671{
0df53b77 4672 if (HAS_PCH_IBX(dev_priv))
7e66bcf2 4673 return ibx_digital_port_connected(dev_priv, port);
0df53b77
JN
4674 if (HAS_PCH_SPLIT(dev_priv))
4675 return cpt_digital_port_connected(dev_priv, port);
e464bfde
JN
4676 else if (IS_BROXTON(dev_priv))
4677 return bxt_digital_port_connected(dev_priv, port);
9642c81c
JN
4678 else if (IS_VALLEYVIEW(dev_priv))
4679 return vlv_digital_port_connected(dev_priv, port);
7e66bcf2
JN
4680 else
4681 return g4x_digital_port_connected(dev_priv, port);
4682}
4683
b93433cc
JN
4684static enum drm_connector_status
4685ironlake_dp_detect(struct intel_dp *intel_dp)
4686{
4687 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4688 struct drm_i915_private *dev_priv = dev->dev_private;
4689 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4690
7e66bcf2 4691 if (!intel_digital_port_connected(dev_priv, intel_dig_port))
b93433cc
JN
4692 return connector_status_disconnected;
4693
4694 return intel_dp_detect_dpcd(intel_dp);
4695}
4696
2a592bec
DA
4697static enum drm_connector_status
4698g4x_dp_detect(struct intel_dp *intel_dp)
4699{
4700 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4701 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2a592bec
DA
4702
4703 /* Can't disconnect eDP, but you can close the lid... */
4704 if (is_edp(intel_dp)) {
4705 enum drm_connector_status status;
4706
4707 status = intel_panel_detect(dev);
4708 if (status == connector_status_unknown)
4709 status = connector_status_connected;
4710 return status;
4711 }
4712
7e66bcf2 4713 if (!intel_digital_port_connected(dev->dev_private, intel_dig_port))
a4fc5ed6
KP
4714 return connector_status_disconnected;
4715
26d61aad 4716 return intel_dp_detect_dpcd(intel_dp);
a9756bb5
ZW
4717}
4718
8c241fef 4719static struct edid *
beb60608 4720intel_dp_get_edid(struct intel_dp *intel_dp)
8c241fef 4721{
beb60608 4722 struct intel_connector *intel_connector = intel_dp->attached_connector;
d6f24d0f 4723
9cd300e0
JN
4724 /* use cached edid if we have one */
4725 if (intel_connector->edid) {
9cd300e0
JN
4726 /* invalid edid */
4727 if (IS_ERR(intel_connector->edid))
d6f24d0f
JB
4728 return NULL;
4729
55e9edeb 4730 return drm_edid_duplicate(intel_connector->edid);
beb60608
CW
4731 } else
4732 return drm_get_edid(&intel_connector->base,
4733 &intel_dp->aux.ddc);
4734}
8c241fef 4735
beb60608
CW
4736static void
4737intel_dp_set_edid(struct intel_dp *intel_dp)
4738{
4739 struct intel_connector *intel_connector = intel_dp->attached_connector;
4740 struct edid *edid;
8c241fef 4741
beb60608
CW
4742 edid = intel_dp_get_edid(intel_dp);
4743 intel_connector->detect_edid = edid;
4744
4745 if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4746 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4747 else
4748 intel_dp->has_audio = drm_detect_monitor_audio(edid);
8c241fef
KP
4749}
4750
beb60608
CW
4751static void
4752intel_dp_unset_edid(struct intel_dp *intel_dp)
8c241fef 4753{
beb60608 4754 struct intel_connector *intel_connector = intel_dp->attached_connector;
8c241fef 4755
beb60608
CW
4756 kfree(intel_connector->detect_edid);
4757 intel_connector->detect_edid = NULL;
9cd300e0 4758
beb60608
CW
4759 intel_dp->has_audio = false;
4760}
d6f24d0f 4761
beb60608
CW
4762static enum intel_display_power_domain
4763intel_dp_power_get(struct intel_dp *dp)
4764{
4765 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4766 enum intel_display_power_domain power_domain;
4767
4768 power_domain = intel_display_port_power_domain(encoder);
4769 intel_display_power_get(to_i915(encoder->base.dev), power_domain);
4770
4771 return power_domain;
4772}
d6f24d0f 4773
beb60608
CW
4774static void
4775intel_dp_power_put(struct intel_dp *dp,
4776 enum intel_display_power_domain power_domain)
4777{
4778 struct intel_encoder *encoder = &dp_to_dig_port(dp)->base;
4779 intel_display_power_put(to_i915(encoder->base.dev), power_domain);
8c241fef
KP
4780}
4781
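/*
 * intel_dp_detect(): full connector probe. It drops any cached EDID, reports
 * MST ports as disconnected from the single-monitor point of view, checks
 * the platform live status and DPCD, probes the OUI and MST capability,
 * caches a fresh EDID and services any pending automated test request or
 * sink-specific IRQ.
 */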
a9756bb5
ZW
4782static enum drm_connector_status
4783intel_dp_detect(struct drm_connector *connector, bool force)
4784{
4785 struct intel_dp *intel_dp = intel_attached_dp(connector);
d63885da
PZ
4786 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4787 struct intel_encoder *intel_encoder = &intel_dig_port->base;
fa90ecef 4788 struct drm_device *dev = connector->dev;
a9756bb5 4789 enum drm_connector_status status;
671dedd2 4790 enum intel_display_power_domain power_domain;
0e32b39c 4791 bool ret;
09b1eb13 4792 u8 sink_irq_vector;
a9756bb5 4793
164c8598 4794 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
c23cc417 4795 connector->base.id, connector->name);
beb60608 4796 intel_dp_unset_edid(intel_dp);
164c8598 4797
0e32b39c
DA
4798 if (intel_dp->is_mst) {
4799 /* MST devices are disconnected from a monitor POV */
4800 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4801 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
beb60608 4802 return connector_status_disconnected;
0e32b39c
DA
4803 }
4804
beb60608 4805 power_domain = intel_dp_power_get(intel_dp);
a9756bb5 4806
d410b56d
CW
4807 /* Can't disconnect eDP, but you can close the lid... */
4808 if (is_edp(intel_dp))
4809 status = edp_detect(intel_dp);
4810 else if (HAS_PCH_SPLIT(dev))
a9756bb5
ZW
4811 status = ironlake_dp_detect(intel_dp);
4812 else
4813 status = g4x_dp_detect(intel_dp);
4814 if (status != connector_status_connected)
c8c8fb33 4815 goto out;
a9756bb5 4816
0d198328
AJ
4817 intel_dp_probe_oui(intel_dp);
4818
0e32b39c
DA
4819 ret = intel_dp_probe_mst(intel_dp);
4820 if (ret) {
4821 /* If we are in MST mode then this connector
 4822 * won't appear connected or have anything with EDID on it. */
4823 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4824 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4825 status = connector_status_disconnected;
4826 goto out;
4827 }
4828
beb60608 4829 intel_dp_set_edid(intel_dp);
a9756bb5 4830
d63885da
PZ
4831 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4832 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
c8c8fb33
PZ
4833 status = connector_status_connected;
4834
09b1eb13
TP
4835 /* Try to read the source of the interrupt */
4836 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4837 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
4838 /* Clear interrupt source */
4839 drm_dp_dpcd_writeb(&intel_dp->aux,
4840 DP_DEVICE_SERVICE_IRQ_VECTOR,
4841 sink_irq_vector);
4842
4843 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4844 intel_dp_handle_test_request(intel_dp);
4845 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4846 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4847 }
4848
c8c8fb33 4849out:
beb60608 4850 intel_dp_power_put(intel_dp, power_domain);
c8c8fb33 4851 return status;
a4fc5ed6
KP
4852}
4853
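A minimal sketch of the detection order implemented above, assuming nothing beyond plain C: MST-enabled ports and ports whose sink probe fails both report disconnected, and only an SST sink that passes the probe gets its EDID cached. All names below are hypothetical stand-ins, not driver symbols.

#include <stdbool.h>

/* Illustrative only -- not part of intel_dp.c. */
enum sketch_status { SKETCH_CONNECTED, SKETCH_DISCONNECTED };

static enum sketch_status
sketch_dp_detect(bool already_mst, bool sink_present, bool sink_is_mst)
{
	if (already_mst)	/* MST: the per-stream connectors report instead */
		return SKETCH_DISCONNECTED;
	if (!sink_present)	/* edp/ironlake/g4x detect found nothing */
		return SKETCH_DISCONNECTED;
	if (sink_is_mst)	/* topology manager takes over this port */
		return SKETCH_DISCONNECTED;
	/* SST sink: cache the EDID, then service the sink IRQ vector */
	return SKETCH_CONNECTED;
}
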
beb60608
CW
4854static void
4855intel_dp_force(struct drm_connector *connector)
a4fc5ed6 4856{
df0e9248 4857 struct intel_dp *intel_dp = intel_attached_dp(connector);
beb60608 4858 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
671dedd2 4859 enum intel_display_power_domain power_domain;
a4fc5ed6 4860
beb60608
CW
4861 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4862 connector->base.id, connector->name);
4863 intel_dp_unset_edid(intel_dp);
a4fc5ed6 4864
beb60608
CW
4865 if (connector->status != connector_status_connected)
4866 return;
671dedd2 4867
beb60608
CW
4868 power_domain = intel_dp_power_get(intel_dp);
4869
4870 intel_dp_set_edid(intel_dp);
4871
4872 intel_dp_power_put(intel_dp, power_domain);
4873
4874 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4875 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
4876}
4877
4878static int intel_dp_get_modes(struct drm_connector *connector)
4879{
4880 struct intel_connector *intel_connector = to_intel_connector(connector);
4881 struct edid *edid;
4882
4883 edid = intel_connector->detect_edid;
4884 if (edid) {
4885 int ret = intel_connector_update_modes(connector, edid);
4886 if (ret)
4887 return ret;
4888 }
32f9d658 4889
f8779fda 4890 /* if eDP has no EDID, fall back to fixed mode */
beb60608
CW
4891 if (is_edp(intel_attached_dp(connector)) &&
4892 intel_connector->panel.fixed_mode) {
f8779fda 4893 struct drm_display_mode *mode;
beb60608
CW
4894
4895 mode = drm_mode_duplicate(connector->dev,
dd06f90e 4896 intel_connector->panel.fixed_mode);
f8779fda 4897 if (mode) {
32f9d658
ZW
4898 drm_mode_probed_add(connector, mode);
4899 return 1;
4900 }
4901 }
beb60608 4902
32f9d658 4903 return 0;
a4fc5ed6
KP
4904}
4905
1aad7ac0
CW
4906static bool
4907intel_dp_detect_audio(struct drm_connector *connector)
4908{
1aad7ac0 4909 bool has_audio = false;
beb60608 4910 struct edid *edid;
1aad7ac0 4911
beb60608
CW
4912 edid = to_intel_connector(connector)->detect_edid;
4913 if (edid)
1aad7ac0 4914 has_audio = drm_detect_monitor_audio(edid);
671dedd2 4915
1aad7ac0
CW
4916 return has_audio;
4917}
4918
f684960e
CW
4919static int
4920intel_dp_set_property(struct drm_connector *connector,
4921 struct drm_property *property,
4922 uint64_t val)
4923{
e953fd7b 4924 struct drm_i915_private *dev_priv = connector->dev->dev_private;
53b41837 4925 struct intel_connector *intel_connector = to_intel_connector(connector);
da63a9f2
PZ
4926 struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
4927 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
f684960e
CW
4928 int ret;
4929
662595df 4930 ret = drm_object_property_set_value(&connector->base, property, val);
f684960e
CW
4931 if (ret)
4932 return ret;
4933
3f43c48d 4934 if (property == dev_priv->force_audio_property) {
1aad7ac0
CW
4935 int i = val;
4936 bool has_audio;
4937
4938 if (i == intel_dp->force_audio)
f684960e
CW
4939 return 0;
4940
1aad7ac0 4941 intel_dp->force_audio = i;
f684960e 4942
c3e5f67b 4943 if (i == HDMI_AUDIO_AUTO)
1aad7ac0
CW
4944 has_audio = intel_dp_detect_audio(connector);
4945 else
c3e5f67b 4946 has_audio = (i == HDMI_AUDIO_ON);
1aad7ac0
CW
4947
4948 if (has_audio == intel_dp->has_audio)
f684960e
CW
4949 return 0;
4950
1aad7ac0 4951 intel_dp->has_audio = has_audio;
f684960e
CW
4952 goto done;
4953 }
4954
e953fd7b 4955 if (property == dev_priv->broadcast_rgb_property) {
ae4edb80 4956 bool old_auto = intel_dp->color_range_auto;
0f2a2a75 4957 bool old_range = intel_dp->limited_color_range;
ae4edb80 4958
55bc60db
VS
4959 switch (val) {
4960 case INTEL_BROADCAST_RGB_AUTO:
4961 intel_dp->color_range_auto = true;
4962 break;
4963 case INTEL_BROADCAST_RGB_FULL:
4964 intel_dp->color_range_auto = false;
0f2a2a75 4965 intel_dp->limited_color_range = false;
55bc60db
VS
4966 break;
4967 case INTEL_BROADCAST_RGB_LIMITED:
4968 intel_dp->color_range_auto = false;
0f2a2a75 4969 intel_dp->limited_color_range = true;
55bc60db
VS
4970 break;
4971 default:
4972 return -EINVAL;
4973 }
ae4edb80
DV
4974
4975 if (old_auto == intel_dp->color_range_auto &&
0f2a2a75 4976 old_range == intel_dp->limited_color_range)
ae4edb80
DV
4977 return 0;
4978
e953fd7b
CW
4979 goto done;
4980 }
4981
53b41837
YN
4982 if (is_edp(intel_dp) &&
4983 property == connector->dev->mode_config.scaling_mode_property) {
4984 if (val == DRM_MODE_SCALE_NONE) {
 4985 DRM_DEBUG_KMS("scaling mode NONE not supported\n");
4986 return -EINVAL;
4987 }
4988
4989 if (intel_connector->panel.fitting_mode == val) {
4990 /* the eDP scaling property is not changed */
4991 return 0;
4992 }
4993 intel_connector->panel.fitting_mode = val;
4994
4995 goto done;
4996 }
4997
f684960e
CW
4998 return -EINVAL;
4999
5000done:
c0c36b94
CW
5001 if (intel_encoder->base.crtc)
5002 intel_crtc_restore_mode(intel_encoder->base.crtc);
f684960e
CW
5003
5004 return 0;
5005}
5006
a4fc5ed6 5007static void
73845adf 5008intel_dp_connector_destroy(struct drm_connector *connector)
a4fc5ed6 5009{
1d508706 5010 struct intel_connector *intel_connector = to_intel_connector(connector);
aaa6fd2a 5011
10e972d3 5012 kfree(intel_connector->detect_edid);
beb60608 5013
9cd300e0
JN
5014 if (!IS_ERR_OR_NULL(intel_connector->edid))
5015 kfree(intel_connector->edid);
5016
acd8db10
PZ
5017 /* Can't call is_edp() since the encoder may have been destroyed
5018 * already. */
5019 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
1d508706 5020 intel_panel_fini(&intel_connector->panel);
aaa6fd2a 5021
a4fc5ed6 5022 drm_connector_cleanup(connector);
55f78c43 5023 kfree(connector);
a4fc5ed6
KP
5024}
5025
00c09d70 5026void intel_dp_encoder_destroy(struct drm_encoder *encoder)
24d05927 5027{
da63a9f2
PZ
5028 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
5029 struct intel_dp *intel_dp = &intel_dig_port->dp;
24d05927 5030
4f71d0cb 5031 drm_dp_aux_unregister(&intel_dp->aux);
0e32b39c 5032 intel_dp_mst_encoder_cleanup(intel_dig_port);
bd943159
KP
5033 if (is_edp(intel_dp)) {
5034 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
951468f3
VS
5035 /*
 5036 * vdd might still be enabled due to the delayed vdd off.
5037 * Make sure vdd is actually turned off here.
5038 */
773538e8 5039 pps_lock(intel_dp);
4be73780 5040 edp_panel_vdd_off_sync(intel_dp);
773538e8
VS
5041 pps_unlock(intel_dp);
5042
01527b31
CT
5043 if (intel_dp->edp_notifier.notifier_call) {
5044 unregister_reboot_notifier(&intel_dp->edp_notifier);
5045 intel_dp->edp_notifier.notifier_call = NULL;
5046 }
bd943159 5047 }
c8bd0e49 5048 drm_encoder_cleanup(encoder);
da63a9f2 5049 kfree(intel_dig_port);
24d05927
DV
5050}
5051
07f9cd0b
ID
5052static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
5053{
5054 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
5055
5056 if (!is_edp(intel_dp))
5057 return;
5058
951468f3
VS
5059 /*
 5060 * vdd might still be enabled due to the delayed vdd off.
5061 * Make sure vdd is actually turned off here.
5062 */
afa4e53a 5063 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
773538e8 5064 pps_lock(intel_dp);
07f9cd0b 5065 edp_panel_vdd_off_sync(intel_dp);
773538e8 5066 pps_unlock(intel_dp);
07f9cd0b
ID
5067}
5068
49e6bc51
VS
5069static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
5070{
5071 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
5072 struct drm_device *dev = intel_dig_port->base.base.dev;
5073 struct drm_i915_private *dev_priv = dev->dev_private;
5074 enum intel_display_power_domain power_domain;
5075
5076 lockdep_assert_held(&dev_priv->pps_mutex);
5077
5078 if (!edp_have_panel_vdd(intel_dp))
5079 return;
5080
5081 /*
5082 * The VDD bit needs a power domain reference, so if the bit is
5083 * already enabled when we boot or resume, grab this reference and
5084 * schedule a vdd off, so we don't hold on to the reference
5085 * indefinitely.
5086 */
5087 DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
5088 power_domain = intel_display_port_power_domain(&intel_dig_port->base);
5089 intel_display_power_get(dev_priv, power_domain);
5090
5091 edp_panel_vdd_schedule_off(intel_dp);
5092}
5093
6d93c0c4
ID
5094static void intel_dp_encoder_reset(struct drm_encoder *encoder)
5095{
49e6bc51
VS
5096 struct intel_dp *intel_dp;
5097
5098 if (to_intel_encoder(encoder)->type != INTEL_OUTPUT_EDP)
5099 return;
5100
5101 intel_dp = enc_to_intel_dp(encoder);
5102
5103 pps_lock(intel_dp);
5104
5105 /*
5106 * Read out the current power sequencer assignment,
5107 * in case the BIOS did something with it.
5108 */
5109 if (IS_VALLEYVIEW(encoder->dev))
5110 vlv_initial_power_sequencer_setup(intel_dp);
5111
5112 intel_edp_panel_vdd_sanitize(intel_dp);
5113
5114 pps_unlock(intel_dp);
6d93c0c4
ID
5115}
5116
a4fc5ed6 5117static const struct drm_connector_funcs intel_dp_connector_funcs = {
4d688a2a 5118 .dpms = drm_atomic_helper_connector_dpms,
a4fc5ed6 5119 .detect = intel_dp_detect,
beb60608 5120 .force = intel_dp_force,
a4fc5ed6 5121 .fill_modes = drm_helper_probe_single_connector_modes,
f684960e 5122 .set_property = intel_dp_set_property,
2545e4a6 5123 .atomic_get_property = intel_connector_atomic_get_property,
73845adf 5124 .destroy = intel_dp_connector_destroy,
c6f95f27 5125 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
98969725 5126 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
a4fc5ed6
KP
5127};
5128
5129static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
5130 .get_modes = intel_dp_get_modes,
5131 .mode_valid = intel_dp_mode_valid,
df0e9248 5132 .best_encoder = intel_best_encoder,
a4fc5ed6
KP
5133};
5134
a4fc5ed6 5135static const struct drm_encoder_funcs intel_dp_enc_funcs = {
6d93c0c4 5136 .reset = intel_dp_encoder_reset,
24d05927 5137 .destroy = intel_dp_encoder_destroy,
a4fc5ed6
KP
5138};
5139
b2c5c181 5140enum irqreturn
13cf5504
DA
5141intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
5142{
5143 struct intel_dp *intel_dp = &intel_dig_port->dp;
1c767b33 5144 struct intel_encoder *intel_encoder = &intel_dig_port->base;
0e32b39c
DA
5145 struct drm_device *dev = intel_dig_port->base.base.dev;
5146 struct drm_i915_private *dev_priv = dev->dev_private;
1c767b33 5147 enum intel_display_power_domain power_domain;
b2c5c181 5148 enum irqreturn ret = IRQ_NONE;
1c767b33 5149
0e32b39c
DA
5150 if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
5151 intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
13cf5504 5152
7a7f84cc
VS
5153 if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
5154 /*
5155 * vdd off can generate a long pulse on eDP which
5156 * would require vdd on to handle it, and thus we
5157 * would end up in an endless cycle of
5158 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
5159 */
5160 DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
5161 port_name(intel_dig_port->port));
a8b3d52f 5162 return IRQ_HANDLED;
7a7f84cc
VS
5163 }
5164
26fbb774
VS
5165 DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
5166 port_name(intel_dig_port->port),
0e32b39c 5167 long_hpd ? "long" : "short");
13cf5504 5168
1c767b33
ID
5169 power_domain = intel_display_port_power_domain(intel_encoder);
5170 intel_display_power_get(dev_priv, power_domain);
5171
0e32b39c 5172 if (long_hpd) {
5fa836a9
MK
5173 /* indicate that we need to restart link training */
5174 intel_dp->train_set_valid = false;
2a592bec 5175
7e66bcf2
JN
5176 if (!intel_digital_port_connected(dev_priv, intel_dig_port))
5177 goto mst_fail;
0e32b39c
DA
5178
5179 if (!intel_dp_get_dpcd(intel_dp)) {
5180 goto mst_fail;
5181 }
5182
5183 intel_dp_probe_oui(intel_dp);
5184
d14e7b6d
VS
5185 if (!intel_dp_probe_mst(intel_dp)) {
5186 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
5187 intel_dp_check_link_status(intel_dp);
5188 drm_modeset_unlock(&dev->mode_config.connection_mutex);
0e32b39c 5189 goto mst_fail;
d14e7b6d 5190 }
0e32b39c
DA
5191 } else {
5192 if (intel_dp->is_mst) {
1c767b33 5193 if (intel_dp_check_mst_status(intel_dp) == -EINVAL)
0e32b39c
DA
5194 goto mst_fail;
5195 }
5196
5197 if (!intel_dp->is_mst) {
5b215bcf 5198 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
0e32b39c 5199 intel_dp_check_link_status(intel_dp);
5b215bcf 5200 drm_modeset_unlock(&dev->mode_config.connection_mutex);
0e32b39c
DA
5201 }
5202 }
b2c5c181
DV
5203
5204 ret = IRQ_HANDLED;
5205
1c767b33 5206 goto put_power;
0e32b39c
DA
5207mst_fail:
 5208 /* if we were in MST mode and the device is no longer there, get out of MST mode */
5209 if (intel_dp->is_mst) {
5210 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n", intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
5211 intel_dp->is_mst = false;
5212 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
5213 }
1c767b33
ID
5214put_power:
5215 intel_display_power_put(dev_priv, power_domain);
5216
5217 return ret;
13cf5504
DA
5218}
5219
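A hedged sketch of the pulse handling above, reduced to its decision points; the enum and helper are hypothetical, not driver code. Long pulses trigger a full DPCD/MST re-probe, short pulses only service an existing MST topology or re-check the SST link, and eDP long pulses are dropped to avoid the vdd on/off loop noted in the comment.

#include <stdbool.h>

/* Illustrative only -- not part of intel_dp.c. */
enum sketch_hpd_action {
	SKETCH_IGNORE,		/* long pulse on eDP */
	SKETCH_REPROBE,		/* long pulse: re-read DPCD, re-probe MST */
	SKETCH_CHECK_MST,	/* short pulse while MST is active */
	SKETCH_CHECK_LINK,	/* short pulse on an SST link */
};

static enum sketch_hpd_action
sketch_hpd_pulse(bool long_hpd, bool is_edp, bool is_mst)
{
	if (long_hpd && is_edp)
		return SKETCH_IGNORE;
	if (long_hpd)
		return SKETCH_REPROBE;
	return is_mst ? SKETCH_CHECK_MST : SKETCH_CHECK_LINK;
}
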
e3421a18
ZW
5220/* Return which DP Port should be selected for Transcoder DP control */
5221int
0206e353 5222intel_trans_dp_port_sel(struct drm_crtc *crtc)
e3421a18
ZW
5223{
5224 struct drm_device *dev = crtc->dev;
fa90ecef
PZ
5225 struct intel_encoder *intel_encoder;
5226 struct intel_dp *intel_dp;
e3421a18 5227
fa90ecef
PZ
5228 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
5229 intel_dp = enc_to_intel_dp(&intel_encoder->base);
e3421a18 5230
fa90ecef
PZ
5231 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
5232 intel_encoder->type == INTEL_OUTPUT_EDP)
ea5b213a 5233 return intel_dp->output_reg;
e3421a18 5234 }
ea5b213a 5235
e3421a18
ZW
5236 return -1;
5237}
5238
477ec328 5239/* check the VBT to see whether the eDP is on another port */
5d8a7752 5240bool intel_dp_is_edp(struct drm_device *dev, enum port port)
36e83a18
ZY
5241{
5242 struct drm_i915_private *dev_priv = dev->dev_private;
768f69c9 5243 union child_device_config *p_child;
36e83a18 5244 int i;
5d8a7752 5245 static const short port_mapping[] = {
477ec328
RV
5246 [PORT_B] = DVO_PORT_DPB,
5247 [PORT_C] = DVO_PORT_DPC,
5248 [PORT_D] = DVO_PORT_DPD,
5249 [PORT_E] = DVO_PORT_DPE,
5d8a7752 5250 };
36e83a18 5251
3b32a35b
VS
5252 if (port == PORT_A)
5253 return true;
5254
41aa3448 5255 if (!dev_priv->vbt.child_dev_num)
36e83a18
ZY
5256 return false;
5257
41aa3448
RV
5258 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
5259 p_child = dev_priv->vbt.child_dev + i;
36e83a18 5260
5d8a7752 5261 if (p_child->common.dvo_port == port_mapping[port] &&
f02586df
VS
5262 (p_child->common.device_type & DEVICE_TYPE_eDP_BITS) ==
5263 (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS))
36e83a18
ZY
5264 return true;
5265 }
5266 return false;
5267}
5268
0e32b39c 5269void
f684960e
CW
5270intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
5271{
53b41837
YN
5272 struct intel_connector *intel_connector = to_intel_connector(connector);
5273
3f43c48d 5274 intel_attach_force_audio_property(connector);
e953fd7b 5275 intel_attach_broadcast_rgb_property(connector);
55bc60db 5276 intel_dp->color_range_auto = true;
53b41837
YN
5277
5278 if (is_edp(intel_dp)) {
5279 drm_mode_create_scaling_mode_property(connector->dev);
6de6d846
RC
5280 drm_object_attach_property(
5281 &connector->base,
53b41837 5282 connector->dev->mode_config.scaling_mode_property,
8e740cd1
YN
5283 DRM_MODE_SCALE_ASPECT);
5284 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
53b41837 5285 }
f684960e
CW
5286}
5287
dada1a9f
ID
5288static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
5289{
5290 intel_dp->last_power_cycle = jiffies;
5291 intel_dp->last_power_on = jiffies;
5292 intel_dp->last_backlight_off = jiffies;
5293}
5294
67a54566
DV
5295static void
5296intel_dp_init_panel_power_sequencer(struct drm_device *dev,
36b5f425 5297 struct intel_dp *intel_dp)
67a54566
DV
5298{
5299 struct drm_i915_private *dev_priv = dev->dev_private;
36b5f425
VS
5300 struct edp_power_seq cur, vbt, spec,
5301 *final = &intel_dp->pps_delays;
b0a08bec
VK
5302 u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
5303 int pp_ctrl_reg, pp_on_reg, pp_off_reg, pp_div_reg = 0;
453c5420 5304
e39b999a
VS
5305 lockdep_assert_held(&dev_priv->pps_mutex);
5306
81ddbc69
VS
5307 /* already initialized? */
5308 if (final->t11_t12 != 0)
5309 return;
5310
b0a08bec
VK
5311 if (IS_BROXTON(dev)) {
5312 /*
5313 * TODO: BXT has 2 sets of PPS registers.
 5314 * The correct register for Broxton needs to be identified
 5315 * using VBT. Hardcoding for now.
5316 */
5317 pp_ctrl_reg = BXT_PP_CONTROL(0);
5318 pp_on_reg = BXT_PP_ON_DELAYS(0);
5319 pp_off_reg = BXT_PP_OFF_DELAYS(0);
5320 } else if (HAS_PCH_SPLIT(dev)) {
bf13e81b 5321 pp_ctrl_reg = PCH_PP_CONTROL;
453c5420
JB
5322 pp_on_reg = PCH_PP_ON_DELAYS;
5323 pp_off_reg = PCH_PP_OFF_DELAYS;
5324 pp_div_reg = PCH_PP_DIVISOR;
5325 } else {
bf13e81b
JN
5326 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5327
5328 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
5329 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5330 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5331 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
453c5420 5332 }
67a54566
DV
5333
5334 /* Workaround: Need to write PP_CONTROL with the unlock key as
5335 * the very first thing. */
b0a08bec 5336 pp_ctl = ironlake_get_pp_control(intel_dp);
67a54566 5337
453c5420
JB
5338 pp_on = I915_READ(pp_on_reg);
5339 pp_off = I915_READ(pp_off_reg);
b0a08bec
VK
5340 if (!IS_BROXTON(dev)) {
5341 I915_WRITE(pp_ctrl_reg, pp_ctl);
5342 pp_div = I915_READ(pp_div_reg);
5343 }
67a54566
DV
5344
5345 /* Pull timing values out of registers */
5346 cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
5347 PANEL_POWER_UP_DELAY_SHIFT;
5348
5349 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
5350 PANEL_LIGHT_ON_DELAY_SHIFT;
5351
5352 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
5353 PANEL_LIGHT_OFF_DELAY_SHIFT;
5354
5355 cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
5356 PANEL_POWER_DOWN_DELAY_SHIFT;
5357
b0a08bec
VK
5358 if (IS_BROXTON(dev)) {
5359 u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
5360 BXT_POWER_CYCLE_DELAY_SHIFT;
5361 if (tmp > 0)
5362 cur.t11_t12 = (tmp - 1) * 1000;
5363 else
5364 cur.t11_t12 = 0;
5365 } else {
5366 cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
67a54566 5367 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
b0a08bec 5368 }
67a54566
DV
5369
5370 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5371 cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
5372
41aa3448 5373 vbt = dev_priv->vbt.edp_pps;
67a54566
DV
5374
5375 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
5376 * our hw here, which are all in 100usec. */
5377 spec.t1_t3 = 210 * 10;
5378 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
5379 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
5380 spec.t10 = 500 * 10;
5381 /* This one is special and actually in units of 100ms, but zero
5382 * based in the hw (so we need to add 100 ms). But the sw vbt
 5383 * table multiplies it by 1000 to make it in units of 100usec,
5384 * too. */
5385 spec.t11_t12 = (510 + 100) * 10;
5386
5387 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5388 vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
5389
5390 /* Use the max of the register settings and vbt. If both are
5391 * unset, fall back to the spec limits. */
36b5f425 5392#define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
67a54566
DV
5393 spec.field : \
5394 max(cur.field, vbt.field))
5395 assign_final(t1_t3);
5396 assign_final(t8);
5397 assign_final(t9);
5398 assign_final(t10);
5399 assign_final(t11_t12);
5400#undef assign_final
5401
36b5f425 5402#define get_delay(field) (DIV_ROUND_UP(final->field, 10))
67a54566
DV
5403 intel_dp->panel_power_up_delay = get_delay(t1_t3);
5404 intel_dp->backlight_on_delay = get_delay(t8);
5405 intel_dp->backlight_off_delay = get_delay(t9);
5406 intel_dp->panel_power_down_delay = get_delay(t10);
5407 intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
5408#undef get_delay
5409
f30d26e4
JN
5410 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
5411 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
5412 intel_dp->panel_power_cycle_delay);
5413
5414 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
5415 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
f30d26e4
JN
5416}
5417
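The unit juggling above is easier to see as plain arithmetic: register and VBT delays are kept in 100 us units, the larger of the two wins (the spec limits only apply when both are zero), and the driver's millisecond values come from DIV_ROUND_UP(x, 10). The standalone program below uses made-up register values to show the conversion; it is a sketch, not driver code.

#include <stdio.h>

#define SKETCH_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* max(cur, vbt), falling back to the spec limit when both are unset */
static unsigned int sketch_resolve(unsigned int cur, unsigned int vbt,
				   unsigned int spec)
{
	unsigned int final = cur > vbt ? cur : vbt;

	return final ? final : spec;
}

int main(void)
{
	/* t11_t12 spec limit: (510 + 100) * 10 = 6100 x 100us = 610 ms */
	unsigned int t11_t12 = sketch_resolve(0, 0, (510 + 100) * 10);
	/* t1_t3: registers report 2100 x 100us, VBT reports nothing */
	unsigned int t1_t3 = sketch_resolve(2100, 0, 210 * 10);

	printf("panel power cycle delay: %u ms\n",
	       SKETCH_DIV_ROUND_UP(t11_t12, 10));	/* 610 */
	printf("panel power up delay:    %u ms\n",
	       SKETCH_DIV_ROUND_UP(t1_t3, 10));		/* 210 */
	return 0;
}
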
5418static void
5419intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
36b5f425 5420 struct intel_dp *intel_dp)
f30d26e4
JN
5421{
5422 struct drm_i915_private *dev_priv = dev->dev_private;
453c5420
JB
5423 u32 pp_on, pp_off, pp_div, port_sel = 0;
5424 int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
b0a08bec 5425 int pp_on_reg, pp_off_reg, pp_div_reg = 0, pp_ctrl_reg;
ad933b56 5426 enum port port = dp_to_dig_port(intel_dp)->port;
36b5f425 5427 const struct edp_power_seq *seq = &intel_dp->pps_delays;
453c5420 5428
e39b999a 5429 lockdep_assert_held(&dev_priv->pps_mutex);
453c5420 5430
b0a08bec
VK
5431 if (IS_BROXTON(dev)) {
5432 /*
5433 * TODO: BXT has 2 sets of PPS registers.
 5434 * The correct register for Broxton needs to be identified
 5435 * using VBT. Hardcoding for now.
5436 */
5437 pp_ctrl_reg = BXT_PP_CONTROL(0);
5438 pp_on_reg = BXT_PP_ON_DELAYS(0);
5439 pp_off_reg = BXT_PP_OFF_DELAYS(0);
5440
5441 } else if (HAS_PCH_SPLIT(dev)) {
453c5420
JB
5442 pp_on_reg = PCH_PP_ON_DELAYS;
5443 pp_off_reg = PCH_PP_OFF_DELAYS;
5444 pp_div_reg = PCH_PP_DIVISOR;
5445 } else {
bf13e81b
JN
5446 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
5447
5448 pp_on_reg = VLV_PIPE_PP_ON_DELAYS(pipe);
5449 pp_off_reg = VLV_PIPE_PP_OFF_DELAYS(pipe);
5450 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
453c5420
JB
5451 }
5452
b2f19d1a
PZ
5453 /*
5454 * And finally store the new values in the power sequencer. The
5455 * backlight delays are set to 1 because we do manual waits on them. For
5456 * T8, even BSpec recommends doing it. For T9, if we don't do this,
5457 * we'll end up waiting for the backlight off delay twice: once when we
5458 * do the manual sleep, and once when we disable the panel and wait for
5459 * the PP_STATUS bit to become zero.
5460 */
f30d26e4 5461 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
b2f19d1a
PZ
5462 (1 << PANEL_LIGHT_ON_DELAY_SHIFT);
5463 pp_off = (1 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
f30d26e4 5464 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
67a54566
DV
5465 /* Compute the divisor for the pp clock, simply match the Bspec
5466 * formula. */
b0a08bec
VK
5467 if (IS_BROXTON(dev)) {
5468 pp_div = I915_READ(pp_ctrl_reg);
5469 pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
5470 pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
5471 << BXT_POWER_CYCLE_DELAY_SHIFT);
5472 } else {
5473 pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
5474 pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
5475 << PANEL_POWER_CYCLE_DELAY_SHIFT);
5476 }
67a54566
DV
5477
5478 /* Haswell doesn't have any port selection bits for the panel
5479 * power sequencer any more. */
bc7d38a4 5480 if (IS_VALLEYVIEW(dev)) {
ad933b56 5481 port_sel = PANEL_PORT_SELECT_VLV(port);
bc7d38a4 5482 } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
ad933b56 5483 if (port == PORT_A)
a24c144c 5484 port_sel = PANEL_PORT_SELECT_DPA;
67a54566 5485 else
a24c144c 5486 port_sel = PANEL_PORT_SELECT_DPD;
67a54566
DV
5487 }
5488
453c5420
JB
5489 pp_on |= port_sel;
5490
5491 I915_WRITE(pp_on_reg, pp_on);
5492 I915_WRITE(pp_off_reg, pp_off);
b0a08bec
VK
5493 if (IS_BROXTON(dev))
5494 I915_WRITE(pp_ctrl_reg, pp_div);
5495 else
5496 I915_WRITE(pp_div_reg, pp_div);
67a54566 5497
67a54566 5498 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
453c5420
JB
5499 I915_READ(pp_on_reg),
5500 I915_READ(pp_off_reg),
b0a08bec
VK
5501 IS_BROXTON(dev) ?
5502 (I915_READ(pp_ctrl_reg) & BXT_POWER_CYCLE_DELAY_MASK) :
453c5420 5503 I915_READ(pp_div_reg));
f684960e
CW
5504}
5505
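One detail worth spelling out from the write path above is the T11/T12 power-cycle encoding: the driver value is in 100 us units, the register field is in 100 ms units, and Broxton's field is effectively 1-based (the read path earlier does (tmp - 1) * 1000). A small standalone sketch with an example value, not driver code:

#include <stdio.h>

#define SKETCH_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int t11_t12 = 6100;	/* 610 ms, stored in 100 us units */

	unsigned int pch_field = SKETCH_DIV_ROUND_UP(t11_t12, 1000);	  /* 7 */
	unsigned int bxt_field = SKETCH_DIV_ROUND_UP(t11_t12 + 1, 1000); /* 7 */
	unsigned int bxt_readback = (bxt_field - 1) * 1000;		  /* 6000 */

	printf("PCH/VLV field %u (700 ms), BXT field %u, BXT reads back %u x 100us (600 ms)\n",
	       pch_field, bxt_field, bxt_readback);
	return 0;
}
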
b33a2815
VK
5506/**
5507 * intel_dp_set_drrs_state - program registers for RR switch to take effect
5508 * @dev: DRM device
5509 * @refresh_rate: RR to be programmed
5510 *
 5511 * This function gets called when the refresh rate (RR) has to be changed from
 5512 * one frequency to another. Switches can be between the high and low RR
 5513 * supported by the panel or to any other RR based on media playback (in
 5514 * that case, the RR value needs to be passed from user space).
5515 *
5516 * The caller of this function needs to take a lock on dev_priv->drrs.
5517 */
96178eeb 5518static void intel_dp_set_drrs_state(struct drm_device *dev, int refresh_rate)
439d7ac0
PB
5519{
5520 struct drm_i915_private *dev_priv = dev->dev_private;
5521 struct intel_encoder *encoder;
96178eeb
VK
5522 struct intel_digital_port *dig_port = NULL;
5523 struct intel_dp *intel_dp = dev_priv->drrs.dp;
5cec258b 5524 struct intel_crtc_state *config = NULL;
439d7ac0 5525 struct intel_crtc *intel_crtc = NULL;
439d7ac0 5526 u32 reg, val;
96178eeb 5527 enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
439d7ac0
PB
5528
5529 if (refresh_rate <= 0) {
5530 DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
5531 return;
5532 }
5533
96178eeb
VK
5534 if (intel_dp == NULL) {
5535 DRM_DEBUG_KMS("DRRS not supported.\n");
439d7ac0
PB
5536 return;
5537 }
5538
1fcc9d1c 5539 /*
e4d59f6b
RV
5540 * FIXME: This needs proper synchronization with psr state for some
5541 * platforms that cannot have PSR and DRRS enabled at the same time.
1fcc9d1c 5542 */
439d7ac0 5543
96178eeb
VK
5544 dig_port = dp_to_dig_port(intel_dp);
5545 encoder = &dig_port->base;
723f9aab 5546 intel_crtc = to_intel_crtc(encoder->base.crtc);
439d7ac0
PB
5547
5548 if (!intel_crtc) {
5549 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
5550 return;
5551 }
5552
6e3c9717 5553 config = intel_crtc->config;
439d7ac0 5554
96178eeb 5555 if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
439d7ac0
PB
5556 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
5557 return;
5558 }
5559
96178eeb
VK
5560 if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
5561 refresh_rate)
439d7ac0
PB
5562 index = DRRS_LOW_RR;
5563
96178eeb 5564 if (index == dev_priv->drrs.refresh_rate_type) {
439d7ac0
PB
5565 DRM_DEBUG_KMS(
5566 "DRRS requested for previously set RR...ignoring\n");
5567 return;
5568 }
5569
5570 if (!intel_crtc->active) {
5571 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
5572 return;
5573 }
5574
44395bfe 5575 if (INTEL_INFO(dev)->gen >= 8 && !IS_CHERRYVIEW(dev)) {
a4c30b1d
VK
5576 switch (index) {
5577 case DRRS_HIGH_RR:
5578 intel_dp_set_m_n(intel_crtc, M1_N1);
5579 break;
5580 case DRRS_LOW_RR:
5581 intel_dp_set_m_n(intel_crtc, M2_N2);
5582 break;
5583 case DRRS_MAX_RR:
5584 default:
5585 DRM_ERROR("Unsupported refreshrate type\n");
5586 }
5587 } else if (INTEL_INFO(dev)->gen > 6) {
6e3c9717 5588 reg = PIPECONF(intel_crtc->config->cpu_transcoder);
439d7ac0 5589 val = I915_READ(reg);
a4c30b1d 5590
439d7ac0 5591 if (index > DRRS_HIGH_RR) {
6fa7aec1
VK
5592 if (IS_VALLEYVIEW(dev))
5593 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5594 else
5595 val |= PIPECONF_EDP_RR_MODE_SWITCH;
439d7ac0 5596 } else {
6fa7aec1
VK
5597 if (IS_VALLEYVIEW(dev))
5598 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5599 else
5600 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
439d7ac0
PB
5601 }
5602 I915_WRITE(reg, val);
5603 }
5604
4e9ac947
VK
5605 dev_priv->drrs.refresh_rate_type = index;
5606
5607 DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
5608}
5609
b33a2815
VK
5610/**
5611 * intel_edp_drrs_enable - init drrs struct if supported
5612 * @intel_dp: DP struct
5613 *
5614 * Initializes frontbuffer_bits and drrs.dp
5615 */
c395578e
VK
5616void intel_edp_drrs_enable(struct intel_dp *intel_dp)
5617{
5618 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5619 struct drm_i915_private *dev_priv = dev->dev_private;
5620 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5621 struct drm_crtc *crtc = dig_port->base.base.crtc;
5622 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5623
5624 if (!intel_crtc->config->has_drrs) {
5625 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5626 return;
5627 }
5628
5629 mutex_lock(&dev_priv->drrs.mutex);
5630 if (WARN_ON(dev_priv->drrs.dp)) {
5631 DRM_ERROR("DRRS already enabled\n");
5632 goto unlock;
5633 }
5634
5635 dev_priv->drrs.busy_frontbuffer_bits = 0;
5636
5637 dev_priv->drrs.dp = intel_dp;
5638
5639unlock:
5640 mutex_unlock(&dev_priv->drrs.mutex);
5641}
5642
b33a2815
VK
5643/**
5644 * intel_edp_drrs_disable - Disable DRRS
5645 * @intel_dp: DP struct
5646 *
5647 */
c395578e
VK
5648void intel_edp_drrs_disable(struct intel_dp *intel_dp)
5649{
5650 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5651 struct drm_i915_private *dev_priv = dev->dev_private;
5652 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5653 struct drm_crtc *crtc = dig_port->base.base.crtc;
5654 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5655
5656 if (!intel_crtc->config->has_drrs)
5657 return;
5658
5659 mutex_lock(&dev_priv->drrs.mutex);
5660 if (!dev_priv->drrs.dp) {
5661 mutex_unlock(&dev_priv->drrs.mutex);
5662 return;
5663 }
5664
5665 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5666 intel_dp_set_drrs_state(dev_priv->dev,
5667 intel_dp->attached_connector->panel.
5668 fixed_mode->vrefresh);
5669
5670 dev_priv->drrs.dp = NULL;
5671 mutex_unlock(&dev_priv->drrs.mutex);
5672
5673 cancel_delayed_work_sync(&dev_priv->drrs.work);
5674}
5675
4e9ac947
VK
5676static void intel_edp_drrs_downclock_work(struct work_struct *work)
5677{
5678 struct drm_i915_private *dev_priv =
5679 container_of(work, typeof(*dev_priv), drrs.work.work);
5680 struct intel_dp *intel_dp;
5681
5682 mutex_lock(&dev_priv->drrs.mutex);
5683
5684 intel_dp = dev_priv->drrs.dp;
5685
5686 if (!intel_dp)
5687 goto unlock;
5688
439d7ac0 5689 /*
4e9ac947
VK
 5690 * The delayed work can race with an invalidate, hence we need to
5691 * recheck.
439d7ac0
PB
5692 */
5693
4e9ac947
VK
5694 if (dev_priv->drrs.busy_frontbuffer_bits)
5695 goto unlock;
439d7ac0 5696
4e9ac947
VK
5697 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR)
5698 intel_dp_set_drrs_state(dev_priv->dev,
5699 intel_dp->attached_connector->panel.
5700 downclock_mode->vrefresh);
439d7ac0 5701
4e9ac947 5702unlock:
4e9ac947 5703 mutex_unlock(&dev_priv->drrs.mutex);
439d7ac0
PB
5704}
5705
b33a2815 5706/**
0ddfd203 5707 * intel_edp_drrs_invalidate - Disable Idleness DRRS
b33a2815
VK
5708 * @dev: DRM device
5709 * @frontbuffer_bits: frontbuffer plane tracking bits
5710 *
0ddfd203
R
 5711 * This function gets called every time rendering on the given planes starts.
 5712 * Hence DRRS needs to be upclocked, i.e. (LOW_RR -> HIGH_RR).
b33a2815
VK
5713 *
5714 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5715 */
a93fad0f
VK
5716void intel_edp_drrs_invalidate(struct drm_device *dev,
5717 unsigned frontbuffer_bits)
5718{
5719 struct drm_i915_private *dev_priv = dev->dev_private;
5720 struct drm_crtc *crtc;
5721 enum pipe pipe;
5722
9da7d693 5723 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
a93fad0f
VK
5724 return;
5725
88f933a8 5726 cancel_delayed_work(&dev_priv->drrs.work);
3954e733 5727
a93fad0f 5728 mutex_lock(&dev_priv->drrs.mutex);
9da7d693
DV
5729 if (!dev_priv->drrs.dp) {
5730 mutex_unlock(&dev_priv->drrs.mutex);
5731 return;
5732 }
5733
a93fad0f
VK
5734 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5735 pipe = to_intel_crtc(crtc)->pipe;
5736
c1d038c6
DV
5737 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5738 dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
5739
0ddfd203 5740 /* invalidate means busy screen hence upclock */
c1d038c6 5741 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
a93fad0f
VK
5742 intel_dp_set_drrs_state(dev_priv->dev,
5743 dev_priv->drrs.dp->attached_connector->panel.
5744 fixed_mode->vrefresh);
a93fad0f 5745
a93fad0f
VK
5746 mutex_unlock(&dev_priv->drrs.mutex);
5747}
5748
b33a2815 5749/**
0ddfd203 5750 * intel_edp_drrs_flush - Restart Idleness DRRS
b33a2815
VK
5751 * @dev: DRM device
5752 * @frontbuffer_bits: frontbuffer plane tracking bits
5753 *
0ddfd203
R
 5754 * This function gets called every time rendering on the given planes has
 5755 * completed or a flip on a crtc has completed. DRRS should then be upclocked
 5756 * (LOW_RR -> HIGH_RR), and idleness detection should be restarted
 5757 * if no other planes are dirty.
b33a2815
VK
5758 *
5759 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5760 */
a93fad0f
VK
5761void intel_edp_drrs_flush(struct drm_device *dev,
5762 unsigned frontbuffer_bits)
5763{
5764 struct drm_i915_private *dev_priv = dev->dev_private;
5765 struct drm_crtc *crtc;
5766 enum pipe pipe;
5767
9da7d693 5768 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
a93fad0f
VK
5769 return;
5770
88f933a8 5771 cancel_delayed_work(&dev_priv->drrs.work);
3954e733 5772
a93fad0f 5773 mutex_lock(&dev_priv->drrs.mutex);
9da7d693
DV
5774 if (!dev_priv->drrs.dp) {
5775 mutex_unlock(&dev_priv->drrs.mutex);
5776 return;
5777 }
5778
a93fad0f
VK
5779 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5780 pipe = to_intel_crtc(crtc)->pipe;
c1d038c6
DV
5781
5782 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
a93fad0f
VK
5783 dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
5784
0ddfd203 5785 /* flush means busy screen hence upclock */
c1d038c6 5786 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
0ddfd203
R
5787 intel_dp_set_drrs_state(dev_priv->dev,
5788 dev_priv->drrs.dp->attached_connector->panel.
5789 fixed_mode->vrefresh);
5790
5791 /*
5792 * flush also means no more activity hence schedule downclock, if all
5793 * other fbs are quiescent too
5794 */
5795 if (!dev_priv->drrs.busy_frontbuffer_bits)
a93fad0f
VK
5796 schedule_delayed_work(&dev_priv->drrs.work,
5797 msecs_to_jiffies(1000));
5798 mutex_unlock(&dev_priv->drrs.mutex);
5799}
5800
b33a2815
VK
5801/**
5802 * DOC: Display Refresh Rate Switching (DRRS)
5803 *
5804 * Display Refresh Rate Switching (DRRS) is a power conservation feature
 5805 * which enables switching between low and high refresh rates
 5806 * dynamically, based on the usage scenario. This feature is applicable
 5807 * to internal panels.
5808 *
5809 * Indication that the panel supports DRRS is given by the panel EDID, which
5810 * would list multiple refresh rates for one resolution.
5811 *
5812 * DRRS is of 2 types - static and seamless.
5813 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5814 * (may appear as a blink on screen) and is used in dock-undock scenario.
5815 * Seamless DRRS involves changing RR without any visual effect to the user
5816 * and can be used during normal system usage. This is done by programming
5817 * certain registers.
5818 *
5819 * Support for static/seamless DRRS may be indicated in the VBT based on
5820 * inputs from the panel spec.
5821 *
5822 * DRRS saves power by switching to low RR based on usage scenarios.
5823 *
5824 * eDP DRRS:-
5825 * The implementation is based on frontbuffer tracking implementation.
5826 * When there is a disturbance on the screen triggered by user activity or a
5827 * periodic system activity, DRRS is disabled (RR is changed to high RR).
5828 * When there is no movement on screen, after a timeout of 1 second, a switch
5829 * to low RR is made.
5830 * For integration with frontbuffer tracking code,
5831 * intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are called.
5832 *
5833 * DRRS can be further extended to support other internal panels and also
5834 * the scenario of video playback wherein RR is set based on the rate
5835 * requested by userspace.
5836 */
5837
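The DOC block above describes the contract but not the call shape. Below is a hedged sketch of how a frontbuffer-tracking caller is expected to use the two entry points defined in this file; the wrapper names are hypothetical, only intel_edp_drrs_invalidate() and intel_edp_drrs_flush() are real symbols.

/* Illustrative callers -- the real hooks live in the i915 frontbuffer
 * tracking code, not in intel_dp.c. */
static void sketch_on_cpu_render_start(struct drm_device *dev,
				       unsigned frontbuffer_bits)
{
	/* screen is about to get busy: switch to high RR right away */
	intel_edp_drrs_invalidate(dev, frontbuffer_bits);
}

static void sketch_on_flip_or_flush_done(struct drm_device *dev,
					 unsigned frontbuffer_bits)
{
	/* stay at high RR, and (re)arm the ~1 s idle timer that drops
	 * back to the panel's low RR if nothing else is dirty */
	intel_edp_drrs_flush(dev, frontbuffer_bits);
}
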
5838/**
5839 * intel_dp_drrs_init - Init basic DRRS work and mutex.
5840 * @intel_connector: eDP connector
5841 * @fixed_mode: preferred mode of panel
5842 *
5843 * This function is called only once at driver load to initialize basic
 5844 * DRRS state.
5845 *
5846 * Returns:
5847 * Downclock mode if panel supports it, else return NULL.
5848 * DRRS support is determined by the presence of downclock mode (apart
5849 * from VBT setting).
5850 */
4f9db5b5 5851static struct drm_display_mode *
96178eeb
VK
5852intel_dp_drrs_init(struct intel_connector *intel_connector,
5853 struct drm_display_mode *fixed_mode)
4f9db5b5
PB
5854{
5855 struct drm_connector *connector = &intel_connector->base;
96178eeb 5856 struct drm_device *dev = connector->dev;
4f9db5b5
PB
5857 struct drm_i915_private *dev_priv = dev->dev_private;
5858 struct drm_display_mode *downclock_mode = NULL;
5859
9da7d693
DV
5860 INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5861 mutex_init(&dev_priv->drrs.mutex);
5862
4f9db5b5
PB
5863 if (INTEL_INFO(dev)->gen <= 6) {
 5864 DRM_DEBUG_KMS("DRRS is only supported for Gen7 and above\n");
5865 return NULL;
5866 }
5867
5868 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
4079b8d1 5869 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
4f9db5b5
PB
5870 return NULL;
5871 }
5872
5873 downclock_mode = intel_find_panel_downclock
5874 (dev, fixed_mode, connector);
5875
5876 if (!downclock_mode) {
a1d26342 5877 DRM_DEBUG_KMS("Downclock mode not found. DRRS not supported\n");
4f9db5b5
PB
5878 return NULL;
5879 }
5880
96178eeb 5881 dev_priv->drrs.type = dev_priv->vbt.drrs_type;
4f9db5b5 5882
96178eeb 5883 dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
4079b8d1 5884 DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
4f9db5b5
PB
5885 return downclock_mode;
5886}
5887
ed92f0b2 5888static bool intel_edp_init_connector(struct intel_dp *intel_dp,
36b5f425 5889 struct intel_connector *intel_connector)
ed92f0b2
PZ
5890{
5891 struct drm_connector *connector = &intel_connector->base;
5892 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
63635217
PZ
5893 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5894 struct drm_device *dev = intel_encoder->base.dev;
ed92f0b2
PZ
5895 struct drm_i915_private *dev_priv = dev->dev_private;
5896 struct drm_display_mode *fixed_mode = NULL;
4f9db5b5 5897 struct drm_display_mode *downclock_mode = NULL;
ed92f0b2
PZ
5898 bool has_dpcd;
5899 struct drm_display_mode *scan;
5900 struct edid *edid;
6517d273 5901 enum pipe pipe = INVALID_PIPE;
ed92f0b2
PZ
5902
5903 if (!is_edp(intel_dp))
5904 return true;
5905
49e6bc51
VS
5906 pps_lock(intel_dp);
5907 intel_edp_panel_vdd_sanitize(intel_dp);
5908 pps_unlock(intel_dp);
63635217 5909
ed92f0b2 5910 /* Cache DPCD and EDID for edp. */
ed92f0b2 5911 has_dpcd = intel_dp_get_dpcd(intel_dp);
ed92f0b2
PZ
5912
5913 if (has_dpcd) {
5914 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
5915 dev_priv->no_aux_handshake =
5916 intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
5917 DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
5918 } else {
5919 /* if this fails, presume the device is a ghost */
5920 DRM_INFO("failed to retrieve link info, disabling eDP\n");
ed92f0b2
PZ
5921 return false;
5922 }
5923
5924 /* We now know it's not a ghost, init power sequence regs. */
773538e8 5925 pps_lock(intel_dp);
36b5f425 5926 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp);
773538e8 5927 pps_unlock(intel_dp);
ed92f0b2 5928
060c8778 5929 mutex_lock(&dev->mode_config.mutex);
0b99836f 5930 edid = drm_get_edid(connector, &intel_dp->aux.ddc);
ed92f0b2
PZ
5931 if (edid) {
5932 if (drm_add_edid_modes(connector, edid)) {
5933 drm_mode_connector_update_edid_property(connector,
5934 edid);
5935 drm_edid_to_eld(connector, edid);
5936 } else {
5937 kfree(edid);
5938 edid = ERR_PTR(-EINVAL);
5939 }
5940 } else {
5941 edid = ERR_PTR(-ENOENT);
5942 }
5943 intel_connector->edid = edid;
5944
5945 /* prefer fixed mode from EDID if available */
5946 list_for_each_entry(scan, &connector->probed_modes, head) {
5947 if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
5948 fixed_mode = drm_mode_duplicate(dev, scan);
4f9db5b5 5949 downclock_mode = intel_dp_drrs_init(
4f9db5b5 5950 intel_connector, fixed_mode);
ed92f0b2
PZ
5951 break;
5952 }
5953 }
5954
5955 /* fallback to VBT if available for eDP */
5956 if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
5957 fixed_mode = drm_mode_duplicate(dev,
5958 dev_priv->vbt.lfp_lvds_vbt_mode);
5959 if (fixed_mode)
5960 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
5961 }
060c8778 5962 mutex_unlock(&dev->mode_config.mutex);
ed92f0b2 5963
01527b31
CT
5964 if (IS_VALLEYVIEW(dev)) {
5965 intel_dp->edp_notifier.notifier_call = edp_notify_handler;
5966 register_reboot_notifier(&intel_dp->edp_notifier);
6517d273
VS
5967
5968 /*
5969 * Figure out the current pipe for the initial backlight setup.
5970 * If the current pipe isn't valid, try the PPS pipe, and if that
5971 * fails just assume pipe A.
5972 */
5973 if (IS_CHERRYVIEW(dev))
5974 pipe = DP_PORT_TO_PIPE_CHV(intel_dp->DP);
5975 else
5976 pipe = PORT_TO_PIPE(intel_dp->DP);
5977
5978 if (pipe != PIPE_A && pipe != PIPE_B)
5979 pipe = intel_dp->pps_pipe;
5980
5981 if (pipe != PIPE_A && pipe != PIPE_B)
5982 pipe = PIPE_A;
5983
5984 DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
5985 pipe_name(pipe));
01527b31
CT
5986 }
5987
4f9db5b5 5988 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
73580fb7 5989 intel_connector->panel.backlight_power = intel_edp_backlight_power;
6517d273 5990 intel_panel_setup_backlight(connector, pipe);
ed92f0b2
PZ
5991
5992 return true;
5993}
5994
16c25533 5995bool
f0fec3f2
PZ
5996intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
5997 struct intel_connector *intel_connector)
a4fc5ed6 5998{
f0fec3f2
PZ
5999 struct drm_connector *connector = &intel_connector->base;
6000 struct intel_dp *intel_dp = &intel_dig_port->dp;
6001 struct intel_encoder *intel_encoder = &intel_dig_port->base;
6002 struct drm_device *dev = intel_encoder->base.dev;
a4fc5ed6 6003 struct drm_i915_private *dev_priv = dev->dev_private;
174edf1f 6004 enum port port = intel_dig_port->port;
0b99836f 6005 int type;
a4fc5ed6 6006
a4a5d2f8
VS
6007 intel_dp->pps_pipe = INVALID_PIPE;
6008
ec5b01dd 6009 /* intel_dp vfuncs */
b6b5e383
DL
6010 if (INTEL_INFO(dev)->gen >= 9)
6011 intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
6012 else if (IS_VALLEYVIEW(dev))
ec5b01dd
DL
6013 intel_dp->get_aux_clock_divider = vlv_get_aux_clock_divider;
6014 else if (IS_HASWELL(dev) || IS_BROADWELL(dev))
6015 intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
6016 else if (HAS_PCH_SPLIT(dev))
6017 intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
6018 else
6019 intel_dp->get_aux_clock_divider = i9xx_get_aux_clock_divider;
6020
b9ca5fad
DL
6021 if (INTEL_INFO(dev)->gen >= 9)
6022 intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
6023 else
6024 intel_dp->get_aux_send_ctl = i9xx_get_aux_send_ctl;
153b1100 6025
0767935e
DV
6026 /* Preserve the current hw state. */
6027 intel_dp->DP = I915_READ(intel_dp->output_reg);
dd06f90e 6028 intel_dp->attached_connector = intel_connector;
3d3dc149 6029
3b32a35b 6030 if (intel_dp_is_edp(dev, port))
b329530c 6031 type = DRM_MODE_CONNECTOR_eDP;
3b32a35b
VS
6032 else
6033 type = DRM_MODE_CONNECTOR_DisplayPort;
b329530c 6034
f7d24902
ID
6035 /*
6036 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
6037 * for DP the encoder type can be set by the caller to
6038 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
6039 */
6040 if (type == DRM_MODE_CONNECTOR_eDP)
6041 intel_encoder->type = INTEL_OUTPUT_EDP;
6042
c17ed5b5
VS
6043 /* eDP only on port B and/or C on vlv/chv */
6044 if (WARN_ON(IS_VALLEYVIEW(dev) && is_edp(intel_dp) &&
6045 port != PORT_B && port != PORT_C))
6046 return false;
6047
e7281eab
ID
6048 DRM_DEBUG_KMS("Adding %s connector on port %c\n",
6049 type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
6050 port_name(port));
6051
b329530c 6052 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
a4fc5ed6
KP
6053 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
6054
a4fc5ed6
KP
6055 connector->interlace_allowed = true;
6056 connector->doublescan_allowed = 0;
6057
f0fec3f2 6058 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
4be73780 6059 edp_panel_vdd_work);
a4fc5ed6 6060
df0e9248 6061 intel_connector_attach_encoder(intel_connector, intel_encoder);
34ea3d38 6062 drm_connector_register(connector);
a4fc5ed6 6063
affa9354 6064 if (HAS_DDI(dev))
bcbc889b
PZ
6065 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
6066 else
6067 intel_connector->get_hw_state = intel_connector_get_hw_state;
80f65de3 6068 intel_connector->unregister = intel_dp_connector_unregister;
bcbc889b 6069
0b99836f 6070 /* Set up the hotplug pin. */
ab9d7c30
PZ
6071 switch (port) {
6072 case PORT_A:
1d843f9d 6073 intel_encoder->hpd_pin = HPD_PORT_A;
ab9d7c30
PZ
6074 break;
6075 case PORT_B:
1d843f9d 6076 intel_encoder->hpd_pin = HPD_PORT_B;
cf1d5883
SJ
6077 if (IS_BROXTON(dev_priv) && (INTEL_REVID(dev) < BXT_REVID_B0))
6078 intel_encoder->hpd_pin = HPD_PORT_A;
ab9d7c30
PZ
6079 break;
6080 case PORT_C:
1d843f9d 6081 intel_encoder->hpd_pin = HPD_PORT_C;
ab9d7c30
PZ
6082 break;
6083 case PORT_D:
1d843f9d 6084 intel_encoder->hpd_pin = HPD_PORT_D;
ab9d7c30 6085 break;
26951caf
XZ
6086 case PORT_E:
6087 intel_encoder->hpd_pin = HPD_PORT_E;
6088 break;
ab9d7c30 6089 default:
ad1c0b19 6090 BUG();
5eb08b69
ZW
6091 }
6092
dada1a9f 6093 if (is_edp(intel_dp)) {
773538e8 6094 pps_lock(intel_dp);
1e74a324
VS
6095 intel_dp_init_panel_power_timestamps(intel_dp);
6096 if (IS_VALLEYVIEW(dev))
a4a5d2f8 6097 vlv_initial_power_sequencer_setup(intel_dp);
1e74a324 6098 else
36b5f425 6099 intel_dp_init_panel_power_sequencer(dev, intel_dp);
773538e8 6100 pps_unlock(intel_dp);
dada1a9f 6101 }
0095e6dc 6102
9d1a1031 6103 intel_dp_aux_init(intel_dp, intel_connector);
c1f05264 6104
0e32b39c 6105 /* init MST on ports that can support it */
0c9b3715
JN
6106 if (HAS_DP_MST(dev) &&
6107 (port == PORT_B || port == PORT_C || port == PORT_D))
6108 intel_dp_mst_encoder_init(intel_dig_port,
6109 intel_connector->base.base.id);
0e32b39c 6110
36b5f425 6111 if (!intel_edp_init_connector(intel_dp, intel_connector)) {
4f71d0cb 6112 drm_dp_aux_unregister(&intel_dp->aux);
15b1d171
PZ
6113 if (is_edp(intel_dp)) {
6114 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
951468f3
VS
6115 /*
 6116 * vdd might still be enabled due to the delayed vdd off.
6117 * Make sure vdd is actually turned off here.
6118 */
773538e8 6119 pps_lock(intel_dp);
4be73780 6120 edp_panel_vdd_off_sync(intel_dp);
773538e8 6121 pps_unlock(intel_dp);
15b1d171 6122 }
34ea3d38 6123 drm_connector_unregister(connector);
b2f246a8 6124 drm_connector_cleanup(connector);
16c25533 6125 return false;
b2f246a8 6126 }
32f9d658 6127
f684960e
CW
6128 intel_dp_add_properties(intel_dp, connector);
6129
a4fc5ed6
KP
6130 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
6131 * 0xd. Failure to do so will result in spurious interrupts being
6132 * generated on the port when a cable is not attached.
6133 */
6134 if (IS_G4X(dev) && !IS_GM45(dev)) {
6135 u32 temp = I915_READ(PEG_BAND_GAP_DATA);
6136 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
6137 }
16c25533 6138
aa7471d2
JN
6139 i915_debugfs_connector_add(connector);
6140
16c25533 6141 return true;
a4fc5ed6 6142}
f0fec3f2
PZ
6143
6144void
6145intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
6146{
13cf5504 6147 struct drm_i915_private *dev_priv = dev->dev_private;
f0fec3f2
PZ
6148 struct intel_digital_port *intel_dig_port;
6149 struct intel_encoder *intel_encoder;
6150 struct drm_encoder *encoder;
6151 struct intel_connector *intel_connector;
6152
b14c5679 6153 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
f0fec3f2
PZ
6154 if (!intel_dig_port)
6155 return;
6156
08d9bc92 6157 intel_connector = intel_connector_alloc();
f0fec3f2
PZ
6158 if (!intel_connector) {
6159 kfree(intel_dig_port);
6160 return;
6161 }
6162
6163 intel_encoder = &intel_dig_port->base;
6164 encoder = &intel_encoder->base;
6165
6166 drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
6167 DRM_MODE_ENCODER_TMDS);
6168
5bfe2ac0 6169 intel_encoder->compute_config = intel_dp_compute_config;
00c09d70 6170 intel_encoder->disable = intel_disable_dp;
00c09d70 6171 intel_encoder->get_hw_state = intel_dp_get_hw_state;
045ac3b5 6172 intel_encoder->get_config = intel_dp_get_config;
07f9cd0b 6173 intel_encoder->suspend = intel_dp_encoder_suspend;
e4a1d846 6174 if (IS_CHERRYVIEW(dev)) {
9197c88b 6175 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
e4a1d846
CML
6176 intel_encoder->pre_enable = chv_pre_enable_dp;
6177 intel_encoder->enable = vlv_enable_dp;
580d3811 6178 intel_encoder->post_disable = chv_post_disable_dp;
d6db995f 6179 intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
e4a1d846 6180 } else if (IS_VALLEYVIEW(dev)) {
ecff4f3b 6181 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
ab1f90f9
JN
6182 intel_encoder->pre_enable = vlv_pre_enable_dp;
6183 intel_encoder->enable = vlv_enable_dp;
49277c31 6184 intel_encoder->post_disable = vlv_post_disable_dp;
ab1f90f9 6185 } else {
ecff4f3b
JN
6186 intel_encoder->pre_enable = g4x_pre_enable_dp;
6187 intel_encoder->enable = g4x_enable_dp;
08aff3fe
VS
6188 if (INTEL_INFO(dev)->gen >= 5)
6189 intel_encoder->post_disable = ilk_post_disable_dp;
ab1f90f9 6190 }
f0fec3f2 6191
174edf1f 6192 intel_dig_port->port = port;
f0fec3f2
PZ
6193 intel_dig_port->dp.output_reg = output_reg;
6194
00c09d70 6195 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
882ec384
VS
6196 if (IS_CHERRYVIEW(dev)) {
6197 if (port == PORT_D)
6198 intel_encoder->crtc_mask = 1 << 2;
6199 else
6200 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
6201 } else {
6202 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
6203 }
bc079e8b 6204 intel_encoder->cloneable = 0;
f0fec3f2 6205
13cf5504 6206 intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
5fcece80 6207 dev_priv->hotplug.irq_port[port] = intel_dig_port;
13cf5504 6208
15b1d171
PZ
6209 if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
6210 drm_encoder_cleanup(encoder);
6211 kfree(intel_dig_port);
b2f246a8 6212 kfree(intel_connector);
15b1d171 6213 }
f0fec3f2 6214}
0e32b39c
DA
6215
6216void intel_dp_mst_suspend(struct drm_device *dev)
6217{
6218 struct drm_i915_private *dev_priv = dev->dev_private;
6219 int i;
6220
6221 /* disable MST */
6222 for (i = 0; i < I915_MAX_PORTS; i++) {
5fcece80 6223 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
0e32b39c
DA
6224 if (!intel_dig_port)
6225 continue;
6226
6227 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6228 if (!intel_dig_port->dp.can_mst)
6229 continue;
6230 if (intel_dig_port->dp.is_mst)
6231 drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
6232 }
6233 }
6234}
6235
6236void intel_dp_mst_resume(struct drm_device *dev)
6237{
6238 struct drm_i915_private *dev_priv = dev->dev_private;
6239 int i;
6240
6241 for (i = 0; i < I915_MAX_PORTS; i++) {
5fcece80 6242 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
0e32b39c
DA
6243 if (!intel_dig_port)
6244 continue;
6245 if (intel_dig_port->base.type == INTEL_OUTPUT_DISPLAYPORT) {
6246 int ret;
6247
6248 if (!intel_dig_port->dp.can_mst)
6249 continue;
6250
6251 ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
6252 if (ret != 0) {
6253 intel_dp_check_mst_status(&intel_dig_port->dp);
6254 }
6255 }
6256 }
6257}