/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <asm/byteorder.h>
#include <drm/drmP.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"

#define DP_LINK_CHECK_TIMEOUT	(10 * 1000)

/* Compliance test status bits */
#define INTEL_DP_RESOLUTION_SHIFT_MASK	0
#define INTEL_DP_RESOLUTION_PREFERRED	(1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_STANDARD	(2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_FAILSAFE	(3 << INTEL_DP_RESOLUTION_SHIFT_MASK)

struct dp_link_dpll {
	int clock;
	struct dpll dpll;
};

static const struct dp_link_dpll gen4_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

static const struct dp_link_dpll pch_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};

static const struct dp_link_dpll vlv_dpll[] = {
	{ 162000,
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ 270000,
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};

/*
 * CHV supports eDP 1.4, which has more link rates.
 * Only the fixed rates are provided below; the variable rates are excluded.
 */
static const struct dp_link_dpll chv_dpll[] = {
	/*
	 * CHV requires programming a fractional division for m2.
	 * m2 is stored in fixed point format using the formula below:
	 * (m2_int << 22) | m2_fraction
	 */
	{ 162000,	/* m2_int = 32, m2_fraction = 1677722 */
		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
	{ 270000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
	{ 540000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } }
};
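
/*
 * Worked example for the fixed-point encoding above (illustrative): for
 * the 162000 entry, (32 << 22) | 1677722 = 0x8000000 | 0x19999a =
 * 0x819999a, which matches .m2 in the table.
 */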

static const int bxt_rates[] = { 162000, 216000, 243000, 270000,
				  324000, 432000, 540000 };
static const int skl_rates[] = { 162000, 216000, 270000,
				  324000, 432000, 540000 };
static const int default_rates[] = { 162000, 270000, 540000 };

/**
 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
 * @intel_dp: DP struct
 *
 * If a CPU or PCH DP output is attached to an eDP panel, this function
 * will return true, and false otherwise.
 */
static bool is_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
}

static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.base.dev;
}

static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
{
	return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
}

static void intel_dp_link_down(struct intel_dp *intel_dp);
static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp);
static void vlv_steal_power_sequencer(struct drm_device *dev,
				      enum pipe pipe);
static void intel_dp_unset_edid(struct intel_dp *intel_dp);

static int intel_dp_num_rates(u8 link_bw_code)
{
	switch (link_bw_code) {
	default:
		WARN(1, "invalid max DP link bw val %x, using 1.62Gbps\n",
		     link_bw_code);
	case DP_LINK_BW_1_62:
		return 1;
	case DP_LINK_BW_2_7:
		return 2;
	case DP_LINK_BW_5_4:
		return 3;
	}
}
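
/*
 * Illustrative mapping (not part of the code): DP_LINK_BW_1_62 (0x06)
 * yields {162000}, DP_LINK_BW_2_7 (0x0a) yields {162000, 270000}, and
 * DP_LINK_BW_5_4 (0x14) yields {162000, 270000, 540000}, i.e. the first
 * num_rates entries of default_rates[].
 */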

/* update sink rates from dpcd */
static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
{
	int i, num_rates;

	num_rates = intel_dp_num_rates(intel_dp->dpcd[DP_MAX_LINK_RATE]);

	for (i = 0; i < num_rates; i++)
		intel_dp->sink_rates[i] = default_rates[i];

	intel_dp->num_sink_rates = num_rates;
}

static int intel_dp_max_sink_rate(struct intel_dp *intel_dp)
{
	return intel_dp->sink_rates[intel_dp->num_sink_rates - 1];
}

static u8 intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	u8 source_max, sink_max;

	source_max = intel_dig_port->max_lanes;
	sink_max = intel_dp->max_sink_lane_count;

	return min(source_max, sink_max);
}

int
intel_dp_link_required(int pixel_clock, int bpp)
{
	/* pixel_clock is in kHz, divide bpp by 8 for bit-to-byte conversion */
	return DIV_ROUND_UP(pixel_clock * bpp, 8);
}
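
/*
 * Worked example (illustrative): a 1920x1080@60 mode with a 148500 kHz
 * pixel clock at 24 bpp requires DIV_ROUND_UP(148500 * 24, 8) = 445500
 * kB/s of link bandwidth.
 */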

int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	/* max_link_clock is the link symbol clock (LS_Clk) in kHz and not the
	 * link rate that is generally expressed in Gbps. Since 8 bits of data
	 * are transmitted every LS_Clk per lane, there is no need to account
	 * for the channel encoding that is done in the PHY layer here.
	 */

	return max_link_clock * max_lanes;
}
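
/*
 * E.g. (illustrative) a 4-lane HBR2 link gives 540000 * 4 = 2160000,
 * comfortably above the 445500 required in the example above.
 */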

static int
intel_dp_downstream_max_dotclock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	int max_dotclk = dev_priv->max_dotclk_freq;
	int ds_max_dotclk;

	int type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;

	if (type != DP_DS_PORT_TYPE_VGA)
		return max_dotclk;

	ds_max_dotclk = drm_dp_downstream_max_clock(intel_dp->dpcd,
						    intel_dp->downstream_ports);

	if (ds_max_dotclk != 0)
		max_dotclk = min(max_dotclk, ds_max_dotclk);

	return max_dotclk;
}

static void
intel_dp_set_source_rates(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	const int *source_rates;
	int size;

	/* This should only be done once */
	WARN_ON(intel_dp->source_rates || intel_dp->num_source_rates);

	if (IS_GEN9_LP(dev_priv)) {
		source_rates = bxt_rates;
		size = ARRAY_SIZE(bxt_rates);
	} else if (IS_GEN9_BC(dev_priv)) {
		source_rates = skl_rates;
		size = ARRAY_SIZE(skl_rates);
	} else {
		source_rates = default_rates;
		size = ARRAY_SIZE(default_rates);
	}

	/* This depends on the fact that 5.4 is the last value in the array */
	if (!intel_dp_source_supports_hbr2(intel_dp))
		size--;

	intel_dp->source_rates = source_rates;
	intel_dp->num_source_rates = size;
}

static int intersect_rates(const int *source_rates, int source_len,
			   const int *sink_rates, int sink_len,
			   int *common_rates)
{
	int i = 0, j = 0, k = 0;

	while (i < source_len && j < sink_len) {
		if (source_rates[i] == sink_rates[j]) {
			if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
				return k;
			common_rates[k] = source_rates[i];
			++k;
			++i;
			++j;
		} else if (source_rates[i] < sink_rates[j]) {
			++i;
		} else {
			++j;
		}
	}
	return k;
}
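
/*
 * Example (illustrative): intersecting skl_rates {162000, 216000, 270000,
 * 324000, 432000, 540000} with the default sink rates {162000, 270000,
 * 540000} yields {162000, 270000, 540000}. Both inputs must be sorted in
 * ascending order for this merge-style walk to work.
 */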

/* return index of rate in rates array, or -1 if not found */
static int intel_dp_rate_index(const int *rates, int len, int rate)
{
	int i;

	for (i = 0; i < len; i++)
		if (rate == rates[i])
			return i;

	return -1;
}

static int intel_dp_common_rates(struct intel_dp *intel_dp,
				 int *common_rates)
{
	int max_rate = intel_dp->max_sink_link_rate;
	int i, common_len;

	common_len = intersect_rates(intel_dp->source_rates,
				     intel_dp->num_source_rates,
				     intel_dp->sink_rates,
				     intel_dp->num_sink_rates,
				     common_rates);

	/* Limit results by potentially reduced max rate */
	for (i = 0; i < common_len; i++) {
		if (common_rates[common_len - i - 1] <= max_rate)
			return common_len - i;
	}

	return 0;
}

static int intel_dp_link_rate_index(struct intel_dp *intel_dp,
				    int *common_rates, int link_rate)
{
	int common_len;

	common_len = intel_dp_common_rates(intel_dp, common_rates);

	return intel_dp_rate_index(common_rates, common_len, link_rate);
}

int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
					    int link_rate, uint8_t lane_count)
{
	int common_rates[DP_MAX_SUPPORTED_RATES];
	int link_rate_index;

	link_rate_index = intel_dp_link_rate_index(intel_dp,
						   common_rates,
						   link_rate);
	if (link_rate_index > 0) {
		intel_dp->max_sink_link_rate = common_rates[link_rate_index - 1];
		intel_dp->max_sink_lane_count = lane_count;
	} else if (lane_count > 1) {
		intel_dp->max_sink_link_rate = intel_dp_max_sink_rate(intel_dp);
		intel_dp->max_sink_lane_count = lane_count >> 1;
	} else {
		DRM_ERROR("Link Training Unsuccessful\n");
		return -1;
	}

	return 0;
}
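
/*
 * Illustrative fallback walk for a sink that starts at 540000 kHz x4:
 * the link rate drops first (540000 -> 270000 -> 162000) while the lane
 * count is kept, then the lane count is halved (4 -> 2 -> 1) with the
 * rate reset to the sink maximum; only failing at the lowest rate on a
 * single lane is fatal.
 */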

static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;
	int max_dotclk;

	max_dotclk = intel_dp_downstream_max_dotclock(intel_dp);

	if (is_edp(intel_dp) && fixed_mode) {
		if (mode->hdisplay > fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay > fixed_mode->vdisplay)
			return MODE_PANEL;

		target_clock = fixed_mode->clock;
	}

	max_link_clock = intel_dp_max_link_rate(intel_dp);
	max_lanes = intel_dp_max_lane_count(intel_dp);

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
	mode_rate = intel_dp_link_required(target_clock, 18);

	if (mode_rate > max_rate || target_clock > max_dotclk)
		return MODE_CLOCK_HIGH;

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	return MODE_OK;
}
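
/*
 * Note that the check above uses 18 bpp (6 bpc), the lowest bpp the link
 * could fall back to; e.g. (illustrative) 148500 kHz * 18 / 8 = 334125
 * must fit within the maximum link data rate.
 */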

uint32_t intel_dp_pack_aux(const uint8_t *src, int src_bytes)
{
	int i;
	uint32_t v = 0;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= ((uint32_t) src[i]) << ((3-i) * 8);
	return v;
}
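
/*
 * E.g. packing the two bytes {0x12, 0x34} yields 0x12340000: byte 0 lands
 * in the most significant byte, matching the big-endian layout of the AUX
 * data registers.
 */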

static void intel_dp_unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int i;
	if (dst_bytes > 4)
		dst_bytes = 4;
	for (i = 0; i < dst_bytes; i++)
		dst[i] = src >> ((3-i) * 8);
}

static void
intel_dp_init_panel_power_sequencer(struct drm_device *dev,
				    struct intel_dp *intel_dp);
static void
intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
					      struct intel_dp *intel_dp,
					      bool force_disable_vdd);
static void
intel_dp_pps_init(struct drm_device *dev, struct intel_dp *intel_dp);

static void pps_lock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	/*
	 * See vlv_power_sequencer_reset() for why we need
	 * a power domain reference here.
	 */
	intel_display_power_get(dev_priv, intel_dp->aux_power_domain);

	mutex_lock(&dev_priv->pps_mutex);
}

static void pps_unlock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	mutex_unlock(&dev_priv->pps_mutex);

	intel_display_power_put(dev_priv, intel_dp->aux_power_domain);
}

static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
	enum pipe pipe = intel_dp->pps_pipe;
	bool pll_enabled, release_cl_override = false;
	enum dpio_phy phy = DPIO_PHY(pipe);
	enum dpio_channel ch = vlv_pipe_to_channel(pipe);
	uint32_t DP;

	if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
		 "skipping pipe %c power sequencer kick due to port %c being active\n",
		 pipe_name(pipe), port_name(intel_dig_port->port)))
		return;

	DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
		      pipe_name(pipe), port_name(intel_dig_port->port));

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	DP |= DP_PORT_WIDTH(1);
	DP |= DP_LINK_TRAIN_PAT_1;

	if (IS_CHERRYVIEW(dev_priv))
		DP |= DP_PIPE_SELECT_CHV(pipe);
	else if (pipe == PIPE_B)
		DP |= DP_PIPEB_SELECT;

	pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;

	/*
	 * The DPLL for the pipe must be enabled for this to work.
	 * So enable it temporarily if it's not already enabled.
	 */
	if (!pll_enabled) {
		release_cl_override = IS_CHERRYVIEW(dev_priv) &&
			!chv_phy_powergate_ch(dev_priv, phy, ch, true);

		if (vlv_force_pll_on(dev_priv, pipe, IS_CHERRYVIEW(dev_priv) ?
				     &chv_dpll[0].dpll : &vlv_dpll[0].dpll)) {
			DRM_ERROR("Failed to force on pll for pipe %c!\n",
				  pipe_name(pipe));
			return;
		}
	}

	/*
	 * Similar magic as in intel_dp_enable_port().
	 * We _must_ do this port enable + disable trick
	 * to make this power sequencer lock onto the port.
	 * Otherwise even the VDD force bit won't work.
	 */
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	if (!pll_enabled) {
		vlv_force_pll_off(dev_priv, pipe);

		if (release_cl_override)
			chv_phy_powergate_ch(dev_priv, phy, ch, false);
	}
}

static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);

	/*
	 * We don't have a power sequencer currently.
	 * Pick one that's not used by another port.
	 */
	for_each_intel_encoder(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp;

		if (encoder->type != INTEL_OUTPUT_DP &&
		    encoder->type != INTEL_OUTPUT_EDP)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);

		if (encoder->type == INTEL_OUTPUT_EDP) {
			WARN_ON(intel_dp->active_pipe != INVALID_PIPE &&
				intel_dp->active_pipe != intel_dp->pps_pipe);

			if (intel_dp->pps_pipe != INVALID_PIPE)
				pipes &= ~(1 << intel_dp->pps_pipe);
		} else {
			WARN_ON(intel_dp->pps_pipe != INVALID_PIPE);

			if (intel_dp->active_pipe != INVALID_PIPE)
				pipes &= ~(1 << intel_dp->active_pipe);
		}
	}

	if (pipes == 0)
		return INVALID_PIPE;

	return ffs(pipes) - 1;
}

static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	WARN_ON(!is_edp(intel_dp));

	WARN_ON(intel_dp->active_pipe != INVALID_PIPE &&
		intel_dp->active_pipe != intel_dp->pps_pipe);

	if (intel_dp->pps_pipe != INVALID_PIPE)
		return intel_dp->pps_pipe;

	pipe = vlv_find_free_pps(dev_priv);

	/*
	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	 */
	if (WARN_ON(pipe == INVALID_PIPE))
		pipe = PIPE_A;

	vlv_steal_power_sequencer(dev, pipe);
	intel_dp->pps_pipe = pipe;

	DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe),
		      port_name(intel_dig_port->port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, true);

	/*
	 * Even vdd force doesn't work until we've made
	 * the power sequencer lock in on the port.
	 */
	vlv_power_sequencer_kick(intel_dp);

	return intel_dp->pps_pipe;
}

static int
bxt_power_sequencer_idx(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	WARN_ON(!is_edp(intel_dp));

	/*
	 * TODO: BXT has 2 PPS instances. The correct port->PPS instance
	 * mapping needs to be retrieved from VBT, for now just hard-code to
	 * use instance #0 always.
	 */
	if (!intel_dp->pps_reset)
		return 0;

	intel_dp->pps_reset = false;

	/*
	 * Only the HW needs to be reprogrammed, the SW state is fixed and
	 * has been set up during connector init.
	 */
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, false);

	return 0;
}

typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
			       enum pipe pipe);

static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	return I915_READ(PP_STATUS(pipe)) & PP_ON;
}

static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
				enum pipe pipe)
{
	return I915_READ(PP_CONTROL(pipe)) & EDP_FORCE_VDD;
}

static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
			 enum pipe pipe)
{
	return true;
}

static enum pipe
vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
		     enum port port,
		     vlv_pipe_check pipe_check)
{
	enum pipe pipe;

	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
		u32 port_sel = I915_READ(PP_ON_DELAYS(pipe)) &
			PANEL_PORT_SELECT_MASK;

		if (port_sel != PANEL_PORT_SELECT_VLV(port))
			continue;

		if (!pipe_check(dev_priv, pipe))
			continue;

		return pipe;
	}

	return INVALID_PIPE;
}

static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum port port = intel_dig_port->port;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* try to find a pipe with this port selected */
	/* first pick one where the panel is on */
	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
						  vlv_pipe_has_pp_on);
	/* didn't find one? pick one where vdd is on */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_has_vdd_on);
	/* didn't find one? pick one with just the correct port */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_any);

	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
	if (intel_dp->pps_pipe == INVALID_PIPE) {
		DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
			      port_name(port));
		return;
	}

	DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
		      port_name(port), pipe_name(intel_dp->pps_pipe));

	intel_dp_init_panel_power_sequencer(dev, intel_dp);
	intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, false);
}

void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_encoder *encoder;

	if (WARN_ON(!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
		    !IS_GEN9_LP(dev_priv)))
		return;

	/*
	 * We can't grab pps_mutex here due to deadlock with power_domain
	 * mutex when power_domain functions are called while holding pps_mutex.
	 * That also means that in order to use pps_pipe the code needs to
	 * hold both a power domain reference and pps_mutex, and the power domain
	 * reference get/put must be done while _not_ holding pps_mutex.
	 * pps_{lock,unlock}() do these steps in the correct order, so they
	 * should always be used.
	 */

	for_each_intel_encoder(dev, encoder) {
		struct intel_dp *intel_dp;

		if (encoder->type != INTEL_OUTPUT_DP &&
		    encoder->type != INTEL_OUTPUT_EDP)
			continue;

		intel_dp = enc_to_intel_dp(&encoder->base);

		WARN_ON(intel_dp->active_pipe != INVALID_PIPE);

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		if (IS_GEN9_LP(dev_priv))
			intel_dp->pps_reset = true;
		else
			intel_dp->pps_pipe = INVALID_PIPE;
	}
}

struct pps_registers {
	i915_reg_t pp_ctrl;
	i915_reg_t pp_stat;
	i915_reg_t pp_on;
	i915_reg_t pp_off;
	i915_reg_t pp_div;
};

static void intel_pps_get_registers(struct drm_i915_private *dev_priv,
				    struct intel_dp *intel_dp,
				    struct pps_registers *regs)
{
	int pps_idx = 0;

	memset(regs, 0, sizeof(*regs));

	if (IS_GEN9_LP(dev_priv))
		pps_idx = bxt_power_sequencer_idx(intel_dp);
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		pps_idx = vlv_power_sequencer_pipe(intel_dp);

	regs->pp_ctrl = PP_CONTROL(pps_idx);
	regs->pp_stat = PP_STATUS(pps_idx);
	regs->pp_on = PP_ON_DELAYS(pps_idx);
	regs->pp_off = PP_OFF_DELAYS(pps_idx);
	if (!IS_GEN9_LP(dev_priv))
		regs->pp_div = PP_DIVISOR(pps_idx);
}

static i915_reg_t
_pp_ctrl_reg(struct intel_dp *intel_dp)
{
	struct pps_registers regs;

	intel_pps_get_registers(to_i915(intel_dp_to_dev(intel_dp)), intel_dp,
				&regs);

	return regs.pp_ctrl;
}

static i915_reg_t
_pp_stat_reg(struct intel_dp *intel_dp)
{
	struct pps_registers regs;

	intel_pps_get_registers(to_i915(intel_dp_to_dev(intel_dp)), intel_dp,
				&regs);

	return regs.pp_stat;
}

/*
 * Reboot notifier handler to shut down panel power and guarantee T12 timing.
 * This function is only applicable when the panel PM state is not tracked.
 */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
			      void *unused)
{
	struct intel_dp *intel_dp = container_of(this, typeof(*intel_dp),
						 edp_notifier);
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!is_edp(intel_dp) || code != SYS_RESTART)
		return 0;

	pps_lock(intel_dp);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
		i915_reg_t pp_ctrl_reg, pp_div_reg;
		u32 pp_div;

		pp_ctrl_reg = PP_CONTROL(pipe);
		pp_div_reg = PP_DIVISOR(pipe);
		pp_div = I915_READ(pp_div_reg);
		pp_div &= PP_REFERENCE_DIVIDER_MASK;

		/* 0x1F write to PP_DIV_REG sets max cycle delay */
		I915_WRITE(pp_div_reg, pp_div | 0x1F);
		I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
		msleep(intel_dp->panel_power_cycle_delay);
	}

	pps_unlock(intel_dp);

	return 0;
}

static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dev);

	lockdep_assert_held(&dev_priv->pps_mutex);

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
}

static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dev);

	lockdep_assert_held(&dev_priv->pps_mutex);

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    intel_dp->pps_pipe == INVALID_PIPE)
		return false;

	return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
}

static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!is_edp(intel_dp))
		return;

	if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
		WARN(1, "eDP powered off while attempting aux channel communication.\n");
		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
			      I915_READ(_pp_stat_reg(intel_dp)),
			      I915_READ(_pp_ctrl_reg(intel_dp)));
	}
}

static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t status;
	bool done;

#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	if (has_aux_irq)
		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
					  msecs_to_jiffies_timeout(10));
	else
		done = wait_for(C, 10) == 0;
	if (!done)
		DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
			  has_aux_irq);
#undef C

	return status;
}

static uint32_t g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);

	if (index)
		return 0;

	/*
	 * The clock divider is based on the hrawclk, and we would like to run
	 * the AUX channel at 2MHz. So take the hrawclk value and divide by
	 * 2000 to get the divider.
	 */
	return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
}
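
/*
 * E.g. (illustrative, assuming a hypothetical 200000 kHz rawclk) the
 * divider comes out as DIV_ROUND_CLOSEST(200000, 2000) = 100.
 */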

static uint32_t ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);

	if (index)
		return 0;

	/*
	 * The clock divider is based on the cdclk or PCH rawclk, and we would
	 * like to run the AUX channel at 2MHz. So take the cdclk or PCH rawclk
	 * value and divide by 2000 to get the divider.
	 */
	if (intel_dig_port->port == PORT_A)
		return DIV_ROUND_CLOSEST(dev_priv->cdclk.hw.cdclk, 2000);
	else
		return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
}

static uint32_t hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);

	if (intel_dig_port->port != PORT_A && HAS_PCH_LPT_H(dev_priv)) {
		/* Workaround for non-ULT HSW */
		switch (index) {
		case 0: return 63;
		case 1: return 72;
		default: return 0;
		}
	}

	return ilk_get_aux_clock_divider(intel_dp, index);
}

static uint32_t skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
	/*
	 * SKL doesn't need us to program the AUX clock divider (Hardware will
	 * derive the clock from CDCLK automatically). We still implement the
	 * get_aux_clock_divider vfunc to plug into the existing code.
	 */
	return index ? 0 : 1;
}

static uint32_t g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
				     bool has_aux_irq,
				     int send_bytes,
				     uint32_t aux_clock_divider)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv =
			to_i915(intel_dig_port->base.base.dev);
	uint32_t precharge, timeout;

	if (IS_GEN6(dev_priv))
		precharge = 3;
	else
		precharge = 5;

	if (IS_BROADWELL(dev_priv) && intel_dig_port->port == PORT_A)
		timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
	else
		timeout = DP_AUX_CH_CTL_TIME_OUT_400us;

	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       timeout |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
	       (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
}

static uint32_t skl_get_aux_send_ctl(struct intel_dp *intel_dp,
				     bool has_aux_irq,
				     int send_bytes,
				     uint32_t unused)
{
	return DP_AUX_CH_CTL_SEND_BUSY |
	       DP_AUX_CH_CTL_DONE |
	       (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
	       DP_AUX_CH_CTL_TIME_OUT_ERROR |
	       DP_AUX_CH_CTL_TIME_OUT_1600us |
	       DP_AUX_CH_CTL_RECEIVE_ERROR |
	       (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
	       DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
	       DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
}

static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
		const uint8_t *send, int send_bytes,
		uint8_t *recv, int recv_size)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv =
			to_i915(intel_dig_port->base.base.dev);
	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t aux_clock_divider;
	int i, ret, recv_bytes;
	uint32_t status;
	int try, clock = 0;
	bool has_aux_irq = HAS_AUX_IRQ(dev_priv);
	bool vdd;

	pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to the upper
	 * layers to turn it off. But for e.g. i2c-dev access we need to turn
	 * it on/off ourselves.
	 */
	vdd = edp_panel_vdd_on(intel_dp);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	pm_qos_update_request(&dev_priv->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ_NOTRACE(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}

	if (try == 3) {
		static u32 last_status = -1;
		const u32 status = I915_READ(ch_ctl);

		if (status != last_status) {
			WARN(1, "dp_aux_ch not started status 0x%08x\n",
			     status);
			last_status = status;
		}

		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  has_aux_irq,
							  send_bytes,
							  aux_clock_divider);

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				I915_WRITE(intel_dp->aux_ch_data_reg[i >> 2],
					   intel_dp_pack_aux(send + i,
							     send_bytes - i));

			/* Send the command and wait for it to complete */
			I915_WRITE(ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);

			/* Clear done status and any errors */
			I915_WRITE(ch_ctl,
				   status |
				   DP_AUX_CH_CTL_DONE |
				   DP_AUX_CH_CTL_TIME_OUT_ERROR |
				   DP_AUX_CH_CTL_RECEIVE_ERROR);

			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
				continue;

			/* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2:
			 * a 400us delay is required after errors and timeouts.
			 * Timeout errors from the HW already meet this
			 * requirement, so skip to the next iteration.
			 */
			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
				usleep_range(400, 500);
				continue;
			}
			if (status & DP_AUX_CH_CTL_DONE)
				goto done;
		}
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		ret = -EBUSY;
		goto out;
	}

done:
	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected.
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);

	/*
	 * By BSpec: "Message sizes of 0 or >20 are not allowed."
	 * We have no idea what happened, so we return -EBUSY so that the
	 * drm layer takes care of the necessary retries.
	 */
	if (recv_bytes == 0 || recv_bytes > 20) {
		DRM_DEBUG_KMS("Forbidden recv_bytes = %d on aux transaction\n",
			      recv_bytes);
		/*
		 * FIXME: This patch was created on top of a series that
		 * organizes the retries at the drm level. There, EBUSY should
		 * also take care of the 1ms wait before retrying. That aux
		 * retry re-org is still needed, and once it is merged we can
		 * remove this sleep from here.
		 */
		usleep_range(1000, 1500);
		ret = -EBUSY;
		goto out;
	}

	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(I915_READ(intel_dp->aux_ch_data_reg[i >> 2]),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);

	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp);

	return ret;
}

#define BARE_ADDRESS_SIZE	3
#define HEADER_SIZE		(BARE_ADDRESS_SIZE + 1)
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	uint8_t txbuf[20], rxbuf[20];
	size_t txsize, rxsize;
	int ret;

	txbuf[0] = (msg->request << 4) |
		((msg->address >> 16) & 0xf);
	txbuf[1] = (msg->address >> 8) & 0xff;
	txbuf[2] = msg->address & 0xff;
	txbuf[3] = msg->size - 1;

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
	case DP_AUX_I2C_WRITE_STATUS_UPDATE:
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (WARN_ON(txsize > 20))
			return -E2BIG;

		WARN_ON(!msg->buffer != !msg->size);

		if (msg->buffer)
			memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;

			if (ret > 1) {
				/* Number of bytes written in a short write. */
				ret = clamp_t(int, rxbuf[1], 0, msg->size);
			} else {
				/* Return payload size. */
				ret = msg->size;
			}
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		rxsize = msg->size + 1;

		if (WARN_ON(rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_ch(intel_dp, txbuf, txsize, rxbuf, rxsize);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
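
/*
 * Example header (illustrative): a 16-byte native read from DPCD address
 * 0x00000 packs txbuf[] = { 0x90, 0x00, 0x00, 0x0f }, i.e.
 * DP_AUX_NATIVE_READ << 4 in the first byte and size - 1 in the last.
 */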

static enum port intel_aux_port(struct drm_i915_private *dev_priv,
				enum port port)
{
	const struct ddi_vbt_port_info *info =
		&dev_priv->vbt.ddi_port_info[port];
	enum port aux_port;

	if (!info->alternate_aux_channel) {
		DRM_DEBUG_KMS("using AUX %c for port %c (platform default)\n",
			      port_name(port), port_name(port));
		return port;
	}

	switch (info->alternate_aux_channel) {
	case DP_AUX_A:
		aux_port = PORT_A;
		break;
	case DP_AUX_B:
		aux_port = PORT_B;
		break;
	case DP_AUX_C:
		aux_port = PORT_C;
		break;
	case DP_AUX_D:
		aux_port = PORT_D;
		break;
	default:
		MISSING_CASE(info->alternate_aux_channel);
		aux_port = PORT_A;
		break;
	}

	DRM_DEBUG_KMS("using AUX %c for port %c (VBT)\n",
		      port_name(aux_port), port_name(port));

	return aux_port;
}

static i915_reg_t g4x_aux_ctl_reg(struct drm_i915_private *dev_priv,
				  enum port port)
{
	switch (port) {
	case PORT_B:
	case PORT_C:
	case PORT_D:
		return DP_AUX_CH_CTL(port);
	default:
		MISSING_CASE(port);
		return DP_AUX_CH_CTL(PORT_B);
	}
}

static i915_reg_t g4x_aux_data_reg(struct drm_i915_private *dev_priv,
				   enum port port, int index)
{
	switch (port) {
	case PORT_B:
	case PORT_C:
	case PORT_D:
		return DP_AUX_CH_DATA(port, index);
	default:
		MISSING_CASE(port);
		return DP_AUX_CH_DATA(PORT_B, index);
	}
}

static i915_reg_t ilk_aux_ctl_reg(struct drm_i915_private *dev_priv,
				  enum port port)
{
	switch (port) {
	case PORT_A:
		return DP_AUX_CH_CTL(port);
	case PORT_B:
	case PORT_C:
	case PORT_D:
		return PCH_DP_AUX_CH_CTL(port);
	default:
		MISSING_CASE(port);
		return DP_AUX_CH_CTL(PORT_A);
	}
}

static i915_reg_t ilk_aux_data_reg(struct drm_i915_private *dev_priv,
				   enum port port, int index)
{
	switch (port) {
	case PORT_A:
		return DP_AUX_CH_DATA(port, index);
	case PORT_B:
	case PORT_C:
	case PORT_D:
		return PCH_DP_AUX_CH_DATA(port, index);
	default:
		MISSING_CASE(port);
		return DP_AUX_CH_DATA(PORT_A, index);
	}
}

static i915_reg_t skl_aux_ctl_reg(struct drm_i915_private *dev_priv,
				  enum port port)
{
	switch (port) {
	case PORT_A:
	case PORT_B:
	case PORT_C:
	case PORT_D:
		return DP_AUX_CH_CTL(port);
	default:
		MISSING_CASE(port);
		return DP_AUX_CH_CTL(PORT_A);
	}
}

static i915_reg_t skl_aux_data_reg(struct drm_i915_private *dev_priv,
				   enum port port, int index)
{
	switch (port) {
	case PORT_A:
	case PORT_B:
	case PORT_C:
	case PORT_D:
		return DP_AUX_CH_DATA(port, index);
	default:
		MISSING_CASE(port);
		return DP_AUX_CH_DATA(PORT_A, index);
	}
}

static i915_reg_t intel_aux_ctl_reg(struct drm_i915_private *dev_priv,
				    enum port port)
{
	if (INTEL_INFO(dev_priv)->gen >= 9)
		return skl_aux_ctl_reg(dev_priv, port);
	else if (HAS_PCH_SPLIT(dev_priv))
		return ilk_aux_ctl_reg(dev_priv, port);
	else
		return g4x_aux_ctl_reg(dev_priv, port);
}

static i915_reg_t intel_aux_data_reg(struct drm_i915_private *dev_priv,
				     enum port port, int index)
{
	if (INTEL_INFO(dev_priv)->gen >= 9)
		return skl_aux_data_reg(dev_priv, port, index);
	else if (HAS_PCH_SPLIT(dev_priv))
		return ilk_aux_data_reg(dev_priv, port, index);
	else
		return g4x_aux_data_reg(dev_priv, port, index);
}

static void intel_aux_reg_init(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
	enum port port = intel_aux_port(dev_priv,
					dp_to_dig_port(intel_dp)->port);
	int i;

	intel_dp->aux_ch_ctl_reg = intel_aux_ctl_reg(dev_priv, port);
	for (i = 0; i < ARRAY_SIZE(intel_dp->aux_ch_data_reg); i++)
		intel_dp->aux_ch_data_reg[i] = intel_aux_data_reg(dev_priv, port, i);
}

static void
intel_dp_aux_fini(struct intel_dp *intel_dp)
{
	kfree(intel_dp->aux.name);
}

static void
intel_dp_aux_init(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->port;

	intel_aux_reg_init(intel_dp);
	drm_dp_aux_init(&intel_dp->aux);

	/* Failure to allocate our preferred name is not critical */
	intel_dp->aux.name = kasprintf(GFP_KERNEL, "DPDDC-%c", port_name(port));
	intel_dp->aux.transfer = intel_dp_aux_transfer;
}

bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);

	if ((IS_HASWELL(dev_priv) && !IS_HSW_ULX(dev_priv)) ||
	    IS_BROADWELL(dev_priv) || (INTEL_GEN(dev_priv) >= 9))
		return true;
	else
		return false;
}

static void
intel_dp_set_clock(struct intel_encoder *encoder,
		   struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	const struct dp_link_dpll *divisor = NULL;
	int i, count = 0;

	if (IS_G4X(dev_priv)) {
		divisor = gen4_dpll;
		count = ARRAY_SIZE(gen4_dpll);
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		divisor = pch_dpll;
		count = ARRAY_SIZE(pch_dpll);
	} else if (IS_CHERRYVIEW(dev_priv)) {
		divisor = chv_dpll;
		count = ARRAY_SIZE(chv_dpll);
	} else if (IS_VALLEYVIEW(dev_priv)) {
		divisor = vlv_dpll;
		count = ARRAY_SIZE(vlv_dpll);
	}

	if (divisor && count) {
		for (i = 0; i < count; i++) {
			if (pipe_config->port_clock == divisor[i].clock) {
				pipe_config->dpll = divisor[i].dpll;
				pipe_config->clock_set = true;
				break;
			}
		}
	}
}

static void snprintf_int_array(char *str, size_t len,
			       const int *array, int nelem)
{
	int i;

	str[0] = '\0';

	for (i = 0; i < nelem; i++) {
		int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);
		if (r >= len)
			return;
		str += r;
		len -= r;
	}
}
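
/* E.g. default_rates[] renders as the string "162000, 270000, 540000". */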

static void intel_dp_print_rates(struct intel_dp *intel_dp)
{
	int common_len;
	int common_rates[DP_MAX_SUPPORTED_RATES];
	char str[128]; /* FIXME: too big for stack? */

	if ((drm_debug & DRM_UT_KMS) == 0)
		return;

	snprintf_int_array(str, sizeof(str),
			   intel_dp->source_rates, intel_dp->num_source_rates);
	DRM_DEBUG_KMS("source rates: %s\n", str);

	snprintf_int_array(str, sizeof(str),
			   intel_dp->sink_rates, intel_dp->num_sink_rates);
	DRM_DEBUG_KMS("sink rates: %s\n", str);

	common_len = intel_dp_common_rates(intel_dp, common_rates);
	snprintf_int_array(str, sizeof(str), common_rates, common_len);
	DRM_DEBUG_KMS("common rates: %s\n", str);
}

bool
__intel_dp_read_desc(struct intel_dp *intel_dp, struct intel_dp_desc *desc)
{
	u32 base = drm_dp_is_branch(intel_dp->dpcd) ? DP_BRANCH_OUI :
		DP_SINK_OUI;

	return drm_dp_dpcd_read(&intel_dp->aux, base, desc, sizeof(*desc)) ==
	       sizeof(*desc);
}

bool intel_dp_read_desc(struct intel_dp *intel_dp)
{
	struct intel_dp_desc *desc = &intel_dp->desc;
	bool oui_sup = intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] &
		       DP_OUI_SUPPORT;
	int dev_id_len;

	if (!__intel_dp_read_desc(intel_dp, desc))
		return false;

	dev_id_len = strnlen(desc->device_id, sizeof(desc->device_id));
	DRM_DEBUG_KMS("DP %s: OUI %*phD%s dev-ID %*pE HW-rev %d.%d SW-rev %d.%d\n",
		      drm_dp_is_branch(intel_dp->dpcd) ? "branch" : "sink",
		      (int)sizeof(desc->oui), desc->oui, oui_sup ? "" : "(NS)",
		      dev_id_len, desc->device_id,
		      desc->hw_rev >> 4, desc->hw_rev & 0xf,
		      desc->sw_major_rev, desc->sw_minor_rev);

	return true;
}

int
intel_dp_max_link_rate(struct intel_dp *intel_dp)
{
	int rates[DP_MAX_SUPPORTED_RATES] = {};
	int len;

	len = intel_dp_common_rates(intel_dp, rates);
	if (WARN_ON(len <= 0))
		return 162000;

	return rates[len - 1];
}

int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
{
	int i = intel_dp_rate_index(intel_dp->sink_rates,
				    intel_dp->num_sink_rates, rate);

	if (WARN_ON(i < 0))
		i = 0;

	return i;
}

void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
			   uint8_t *link_bw, uint8_t *rate_select)
{
	/* eDP 1.4 rate select method. */
	if (intel_dp->use_rate_select) {
		*link_bw = 0;
		*rate_select =
			intel_dp_rate_select(intel_dp, port_clock);
	} else {
		*link_bw = drm_dp_link_rate_to_bw_code(port_clock);
		*rate_select = 0;
	}
}
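
/*
 * E.g. (illustrative): a 270000 kHz port clock maps to link_bw 0x0a
 * (DP_LINK_BW_2_7) with rate_select 0, while an eDP 1.4 sink using the
 * rate select method gets link_bw 0 and the sink_rates[] index instead.
 */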

static int intel_dp_compute_bpp(struct intel_dp *intel_dp,
				struct intel_crtc_state *pipe_config)
{
	int bpp, bpc;

	bpp = pipe_config->pipe_bpp;
	bpc = drm_dp_downstream_max_bpc(intel_dp->dpcd, intel_dp->downstream_ports);

	if (bpc > 0)
		bpp = min(bpp, 3*bpc);

	/* For DP Compliance we override the computed bpp for the pipe */
	if (intel_dp->compliance.test_data.bpc != 0) {
		pipe_config->pipe_bpp = 3*intel_dp->compliance.test_data.bpc;
		pipe_config->dither_force_disable = pipe_config->pipe_bpp == 6*3;
		DRM_DEBUG_KMS("Setting pipe_bpp to %d\n",
			      pipe_config->pipe_bpp);
	}
	return bpp;
}
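
/* E.g. a downstream port limited to 6 bpc clamps the pipe to 3*6 = 18 bpp. */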

bool
intel_dp_compute_config(struct intel_encoder *encoder,
			struct intel_crtc_state *pipe_config,
			struct drm_connector_state *conn_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	int lane_count, clock;
	int min_lane_count = 1;
	int max_lane_count = intel_dp_max_lane_count(intel_dp);
	/* Conveniently, the link BW constants become indices with a shift... */
	int min_clock = 0;
	int max_clock;
	int link_rate_index;
	int bpp, mode_rate;
	int link_avail, link_clock;
	int common_rates[DP_MAX_SUPPORTED_RATES] = {};
	int common_len;
	uint8_t link_bw, rate_select;

	common_len = intel_dp_common_rates(intel_dp, common_rates);

	/* No common link rates between source and sink */
	WARN_ON(common_len <= 0);

	max_clock = common_len - 1;

	if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A)
		pipe_config->has_pch_encoder = true;

	pipe_config->has_drrs = false;
	pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;

	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
				       adjusted_mode);

		if (INTEL_GEN(dev_priv) >= 9) {
			int ret;
			ret = skl_update_scaler_crtc(pipe_config);
			if (ret)
				return false;
		}

		if (HAS_GMCH_DISPLAY(dev_priv))
			intel_gmch_panel_fitting(intel_crtc, pipe_config,
						 intel_connector->panel.fitting_mode);
		else
			intel_pch_panel_fitting(intel_crtc, pipe_config,
						intel_connector->panel.fitting_mode);
	}

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
		return false;

	/* Use values requested by Compliance Test Request */
	if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
		link_rate_index = intel_dp_link_rate_index(intel_dp,
							   common_rates,
							   intel_dp->compliance.test_link_rate);
		if (link_rate_index >= 0)
			min_clock = max_clock = link_rate_index;
		min_lane_count = max_lane_count = intel_dp->compliance.test_lane_count;
	}
	DRM_DEBUG_KMS("DP link computation with max lane count %i "
		      "max bw %d pixel clock %iKHz\n",
		      max_lane_count, common_rates[max_clock],
		      adjusted_mode->crtc_clock);

	/* Walk through all bpp values. Luckily they're all nicely spaced with 2
	 * bpc in between. */
	bpp = intel_dp_compute_bpp(intel_dp, pipe_config);
	if (is_edp(intel_dp)) {

		/* Get bpp from vbt only for panels that don't have bpp in edid */
		if (intel_connector->base.display_info.bpc == 0 &&
		    (dev_priv->vbt.edp.bpp && dev_priv->vbt.edp.bpp < bpp)) {
			DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
				      dev_priv->vbt.edp.bpp);
			bpp = dev_priv->vbt.edp.bpp;
		}

		/*
		 * Use the maximum clock and number of lanes the eDP panel
		 * advertises being capable of. The panels are generally
		 * designed to support only a single clock and lane
		 * configuration, and typically these values correspond to the
		 * native resolution of the panel.
		 */
		min_lane_count = max_lane_count;
		min_clock = max_clock;
	}

	for (; bpp >= 6*3; bpp -= 2*3) {
		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
						   bpp);

		for (clock = min_clock; clock <= max_clock; clock++) {
			for (lane_count = min_lane_count;
			     lane_count <= max_lane_count;
			     lane_count <<= 1) {

				link_clock = common_rates[clock];
				link_avail = intel_dp_max_data_rate(link_clock,
								    lane_count);

				if (mode_rate <= link_avail) {
					goto found;
				}
			}
		}
	}

	return false;

found:
	if (intel_dp->color_range_auto) {
		/*
		 * See:
		 * CEA-861-E - 5.1 Default Encoding Parameters
		 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
		 */
		pipe_config->limited_color_range =
			bpp != 18 &&
			drm_default_rgb_quant_range(adjusted_mode) ==
			HDMI_QUANTIZATION_RANGE_LIMITED;
	} else {
		pipe_config->limited_color_range =
			intel_dp->limited_color_range;
	}

	pipe_config->lane_count = lane_count;

	pipe_config->pipe_bpp = bpp;
	pipe_config->port_clock = common_rates[clock];

	intel_dp_compute_rate(intel_dp, pipe_config->port_clock,
			      &link_bw, &rate_select);

	DRM_DEBUG_KMS("DP link bw %02x rate select %02x lane count %d clock %d bpp %d\n",
		      link_bw, rate_select, pipe_config->lane_count,
		      pipe_config->port_clock, bpp);
	DRM_DEBUG_KMS("DP link bw required %i available %i\n",
		      mode_rate, link_avail);

	intel_link_compute_m_n(bpp, lane_count,
			       adjusted_mode->crtc_clock,
			       pipe_config->port_clock,
			       &pipe_config->dp_m_n);

	if (intel_connector->panel.downclock_mode != NULL &&
	    dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
		pipe_config->has_drrs = true;
		intel_link_compute_m_n(bpp, lane_count,
				       intel_connector->panel.downclock_mode->clock,
				       pipe_config->port_clock,
				       &pipe_config->dp_m2_n2);
	}

	/*
	 * DPLL0 VCO may need to be adjusted to get the correct
	 * clock for eDP. This will affect cdclk as well.
	 */
	if (is_edp(intel_dp) && IS_GEN9_BC(dev_priv)) {
		int vco;

		switch (pipe_config->port_clock / 2) {
		case 108000:
		case 216000:
			vco = 8640000;
			break;
		default:
			vco = 8100000;
			break;
		}

		to_intel_atomic_state(pipe_config->base.state)->cdclk.logical.vco = vco;
	}

	if (!HAS_DDI(dev_priv))
		intel_dp_set_clock(encoder, pipe_config);

	return true;
}

void intel_dp_set_link_params(struct intel_dp *intel_dp,
			      int link_rate, uint8_t lane_count,
			      bool link_mst)
{
	intel_dp->link_rate = link_rate;
	intel_dp->lane_count = lane_count;
	intel_dp->link_mst = link_mst;
}

static void intel_dp_prepare(struct intel_encoder *encoder,
			     struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;

	intel_dp_set_link_params(intel_dp, pipe_config->port_clock,
				 pipe_config->lane_count,
				 intel_crtc_has_type(pipe_config,
						     INTEL_OUTPUT_DP_MST));

	/*
	 * There are four kinds of DP registers:
	 *
	 *	IBX PCH
	 *	SNB CPU
	 *	IVB CPU
	 *	CPT PCH
	 *
	 * IBX PCH and CPU are the same for almost everything,
	 * except that the CPU DP PLL is configured in this
	 * register
	 *
	 * CPT PCH is quite different, having many bits moved
	 * to the TRANS_DP_CTL register instead. That
	 * configuration happens (oddly) in ironlake_pch_enable
	 */

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;

	/* Handle DP bits in common between all three register formats */
	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	intel_dp->DP |= DP_PORT_WIDTH(pipe_config->lane_count);

	/* Split out the IBX/CPU vs CPT settings */

	if (IS_GEN7(dev_priv) && port == PORT_A) {
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		intel_dp->DP |= crtc->pipe << 29;
	} else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
		u32 trans_dp;

		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			trans_dp |= TRANS_DP_ENH_FRAMING;
		else
			trans_dp &= ~TRANS_DP_ENH_FRAMING;
		I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
	} else {
		if (IS_G4X(dev_priv) && pipe_config->limited_color_range)
			intel_dp->DP |= DP_COLOR_RANGE_16_235;

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		if (IS_CHERRYVIEW(dev_priv))
			intel_dp->DP |= DP_PIPE_SELECT_CHV(crtc->pipe);
		else if (crtc->pipe == PIPE_B)
			intel_dp->DP |= DP_PIPEB_SELECT;
	}
}
1901
1902 #define IDLE_ON_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
1903 #define IDLE_ON_VALUE (PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
1904
1905 #define IDLE_OFF_MASK (PP_ON | PP_SEQUENCE_MASK | 0 | 0)
1906 #define IDLE_OFF_VALUE (0 | PP_SEQUENCE_NONE | 0 | 0)
1907
1908 #define IDLE_CYCLE_MASK (PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
1909 #define IDLE_CYCLE_VALUE (0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
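
/*
 * Editor's sketch of how the IDLE_* mask/value pairs above are used:
 * wait_panel_status() below polls the panel power status register until
 * the masked bits equal the expected value, conceptually
 *
 *	while ((I915_READ(pp_stat_reg) & IDLE_ON_MASK) != IDLE_ON_VALUE)
 *		;
 *
 * except that the real code uses intel_wait_for_register() with a
 * 5000 ms timeout instead of busy-waiting.
 */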
1910
1911 static void intel_pps_verify_state(struct drm_i915_private *dev_priv,
1912 struct intel_dp *intel_dp);
1913
1914 static void wait_panel_status(struct intel_dp *intel_dp,
1915 u32 mask,
1916 u32 value)
1917 {
1918 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1919 struct drm_i915_private *dev_priv = to_i915(dev);
1920 i915_reg_t pp_stat_reg, pp_ctrl_reg;
1921
1922 lockdep_assert_held(&dev_priv->pps_mutex);
1923
1924 intel_pps_verify_state(dev_priv, intel_dp);
1925
1926 pp_stat_reg = _pp_stat_reg(intel_dp);
1927 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
1928
1929 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
1930 mask, value,
1931 I915_READ(pp_stat_reg),
1932 I915_READ(pp_ctrl_reg));
1933
1934 if (intel_wait_for_register(dev_priv,
1935 pp_stat_reg, mask, value,
1936 5000))
1937 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
1938 I915_READ(pp_stat_reg),
1939 I915_READ(pp_ctrl_reg));
1940
1941 DRM_DEBUG_KMS("Wait complete\n");
1942 }
1943
1944 static void wait_panel_on(struct intel_dp *intel_dp)
1945 {
1946 DRM_DEBUG_KMS("Wait for panel power on\n");
1947 wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
1948 }
1949
1950 static void wait_panel_off(struct intel_dp *intel_dp)
1951 {
1952 DRM_DEBUG_KMS("Wait for panel power off time\n");
1953 wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
1954 }
1955
1956 static void wait_panel_power_cycle(struct intel_dp *intel_dp)
1957 {
1958 ktime_t panel_power_on_time;
1959 s64 panel_power_off_duration;
1960
1961 DRM_DEBUG_KMS("Wait for panel power cycle\n");
1962
1963 /* Take the difference of the current time and the panel power off time,
1964 * then make the panel wait for t11_t12 if needed. */
1965 panel_power_on_time = ktime_get_boottime();
1966 panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->panel_power_off_time);
1967
1968 /* When we were the last to disable the VDD override bit, we have to
1969 * do the wait manually. */
1970 if (panel_power_off_duration < (s64)intel_dp->panel_power_cycle_delay)
1971 wait_remaining_ms_from_jiffies(jiffies,
1972 intel_dp->panel_power_cycle_delay - panel_power_off_duration);
1973
1974 wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
1975 }
1976
1977 static void wait_backlight_on(struct intel_dp *intel_dp)
1978 {
1979 wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
1980 intel_dp->backlight_on_delay);
1981 }
1982
1983 static void edp_wait_backlight_off(struct intel_dp *intel_dp)
1984 {
1985 wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
1986 intel_dp->backlight_off_delay);
1987 }
1988
1989 /* Read the current pp_control value, unlocking the register if it
1990 * is locked
1991 */
1992
1993 static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
1994 {
1995 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1996 struct drm_i915_private *dev_priv = to_i915(dev);
1997 u32 control;
1998
1999 lockdep_assert_held(&dev_priv->pps_mutex);
2000
2001 control = I915_READ(_pp_ctrl_reg(intel_dp));
2002 if (WARN_ON(!HAS_DDI(dev_priv) &&
2003 (control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) {
2004 control &= ~PANEL_UNLOCK_MASK;
2005 control |= PANEL_UNLOCK_REGS;
2006 }
2007 return control;
2008 }
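/*
 * Editor's note: on non-DDI platforms a write to PP_CONTROL only takes
 * effect when the unlock key is present in the register's high bits;
 * PANEL_UNLOCK_REGS is that key (0xabcd << 16 in i915_reg.h), which is
 * why the helper above patches it into the value it returns.
 */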
2009
2010 /*
2011 * Must be paired with edp_panel_vdd_off().
2012 * Must hold pps_mutex around the whole on/off sequence.
2013 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
2014 */
2015 static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
2016 {
2017 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2018 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2019 struct drm_i915_private *dev_priv = to_i915(dev);
2020 u32 pp;
2021 i915_reg_t pp_stat_reg, pp_ctrl_reg;
2022 bool need_to_disable = !intel_dp->want_panel_vdd;
2023
2024 lockdep_assert_held(&dev_priv->pps_mutex);
2025
2026 if (!is_edp(intel_dp))
2027 return false;
2028
2029 cancel_delayed_work(&intel_dp->panel_vdd_work);
2030 intel_dp->want_panel_vdd = true;
2031
2032 if (edp_have_panel_vdd(intel_dp))
2033 return need_to_disable;
2034
2035 intel_display_power_get(dev_priv, intel_dp->aux_power_domain);
2036
2037 DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
2038 port_name(intel_dig_port->port));
2039
2040 if (!edp_have_panel_power(intel_dp))
2041 wait_panel_power_cycle(intel_dp);
2042
2043 pp = ironlake_get_pp_control(intel_dp);
2044 pp |= EDP_FORCE_VDD;
2045
2046 pp_stat_reg = _pp_stat_reg(intel_dp);
2047 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2048
2049 I915_WRITE(pp_ctrl_reg, pp);
2050 POSTING_READ(pp_ctrl_reg);
2051 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
2052 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
2053 /*
2054 * If the panel wasn't on, delay before accessing aux channel
2055 */
2056 if (!edp_have_panel_power(intel_dp)) {
2057 DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
2058 port_name(intel_dig_port->port));
2059 msleep(intel_dp->panel_power_up_delay);
2060 }
2061
2062 return need_to_disable;
2063 }
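/*
 * Typical calling pattern for the helper above (sketch only; it mirrors
 * what intel_enable_dp() does later in this file):
 *
 *	pps_lock(intel_dp);
 *	edp_panel_vdd_on(intel_dp);
 *	edp_panel_on(intel_dp);
 *	edp_panel_vdd_off(intel_dp, true);
 *	pps_unlock(intel_dp);
 */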
2064
2065 /*
2066 * Must be paired with intel_edp_panel_vdd_off() or
2067 * intel_edp_panel_off().
2068 * Nested calls to these functions are not allowed since
2069 * we drop the lock. Caller must use some higher level
2070 * locking to prevent nested calls from other threads.
2071 */
2072 void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
2073 {
2074 bool vdd;
2075
2076 if (!is_edp(intel_dp))
2077 return;
2078
2079 pps_lock(intel_dp);
2080 vdd = edp_panel_vdd_on(intel_dp);
2081 pps_unlock(intel_dp);
2082
2083 I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
2084 port_name(dp_to_dig_port(intel_dp)->port));
2085 }
2086
2087 static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
2088 {
2089 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2090 struct drm_i915_private *dev_priv = to_i915(dev);
2091 struct intel_digital_port *intel_dig_port =
2092 dp_to_dig_port(intel_dp);
2093 u32 pp;
2094 i915_reg_t pp_stat_reg, pp_ctrl_reg;
2095
2096 lockdep_assert_held(&dev_priv->pps_mutex);
2097
2098 WARN_ON(intel_dp->want_panel_vdd);
2099
2100 if (!edp_have_panel_vdd(intel_dp))
2101 return;
2102
2103 DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
2104 port_name(intel_dig_port->port));
2105
2106 pp = ironlake_get_pp_control(intel_dp);
2107 pp &= ~EDP_FORCE_VDD;
2108
2109 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2110 pp_stat_reg = _pp_stat_reg(intel_dp);
2111
2112 I915_WRITE(pp_ctrl_reg, pp);
2113 POSTING_READ(pp_ctrl_reg);
2114
2115 /* Make sure sequencer is idle before allowing subsequent activity */
2116 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
2117 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
2118
2119 if ((pp & PANEL_POWER_ON) == 0)
2120 intel_dp->panel_power_off_time = ktime_get_boottime();
2121
2122 intel_display_power_put(dev_priv, intel_dp->aux_power_domain);
2123 }
2124
2125 static void edp_panel_vdd_work(struct work_struct *__work)
2126 {
2127 struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
2128 struct intel_dp, panel_vdd_work);
2129
2130 pps_lock(intel_dp);
2131 if (!intel_dp->want_panel_vdd)
2132 edp_panel_vdd_off_sync(intel_dp);
2133 pps_unlock(intel_dp);
2134 }
2135
2136 static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
2137 {
2138 unsigned long delay;
2139
2140 /*
2141 * Queue the timer to fire a long time from now (relative to the power
2142 * down delay) to keep the panel power up across a sequence of
2143 * operations.
2144 */
2145 delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
2146 schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
2147 }
2148
2149 /*
2150 * Must be paired with edp_panel_vdd_on().
2151 * Must hold pps_mutex around the whole on/off sequence.
2152 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
2153 */
2154 static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
2155 {
2156 struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
2157
2158 lockdep_assert_held(&dev_priv->pps_mutex);
2159
2160 if (!is_edp(intel_dp))
2161 return;
2162
2163 I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
2164 port_name(dp_to_dig_port(intel_dp)->port));
2165
2166 intel_dp->want_panel_vdd = false;
2167
2168 if (sync)
2169 edp_panel_vdd_off_sync(intel_dp);
2170 else
2171 edp_panel_vdd_schedule_off(intel_dp);
2172 }
2173
2174 static void edp_panel_on(struct intel_dp *intel_dp)
2175 {
2176 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2177 struct drm_i915_private *dev_priv = to_i915(dev);
2178 u32 pp;
2179 i915_reg_t pp_ctrl_reg;
2180
2181 lockdep_assert_held(&dev_priv->pps_mutex);
2182
2183 if (!is_edp(intel_dp))
2184 return;
2185
2186 DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
2187 port_name(dp_to_dig_port(intel_dp)->port));
2188
2189 if (WARN(edp_have_panel_power(intel_dp),
2190 "eDP port %c panel power already on\n",
2191 port_name(dp_to_dig_port(intel_dp)->port)))
2192 return;
2193
2194 wait_panel_power_cycle(intel_dp);
2195
2196 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2197 pp = ironlake_get_pp_control(intel_dp);
2198 if (IS_GEN5(dev_priv)) {
2199 /* ILK workaround: disable reset around power sequence */
2200 pp &= ~PANEL_POWER_RESET;
2201 I915_WRITE(pp_ctrl_reg, pp);
2202 POSTING_READ(pp_ctrl_reg);
2203 }
2204
2205 pp |= PANEL_POWER_ON;
2206 if (!IS_GEN5(dev_priv))
2207 pp |= PANEL_POWER_RESET;
2208
2209 I915_WRITE(pp_ctrl_reg, pp);
2210 POSTING_READ(pp_ctrl_reg);
2211
2212 wait_panel_on(intel_dp);
2213 intel_dp->last_power_on = jiffies;
2214
2215 if (IS_GEN5(dev_priv)) {
2216 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
2217 I915_WRITE(pp_ctrl_reg, pp);
2218 POSTING_READ(pp_ctrl_reg);
2219 }
2220 }
2221
2222 void intel_edp_panel_on(struct intel_dp *intel_dp)
2223 {
2224 if (!is_edp(intel_dp))
2225 return;
2226
2227 pps_lock(intel_dp);
2228 edp_panel_on(intel_dp);
2229 pps_unlock(intel_dp);
2230 }
2231
2232
2233 static void edp_panel_off(struct intel_dp *intel_dp)
2234 {
2235 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2236 struct drm_i915_private *dev_priv = to_i915(dev);
2237 u32 pp;
2238 i915_reg_t pp_ctrl_reg;
2239
2240 lockdep_assert_held(&dev_priv->pps_mutex);
2241
2242 if (!is_edp(intel_dp))
2243 return;
2244
2245 DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
2246 port_name(dp_to_dig_port(intel_dp)->port));
2247
2248 WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
2249 port_name(dp_to_dig_port(intel_dp)->port));
2250
2251 pp = ironlake_get_pp_control(intel_dp);
2252 /* We need to switch off panel power _and_ force vdd, otherwise some
2253 * panels get very unhappy and cease to work. */
2254 pp &= ~(PANEL_POWER_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
2255 EDP_BLC_ENABLE);
2256
2257 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2258
2259 intel_dp->want_panel_vdd = false;
2260
2261 I915_WRITE(pp_ctrl_reg, pp);
2262 POSTING_READ(pp_ctrl_reg);
2263
2264 intel_dp->panel_power_off_time = ktime_get_boottime();
2265 wait_panel_off(intel_dp);
2266
2267 /* We got a reference when we enabled the VDD. */
2268 intel_display_power_put(dev_priv, intel_dp->aux_power_domain);
2269 }
2270
2271 void intel_edp_panel_off(struct intel_dp *intel_dp)
2272 {
2273 if (!is_edp(intel_dp))
2274 return;
2275
2276 pps_lock(intel_dp);
2277 edp_panel_off(intel_dp);
2278 pps_unlock(intel_dp);
2279 }
2280
2281 /* Enable backlight in the panel power control. */
2282 static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
2283 {
2284 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2285 struct drm_device *dev = intel_dig_port->base.base.dev;
2286 struct drm_i915_private *dev_priv = to_i915(dev);
2287 u32 pp;
2288 i915_reg_t pp_ctrl_reg;
2289
2290 /*
2291 * If we enable the backlight right away following a panel power
2292 * on, we may see slight flicker as the panel syncs with the eDP
2293 * link. So delay a bit to make sure the image is solid before
2294 * allowing it to appear.
2295 */
2296 wait_backlight_on(intel_dp);
2297
2298 pps_lock(intel_dp);
2299
2300 pp = ironlake_get_pp_control(intel_dp);
2301 pp |= EDP_BLC_ENABLE;
2302
2303 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2304
2305 I915_WRITE(pp_ctrl_reg, pp);
2306 POSTING_READ(pp_ctrl_reg);
2307
2308 pps_unlock(intel_dp);
2309 }
2310
2311 /* Enable backlight PWM and backlight PP control. */
2312 void intel_edp_backlight_on(struct intel_dp *intel_dp)
2313 {
2314 if (!is_edp(intel_dp))
2315 return;
2316
2317 DRM_DEBUG_KMS("\n");
2318
2319 intel_panel_enable_backlight(intel_dp->attached_connector);
2320 _intel_edp_backlight_on(intel_dp);
2321 }
2322
2323 /* Disable backlight in the panel power control. */
2324 static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
2325 {
2326 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2327 struct drm_i915_private *dev_priv = to_i915(dev);
2328 u32 pp;
2329 i915_reg_t pp_ctrl_reg;
2330
2331 if (!is_edp(intel_dp))
2332 return;
2333
2334 pps_lock(intel_dp);
2335
2336 pp = ironlake_get_pp_control(intel_dp);
2337 pp &= ~EDP_BLC_ENABLE;
2338
2339 pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
2340
2341 I915_WRITE(pp_ctrl_reg, pp);
2342 POSTING_READ(pp_ctrl_reg);
2343
2344 pps_unlock(intel_dp);
2345
2346 intel_dp->last_backlight_off = jiffies;
2347 edp_wait_backlight_off(intel_dp);
2348 }
2349
2350 /* Disable backlight PP control and backlight PWM. */
2351 void intel_edp_backlight_off(struct intel_dp *intel_dp)
2352 {
2353 if (!is_edp(intel_dp))
2354 return;
2355
2356 DRM_DEBUG_KMS("\n");
2357
2358 _intel_edp_backlight_off(intel_dp);
2359 intel_panel_disable_backlight(intel_dp->attached_connector);
2360 }
2361
2362 /*
2363 * Hook for controlling the panel power control backlight through the bl_power
2364 * sysfs attribute. Take care to handle multiple calls.
2365 */
2366 static void intel_edp_backlight_power(struct intel_connector *connector,
2367 bool enable)
2368 {
2369 struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
2370 bool is_enabled;
2371
2372 pps_lock(intel_dp);
2373 is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
2374 pps_unlock(intel_dp);
2375
2376 if (is_enabled == enable)
2377 return;
2378
2379 DRM_DEBUG_KMS("panel power control backlight %s\n",
2380 enable ? "enable" : "disable");
2381
2382 if (enable)
2383 _intel_edp_backlight_on(intel_dp);
2384 else
2385 _intel_edp_backlight_off(intel_dp);
2386 }
2387
2388 static void assert_dp_port(struct intel_dp *intel_dp, bool state)
2389 {
2390 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
2391 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
2392 bool cur_state = I915_READ(intel_dp->output_reg) & DP_PORT_EN;
2393
2394 I915_STATE_WARN(cur_state != state,
2395 "DP port %c state assertion failure (expected %s, current %s)\n",
2396 port_name(dig_port->port),
2397 onoff(state), onoff(cur_state));
2398 }
2399 #define assert_dp_port_disabled(d) assert_dp_port((d), false)
2400
2401 static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
2402 {
2403 bool cur_state = I915_READ(DP_A) & DP_PLL_ENABLE;
2404
2405 I915_STATE_WARN(cur_state != state,
2406 "eDP PLL state assertion failure (expected %s, current %s)\n",
2407 onoff(state), onoff(cur_state));
2408 }
2409 #define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
2410 #define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
2411
2412 static void ironlake_edp_pll_on(struct intel_dp *intel_dp,
2413 struct intel_crtc_state *pipe_config)
2414 {
2415 struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
2416 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2417
2418 assert_pipe_disabled(dev_priv, crtc->pipe);
2419 assert_dp_port_disabled(intel_dp);
2420 assert_edp_pll_disabled(dev_priv);
2421
2422 DRM_DEBUG_KMS("enabling eDP PLL for clock %d\n",
2423 pipe_config->port_clock);
2424
2425 intel_dp->DP &= ~DP_PLL_FREQ_MASK;
2426
2427 if (pipe_config->port_clock == 162000)
2428 intel_dp->DP |= DP_PLL_FREQ_162MHZ;
2429 else
2430 intel_dp->DP |= DP_PLL_FREQ_270MHZ;
2431
2432 I915_WRITE(DP_A, intel_dp->DP);
2433 POSTING_READ(DP_A);
2434 udelay(500);
2435
2436 /*
2437 * [DevILK] Workaround required when enabling DP PLL
2438 * while a pipe is enabled going to FDI:
2439 * 1. Wait for the start of vertical blank on the enabled pipe going to FDI
2440 * 2. Program DP PLL enable
2441 */
2442 if (IS_GEN5(dev_priv))
2443 intel_wait_for_vblank_if_active(dev_priv, !crtc->pipe);
2444
2445 intel_dp->DP |= DP_PLL_ENABLE;
2446
2447 I915_WRITE(DP_A, intel_dp->DP);
2448 POSTING_READ(DP_A);
2449 udelay(200);
2450 }
2451
2452 static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
2453 {
2454 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2455 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
2456 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2457
2458 assert_pipe_disabled(dev_priv, crtc->pipe);
2459 assert_dp_port_disabled(intel_dp);
2460 assert_edp_pll_enabled(dev_priv);
2461
2462 DRM_DEBUG_KMS("disabling eDP PLL\n");
2463
2464 intel_dp->DP &= ~DP_PLL_ENABLE;
2465
2466 I915_WRITE(DP_A, intel_dp->DP);
2467 POSTING_READ(DP_A);
2468 udelay(200);
2469 }
2470
2471 /* If the sink supports it, try to set the power state appropriately */
2472 void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
2473 {
2474 int ret, i;
2475
2476 /* Should have a valid DPCD by this point */
2477 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
2478 return;
2479
2480 if (mode != DRM_MODE_DPMS_ON) {
2481 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2482 DP_SET_POWER_D3);
2483 } else {
2484 struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
2485
2486 /*
2487 * When turning on, we need to retry a few times, sleeping 1 ms
2488 * between attempts, to give the sink time to wake up.
2489 */
2490 for (i = 0; i < 3; i++) {
2491 ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
2492 DP_SET_POWER_D0);
2493 if (ret == 1)
2494 break;
2495 msleep(1);
2496 }
2497
2498 if (ret == 1 && lspcon->active)
2499 lspcon_wait_pcon_mode(lspcon);
2500 }
2501
2502 if (ret != 1)
2503 DRM_DEBUG_KMS("failed to %s sink power state\n",
2504 mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
2505 }
2506
2507 static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
2508 enum pipe *pipe)
2509 {
2510 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2511 enum port port = dp_to_dig_port(intel_dp)->port;
2512 struct drm_device *dev = encoder->base.dev;
2513 struct drm_i915_private *dev_priv = to_i915(dev);
2514 u32 tmp;
2515 bool ret;
2516
2517 if (!intel_display_power_get_if_enabled(dev_priv,
2518 encoder->power_domain))
2519 return false;
2520
2521 ret = false;
2522
2523 tmp = I915_READ(intel_dp->output_reg);
2524
2525 if (!(tmp & DP_PORT_EN))
2526 goto out;
2527
2528 if (IS_GEN7(dev_priv) && port == PORT_A) {
2529 *pipe = PORT_TO_PIPE_CPT(tmp);
2530 } else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
2531 enum pipe p;
2532
2533 for_each_pipe(dev_priv, p) {
2534 u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
2535 if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
2536 *pipe = p;
2537 ret = true;
2538
2539 goto out;
2540 }
2541 }
2542
2543 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
2544 i915_mmio_reg_offset(intel_dp->output_reg));
2545 } else if (IS_CHERRYVIEW(dev_priv)) {
2546 *pipe = DP_PORT_TO_PIPE_CHV(tmp);
2547 } else {
2548 *pipe = PORT_TO_PIPE(tmp);
2549 }
2550
2551 ret = true;
2552
2553 out:
2554 intel_display_power_put(dev_priv, encoder->power_domain);
2555
2556 return ret;
2557 }
2558
2559 static void intel_dp_get_config(struct intel_encoder *encoder,
2560 struct intel_crtc_state *pipe_config)
2561 {
2562 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2563 u32 tmp, flags = 0;
2564 struct drm_device *dev = encoder->base.dev;
2565 struct drm_i915_private *dev_priv = to_i915(dev);
2566 enum port port = dp_to_dig_port(intel_dp)->port;
2567 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2568
2569 tmp = I915_READ(intel_dp->output_reg);
2570
2571 pipe_config->has_audio = (tmp & DP_AUDIO_OUTPUT_ENABLE) && port != PORT_A;
2572
2573 if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
2574 u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
2575
2576 if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
2577 flags |= DRM_MODE_FLAG_PHSYNC;
2578 else
2579 flags |= DRM_MODE_FLAG_NHSYNC;
2580
2581 if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
2582 flags |= DRM_MODE_FLAG_PVSYNC;
2583 else
2584 flags |= DRM_MODE_FLAG_NVSYNC;
2585 } else {
2586 if (tmp & DP_SYNC_HS_HIGH)
2587 flags |= DRM_MODE_FLAG_PHSYNC;
2588 else
2589 flags |= DRM_MODE_FLAG_NHSYNC;
2590
2591 if (tmp & DP_SYNC_VS_HIGH)
2592 flags |= DRM_MODE_FLAG_PVSYNC;
2593 else
2594 flags |= DRM_MODE_FLAG_NVSYNC;
2595 }
2596
2597 pipe_config->base.adjusted_mode.flags |= flags;
2598
2599 if (IS_G4X(dev_priv) && tmp & DP_COLOR_RANGE_16_235)
2600 pipe_config->limited_color_range = true;
2601
2602 pipe_config->lane_count =
2603 ((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;
2604
2605 intel_dp_get_m_n(crtc, pipe_config);
2606
2607 if (port == PORT_A) {
2608 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
2609 pipe_config->port_clock = 162000;
2610 else
2611 pipe_config->port_clock = 270000;
2612 }
2613
2614 pipe_config->base.adjusted_mode.crtc_clock =
2615 intel_dotclock_calculate(pipe_config->port_clock,
2616 &pipe_config->dp_m_n);
2617
2618 if (is_edp(intel_dp) && dev_priv->vbt.edp.bpp &&
2619 pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) {
2620 /*
2621 * This is a big fat ugly hack.
2622 *
2623 * Some machines in UEFI boot mode provide us a VBT that has 18
2624 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
2625 * unknown we fail to light up. Yet the same BIOS boots up with
2626 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
2627 * max, not what it tells us to use.
2628 *
2629 * Note: This will still be broken if the eDP panel is not lit
2630 * up by the BIOS, and thus we can't get the mode at module
2631 * load.
2632 */
2633 DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
2634 pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp);
2635 dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp;
2636 }
2637 }
2638
2639 static void intel_disable_dp(struct intel_encoder *encoder,
2640 struct intel_crtc_state *old_crtc_state,
2641 struct drm_connector_state *old_conn_state)
2642 {
2643 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2644 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
2645
2646 if (old_crtc_state->has_audio)
2647 intel_audio_codec_disable(encoder);
2648
2649 if (HAS_PSR(dev_priv) && !HAS_DDI(dev_priv))
2650 intel_psr_disable(intel_dp);
2651
2652 /* Make sure the panel is off before trying to change the mode. But also
2653 * ensure that we have vdd while we switch off the panel. */
2654 intel_edp_panel_vdd_on(intel_dp);
2655 intel_edp_backlight_off(intel_dp);
2656 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
2657 intel_edp_panel_off(intel_dp);
2658
2659 /* disable the port before the pipe on g4x */
2660 if (INTEL_GEN(dev_priv) < 5)
2661 intel_dp_link_down(intel_dp);
2662 }
2663
2664 static void ilk_post_disable_dp(struct intel_encoder *encoder,
2665 struct intel_crtc_state *old_crtc_state,
2666 struct drm_connector_state *old_conn_state)
2667 {
2668 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2669 enum port port = dp_to_dig_port(intel_dp)->port;
2670
2671 intel_dp_link_down(intel_dp);
2672
2673 /* Only ilk+ has port A */
2674 if (port == PORT_A)
2675 ironlake_edp_pll_off(intel_dp);
2676 }
2677
2678 static void vlv_post_disable_dp(struct intel_encoder *encoder,
2679 struct intel_crtc_state *old_crtc_state,
2680 struct drm_connector_state *old_conn_state)
2681 {
2682 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2683
2684 intel_dp_link_down(intel_dp);
2685 }
2686
2687 static void chv_post_disable_dp(struct intel_encoder *encoder,
2688 struct intel_crtc_state *old_crtc_state,
2689 struct drm_connector_state *old_conn_state)
2690 {
2691 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2692 struct drm_device *dev = encoder->base.dev;
2693 struct drm_i915_private *dev_priv = to_i915(dev);
2694
2695 intel_dp_link_down(intel_dp);
2696
2697 mutex_lock(&dev_priv->sb_lock);
2698
2699 /* Assert data lane reset */
2700 chv_data_lane_soft_reset(encoder, true);
2701
2702 mutex_unlock(&dev_priv->sb_lock);
2703 }
2704
2705 static void
2706 _intel_dp_set_link_train(struct intel_dp *intel_dp,
2707 uint32_t *DP,
2708 uint8_t dp_train_pat)
2709 {
2710 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2711 struct drm_device *dev = intel_dig_port->base.base.dev;
2712 struct drm_i915_private *dev_priv = to_i915(dev);
2713 enum port port = intel_dig_port->port;
2714
2715 if (dp_train_pat & DP_TRAINING_PATTERN_MASK)
2716 DRM_DEBUG_KMS("Using DP training pattern TPS%d\n",
2717 dp_train_pat & DP_TRAINING_PATTERN_MASK);
2718
2719 if (HAS_DDI(dev_priv)) {
2720 uint32_t temp = I915_READ(DP_TP_CTL(port));
2721
2722 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
2723 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
2724 else
2725 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
2726
2727 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2728 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2729 case DP_TRAINING_PATTERN_DISABLE:
2730 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
2731
2732 break;
2733 case DP_TRAINING_PATTERN_1:
2734 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
2735 break;
2736 case DP_TRAINING_PATTERN_2:
2737 temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
2738 break;
2739 case DP_TRAINING_PATTERN_3:
2740 temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
2741 break;
2742 }
2743 I915_WRITE(DP_TP_CTL(port), temp);
2744
2745 } else if ((IS_GEN7(dev_priv) && port == PORT_A) ||
2746 (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
2747 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
2748
2749 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2750 case DP_TRAINING_PATTERN_DISABLE:
2751 *DP |= DP_LINK_TRAIN_OFF_CPT;
2752 break;
2753 case DP_TRAINING_PATTERN_1:
2754 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
2755 break;
2756 case DP_TRAINING_PATTERN_2:
2757 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2758 break;
2759 case DP_TRAINING_PATTERN_3:
2760 DRM_DEBUG_KMS("TPS3 not supported, using TPS2 instead\n");
2761 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
2762 break;
2763 }
2764
2765 } else {
2766 if (IS_CHERRYVIEW(dev_priv))
2767 *DP &= ~DP_LINK_TRAIN_MASK_CHV;
2768 else
2769 *DP &= ~DP_LINK_TRAIN_MASK;
2770
2771 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2772 case DP_TRAINING_PATTERN_DISABLE:
2773 *DP |= DP_LINK_TRAIN_OFF;
2774 break;
2775 case DP_TRAINING_PATTERN_1:
2776 *DP |= DP_LINK_TRAIN_PAT_1;
2777 break;
2778 case DP_TRAINING_PATTERN_2:
2779 *DP |= DP_LINK_TRAIN_PAT_2;
2780 break;
2781 case DP_TRAINING_PATTERN_3:
2782 if (IS_CHERRYVIEW(dev_priv)) {
2783 *DP |= DP_LINK_TRAIN_PAT_3_CHV;
2784 } else {
2785 DRM_DEBUG_KMS("TPS3 not supported, using TPS2 instead\n");
2786 *DP |= DP_LINK_TRAIN_PAT_2;
2787 }
2788 break;
2789 }
2790 }
2791 }
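/*
 * Example of the dp_train_pat encoding decoded above (illustrative):
 * the clock recovery phase of link training typically requests TPS1
 * with scrambling disabled, i.e. a pattern value of
 *
 *	DP_TRAINING_PATTERN_1 | DP_LINK_SCRAMBLING_DISABLE
 *
 * handed in via intel_dp_program_link_training_pattern().
 */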
2792
2793 static void intel_dp_enable_port(struct intel_dp *intel_dp,
2794 struct intel_crtc_state *old_crtc_state)
2795 {
2796 struct drm_device *dev = intel_dp_to_dev(intel_dp);
2797 struct drm_i915_private *dev_priv = to_i915(dev);
2798
2799 /* enable with pattern 1 (as per spec) */
2800
2801 intel_dp_program_link_training_pattern(intel_dp, DP_TRAINING_PATTERN_1);
2802
2803 /*
2804 * Magic for VLV/CHV. We _must_ first set up the register
2805 * without actually enabling the port, and then do another
2806 * write to enable the port. Otherwise link training will
2807 * fail when the power sequencer is freshly used for this port.
2808 */
2809 intel_dp->DP |= DP_PORT_EN;
2810 if (old_crtc_state->has_audio)
2811 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
2812
2813 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
2814 POSTING_READ(intel_dp->output_reg);
2815 }
2816
2817 static void intel_enable_dp(struct intel_encoder *encoder,
2818 struct intel_crtc_state *pipe_config,
2819 struct drm_connector_state *conn_state)
2820 {
2821 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2822 struct drm_device *dev = encoder->base.dev;
2823 struct drm_i915_private *dev_priv = to_i915(dev);
2824 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2825 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
2826 enum pipe pipe = crtc->pipe;
2827
2828 if (WARN_ON(dp_reg & DP_PORT_EN))
2829 return;
2830
2831 pps_lock(intel_dp);
2832
2833 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
2834 vlv_init_panel_power_sequencer(intel_dp);
2835
2836 intel_dp_enable_port(intel_dp, pipe_config);
2837
2838 edp_panel_vdd_on(intel_dp);
2839 edp_panel_on(intel_dp);
2840 edp_panel_vdd_off(intel_dp, true);
2841
2842 pps_unlock(intel_dp);
2843
2844 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
2845 unsigned int lane_mask = 0x0;
2846
2847 if (IS_CHERRYVIEW(dev_priv))
2848 lane_mask = intel_dp_unused_lane_mask(pipe_config->lane_count);
2849
2850 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
2851 lane_mask);
2852 }
2853
2854 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
2855 intel_dp_start_link_train(intel_dp);
2856 intel_dp_stop_link_train(intel_dp);
2857
2858 if (pipe_config->has_audio) {
2859 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
2860 pipe_name(pipe));
2861 intel_audio_codec_enable(encoder, pipe_config, conn_state);
2862 }
2863 }
2864
2865 static void g4x_enable_dp(struct intel_encoder *encoder,
2866 struct intel_crtc_state *pipe_config,
2867 struct drm_connector_state *conn_state)
2868 {
2869 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2870
2871 intel_enable_dp(encoder, pipe_config, conn_state);
2872 intel_edp_backlight_on(intel_dp);
2873 }
2874
2875 static void vlv_enable_dp(struct intel_encoder *encoder,
2876 struct intel_crtc_state *pipe_config,
2877 struct drm_connector_state *conn_state)
2878 {
2879 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2880
2881 intel_edp_backlight_on(intel_dp);
2882 intel_psr_enable(intel_dp);
2883 }
2884
2885 static void g4x_pre_enable_dp(struct intel_encoder *encoder,
2886 struct intel_crtc_state *pipe_config,
2887 struct drm_connector_state *conn_state)
2888 {
2889 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
2890 enum port port = dp_to_dig_port(intel_dp)->port;
2891
2892 intel_dp_prepare(encoder, pipe_config);
2893
2894 /* Only ilk+ has port A */
2895 if (port == PORT_A)
2896 ironlake_edp_pll_on(intel_dp, pipe_config);
2897 }
2898
2899 static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
2900 {
2901 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2902 struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
2903 enum pipe pipe = intel_dp->pps_pipe;
2904 i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe);
2905
2906 WARN_ON(intel_dp->active_pipe != INVALID_PIPE);
2907
2908 if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
2909 return;
2910
2911 edp_panel_vdd_off_sync(intel_dp);
2912
2913 /*
2914 * VLV seems to get confused when multiple power sequencers
2915 * have the same port selected (even if only one has power/vdd
2916 * enabled). The failure manifests as vlv_wait_port_ready() failing.
2917 * CHV, on the other hand, doesn't seem to mind having the same port
2918 * selected in multiple power sequencers, but let's always clear the
2919 * port select when logically disconnecting a power sequencer
2920 * from a port.
2921 */
2922 DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
2923 pipe_name(pipe), port_name(intel_dig_port->port));
2924 I915_WRITE(pp_on_reg, 0);
2925 POSTING_READ(pp_on_reg);
2926
2927 intel_dp->pps_pipe = INVALID_PIPE;
2928 }
2929
2930 static void vlv_steal_power_sequencer(struct drm_device *dev,
2931 enum pipe pipe)
2932 {
2933 struct drm_i915_private *dev_priv = to_i915(dev);
2934 struct intel_encoder *encoder;
2935
2936 lockdep_assert_held(&dev_priv->pps_mutex);
2937
2938 for_each_intel_encoder(dev, encoder) {
2939 struct intel_dp *intel_dp;
2940 enum port port;
2941
2942 if (encoder->type != INTEL_OUTPUT_DP &&
2943 encoder->type != INTEL_OUTPUT_EDP)
2944 continue;
2945
2946 intel_dp = enc_to_intel_dp(&encoder->base);
2947 port = dp_to_dig_port(intel_dp)->port;
2948
2949 WARN(intel_dp->active_pipe == pipe,
2950 "stealing pipe %c power sequencer from active (e)DP port %c\n",
2951 pipe_name(pipe), port_name(port));
2952
2953 if (intel_dp->pps_pipe != pipe)
2954 continue;
2955
2956 DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
2957 pipe_name(pipe), port_name(port));
2958
2959 /* make sure vdd is off before we steal it */
2960 vlv_detach_power_sequencer(intel_dp);
2961 }
2962 }
2963
2964 static void vlv_init_panel_power_sequencer(struct intel_dp *intel_dp)
2965 {
2966 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2967 struct intel_encoder *encoder = &intel_dig_port->base;
2968 struct drm_device *dev = encoder->base.dev;
2969 struct drm_i915_private *dev_priv = to_i915(dev);
2970 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
2971
2972 lockdep_assert_held(&dev_priv->pps_mutex);
2973
2974 WARN_ON(intel_dp->active_pipe != INVALID_PIPE);
2975
2976 if (intel_dp->pps_pipe != INVALID_PIPE &&
2977 intel_dp->pps_pipe != crtc->pipe) {
2978 /*
2979 * If another power sequencer was being used on this
2980 * port previously make sure to turn off vdd there while
2981 * we still have control of it.
2982 */
2983 vlv_detach_power_sequencer(intel_dp);
2984 }
2985
2986 /*
2987 * We may be stealing the power
2988 * sequencer from another port.
2989 */
2990 vlv_steal_power_sequencer(dev, crtc->pipe);
2991
2992 intel_dp->active_pipe = crtc->pipe;
2993
2994 if (!is_edp(intel_dp))
2995 return;
2996
2997 /* now it's all ours */
2998 intel_dp->pps_pipe = crtc->pipe;
2999
3000 DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
3001 pipe_name(intel_dp->pps_pipe), port_name(intel_dig_port->port));
3002
3003 /* init power sequencer on this pipe and port */
3004 intel_dp_init_panel_power_sequencer(dev, intel_dp);
3005 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, true);
3006 }
3007
3008 static void vlv_pre_enable_dp(struct intel_encoder *encoder,
3009 struct intel_crtc_state *pipe_config,
3010 struct drm_connector_state *conn_state)
3011 {
3012 vlv_phy_pre_encoder_enable(encoder);
3013
3014 intel_enable_dp(encoder, pipe_config, conn_state);
3015 }
3016
3017 static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder,
3018 struct intel_crtc_state *pipe_config,
3019 struct drm_connector_state *conn_state)
3020 {
3021 intel_dp_prepare(encoder, pipe_config);
3022
3023 vlv_phy_pre_pll_enable(encoder);
3024 }
3025
3026 static void chv_pre_enable_dp(struct intel_encoder *encoder,
3027 struct intel_crtc_state *pipe_config,
3028 struct drm_connector_state *conn_state)
3029 {
3030 chv_phy_pre_encoder_enable(encoder);
3031
3032 intel_enable_dp(encoder, pipe_config, conn_state);
3033
3034 /* Second common lane will stay alive on its own now */
3035 chv_phy_release_cl2_override(encoder);
3036 }
3037
3038 static void chv_dp_pre_pll_enable(struct intel_encoder *encoder,
3039 struct intel_crtc_state *pipe_config,
3040 struct drm_connector_state *conn_state)
3041 {
3042 intel_dp_prepare(encoder, pipe_config);
3043
3044 chv_phy_pre_pll_enable(encoder);
3045 }
3046
3047 static void chv_dp_post_pll_disable(struct intel_encoder *encoder,
3048 struct intel_crtc_state *pipe_config,
3049 struct drm_connector_state *conn_state)
3050 {
3051 chv_phy_post_pll_disable(encoder);
3052 }
3053
3054 /*
3055 * Fetch AUX CH registers 0x202 - 0x207 which contain
3056 * link status information
3057 */
3058 bool
3059 intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
3060 {
3061 return drm_dp_dpcd_read(&intel_dp->aux, DP_LANE0_1_STATUS, link_status,
3062 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
3063 }
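/*
 * Sketch of typical use (the link training and retraining code follows
 * this shape): fetch the status block, then let the DRM helpers decode
 * it.
 *
 *	uint8_t link_status[DP_LINK_STATUS_SIZE];
 *
 *	if (intel_dp_get_link_status(intel_dp, link_status) &&
 *	    drm_dp_channel_eq_ok(link_status, intel_dp->lane_count))
 *		return;
 *
 * If the AUX read fails, or channel equalization is no longer ok, the
 * link needs attention.
 */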
3064
3065 static bool intel_dp_get_y_cord_status(struct intel_dp *intel_dp)
3066 {
3067 uint8_t psr_caps = 0;
3068
3069 drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_CAPS, &psr_caps);
3070 return psr_caps & DP_PSR2_SU_Y_COORDINATE_REQUIRED;
3071 }
3072
3073 static bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
3074 {
3075 uint8_t dprx = 0;
3076
3077 drm_dp_dpcd_readb(&intel_dp->aux,
3078 DP_DPRX_FEATURE_ENUMERATION_LIST,
3079 &dprx);
3080 return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED;
3081 }
3082
3083 static bool intel_dp_get_alpm_status(struct intel_dp *intel_dp)
3084 {
3085 uint8_t alpm_caps = 0;
3086
3087 drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP, &alpm_caps);
3088 return alpm_caps & DP_ALPM_CAP;
3089 }
3090
3091 /* These are source-specific values. */
3092 uint8_t
3093 intel_dp_voltage_max(struct intel_dp *intel_dp)
3094 {
3095 struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
3096 enum port port = dp_to_dig_port(intel_dp)->port;
3097
3098 if (IS_GEN9_LP(dev_priv))
3099 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3100 else if (INTEL_GEN(dev_priv) >= 9) {
3101 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
3102 return intel_ddi_dp_voltage_max(encoder);
3103 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
3104 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3105 else if (IS_GEN7(dev_priv) && port == PORT_A)
3106 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3107 else if (HAS_PCH_CPT(dev_priv) && port != PORT_A)
3108 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
3109 else
3110 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
3111 }
3112
3113 uint8_t
3114 intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
3115 {
3116 struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
3117 enum port port = dp_to_dig_port(intel_dp)->port;
3118
3119 if (INTEL_GEN(dev_priv) >= 9) {
3120 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3121 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3122 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3123 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3124 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3125 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3126 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3127 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3128 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3129 default:
3130 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3131 }
3132 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
3133 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3134 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3135 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3136 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3137 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3138 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3139 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3140 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3141 default:
3142 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3143 }
3144 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
3145 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3146 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3147 return DP_TRAIN_PRE_EMPH_LEVEL_3;
3148 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3149 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3150 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3151 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3152 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3153 default:
3154 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3155 }
3156 } else if (IS_GEN7(dev_priv) && port == PORT_A) {
3157 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3158 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3159 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3160 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3161 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3162 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3163 default:
3164 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3165 }
3166 } else {
3167 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
3168 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3169 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3170 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3171 return DP_TRAIN_PRE_EMPH_LEVEL_2;
3172 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3173 return DP_TRAIN_PRE_EMPH_LEVEL_1;
3174 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3175 default:
3176 return DP_TRAIN_PRE_EMPH_LEVEL_0;
3177 }
3178 }
3179 }
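/*
 * Editor's note on the tables above: on every platform the available
 * pre-emphasis shrinks as the voltage swing grows, keeping the combined
 * launch amplitude within the PHY's limits. E.g. on gen9, swing level 0
 * allows pre-emphasis up to level 3, while swing level 3 allows none.
 */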
3180
3181 static uint32_t vlv_signal_levels(struct intel_dp *intel_dp)
3182 {
3183 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
3184 unsigned long demph_reg_value, preemph_reg_value,
3185 uniqtranscale_reg_value;
3186 uint8_t train_set = intel_dp->train_set[0];
3187
3188 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3189 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3190 preemph_reg_value = 0x0004000;
3191 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3192 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3193 demph_reg_value = 0x2B405555;
3194 uniqtranscale_reg_value = 0x552AB83A;
3195 break;
3196 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3197 demph_reg_value = 0x2B404040;
3198 uniqtranscale_reg_value = 0x5548B83A;
3199 break;
3200 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3201 demph_reg_value = 0x2B245555;
3202 uniqtranscale_reg_value = 0x5560B83A;
3203 break;
3204 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3205 demph_reg_value = 0x2B405555;
3206 uniqtranscale_reg_value = 0x5598DA3A;
3207 break;
3208 default:
3209 return 0;
3210 }
3211 break;
3212 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3213 preemph_reg_value = 0x0002000;
3214 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3215 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3216 demph_reg_value = 0x2B404040;
3217 uniqtranscale_reg_value = 0x5552B83A;
3218 break;
3219 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3220 demph_reg_value = 0x2B404848;
3221 uniqtranscale_reg_value = 0x5580B83A;
3222 break;
3223 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3224 demph_reg_value = 0x2B404040;
3225 uniqtranscale_reg_value = 0x55ADDA3A;
3226 break;
3227 default:
3228 return 0;
3229 }
3230 break;
3231 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3232 preemph_reg_value = 0x0000000;
3233 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3234 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3235 demph_reg_value = 0x2B305555;
3236 uniqtranscale_reg_value = 0x5570B83A;
3237 break;
3238 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3239 demph_reg_value = 0x2B2B4040;
3240 uniqtranscale_reg_value = 0x55ADDA3A;
3241 break;
3242 default:
3243 return 0;
3244 }
3245 break;
3246 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3247 preemph_reg_value = 0x0006000;
3248 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3249 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3250 demph_reg_value = 0x1B405555;
3251 uniqtranscale_reg_value = 0x55ADDA3A;
3252 break;
3253 default:
3254 return 0;
3255 }
3256 break;
3257 default:
3258 return 0;
3259 }
3260
3261 vlv_set_phy_signal_level(encoder, demph_reg_value, preemph_reg_value,
3262 uniqtranscale_reg_value, 0);
3263
3264 return 0;
3265 }
3266
3267 static uint32_t chv_signal_levels(struct intel_dp *intel_dp)
3268 {
3269 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
3270 u32 deemph_reg_value, margin_reg_value;
3271 bool uniq_trans_scale = false;
3272 uint8_t train_set = intel_dp->train_set[0];
3273
3274 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3275 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3276 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3277 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3278 deemph_reg_value = 128;
3279 margin_reg_value = 52;
3280 break;
3281 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3282 deemph_reg_value = 128;
3283 margin_reg_value = 77;
3284 break;
3285 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3286 deemph_reg_value = 128;
3287 margin_reg_value = 102;
3288 break;
3289 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3290 deemph_reg_value = 128;
3291 margin_reg_value = 154;
3292 uniq_trans_scale = true;
3293 break;
3294 default:
3295 return 0;
3296 }
3297 break;
3298 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3299 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3300 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3301 deemph_reg_value = 85;
3302 margin_reg_value = 78;
3303 break;
3304 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3305 deemph_reg_value = 85;
3306 margin_reg_value = 116;
3307 break;
3308 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3309 deemph_reg_value = 85;
3310 margin_reg_value = 154;
3311 break;
3312 default:
3313 return 0;
3314 }
3315 break;
3316 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3317 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3318 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3319 deemph_reg_value = 64;
3320 margin_reg_value = 104;
3321 break;
3322 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3323 deemph_reg_value = 64;
3324 margin_reg_value = 154;
3325 break;
3326 default:
3327 return 0;
3328 }
3329 break;
3330 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3331 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3332 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3333 deemph_reg_value = 43;
3334 margin_reg_value = 154;
3335 break;
3336 default:
3337 return 0;
3338 }
3339 break;
3340 default:
3341 return 0;
3342 }
3343
3344 chv_set_phy_signal_level(encoder, deemph_reg_value,
3345 margin_reg_value, uniq_trans_scale);
3346
3347 return 0;
3348 }
3349
3350 static uint32_t
3351 gen4_signal_levels(uint8_t train_set)
3352 {
3353 uint32_t signal_levels = 0;
3354
3355 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
3356 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
3357 default:
3358 signal_levels |= DP_VOLTAGE_0_4;
3359 break;
3360 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
3361 signal_levels |= DP_VOLTAGE_0_6;
3362 break;
3363 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
3364 signal_levels |= DP_VOLTAGE_0_8;
3365 break;
3366 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
3367 signal_levels |= DP_VOLTAGE_1_2;
3368 break;
3369 }
3370 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
3371 case DP_TRAIN_PRE_EMPH_LEVEL_0:
3372 default:
3373 signal_levels |= DP_PRE_EMPHASIS_0;
3374 break;
3375 case DP_TRAIN_PRE_EMPH_LEVEL_1:
3376 signal_levels |= DP_PRE_EMPHASIS_3_5;
3377 break;
3378 case DP_TRAIN_PRE_EMPH_LEVEL_2:
3379 signal_levels |= DP_PRE_EMPHASIS_6;
3380 break;
3381 case DP_TRAIN_PRE_EMPH_LEVEL_3:
3382 signal_levels |= DP_PRE_EMPHASIS_9_5;
3383 break;
3384 }
3385 return signal_levels;
3386 }
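/*
 * Worked example for the helper above: a train_set of
 * DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2 yields
 * DP_VOLTAGE_0_6 | DP_PRE_EMPHASIS_6, i.e. 0.6 V swing with 6 dB of
 * pre-emphasis.
 */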
3387
3388 /* Gen6's DP voltage swing and pre-emphasis control */
3389 static uint32_t
3390 gen6_edp_signal_levels(uint8_t train_set)
3391 {
3392 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3393 DP_TRAIN_PRE_EMPHASIS_MASK);
3394 switch (signal_levels) {
3395 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3396 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3397 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3398 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3399 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
3400 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3401 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3402 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
3403 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3404 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3405 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
3406 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3407 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3408 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
3409 default:
3410 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3411 "0x%x\n", signal_levels);
3412 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
3413 }
3414 }
3415
3416 /* Gen7's DP voltage swing and pre-emphasis control */
3417 static uint32_t
3418 gen7_edp_signal_levels(uint8_t train_set)
3419 {
3420 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3421 DP_TRAIN_PRE_EMPHASIS_MASK);
3422 switch (signal_levels) {
3423 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3424 return EDP_LINK_TRAIN_400MV_0DB_IVB;
3425 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3426 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
3427 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3428 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3429
3430 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3431 return EDP_LINK_TRAIN_600MV_0DB_IVB;
3432 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3433 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3434
3435 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3436 return EDP_LINK_TRAIN_800MV_0DB_IVB;
3437 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3438 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3439
3440 default:
3441 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3442 "0x%x\n", signal_levels);
3443 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3444 }
3445 }
3446
3447 void
3448 intel_dp_set_signal_levels(struct intel_dp *intel_dp)
3449 {
3450 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3451 enum port port = intel_dig_port->port;
3452 struct drm_device *dev = intel_dig_port->base.base.dev;
3453 struct drm_i915_private *dev_priv = to_i915(dev);
3454 uint32_t signal_levels, mask = 0;
3455 uint8_t train_set = intel_dp->train_set[0];
3456
3457 if (HAS_DDI(dev_priv)) {
3458 signal_levels = ddi_signal_levels(intel_dp);
3459
3460 if (IS_GEN9_LP(dev_priv))
3461 signal_levels = 0;
3462 else
3463 mask = DDI_BUF_EMP_MASK;
3464 } else if (IS_CHERRYVIEW(dev_priv)) {
3465 signal_levels = chv_signal_levels(intel_dp);
3466 } else if (IS_VALLEYVIEW(dev_priv)) {
3467 signal_levels = vlv_signal_levels(intel_dp);
3468 } else if (IS_GEN7(dev_priv) && port == PORT_A) {
3469 signal_levels = gen7_edp_signal_levels(train_set);
3470 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
3471 } else if (IS_GEN6(dev_priv) && port == PORT_A) {
3472 signal_levels = gen6_edp_signal_levels(train_set);
3473 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3474 } else {
3475 signal_levels = gen4_signal_levels(train_set);
3476 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3477 }
3478
3479 if (mask)
3480 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3481
3482 DRM_DEBUG_KMS("Using vswing level %d\n",
3483 train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
3484 DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
3485 (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
3486 DP_TRAIN_PRE_EMPHASIS_SHIFT);
3487
3488 intel_dp->DP = (intel_dp->DP & ~mask) | signal_levels;
3489
3490 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
3491 POSTING_READ(intel_dp->output_reg);
3492 }
3493
3494 void
3495 intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
3496 uint8_t dp_train_pat)
3497 {
3498 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3499 struct drm_i915_private *dev_priv =
3500 to_i915(intel_dig_port->base.base.dev);
3501
3502 _intel_dp_set_link_train(intel_dp, &intel_dp->DP, dp_train_pat);
3503
3504 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
3505 POSTING_READ(intel_dp->output_reg);
3506 }
3507
3508 void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3509 {
3510 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3511 struct drm_device *dev = intel_dig_port->base.base.dev;
3512 struct drm_i915_private *dev_priv = to_i915(dev);
3513 enum port port = intel_dig_port->port;
3514 uint32_t val;
3515
3516 if (!HAS_DDI(dev_priv))
3517 return;
3518
3519 val = I915_READ(DP_TP_CTL(port));
3520 val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3521 val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3522 I915_WRITE(DP_TP_CTL(port), val);
3523
3524 /*
3525 * On PORT_A we can have only eDP in SST mode. There the only reason
3526 * we need to set idle transmission mode is to work around a HW issue
3527 * where we enable the pipe while not in idle link-training mode.
3528 * In this case there is a requirement to wait for a minimum number of
3529 * idle patterns to be sent.
3530 */
3531 if (port == PORT_A)
3532 return;
3533
3534 if (intel_wait_for_register(dev_priv, DP_TP_STATUS(port),
3535 DP_TP_STATUS_IDLE_DONE,
3536 DP_TP_STATUS_IDLE_DONE,
3537 1))
3538 DRM_ERROR("Timed out waiting for DP idle patterns\n");
3539 }
3540
3541 static void
3542 intel_dp_link_down(struct intel_dp *intel_dp)
3543 {
3544 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3545 struct intel_crtc *crtc = to_intel_crtc(intel_dig_port->base.base.crtc);
3546 enum port port = intel_dig_port->port;
3547 struct drm_device *dev = intel_dig_port->base.base.dev;
3548 struct drm_i915_private *dev_priv = to_i915(dev);
3549 uint32_t DP = intel_dp->DP;
3550
3551 if (WARN_ON(HAS_DDI(dev_priv)))
3552 return;
3553
3554 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
3555 return;
3556
3557 DRM_DEBUG_KMS("\n");
3558
3559 if ((IS_GEN7(dev_priv) && port == PORT_A) ||
3560 (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
3561 DP &= ~DP_LINK_TRAIN_MASK_CPT;
3562 DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
3563 } else {
3564 if (IS_CHERRYVIEW(dev_priv))
3565 DP &= ~DP_LINK_TRAIN_MASK_CHV;
3566 else
3567 DP &= ~DP_LINK_TRAIN_MASK;
3568 DP |= DP_LINK_TRAIN_PAT_IDLE;
3569 }
3570 I915_WRITE(intel_dp->output_reg, DP);
3571 POSTING_READ(intel_dp->output_reg);
3572
3573 DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
3574 I915_WRITE(intel_dp->output_reg, DP);
3575 POSTING_READ(intel_dp->output_reg);
3576
3577 /*
3578 * HW workaround for IBX, we need to move the port
3579 * to transcoder A after disabling it to allow the
3580 * matching HDMI port to be enabled on transcoder A.
3581 */
3582 if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B && port != PORT_A) {
3583 /*
3584 * We get CPU/PCH FIFO underruns on the other pipe when
3585 * doing the workaround. Sweep them under the rug.
3586 */
3587 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
3588 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
3589
3590 /* always enable with pattern 1 (as per spec) */
3591 DP &= ~(DP_PIPEB_SELECT | DP_LINK_TRAIN_MASK);
3592 DP |= DP_PORT_EN | DP_LINK_TRAIN_PAT_1;
3593 I915_WRITE(intel_dp->output_reg, DP);
3594 POSTING_READ(intel_dp->output_reg);
3595
3596 DP &= ~DP_PORT_EN;
3597 I915_WRITE(intel_dp->output_reg, DP);
3598 POSTING_READ(intel_dp->output_reg);
3599
3600 intel_wait_for_vblank_if_active(dev_priv, PIPE_A);
3601 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
3602 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
3603 }
3604
3605 msleep(intel_dp->panel_power_down_delay);
3606
3607 intel_dp->DP = DP;
3608
3609 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
3610 pps_lock(intel_dp);
3611 intel_dp->active_pipe = INVALID_PIPE;
3612 pps_unlock(intel_dp);
3613 }
3614 }
3615
3616 bool
3617 intel_dp_read_dpcd(struct intel_dp *intel_dp)
3618 {
3619 if (drm_dp_dpcd_read(&intel_dp->aux, 0x000, intel_dp->dpcd,
3620 sizeof(intel_dp->dpcd)) < 0)
3621 return false; /* aux transfer failed */
3622
3623 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
3624
3625 return intel_dp->dpcd[DP_DPCD_REV] != 0;
3626 }
3627
3628 static bool
3629 intel_edp_init_dpcd(struct intel_dp *intel_dp)
3630 {
3631 struct drm_i915_private *dev_priv =
3632 to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
3633
3634 /* this function is meant to be called only once */
3635 WARN_ON(intel_dp->dpcd[DP_DPCD_REV] != 0);
3636
3637 if (!intel_dp_read_dpcd(intel_dp))
3638 return false;
3639
3640 intel_dp_read_desc(intel_dp);
3641
3642 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
3643 dev_priv->no_aux_handshake = intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
3644 DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
3645
3646 /* Check if the panel supports PSR */
3647 drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT,
3648 intel_dp->psr_dpcd,
3649 sizeof(intel_dp->psr_dpcd));
3650 if (intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED) {
3651 dev_priv->psr.sink_support = true;
3652 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
3653 }
3654
3655 if (INTEL_GEN(dev_priv) >= 9 &&
3656 (intel_dp->psr_dpcd[0] & DP_PSR2_IS_SUPPORTED)) {
3657 uint8_t frame_sync_cap;
3658
3659 dev_priv->psr.sink_support = true;
3660 drm_dp_dpcd_read(&intel_dp->aux,
3661 DP_SINK_DEVICE_AUX_FRAME_SYNC_CAP,
3662 &frame_sync_cap, 1);
3663 dev_priv->psr.aux_frame_sync = frame_sync_cap ? true : false;
3664 /* PSR2 needs frame sync as well */
3665 dev_priv->psr.psr2_support = dev_priv->psr.aux_frame_sync;
3666 DRM_DEBUG_KMS("PSR2 %s on sink",
3667 dev_priv->psr.psr2_support ? "supported" : "not supported");
3668
3669 if (dev_priv->psr.psr2_support) {
3670 dev_priv->psr.y_cord_support =
3671 intel_dp_get_y_cord_status(intel_dp);
3672 dev_priv->psr.colorimetry_support =
3673 intel_dp_get_colorimetry_status(intel_dp);
3674 dev_priv->psr.alpm =
3675 intel_dp_get_alpm_status(intel_dp);
3676 }
3677
3678 }
3679
3680 /* Read the eDP Display control capabilities registers */
3681 if ((intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) &&
3682 drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
3683 intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
3684 sizeof(intel_dp->edp_dpcd))
3685 DRM_DEBUG_KMS("EDP DPCD : %*ph\n", (int) sizeof(intel_dp->edp_dpcd),
3686 intel_dp->edp_dpcd);
3687
3688 /* Intermediate frequency support */
3689 if (intel_dp->edp_dpcd[0] >= 0x03) { /* eDP 1.4 or higher */
3690 __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
3691 int i;
3692
3693 drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES,
3694 sink_rates, sizeof(sink_rates));
3695
3696 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
3697 int val = le16_to_cpu(sink_rates[i]);
3698
3699 if (val == 0)
3700 break;
3701
3702 /* Value read multiplied by 200kHz gives the per-lane
3703 * link rate in kHz. The source rates are, however,
3704 * stored in terms of LS_Clk kHz. The full conversion
3705 * back to symbols is
3706 * (val * 200kHz)*(8/10 ch. encoding)*(1/8 bit to Byte)
3707 */
3708 intel_dp->sink_rates[i] = (val * 200) / 10;
3709 }
3710 intel_dp->num_sink_rates = i;
3711 }
3712
3713 if (intel_dp->num_sink_rates)
3714 intel_dp->use_rate_select = true;
3715 else
3716 intel_dp_set_sink_rates(intel_dp);
3717
3718 return true;
3719 }
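/*
 * Illustration (not part of the driver): each DP_SUPPORTED_LINK_RATES
 * entry is a little-endian 16-bit count of 200kHz units, while the
 * driver stores rates as the link symbol clock (LS_Clk) in kHz, i.e.
 * the per-lane bit rate divided by 10. A standalone sketch of the
 * conversion done in the loop above:
 */
#if 0
#include <stdint.h>
#include <assert.h>

static int sink_rate_to_ls_clk_khz(uint16_t raw /* host-order value */)
{
	/* raw * 200kHz = per-lane bit rate in kHz; / 10 = LS_Clk in kHz */
	return (raw * 200) / 10;
}

int main(void)
{
	/* 13500 * 200kHz = 2.7GHz (HBR), stored as 270000 kHz LS_Clk */
	assert(sink_rate_to_ls_clk_khz(13500) == 270000);
	return 0;
}
#endif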
3720
3721
3722 static bool
3723 intel_dp_get_dpcd(struct intel_dp *intel_dp)
3724 {
3725 if (!intel_dp_read_dpcd(intel_dp))
3726 return false;
3727
3728 /* Don't clobber cached eDP rates. */
3729 if (!is_edp(intel_dp))
3730 intel_dp_set_sink_rates(intel_dp);
3731
3732 if (drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT,
3733 &intel_dp->sink_count, 1) < 0)
3734 return false;
3735
3736 /*
3737 * Sink count can change between short pulse HPD interrupts,
3738 * hence a member variable in intel_dp tracks any changes
3739 * between them.
3740 */
3741 intel_dp->sink_count = DP_GET_SINK_COUNT(intel_dp->sink_count);
3742
3743 /*
3744 * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that
3745 * a dongle is present but no display. Unless we need to know
3746 * whether a dongle is present, we don't need to update the
3747 * downstream port information, so an early return here saves
3748 * time by skipping operations that are not required.
3749 */
3750 if (!is_edp(intel_dp) && !intel_dp->sink_count)
3751 return false;
3752
3753 if (!drm_dp_is_branch(intel_dp->dpcd))
3754 return true; /* native DP sink */
3755
3756 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
3757 return true; /* no per-port downstream info */
3758
3759 if (drm_dp_dpcd_read(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
3760 intel_dp->downstream_ports,
3761 DP_MAX_DOWNSTREAM_PORTS) < 0)
3762 return false; /* downstream port status fetch failed */
3763
3764 return true;
3765 }
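/*
 * Illustration (not part of the driver): the DPCD SINK_COUNT register
 * keeps its count in bits 5:0 with bit 7 acting as the count's bit 6
 * (bit 6 itself is the CP_READY flag), which is what the
 * DP_GET_SINK_COUNT() macro used above folds together. A standalone
 * sketch of the decode:
 */
#if 0
#include <stdint.h>
#include <assert.h>

static uint8_t get_sink_count(uint8_t reg)
{
	/* fold bit 7 down to bit 6, skipping the CP_READY flag */
	return ((reg & 0x80) >> 1) | (reg & 0x3f);
}

int main(void)
{
	assert(get_sink_count(0x01) == 1);	/* one sink attached */
	assert(get_sink_count(0x81) == 0x41);	/* bit 7 becomes bit 6 */
	return 0;
}
#endif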
3766
3767 static bool
3768 intel_dp_can_mst(struct intel_dp *intel_dp)
3769 {
3770 u8 buf[1];
3771
3772 if (!i915.enable_dp_mst)
3773 return false;
3774
3775 if (!intel_dp->can_mst)
3776 return false;
3777
3778 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
3779 return false;
3780
3781 if (drm_dp_dpcd_read(&intel_dp->aux, DP_MSTM_CAP, buf, 1) != 1)
3782 return false;
3783
3784 return buf[0] & DP_MST_CAP;
3785 }
3786
3787 static void
3788 intel_dp_configure_mst(struct intel_dp *intel_dp)
3789 {
3790 if (!i915.enable_dp_mst)
3791 return;
3792
3793 if (!intel_dp->can_mst)
3794 return;
3795
3796 intel_dp->is_mst = intel_dp_can_mst(intel_dp);
3797
3798 if (intel_dp->is_mst)
3799 DRM_DEBUG_KMS("Sink is MST capable\n");
3800 else
3801 DRM_DEBUG_KMS("Sink is not MST capable\n");
3802
3803 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
3804 intel_dp->is_mst);
3805 }
3806
3807 static int intel_dp_sink_crc_stop(struct intel_dp *intel_dp)
3808 {
3809 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3810 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
3811 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
3812 u8 buf;
3813 int ret = 0;
3814 int count = 0;
3815 int attempts = 10;
3816
3817 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0) {
3818 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
3819 ret = -EIO;
3820 goto out;
3821 }
3822
3823 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
3824 buf & ~DP_TEST_SINK_START) < 0) {
3825 DRM_DEBUG_KMS("Sink CRC couldn't be stopped properly\n");
3826 ret = -EIO;
3827 goto out;
3828 }
3829
3830 do {
3831 intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
3832
3833 if (drm_dp_dpcd_readb(&intel_dp->aux,
3834 DP_TEST_SINK_MISC, &buf) < 0) {
3835 ret = -EIO;
3836 goto out;
3837 }
3838 count = buf & DP_TEST_COUNT_MASK;
3839 } while (--attempts && count);
3840
3841 if (attempts == 0) {
3842 DRM_DEBUG_KMS("TIMEOUT: Sink CRC counter is not zeroed after calculation is stopped\n");
3843 ret = -ETIMEDOUT;
3844 }
3845
3846 out:
3847 hsw_enable_ips(intel_crtc);
3848 return ret;
3849 }
3850
3851 static int intel_dp_sink_crc_start(struct intel_dp *intel_dp)
3852 {
3853 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3854 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
3855 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
3856 u8 buf;
3857 int ret;
3858
3859 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK_MISC, &buf) < 0)
3860 return -EIO;
3861
3862 if (!(buf & DP_TEST_CRC_SUPPORTED))
3863 return -ENOTTY;
3864
3865 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_SINK, &buf) < 0)
3866 return -EIO;
3867
3868 if (buf & DP_TEST_SINK_START) {
3869 ret = intel_dp_sink_crc_stop(intel_dp);
3870 if (ret)
3871 return ret;
3872 }
3873
3874 hsw_disable_ips(intel_crtc);
3875
3876 if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_SINK,
3877 buf | DP_TEST_SINK_START) < 0) {
3878 hsw_enable_ips(intel_crtc);
3879 return -EIO;
3880 }
3881
3882 intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
3883 return 0;
3884 }
3885
3886 int intel_dp_sink_crc(struct intel_dp *intel_dp, u8 *crc)
3887 {
3888 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3889 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
3890 struct intel_crtc *intel_crtc = to_intel_crtc(dig_port->base.base.crtc);
3891 u8 buf;
3892 int count, ret;
3893 int attempts = 6;
3894
3895 ret = intel_dp_sink_crc_start(intel_dp);
3896 if (ret)
3897 return ret;
3898
3899 do {
3900 intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
3901
3902 if (drm_dp_dpcd_readb(&intel_dp->aux,
3903 DP_TEST_SINK_MISC, &buf) < 0) {
3904 ret = -EIO;
3905 goto stop;
3906 }
3907 count = buf & DP_TEST_COUNT_MASK;
3908
3909 } while (--attempts && count == 0);
3910
3911 if (attempts == 0) {
3912 DRM_ERROR("Panel is unable to calculate any CRC after 6 vblanks\n");
3913 ret = -ETIMEDOUT;
3914 goto stop;
3915 }
3916
3917 if (drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_CRC_R_CR, crc, 6) < 0) {
3918 ret = -EIO;
3919 goto stop;
3920 }
3921
3922 stop:
3923 intel_dp_sink_crc_stop(intel_dp);
3924 return ret;
3925 }
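/*
 * Illustration (not part of the driver): the sink CRC helpers above use
 * bounded retry loops paced by vblanks rather than unbounded busy
 * waits. The shape of that pattern, as a standalone sketch with
 * hypothetical poll_condition()/wait_for_vblank() callbacks:
 */
#if 0
#include <stdbool.h>
#include <errno.h>

bool poll_condition(void);	/* hypothetical: e.g. CRC count != 0 */
void wait_for_vblank(void);	/* hypothetical pacing primitive */

static int wait_bounded(int attempts)	/* attempts must be > 0 */
{
	do {
		wait_for_vblank();
		if (poll_condition())
			return 0;
	} while (--attempts);

	return -ETIMEDOUT;	/* caller decides how to report/recover */
}
#endif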
3926
3927 static bool
3928 intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3929 {
3930 return drm_dp_dpcd_read(&intel_dp->aux,
3931 DP_DEVICE_SERVICE_IRQ_VECTOR,
3932 sink_irq_vector, 1) == 1;
3933 }
3934
3935 static bool
3936 intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
3937 {
3938 int ret;
3939
3940 ret = drm_dp_dpcd_read(&intel_dp->aux,
3941 DP_SINK_COUNT_ESI,
3942 sink_irq_vector, 14);
3943 return ret == 14;
3947 }
3948
3949 static uint8_t intel_dp_autotest_link_training(struct intel_dp *intel_dp)
3950 {
3951 int status = 0;
3952 int min_lane_count = 1;
3953 int common_rates[DP_MAX_SUPPORTED_RATES] = {};
3954 int link_rate_index, test_link_rate;
3955 uint8_t test_lane_count, test_link_bw;
3956 /* (DP CTS 1.2)
3957 * 4.3.1.11
3958 */
3959 /* Read the TEST_LANE_COUNT and TEST_LINK_RATE fields (DP CTS 3.1.4) */
3960 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LANE_COUNT,
3961 &test_lane_count);
3962
3963 if (status <= 0) {
3964 DRM_DEBUG_KMS("Lane count read failed\n");
3965 return DP_TEST_NAK;
3966 }
3967 test_lane_count &= DP_MAX_LANE_COUNT_MASK;
3968 /* Validate the requested lane count */
3969 if (test_lane_count < min_lane_count ||
3970 test_lane_count > intel_dp->max_sink_lane_count)
3971 return DP_TEST_NAK;
3972
3973 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LINK_RATE,
3974 &test_link_bw);
3975 if (status <= 0) {
3976 DRM_DEBUG_KMS("Link Rate read failed\n");
3977 return DP_TEST_NAK;
3978 }
3979 /* Validate the requested link rate */
3980 test_link_rate = drm_dp_bw_code_to_link_rate(test_link_bw);
3981 link_rate_index = intel_dp_link_rate_index(intel_dp,
3982 common_rates,
3983 test_link_rate);
3984 if (link_rate_index < 0)
3985 return DP_TEST_NAK;
3986
3987 intel_dp->compliance.test_lane_count = test_lane_count;
3988 intel_dp->compliance.test_link_rate = test_link_rate;
3989
3990 return DP_TEST_ACK;
3991 }
3992
3993 static uint8_t intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
3994 {
3995 uint8_t test_pattern;
3996 uint8_t test_misc; /* only a single byte is read from DP_TEST_MISC0 */
3997 __be16 h_width, v_height;
3998 int status = 0;
3999
4000 /* Read the TEST_PATTERN (DP CTS 3.1.5) */
4001 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_PATTERN,
4002 &test_pattern, 1);
4003 if (status <= 0) {
4004 DRM_DEBUG_KMS("Test pattern read failed\n");
4005 return DP_TEST_NAK;
4006 }
4007 if (test_pattern != DP_COLOR_RAMP)
4008 return DP_TEST_NAK;
4009
4010 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_H_WIDTH_HI,
4011 &h_width, 2);
4012 if (status <= 0) {
4013 DRM_DEBUG_KMS("H Width read failed\n");
4014 return DP_TEST_NAK;
4015 }
4016
4017 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_V_HEIGHT_HI,
4018 &v_height, 2);
4019 if (status <= 0) {
4020 DRM_DEBUG_KMS("V Height read failed\n");
4021 return DP_TEST_NAK;
4022 }
4023
4024 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_MISC0,
4025 &test_misc, 1);
4026 if (status <= 0) {
4027 DRM_DEBUG_KMS("TEST MISC read failed\n");
4028 return DP_TEST_NAK;
4029 }
4030 if ((test_misc & DP_TEST_COLOR_FORMAT_MASK) != DP_COLOR_FORMAT_RGB)
4031 return DP_TEST_NAK;
4032 if (test_misc & DP_TEST_DYNAMIC_RANGE_CEA)
4033 return DP_TEST_NAK;
4034 switch (test_misc & DP_TEST_BIT_DEPTH_MASK) {
4035 case DP_TEST_BIT_DEPTH_6:
4036 intel_dp->compliance.test_data.bpc = 6;
4037 break;
4038 case DP_TEST_BIT_DEPTH_8:
4039 intel_dp->compliance.test_data.bpc = 8;
4040 break;
4041 default:
4042 return DP_TEST_NAK;
4043 }
4044
4045 intel_dp->compliance.test_data.video_pattern = test_pattern;
4046 intel_dp->compliance.test_data.hdisplay = be16_to_cpu(h_width);
4047 intel_dp->compliance.test_data.vdisplay = be16_to_cpu(v_height);
4048 /* Set test active flag here so userspace doesn't interrupt things */
4049 intel_dp->compliance.test_active = 1;
4050
4051 return DP_TEST_ACK;
4052 }
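/*
 * Illustration (not part of the driver): TEST_H_WIDTH and TEST_V_HEIGHT
 * are stored high byte first in the DPCD, so a 2-byte AUX read yields a
 * big-endian value; the be16_to_cpu() calls above are equivalent to
 * this standalone decode:
 */
#if 0
#include <stdint.h>
#include <assert.h>

static uint16_t dpcd_be16(const uint8_t b[2])
{
	return (uint16_t)((b[0] << 8) | b[1]);	/* b[0] is the HI byte */
}

int main(void)
{
	const uint8_t raw[2] = { 0x07, 0x80 };	/* 0x0780 = 1920 */

	assert(dpcd_be16(raw) == 1920);
	return 0;
}
#endif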
4053
4054 static uint8_t intel_dp_autotest_edid(struct intel_dp *intel_dp)
4055 {
4056 uint8_t test_result = DP_TEST_ACK;
4057 struct intel_connector *intel_connector = intel_dp->attached_connector;
4058 struct drm_connector *connector = &intel_connector->base;
4059
4060 if (intel_connector->detect_edid == NULL ||
4061 connector->edid_corrupt ||
4062 intel_dp->aux.i2c_defer_count > 6) {
4063 /* Check EDID read for NACKs, DEFERs and corruption
4064 * (DP CTS 1.2 Core r1.1)
4065 * 4.2.2.4 : Failed EDID read, I2C_NAK
4066 * 4.2.2.5 : Failed EDID read, I2C_DEFER
4067 * 4.2.2.6 : EDID corruption detected
4068 * Use failsafe mode for all cases
4069 */
4070 if (intel_dp->aux.i2c_nack_count > 0 ||
4071 intel_dp->aux.i2c_defer_count > 0)
4072 DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
4073 intel_dp->aux.i2c_nack_count,
4074 intel_dp->aux.i2c_defer_count);
4075 intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_FAILSAFE;
4076 } else {
4077 struct edid *block = intel_connector->detect_edid;
4078
4079 /* We have to write the checksum
4080 * of the last block read
4081 */
4082 block += intel_connector->detect_edid->extensions;
4083
4084 if (!drm_dp_dpcd_write(&intel_dp->aux,
4085 DP_TEST_EDID_CHECKSUM,
4086 &block->checksum,
4087 1))
4088 DRM_DEBUG_KMS("Failed to write EDID checksum\n");
4089
4090 test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
4091 intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_PREFERRED;
4092 }
4093
4094 /* Set test active flag here so userspace doesn't interrupt things */
4095 intel_dp->compliance.test_active = 1;
4096
4097 return test_result;
4098 }
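/*
 * Illustration (not part of the driver): the EDID test above replies
 * with the checksum byte of the last 128-byte block read. An EDID block
 * is valid when all 128 bytes sum to 0 (mod 256), so the final byte is
 * chosen to balance the rest; a standalone sketch of that check:
 */
#if 0
#include <stdint.h>
#include <stddef.h>
#include <stdbool.h>

#define EDID_BLOCK_SIZE 128

static bool edid_block_valid(const uint8_t block[EDID_BLOCK_SIZE])
{
	uint8_t sum = 0;
	size_t i;

	for (i = 0; i < EDID_BLOCK_SIZE; i++)
		sum += block[i];	/* wraps mod 256 */

	return sum == 0;	/* block[127] balances bytes 0..126 */
}
#endif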
4099
4100 static uint8_t intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
4101 {
4102 uint8_t test_result = DP_TEST_NAK;
4103 return test_result;
4104 }
4105
4106 static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
4107 {
4108 uint8_t response = DP_TEST_NAK;
4109 uint8_t request = 0;
4110 int status;
4111
4112 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_REQUEST, &request);
4113 if (status <= 0) {
4114 DRM_DEBUG_KMS("Could not read test request from sink\n");
4115 goto update_status;
4116 }
4117
4118 switch (request) {
4119 case DP_TEST_LINK_TRAINING:
4120 DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
4121 response = intel_dp_autotest_link_training(intel_dp);
4122 break;
4123 case DP_TEST_LINK_VIDEO_PATTERN:
4124 DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
4125 response = intel_dp_autotest_video_pattern(intel_dp);
4126 break;
4127 case DP_TEST_LINK_EDID_READ:
4128 DRM_DEBUG_KMS("EDID test requested\n");
4129 response = intel_dp_autotest_edid(intel_dp);
4130 break;
4131 case DP_TEST_LINK_PHY_TEST_PATTERN:
4132 DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
4133 response = intel_dp_autotest_phy_pattern(intel_dp);
4134 break;
4135 default:
4136 DRM_DEBUG_KMS("Invalid test request '%02x'\n", request);
4137 break;
4138 }
4139
4140 if (response & DP_TEST_ACK)
4141 intel_dp->compliance.test_type = request;
4142
4143 update_status:
4144 status = drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, response);
4145 if (status <= 0)
4146 DRM_DEBUG_KMS("Could not write test response to sink\n");
4147 }
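/*
 * Illustration (not part of the driver): the request handling above is
 * a switch over the TEST_REQUEST byte; an equivalent table-driven shape
 * is sometimes used when the request space grows. A standalone sketch
 * with hypothetical handler and context types:
 */
#if 0
#include <stdint.h>
#include <stddef.h>

typedef uint8_t (*test_handler_t)(void *ctx);

struct test_dispatch {
	uint8_t request;	/* e.g. DP_TEST_LINK_TRAINING */
	test_handler_t handle;
};

static uint8_t dispatch_test(const struct test_dispatch *table, size_t n,
			     uint8_t request, void *ctx)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (table[i].request == request)
			return table[i].handle(ctx);

	return 0x02;	/* DP_TEST_NAK for unknown requests */
}
#endif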
4148
4149 static int
4150 intel_dp_check_mst_status(struct intel_dp *intel_dp)
4151 {
4152 bool bret;
4153
4154 if (intel_dp->is_mst) {
4155 u8 esi[16] = { 0 };
4156 int ret = 0;
4157 int retry;
4158 bool handled;
4159 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4160 go_again:
4161 if (bret) {
4162
4163 /* check link status - esi[10] = 0x200c */
4164 if (intel_dp->active_mst_links &&
4165 !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
4166 DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
4167 intel_dp_start_link_train(intel_dp);
4168 intel_dp_stop_link_train(intel_dp);
4169 }
4170
4171 DRM_DEBUG_KMS("got esi %3ph\n", esi);
4172 ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
4173
4174 if (handled) {
4175 for (retry = 0; retry < 3; retry++) {
4176 int wret;
4177 wret = drm_dp_dpcd_write(&intel_dp->aux,
4178 DP_SINK_COUNT_ESI+1,
4179 &esi[1], 3);
4180 if (wret == 3) {
4181 break;
4182 }
4183 }
4184
4185 bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
4186 if (bret) {
4187 DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
4188 goto go_again;
4189 }
4190 } else
4191 ret = 0;
4192
4193 return ret;
4194 } else {
4195 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4196 DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
4197 intel_dp->is_mst = false;
4198 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr, intel_dp->is_mst);
4199 /* send a hotplug event */
4200 drm_kms_helper_hotplug_event(intel_dig_port->base.base.dev);
4201 }
4202 }
4203 return -EINVAL;
4204 }
4205
4206 static void
4207 intel_dp_retrain_link(struct intel_dp *intel_dp)
4208 {
4209 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
4210 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
4211 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
4212
4213 /* Suppress underruns caused by re-training */
4214 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
4215 if (crtc->config->has_pch_encoder)
4216 intel_set_pch_fifo_underrun_reporting(dev_priv,
4217 intel_crtc_pch_transcoder(crtc), false);
4218
4219 intel_dp_start_link_train(intel_dp);
4220 intel_dp_stop_link_train(intel_dp);
4221
4222 /* Keep underrun reporting disabled until things are stable */
4223 intel_wait_for_vblank(dev_priv, crtc->pipe);
4224
4225 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
4226 if (crtc->config->has_pch_encoder)
4227 intel_set_pch_fifo_underrun_reporting(dev_priv,
4228 intel_crtc_pch_transcoder(crtc), true);
4229 }
4230
4231 static void
4232 intel_dp_check_link_status(struct intel_dp *intel_dp)
4233 {
4234 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4235 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4236 u8 link_status[DP_LINK_STATUS_SIZE];
4237
4238 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
4239
4240 if (!intel_dp_get_link_status(intel_dp, link_status)) {
4241 DRM_ERROR("Failed to get link status\n");
4242 return;
4243 }
4244
4245 if (!intel_encoder->base.crtc)
4246 return;
4247
4248 if (!to_intel_crtc(intel_encoder->base.crtc)->active)
4249 return;
4250
4251 /* FIXME: we need to synchronize this sort of stuff with hardware
4252 * readout. Currently fast link training doesn't work on boot-up. */
4253 if (!intel_dp->lane_count)
4254 return;
4255
4256 /* Retrain if Channel EQ or CR not ok */
4257 if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
4258 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
4259 intel_encoder->base.name);
4260
4261 intel_dp_retrain_link(intel_dp);
4262 }
4263 }
4264
4265 /*
4266 * According to DP spec
4267 * 5.1.2:
4268 * 1. Read DPCD
4269 * 2. Configure link according to Receiver Capabilities
4270 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
4271 * 4. Check link status on receipt of hot-plug interrupt
4272 *
4273 * intel_dp_short_pulse - handles short pulse interrupts
4274 * when full detection is not required.
4275 * Returns %true if the short pulse was handled and full detection
4276 * is not required, %false otherwise.
4277 */
4278 static bool
4279 intel_dp_short_pulse(struct intel_dp *intel_dp)
4280 {
4281 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4282 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4283 u8 sink_irq_vector = 0;
4284 u8 old_sink_count = intel_dp->sink_count;
4285 bool ret;
4286
4287 /*
4288 * Clear the compliance test variables so that values for the
4289 * next automated test request can be captured.
4290 */
4291 memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));
4292
4293 /*
4294 * Now read the DPCD to see if the sink is actually running.
4295 * If the current sink count doesn't match the value stored
4296 * earlier, or the DPCD read failed, we need to do full
4297 * detection.
4298 */
4299 ret = intel_dp_get_dpcd(intel_dp);
4300
4301 if ((old_sink_count != intel_dp->sink_count) || !ret) {
4302 /* No need to proceed if we are going to do full detect */
4303 return false;
4304 }
4305
4306 /* Try to read the source of the interrupt */
4307 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4308 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector) &&
4309 sink_irq_vector != 0) {
4310 /* Clear interrupt source */
4311 drm_dp_dpcd_writeb(&intel_dp->aux,
4312 DP_DEVICE_SERVICE_IRQ_VECTOR,
4313 sink_irq_vector);
4314
4315 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4316 intel_dp_handle_test_request(intel_dp);
4317 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4318 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4319 }
4320
4321 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
4322 intel_dp_check_link_status(intel_dp);
4323 drm_modeset_unlock(&dev->mode_config.connection_mutex);
4324 if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
4325 DRM_DEBUG_KMS("Link Training Compliance Test requested\n");
4326 /* Send a Hotplug Uevent to userspace to start modeset */
4327 drm_kms_helper_hotplug_event(intel_encoder->base.dev);
4328 }
4329
4330 return true;
4331 }
4332
4333 /* XXX this is probably wrong for multiple downstream ports */
4334 static enum drm_connector_status
4335 intel_dp_detect_dpcd(struct intel_dp *intel_dp)
4336 {
4337 struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
4338 uint8_t *dpcd = intel_dp->dpcd;
4339 uint8_t type;
4340
4341 if (lspcon->active)
4342 lspcon_resume(lspcon);
4343
4344 if (!intel_dp_get_dpcd(intel_dp))
4345 return connector_status_disconnected;
4346
4347 if (is_edp(intel_dp))
4348 return connector_status_connected;
4349
4350 /* if there's no downstream port, we're done */
4351 if (!drm_dp_is_branch(dpcd))
4352 return connector_status_connected;
4353
4354 /* If we're HPD-aware, SINK_COUNT changes dynamically */
4355 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4356 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
4357
4358 return intel_dp->sink_count ?
4359 connector_status_connected : connector_status_disconnected;
4360 }
4361
4362 if (intel_dp_can_mst(intel_dp))
4363 return connector_status_connected;
4364
4365 /* If no HPD, poke DDC gently */
4366 if (drm_probe_ddc(&intel_dp->aux.ddc))
4367 return connector_status_connected;
4368
4369 /* Well we tried, say unknown for unreliable port types */
4370 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
4371 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
4372 if (type == DP_DS_PORT_TYPE_VGA ||
4373 type == DP_DS_PORT_TYPE_NON_EDID)
4374 return connector_status_unknown;
4375 } else {
4376 type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
4377 DP_DWN_STRM_PORT_TYPE_MASK;
4378 if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
4379 type == DP_DWN_STRM_PORT_TYPE_OTHER)
4380 return connector_status_unknown;
4381 }
4382
4383 /* Anything else is out of spec, warn and ignore */
4384 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
4385 return connector_status_disconnected;
4386 }
4387
4388 static enum drm_connector_status
4389 edp_detect(struct intel_dp *intel_dp)
4390 {
4391 struct drm_device *dev = intel_dp_to_dev(intel_dp);
4392 struct drm_i915_private *dev_priv = to_i915(dev);
4393 enum drm_connector_status status;
4394
4395 status = intel_panel_detect(dev_priv);
4396 if (status == connector_status_unknown)
4397 status = connector_status_connected;
4398
4399 return status;
4400 }
4401
4402 static bool ibx_digital_port_connected(struct drm_i915_private *dev_priv,
4403 struct intel_digital_port *port)
4404 {
4405 u32 bit;
4406
4407 switch (port->port) {
4408 case PORT_A:
4409 return true;
4410 case PORT_B:
4411 bit = SDE_PORTB_HOTPLUG;
4412 break;
4413 case PORT_C:
4414 bit = SDE_PORTC_HOTPLUG;
4415 break;
4416 case PORT_D:
4417 bit = SDE_PORTD_HOTPLUG;
4418 break;
4419 default:
4420 MISSING_CASE(port->port);
4421 return false;
4422 }
4423
4424 return I915_READ(SDEISR) & bit;
4425 }
4426
4427 static bool cpt_digital_port_connected(struct drm_i915_private *dev_priv,
4428 struct intel_digital_port *port)
4429 {
4430 u32 bit;
4431
4432 switch (port->port) {
4433 case PORT_A:
4434 return true;
4435 case PORT_B:
4436 bit = SDE_PORTB_HOTPLUG_CPT;
4437 break;
4438 case PORT_C:
4439 bit = SDE_PORTC_HOTPLUG_CPT;
4440 break;
4441 case PORT_D:
4442 bit = SDE_PORTD_HOTPLUG_CPT;
4443 break;
4444 case PORT_E:
4445 bit = SDE_PORTE_HOTPLUG_SPT;
4446 break;
4447 default:
4448 MISSING_CASE(port->port);
4449 return false;
4450 }
4451
4452 return I915_READ(SDEISR) & bit;
4453 }
4454
4455 static bool g4x_digital_port_connected(struct drm_i915_private *dev_priv,
4456 struct intel_digital_port *port)
4457 {
4458 u32 bit;
4459
4460 switch (port->port) {
4461 case PORT_B:
4462 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4463 break;
4464 case PORT_C:
4465 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4466 break;
4467 case PORT_D:
4468 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4469 break;
4470 default:
4471 MISSING_CASE(port->port);
4472 return false;
4473 }
4474
4475 return I915_READ(PORT_HOTPLUG_STAT) & bit;
4476 }
4477
4478 static bool gm45_digital_port_connected(struct drm_i915_private *dev_priv,
4479 struct intel_digital_port *port)
4480 {
4481 u32 bit;
4482
4483 switch (port->port) {
4484 case PORT_B:
4485 bit = PORTB_HOTPLUG_LIVE_STATUS_GM45;
4486 break;
4487 case PORT_C:
4488 bit = PORTC_HOTPLUG_LIVE_STATUS_GM45;
4489 break;
4490 case PORT_D:
4491 bit = PORTD_HOTPLUG_LIVE_STATUS_GM45;
4492 break;
4493 default:
4494 MISSING_CASE(port->port);
4495 return false;
4496 }
4497
4498 return I915_READ(PORT_HOTPLUG_STAT) & bit;
4499 }
4500
4501 static bool bxt_digital_port_connected(struct drm_i915_private *dev_priv,
4502 struct intel_digital_port *intel_dig_port)
4503 {
4504 struct intel_encoder *intel_encoder = &intel_dig_port->base;
4505 enum port port;
4506 u32 bit;
4507
4508 intel_hpd_pin_to_port(intel_encoder->hpd_pin, &port);
4509 switch (port) {
4510 case PORT_A:
4511 bit = BXT_DE_PORT_HP_DDIA;
4512 break;
4513 case PORT_B:
4514 bit = BXT_DE_PORT_HP_DDIB;
4515 break;
4516 case PORT_C:
4517 bit = BXT_DE_PORT_HP_DDIC;
4518 break;
4519 default:
4520 MISSING_CASE(port);
4521 return false;
4522 }
4523
4524 return I915_READ(GEN8_DE_PORT_ISR) & bit;
4525 }
4526
4527 /*
4528 * intel_digital_port_connected - is the specified port connected?
4529 * @dev_priv: i915 private structure
4530 * @port: the port to test
4531 *
4532 * Return %true if @port is connected, %false otherwise.
4533 */
4534 bool intel_digital_port_connected(struct drm_i915_private *dev_priv,
4535 struct intel_digital_port *port)
4536 {
4537 if (HAS_PCH_IBX(dev_priv))
4538 return ibx_digital_port_connected(dev_priv, port);
4539 else if (HAS_PCH_SPLIT(dev_priv))
4540 return cpt_digital_port_connected(dev_priv, port);
4541 else if (IS_GEN9_LP(dev_priv))
4542 return bxt_digital_port_connected(dev_priv, port);
4543 else if (IS_GM45(dev_priv))
4544 return gm45_digital_port_connected(dev_priv, port);
4545 else
4546 return g4x_digital_port_connected(dev_priv, port);
4547 }
4548
4549 static struct edid *
4550 intel_dp_get_edid(struct intel_dp *intel_dp)
4551 {
4552 struct intel_connector *intel_connector = intel_dp->attached_connector;
4553
4554 /* use cached edid if we have one */
4555 if (intel_connector->edid) {
4556 /* invalid edid */
4557 if (IS_ERR(intel_connector->edid))
4558 return NULL;
4559
4560 return drm_edid_duplicate(intel_connector->edid);
4561 } else
4562 return drm_get_edid(&intel_connector->base,
4563 &intel_dp->aux.ddc);
4564 }
4565
4566 static void
4567 intel_dp_set_edid(struct intel_dp *intel_dp)
4568 {
4569 struct intel_connector *intel_connector = intel_dp->attached_connector;
4570 struct edid *edid;
4571
4572 intel_dp_unset_edid(intel_dp);
4573 edid = intel_dp_get_edid(intel_dp);
4574 intel_connector->detect_edid = edid;
4575
4576 if (intel_dp->force_audio != HDMI_AUDIO_AUTO)
4577 intel_dp->has_audio = intel_dp->force_audio == HDMI_AUDIO_ON;
4578 else
4579 intel_dp->has_audio = drm_detect_monitor_audio(edid);
4580 }
4581
4582 static void
4583 intel_dp_unset_edid(struct intel_dp *intel_dp)
4584 {
4585 struct intel_connector *intel_connector = intel_dp->attached_connector;
4586
4587 kfree(intel_connector->detect_edid);
4588 intel_connector->detect_edid = NULL;
4589
4590 intel_dp->has_audio = false;
4591 }
4592
4593 static enum drm_connector_status
4594 intel_dp_long_pulse(struct intel_connector *intel_connector)
4595 {
4596 struct drm_connector *connector = &intel_connector->base;
4597 struct intel_dp *intel_dp = intel_attached_dp(connector);
4598 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4599 struct intel_encoder *intel_encoder = &intel_dig_port->base;
4600 struct drm_device *dev = connector->dev;
4601 enum drm_connector_status status;
4602 u8 sink_irq_vector = 0;
4603
4604 intel_display_power_get(to_i915(dev), intel_dp->aux_power_domain);
4605
4606 /* Can't disconnect eDP, but you can close the lid... */
4607 if (is_edp(intel_dp))
4608 status = edp_detect(intel_dp);
4609 else if (intel_digital_port_connected(to_i915(dev),
4610 dp_to_dig_port(intel_dp)))
4611 status = intel_dp_detect_dpcd(intel_dp);
4612 else
4613 status = connector_status_disconnected;
4614
4615 if (status == connector_status_disconnected) {
4616 memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));
4617
4618 if (intel_dp->is_mst) {
4619 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
4620 intel_dp->is_mst,
4621 intel_dp->mst_mgr.mst_state);
4622 intel_dp->is_mst = false;
4623 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
4624 intel_dp->is_mst);
4625 }
4626
4627 goto out;
4628 }
4629
4630 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4631 intel_encoder->type = INTEL_OUTPUT_DP;
4632
4633 DRM_DEBUG_KMS("Display Port TPS3 support: source %s, sink %s\n",
4634 yesno(intel_dp_source_supports_hbr2(intel_dp)),
4635 yesno(drm_dp_tps3_supported(intel_dp->dpcd)));
4636
4637 if (intel_dp->reset_link_params) {
4638 /* Set the max lane count for sink */
4639 intel_dp->max_sink_lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
4640
4641 /* Set the max link rate for sink */
4642 intel_dp->max_sink_link_rate = intel_dp_max_sink_rate(intel_dp);
4643
4644 intel_dp->reset_link_params = false;
4645 }
4646
4647 intel_dp_print_rates(intel_dp);
4648
4649 intel_dp_read_desc(intel_dp);
4650
4651 intel_dp_configure_mst(intel_dp);
4652
4653 if (intel_dp->is_mst) {
4654 /*
4655 * If we are in MST mode then this connector
4656 * won't appear connected and won't have
4657 * an EDID of its own
4658 */
4659 status = connector_status_disconnected;
4660 goto out;
4661 } else if (connector->status == connector_status_connected) {
4662 /*
4663 * If display was connected already and is still connected
4664 * check link status; there have been known issues of
4665 * link loss triggering a long pulse.
4666 */
4667 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
4668 intel_dp_check_link_status(intel_dp);
4669 drm_modeset_unlock(&dev->mode_config.connection_mutex);
4670 goto out;
4671 }
4672
4673 /*
4674 * Clear the NACK and defer counts so we get their exact values
4675 * while reading the EDID, as required by Compliance tests
4676 * 4.2.2.4 and 4.2.2.5
4677 */
4678 intel_dp->aux.i2c_nack_count = 0;
4679 intel_dp->aux.i2c_defer_count = 0;
4680
4681 intel_dp_set_edid(intel_dp);
4682 if (is_edp(intel_dp) || intel_connector->detect_edid)
4683 status = connector_status_connected;
4684 intel_dp->detect_done = true;
4685
4686 /* Try to read the source of the interrupt */
4687 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
4688 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector) &&
4689 sink_irq_vector != 0) {
4690 /* Clear interrupt source */
4691 drm_dp_dpcd_writeb(&intel_dp->aux,
4692 DP_DEVICE_SERVICE_IRQ_VECTOR,
4693 sink_irq_vector);
4694
4695 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
4696 intel_dp_handle_test_request(intel_dp);
4697 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
4698 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
4699 }
4700
4701 out:
4702 if (status != connector_status_connected && !intel_dp->is_mst)
4703 intel_dp_unset_edid(intel_dp);
4704
4705 intel_display_power_put(to_i915(dev), intel_dp->aux_power_domain);
4706 return status;
4707 }
4708
4709 static enum drm_connector_status
4710 intel_dp_detect(struct drm_connector *connector, bool force)
4711 {
4712 struct intel_dp *intel_dp = intel_attached_dp(connector);
4713 enum drm_connector_status status = connector->status;
4714
4715 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4716 connector->base.id, connector->name);
4717
4718 /* If full detect is not performed yet, do a full detect */
4719 if (!intel_dp->detect_done)
4720 status = intel_dp_long_pulse(intel_dp->attached_connector);
4721
4722 intel_dp->detect_done = false;
4723
4724 return status;
4725 }
4726
4727 static void
4728 intel_dp_force(struct drm_connector *connector)
4729 {
4730 struct intel_dp *intel_dp = intel_attached_dp(connector);
4731 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
4732 struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
4733
4734 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
4735 connector->base.id, connector->name);
4736 intel_dp_unset_edid(intel_dp);
4737
4738 if (connector->status != connector_status_connected)
4739 return;
4740
4741 intel_display_power_get(dev_priv, intel_dp->aux_power_domain);
4742
4743 intel_dp_set_edid(intel_dp);
4744
4745 intel_display_power_put(dev_priv, intel_dp->aux_power_domain);
4746
4747 if (intel_encoder->type != INTEL_OUTPUT_EDP)
4748 intel_encoder->type = INTEL_OUTPUT_DP;
4749 }
4750
4751 static int intel_dp_get_modes(struct drm_connector *connector)
4752 {
4753 struct intel_connector *intel_connector = to_intel_connector(connector);
4754 struct edid *edid;
4755
4756 edid = intel_connector->detect_edid;
4757 if (edid) {
4758 int ret = intel_connector_update_modes(connector, edid);
4759 if (ret)
4760 return ret;
4761 }
4762
4763 /* if eDP has no EDID, fall back to fixed mode */
4764 if (is_edp(intel_attached_dp(connector)) &&
4765 intel_connector->panel.fixed_mode) {
4766 struct drm_display_mode *mode;
4767
4768 mode = drm_mode_duplicate(connector->dev,
4769 intel_connector->panel.fixed_mode);
4770 if (mode) {
4771 drm_mode_probed_add(connector, mode);
4772 return 1;
4773 }
4774 }
4775
4776 return 0;
4777 }
4778
4779 static bool
4780 intel_dp_detect_audio(struct drm_connector *connector)
4781 {
4782 bool has_audio = false;
4783 struct edid *edid;
4784
4785 edid = to_intel_connector(connector)->detect_edid;
4786 if (edid)
4787 has_audio = drm_detect_monitor_audio(edid);
4788
4789 return has_audio;
4790 }
4791
4792 static int
4793 intel_dp_set_property(struct drm_connector *connector,
4794 struct drm_property *property,
4795 uint64_t val)
4796 {
4797 struct drm_i915_private *dev_priv = to_i915(connector->dev);
4798 struct intel_connector *intel_connector = to_intel_connector(connector);
4799 struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
4800 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4801 int ret;
4802
4803 ret = drm_object_property_set_value(&connector->base, property, val);
4804 if (ret)
4805 return ret;
4806
4807 if (property == dev_priv->force_audio_property) {
4808 int i = val;
4809 bool has_audio;
4810
4811 if (i == intel_dp->force_audio)
4812 return 0;
4813
4814 intel_dp->force_audio = i;
4815
4816 if (i == HDMI_AUDIO_AUTO)
4817 has_audio = intel_dp_detect_audio(connector);
4818 else
4819 has_audio = (i == HDMI_AUDIO_ON);
4820
4821 if (has_audio == intel_dp->has_audio)
4822 return 0;
4823
4824 intel_dp->has_audio = has_audio;
4825 goto done;
4826 }
4827
4828 if (property == dev_priv->broadcast_rgb_property) {
4829 bool old_auto = intel_dp->color_range_auto;
4830 bool old_range = intel_dp->limited_color_range;
4831
4832 switch (val) {
4833 case INTEL_BROADCAST_RGB_AUTO:
4834 intel_dp->color_range_auto = true;
4835 break;
4836 case INTEL_BROADCAST_RGB_FULL:
4837 intel_dp->color_range_auto = false;
4838 intel_dp->limited_color_range = false;
4839 break;
4840 case INTEL_BROADCAST_RGB_LIMITED:
4841 intel_dp->color_range_auto = false;
4842 intel_dp->limited_color_range = true;
4843 break;
4844 default:
4845 return -EINVAL;
4846 }
4847
4848 if (old_auto == intel_dp->color_range_auto &&
4849 old_range == intel_dp->limited_color_range)
4850 return 0;
4851
4852 goto done;
4853 }
4854
4855 if (is_edp(intel_dp) &&
4856 property == connector->dev->mode_config.scaling_mode_property) {
4857 if (val == DRM_MODE_SCALE_NONE) {
4858 DRM_DEBUG_KMS("no scaling not supported\n");
4859 return -EINVAL;
4860 }
4861 if (HAS_GMCH_DISPLAY(dev_priv) &&
4862 val == DRM_MODE_SCALE_CENTER) {
4863 DRM_DEBUG_KMS("centering not supported\n");
4864 return -EINVAL;
4865 }
4866
4867 if (intel_connector->panel.fitting_mode == val) {
4868 /* the eDP scaling property is not changed */
4869 return 0;
4870 }
4871 intel_connector->panel.fitting_mode = val;
4872
4873 goto done;
4874 }
4875
4876 return -EINVAL;
4877
4878 done:
4879 if (intel_encoder->base.crtc)
4880 intel_crtc_restore_mode(intel_encoder->base.crtc);
4881
4882 return 0;
4883 }
4884
4885 static int
4886 intel_dp_connector_register(struct drm_connector *connector)
4887 {
4888 struct intel_dp *intel_dp = intel_attached_dp(connector);
4889 int ret;
4890
4891 ret = intel_connector_register(connector);
4892 if (ret)
4893 return ret;
4894
4895 i915_debugfs_connector_add(connector);
4896
4897 DRM_DEBUG_KMS("registering %s bus for %s\n",
4898 intel_dp->aux.name, connector->kdev->kobj.name);
4899
4900 intel_dp->aux.dev = connector->kdev;
4901 return drm_dp_aux_register(&intel_dp->aux);
4902 }
4903
4904 static void
4905 intel_dp_connector_unregister(struct drm_connector *connector)
4906 {
4907 drm_dp_aux_unregister(&intel_attached_dp(connector)->aux);
4908 intel_connector_unregister(connector);
4909 }
4910
4911 static void
4912 intel_dp_connector_destroy(struct drm_connector *connector)
4913 {
4914 struct intel_connector *intel_connector = to_intel_connector(connector);
4915
4916 kfree(intel_connector->detect_edid);
4917
4918 if (!IS_ERR_OR_NULL(intel_connector->edid))
4919 kfree(intel_connector->edid);
4920
4921 /* Can't call is_edp() since the encoder may have been destroyed
4922 * already. */
4923 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
4924 intel_panel_fini(&intel_connector->panel);
4925
4926 drm_connector_cleanup(connector);
4927 kfree(connector);
4928 }
4929
4930 void intel_dp_encoder_destroy(struct drm_encoder *encoder)
4931 {
4932 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
4933 struct intel_dp *intel_dp = &intel_dig_port->dp;
4934
4935 intel_dp_mst_encoder_cleanup(intel_dig_port);
4936 if (is_edp(intel_dp)) {
4937 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
4938 /*
4939 * vdd might still be enabled due to the delayed vdd off.
4940 * Make sure vdd is actually turned off here.
4941 */
4942 pps_lock(intel_dp);
4943 edp_panel_vdd_off_sync(intel_dp);
4944 pps_unlock(intel_dp);
4945
4946 if (intel_dp->edp_notifier.notifier_call) {
4947 unregister_reboot_notifier(&intel_dp->edp_notifier);
4948 intel_dp->edp_notifier.notifier_call = NULL;
4949 }
4950 }
4951
4952 intel_dp_aux_fini(intel_dp);
4953
4954 drm_encoder_cleanup(encoder);
4955 kfree(intel_dig_port);
4956 }
4957
4958 void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
4959 {
4960 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
4961
4962 if (!is_edp(intel_dp))
4963 return;
4964
4965 /*
4966 * vdd might still be enabled due to the delayed vdd off.
4967 * Make sure vdd is actually turned off here.
4968 */
4969 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
4970 pps_lock(intel_dp);
4971 edp_panel_vdd_off_sync(intel_dp);
4972 pps_unlock(intel_dp);
4973 }
4974
4975 static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
4976 {
4977 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
4978 struct drm_device *dev = intel_dig_port->base.base.dev;
4979 struct drm_i915_private *dev_priv = to_i915(dev);
4980
4981 lockdep_assert_held(&dev_priv->pps_mutex);
4982
4983 if (!edp_have_panel_vdd(intel_dp))
4984 return;
4985
4986 /*
4987 * The VDD bit needs a power domain reference, so if the bit is
4988 * already enabled when we boot or resume, grab this reference and
4989 * schedule a vdd off, so we don't hold on to the reference
4990 * indefinitely.
4991 */
4992 DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
4993 intel_display_power_get(dev_priv, intel_dp->aux_power_domain);
4994
4995 edp_panel_vdd_schedule_off(intel_dp);
4996 }
4997
4998 static enum pipe vlv_active_pipe(struct intel_dp *intel_dp)
4999 {
5000 struct drm_i915_private *dev_priv = to_i915(intel_dp_to_dev(intel_dp));
5001
5002 if ((intel_dp->DP & DP_PORT_EN) == 0)
5003 return INVALID_PIPE;
5004
5005 if (IS_CHERRYVIEW(dev_priv))
5006 return DP_PORT_TO_PIPE_CHV(intel_dp->DP);
5007 else
5008 return PORT_TO_PIPE(intel_dp->DP);
5009 }
5010
5011 void intel_dp_encoder_reset(struct drm_encoder *encoder)
5012 {
5013 struct drm_i915_private *dev_priv = to_i915(encoder->dev);
5014 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
5015 struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
5016
5017 if (!HAS_DDI(dev_priv))
5018 intel_dp->DP = I915_READ(intel_dp->output_reg);
5019
5020 if (lspcon->active)
5021 lspcon_resume(lspcon);
5022
5023 intel_dp->reset_link_params = true;
5024
5025 pps_lock(intel_dp);
5026
5027 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
5028 intel_dp->active_pipe = vlv_active_pipe(intel_dp);
5029
5030 if (is_edp(intel_dp)) {
5031 /* Reinit the power sequencer, in case BIOS did something with it. */
5032 intel_dp_pps_init(encoder->dev, intel_dp);
5033 intel_edp_panel_vdd_sanitize(intel_dp);
5034 }
5035
5036 pps_unlock(intel_dp);
5037 }
5038
5039 static const struct drm_connector_funcs intel_dp_connector_funcs = {
5040 .dpms = drm_atomic_helper_connector_dpms,
5041 .detect = intel_dp_detect,
5042 .force = intel_dp_force,
5043 .fill_modes = drm_helper_probe_single_connector_modes,
5044 .set_property = intel_dp_set_property,
5045 .atomic_get_property = intel_connector_atomic_get_property,
5046 .late_register = intel_dp_connector_register,
5047 .early_unregister = intel_dp_connector_unregister,
5048 .destroy = intel_dp_connector_destroy,
5049 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
5050 .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
5051 };
5052
5053 static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
5054 .get_modes = intel_dp_get_modes,
5055 .mode_valid = intel_dp_mode_valid,
5056 };
5057
5058 static const struct drm_encoder_funcs intel_dp_enc_funcs = {
5059 .reset = intel_dp_encoder_reset,
5060 .destroy = intel_dp_encoder_destroy,
5061 };
5062
5063 enum irqreturn
5064 intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
5065 {
5066 struct intel_dp *intel_dp = &intel_dig_port->dp;
5067 struct drm_device *dev = intel_dig_port->base.base.dev;
5068 struct drm_i915_private *dev_priv = to_i915(dev);
5069 enum irqreturn ret = IRQ_NONE;
5070
5071 if (intel_dig_port->base.type != INTEL_OUTPUT_EDP &&
5072 intel_dig_port->base.type != INTEL_OUTPUT_HDMI)
5073 intel_dig_port->base.type = INTEL_OUTPUT_DP;
5074
5075 if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
5076 /*
5077 * vdd off can generate a long pulse on eDP which
5078 * would require vdd on to handle it, and thus we
5079 * would end up in an endless cycle of
5080 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
5081 */
5082 DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
5083 port_name(intel_dig_port->port));
5084 return IRQ_HANDLED;
5085 }
5086
5087 DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
5088 port_name(intel_dig_port->port),
5089 long_hpd ? "long" : "short");
5090
5091 if (long_hpd) {
5092 intel_dp->reset_link_params = true;
5093 intel_dp->detect_done = false;
5094 return IRQ_NONE;
5095 }
5096
5097 intel_display_power_get(dev_priv, intel_dp->aux_power_domain);
5098
5099 if (intel_dp->is_mst) {
5100 if (intel_dp_check_mst_status(intel_dp) == -EINVAL) {
5101 /*
5102 * If we were in MST mode, and device is not
5103 * there, get out of MST mode
5104 */
5105 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
5106 intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
5107 intel_dp->is_mst = false;
5108 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
5109 intel_dp->is_mst);
5110 intel_dp->detect_done = false;
5111 goto put_power;
5112 }
5113 }
5114
5115 if (!intel_dp->is_mst) {
5116 if (!intel_dp_short_pulse(intel_dp)) {
5117 intel_dp->detect_done = false;
5118 goto put_power;
5119 }
5120 }
5121
5122 ret = IRQ_HANDLED;
5123
5124 put_power:
5125 intel_display_power_put(dev_priv, intel_dp->aux_power_domain);
5126
5127 return ret;
5128 }
5129
5130 /* check the VBT to see whether the eDP is on another port */
5131 bool intel_dp_is_edp(struct drm_i915_private *dev_priv, enum port port)
5132 {
5133 /*
5134 * eDP is not supported on g4x, so bail out early for a bit of
5135 * extra safety in case the VBT is bonkers.
5136 */
5137 if (INTEL_GEN(dev_priv) < 5)
5138 return false;
5139
5140 if (INTEL_GEN(dev_priv) < 9 && port == PORT_A)
5141 return true;
5142
5143 return intel_bios_is_port_edp(dev_priv, port);
5144 }
5145
5146 void
5147 intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
5148 {
5149 struct intel_connector *intel_connector = to_intel_connector(connector);
5150
5151 intel_attach_force_audio_property(connector);
5152 intel_attach_broadcast_rgb_property(connector);
5153 intel_dp->color_range_auto = true;
5154
5155 if (is_edp(intel_dp)) {
5156 drm_mode_create_scaling_mode_property(connector->dev);
5157 drm_object_attach_property(
5158 &connector->base,
5159 connector->dev->mode_config.scaling_mode_property,
5160 DRM_MODE_SCALE_ASPECT);
5161 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
5162 }
5163 }
5164
5165 static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
5166 {
5167 intel_dp->panel_power_off_time = ktime_get_boottime();
5168 intel_dp->last_power_on = jiffies;
5169 intel_dp->last_backlight_off = jiffies;
5170 }
5171
5172 static void
5173 intel_pps_readout_hw_state(struct drm_i915_private *dev_priv,
5174 struct intel_dp *intel_dp, struct edp_power_seq *seq)
5175 {
5176 u32 pp_on, pp_off, pp_div = 0, pp_ctl = 0;
5177 struct pps_registers regs;
5178
5179 intel_pps_get_registers(dev_priv, intel_dp, &regs);
5180
5181 /* Workaround: Need to write PP_CONTROL with the unlock key as
5182 * the very first thing. */
5183 pp_ctl = ironlake_get_pp_control(intel_dp);
5184
5185 pp_on = I915_READ(regs.pp_on);
5186 pp_off = I915_READ(regs.pp_off);
5187 if (!IS_GEN9_LP(dev_priv)) {
5188 I915_WRITE(regs.pp_ctrl, pp_ctl);
5189 pp_div = I915_READ(regs.pp_div);
5190 }
5191
5192 /* Pull timing values out of registers */
5193 seq->t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
5194 PANEL_POWER_UP_DELAY_SHIFT;
5195
5196 seq->t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
5197 PANEL_LIGHT_ON_DELAY_SHIFT;
5198
5199 seq->t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
5200 PANEL_LIGHT_OFF_DELAY_SHIFT;
5201
5202 seq->t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
5203 PANEL_POWER_DOWN_DELAY_SHIFT;
5204
5205 if (IS_GEN9_LP(dev_priv)) {
5206 u16 tmp = (pp_ctl & BXT_POWER_CYCLE_DELAY_MASK) >>
5207 BXT_POWER_CYCLE_DELAY_SHIFT;
5208 if (tmp > 0)
5209 seq->t11_t12 = (tmp - 1) * 1000;
5210 else
5211 seq->t11_t12 = 0;
5212 } else {
5213 seq->t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
5214 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
5215 }
5216 }
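/*
 * Illustration (not part of the driver): the PPS readout above is pure
 * mask-and-shift decoding of packed delay fields. A standalone sketch
 * of the pattern, with a hypothetical mask/shift layout for one field:
 */
#if 0
#include <stdint.h>
#include <assert.h>

static uint32_t reg_field(uint32_t reg, uint32_t mask, unsigned int shift)
{
	return (reg & mask) >> shift;
}

int main(void)
{
	/* hypothetical layout: power-up delay in bits 28:16, 100us units */
	const uint32_t mask = 0x1fff0000;
	const unsigned int shift = 16;
	uint32_t pp_on = 2100u << shift;	/* 2100 * 100us = 210ms */

	assert(reg_field(pp_on, mask, shift) == 2100);
	return 0;
}
#endif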
5217
5218 static void
5219 intel_pps_dump_state(const char *state_name, const struct edp_power_seq *seq)
5220 {
5221 DRM_DEBUG_KMS("%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
5222 state_name,
5223 seq->t1_t3, seq->t8, seq->t9, seq->t10, seq->t11_t12);
5224 }
5225
5226 static void
5227 intel_pps_verify_state(struct drm_i915_private *dev_priv,
5228 struct intel_dp *intel_dp)
5229 {
5230 struct edp_power_seq hw;
5231 struct edp_power_seq *sw = &intel_dp->pps_delays;
5232
5233 intel_pps_readout_hw_state(dev_priv, intel_dp, &hw);
5234
5235 if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 ||
5236 hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) {
5237 DRM_ERROR("PPS state mismatch\n");
5238 intel_pps_dump_state("sw", sw);
5239 intel_pps_dump_state("hw", &hw);
5240 }
5241 }
5242
5243 static void
5244 intel_dp_init_panel_power_sequencer(struct drm_device *dev,
5245 struct intel_dp *intel_dp)
5246 {
5247 struct drm_i915_private *dev_priv = to_i915(dev);
5248 struct edp_power_seq cur, vbt, spec,
5249 *final = &intel_dp->pps_delays;
5250
5251 lockdep_assert_held(&dev_priv->pps_mutex);
5252
5253 /* already initialized? */
5254 if (final->t11_t12 != 0)
5255 return;
5256
5257 intel_pps_readout_hw_state(dev_priv, intel_dp, &cur);
5258
5259 intel_pps_dump_state("cur", &cur);
5260
5261 vbt = dev_priv->vbt.edp.pps;
5262
5263 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
5264 * our hw here, which are all in 100usec. */
5265 spec.t1_t3 = 210 * 10;
5266 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
5267 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
5268 spec.t10 = 500 * 10;
5269 /* This one is special and actually in units of 100ms, but zero
5270 * based in the hw (so we need to add 100 ms). But the sw vbt
5271 * table multiplies it by 1000 to make it in units of 100usec,
5272 * too. */
5273 spec.t11_t12 = (510 + 100) * 10;
5274
5275 intel_pps_dump_state("vbt", &vbt);
5276
5277 /* Use the max of the register settings and vbt. If both are
5278 * unset, fall back to the spec limits. */
5279 #define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
5280 spec.field : \
5281 max(cur.field, vbt.field))
5282 assign_final(t1_t3);
5283 assign_final(t8);
5284 assign_final(t9);
5285 assign_final(t10);
5286 assign_final(t11_t12);
5287 #undef assign_final
5288
5289 #define get_delay(field) (DIV_ROUND_UP(final->field, 10))
5290 intel_dp->panel_power_up_delay = get_delay(t1_t3);
5291 intel_dp->backlight_on_delay = get_delay(t8);
5292 intel_dp->backlight_off_delay = get_delay(t9);
5293 intel_dp->panel_power_down_delay = get_delay(t10);
5294 intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
5295 #undef get_delay
5296
5297 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
5298 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
5299 intel_dp->panel_power_cycle_delay);
5300
5301 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
5302 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
5303
5304 /*
5305 * We override the HW backlight delays to 1 because we do manual waits
5306 * on them. For T8, even BSpec recommends doing it. For T9, if we
5307 * don't do this, we'll end up waiting for the backlight off delay
5308 * twice: once when we do the manual sleep, and once when we disable
5309 * the panel and wait for the PP_STATUS bit to become zero.
5310 */
5311 final->t8 = 1;
5312 final->t9 = 1;
5313 }
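/*
 * Illustration (not part of the driver): the sequencer setup above
 * takes max(hw, vbt) per delay, falls back to the spec limit when both
 * are zero, and converts the 100usec hardware units to ms by rounding
 * up. A standalone sketch for one field:
 */
#if 0
#include <assert.h>

static int merge_delay(int cur, int vbt, int spec)	/* 100us units */
{
	int final = cur > vbt ? cur : vbt;

	return final ? final : spec;	/* both unset: use spec limit */
}

static int delay_ms(int hundred_us)
{
	return (hundred_us + 9) / 10;	/* DIV_ROUND_UP(x, 10) */
}

int main(void)
{
	assert(merge_delay(0, 0, 2100) == 2100);	/* spec fallback */
	assert(delay_ms(2100) == 210);			/* 210ms power up */
	return 0;
}
#endif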
5314
5315 static void
5316 intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
5317 struct intel_dp *intel_dp,
5318 bool force_disable_vdd)
5319 {
5320 struct drm_i915_private *dev_priv = to_i915(dev);
5321 u32 pp_on, pp_off, pp_div, port_sel = 0;
5322 int div = dev_priv->rawclk_freq / 1000;
5323 struct pps_registers regs;
5324 enum port port = dp_to_dig_port(intel_dp)->port;
5325 const struct edp_power_seq *seq = &intel_dp->pps_delays;
5326
5327 lockdep_assert_held(&dev_priv->pps_mutex);
5328
5329 intel_pps_get_registers(dev_priv, intel_dp, &regs);
5330
5331 /*
5332 * On some VLV machines the BIOS can leave the VDD
5333 * enabled even on power sequencers which aren't
5334 * hooked up to any port. This would mess up the
5335 * power domain tracking the first time we pick
5336 * one of these power sequencers for use since
5337 * edp_panel_vdd_on() would notice that the VDD was
5338 * already on and therefore wouldn't grab the power
5339 * domain reference. Disable VDD first to avoid this.
5340 * This also avoids spuriously turning the VDD on as
5341 * soon as the new power sequencer gets initialized.
5342 */
5343 if (force_disable_vdd) {
5344 u32 pp = ironlake_get_pp_control(intel_dp);
5345
5346 WARN(pp & PANEL_POWER_ON, "Panel power already on\n");
5347
5348 if (pp & EDP_FORCE_VDD)
5349 DRM_DEBUG_KMS("VDD already on, disabling first\n");
5350
5351 pp &= ~EDP_FORCE_VDD;
5352
5353 I915_WRITE(regs.pp_ctrl, pp);
5354 }
5355
5356 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
5357 (seq->t8 << PANEL_LIGHT_ON_DELAY_SHIFT);
5358 pp_off = (seq->t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
5359 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
5360 /* Compute the divisor for the pp clock, simply match the Bspec
5361 * formula. */
5362 if (IS_GEN9_LP(dev_priv)) {
5363 pp_div = I915_READ(regs.pp_ctrl);
5364 pp_div &= ~BXT_POWER_CYCLE_DELAY_MASK;
5365 pp_div |= (DIV_ROUND_UP((seq->t11_t12 + 1), 1000)
5366 << BXT_POWER_CYCLE_DELAY_SHIFT);
5367 } else {
5368 pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
5369 pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
5370 << PANEL_POWER_CYCLE_DELAY_SHIFT);
5371 }
5372
5373 /* Haswell doesn't have any port selection bits for the panel
5374 * power sequencer any more. */
5375 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
5376 port_sel = PANEL_PORT_SELECT_VLV(port);
5377 } else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
5378 if (port == PORT_A)
5379 port_sel = PANEL_PORT_SELECT_DPA;
5380 else
5381 port_sel = PANEL_PORT_SELECT_DPD;
5382 }
5383
5384 pp_on |= port_sel;
5385
5386 I915_WRITE(regs.pp_on, pp_on);
5387 I915_WRITE(regs.pp_off, pp_off);
5388 if (IS_GEN9_LP(dev_priv))
5389 I915_WRITE(regs.pp_ctrl, pp_div);
5390 else
5391 I915_WRITE(regs.pp_div, pp_div);
5392
5393 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
5394 I915_READ(regs.pp_on),
5395 I915_READ(regs.pp_off),
5396 IS_GEN9_LP(dev_priv) ?
5397 (I915_READ(regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK) :
5398 I915_READ(regs.pp_div));
5399 }
5400
5401 static void intel_dp_pps_init(struct drm_device *dev,
5402 struct intel_dp *intel_dp)
5403 {
5404 struct drm_i915_private *dev_priv = to_i915(dev);
5405
5406 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
5407 vlv_initial_power_sequencer_setup(intel_dp);
5408 } else {
5409 intel_dp_init_panel_power_sequencer(dev, intel_dp);
5410 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp, false);
5411 }
5412 }
5413
5414 /**
5415 * intel_dp_set_drrs_state - program registers for RR switch to take effect
5416 * @dev_priv: i915 device
5417 * @crtc_state: a pointer to the active intel_crtc_state
5418 * @refresh_rate: RR to be programmed
5419 *
5420 * This function gets called when refresh rate (RR) has to be changed from
5421 * one frequency to another. Switches can be between high and low RR
5422 * supported by the panel or to any other RR based on media playback (in
5423 * this case, RR value needs to be passed from user space).
5424 *
5425 * The caller of this function needs to take a lock on dev_priv->drrs.
5426 */
5427 static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
5428 struct intel_crtc_state *crtc_state,
5429 int refresh_rate)
5430 {
5431 struct intel_encoder *encoder;
5432 struct intel_digital_port *dig_port = NULL;
5433 struct intel_dp *intel_dp = dev_priv->drrs.dp;
5434 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
5435 enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
5436
5437 if (refresh_rate <= 0) {
5438 DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
5439 return;
5440 }
5441
5442 if (intel_dp == NULL) {
5443 DRM_DEBUG_KMS("DRRS not supported.\n");
5444 return;
5445 }
5446
5447 /*
5448 * FIXME: This needs proper synchronization with psr state for some
5449 * platforms that cannot have PSR and DRRS enabled at the same time.
5450 */
5451
5452 dig_port = dp_to_dig_port(intel_dp);
5453 encoder = &dig_port->base;
5454 intel_crtc = to_intel_crtc(encoder->base.crtc);
5455
5456 if (!intel_crtc) {
5457 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
5458 return;
5459 }
5460
5461 if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
5462 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
5463 return;
5464 }
5465
5466 if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
5467 refresh_rate)
5468 index = DRRS_LOW_RR;
5469
5470 if (index == dev_priv->drrs.refresh_rate_type) {
5471 DRM_DEBUG_KMS(
5472 "DRRS requested for previously set RR...ignoring\n");
5473 return;
5474 }
5475
5476 if (!crtc_state->base.active) {
5477 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
5478 return;
5479 }
5480
5481 if (INTEL_GEN(dev_priv) >= 8 && !IS_CHERRYVIEW(dev_priv)) {
5482 switch (index) {
5483 case DRRS_HIGH_RR:
5484 intel_dp_set_m_n(intel_crtc, M1_N1);
5485 break;
5486 case DRRS_LOW_RR:
5487 intel_dp_set_m_n(intel_crtc, M2_N2);
5488 break;
5489 case DRRS_MAX_RR:
5490 default:
5491 DRM_ERROR("Unsupported refreshrate type\n");
5492 }
5493 } else if (INTEL_GEN(dev_priv) > 6) {
5494 i915_reg_t reg = PIPECONF(crtc_state->cpu_transcoder);
5495 u32 val;
5496
5497 val = I915_READ(reg);
5498 if (index > DRRS_HIGH_RR) {
5499 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
5500 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5501 else
5502 val |= PIPECONF_EDP_RR_MODE_SWITCH;
5503 } else {
5504 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
5505 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
5506 else
5507 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
5508 }
5509 I915_WRITE(reg, val);
5510 }
5511
5512 dev_priv->drrs.refresh_rate_type = index;
5513
5514 DRM_DEBUG_KMS("eDP refresh rate set to %d Hz\n", refresh_rate);
5515 }
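/*
 * Illustrative sketch (not part of the driver): per the kernel-doc above,
 * callers must hold dev_priv->drrs.mutex. A switch to the panel's low
 * refresh rate, mirroring intel_edp_drrs_downclock_work() below, looks
 * like:
 *
 *	mutex_lock(&dev_priv->drrs.mutex);
 *	intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
 *		intel_dp->attached_connector->panel.downclock_mode->vrefresh);
 *	mutex_unlock(&dev_priv->drrs.mutex);
 */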
5516
5517 /**
5518 * intel_edp_drrs_enable - init drrs struct if supported
5519 * @intel_dp: DP struct
5520 * @crtc_state: A pointer to the active crtc state.
5521 *
5522 * Initializes frontbuffer_bits and drrs.dp
5523 */
5524 void intel_edp_drrs_enable(struct intel_dp *intel_dp,
5525 struct intel_crtc_state *crtc_state)
5526 {
5527 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5528 struct drm_i915_private *dev_priv = to_i915(dev);
5529
5530 if (!crtc_state->has_drrs) {
5531 DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
5532 return;
5533 }
5534
5535 mutex_lock(&dev_priv->drrs.mutex);
5536 if (WARN_ON(dev_priv->drrs.dp)) {
5537 DRM_ERROR("DRRS already enabled\n");
5538 goto unlock;
5539 }
5540
5541 dev_priv->drrs.busy_frontbuffer_bits = 0;
5542
5543 dev_priv->drrs.dp = intel_dp;
5544
5545 unlock:
5546 mutex_unlock(&dev_priv->drrs.mutex);
5547 }
5548
5549 /**
5550 * intel_edp_drrs_disable - Disable DRRS
5551 * @intel_dp: DP struct
5552 * @old_crtc_state: Pointer to old crtc_state.
5553 * The counterpart of intel_edp_drrs_enable().
5554 */
5555 void intel_edp_drrs_disable(struct intel_dp *intel_dp,
5556 struct intel_crtc_state *old_crtc_state)
5557 {
5558 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5559 struct drm_i915_private *dev_priv = to_i915(dev);
5560
5561 if (!old_crtc_state->has_drrs)
5562 return;
5563
5564 mutex_lock(&dev_priv->drrs.mutex);
5565 if (!dev_priv->drrs.dp) {
5566 mutex_unlock(&dev_priv->drrs.mutex);
5567 return;
5568 }
5569
5570 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5571 intel_dp_set_drrs_state(dev_priv, old_crtc_state,
5572 intel_dp->attached_connector->panel.fixed_mode->vrefresh);
5573
5574 dev_priv->drrs.dp = NULL;
5575 mutex_unlock(&dev_priv->drrs.mutex);
5576
5577 cancel_delayed_work_sync(&dev_priv->drrs.work);
5578 }
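/*
 * Pairing sketch (assumption: the actual call sites live in the encoder
 * enable/disable paths outside this file): the two entry points above
 * bracket the lifetime of a DRRS-capable eDP pipe.
 *
 *	intel_edp_drrs_enable(intel_dp, pipe_config);      <- pipe enabled
 *	...
 *	intel_edp_drrs_disable(intel_dp, old_crtc_state);  <- pipe going down
 */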
5579
5580 static void intel_edp_drrs_downclock_work(struct work_struct *work)
5581 {
5582 struct drm_i915_private *dev_priv =
5583 container_of(work, typeof(*dev_priv), drrs.work.work);
5584 struct intel_dp *intel_dp;
5585
5586 mutex_lock(&dev_priv->drrs.mutex);
5587
5588 intel_dp = dev_priv->drrs.dp;
5589
5590 if (!intel_dp)
5591 goto unlock;
5592
5593 /*
5594 * The delayed work can race with an invalidate, hence we need to
5595 * recheck.
5596 */
5597
5598 if (dev_priv->drrs.busy_frontbuffer_bits)
5599 goto unlock;
5600
5601 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR) {
5602 struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
5603
5604 intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
5605 intel_dp->attached_connector->panel.downclock_mode->vrefresh);
5606 }
5607
5608 unlock:
5609 mutex_unlock(&dev_priv->drrs.mutex);
5610 }
5611
5612 /**
5613 * intel_edp_drrs_invalidate - Disable Idleness DRRS
5614 * @dev_priv: i915 device
5615 * @frontbuffer_bits: frontbuffer plane tracking bits
5616 *
5617 * This function gets called every time rendering on the given planes starts.
5618 * Hence DRRS needs to be upclocked (LOW_RR -> HIGH_RR).
5619 *
5620 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5621 */
5622 void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
5623 unsigned int frontbuffer_bits)
5624 {
5625 struct drm_crtc *crtc;
5626 enum pipe pipe;
5627
5628 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
5629 return;
5630
5631 cancel_delayed_work(&dev_priv->drrs.work);
5632
5633 mutex_lock(&dev_priv->drrs.mutex);
5634 if (!dev_priv->drrs.dp) {
5635 mutex_unlock(&dev_priv->drrs.mutex);
5636 return;
5637 }
5638
5639 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5640 pipe = to_intel_crtc(crtc)->pipe;
5641
5642 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5643 dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
5644
5645 /* an invalidate means the screen is busy, hence upclock */
5646 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5647 intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
5648 dev_priv->drrs.dp->attached_connector->panel.fixed_mode->vrefresh);
5649
5650 mutex_unlock(&dev_priv->drrs.mutex);
5651 }
5652
5653 /**
5654 * intel_edp_drrs_flush - Restart Idleness DRRS
5655 * @dev_priv: i915 device
5656 * @frontbuffer_bits: frontbuffer plane tracking bits
5657 *
5658 * This function gets called every time rendering on the given planes has
5659 * completed, or a flip on a crtc has completed. DRRS should then be
5660 * upclocked (LOW_RR -> HIGH_RR), and idleness detection should be
5661 * restarted if no other planes are dirty.
5662 *
5663 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
5664 */
5665 void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
5666 unsigned int frontbuffer_bits)
5667 {
5668 struct drm_crtc *crtc;
5669 enum pipe pipe;
5670
5671 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
5672 return;
5673
5674 cancel_delayed_work(&dev_priv->drrs.work);
5675
5676 mutex_lock(&dev_priv->drrs.mutex);
5677 if (!dev_priv->drrs.dp) {
5678 mutex_unlock(&dev_priv->drrs.mutex);
5679 return;
5680 }
5681
5682 crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
5683 pipe = to_intel_crtc(crtc)->pipe;
5684
5685 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
5686 dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
5687
5688 /* a flush means the screen was just updated, hence upclock */
5689 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
5690 intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
5691 dev_priv->drrs.dp->attached_connector->panel.fixed_mode->vrefresh);
5692
5693 /*
5694 * flush also means no more activity hence schedule downclock, if all
5695 * other fbs are quiescent too
5696 */
5697 if (!dev_priv->drrs.busy_frontbuffer_bits)
5698 schedule_delayed_work(&dev_priv->drrs.work,
5699 msecs_to_jiffies(1000));
5700 mutex_unlock(&dev_priv->drrs.mutex);
5701 }
5702
5703 /**
5704 * DOC: Display Refresh Rate Switching (DRRS)
5705 *
5706 * Display Refresh Rate Switching (DRRS) is a power conservation feature
5707 * which enables switching dynamically between low and high refresh
5708 * rates, based on the usage scenario. This feature is applicable to
5709 * internal panels.
5710 *
5711 * Indication that the panel supports DRRS is given by the panel EDID, which
5712 * would list multiple refresh rates for one resolution.
5713 *
5714 * DRRS is of 2 types - static and seamless.
5715 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
5716 * (may appear as a blink on screen) and is used in the dock/undock scenario.
5717 * Seamless DRRS involves changing RR without any visual effect to the user
5718 * and can be used during normal system usage. This is done by programming
5719 * certain registers.
5720 *
5721 * Support for static/seamless DRRS may be indicated in the VBT based on
5722 * inputs from the panel spec.
5723 *
5724 * DRRS saves power by switching to low RR based on usage scenarios.
5725 *
5726 * The implementation is based on frontbuffer tracking. When
5727 * there is a disturbance on the screen triggered by user activity or a periodic
5728 * system activity, DRRS is disabled (RR is changed to high RR). When there is
5729 * no movement on screen, after a timeout of 1 second, a switch to low RR is
5730 * made.
5731 *
5732 * For integration with frontbuffer tracking code, intel_edp_drrs_invalidate()
5733 * and intel_edp_drrs_flush() are called.
5734 *
5735 * DRRS can be further extended to support other internal panels and also
5736 * the scenario of video playback wherein RR is set based on the rate
5737 * requested by userspace.
5738 */
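/*
 * Integration sketch (illustrative; the real calls are made by the
 * frontbuffer tracking code, not by this file): rendering activity
 * upclocks the panel, and a completed flush or flip restarts the one
 * second idleness timer that eventually downclocks it again.
 *
 *	intel_edp_drrs_invalidate(dev_priv, frontbuffer_bits);  <- render starts
 *	intel_edp_drrs_flush(dev_priv, frontbuffer_bits);       <- render done/flip
 */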
5739
5740 /**
5741 * intel_dp_drrs_init - Init basic DRRS work and mutex.
5742 * @intel_connector: eDP connector
5743 * @fixed_mode: preferred mode of panel
5744 *
5745 * This function is called only once at driver load to initialize the
5746 * basic DRRS state.
5747 *
5748 * Returns:
5749 * Downclock mode if the panel supports it, else NULL.
5750 * DRRS support is determined by the presence of downclock mode (apart
5751 * from VBT setting).
5752 */
5753 static struct drm_display_mode *
5754 intel_dp_drrs_init(struct intel_connector *intel_connector,
5755 struct drm_display_mode *fixed_mode)
5756 {
5757 struct drm_connector *connector = &intel_connector->base;
5758 struct drm_device *dev = connector->dev;
5759 struct drm_i915_private *dev_priv = to_i915(dev);
5760 struct drm_display_mode *downclock_mode = NULL;
5761
5762 INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
5763 mutex_init(&dev_priv->drrs.mutex);
5764
5765 if (INTEL_GEN(dev_priv) <= 6) {
5766 DRM_DEBUG_KMS("DRRS is only supported for Gen7 and above\n");
5767 return NULL;
5768 }
5769
5770 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
5771 DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
5772 return NULL;
5773 }
5774
5775 downclock_mode = intel_find_panel_downclock(dev_priv, fixed_mode,
5776 connector);
5777
5778 if (!downclock_mode) {
5779 DRM_DEBUG_KMS("Downclock mode not found. DRRS not supported\n");
5780 return NULL;
5781 }
5782
5783 dev_priv->drrs.type = dev_priv->vbt.drrs_type;
5784
5785 dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
5786 DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
5787 return downclock_mode;
5788 }
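/*
 * Expected use, matching intel_edp_init_connector() below: a non-NULL
 * return value both provides the panel's downclock mode and indicates
 * that seamless DRRS bookkeeping has been set up.
 *
 *	downclock_mode = intel_dp_drrs_init(intel_connector, fixed_mode);
 */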
5789
5790 static bool intel_edp_init_connector(struct intel_dp *intel_dp,
5791 struct intel_connector *intel_connector)
5792 {
5793 struct drm_connector *connector = &intel_connector->base;
5794 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
5795 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5796 struct drm_device *dev = intel_encoder->base.dev;
5797 struct drm_i915_private *dev_priv = to_i915(dev);
5798 struct drm_display_mode *fixed_mode = NULL;
5799 struct drm_display_mode *downclock_mode = NULL;
5800 bool has_dpcd;
5801 struct drm_display_mode *scan;
5802 struct edid *edid;
5803 enum pipe pipe = INVALID_PIPE;
5804
5805 if (!is_edp(intel_dp))
5806 return true;
5807
5808 /*
5809 * On IBX/CPT we may get here with LVDS already registered. Since the
5810 * driver uses the only internal power sequencer available for both
5811 * eDP and LVDS, bail out early in this case to prevent interfering
5812 * with an already powered-on LVDS power sequencer.
5813 */
5814 if (intel_get_lvds_encoder(dev)) {
5815 WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
5816 DRM_INFO("LVDS was detected, not registering eDP\n");
5817
5818 return false;
5819 }
5820
5821 pps_lock(intel_dp);
5822
5823 intel_dp_init_panel_power_timestamps(intel_dp);
5824 intel_dp_pps_init(dev, intel_dp);
5825 intel_edp_panel_vdd_sanitize(intel_dp);
5826
5827 pps_unlock(intel_dp);
5828
5829 /* Cache DPCD and EDID for edp. */
5830 has_dpcd = intel_edp_init_dpcd(intel_dp);
5831
5832 if (!has_dpcd) {
5833 /* if this fails, presume the device is a ghost */
5834 DRM_INFO("failed to retrieve link info, disabling eDP\n");
5835 goto out_vdd_off;
5836 }
5837
5838 mutex_lock(&dev->mode_config.mutex);
5839 edid = drm_get_edid(connector, &intel_dp->aux.ddc);
5840 if (edid) {
5841 if (drm_add_edid_modes(connector, edid)) {
5842 drm_mode_connector_update_edid_property(connector,
5843 edid);
5844 drm_edid_to_eld(connector, edid);
5845 } else {
5846 kfree(edid);
5847 edid = ERR_PTR(-EINVAL);
5848 }
5849 } else {
5850 edid = ERR_PTR(-ENOENT);
5851 }
5852 intel_connector->edid = edid;
5853
5854 /* prefer fixed mode from EDID if available */
5855 list_for_each_entry(scan, &connector->probed_modes, head) {
5856 if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
5857 fixed_mode = drm_mode_duplicate(dev, scan);
5858 downclock_mode = intel_dp_drrs_init(
5859 intel_connector, fixed_mode);
5860 break;
5861 }
5862 }
5863
5864 /* fallback to VBT if available for eDP */
5865 if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
5866 fixed_mode = drm_mode_duplicate(dev,
5867 dev_priv->vbt.lfp_lvds_vbt_mode);
5868 if (fixed_mode) {
5869 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
5870 connector->display_info.width_mm = fixed_mode->width_mm;
5871 connector->display_info.height_mm = fixed_mode->height_mm;
5872 }
5873 }
5874 mutex_unlock(&dev->mode_config.mutex);
5875
5876 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
5877 intel_dp->edp_notifier.notifier_call = edp_notify_handler;
5878 register_reboot_notifier(&intel_dp->edp_notifier);
5879
5880 /*
5881 * Figure out the current pipe for the initial backlight setup.
5882 * If the current pipe isn't valid, try the PPS pipe, and if that
5883 * fails just assume pipe A.
5884 */
5885 pipe = vlv_active_pipe(intel_dp);
5886
5887 if (pipe != PIPE_A && pipe != PIPE_B)
5888 pipe = intel_dp->pps_pipe;
5889
5890 if (pipe != PIPE_A && pipe != PIPE_B)
5891 pipe = PIPE_A;
5892
5893 DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
5894 pipe_name(pipe));
5895 }
5896
5897 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
5898 intel_connector->panel.backlight.power = intel_edp_backlight_power;
5899 intel_panel_setup_backlight(connector, pipe);
5900
5901 return true;
5902
5903 out_vdd_off:
5904 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
5905 /*
5906 * vdd might still be enabled due to the delayed vdd off.
5907 * Make sure vdd is actually turned off here.
5908 */
5909 pps_lock(intel_dp);
5910 edp_panel_vdd_off_sync(intel_dp);
5911 pps_unlock(intel_dp);
5912
5913 return false;
5914 }
5915
5916 /* Set up the hotplug pin and aux power domain. */
5917 static void
5918 intel_dp_init_connector_port_info(struct intel_digital_port *intel_dig_port)
5919 {
5920 struct intel_encoder *encoder = &intel_dig_port->base;
5921 struct intel_dp *intel_dp = &intel_dig_port->dp;
5922
5923 switch (intel_dig_port->port) {
5924 case PORT_A:
5925 encoder->hpd_pin = HPD_PORT_A;
5926 intel_dp->aux_power_domain = POWER_DOMAIN_AUX_A;
5927 break;
5928 case PORT_B:
5929 encoder->hpd_pin = HPD_PORT_B;
5930 intel_dp->aux_power_domain = POWER_DOMAIN_AUX_B;
5931 break;
5932 case PORT_C:
5933 encoder->hpd_pin = HPD_PORT_C;
5934 intel_dp->aux_power_domain = POWER_DOMAIN_AUX_C;
5935 break;
5936 case PORT_D:
5937 encoder->hpd_pin = HPD_PORT_D;
5938 intel_dp->aux_power_domain = POWER_DOMAIN_AUX_D;
5939 break;
5940 case PORT_E:
5941 encoder->hpd_pin = HPD_PORT_E;
5942
5943 /* FIXME: Check VBT for actual wiring of PORT E */
5944 intel_dp->aux_power_domain = POWER_DOMAIN_AUX_D;
5945 break;
5946 default:
5947 MISSING_CASE(intel_dig_port->port);
5948 }
5949 }
5950
5951 bool
5952 intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
5953 struct intel_connector *intel_connector)
5954 {
5955 struct drm_connector *connector = &intel_connector->base;
5956 struct intel_dp *intel_dp = &intel_dig_port->dp;
5957 struct intel_encoder *intel_encoder = &intel_dig_port->base;
5958 struct drm_device *dev = intel_encoder->base.dev;
5959 struct drm_i915_private *dev_priv = to_i915(dev);
5960 enum port port = intel_dig_port->port;
5961 int type;
5962
5963 if (WARN(intel_dig_port->max_lanes < 1,
5964 "Not enough lanes (%d) for DP on port %c\n",
5965 intel_dig_port->max_lanes, port_name(port)))
5966 return false;
5967
5968 intel_dp_set_source_rates(intel_dp);
5969
5970 intel_dp->reset_link_params = true;
5971 intel_dp->pps_pipe = INVALID_PIPE;
5972 intel_dp->active_pipe = INVALID_PIPE;
5973
5974 /* intel_dp vfuncs */
5975 if (INTEL_GEN(dev_priv) >= 9)
5976 intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
5977 else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
5978 intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
5979 else if (HAS_PCH_SPLIT(dev_priv))
5980 intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
5981 else
5982 intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;
5983
5984 if (INTEL_GEN(dev_priv) >= 9)
5985 intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
5986 else
5987 intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;
5988
5989 if (HAS_DDI(dev_priv))
5990 intel_dp->prepare_link_retrain = intel_ddi_prepare_link_retrain;
5991
5992 /* Preserve the current hw state. */
5993 intel_dp->DP = I915_READ(intel_dp->output_reg);
5994 intel_dp->attached_connector = intel_connector;
5995
5996 if (intel_dp_is_edp(dev_priv, port))
5997 type = DRM_MODE_CONNECTOR_eDP;
5998 else
5999 type = DRM_MODE_CONNECTOR_DisplayPort;
6000
6001 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
6002 intel_dp->active_pipe = vlv_active_pipe(intel_dp);
6003
6004 /*
6005 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
6006 * for DP the encoder type can be set by the caller to
6007 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
6008 */
6009 if (type == DRM_MODE_CONNECTOR_eDP)
6010 intel_encoder->type = INTEL_OUTPUT_EDP;
6011
6012 /* eDP only on port B and/or C on vlv/chv */
6013 if (WARN_ON((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
6014 is_edp(intel_dp) && port != PORT_B && port != PORT_C))
6015 return false;
6016
6017 DRM_DEBUG_KMS("Adding %s connector on port %c\n",
6018 type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
6019 port_name(port));
6020
6021 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
6022 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
6023
6024 connector->interlace_allowed = true;
6025 connector->doublescan_allowed = 0;
6026
6027 intel_dp_init_connector_port_info(intel_dig_port);
6028
6029 intel_dp_aux_init(intel_dp);
6030
6031 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
6032 edp_panel_vdd_work);
6033
6034 intel_connector_attach_encoder(intel_connector, intel_encoder);
6035
6036 if (HAS_DDI(dev_priv))
6037 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
6038 else
6039 intel_connector->get_hw_state = intel_connector_get_hw_state;
6040
6041 /* init MST on ports that can support it */
6042 if (HAS_DP_MST(dev_priv) && !is_edp(intel_dp) &&
6043 (port == PORT_B || port == PORT_C || port == PORT_D))
6044 intel_dp_mst_encoder_init(intel_dig_port,
6045 intel_connector->base.base.id);
6046
6047 if (!intel_edp_init_connector(intel_dp, intel_connector)) {
6048 intel_dp_aux_fini(intel_dp);
6049 intel_dp_mst_encoder_cleanup(intel_dig_port);
6050 goto fail;
6051 }
6052
6053 intel_dp_add_properties(intel_dp, connector);
6054
6055 /* For the G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
6056 * 0xd. Failure to do so will result in spurious interrupts being
6057 * generated on the port when a cable is not attached.
6058 */
6059 if (IS_G4X(dev_priv) && !IS_GM45(dev_priv)) {
6060 u32 temp = I915_READ(PEG_BAND_GAP_DATA);
6061 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
6062 }
6063
6064 return true;
6065
6066 fail:
6067 drm_connector_cleanup(connector);
6068
6069 return false;
6070 }
6071
6072 bool intel_dp_init(struct drm_i915_private *dev_priv,
6073 i915_reg_t output_reg,
6074 enum port port)
6075 {
6076 struct intel_digital_port *intel_dig_port;
6077 struct intel_encoder *intel_encoder;
6078 struct drm_encoder *encoder;
6079 struct intel_connector *intel_connector;
6080
6081 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
6082 if (!intel_dig_port)
6083 return false;
6084
6085 intel_connector = intel_connector_alloc();
6086 if (!intel_connector)
6087 goto err_connector_alloc;
6088
6089 intel_encoder = &intel_dig_port->base;
6090 encoder = &intel_encoder->base;
6091
6092 if (drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
6093 &intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS,
6094 "DP %c", port_name(port)))
6095 goto err_encoder_init;
6096
6097 intel_encoder->compute_config = intel_dp_compute_config;
6098 intel_encoder->disable = intel_disable_dp;
6099 intel_encoder->get_hw_state = intel_dp_get_hw_state;
6100 intel_encoder->get_config = intel_dp_get_config;
6101 intel_encoder->suspend = intel_dp_encoder_suspend;
6102 if (IS_CHERRYVIEW(dev_priv)) {
6103 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
6104 intel_encoder->pre_enable = chv_pre_enable_dp;
6105 intel_encoder->enable = vlv_enable_dp;
6106 intel_encoder->post_disable = chv_post_disable_dp;
6107 intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
6108 } else if (IS_VALLEYVIEW(dev_priv)) {
6109 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
6110 intel_encoder->pre_enable = vlv_pre_enable_dp;
6111 intel_encoder->enable = vlv_enable_dp;
6112 intel_encoder->post_disable = vlv_post_disable_dp;
6113 } else {
6114 intel_encoder->pre_enable = g4x_pre_enable_dp;
6115 intel_encoder->enable = g4x_enable_dp;
6116 if (INTEL_GEN(dev_priv) >= 5)
6117 intel_encoder->post_disable = ilk_post_disable_dp;
6118 }
6119
6120 intel_dig_port->port = port;
6121 intel_dig_port->dp.output_reg = output_reg;
6122 intel_dig_port->max_lanes = 4;
6123
6124 intel_encoder->type = INTEL_OUTPUT_DP;
6125 intel_encoder->power_domain = intel_port_to_power_domain(port);
6126 if (IS_CHERRYVIEW(dev_priv)) {
6127 if (port == PORT_D)
6128 intel_encoder->crtc_mask = 1 << 2;
6129 else
6130 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
6131 } else {
6132 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
6133 }
6134 intel_encoder->cloneable = 0;
6135 intel_encoder->port = port;
6136
6137 intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
6138 dev_priv->hotplug.irq_port[port] = intel_dig_port;
6139
6140 if (!intel_dp_init_connector(intel_dig_port, intel_connector))
6141 goto err_init_connector;
6142
6143 return true;
6144
6145 err_init_connector:
6146 drm_encoder_cleanup(encoder);
6147 err_encoder_init:
6148 kfree(intel_connector);
6149 err_connector_alloc:
6150 kfree(intel_dig_port);
6151 return false;
6152 }
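/*
 * Call-site sketch (hypothetical values; the actual call sites live in
 * the platform output setup code): register a DP encoder on port B
 * driven by the DP_B control register.
 *
 *	if (!intel_dp_init(dev_priv, DP_B, PORT_B))
 *		DRM_DEBUG_KMS("DP B initialization failed\n");
 */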
6153
6154 void intel_dp_mst_suspend(struct drm_device *dev)
6155 {
6156 struct drm_i915_private *dev_priv = to_i915(dev);
6157 int i;
6158
6159 /* disable MST */
6160 for (i = 0; i < I915_MAX_PORTS; i++) {
6161 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
6162
6163 if (!intel_dig_port || !intel_dig_port->dp.can_mst)
6164 continue;
6165
6166 if (intel_dig_port->dp.is_mst)
6167 drm_dp_mst_topology_mgr_suspend(&intel_dig_port->dp.mst_mgr);
6168 }
6169 }
6170
6171 void intel_dp_mst_resume(struct drm_device *dev)
6172 {
6173 struct drm_i915_private *dev_priv = to_i915(dev);
6174 int i;
6175
6176 for (i = 0; i < I915_MAX_PORTS; i++) {
6177 struct intel_digital_port *intel_dig_port = dev_priv->hotplug.irq_port[i];
6178 int ret;
6179
6180 if (!intel_dig_port || !intel_dig_port->dp.can_mst)
6181 continue;
6182
6183 ret = drm_dp_mst_topology_mgr_resume(&intel_dig_port->dp.mst_mgr);
6184 if (ret)
6185 intel_dp_check_mst_status(&intel_dig_port->dp);
6186 }
6187 }
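/*
 * Suspend/resume sketch (assumption: invoked from the driver's system
 * suspend/resume paths): MST topology managers are quiesced before
 * suspend and revalidated after resume, with a status recheck on
 * failure.
 *
 *	intel_dp_mst_suspend(dev);   <- before suspending the device
 *	intel_dp_mst_resume(dev);    <- after resuming the device
 */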