]> git.proxmox.com Git - mirror_ubuntu-hirsute-kernel.git/blame - drivers/gpu/drm/i915/intel_dp.c
drm/i915: extract intel_audio.h from intel_drv.h
[mirror_ubuntu-hirsute-kernel.git] / drivers / gpu / drm / i915 / intel_dp.c
CommitLineData
a4fc5ed6
KP
1/*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Keith Packard <keithp@keithp.com>
25 *
26 */
27
2d1a8a48 28#include <linux/export.h>
331c201a 29#include <linux/i2c.h>
01527b31
CT
30#include <linux/notifier.h>
31#include <linux/reboot.h>
331c201a
JN
32#include <linux/slab.h>
33#include <linux/types.h>
611032bf 34#include <asm/byteorder.h>
331c201a 35
c6f95f27 36#include <drm/drm_atomic_helper.h>
760285e7 37#include <drm/drm_crtc.h>
20f24d77 38#include <drm/drm_dp_helper.h>
760285e7 39#include <drm/drm_edid.h>
20f24d77 40#include <drm/drm_hdcp.h>
fcd70cd3 41#include <drm/drm_probe_helper.h>
760285e7 42#include <drm/i915_drm.h>
331c201a 43
a4fc5ed6 44#include "i915_drv.h"
331c201a
JN
45#include "intel_audio.h"
46#include "intel_drv.h"
a4fc5ed6 47
/* Max bytes of the ESI (event status indicator) field read for DP MST */
#define DP_DPRX_ESI_LEN 14

/* DP DSC small joiner has 2 FIFOs each of 640 x 6 bytes */
#define DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER	61440
/* Supported output bits-per-component range for the DSC encoder */
#define DP_DSC_MIN_SUPPORTED_BPC		8
#define DP_DSC_MAX_SUPPORTED_BPC		10

/* DP DSC throughput values used for slice count calculations KPixels/s */
#define DP_DSC_PEAK_PIXEL_RATE			2720000
#define DP_DSC_MAX_ENC_THROUGHPUT_0		340000
#define DP_DSC_MAX_ENC_THROUGHPUT_1		400000

/* DP DSC FEC Overhead factor = (100 - 2.4)/100 */
#define DP_DSC_FEC_OVERHEAD_FACTOR		976

/* Compliance test status bits */
#define INTEL_DP_RESOLUTION_SHIFT_MASK	0
#define INTEL_DP_RESOLUTION_PREFERRED	(1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_STANDARD	(2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
#define INTEL_DP_RESOLUTION_FAILSAFE	(3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
/* Pairs a fixed DP link rate (kHz) with the PLL dividers that produce it. */
struct dp_link_dpll {
	int clock;	/* link clock in kHz */
	struct dpll dpll;	/* divider settings for that clock */
};
73
/* Per-platform DPLL settings for the two base DP link rates (RBR/HBR) */
static const struct dp_link_dpll g4x_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
};

static const struct dp_link_dpll pch_dpll[] = {
	{ 162000,
		{ .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
	{ 270000,
		{ .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
};

static const struct dp_link_dpll vlv_dpll[] = {
	{ 162000,
		{ .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
	{ 270000,
		{ .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
};

/*
 * CHV supports eDP 1.4 that have more link rates.
 * Below only provides the fixed rate but exclude variable rate.
 */
static const struct dp_link_dpll chv_dpll[] = {
	/*
	 * CHV requires to program fractional division for m2.
	 * m2 is stored in fixed point format using formula below
	 * (m2_int << 22) | m2_fraction
	 */
	{ 162000,	/* m2_int = 32, m2_fraction = 1677722 */
		{ .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
	{ 270000,	/* m2_int = 27, m2_fraction = 0 */
		{ .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
};
637a9c63 110
/* Constants for DP DSC configurations */
static const u8 valid_dsc_bpp[] = {6, 8, 10, 12, 15};

/* With Single pipe configuration, HW is capable of supporting maximum
 * of 4 slices per line.
 */
static const u8 valid_dsc_slicecount[] = {1, 2, 4};
cfcb0fc9 119/**
1853a9da 120 * intel_dp_is_edp - is the given port attached to an eDP panel (either CPU or PCH)
cfcb0fc9
JB
121 * @intel_dp: DP struct
122 *
123 * If a CPU or PCH DP output is attached to an eDP panel, this function
124 * will return true, and false otherwise.
125 */
1853a9da 126bool intel_dp_is_edp(struct intel_dp *intel_dp)
cfcb0fc9 127{
da63a9f2
PZ
128 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
129
130 return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
cfcb0fc9
JB
131}
132
df0e9248
CW
133static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
134{
fa90ecef 135 return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
df0e9248
CW
136}
137
/* Forward declarations for helpers defined later in this file. */
static void intel_dp_link_down(struct intel_encoder *encoder,
			       const struct intel_crtc_state *old_crtc_state);
static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
					   const struct intel_crtc_state *crtc_state);
static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
				      enum pipe pipe);
static void intel_dp_unset_edid(struct intel_dp *intel_dp);
a4fc5ed6 147
/* update sink rates from dpcd */
static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
{
	/* Candidate link rates in kHz, ascending; capped by the sink's
	 * advertised DP_MAX_LINK_RATE below. */
	static const int dp_rates[] = {
		162000, 270000, 540000, 810000
	};
	int i, max_rate;

	max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]);

	/* Copy every candidate rate not exceeding the sink's max. */
	for (i = 0; i < ARRAY_SIZE(dp_rates); i++) {
		if (dp_rates[i] > max_rate)
			break;
		intel_dp->sink_rates[i] = dp_rates[i];
	}

	intel_dp->num_sink_rates = i;
}
166
/*
 * Get length of rates array potentially limited by max_rate.
 * @rates must be sorted in ascending order; returns the count of leading
 * entries that are <= @max_rate.
 */
static int intel_dp_rate_limit_len(const int *rates, int len, int max_rate)
{
	int i;

	/* Scan from the top down; the first rate within the limit
	 * determines how many leading entries remain usable. */
	for (i = len; i > 0; i--) {
		if (rates[i - 1] <= max_rate)
			return i;
	}

	return 0;
}
180
/* Get length of common rates array potentially limited by max_rate. */
static int intel_dp_common_len_rate_limit(const struct intel_dp *intel_dp,
					  int max_rate)
{
	return intel_dp_rate_limit_len(intel_dp->common_rates,
				       intel_dp->num_common_rates, max_rate);
}
188
/* Theoretical max between source and sink */
static int intel_dp_max_common_rate(struct intel_dp *intel_dp)
{
	/* common_rates[] is built in ascending order, so the last entry
	 * is the highest rate both ends support. */
	return intel_dp->common_rates[intel_dp->num_common_rates - 1];
}
194
/*
 * Read the FIA lane-assignment field for a Type-C port and translate the
 * lane bitmask into a usable lane count. Non-Type-C ports always get 4.
 */
static int intel_dp_get_fia_supported_lane_count(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port);
	u32 lane_info;

	if (tc_port == PORT_TC_NONE || dig_port->tc_type != TC_PORT_TYPEC)
		return 4;

	lane_info = (I915_READ(PORT_TX_DFLEXDPSP) &
		     DP_LANE_ASSIGNMENT_MASK(tc_port)) >>
		    DP_LANE_ASSIGNMENT_SHIFT(tc_port);

	switch (lane_info) {
	default:
		MISSING_CASE(lane_info);
		/* fall through - treat unknown assignments as a single lane */
	case 1:
	case 2:
	case 4:
	case 8:
		return 1;
	case 3:
	case 12:
		return 2;
	case 15:
		return 4;
	}
}
224
/* Theoretical max between source and sink */
static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	int source_max = intel_dig_port->max_lanes;
	int sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
	int fia_max = intel_dp_get_fia_supported_lane_count(intel_dp);

	/* The usable lane count is limited by source, sink and (for
	 * Type-C) the FIA lane assignment. */
	return min3(source_max, sink_max, fia_max);
}
235
/* Current upper bound on lane count, possibly reduced by link-training fallback. */
int intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
	return intel_dp->max_link_lane_count;
}
240
/*
 * Data rate (in kBytes/s) needed to carry @pixel_clock (kHz) at @bpp
 * bits per pixel. Rounds up so partial bytes still count.
 */
int
intel_dp_link_required(int pixel_clock, int bpp)
{
	/* pixel_clock is in kHz; dividing bits by 8 converts to Bytes,
	 * rounding up (equivalent to DIV_ROUND_UP(pixel_clock * bpp, 8)) */
	return (pixel_clock * bpp + 7) / 8;
}
247
/*
 * Maximum data rate the link can carry, in the same kBytes/s units as
 * intel_dp_link_required().
 */
int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	/* max_link_clock is the link symbol clock (LS_Clk) in kHz, not the
	 * link rate in Gbps. 8 bits of data are transmitted every LS_Clk
	 * per lane, so the PHY-layer channel encoding needs no accounting
	 * here. */
	int total = max_link_clock * max_lanes;

	return total;
}
259
/*
 * Max dotclock, further limited by a downstream VGA converter (DP branch
 * device) if one is present; other downstream port types keep the source max.
 */
static int
intel_dp_downstream_max_dotclock(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &intel_dig_port->base;
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	int max_dotclk = dev_priv->max_dotclk_freq;
	int ds_max_dotclk;

	int type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;

	if (type != DP_DS_PORT_TYPE_VGA)
		return max_dotclk;

	ds_max_dotclk = drm_dp_downstream_max_clock(intel_dp->dpcd,
						    intel_dp->downstream_ports);

	/* 0 means the branch device did not report a limit */
	if (ds_max_dotclk != 0)
		max_dotclk = min(max_dotclk, ds_max_dotclk);

	return max_dotclk;
}
282
/* Max source link rate (kHz) for Cannon Lake, depending on SKU voltage and port. */
static int cnl_max_source_rate(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum port port = dig_port->base.port;

	u32 voltage = I915_READ(CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;

	/* Low voltage SKUs are limited to max of 5.4G */
	if (voltage == VOLTAGE_INFO_0_85V)
		return 540000;

	/* For this SKU 8.1G is supported in all ports */
	if (IS_CNL_WITH_PORT_F(dev_priv))
		return 810000;

	/* For other SKUs, max rate on ports A and D is 5.4G */
	if (port == PORT_A || port == PORT_D)
		return 540000;

	return 810000;
}
305
/* Max source link rate (kHz) for Ice Lake: combo-PHY DP is capped at 5.4G. */
static int icl_max_source_rate(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	enum port port = dig_port->base.port;

	/* Combo PHY DP (but not eDP) is limited to 5.4G */
	if (intel_port_is_combophy(dev_priv, port) &&
	    !intel_dp_is_edp(intel_dp))
		return 540000;

	return 810000;
}
318
/*
 * Select the platform's table of supported source link rates and store it
 * in @intel_dp, optionally truncated by a platform- or VBT-imposed max rate.
 * Must run exactly once per intel_dp (see the WARN_ON below).
 */
static void
intel_dp_set_source_rates(struct intel_dp *intel_dp)
{
	/* The values must be in increasing order */
	static const int cnl_rates[] = {
		162000, 216000, 270000, 324000, 432000, 540000, 648000, 810000
	};
	static const int bxt_rates[] = {
		162000, 216000, 243000, 270000, 324000, 432000, 540000
	};
	static const int skl_rates[] = {
		162000, 216000, 270000, 324000, 432000, 540000
	};
	static const int hsw_rates[] = {
		162000, 270000, 540000
	};
	static const int g4x_rates[] = {
		162000, 270000
	};
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	const struct ddi_vbt_port_info *info =
		&dev_priv->vbt.ddi_port_info[dig_port->base.port];
	const int *source_rates;
	int size, max_rate = 0, vbt_max_rate = info->dp_max_link_rate;

	/* This should only be done once */
	WARN_ON(intel_dp->source_rates || intel_dp->num_source_rates);

	/* Pick the rate table for this platform generation. */
	if (INTEL_GEN(dev_priv) >= 10) {
		source_rates = cnl_rates;
		size = ARRAY_SIZE(cnl_rates);
		if (IS_GEN(dev_priv, 10))
			max_rate = cnl_max_source_rate(intel_dp);
		else
			max_rate = icl_max_source_rate(intel_dp);
	} else if (IS_GEN9_LP(dev_priv)) {
		source_rates = bxt_rates;
		size = ARRAY_SIZE(bxt_rates);
	} else if (IS_GEN9_BC(dev_priv)) {
		source_rates = skl_rates;
		size = ARRAY_SIZE(skl_rates);
	} else if ((IS_HASWELL(dev_priv) && !IS_HSW_ULX(dev_priv)) ||
		   IS_BROADWELL(dev_priv)) {
		source_rates = hsw_rates;
		size = ARRAY_SIZE(hsw_rates);
	} else {
		source_rates = g4x_rates;
		size = ARRAY_SIZE(g4x_rates);
	}

	/* Combine the platform cap with the VBT cap; either may be absent (0). */
	if (max_rate && vbt_max_rate)
		max_rate = min(max_rate, vbt_max_rate);
	else if (vbt_max_rate)
		max_rate = vbt_max_rate;

	if (max_rate)
		size = intel_dp_rate_limit_len(source_rates, size, max_rate);

	intel_dp->source_rates = source_rates;
	intel_dp->num_source_rates = size;
}
381
382static int intersect_rates(const int *source_rates, int source_len,
383 const int *sink_rates, int sink_len,
384 int *common_rates)
385{
386 int i = 0, j = 0, k = 0;
387
388 while (i < source_len && j < sink_len) {
389 if (source_rates[i] == sink_rates[j]) {
390 if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
391 return k;
392 common_rates[k] = source_rates[i];
393 ++k;
394 ++i;
395 ++j;
396 } else if (source_rates[i] < sink_rates[j]) {
397 ++i;
398 } else {
399 ++j;
400 }
401 }
402 return k;
403}
404
/* return index of rate in rates array, or -1 if not found */
static int intel_dp_rate_index(const int *rates, int len, int rate)
{
	int idx = 0;

	while (idx < len) {
		if (rates[idx] == rate)
			return idx;
		idx++;
	}

	return -1;
}
416
/* Compute the intersection of source and sink rates into common_rates[]. */
static void intel_dp_set_common_rates(struct intel_dp *intel_dp)
{
	/* Both rate arrays must have been populated first. */
	WARN_ON(!intel_dp->num_source_rates || !intel_dp->num_sink_rates);

	intel_dp->num_common_rates = intersect_rates(intel_dp->source_rates,
						     intel_dp->num_source_rates,
						     intel_dp->sink_rates,
						     intel_dp->num_sink_rates,
						     intel_dp->common_rates);

	/* Paranoia, there should always be something in common. */
	if (WARN_ON(intel_dp->num_common_rates == 0)) {
		intel_dp->common_rates[0] = 162000;
		intel_dp->num_common_rates = 1;
	}
}
433
/* Check that @link_rate/@lane_count are non-zero and within the current maxima. */
static bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate,
				       u8 lane_count)
{
	/*
	 * FIXME: we need to synchronize the current link parameters with
	 * hardware readout. Currently fast link training doesn't work on
	 * boot-up.
	 */
	if (link_rate == 0 ||
	    link_rate > intel_dp->max_link_rate)
		return false;

	if (lane_count == 0 ||
	    lane_count > intel_dp_max_lane_count(intel_dp))
		return false;

	return true;
}
452
1e712535
MN
453static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp,
454 int link_rate,
830de422 455 u8 lane_count)
1e712535
MN
456{
457 const struct drm_display_mode *fixed_mode =
458 intel_dp->attached_connector->panel.fixed_mode;
459 int mode_rate, max_rate;
460
461 mode_rate = intel_dp_link_required(fixed_mode->clock, 18);
462 max_rate = intel_dp_max_data_rate(link_rate, lane_count);
463 if (mode_rate > max_rate)
464 return false;
465
466 return true;
467}
468
/*
 * After a link-training failure, lower the max link rate first; once at
 * the lowest common rate, halve the lane count instead. For eDP the
 * reduced config must still drive the fixed panel mode, otherwise retry
 * with unchanged parameters (return 0 without lowering anything).
 * Returns 0 on success, -1 when no further fallback is possible.
 */
int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
					    int link_rate, u8 lane_count)
{
	int index;

	index = intel_dp_rate_index(intel_dp->common_rates,
				    intel_dp->num_common_rates,
				    link_rate);
	if (index > 0) {
		/* A lower common rate exists; try it (eDP permitting). */
		if (intel_dp_is_edp(intel_dp) &&
		    !intel_dp_can_link_train_fallback_for_edp(intel_dp,
							      intel_dp->common_rates[index - 1],
							      lane_count)) {
			DRM_DEBUG_KMS("Retrying Link training for eDP with same parameters\n");
			return 0;
		}
		intel_dp->max_link_rate = intel_dp->common_rates[index - 1];
		intel_dp->max_link_lane_count = lane_count;
	} else if (lane_count > 1) {
		/* Already at the lowest rate: drop lanes, reset rate to max. */
		if (intel_dp_is_edp(intel_dp) &&
		    !intel_dp_can_link_train_fallback_for_edp(intel_dp,
							      intel_dp_max_common_rate(intel_dp),
							      lane_count >> 1)) {
			DRM_DEBUG_KMS("Retrying Link training for eDP with same parameters\n");
			return 0;
		}
		intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
		intel_dp->max_link_lane_count = lane_count >> 1;
	} else {
		DRM_ERROR("Link Training Unsuccessful\n");
		return -1;
	}

	return 0;
}
504
/*
 * Validate @mode against panel size (eDP), link bandwidth (with DSC
 * compression taken into account where supported), downstream dotclock
 * limits and basic flag restrictions.
 */
static enum drm_mode_status
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;
	int max_dotclk;
	u16 dsc_max_output_bpp = 0;
	u8 dsc_slice_count = 0;

	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
		return MODE_NO_DBLESCAN;

	max_dotclk = intel_dp_downstream_max_dotclock(intel_dp);

	/* eDP panels can't exceed their fixed mode's dimensions. */
	if (intel_dp_is_edp(intel_dp) && fixed_mode) {
		if (mode->hdisplay > fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay > fixed_mode->vdisplay)
			return MODE_PANEL;

		target_clock = fixed_mode->clock;
	}

	max_link_clock = intel_dp_max_link_rate(intel_dp);
	max_lanes = intel_dp_max_lane_count(intel_dp);

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
	mode_rate = intel_dp_link_required(target_clock, 18);

	/*
	 * Output bpp is stored in 6.4 format so right shift by 4 to get the
	 * integer value since we support only integer values of bpp.
	 */
	if ((INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) &&
	    drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)) {
		if (intel_dp_is_edp(intel_dp)) {
			dsc_max_output_bpp =
				drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4;
			dsc_slice_count =
				drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
								true);
		} else if (drm_dp_sink_supports_fec(intel_dp->fec_capable)) {
			dsc_max_output_bpp =
				intel_dp_dsc_get_output_bpp(max_link_clock,
							    max_lanes,
							    target_clock,
							    mode->hdisplay) >> 4;
			dsc_slice_count =
				intel_dp_dsc_get_slice_count(intel_dp,
							     target_clock,
							     mode->hdisplay);
		}
	}

	/* Over-bandwidth modes are only OK if DSC can compress them. */
	if ((mode_rate > max_rate && !(dsc_max_output_bpp && dsc_slice_count)) ||
	    target_clock > max_dotclk)
		return MODE_CLOCK_HIGH;

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	return MODE_OK;
}
577
830de422 578u32 intel_dp_pack_aux(const u8 *src, int src_bytes)
a4fc5ed6 579{
830de422
JN
580 int i;
581 u32 v = 0;
a4fc5ed6
KP
582
583 if (src_bytes > 4)
584 src_bytes = 4;
585 for (i = 0; i < src_bytes; i++)
830de422 586 v |= ((u32)src[i]) << ((3 - i) * 8);
a4fc5ed6
KP
587 return v;
588}
589
830de422 590static void intel_dp_unpack_aux(u32 src, u8 *dst, int dst_bytes)
a4fc5ed6
KP
591{
592 int i;
593 if (dst_bytes > 4)
594 dst_bytes = 4;
595 for (i = 0; i < dst_bytes; i++)
596 dst[i] = src >> ((3-i) * 8);
597}
598
/* Forward declarations for the panel power sequencer helpers below. */
static void
intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp);
static void
intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
					      bool force_disable_vdd);
static void
intel_dp_pps_init(struct intel_dp *intel_dp);
bf13e81b 606
/*
 * Take an AUX power domain reference and then the pps mutex, in that
 * order. Returns the wakeref to be passed back to pps_unlock().
 */
static intel_wakeref_t
pps_lock(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	intel_wakeref_t wakeref;

	/*
	 * See intel_power_sequencer_reset() why we need
	 * a power domain reference here.
	 */
	wakeref = intel_display_power_get(dev_priv,
					  intel_aux_power_domain(dp_to_dig_port(intel_dp)));

	mutex_lock(&dev_priv->pps_mutex);

	return wakeref;
}
624
/*
 * Release the pps mutex and drop the power reference taken in pps_lock().
 * Always returns 0 so with_pps_lock()'s loop condition terminates.
 */
static intel_wakeref_t
pps_unlock(struct intel_dp *intel_dp, intel_wakeref_t wakeref)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	mutex_unlock(&dev_priv->pps_mutex);
	intel_display_power_put(dev_priv,
				intel_aux_power_domain(dp_to_dig_port(intel_dp)),
				wakeref);
	return 0;
}
636
/*
 * Run the following block with the pps lock held; the "loop" executes
 * exactly once because pps_unlock() returns 0.
 * NOTE(review): assumes pps_lock() never returns a zero wakeref — confirm
 * against intel_display_power_get().
 */
#define with_pps_lock(dp, wf) \
	for ((wf) = pps_lock(dp); (wf); (wf) = pps_unlock((dp), (wf)))
639
/*
 * Force the VLV/CHV panel power sequencer to lock onto this port by
 * briefly enabling and disabling the port with a minimal 1-lane config.
 * Requires the pipe's DPLL; turns it on temporarily if needed.
 */
static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum pipe pipe = intel_dp->pps_pipe;
	bool pll_enabled, release_cl_override = false;
	enum dpio_phy phy = DPIO_PHY(pipe);
	enum dpio_channel ch = vlv_pipe_to_channel(pipe);
	u32 DP;

	/* Never kick while the port is actively driving a display. */
	if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
		 "skipping pipe %c power sequencer kick due to port %c being active\n",
		 pipe_name(pipe), port_name(intel_dig_port->base.port)))
		return;

	DRM_DEBUG_KMS("kicking pipe %c power sequencer for port %c\n",
		      pipe_name(pipe), port_name(intel_dig_port->base.port));

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
	DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	DP |= DP_PORT_WIDTH(1);
	DP |= DP_LINK_TRAIN_PAT_1;

	if (IS_CHERRYVIEW(dev_priv))
		DP |= DP_PIPE_SEL_CHV(pipe);
	else
		DP |= DP_PIPE_SEL(pipe);

	pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;

	/*
	 * The DPLL for the pipe must be enabled for this to work.
	 * So enable temporarily it if it's not already enabled.
	 */
	if (!pll_enabled) {
		release_cl_override = IS_CHERRYVIEW(dev_priv) &&
			!chv_phy_powergate_ch(dev_priv, phy, ch, true);

		if (vlv_force_pll_on(dev_priv, pipe, IS_CHERRYVIEW(dev_priv) ?
				     &chv_dpll[0].dpll : &vlv_dpll[0].dpll)) {
			DRM_ERROR("Failed to force on pll for pipe %c!\n",
				  pipe_name(pipe));
			return;
		}
	}

	/*
	 * Similar magic as in intel_dp_enable_port().
	 * We _must_ do this port enable + disable trick
	 * to make this power sequencer lock onto the port.
	 * Otherwise even VDD force bit won't work.
	 */
	I915_WRITE(intel_dp->output_reg, DP);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);

	/* Undo the temporary PLL enable / powergate override. */
	if (!pll_enabled) {
		vlv_force_pll_off(dev_priv, pipe);

		if (release_cl_override)
			chv_phy_powergate_ch(dev_priv, phy, ch, false);
	}
}
712
/*
 * Find a pipe (A or B) whose power sequencer is not claimed by any DP
 * encoder. Returns INVALID_PIPE when both are in use.
 */
static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;
	unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);

	/*
	 * We don't have power sequencer currently.
	 * Pick one that's not used by other ports.
	 */
	for_each_intel_dp(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

		if (encoder->type == INTEL_OUTPUT_EDP) {
			WARN_ON(intel_dp->active_pipe != INVALID_PIPE &&
				intel_dp->active_pipe != intel_dp->pps_pipe);

			if (intel_dp->pps_pipe != INVALID_PIPE)
				pipes &= ~(1 << intel_dp->pps_pipe);
		} else {
			WARN_ON(intel_dp->pps_pipe != INVALID_PIPE);

			if (intel_dp->active_pipe != INVALID_PIPE)
				pipes &= ~(1 << intel_dp->active_pipe);
		}
	}

	if (pipes == 0)
		return INVALID_PIPE;

	/* Lowest remaining pipe bit (ffs is 1-based). */
	return ffs(pipes) - 1;
}
744
/*
 * Return the pipe whose power sequencer drives this eDP port, claiming
 * (and initializing) a free one on first use. Caller must hold pps_mutex.
 */
static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum pipe pipe;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	WARN_ON(!intel_dp_is_edp(intel_dp));

	WARN_ON(intel_dp->active_pipe != INVALID_PIPE &&
		intel_dp->active_pipe != intel_dp->pps_pipe);

	if (intel_dp->pps_pipe != INVALID_PIPE)
		return intel_dp->pps_pipe;

	pipe = vlv_find_free_pps(dev_priv);

	/*
	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	 */
	if (WARN_ON(pipe == INVALID_PIPE))
		pipe = PIPE_A;

	vlv_steal_power_sequencer(dev_priv, pipe);
	intel_dp->pps_pipe = pipe;

	DRM_DEBUG_KMS("picked pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe),
		      port_name(intel_dig_port->base.port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(intel_dp, true);

	/*
	 * Even vdd force doesn't work until we've made
	 * the power sequencer lock in on the port.
	 */
	vlv_power_sequencer_kick(intel_dp);

	return intel_dp->pps_pipe;
}
791
/*
 * Return the BXT/GLK power sequencer index (the VBT backlight controller),
 * reprogramming the HW registers once after a pps reset. Caller must hold
 * pps_mutex.
 */
static int
bxt_power_sequencer_idx(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	int backlight_controller = dev_priv->vbt.backlight.controller;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	WARN_ON(!intel_dp_is_edp(intel_dp));

	if (!intel_dp->pps_reset)
		return backlight_controller;

	intel_dp->pps_reset = false;

	/*
	 * Only the HW needs to be reprogrammed, the SW state is fixed and
	 * has been setup during connector init.
	 */
	intel_dp_init_panel_power_sequencer_registers(intel_dp, false);

	return backlight_controller;
}
816
/* Predicate type used by vlv_initial_pps_pipe() to filter candidate pipes. */
typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
			       enum pipe pipe);

/* Is the panel power on for this pipe's sequencer? */
static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
			       enum pipe pipe)
{
	return I915_READ(PP_STATUS(pipe)) & PP_ON;
}

/* Is VDD forced on for this pipe's sequencer? */
static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
				enum pipe pipe)
{
	return I915_READ(PP_CONTROL(pipe)) & EDP_FORCE_VDD;
}

/* Accept any pipe (fallback predicate). */
static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
			 enum pipe pipe)
{
	return true;
}
bf13e81b 837
/*
 * Scan pipes A/B for a power sequencer already selecting @port and
 * satisfying @pipe_check; INVALID_PIPE if none matches.
 */
static enum pipe
vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
		     enum port port,
		     vlv_pipe_check pipe_check)
{
	enum pipe pipe;

	for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
		u32 port_sel = I915_READ(PP_ON_DELAYS(pipe)) &
			PANEL_PORT_SELECT_MASK;

		if (port_sel != PANEL_PORT_SELECT_VLV(port))
			continue;

		if (!pipe_check(dev_priv, pipe))
			continue;

		return pipe;
	}

	return INVALID_PIPE;
}
860
/*
 * At boot, adopt whichever power sequencer the BIOS left assigned to this
 * port, preferring one that is actively powering the panel. Caller must
 * hold pps_mutex.
 */
static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->base.port;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* try to find a pipe with this port selected */
	/* first pick one where the panel is on */
	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
						  vlv_pipe_has_pp_on);
	/* didn't find one? pick one where vdd is on */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_has_vdd_on);
	/* didn't find one? pick one with just the correct port */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_any);

	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
	if (intel_dp->pps_pipe == INVALID_PIPE) {
		DRM_DEBUG_KMS("no initial power sequencer for port %c\n",
			      port_name(port));
		return;
	}

	DRM_DEBUG_KMS("initial power sequencer for port %c: pipe %c\n",
		      port_name(port), pipe_name(intel_dp->pps_pipe));

	intel_dp_init_panel_power_sequencer(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
}
896
/*
 * Invalidate every eDP encoder's power-sequencer binding after the power
 * domain was turned off (HW state lost). Deliberately lock-free: see the
 * comment below about the pps_mutex / power_domain ordering.
 */
void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;

	if (WARN_ON(!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
		    !IS_GEN9_LP(dev_priv)))
		return;

	/*
	 * We can't grab pps_mutex here due to deadlock with power_domain
	 * mutex when power_domain functions are called while holding pps_mutex.
	 * That also means that in order to use pps_pipe the code needs to
	 * hold both a power domain reference and pps_mutex, and the power domain
	 * reference get/put must be done while _not_ holding pps_mutex.
	 * pps_{lock,unlock}() do these steps in the correct order, so one
	 * should use them always.
	 */

	for_each_intel_dp(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

		WARN_ON(intel_dp->active_pipe != INVALID_PIPE);

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		/* BXT/GLK keep a fixed index and just reprogram registers;
		 * VLV/CHV must re-pick a pipe. */
		if (IS_GEN9_LP(dev_priv))
			intel_dp->pps_reset = true;
		else
			intel_dp->pps_pipe = INVALID_PIPE;
	}
}
929
/*
 * The panel power sequencer (PPS) register set for the PPS instance
 * backing a given eDP port.  Filled in by intel_pps_get_registers();
 * pp_div is INVALID_MMIO_REG on platforms where the cycle delay moved
 * into PP_CONTROL.
 */
struct pps_registers {
	i915_reg_t pp_ctrl;	/* PP_CONTROL */
	i915_reg_t pp_stat;	/* PP_STATUS */
	i915_reg_t pp_on;	/* PP_ON_DELAYS */
	i915_reg_t pp_off;	/* PP_OFF_DELAYS */
	i915_reg_t pp_div;	/* PP_DIVISOR, if it exists */
};
937
46bd8383 938static void intel_pps_get_registers(struct intel_dp *intel_dp,
8e8232d5
ID
939 struct pps_registers *regs)
940{
de25eb7f 941 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
44cb734c
ID
942 int pps_idx = 0;
943
8e8232d5
ID
944 memset(regs, 0, sizeof(*regs));
945
cc3f90f0 946 if (IS_GEN9_LP(dev_priv))
44cb734c
ID
947 pps_idx = bxt_power_sequencer_idx(intel_dp);
948 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
949 pps_idx = vlv_power_sequencer_pipe(intel_dp);
8e8232d5 950
44cb734c
ID
951 regs->pp_ctrl = PP_CONTROL(pps_idx);
952 regs->pp_stat = PP_STATUS(pps_idx);
953 regs->pp_on = PP_ON_DELAYS(pps_idx);
954 regs->pp_off = PP_OFF_DELAYS(pps_idx);
ab3517c1
JN
955
956 /* Cycle delay moved from PP_DIVISOR to PP_CONTROL */
c6c30b91 957 if (IS_GEN9_LP(dev_priv) || INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
ab3517c1
JN
958 regs->pp_div = INVALID_MMIO_REG;
959 else
44cb734c 960 regs->pp_div = PP_DIVISOR(pps_idx);
8e8232d5
ID
961}
962
f0f59a00
VS
963static i915_reg_t
964_pp_ctrl_reg(struct intel_dp *intel_dp)
bf13e81b 965{
8e8232d5 966 struct pps_registers regs;
bf13e81b 967
46bd8383 968 intel_pps_get_registers(intel_dp, &regs);
8e8232d5
ID
969
970 return regs.pp_ctrl;
bf13e81b
JN
971}
972
f0f59a00
VS
973static i915_reg_t
974_pp_stat_reg(struct intel_dp *intel_dp)
bf13e81b 975{
8e8232d5 976 struct pps_registers regs;
bf13e81b 977
46bd8383 978 intel_pps_get_registers(intel_dp, &regs);
8e8232d5
ID
979
980 return regs.pp_stat;
bf13e81b
JN
981}
982
/* Reboot notifier handler to shutdown panel power to guarantee T12 timing
   This function only applicable when panel PM state is not to be tracked */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
			      void *unused)
{
	struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
						 edp_notifier);
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	intel_wakeref_t wakeref;

	/* Only act on eDP panels, and only for an actual restart. */
	if (!intel_dp_is_edp(intel_dp) || code != SYS_RESTART)
		return 0;

	with_pps_lock(intel_dp, wakeref) {
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
			enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
			i915_reg_t pp_ctrl_reg, pp_div_reg;
			u32 pp_div;

			pp_ctrl_reg = PP_CONTROL(pipe);
			pp_div_reg = PP_DIVISOR(pipe);
			pp_div = I915_READ(pp_div_reg);
			/* Keep only the reference divider; replace the cycle delay. */
			pp_div &= PP_REFERENCE_DIVIDER_MASK;

			/* 0x1F write to PP_DIV_REG sets max cycle delay */
			I915_WRITE(pp_div_reg, pp_div | 0x1F);
			I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS);
			/* Let the panel complete its power-off cycle (T12). */
			msleep(intel_dp->panel_power_cycle_delay);
		}
	}

	return 0;
}
1016
4be73780 1017static bool edp_have_panel_power(struct intel_dp *intel_dp)
ebf33b18 1018{
de25eb7f 1019 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
ebf33b18 1020
e39b999a
VS
1021 lockdep_assert_held(&dev_priv->pps_mutex);
1022
920a14b2 1023 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
9a42356b
VS
1024 intel_dp->pps_pipe == INVALID_PIPE)
1025 return false;
1026
bf13e81b 1027 return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
ebf33b18
KP
1028}
1029
4be73780 1030static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
ebf33b18 1031{
de25eb7f 1032 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
ebf33b18 1033
e39b999a
VS
1034 lockdep_assert_held(&dev_priv->pps_mutex);
1035
920a14b2 1036 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
9a42356b
VS
1037 intel_dp->pps_pipe == INVALID_PIPE)
1038 return false;
1039
773538e8 1040 return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
ebf33b18
KP
1041}
1042
/*
 * Sanity check before AUX traffic on eDP: warn loudly if the panel has
 * neither panel power nor forced VDD, since AUX will not work then.
 */
static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!intel_dp_is_edp(intel_dp))
		return;

	/* Either full panel power or forced VDD is enough for AUX. */
	if (edp_have_panel_power(intel_dp) || edp_have_panel_vdd(intel_dp))
		return;

	WARN(1, "eDP powered off while attempting aux channel communication.\n");
	DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
		      I915_READ(_pp_stat_reg(intel_dp)),
		      I915_READ(_pp_ctrl_reg(intel_dp)));
}
1058
/*
 * Wait (up to 10ms) for the current AUX transaction to complete, i.e. for
 * the hardware to clear DP_AUX_CH_CTL_SEND_BUSY.  Returns the final value
 * of the AUX control register so the caller can inspect done/error bits.
 */
static u32
intel_dp_aux_wait_done(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
	u32 status;
	bool done;

	/*
	 * The condition macro re-reads the register each time the wait queue
	 * is woken (AUX-done interrupt), storing the latest value in status.
	 */
#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
				  msecs_to_jiffies_timeout(10));

	/* just trace the final value */
	trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);

	if (!done)
		DRM_ERROR("dp aux hw did not signal timeout!\n");
#undef C

	return status;
}
1080
830de422 1081static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
a4fc5ed6 1082{
de25eb7f 1083 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
9ee32fea 1084
a457f54b
VS
1085 if (index)
1086 return 0;
1087
ec5b01dd
DL
1088 /*
1089 * The clock divider is based off the hrawclk, and would like to run at
a457f54b 1090 * 2MHz. So, take the hrawclk value and divide by 2000 and use that
a4fc5ed6 1091 */
a457f54b 1092 return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
ec5b01dd
DL
1093}
1094
830de422 1095static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
ec5b01dd 1096{
de25eb7f 1097 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
563d22a0 1098 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
ec5b01dd
DL
1099
1100 if (index)
1101 return 0;
1102
a457f54b
VS
1103 /*
1104 * The clock divider is based off the cdclk or PCH rawclk, and would
1105 * like to run at 2MHz. So, take the cdclk or PCH rawclk value and
1106 * divide by 2000 and use that
1107 */
563d22a0 1108 if (dig_port->aux_ch == AUX_CH_A)
49cd97a3 1109 return DIV_ROUND_CLOSEST(dev_priv->cdclk.hw.cdclk, 2000);
e7dc33f3
VS
1110 else
1111 return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
ec5b01dd
DL
1112}
1113
830de422 1114static u32 hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
ec5b01dd 1115{
de25eb7f 1116 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
563d22a0 1117 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
ec5b01dd 1118
563d22a0 1119 if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(dev_priv)) {
2c55c336 1120 /* Workaround for non-ULT HSW */
bc86625a
CW
1121 switch (index) {
1122 case 0: return 63;
1123 case 1: return 72;
1124 default: return 0;
1125 }
2c55c336 1126 }
a457f54b
VS
1127
1128 return ilk_get_aux_clock_divider(intel_dp, index);
b84a1cf8
RV
1129}
1130
830de422 1131static u32 skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
b6b5e383
DL
1132{
1133 /*
1134 * SKL doesn't need us to program the AUX clock divider (Hardware will
1135 * derive the clock from CDCLK automatically). We still implement the
1136 * get_aux_clock_divider vfunc to plug-in into the existing code.
1137 */
1138 return index ? 0 : 1;
1139}
1140
830de422
JN
1141static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
1142 int send_bytes,
1143 u32 aux_clock_divider)
5ed12a19
DL
1144{
1145 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
8652744b
TU
1146 struct drm_i915_private *dev_priv =
1147 to_i915(intel_dig_port->base.base.dev);
830de422 1148 u32 precharge, timeout;
5ed12a19 1149
cf819eff 1150 if (IS_GEN(dev_priv, 6))
5ed12a19
DL
1151 precharge = 3;
1152 else
1153 precharge = 5;
1154
8f5f63d5 1155 if (IS_BROADWELL(dev_priv))
5ed12a19
DL
1156 timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
1157 else
1158 timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
1159
1160 return DP_AUX_CH_CTL_SEND_BUSY |
788d4433 1161 DP_AUX_CH_CTL_DONE |
8a29c778 1162 DP_AUX_CH_CTL_INTERRUPT |
788d4433 1163 DP_AUX_CH_CTL_TIME_OUT_ERROR |
5ed12a19 1164 timeout |
788d4433 1165 DP_AUX_CH_CTL_RECEIVE_ERROR |
5ed12a19
DL
1166 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
1167 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
788d4433 1168 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
5ed12a19
DL
1169}
1170
830de422
JN
1171static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
1172 int send_bytes,
1173 u32 unused)
b9ca5fad 1174{
6f211ed4 1175 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
830de422 1176 u32 ret;
6f211ed4
AS
1177
1178 ret = DP_AUX_CH_CTL_SEND_BUSY |
1179 DP_AUX_CH_CTL_DONE |
1180 DP_AUX_CH_CTL_INTERRUPT |
1181 DP_AUX_CH_CTL_TIME_OUT_ERROR |
1182 DP_AUX_CH_CTL_TIME_OUT_MAX |
1183 DP_AUX_CH_CTL_RECEIVE_ERROR |
1184 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
1185 DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
1186 DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
1187
1188 if (intel_dig_port->tc_type == TC_PORT_TBT)
1189 ret |= DP_AUX_CH_CTL_TBT_IO;
1190
1191 return ret;
b9ca5fad
DL
1192}
1193
/*
 * Perform one raw AUX channel transfer.
 *
 * Writes up to 20 bytes from @send into the AUX data registers, kicks off
 * the transaction, and reads back up to @recv_size bytes into @recv.
 * @aux_send_ctl_flags is OR'ed into the control value (e.g. PSR/TBT bits).
 *
 * Returns the number of bytes received on success, or a negative errno:
 * -EBUSY (channel stuck busy or forbidden receive size), -E2BIG (payload
 * too large), -EIO (receive error), -ETIMEDOUT (sink did not respond).
 *
 * Runs with pps_lock held and (for eDP) VDD forced on; takes a pm_qos
 * request for the duration because AUX is very sensitive to IRQ latency.
 */
static int
intel_dp_aux_xfer(struct intel_dp *intel_dp,
		  const u8 *send, int send_bytes,
		  u8 *recv, int recv_size,
		  u32 aux_send_ctl_flags)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv =
			to_i915(intel_dig_port->base.base.dev);
	i915_reg_t ch_ctl, ch_data[5];
	u32 aux_clock_divider;
	intel_wakeref_t wakeref;
	int i, ret, recv_bytes;
	int try, clock = 0;
	u32 status;
	bool vdd;

	/* Cache the control and the five 4-byte data register addresses. */
	ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
	for (i = 0; i < ARRAY_SIZE(ch_data); i++)
		ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i);

	wakeref = pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = edp_panel_vdd_on(intel_dp);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	pm_qos_update_request(&dev_priv->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ_NOTRACE(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}
	/* just trace the final value */
	trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);

	if (try == 3) {
		/* Rate-limit the WARN: only fire when the stuck value changes. */
		static u32 last_status = -1;
		const u32 status = I915_READ(ch_ctl);

		if (status != last_status) {
			WARN(1, "dp_aux_ch not started status 0x%08x\n",
			     status);
			last_status = status;
		}

		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	/* Retry the whole transaction for each available clock divider. */
	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  send_bytes,
							  aux_clock_divider);

		send_ctl |= aux_send_ctl_flags;

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				I915_WRITE(ch_data[i >> 2],
					   intel_dp_pack_aux(send + i,
							     send_bytes - i));

			/* Send the command and wait for it to complete */
			I915_WRITE(ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp);

			/* Clear done status and any errors */
			I915_WRITE(ch_ctl,
				   status |
				   DP_AUX_CH_CTL_DONE |
				   DP_AUX_CH_CTL_TIME_OUT_ERROR |
				   DP_AUX_CH_CTL_RECEIVE_ERROR);

			/* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
			 * 400us delay required for errors and timeouts
			 * Timeout errors from the HW already meet this
			 * requirement so skip to next iteration
			 */
			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
				continue;

			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
				usleep_range(400, 500);
				continue;
			}
			if (status & DP_AUX_CH_CTL_DONE)
				goto done;
		}
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		ret = -EBUSY;
		goto out;
	}

done:
	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);

	/*
	 * By BSpec: "Message sizes of 0 or >20 are not allowed."
	 * We have no idea of what happened so we return -EBUSY so
	 * drm layer takes care for the necessary retries.
	 */
	if (recv_bytes == 0 || recv_bytes > 20) {
		DRM_DEBUG_KMS("Forbidden recv_bytes = %d on aux transaction\n",
			      recv_bytes);
		ret = -EBUSY;
		goto out;
	}

	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(I915_READ(ch_data[i >> 2]),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	/* Undo everything in reverse order: pm_qos, VDD, pps lock. */
	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);

	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp, wakeref);

	return ret;
}
1365
#define BARE_ADDRESS_SIZE 3
#define HEADER_SIZE (BARE_ADDRESS_SIZE + 1)

/*
 * Fill the 4-byte AUX transaction header: request nibble plus the 20-bit
 * address in the first three bytes, then the length field (encoded as
 * size - 1 per the DP AUX protocol).
 */
static void
intel_dp_aux_header(u8 txbuf[HEADER_SIZE],
		    const struct drm_dp_aux_msg *msg)
{
	txbuf[0] = (msg->request << 4) | ((msg->address >> 16) & 0xf);
	txbuf[1] = (msg->address >> 8) & 0xff;
	txbuf[2] = msg->address & 0xff;
	txbuf[3] = msg->size - 1;
}
1378
/*
 * drm_dp_aux.transfer() hook: translate a drm_dp_aux_msg into raw AUX
 * channel traffic via intel_dp_aux_xfer().
 *
 * Returns the number of payload bytes transferred, or a negative errno.
 * The caller is expected to inspect msg->reply for NACK/DEFER handling.
 */
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	u8 txbuf[20], rxbuf[20];
	size_t txsize, rxsize;
	int ret;

	intel_dp_aux_header(txbuf, msg);

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
	case DP_AUX_I2C_WRITE_STATUS_UPDATE:
		/* Zero-size writes send only the bare 3-byte address. */
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (WARN_ON(txsize > 20))
			return -E2BIG;

		/* A buffer without a size (or vice versa) is a caller bug. */
		WARN_ON(!msg->buffer != !msg->size);

		if (msg->buffer)
			memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

		ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
					rxbuf, rxsize, 0);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;

			if (ret > 1) {
				/* Number of bytes written in a short write. */
				ret = clamp_t(int, rxbuf[1], 0, msg->size);
			} else {
				/* Return payload size. */
				ret = msg->size;
			}
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		rxsize = msg->size + 1;

		if (WARN_ON(rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
					rxbuf, rxsize, 0);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
1449
8f7ce038 1450
4904fa66 1451static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp)
da00bdcf 1452{
de25eb7f 1453 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
563d22a0
ID
1454 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1455 enum aux_ch aux_ch = dig_port->aux_ch;
4904fa66 1456
bdabdb63
VS
1457 switch (aux_ch) {
1458 case AUX_CH_B:
1459 case AUX_CH_C:
1460 case AUX_CH_D:
1461 return DP_AUX_CH_CTL(aux_ch);
da00bdcf 1462 default:
bdabdb63
VS
1463 MISSING_CASE(aux_ch);
1464 return DP_AUX_CH_CTL(AUX_CH_B);
da00bdcf
VS
1465 }
1466}
1467
4904fa66 1468static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index)
330e20ec 1469{
de25eb7f 1470 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
563d22a0
ID
1471 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1472 enum aux_ch aux_ch = dig_port->aux_ch;
4904fa66 1473
bdabdb63
VS
1474 switch (aux_ch) {
1475 case AUX_CH_B:
1476 case AUX_CH_C:
1477 case AUX_CH_D:
1478 return DP_AUX_CH_DATA(aux_ch, index);
330e20ec 1479 default:
bdabdb63
VS
1480 MISSING_CASE(aux_ch);
1481 return DP_AUX_CH_DATA(AUX_CH_B, index);
330e20ec
VS
1482 }
1483}
1484
4904fa66 1485static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp)
bdabdb63 1486{
de25eb7f 1487 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
563d22a0
ID
1488 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1489 enum aux_ch aux_ch = dig_port->aux_ch;
4904fa66 1490
bdabdb63
VS
1491 switch (aux_ch) {
1492 case AUX_CH_A:
1493 return DP_AUX_CH_CTL(aux_ch);
1494 case AUX_CH_B:
1495 case AUX_CH_C:
1496 case AUX_CH_D:
1497 return PCH_DP_AUX_CH_CTL(aux_ch);
da00bdcf 1498 default:
bdabdb63
VS
1499 MISSING_CASE(aux_ch);
1500 return DP_AUX_CH_CTL(AUX_CH_A);
da00bdcf
VS
1501 }
1502}
1503
4904fa66 1504static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index)
bdabdb63 1505{
de25eb7f 1506 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
563d22a0
ID
1507 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1508 enum aux_ch aux_ch = dig_port->aux_ch;
4904fa66 1509
bdabdb63
VS
1510 switch (aux_ch) {
1511 case AUX_CH_A:
1512 return DP_AUX_CH_DATA(aux_ch, index);
1513 case AUX_CH_B:
1514 case AUX_CH_C:
1515 case AUX_CH_D:
1516 return PCH_DP_AUX_CH_DATA(aux_ch, index);
330e20ec 1517 default:
bdabdb63
VS
1518 MISSING_CASE(aux_ch);
1519 return DP_AUX_CH_DATA(AUX_CH_A, index);
330e20ec
VS
1520 }
1521}
1522
4904fa66 1523static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
bdabdb63 1524{
de25eb7f 1525 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
563d22a0
ID
1526 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1527 enum aux_ch aux_ch = dig_port->aux_ch;
4904fa66 1528
bdabdb63
VS
1529 switch (aux_ch) {
1530 case AUX_CH_A:
1531 case AUX_CH_B:
1532 case AUX_CH_C:
1533 case AUX_CH_D:
bb187e93 1534 case AUX_CH_E:
bdabdb63
VS
1535 case AUX_CH_F:
1536 return DP_AUX_CH_CTL(aux_ch);
da00bdcf 1537 default:
bdabdb63
VS
1538 MISSING_CASE(aux_ch);
1539 return DP_AUX_CH_CTL(AUX_CH_A);
da00bdcf
VS
1540 }
1541}
1542
4904fa66 1543static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index)
bdabdb63 1544{
de25eb7f 1545 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
563d22a0
ID
1546 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1547 enum aux_ch aux_ch = dig_port->aux_ch;
4904fa66 1548
bdabdb63
VS
1549 switch (aux_ch) {
1550 case AUX_CH_A:
1551 case AUX_CH_B:
1552 case AUX_CH_C:
1553 case AUX_CH_D:
bb187e93 1554 case AUX_CH_E:
bdabdb63
VS
1555 case AUX_CH_F:
1556 return DP_AUX_CH_DATA(aux_ch, index);
330e20ec 1557 default:
bdabdb63
VS
1558 MISSING_CASE(aux_ch);
1559 return DP_AUX_CH_DATA(AUX_CH_A, index);
330e20ec
VS
1560 }
1561}
1562
/* Release resources allocated by intel_dp_aux_init() (the AUX name string). */
static void
intel_dp_aux_fini(struct intel_dp *intel_dp)
{
	kfree(intel_dp->aux.name);
}
1568
1569static void
1570intel_dp_aux_init(struct intel_dp *intel_dp)
330e20ec 1571{
de25eb7f 1572 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
563d22a0
ID
1573 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1574 struct intel_encoder *encoder = &dig_port->base;
91e939ae 1575
4904fa66
VS
1576 if (INTEL_GEN(dev_priv) >= 9) {
1577 intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg;
1578 intel_dp->aux_ch_data_reg = skl_aux_data_reg;
1579 } else if (HAS_PCH_SPLIT(dev_priv)) {
1580 intel_dp->aux_ch_ctl_reg = ilk_aux_ctl_reg;
1581 intel_dp->aux_ch_data_reg = ilk_aux_data_reg;
1582 } else {
1583 intel_dp->aux_ch_ctl_reg = g4x_aux_ctl_reg;
1584 intel_dp->aux_ch_data_reg = g4x_aux_data_reg;
1585 }
330e20ec 1586
91e939ae
VS
1587 if (INTEL_GEN(dev_priv) >= 9)
1588 intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
1589 else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
1590 intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
1591 else if (HAS_PCH_SPLIT(dev_priv))
1592 intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
1593 else
1594 intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;
bdabdb63 1595
91e939ae
VS
1596 if (INTEL_GEN(dev_priv) >= 9)
1597 intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
1598 else
1599 intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;
ab2c0672 1600
7a418e34 1601 drm_dp_aux_init(&intel_dp->aux);
8316f337 1602
7a418e34 1603 /* Failure to allocate our preferred name is not critical */
bdabdb63
VS
1604 intel_dp->aux.name = kasprintf(GFP_KERNEL, "DPDDC-%c",
1605 port_name(encoder->port));
9d1a1031 1606 intel_dp->aux.transfer = intel_dp_aux_transfer;
a4fc5ed6
KP
1607}
1608
e588fa18 1609bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
ed63baaf 1610{
fc603ca7 1611 int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];
e588fa18 1612
fc603ca7 1613 return max_rate >= 540000;
ed63baaf
TS
1614}
1615
2edd5327
MN
1616bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp)
1617{
1618 int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];
1619
1620 return max_rate >= 810000;
1621}
1622
c6bb3538
DV
1623static void
1624intel_dp_set_clock(struct intel_encoder *encoder,
840b32b7 1625 struct intel_crtc_state *pipe_config)
c6bb3538 1626{
2f773477 1627 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
9dd4ffdf
CML
1628 const struct dp_link_dpll *divisor = NULL;
1629 int i, count = 0;
c6bb3538 1630
9beb5fea 1631 if (IS_G4X(dev_priv)) {
45101e93
VS
1632 divisor = g4x_dpll;
1633 count = ARRAY_SIZE(g4x_dpll);
6e266956 1634 } else if (HAS_PCH_SPLIT(dev_priv)) {
9dd4ffdf
CML
1635 divisor = pch_dpll;
1636 count = ARRAY_SIZE(pch_dpll);
920a14b2 1637 } else if (IS_CHERRYVIEW(dev_priv)) {
ef9348c8
CML
1638 divisor = chv_dpll;
1639 count = ARRAY_SIZE(chv_dpll);
11a914c2 1640 } else if (IS_VALLEYVIEW(dev_priv)) {
65ce4bf5
CML
1641 divisor = vlv_dpll;
1642 count = ARRAY_SIZE(vlv_dpll);
c6bb3538 1643 }
9dd4ffdf
CML
1644
1645 if (divisor && count) {
1646 for (i = 0; i < count; i++) {
840b32b7 1647 if (pipe_config->port_clock == divisor[i].clock) {
9dd4ffdf
CML
1648 pipe_config->dpll = divisor[i].dpll;
1649 pipe_config->clock_set = true;
1650 break;
1651 }
1652 }
c6bb3538
DV
1653 }
1654}
1655
/*
 * Format @nelem integers from @array into @str as a comma-separated list,
 * e.g. "162000, 270000, 540000".  Output is always NUL-terminated; if the
 * buffer is too small the list is silently truncated.
 */
static void snprintf_int_array(char *str, size_t len,
			       const int *array, int nelem)
{
	int i;

	str[0] = '\0';

	for (i = 0; i < nelem; i++) {
		int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);

		/*
		 * Stop on encoding error or truncation.  The explicit r < 0
		 * check replaces the old implicit reliance on a negative int
		 * converting to a huge size_t in the comparison.
		 */
		if (r < 0 || (size_t)r >= len)
			return;
		str += r;
		len -= r;
	}
}
1671
1672static void intel_dp_print_rates(struct intel_dp *intel_dp)
1673{
0336400e
VS
1674 char str[128]; /* FIXME: too big for stack? */
1675
1676 if ((drm_debug & DRM_UT_KMS) == 0)
1677 return;
1678
55cfc580
JN
1679 snprintf_int_array(str, sizeof(str),
1680 intel_dp->source_rates, intel_dp->num_source_rates);
0336400e
VS
1681 DRM_DEBUG_KMS("source rates: %s\n", str);
1682
68f357cb
JN
1683 snprintf_int_array(str, sizeof(str),
1684 intel_dp->sink_rates, intel_dp->num_sink_rates);
0336400e
VS
1685 DRM_DEBUG_KMS("sink rates: %s\n", str);
1686
975ee5fc
JN
1687 snprintf_int_array(str, sizeof(str),
1688 intel_dp->common_rates, intel_dp->num_common_rates);
94ca719e 1689 DRM_DEBUG_KMS("common rates: %s\n", str);
0336400e
VS
1690}
1691
50fec21a
VS
1692int
1693intel_dp_max_link_rate(struct intel_dp *intel_dp)
1694{
50fec21a
VS
1695 int len;
1696
e6c0c64a 1697 len = intel_dp_common_len_rate_limit(intel_dp, intel_dp->max_link_rate);
50fec21a
VS
1698 if (WARN_ON(len <= 0))
1699 return 162000;
1700
975ee5fc 1701 return intel_dp->common_rates[len - 1];
50fec21a
VS
1702}
1703
ed4e9c1d
VS
1704int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
1705{
8001b754
JN
1706 int i = intel_dp_rate_index(intel_dp->sink_rates,
1707 intel_dp->num_sink_rates, rate);
b5c72b20
JN
1708
1709 if (WARN_ON(i < 0))
1710 i = 0;
1711
1712 return i;
ed4e9c1d
VS
1713}
1714
94223d04 1715void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
830de422 1716 u8 *link_bw, u8 *rate_select)
04a60f9f 1717{
68f357cb
JN
1718 /* eDP 1.4 rate select method. */
1719 if (intel_dp->use_rate_select) {
04a60f9f
VS
1720 *link_bw = 0;
1721 *rate_select =
1722 intel_dp_rate_select(intel_dp, port_clock);
1723 } else {
1724 *link_bw = drm_dp_link_rate_to_bw_code(port_clock);
1725 *rate_select = 0;
1726 }
1727}
1728
240999cf 1729static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp,
a4a15777
MN
1730 const struct intel_crtc_state *pipe_config)
1731{
1732 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1733
240999cf
AS
1734 return INTEL_GEN(dev_priv) >= 11 &&
1735 pipe_config->cpu_transcoder != TRANSCODER_A;
1736}
1737
1738static bool intel_dp_supports_fec(struct intel_dp *intel_dp,
1739 const struct intel_crtc_state *pipe_config)
1740{
1741 return intel_dp_source_supports_fec(intel_dp, pipe_config) &&
1742 drm_dp_sink_supports_fec(intel_dp->fec_capable);
1743}
1744
1745static bool intel_dp_source_supports_dsc(struct intel_dp *intel_dp,
1746 const struct intel_crtc_state *pipe_config)
1747{
1748 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
a4a15777
MN
1749
1750 return INTEL_GEN(dev_priv) >= 10 &&
1751 pipe_config->cpu_transcoder != TRANSCODER_A;
1752}
1753
1754static bool intel_dp_supports_dsc(struct intel_dp *intel_dp,
1755 const struct intel_crtc_state *pipe_config)
1756{
240999cf
AS
1757 if (!intel_dp_is_edp(intel_dp) && !pipe_config->fec_enable)
1758 return false;
1759
a4a15777
MN
1760 return intel_dp_source_supports_dsc(intel_dp, pipe_config) &&
1761 drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd);
1762}
1763
f580bea9
JN
1764static int intel_dp_compute_bpp(struct intel_dp *intel_dp,
1765 struct intel_crtc_state *pipe_config)
f9bb705e 1766{
de25eb7f 1767 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
ef32659a 1768 struct intel_connector *intel_connector = intel_dp->attached_connector;
f9bb705e
MK
1769 int bpp, bpc;
1770
1771 bpp = pipe_config->pipe_bpp;
1772 bpc = drm_dp_downstream_max_bpc(intel_dp->dpcd, intel_dp->downstream_ports);
1773
1774 if (bpc > 0)
1775 bpp = min(bpp, 3*bpc);
1776
ef32659a
JN
1777 if (intel_dp_is_edp(intel_dp)) {
1778 /* Get bpp from vbt only for panels that dont have bpp in edid */
1779 if (intel_connector->base.display_info.bpc == 0 &&
1780 dev_priv->vbt.edp.bpp && dev_priv->vbt.edp.bpp < bpp) {
1781 DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
1782 dev_priv->vbt.edp.bpp);
1783 bpp = dev_priv->vbt.edp.bpp;
1784 }
1785 }
1786
f9bb705e
MK
1787 return bpp;
1788}
1789
/* Adjust link config limits based on compliance test requests. */
void
intel_dp_adjust_compliance_config(struct intel_dp *intel_dp,
				  struct intel_crtc_state *pipe_config,
				  struct link_config_limits *limits)
{
	/* For DP Compliance we override the computed bpp for the pipe */
	if (intel_dp->compliance.test_data.bpc != 0) {
		int bpp = 3 * intel_dp->compliance.test_data.bpc;

		/* Pin bpp to exactly the requested value. */
		limits->min_bpp = limits->max_bpp = bpp;
		pipe_config->dither_force_disable = bpp == 6 * 3;

		DRM_DEBUG_KMS("Setting pipe_bpp to %d\n", bpp);
	}

	/* Use values requested by Compliance Test Request */
	if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
		int index;

		/* Validate the compliance test data since max values
		 * might have changed due to link train fallback.
		 */
		if (intel_dp_link_params_valid(intel_dp, intel_dp->compliance.test_link_rate,
					       intel_dp->compliance.test_lane_count)) {
			index = intel_dp_rate_index(intel_dp->common_rates,
						    intel_dp->num_common_rates,
						    intel_dp->compliance.test_link_rate);
			/* Pin both clock and lane count to the test request. */
			if (index >= 0)
				limits->min_clock = limits->max_clock = index;
			limits->min_lane_count = limits->max_lane_count =
				intel_dp->compliance.test_lane_count;
		}
	}
}
1825
3acd115d 1826/* Optimize link config in order: max bpp, min clock, min lanes */
204474a6 1827static int
3acd115d
JN
1828intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
1829 struct intel_crtc_state *pipe_config,
1830 const struct link_config_limits *limits)
1831{
1832 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
1833 int bpp, clock, lane_count;
1834 int mode_rate, link_clock, link_avail;
1835
1836 for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
1837 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1838 bpp);
1839
1840 for (clock = limits->min_clock; clock <= limits->max_clock; clock++) {
1841 for (lane_count = limits->min_lane_count;
1842 lane_count <= limits->max_lane_count;
1843 lane_count <<= 1) {
1844 link_clock = intel_dp->common_rates[clock];
1845 link_avail = intel_dp_max_data_rate(link_clock,
1846 lane_count);
1847
1848 if (mode_rate <= link_avail) {
1849 pipe_config->lane_count = lane_count;
1850 pipe_config->pipe_bpp = bpp;
1851 pipe_config->port_clock = link_clock;
1852
204474a6 1853 return 0;
3acd115d
JN
1854 }
1855 }
1856 }
1857 }
1858
204474a6 1859 return -EINVAL;
3acd115d
JN
1860}
1861
7769db58 1862/* Optimize link config in order: max bpp, min lanes, min clock */
204474a6 1863static int
7769db58
JN
1864intel_dp_compute_link_config_fast(struct intel_dp *intel_dp,
1865 struct intel_crtc_state *pipe_config,
1866 const struct link_config_limits *limits)
1867{
1868 struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
1869 int bpp, clock, lane_count;
1870 int mode_rate, link_clock, link_avail;
1871
1872 for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
1873 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
1874 bpp);
1875
1876 for (lane_count = limits->min_lane_count;
1877 lane_count <= limits->max_lane_count;
1878 lane_count <<= 1) {
1879 for (clock = limits->min_clock; clock <= limits->max_clock; clock++) {
1880 link_clock = intel_dp->common_rates[clock];
1881 link_avail = intel_dp_max_data_rate(link_clock,
1882 lane_count);
1883
1884 if (mode_rate <= link_avail) {
1885 pipe_config->lane_count = lane_count;
1886 pipe_config->pipe_bpp = bpp;
1887 pipe_config->port_clock = link_clock;
1888
204474a6 1889 return 0;
7769db58
JN
1890 }
1891 }
1892 }
1893 }
1894
204474a6 1895 return -EINVAL;
7769db58
JN
1896}
1897
a4a15777
MN
1898static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc)
1899{
1900 int i, num_bpc;
1901 u8 dsc_bpc[3] = {0};
1902
1903 num_bpc = drm_dp_dsc_sink_supported_input_bpcs(intel_dp->dsc_dpcd,
1904 dsc_bpc);
1905 for (i = 0; i < num_bpc; i++) {
1906 if (dsc_max_bpc >= dsc_bpc[i])
1907 return dsc_bpc[i] * 3;
1908 }
1909
1910 return 0;
1911}
1912
/*
 * Compute a DSC-compressed link configuration for @pipe_config.
 *
 * Assumes the non-DSC link computation already failed (or DSC was forced),
 * so DSC is configured at max bpp / max link rate / max lane count from
 * @limits. Returns 0 on success or a negative error code when the sink or
 * source cannot support DSC for this mode.
 */
static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
				       struct intel_crtc_state *pipe_config,
				       struct drm_connector_state *conn_state,
				       struct link_config_limits *limits)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	u8 dsc_max_bpc;
	int pipe_bpp;
	int ret;

	if (!intel_dp_supports_dsc(intel_dp, pipe_config))
		return -EINVAL;

	/* Cap the sink's max supported bpc by the user-requested maximum. */
	dsc_max_bpc = min_t(u8, DP_DSC_MAX_SUPPORTED_BPC,
			    conn_state->max_requested_bpc);

	pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, dsc_max_bpc);
	if (pipe_bpp < DP_DSC_MIN_SUPPORTED_BPC * 3) {
		DRM_DEBUG_KMS("No DSC support for less than 8bpc\n");
		return -EINVAL;
	}

	/*
	 * For now enable DSC for max bpp, max link rate, max lane count.
	 * Optimize this later for the minimum possible link rate/lane count
	 * with DSC enabled for the requested mode.
	 */
	pipe_config->pipe_bpp = pipe_bpp;
	pipe_config->port_clock = intel_dp->common_rates[limits->max_clock];
	pipe_config->lane_count = limits->max_lane_count;

	if (intel_dp_is_edp(intel_dp)) {
		/*
		 * eDP: take compressed bpp straight from the sink DPCD
		 * (>> 4 drops the fractional part of the reported value),
		 * clamped to the uncompressed pipe bpp.
		 */
		pipe_config->dsc_params.compressed_bpp =
			min_t(u16, drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4,
			      pipe_config->pipe_bpp);
		pipe_config->dsc_params.slice_count =
			drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
							true);
	} else {
		u16 dsc_max_output_bpp;
		u8 dsc_dp_slice_count;

		/* External DP: derive bpp/slices from link bw and mode timing. */
		dsc_max_output_bpp =
			intel_dp_dsc_get_output_bpp(pipe_config->port_clock,
						    pipe_config->lane_count,
						    adjusted_mode->crtc_clock,
						    adjusted_mode->crtc_hdisplay);
		dsc_dp_slice_count =
			intel_dp_dsc_get_slice_count(intel_dp,
						     adjusted_mode->crtc_clock,
						     adjusted_mode->crtc_hdisplay);
		if (!dsc_max_output_bpp || !dsc_dp_slice_count) {
			DRM_DEBUG_KMS("Compressed BPP/Slice Count not supported\n");
			return -EINVAL;
		}
		pipe_config->dsc_params.compressed_bpp = min_t(u16,
							       dsc_max_output_bpp >> 4,
							       pipe_config->pipe_bpp);
		pipe_config->dsc_params.slice_count = dsc_dp_slice_count;
	}
	/*
	 * VDSC engine operates at 1 Pixel per clock, so if peak pixel rate
	 * is greater than the maximum Cdclock and if slice count is > 1,
	 * then we need to use 2 VDSC instances.
	 */
	if (adjusted_mode->crtc_clock > dev_priv->max_cdclk_freq) {
		if (pipe_config->dsc_params.slice_count > 1) {
			pipe_config->dsc_params.dsc_split = true;
		} else {
			DRM_DEBUG_KMS("Cannot split stream to use 2 VDSC instances\n");
			return -EINVAL;
		}
	}

	ret = intel_dp_compute_dsc_params(intel_dp, pipe_config);
	if (ret < 0) {
		DRM_DEBUG_KMS("Cannot compute valid DSC parameters for Input Bpp = %d "
			      "Compressed BPP = %d\n",
			      pipe_config->pipe_bpp,
			      pipe_config->dsc_params.compressed_bpp);
		return ret;
	}

	pipe_config->dsc_params.compression_enable = true;
	DRM_DEBUG_KMS("DP DSC computed with Input Bpp = %d "
		      "Compressed Bpp = %d Slice Count = %d\n",
		      pipe_config->pipe_bpp,
		      pipe_config->dsc_params.compressed_bpp,
		      pipe_config->dsc_params.slice_count);

	return 0;
}
2007
/*
 * Compute the DP link configuration (link rate, lane count, pipe bpp) for
 * @pipe_config within the source/sink common capabilities, falling back to
 * DSC compression when the mode does not fit the available bandwidth (or
 * when DSC is forced via debugfs). Returns 0 or a negative error code.
 */
static int
intel_dp_compute_link_config(struct intel_encoder *encoder,
			     struct intel_crtc_state *pipe_config,
			     struct drm_connector_state *conn_state)
{
	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct link_config_limits limits;
	int common_len;
	int ret;

	common_len = intel_dp_common_len_rate_limit(intel_dp,
						    intel_dp->max_link_rate);

	/* No common link rates between source and sink */
	WARN_ON(common_len <= 0);

	limits.min_clock = 0;
	limits.max_clock = common_len - 1;

	limits.min_lane_count = 1;
	limits.max_lane_count = intel_dp_max_lane_count(intel_dp);

	limits.min_bpp = 6 * 3;	/* 6 bpc floor */
	limits.max_bpp = intel_dp_compute_bpp(intel_dp, pipe_config);

	if (intel_dp_is_edp(intel_dp) && intel_dp->edp_dpcd[0] < DP_EDP_14) {
		/*
		 * Use the maximum clock and number of lanes the eDP panel
		 * advertises being capable of. The eDP 1.3 and earlier panels
		 * are generally designed to support only a single clock and
		 * lane configuration, and typically these values correspond to
		 * the native resolution of the panel. With eDP 1.4 rate select
		 * and DSC, this is decreasingly the case, and we need to be
		 * able to select less than maximum link config.
		 */
		limits.min_lane_count = limits.max_lane_count;
		limits.min_clock = limits.max_clock;
	}

	/* Compliance tests may pin rate/lane count/bpp to requested values. */
	intel_dp_adjust_compliance_config(intel_dp, pipe_config, &limits);

	DRM_DEBUG_KMS("DP link computation with max lane count %i "
		      "max rate %d max bpp %d pixel clock %iKHz\n",
		      limits.max_lane_count,
		      intel_dp->common_rates[limits.max_clock],
		      limits.max_bpp, adjusted_mode->crtc_clock);

	if (intel_dp_is_edp(intel_dp))
		/*
		 * Optimize for fast and narrow. eDP 1.3 section 3.3 and eDP 1.4
		 * section A.1: "It is recommended that the minimum number of
		 * lanes be used, using the minimum link rate allowed for that
		 * lane configuration."
		 *
		 * Note that we use the max clock and lane count for eDP 1.3 and
		 * earlier, and fast vs. wide is irrelevant.
		 */
		ret = intel_dp_compute_link_config_fast(intel_dp, pipe_config,
							&limits);
	else
		/* Optimize for slow and wide. */
		ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config,
							&limits);

	/* enable compression if the mode doesn't fit available BW */
	DRM_DEBUG_KMS("Force DSC en = %d\n", intel_dp->force_dsc_en);
	if (ret || intel_dp->force_dsc_en) {
		ret = intel_dp_dsc_compute_config(intel_dp, pipe_config,
						  conn_state, &limits);
		if (ret < 0)
			return ret;
	}

	if (pipe_config->dsc_params.compression_enable) {
		DRM_DEBUG_KMS("DP lane count %d clock %d Input bpp %d Compressed bpp %d\n",
			      pipe_config->lane_count, pipe_config->port_clock,
			      pipe_config->pipe_bpp,
			      pipe_config->dsc_params.compressed_bpp);

		DRM_DEBUG_KMS("DP link rate required %i available %i\n",
			      intel_dp_link_required(adjusted_mode->crtc_clock,
						     pipe_config->dsc_params.compressed_bpp),
			      intel_dp_max_data_rate(pipe_config->port_clock,
						     pipe_config->lane_count));
	} else {
		DRM_DEBUG_KMS("DP lane count %d clock %d bpp %d\n",
			      pipe_config->lane_count, pipe_config->port_clock,
			      pipe_config->pipe_bpp);

		DRM_DEBUG_KMS("DP link rate required %i available %i\n",
			      intel_dp_link_required(adjusted_mode->crtc_clock,
						     pipe_config->pipe_bpp),
			      intel_dp_max_data_rate(pipe_config->port_clock,
						     pipe_config->lane_count));
	}
	return 0;
}
2106
37aa52bf
VS
2107bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
2108 const struct drm_connector_state *conn_state)
2109{
2110 const struct intel_digital_connector_state *intel_conn_state =
2111 to_intel_digital_connector_state(conn_state);
2112 const struct drm_display_mode *adjusted_mode =
2113 &crtc_state->base.adjusted_mode;
2114
2115 if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
2116 /*
2117 * See:
2118 * CEA-861-E - 5.1 Default Encoding Parameters
2119 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
2120 */
2121 return crtc_state->pipe_bpp != 18 &&
2122 drm_default_rgb_quant_range(adjusted_mode) ==
2123 HDMI_QUANTIZATION_RANGE_LIMITED;
2124 } else {
2125 return intel_conn_state->broadcast_rgb ==
2126 INTEL_BROADCAST_RGB_LIMITED;
2127 }
2128}
2129
/*
 * Atomic .compute_config() hook for DP encoders: validate the requested
 * mode, fill in audio/panel-fitting/link/M-N/PSR state in @pipe_config.
 * Returns 0 on success or a negative error code to fail the atomic check.
 */
int
intel_dp_compute_config(struct intel_encoder *encoder,
			struct intel_crtc_state *pipe_config,
			struct drm_connector_state *conn_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_lspcon *lspcon = enc_to_intel_lspcon(&encoder->base);
	enum port port = encoder->port;
	struct intel_crtc *intel_crtc = to_intel_crtc(pipe_config->base.crtc);
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct intel_digital_connector_state *intel_conn_state =
		to_intel_digital_connector_state(conn_state);
	/* Some sinks need fixed M/N values (DP_DPCD_QUIRK_CONSTANT_N). */
	bool constant_n = drm_dp_has_quirk(&intel_dp->desc,
					   DP_DPCD_QUIRK_CONSTANT_N);
	int ret;

	if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A)
		pipe_config->has_pch_encoder = true;

	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
	if (lspcon->active)
		lspcon_ycbcr420_config(&intel_connector->base, pipe_config);

	pipe_config->has_drrs = false;
	/* No audio on g4x or port A; otherwise honor the force_audio property. */
	if (IS_G4X(dev_priv) || port == PORT_A)
		pipe_config->has_audio = false;
	else if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
		pipe_config->has_audio = intel_dp->has_audio;
	else
		pipe_config->has_audio = intel_conn_state->force_audio == HDMI_AUDIO_ON;

	if (intel_dp_is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
		/* eDP always drives the panel's fixed native mode. */
		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
				       adjusted_mode);

		if (INTEL_GEN(dev_priv) >= 9) {
			ret = skl_update_scaler_crtc(pipe_config);
			if (ret)
				return ret;
		}

		if (HAS_GMCH(dev_priv))
			intel_gmch_panel_fitting(intel_crtc, pipe_config,
						 conn_state->scaling_mode);
		else
			intel_pch_panel_fitting(intel_crtc, pipe_config,
						conn_state->scaling_mode);
	}

	/* Reject mode flags this hardware/encoder cannot handle. */
	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
		return -EINVAL;

	if (HAS_GMCH(dev_priv) &&
	    adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
		return -EINVAL;

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
		return -EINVAL;

	pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) &&
				  intel_dp_supports_fec(intel_dp, pipe_config);

	ret = intel_dp_compute_link_config(encoder, pipe_config, conn_state);
	if (ret < 0)
		return ret;

	pipe_config->limited_color_range =
		intel_dp_limited_color_range(pipe_config, conn_state);

	/* M/N are based on the compressed bpp when DSC is enabled. */
	if (!pipe_config->dsc_params.compression_enable)
		intel_link_compute_m_n(pipe_config->pipe_bpp,
				       pipe_config->lane_count,
				       adjusted_mode->crtc_clock,
				       pipe_config->port_clock,
				       &pipe_config->dp_m_n,
				       constant_n);
	else
		intel_link_compute_m_n(pipe_config->dsc_params.compressed_bpp,
				       pipe_config->lane_count,
				       adjusted_mode->crtc_clock,
				       pipe_config->port_clock,
				       &pipe_config->dp_m_n,
				       constant_n);

	/* Second set of M/N values for the DRRS downclocked refresh rate. */
	if (intel_connector->panel.downclock_mode != NULL &&
		dev_priv->drrs.type == SEAMLESS_DRRS_SUPPORT) {
			pipe_config->has_drrs = true;
			intel_link_compute_m_n(pipe_config->pipe_bpp,
					       pipe_config->lane_count,
					       intel_connector->panel.downclock_mode->clock,
					       pipe_config->port_clock,
					       &pipe_config->dp_m2_n2,
					       constant_n);
	}

	if (!HAS_DDI(dev_priv))
		intel_dp_set_clock(encoder, pipe_config);

	intel_psr_compute_config(intel_dp, pipe_config);

	return 0;
}
2234
/*
 * Latch the parameters for the next link training. Clearing link_trained
 * marks the current training state stale so the link is retrained with
 * the new rate/lane count/MST setting.
 */
void intel_dp_set_link_params(struct intel_dp *intel_dp,
			      int link_rate, u8 lane_count,
			      bool link_mst)
{
	intel_dp->link_trained = false;
	intel_dp->link_rate = link_rate;
	intel_dp->lane_count = lane_count;
	intel_dp->link_mst = link_mst;
}
2244
/*
 * Program the port's DP control register value (intel_dp->DP) for the
 * given crtc state, handling the per-platform register layout differences.
 * Only computes/stages the value; the actual enable happens later.
 */
static void intel_dp_prepare(struct intel_encoder *encoder,
			     const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = encoder->port;
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
	const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;

	intel_dp_set_link_params(intel_dp, pipe_config->port_clock,
				 pipe_config->lane_count,
				 intel_crtc_has_type(pipe_config,
						     INTEL_OUTPUT_DP_MST));

	/*
	 * There are four kinds of DP registers:
	 *
	 * IBX PCH
	 * SNB CPU
	 * IVB CPU
	 * CPT PCH
	 *
	 * IBX PCH and CPU are the same for almost everything,
	 * except that the CPU DP PLL is configured in this
	 * register
	 *
	 * CPT PCH is quite different, having many bits moved
	 * to the TRANS_DP_CTL register instead. That
	 * configuration happens (oddly) in ironlake_pch_enable
	 */

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;

	/* Handle DP bits in common between all three register formats */
	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	intel_dp->DP |= DP_PORT_WIDTH(pipe_config->lane_count);

	/* Split out the IBX/CPU vs CPT settings */

	if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
		/* IVB CPU port A: sync polarity and pipe select live here. */
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		intel_dp->DP |= DP_PIPE_SEL_IVB(crtc->pipe);
	} else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
		/* CPT PCH ports: enhanced framing is in TRANS_DP_CTL instead. */
		u32 trans_dp;

		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			trans_dp |= TRANS_DP_ENH_FRAMING;
		else
			trans_dp &= ~TRANS_DP_ENH_FRAMING;
		I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
	} else {
		/* IBX PCH / CPU ports. */
		if (IS_G4X(dev_priv) && pipe_config->limited_color_range)
			intel_dp->DP |= DP_COLOR_RANGE_16_235;

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		if (IS_CHERRYVIEW(dev_priv))
			intel_dp->DP |= DP_PIPE_SEL_CHV(crtc->pipe);
		else
			intel_dp->DP |= DP_PIPE_SEL(crtc->pipe);
	}
}
2328
/*
 * PP_STATUS mask/value pairs for wait_panel_status(): panel fully on,
 * fully off, and power-cycle (off + cycle delay) idle states of the
 * panel power sequencer.
 */
#define IDLE_ON_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
#define IDLE_ON_VALUE   	(PP_ON | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)

#define IDLE_OFF_MASK		(PP_ON | PP_SEQUENCE_MASK | 0                     | 0)
#define IDLE_OFF_VALUE		(0     | PP_SEQUENCE_NONE | 0                     | 0)

#define IDLE_CYCLE_MASK		(PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE	(0     | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)
99ea7127 2337
static void intel_pps_verify_state(struct intel_dp *intel_dp);

/*
 * Poll PP_STATUS until (status & mask) == value, logging an error after a
 * 5 second timeout. Caller must hold pps_mutex.
 */
static void wait_panel_status(struct intel_dp *intel_dp,
				       u32 mask,
				       u32 value)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	i915_reg_t pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	intel_pps_verify_state(intel_dp);

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
		      mask, value,
		      I915_READ(pp_stat_reg),
		      I915_READ(pp_ctrl_reg));

	if (intel_wait_for_register(&dev_priv->uncore,
				    pp_stat_reg, mask, value,
				    5000))
		DRM_ERROR("Panel status timeout: status %08x control %08x\n",
			  I915_READ(pp_stat_reg),
			  I915_READ(pp_ctrl_reg));

	DRM_DEBUG_KMS("Wait complete\n");
}
32ce697c 2368
/* Wait for the power sequencer to report the panel fully on and idle. */
static void wait_panel_on(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power on\n");
	wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}
2374
/* Wait for the power sequencer to report the panel fully off. */
static void wait_panel_off(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power off time\n");
	wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}
2380
/*
 * Ensure the panel power-cycle (T11+T12) delay has elapsed since the panel
 * was last powered off, sleeping for the remainder if needed, then wait
 * for the sequencer to reach its off/idle state.
 */
static void wait_panel_power_cycle(struct intel_dp *intel_dp)
{
	ktime_t panel_power_on_time;
	s64 panel_power_off_duration;

	DRM_DEBUG_KMS("Wait for panel power cycle\n");

	/* take the difference of current time and panel power off time
	 * and then make panel wait for t11_t12 if needed. */
	panel_power_on_time = ktime_get_boottime();
	panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->panel_power_off_time);

	/* When we disable the VDD override bit last we have to do the manual
	 * wait. */
	if (panel_power_off_duration < (s64)intel_dp->panel_power_cycle_delay)
		wait_remaining_ms_from_jiffies(jiffies,
				       intel_dp->panel_power_cycle_delay - panel_power_off_duration);

	wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}
2401
/* Honor the panel's power-on -> backlight-on delay (T8). */
static void wait_backlight_on(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
				       intel_dp->backlight_on_delay);
}
2407
/* Honor the backlight-off delay (T9) before further panel operations. */
static void edp_wait_backlight_off(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
				       intel_dp->backlight_off_delay);
}
99ea7127 2413
/* Read the current pp_control value, unlocking the register if it
 * is locked
 */

static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 control;

	lockdep_assert_held(&dev_priv->pps_mutex);

	control = I915_READ(_pp_ctrl_reg(intel_dp));
	/*
	 * On non-DDI platforms the register is write-protected; substitute
	 * the unlock key so subsequent writes take effect. Finding it locked
	 * is unexpected, hence the WARN.
	 */
	if (WARN_ON(!HAS_DDI(dev_priv) &&
		    (control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) {
		control &= ~PANEL_UNLOCK_MASK;
		control |= PANEL_UNLOCK_REGS;
	}
	return control;
}
2433
/*
 * Must be paired with edp_panel_vdd_off().
 * Must hold pps_mutex around the whole on/off sequence.
 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
 *
 * Forces panel VDD on so AUX transactions work without powering the whole
 * panel. Returns true if this call actually enabled VDD (i.e. the caller
 * is responsible for the matching disable).
 */
static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	u32 pp;
	i915_reg_t pp_stat_reg, pp_ctrl_reg;
	bool need_to_disable = !intel_dp->want_panel_vdd;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!intel_dp_is_edp(intel_dp))
		return false;

	/* Cancel any pending deferred VDD-off while we want VDD up. */
	cancel_delayed_work(&intel_dp->panel_vdd_work);
	intel_dp->want_panel_vdd = true;

	if (edp_have_panel_vdd(intel_dp))
		return need_to_disable;

	intel_display_power_get(dev_priv,
				intel_aux_power_domain(intel_dig_port));

	DRM_DEBUG_KMS("Turning eDP port %c VDD on\n",
		      port_name(intel_dig_port->base.port));

	if (!edp_have_panel_power(intel_dp))
		wait_panel_power_cycle(intel_dp);

	pp = ironlake_get_pp_control(intel_dp);
	pp |= EDP_FORCE_VDD;

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);
	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
		      I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
	/*
	 * If the panel wasn't on, delay before accessing aux channel
	 */
	if (!edp_have_panel_power(intel_dp)) {
		DRM_DEBUG_KMS("eDP port %c panel power wasn't enabled\n",
			      port_name(intel_dig_port->base.port));
		msleep(intel_dp->panel_power_up_delay);
	}

	return need_to_disable;
}
2488
/*
 * Must be paired with intel_edp_panel_vdd_off() or
 * intel_edp_panel_off().
 * Nested calls to these functions are not allowed since
 * we drop the lock. Caller must use some higher level
 * locking to prevent nested calls from other threads.
 */
void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
{
	intel_wakeref_t wakeref;
	bool vdd;

	if (!intel_dp_is_edp(intel_dp))
		return;

	vdd = false;
	with_pps_lock(intel_dp, wakeref)
		vdd = edp_panel_vdd_on(intel_dp);
	/* Warn if VDD was already requested - see pairing rules above. */
	I915_STATE_WARN(!vdd, "eDP port %c VDD already requested on\n",
			port_name(dp_to_dig_port(intel_dp)->base.port));
}
2510
/*
 * Immediately drop the forced-VDD bit (once nobody wants it anymore) and
 * release the AUX power domain reference taken by edp_panel_vdd_on().
 * Caller must hold pps_mutex.
 */
static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *intel_dig_port =
		dp_to_dig_port(intel_dp);
	u32 pp;
	i915_reg_t pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	WARN_ON(intel_dp->want_panel_vdd);

	if (!edp_have_panel_vdd(intel_dp))
		return;

	DRM_DEBUG_KMS("Turning eDP port %c VDD off\n",
		      port_name(intel_dig_port->base.port));

	pp = ironlake_get_pp_control(intel_dp);
	pp &= ~EDP_FORCE_VDD;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp_stat_reg = _pp_stat_reg(intel_dp);

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	/* Make sure sequencer is idle before allowing subsequent activity */
	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
		      I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));

	/* VDD was the only thing keeping the panel powered: start T11/T12. */
	if ((pp & PANEL_POWER_ON) == 0)
		intel_dp->panel_power_off_time = ktime_get_boottime();

	intel_display_power_put_unchecked(dev_priv,
					  intel_aux_power_domain(intel_dig_port));
}
5d613501 2548
/* Deferred work that turns VDD off once it is no longer wanted. */
static void edp_panel_vdd_work(struct work_struct *__work)
{
	struct intel_dp *intel_dp =
		container_of(to_delayed_work(__work),
			     struct intel_dp, panel_vdd_work);
	intel_wakeref_t wakeref;

	with_pps_lock(intel_dp, wakeref) {
		/* Re-check under the lock: VDD may have been re-requested. */
		if (!intel_dp->want_panel_vdd)
			edp_panel_vdd_off_sync(intel_dp);
	}
}
2561
/* Schedule the deferred VDD-off work well in the future (5x cycle delay). */
static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
{
	unsigned long delay;

	/*
	 * Queue the timer to fire a long time from now (relative to the power
	 * down delay) to keep the panel power up across a sequence of
	 * operations.
	 */
	delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
	schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
}
2574
/*
 * Must be paired with edp_panel_vdd_on().
 * Must hold pps_mutex around the whole on/off sequence.
 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
 *
 * @sync selects immediate VDD disable vs. deferring it via delayed work.
 */
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!intel_dp_is_edp(intel_dp))
		return;

	I915_STATE_WARN(!intel_dp->want_panel_vdd, "eDP port %c VDD not forced on",
			port_name(dp_to_dig_port(intel_dp)->base.port));

	intel_dp->want_panel_vdd = false;

	if (sync)
		edp_panel_vdd_off_sync(intel_dp);
	else
		edp_panel_vdd_schedule_off(intel_dp);
}
2599
/*
 * Turn the eDP panel power on via the power sequencer and wait until it
 * reports fully on. Caller must hold pps_mutex.
 */
static void edp_panel_on(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 pp;
	i915_reg_t pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!intel_dp_is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP port %c panel power on\n",
		      port_name(dp_to_dig_port(intel_dp)->base.port));

	if (WARN(edp_have_panel_power(intel_dp),
		 "eDP port %c panel power already on\n",
		 port_name(dp_to_dig_port(intel_dp)->base.port)))
		return;

	wait_panel_power_cycle(intel_dp);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp = ironlake_get_pp_control(intel_dp);
	if (IS_GEN(dev_priv, 5)) {
		/* ILK workaround: disable reset around power sequence */
		pp &= ~PANEL_POWER_RESET;
		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
	}

	pp |= PANEL_POWER_ON;
	if (!IS_GEN(dev_priv, 5))
		pp |= PANEL_POWER_RESET;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	wait_panel_on(intel_dp);
	/* Timestamp used for the backlight-on (T8) delay. */
	intel_dp->last_power_on = jiffies;

	if (IS_GEN(dev_priv, 5)) {
		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
	}
}
e39b999a 2646
/* Public wrapper: take the PPS lock and turn the eDP panel power on. */
void intel_edp_panel_on(struct intel_dp *intel_dp)
{
	intel_wakeref_t wakeref;

	if (!intel_dp_is_edp(intel_dp))
		return;

	with_pps_lock(intel_dp, wakeref)
		edp_panel_on(intel_dp);
}
2657
/*
 * Turn the eDP panel power off (clearing VDD and backlight enable in the
 * same write), wait for full power-off, and drop the power reference taken
 * when VDD was enabled. Caller must hold pps_mutex and have VDD forced on.
 */
static void edp_panel_off(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	u32 pp;
	i915_reg_t pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!intel_dp_is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP port %c panel power off\n",
		      port_name(dig_port->base.port));

	WARN(!intel_dp->want_panel_vdd, "Need eDP port %c VDD to turn off panel\n",
	     port_name(dig_port->base.port));

	pp = ironlake_get_pp_control(intel_dp);
	/* We need to switch off panel power _and_ force vdd, for otherwise some
	 * panels get very unhappy and cease to work. */
	pp &= ~(PANEL_POWER_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
		EDP_BLC_ENABLE);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	intel_dp->want_panel_vdd = false;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	wait_panel_off(intel_dp);
	/* Timestamp used for the power-cycle (T11/T12) delay. */
	intel_dp->panel_power_off_time = ktime_get_boottime();

	/* We got a reference when we enabled the VDD. */
	intel_display_power_put_unchecked(dev_priv, intel_aux_power_domain(dig_port));
}
e39b999a 2696
/* Public wrapper: take the PPS lock and turn the eDP panel power off. */
void intel_edp_panel_off(struct intel_dp *intel_dp)
{
	intel_wakeref_t wakeref;

	if (!intel_dp_is_edp(intel_dp))
		return;

	with_pps_lock(intel_dp, wakeref)
		edp_panel_off(intel_dp);
}
2707
1250d107
JN
/* Enable backlight in the panel power control. */
static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	intel_wakeref_t wakeref;

	/*
	 * If we enable the backlight right away following a panel power
	 * on, we may see slight flicker as the panel syncs with the eDP
	 * link. So delay a bit to make sure the image is solid before
	 * allowing it to appear.
	 */
	wait_backlight_on(intel_dp);

	/* Set EDP_BLC_ENABLE in the PP control register under the PPS lock */
	with_pps_lock(intel_dp, wakeref) {
		i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
		u32 pp;

		pp = ironlake_get_pp_control(intel_dp);
		pp |= EDP_BLC_ENABLE;

		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
	}
}
2733
/* Enable backlight PWM and backlight PP control. */
void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
			    const struct drm_connector_state *conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(conn_state->best_encoder);

	if (!intel_dp_is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("\n");

	/* PWM first, then the panel-power-control backlight enable */
	intel_panel_enable_backlight(crtc_state, conn_state);
	_intel_edp_backlight_on(intel_dp);
}
2748
/* Disable backlight in the panel power control. */
static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	intel_wakeref_t wakeref;

	if (!intel_dp_is_edp(intel_dp))
		return;

	/* Clear EDP_BLC_ENABLE in the PP control register under the PPS lock */
	with_pps_lock(intel_dp, wakeref) {
		i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
		u32 pp;

		pp = ironlake_get_pp_control(intel_dp);
		pp &= ~EDP_BLC_ENABLE;

		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);
	}

	/* Record when the backlight went off so the next enable can
	 * honor the required backlight-off delay. */
	intel_dp->last_backlight_off = jiffies;
	edp_wait_backlight_off(intel_dp);
}
f7d2323c 2772
/* Disable backlight PP control and backlight PWM. */
void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(old_conn_state->best_encoder);

	if (!intel_dp_is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("\n");

	/* Reverse order of intel_edp_backlight_on(): PP control first, then PWM */
	_intel_edp_backlight_off(intel_dp);
	intel_panel_disable_backlight(old_conn_state);
}
a4fc5ed6 2786
73580fb7
JN
/*
 * Hook for controlling the panel power control backlight through the bl_power
 * sysfs attribute. Take care to handle multiple calls.
 */
static void intel_edp_backlight_power(struct intel_connector *connector,
				      bool enable)
{
	struct intel_dp *intel_dp = intel_attached_dp(&connector->base);
	intel_wakeref_t wakeref;
	bool is_enabled;

	/* Read the current hardware state so repeated calls are no-ops */
	is_enabled = false;
	with_pps_lock(intel_dp, wakeref)
		is_enabled = ironlake_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
	if (is_enabled == enable)
		return;

	DRM_DEBUG_KMS("panel power control backlight %s\n",
		      enable ? "enable" : "disable");

	if (enable)
		_intel_edp_backlight_on(intel_dp);
	else
		_intel_edp_backlight_off(intel_dp);
}
2812
64e1077a
VS
/*
 * State assertion: warn if the DP port enable bit does not match the
 * expected @state.
 */
static void assert_dp_port(struct intel_dp *intel_dp, bool state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	bool cur_state = I915_READ(intel_dp->output_reg) & DP_PORT_EN;

	I915_STATE_WARN(cur_state != state,
			"DP port %c state assertion failure (expected %s, current %s)\n",
			port_name(dig_port->base.port),
			onoff(state), onoff(cur_state));
}
#define assert_dp_port_disabled(d) assert_dp_port((d), false)
2825
/*
 * State assertion: warn if the eDP PLL enable bit (in DP_A) does not
 * match the expected @state.
 */
static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
{
	bool cur_state = I915_READ(DP_A) & DP_PLL_ENABLE;

	I915_STATE_WARN(cur_state != state,
			"eDP PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
#define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
2836
/*
 * Enable the eDP PLL on port A (ILK-class hardware). The pipe and DP
 * port must still be disabled and the PLL off when this is called.
 */
static void ironlake_edp_pll_on(struct intel_dp *intel_dp,
				const struct intel_crtc_state *pipe_config)
{
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	assert_pipe_disabled(dev_priv, crtc->pipe);
	assert_dp_port_disabled(intel_dp);
	assert_edp_pll_disabled(dev_priv);

	DRM_DEBUG_KMS("enabling eDP PLL for clock %d\n",
		      pipe_config->port_clock);

	/* Program the PLL frequency select before enabling the PLL */
	intel_dp->DP &= ~DP_PLL_FREQ_MASK;

	if (pipe_config->port_clock == 162000)
		intel_dp->DP |= DP_PLL_FREQ_162MHZ;
	else
		intel_dp->DP |= DP_PLL_FREQ_270MHZ;

	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	udelay(500);

	/*
	 * [DevILK] Work around required when enabling DP PLL
	 * while a pipe is enabled going to FDI:
	 * 1. Wait for the start of vertical blank on the enabled pipe going to FDI
	 * 2. Program DP PLL enable
	 */
	if (IS_GEN(dev_priv, 5))
		intel_wait_for_vblank_if_active(dev_priv, !crtc->pipe);

	intel_dp->DP |= DP_PLL_ENABLE;

	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	udelay(200);
}
2876
adc10304
VS
/*
 * Disable the eDP PLL on port A. The pipe and DP port must already be
 * disabled, and the PLL must currently be enabled.
 */
static void ironlake_edp_pll_off(struct intel_dp *intel_dp,
				 const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	assert_pipe_disabled(dev_priv, crtc->pipe);
	assert_dp_port_disabled(intel_dp);
	assert_edp_pll_enabled(dev_priv);

	DRM_DEBUG_KMS("disabling eDP PLL\n");

	intel_dp->DP &= ~DP_PLL_ENABLE;

	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	udelay(200);
}
2895
857c416e
VS
/*
 * Return true when the sink's downstream port relies on HPD signalling
 * that only works in D0, so we must not put the sink into D3.
 */
static bool downstream_hpd_needs_d0(struct intel_dp *intel_dp)
{
	/*
	 * DPCD 1.2+ should support BRANCH_DEVICE_CTRL, and thus
	 * be capable of signalling downstream hpd with a long pulse.
	 * Whether or not that means D3 is safe to use is not clear,
	 * but let's assume so until proven otherwise.
	 *
	 * FIXME should really check all downstream ports...
	 */
	return intel_dp->dpcd[DP_DPCD_REV] == 0x11 &&
		intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT &&
		intel_dp->downstream_ports[0] & DP_DS_PORT_HPD;
}
2910
2279298d
GS
/*
 * Enable/disable DSC decompression in the sink via the DP_DSC_ENABLE
 * DPCD register. No-op unless DSC was enabled in the crtc state.
 */
void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
					   const struct intel_crtc_state *crtc_state,
					   bool enable)
{
	int ret;

	if (!crtc_state->dsc_params.compression_enable)
		return;

	ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_DSC_ENABLE,
				 enable ? DP_DECOMPRESSION_EN : 0);
	if (ret < 0)
		DRM_DEBUG_KMS("Failed to %s sink decompression state\n",
			      enable ? "enable" : "disable");
}
2926
/* If the sink supports it, try to set the power state appropriately */
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
{
	int ret, i;

	/* Should have a valid DPCD by this point */
	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
		return;

	if (mode != DRM_MODE_DPMS_ON) {
		/* Keep the sink in D0 if its downstream HPD needs it */
		if (downstream_hpd_needs_d0(intel_dp))
			return;

		ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
					 DP_SET_POWER_D3);
	} else {
		struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);

		/*
		 * When turning on, we need to retry for 1ms to give the sink
		 * time to wake up.
		 */
		for (i = 0; i < 3; i++) {
			ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
						 DP_SET_POWER_D0);
			if (ret == 1)
				break;
			msleep(1);
		}

		/* LSPCON adapters need to re-enter PCON mode after wake */
		if (ret == 1 && lspcon->active)
			lspcon_wait_pcon_mode(lspcon);
	}

	if (ret != 1)
		DRM_DEBUG_KMS("failed to %s sink power state\n",
			      mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
}
2965
59b74c49
VS
/*
 * On CPT PCH, find which pipe the transcoder has selected for @port.
 * Returns false (and sets *pipe to PIPE_A so asserts have something
 * defined to work with) when no pipe drives the port.
 */
static bool cpt_dp_port_selected(struct drm_i915_private *dev_priv,
				 enum port port, enum pipe *pipe)
{
	enum pipe p;

	for_each_pipe(dev_priv, p) {
		u32 val = I915_READ(TRANS_DP_CTL(p));

		if ((val & TRANS_DP_PORT_SEL_MASK) == TRANS_DP_PORT_SEL(port)) {
			*pipe = p;
			return true;
		}
	}

	DRM_DEBUG_KMS("No pipe for DP port %c found\n", port_name(port));

	/* must initialize pipe to something for the asserts */
	*pipe = PIPE_A;

	return false;
}
2987
/*
 * Read the DP port register and report whether the port is enabled.
 * Also decodes which pipe the port is attached to (platform-dependent
 * field layout), even when the port is disabled, for the asserts.
 */
bool intel_dp_port_enabled(struct drm_i915_private *dev_priv,
			   i915_reg_t dp_reg, enum port port,
			   enum pipe *pipe)
{
	bool ret;
	u32 val;

	val = I915_READ(dp_reg);

	ret = val & DP_PORT_EN;

	/* asserts want to know the pipe even if the port is disabled */
	if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
		*pipe = (val & DP_PIPE_SEL_MASK_IVB) >> DP_PIPE_SEL_SHIFT_IVB;
	else if (HAS_PCH_CPT(dev_priv) && port != PORT_A)
		ret &= cpt_dp_port_selected(dev_priv, port, pipe);
	else if (IS_CHERRYVIEW(dev_priv))
		*pipe = (val & DP_PIPE_SEL_MASK_CHV) >> DP_PIPE_SEL_SHIFT_CHV;
	else
		*pipe = (val & DP_PIPE_SEL_MASK) >> DP_PIPE_SEL_SHIFT;

	return ret;
}
3011
19d8fe15
DV
/*
 * Encoder ->get_hw_state hook: report whether the DP port is enabled
 * and on which pipe. Bails out (false) if the encoder's power domain
 * is not currently enabled, to avoid touching unpowered registers.
 */
static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
				  enum pipe *pipe)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	intel_wakeref_t wakeref;
	bool ret;

	wakeref = intel_display_power_get_if_enabled(dev_priv,
						     encoder->power_domain);
	if (!wakeref)
		return false;

	ret = intel_dp_port_enabled(dev_priv, intel_dp->output_reg,
				    encoder->port, pipe);

	intel_display_power_put(dev_priv, encoder->power_domain, wakeref);

	return ret;
}
d240f20f 3032
/*
 * Encoder ->get_config hook: read back the pipe configuration (sync
 * polarity, audio, lane count, link M/N, port clock, dotclock) from the
 * DP port hardware state into @pipe_config.
 */
static void intel_dp_get_config(struct intel_encoder *encoder,
				struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	u32 tmp, flags = 0;
	enum port port = encoder->port;
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);

	if (encoder->type == INTEL_OUTPUT_EDP)
		pipe_config->output_types |= BIT(INTEL_OUTPUT_EDP);
	else
		pipe_config->output_types |= BIT(INTEL_OUTPUT_DP);

	tmp = I915_READ(intel_dp->output_reg);

	pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;

	/* On CPT the sync polarity lives in the transcoder DP control
	 * register; elsewhere it is in the port register itself. */
	if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
		u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));

		if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	} else {
		if (tmp & DP_SYNC_HS_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (tmp & DP_SYNC_VS_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	}

	pipe_config->base.adjusted_mode.flags |= flags;

	if (IS_G4X(dev_priv) && tmp & DP_COLOR_RANGE_16_235)
		pipe_config->limited_color_range = true;

	pipe_config->lane_count =
		((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;

	intel_dp_get_m_n(crtc, pipe_config);

	/* Port A's link clock comes from the eDP PLL frequency select */
	if (port == PORT_A) {
		if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
			pipe_config->port_clock = 162000;
		else
			pipe_config->port_clock = 270000;
	}

	pipe_config->base.adjusted_mode.crtc_clock =
		intel_dotclock_calculate(pipe_config->port_clock,
					 &pipe_config->dp_m_n);

	if (intel_dp_is_edp(intel_dp) && dev_priv->vbt.edp.bpp &&
	    pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) {
		/*
		 * This is a big fat ugly hack.
		 *
		 * Some machines in UEFI boot mode provide us a VBT that has 18
		 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
		 * unknown we fail to light up. Yet the same BIOS boots up with
		 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
		 * max, not what it tells us to use.
		 *
		 * Note: This will still be broken if the eDP panel is not lit
		 * up by the BIOS, and thus we can't get the mode at module
		 * load.
		 */
		DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
			      pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp);
		dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp;
	}
}
3116
/*
 * Common DP disable path: stop audio, then power the panel down with
 * VDD forced on across the transition (some panels misbehave otherwise).
 */
static void intel_disable_dp(struct intel_encoder *encoder,
			     const struct intel_crtc_state *old_crtc_state,
			     const struct drm_connector_state *old_conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	intel_dp->link_trained = false;

	if (old_crtc_state->has_audio)
		intel_audio_codec_disable(encoder,
					  old_crtc_state, old_conn_state);

	/* Make sure the panel is off before trying to change the mode. But also
	 * ensure that we have vdd while we switch off the panel. */
	intel_edp_panel_vdd_on(intel_dp);
	intel_edp_backlight_off(old_conn_state);
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
	intel_edp_panel_off(intel_dp);
}
3136
/* g4x encoder ->disable hook: just the common DP disable sequence. */
static void g4x_disable_dp(struct intel_encoder *encoder,
			   const struct intel_crtc_state *old_crtc_state,
			   const struct drm_connector_state *old_conn_state)
{
	intel_disable_dp(encoder, old_crtc_state, old_conn_state);
}
3143
/* VLV/CHV encoder ->disable hook: just the common DP disable sequence. */
static void vlv_disable_dp(struct intel_encoder *encoder,
			   const struct intel_crtc_state *old_crtc_state,
			   const struct drm_connector_state *old_conn_state)
{
	intel_disable_dp(encoder, old_crtc_state, old_conn_state);
}
3150
/*
 * g4x encoder ->post_disable hook: bring the link down after the pipe
 * has been disabled, and shut off the eDP PLL on port A.
 */
static void g4x_post_disable_dp(struct intel_encoder *encoder,
				const struct intel_crtc_state *old_crtc_state,
				const struct drm_connector_state *old_conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = encoder->port;

	/*
	 * Bspec does not list a specific disable sequence for g4x DP.
	 * Follow the ilk+ sequence (disable pipe before the port) for
	 * g4x DP as it does not suffer from underruns like the normal
	 * g4x modeset sequence (disable pipe after the port).
	 */
	intel_dp_link_down(encoder, old_crtc_state);

	/* Only ilk+ has port A */
	if (port == PORT_A)
		ironlake_edp_pll_off(intel_dp, old_crtc_state);
}
3170
/* VLV encoder ->post_disable hook: take the link down. */
static void vlv_post_disable_dp(struct intel_encoder *encoder,
				const struct intel_crtc_state *old_crtc_state,
				const struct drm_connector_state *old_conn_state)
{
	intel_dp_link_down(encoder, old_crtc_state);
}
3177
/*
 * CHV encoder ->post_disable hook: take the link down, then reset the
 * PHY data lanes via the sideband interface.
 */
static void chv_post_disable_dp(struct intel_encoder *encoder,
				const struct intel_crtc_state *old_crtc_state,
				const struct drm_connector_state *old_conn_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);

	intel_dp_link_down(encoder, old_crtc_state);

	mutex_lock(&dev_priv->sb_lock);

	/* Assert data lane reset */
	chv_data_lane_soft_reset(encoder, old_crtc_state, true);

	mutex_unlock(&dev_priv->sb_lock);
}
3193
7b13b58a
VS
/*
 * Program the requested link training pattern into the source hardware.
 * Three register layouts exist: DDI (DP_TP_CTL), IVB port A / CPT PCH
 * (CPT-style bits in the port register), and the legacy port register
 * layout. *DP is updated in place for the non-DDI paths; the caller
 * writes it out.
 */
static void
_intel_dp_set_link_train(struct intel_dp *intel_dp,
			 u32 *DP,
			 u8 dp_train_pat)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	enum port port = intel_dig_port->base.port;
	u8 train_pat_mask = drm_dp_training_pattern_mask(intel_dp->dpcd);

	if (dp_train_pat & train_pat_mask)
		DRM_DEBUG_KMS("Using DP training pattern TPS%d\n",
			      dp_train_pat & train_pat_mask);

	if (HAS_DDI(dev_priv)) {
		u32 temp = I915_READ(DP_TP_CTL(port));

		if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
			temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
		else
			temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;

		temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
		switch (dp_train_pat & train_pat_mask) {
		case DP_TRAINING_PATTERN_DISABLE:
			temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;

			break;
		case DP_TRAINING_PATTERN_1:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
			break;
		case DP_TRAINING_PATTERN_2:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
			break;
		case DP_TRAINING_PATTERN_3:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
			break;
		case DP_TRAINING_PATTERN_4:
			temp |= DP_TP_CTL_LINK_TRAIN_PAT4;
			break;
		}
		I915_WRITE(DP_TP_CTL(port), temp);

	} else if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
		   (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
		*DP &= ~DP_LINK_TRAIN_MASK_CPT;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			*DP |= DP_LINK_TRAIN_OFF_CPT;
			break;
		case DP_TRAINING_PATTERN_1:
			*DP |= DP_LINK_TRAIN_PAT_1_CPT;
			break;
		case DP_TRAINING_PATTERN_2:
			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		case DP_TRAINING_PATTERN_3:
			/* CPT-style hardware has no TPS3 encoding */
			DRM_DEBUG_KMS("TPS3 not supported, using TPS2 instead\n");
			*DP |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		}

	} else {
		*DP &= ~DP_LINK_TRAIN_MASK;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			*DP |= DP_LINK_TRAIN_OFF;
			break;
		case DP_TRAINING_PATTERN_1:
			*DP |= DP_LINK_TRAIN_PAT_1;
			break;
		case DP_TRAINING_PATTERN_2:
			*DP |= DP_LINK_TRAIN_PAT_2;
			break;
		case DP_TRAINING_PATTERN_3:
			/* Legacy hardware has no TPS3 encoding either */
			DRM_DEBUG_KMS("TPS3 not supported, using TPS2 instead\n");
			*DP |= DP_LINK_TRAIN_PAT_2;
			break;
		}
	}
}
3277
/*
 * Enable the DP port with training pattern 1 selected, as the spec
 * requires before link training starts.
 */
static void intel_dp_enable_port(struct intel_dp *intel_dp,
				 const struct intel_crtc_state *old_crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	/* enable with pattern 1 (as per spec) */

	intel_dp_program_link_training_pattern(intel_dp, DP_TRAINING_PATTERN_1);

	/*
	 * Magic for VLV/CHV. We _must_ first set up the register
	 * without actually enabling the port, and then do another
	 * write to enable the port. Otherwise link training will
	 * fail when the power sequencer is freshly used for this port.
	 */
	intel_dp->DP |= DP_PORT_EN;
	if (old_crtc_state->has_audio)
		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;

	I915_WRITE(intel_dp->output_reg, intel_dp->DP);
	POSTING_READ(intel_dp->output_reg);
}
3300
/*
 * Common DP enable path: enable the port, power up the panel, train
 * the link, and start audio if configured.
 */
static void intel_enable_dp(struct intel_encoder *encoder,
			    const struct intel_crtc_state *pipe_config,
			    const struct drm_connector_state *conn_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
	u32 dp_reg = I915_READ(intel_dp->output_reg);
	enum pipe pipe = crtc->pipe;
	intel_wakeref_t wakeref;

	/* The port must not already be enabled at this point */
	if (WARN_ON(dp_reg & DP_PORT_EN))
		return;

	with_pps_lock(intel_dp, wakeref) {
		/* VLV/CHV need a power sequencer claimed for this port first */
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
			vlv_init_panel_power_sequencer(encoder, pipe_config);

		intel_dp_enable_port(intel_dp, pipe_config);

		edp_panel_vdd_on(intel_dp);
		edp_panel_on(intel_dp);
		edp_panel_vdd_off(intel_dp, true);
	}

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		unsigned int lane_mask = 0x0;

		if (IS_CHERRYVIEW(dev_priv))
			lane_mask = intel_dp_unused_lane_mask(pipe_config->lane_count);

		vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
				    lane_mask);
	}

	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
	intel_dp_start_link_train(intel_dp);
	intel_dp_stop_link_train(intel_dp);

	if (pipe_config->has_audio) {
		DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
				 pipe_name(pipe));
		intel_audio_codec_enable(encoder, pipe_config, conn_state);
	}
}
89b667f8 3346
/* g4x encoder ->enable hook: common enable, then backlight on. */
static void g4x_enable_dp(struct intel_encoder *encoder,
			  const struct intel_crtc_state *pipe_config,
			  const struct drm_connector_state *conn_state)
{
	intel_enable_dp(encoder, pipe_config, conn_state);
	intel_edp_backlight_on(pipe_config, conn_state);
}
89b667f8 3354
/*
 * VLV/CHV encoder ->enable hook: only the backlight; the port itself
 * was already enabled from the pre_enable hook on these platforms.
 */
static void vlv_enable_dp(struct intel_encoder *encoder,
			  const struct intel_crtc_state *pipe_config,
			  const struct drm_connector_state *conn_state)
{
	intel_edp_backlight_on(pipe_config, conn_state);
}
3361
/*
 * g4x encoder ->pre_enable hook: program the port registers and, on
 * port A, turn the eDP PLL on before the pipe comes up.
 */
static void g4x_pre_enable_dp(struct intel_encoder *encoder,
			      const struct intel_crtc_state *pipe_config,
			      const struct drm_connector_state *conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = encoder->port;

	intel_dp_prepare(encoder, pipe_config);

	/* Only ilk+ has port A */
	if (port == PORT_A)
		ironlake_edp_pll_on(intel_dp, pipe_config);
}
3375
83b84597
VS
/*
 * Logically disconnect this port from its VLV/CHV panel power
 * sequencer: sync off any pending VDD and clear the PPS port select.
 */
static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(intel_dig_port->base.base.dev);
	enum pipe pipe = intel_dp->pps_pipe;
	i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe);

	WARN_ON(intel_dp->active_pipe != INVALID_PIPE);

	/* Only pipes A and B have a PPS on VLV/CHV */
	if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
		return;

	edp_panel_vdd_off_sync(intel_dp);

	/*
	 * VLV seems to get confused when multiple power sequencers
	 * have the same port selected (even if only one has power/vdd
	 * enabled). The failure manifests as vlv_wait_port_ready() failing
	 * CHV on the other hand doesn't seem to mind having the same port
	 * selected in multiple power sequencers, but let's clear the
	 * port select always when logically disconnecting a power sequencer
	 * from a port.
	 */
	DRM_DEBUG_KMS("detaching pipe %c power sequencer from port %c\n",
		      pipe_name(pipe), port_name(intel_dig_port->base.port));
	I915_WRITE(pp_on_reg, 0);
	POSTING_READ(pp_on_reg);

	intel_dp->pps_pipe = INVALID_PIPE;
}
3406
/*
 * Detach @pipe's power sequencer from whatever DP port currently owns
 * it, so it can be claimed for a new port. Caller holds pps_mutex.
 */
static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	struct intel_encoder *encoder;

	lockdep_assert_held(&dev_priv->pps_mutex);

	for_each_intel_dp(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
		enum port port = encoder->port;

		/* Stealing from a port that is actively driving a pipe
		 * would break its output — warn loudly. */
		WARN(intel_dp->active_pipe == pipe,
		     "stealing pipe %c power sequencer from active (e)DP port %c\n",
		     pipe_name(pipe), port_name(port));

		if (intel_dp->pps_pipe != pipe)
			continue;

		DRM_DEBUG_KMS("stealing pipe %c power sequencer from port %c\n",
			      pipe_name(pipe), port_name(port));

		/* make sure vdd is off before we steal it */
		vlv_detach_power_sequencer(intel_dp);
	}
}
3432
adc10304
VS
/*
 * Claim the power sequencer of the crtc's pipe for this port (VLV/CHV):
 * release any previously-held PPS, steal the target one from other
 * ports, and (for eDP) initialize its registers. Caller holds pps_mutex.
 */
static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
					   const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);

	lockdep_assert_held(&dev_priv->pps_mutex);

	WARN_ON(intel_dp->active_pipe != INVALID_PIPE);

	if (intel_dp->pps_pipe != INVALID_PIPE &&
	    intel_dp->pps_pipe != crtc->pipe) {
		/*
		 * If another power sequencer was being used on this
		 * port previously make sure to turn off vdd there while
		 * we still have control of it.
		 */
		vlv_detach_power_sequencer(intel_dp);
	}

	/*
	 * We may be stealing the power
	 * sequencer from another port.
	 */
	vlv_steal_power_sequencer(dev_priv, crtc->pipe);

	intel_dp->active_pipe = crtc->pipe;

	/* Non-eDP ports only need the active_pipe bookkeeping above */
	if (!intel_dp_is_edp(intel_dp))
		return;

	/* now it's all ours */
	intel_dp->pps_pipe = crtc->pipe;

	DRM_DEBUG_KMS("initializing pipe %c power sequencer for port %c\n",
		      pipe_name(intel_dp->pps_pipe), port_name(encoder->port));

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(intel_dp, true);
}
3475
/* VLV encoder ->pre_enable hook: PHY setup, then the common enable. */
static void vlv_pre_enable_dp(struct intel_encoder *encoder,
			      const struct intel_crtc_state *pipe_config,
			      const struct drm_connector_state *conn_state)
{
	vlv_phy_pre_encoder_enable(encoder, pipe_config);

	intel_enable_dp(encoder, pipe_config, conn_state);
}
3484
/* VLV encoder ->pre_pll_enable hook: port registers + PHY pre-PLL setup. */
static void vlv_dp_pre_pll_enable(struct intel_encoder *encoder,
				  const struct intel_crtc_state *pipe_config,
				  const struct drm_connector_state *conn_state)
{
	intel_dp_prepare(encoder, pipe_config);

	vlv_phy_pre_pll_enable(encoder, pipe_config);
}
3493
/* CHV encoder ->pre_enable hook: PHY setup, common enable, release CL2. */
static void chv_pre_enable_dp(struct intel_encoder *encoder,
			      const struct intel_crtc_state *pipe_config,
			      const struct drm_connector_state *conn_state)
{
	chv_phy_pre_encoder_enable(encoder, pipe_config);

	intel_enable_dp(encoder, pipe_config, conn_state);

	/* Second common lane will stay alive on its own now */
	chv_phy_release_cl2_override(encoder);
}
3505
/* CHV encoder ->pre_pll_enable hook: port registers + PHY pre-PLL setup. */
static void chv_dp_pre_pll_enable(struct intel_encoder *encoder,
				  const struct intel_crtc_state *pipe_config,
				  const struct drm_connector_state *conn_state)
{
	intel_dp_prepare(encoder, pipe_config);

	chv_phy_pre_pll_enable(encoder, pipe_config);
}
3514
/* CHV encoder ->post_pll_disable hook: PHY teardown after PLL off. */
static void chv_dp_post_pll_disable(struct intel_encoder *encoder,
				    const struct intel_crtc_state *old_crtc_state,
				    const struct drm_connector_state *old_conn_state)
{
	chv_phy_post_pll_disable(encoder, old_crtc_state);
}
3521
a4fc5ed6
KP
/*
 * Fetch AUX CH registers 0x202 - 0x207 which contain
 * link status information
 */
bool
intel_dp_get_link_status(struct intel_dp *intel_dp, u8 link_status[DP_LINK_STATUS_SIZE])
{
	/* True only when the full DP_LINK_STATUS_SIZE bytes were read */
	return drm_dp_dpcd_read(&intel_dp->aux, DP_LANE0_1_STATUS, link_status,
				DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
}
3532
/* These are source-specific values. */
/*
 * Return the maximum voltage swing level the source hardware supports
 * for this port, per platform/port combination.
 */
u8
intel_dp_voltage_max(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	enum port port = encoder->port;

	if (HAS_DDI(dev_priv))
		return intel_ddi_dp_voltage_max(encoder);
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
	else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
	else if (HAS_PCH_CPT(dev_priv) && port != PORT_A)
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
	else
		return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
}
3552
/*
 * Return the maximum pre-emphasis level the source hardware supports
 * for the given voltage swing, per platform/port combination.
 */
u8
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, u8 voltage_swing)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	enum port port = encoder->port;

	if (HAS_DDI(dev_priv)) {
		return intel_ddi_dp_pre_emphasis_max(encoder, voltage_swing);
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_3;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	} else {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
			return DP_TRAIN_PRE_EMPH_LEVEL_2;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
			return DP_TRAIN_PRE_EMPH_LEVEL_1;
		case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
		default:
			return DP_TRAIN_PRE_EMPH_LEVEL_0;
		}
	}
}
3598
830de422 3599static u32 vlv_signal_levels(struct intel_dp *intel_dp)
e2fa6fba 3600{
53d98725 3601 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
e2fa6fba
P
3602 unsigned long demph_reg_value, preemph_reg_value,
3603 uniqtranscale_reg_value;
830de422 3604 u8 train_set = intel_dp->train_set[0];
e2fa6fba
P
3605
3606 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 3607 case DP_TRAIN_PRE_EMPH_LEVEL_0:
e2fa6fba
P
3608 preemph_reg_value = 0x0004000;
3609 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3610 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3611 demph_reg_value = 0x2B405555;
3612 uniqtranscale_reg_value = 0x552AB83A;
3613 break;
bd60018a 3614 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
3615 demph_reg_value = 0x2B404040;
3616 uniqtranscale_reg_value = 0x5548B83A;
3617 break;
bd60018a 3618 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e2fa6fba
P
3619 demph_reg_value = 0x2B245555;
3620 uniqtranscale_reg_value = 0x5560B83A;
3621 break;
bd60018a 3622 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e2fa6fba
P
3623 demph_reg_value = 0x2B405555;
3624 uniqtranscale_reg_value = 0x5598DA3A;
3625 break;
3626 default:
3627 return 0;
3628 }
3629 break;
bd60018a 3630 case DP_TRAIN_PRE_EMPH_LEVEL_1:
e2fa6fba
P
3631 preemph_reg_value = 0x0002000;
3632 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3633 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3634 demph_reg_value = 0x2B404040;
3635 uniqtranscale_reg_value = 0x5552B83A;
3636 break;
bd60018a 3637 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
3638 demph_reg_value = 0x2B404848;
3639 uniqtranscale_reg_value = 0x5580B83A;
3640 break;
bd60018a 3641 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e2fa6fba
P
3642 demph_reg_value = 0x2B404040;
3643 uniqtranscale_reg_value = 0x55ADDA3A;
3644 break;
3645 default:
3646 return 0;
3647 }
3648 break;
bd60018a 3649 case DP_TRAIN_PRE_EMPH_LEVEL_2:
e2fa6fba
P
3650 preemph_reg_value = 0x0000000;
3651 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3652 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3653 demph_reg_value = 0x2B305555;
3654 uniqtranscale_reg_value = 0x5570B83A;
3655 break;
bd60018a 3656 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
3657 demph_reg_value = 0x2B2B4040;
3658 uniqtranscale_reg_value = 0x55ADDA3A;
3659 break;
3660 default:
3661 return 0;
3662 }
3663 break;
bd60018a 3664 case DP_TRAIN_PRE_EMPH_LEVEL_3:
e2fa6fba
P
3665 preemph_reg_value = 0x0006000;
3666 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3667 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
3668 demph_reg_value = 0x1B405555;
3669 uniqtranscale_reg_value = 0x55ADDA3A;
3670 break;
3671 default:
3672 return 0;
3673 }
3674 break;
3675 default:
3676 return 0;
3677 }
3678
53d98725
ACO
3679 vlv_set_phy_signal_level(encoder, demph_reg_value, preemph_reg_value,
3680 uniqtranscale_reg_value, 0);
e2fa6fba
P
3681
3682 return 0;
3683}
3684
830de422 3685static u32 chv_signal_levels(struct intel_dp *intel_dp)
e4a1d846 3686{
b7fa22d8
ACO
3687 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
3688 u32 deemph_reg_value, margin_reg_value;
3689 bool uniq_trans_scale = false;
830de422 3690 u8 train_set = intel_dp->train_set[0];
e4a1d846
CML
3691
3692 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 3693 case DP_TRAIN_PRE_EMPH_LEVEL_0:
e4a1d846 3694 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3695 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3696 deemph_reg_value = 128;
3697 margin_reg_value = 52;
3698 break;
bd60018a 3699 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3700 deemph_reg_value = 128;
3701 margin_reg_value = 77;
3702 break;
bd60018a 3703 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e4a1d846
CML
3704 deemph_reg_value = 128;
3705 margin_reg_value = 102;
3706 break;
bd60018a 3707 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e4a1d846
CML
3708 deemph_reg_value = 128;
3709 margin_reg_value = 154;
b7fa22d8 3710 uniq_trans_scale = true;
e4a1d846
CML
3711 break;
3712 default:
3713 return 0;
3714 }
3715 break;
bd60018a 3716 case DP_TRAIN_PRE_EMPH_LEVEL_1:
e4a1d846 3717 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3718 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3719 deemph_reg_value = 85;
3720 margin_reg_value = 78;
3721 break;
bd60018a 3722 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3723 deemph_reg_value = 85;
3724 margin_reg_value = 116;
3725 break;
bd60018a 3726 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e4a1d846
CML
3727 deemph_reg_value = 85;
3728 margin_reg_value = 154;
3729 break;
3730 default:
3731 return 0;
3732 }
3733 break;
bd60018a 3734 case DP_TRAIN_PRE_EMPH_LEVEL_2:
e4a1d846 3735 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3736 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3737 deemph_reg_value = 64;
3738 margin_reg_value = 104;
3739 break;
bd60018a 3740 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
3741 deemph_reg_value = 64;
3742 margin_reg_value = 154;
3743 break;
3744 default:
3745 return 0;
3746 }
3747 break;
bd60018a 3748 case DP_TRAIN_PRE_EMPH_LEVEL_3:
e4a1d846 3749 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3750 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
3751 deemph_reg_value = 43;
3752 margin_reg_value = 154;
3753 break;
3754 default:
3755 return 0;
3756 }
3757 break;
3758 default:
3759 return 0;
3760 }
3761
b7fa22d8
ACO
3762 chv_set_phy_signal_level(encoder, deemph_reg_value,
3763 margin_reg_value, uniq_trans_scale);
e4a1d846
CML
3764
3765 return 0;
3766}
3767
830de422
JN
3768static u32
3769g4x_signal_levels(u8 train_set)
a4fc5ed6 3770{
830de422 3771 u32 signal_levels = 0;
a4fc5ed6 3772
3cf2efb1 3773 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 3774 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
a4fc5ed6
KP
3775 default:
3776 signal_levels |= DP_VOLTAGE_0_4;
3777 break;
bd60018a 3778 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
a4fc5ed6
KP
3779 signal_levels |= DP_VOLTAGE_0_6;
3780 break;
bd60018a 3781 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
a4fc5ed6
KP
3782 signal_levels |= DP_VOLTAGE_0_8;
3783 break;
bd60018a 3784 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
a4fc5ed6
KP
3785 signal_levels |= DP_VOLTAGE_1_2;
3786 break;
3787 }
3cf2efb1 3788 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 3789 case DP_TRAIN_PRE_EMPH_LEVEL_0:
a4fc5ed6
KP
3790 default:
3791 signal_levels |= DP_PRE_EMPHASIS_0;
3792 break;
bd60018a 3793 case DP_TRAIN_PRE_EMPH_LEVEL_1:
a4fc5ed6
KP
3794 signal_levels |= DP_PRE_EMPHASIS_3_5;
3795 break;
bd60018a 3796 case DP_TRAIN_PRE_EMPH_LEVEL_2:
a4fc5ed6
KP
3797 signal_levels |= DP_PRE_EMPHASIS_6;
3798 break;
bd60018a 3799 case DP_TRAIN_PRE_EMPH_LEVEL_3:
a4fc5ed6
KP
3800 signal_levels |= DP_PRE_EMPHASIS_9_5;
3801 break;
3802 }
3803 return signal_levels;
3804}
3805
4d82c2b5 3806/* SNB CPU eDP voltage swing and pre-emphasis control */
830de422
JN
3807static u32
3808snb_cpu_edp_signal_levels(u8 train_set)
e3421a18 3809{
3c5a62b5
YL
3810 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3811 DP_TRAIN_PRE_EMPHASIS_MASK);
3812 switch (signal_levels) {
bd60018a
SJ
3813 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3814 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3c5a62b5 3815 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
bd60018a 3816 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3c5a62b5 3817 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
bd60018a
SJ
3818 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3819 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
3c5a62b5 3820 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
bd60018a
SJ
3821 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3822 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
3c5a62b5 3823 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
bd60018a
SJ
3824 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3825 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
3c5a62b5 3826 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
e3421a18 3827 default:
3c5a62b5
YL
3828 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3829 "0x%x\n", signal_levels);
3830 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
e3421a18
ZW
3831 }
3832}
3833
4d82c2b5 3834/* IVB CPU eDP voltage swing and pre-emphasis control */
830de422
JN
3835static u32
3836ivb_cpu_edp_signal_levels(u8 train_set)
1a2eb460
KP
3837{
3838 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
3839 DP_TRAIN_PRE_EMPHASIS_MASK);
3840 switch (signal_levels) {
bd60018a 3841 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3842 return EDP_LINK_TRAIN_400MV_0DB_IVB;
bd60018a 3843 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460 3844 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
bd60018a 3845 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
1a2eb460
KP
3846 return EDP_LINK_TRAIN_400MV_6DB_IVB;
3847
bd60018a 3848 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3849 return EDP_LINK_TRAIN_600MV_0DB_IVB;
bd60018a 3850 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460
KP
3851 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
3852
bd60018a 3853 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
1a2eb460 3854 return EDP_LINK_TRAIN_800MV_0DB_IVB;
bd60018a 3855 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
1a2eb460
KP
3856 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
3857
3858 default:
3859 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
3860 "0x%x\n", signal_levels);
3861 return EDP_LINK_TRAIN_500MV_0DB_IVB;
3862 }
3863}
3864
94223d04 3865void
f4eb692e 3866intel_dp_set_signal_levels(struct intel_dp *intel_dp)
f0a3424e 3867{
de25eb7f 3868 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
f0a3424e 3869 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
8f4f2797 3870 enum port port = intel_dig_port->base.port;
830de422
JN
3871 u32 signal_levels, mask = 0;
3872 u8 train_set = intel_dp->train_set[0];
f0a3424e 3873
61cdfb9e 3874 if (IS_GEN9_LP(dev_priv) || INTEL_GEN(dev_priv) >= 10) {
d509af6c
RV
3875 signal_levels = bxt_signal_levels(intel_dp);
3876 } else if (HAS_DDI(dev_priv)) {
f8896f5d 3877 signal_levels = ddi_signal_levels(intel_dp);
d509af6c 3878 mask = DDI_BUF_EMP_MASK;
920a14b2 3879 } else if (IS_CHERRYVIEW(dev_priv)) {
5829975c 3880 signal_levels = chv_signal_levels(intel_dp);
11a914c2 3881 } else if (IS_VALLEYVIEW(dev_priv)) {
5829975c 3882 signal_levels = vlv_signal_levels(intel_dp);
b752e995 3883 } else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
4d82c2b5 3884 signal_levels = ivb_cpu_edp_signal_levels(train_set);
f0a3424e 3885 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
cf819eff 3886 } else if (IS_GEN(dev_priv, 6) && port == PORT_A) {
4d82c2b5 3887 signal_levels = snb_cpu_edp_signal_levels(train_set);
f0a3424e
PZ
3888 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
3889 } else {
45101e93 3890 signal_levels = g4x_signal_levels(train_set);
f0a3424e
PZ
3891 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
3892 }
3893
96fb9f9b
VK
3894 if (mask)
3895 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
3896
3897 DRM_DEBUG_KMS("Using vswing level %d\n",
3898 train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
3899 DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
3900 (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
3901 DP_TRAIN_PRE_EMPHASIS_SHIFT);
f0a3424e 3902
f4eb692e 3903 intel_dp->DP = (intel_dp->DP & ~mask) | signal_levels;
b905a915
ACO
3904
3905 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
3906 POSTING_READ(intel_dp->output_reg);
f0a3424e
PZ
3907}
3908
94223d04 3909void
e9c176d5 3910intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
830de422 3911 u8 dp_train_pat)
a4fc5ed6 3912{
174edf1f 3913 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
90a6b7b0
VS
3914 struct drm_i915_private *dev_priv =
3915 to_i915(intel_dig_port->base.base.dev);
a4fc5ed6 3916
f4eb692e 3917 _intel_dp_set_link_train(intel_dp, &intel_dp->DP, dp_train_pat);
47ea7542 3918
f4eb692e 3919 I915_WRITE(intel_dp->output_reg, intel_dp->DP);
ea5b213a 3920 POSTING_READ(intel_dp->output_reg);
e9c176d5
ACO
3921}
3922
94223d04 3923void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
3ab9c637 3924{
de25eb7f 3925 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3ab9c637 3926 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
8f4f2797 3927 enum port port = intel_dig_port->base.port;
830de422 3928 u32 val;
3ab9c637 3929
4f8036a2 3930 if (!HAS_DDI(dev_priv))
3ab9c637
ID
3931 return;
3932
3933 val = I915_READ(DP_TP_CTL(port));
3934 val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
3935 val |= DP_TP_CTL_LINK_TRAIN_IDLE;
3936 I915_WRITE(DP_TP_CTL(port), val);
3937
3938 /*
3939 * On PORT_A we can have only eDP in SST mode. There the only reason
3940 * we need to set idle transmission mode is to work around a HW issue
3941 * where we enable the pipe while not in idle link-training mode.
3942 * In this case there is requirement to wait for a minimum number of
3943 * idle patterns to be sent.
3944 */
3945 if (port == PORT_A)
3946 return;
3947
97a04e0d 3948 if (intel_wait_for_register(&dev_priv->uncore, DP_TP_STATUS(port),
a767017f
CW
3949 DP_TP_STATUS_IDLE_DONE,
3950 DP_TP_STATUS_IDLE_DONE,
3951 1))
3ab9c637
ID
3952 DRM_ERROR("Timed out waiting for DP idle patterns\n");
3953}
3954
a4fc5ed6 3955static void
adc10304
VS
3956intel_dp_link_down(struct intel_encoder *encoder,
3957 const struct intel_crtc_state *old_crtc_state)
a4fc5ed6 3958{
adc10304
VS
3959 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
3960 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
3961 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
3962 enum port port = encoder->port;
830de422 3963 u32 DP = intel_dp->DP;
a4fc5ed6 3964
4f8036a2 3965 if (WARN_ON(HAS_DDI(dev_priv)))
c19b0669
PZ
3966 return;
3967
0c33d8d7 3968 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
1b39d6f3
CW
3969 return;
3970
28c97730 3971 DRM_DEBUG_KMS("\n");
32f9d658 3972
b752e995 3973 if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
6e266956 3974 (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
e3421a18 3975 DP &= ~DP_LINK_TRAIN_MASK_CPT;
1612c8bd 3976 DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
e3421a18 3977 } else {
3b358cda 3978 DP &= ~DP_LINK_TRAIN_MASK;
1612c8bd 3979 DP |= DP_LINK_TRAIN_PAT_IDLE;
e3421a18 3980 }
1612c8bd 3981 I915_WRITE(intel_dp->output_reg, DP);
fe255d00 3982 POSTING_READ(intel_dp->output_reg);
5eb08b69 3983
1612c8bd
VS
3984 DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
3985 I915_WRITE(intel_dp->output_reg, DP);
3986 POSTING_READ(intel_dp->output_reg);
3987
3988 /*
3989 * HW workaround for IBX, we need to move the port
3990 * to transcoder A after disabling it to allow the
3991 * matching HDMI port to be enabled on transcoder A.
3992 */
6e266956 3993 if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B && port != PORT_A) {
0c241d5b
VS
3994 /*
3995 * We get CPU/PCH FIFO underruns on the other pipe when
3996 * doing the workaround. Sweep them under the rug.
3997 */
3998 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
3999 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
4000
1612c8bd 4001 /* always enable with pattern 1 (as per spec) */
59b74c49
VS
4002 DP &= ~(DP_PIPE_SEL_MASK | DP_LINK_TRAIN_MASK);
4003 DP |= DP_PORT_EN | DP_PIPE_SEL(PIPE_A) |
4004 DP_LINK_TRAIN_PAT_1;
1612c8bd
VS
4005 I915_WRITE(intel_dp->output_reg, DP);
4006 POSTING_READ(intel_dp->output_reg);
4007
4008 DP &= ~DP_PORT_EN;
5bddd17f 4009 I915_WRITE(intel_dp->output_reg, DP);
0ca09685 4010 POSTING_READ(intel_dp->output_reg);
0c241d5b 4011
0f0f74bc 4012 intel_wait_for_vblank_if_active(dev_priv, PIPE_A);
0c241d5b
VS
4013 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
4014 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
5bddd17f
EA
4015 }
4016
f01eca2e 4017 msleep(intel_dp->panel_power_down_delay);
6fec7662
VS
4018
4019 intel_dp->DP = DP;
9f2bdb00
VS
4020
4021 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
69d93820
CW
4022 intel_wakeref_t wakeref;
4023
4024 with_pps_lock(intel_dp, wakeref)
4025 intel_dp->active_pipe = INVALID_PIPE;
9f2bdb00 4026 }
a4fc5ed6
KP
4027}
4028
a1d92652
MA
4029static void
4030intel_dp_extended_receiver_capabilities(struct intel_dp *intel_dp)
4031{
4032 u8 dpcd_ext[6];
4033
4034 /*
4035 * Prior to DP1.3 the bit represented by
4036 * DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT was reserved.
4037 * if it is set DP_DPCD_REV at 0000h could be at a value less than
4038 * the true capability of the panel. The only way to check is to
4039 * then compare 0000h and 2200h.
4040 */
4041 if (!(intel_dp->dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
4042 DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT))
4043 return;
4044
4045 if (drm_dp_dpcd_read(&intel_dp->aux, DP_DP13_DPCD_REV,
4046 &dpcd_ext, sizeof(dpcd_ext)) != sizeof(dpcd_ext)) {
4047 DRM_ERROR("DPCD failed read at extended capabilities\n");
4048 return;
4049 }
4050
4051 if (intel_dp->dpcd[DP_DPCD_REV] > dpcd_ext[DP_DPCD_REV]) {
4052 DRM_DEBUG_KMS("DPCD extended DPCD rev less than base DPCD rev\n");
4053 return;
4054 }
4055
4056 if (!memcmp(intel_dp->dpcd, dpcd_ext, sizeof(dpcd_ext)))
4057 return;
4058
4059 DRM_DEBUG_KMS("Base DPCD: %*ph\n",
4060 (int)sizeof(intel_dp->dpcd), intel_dp->dpcd);
4061
4062 memcpy(intel_dp->dpcd, dpcd_ext, sizeof(dpcd_ext));
4063}
4064
24e807e7 4065bool
fe5a66f9 4066intel_dp_read_dpcd(struct intel_dp *intel_dp)
92fd8fd1 4067{
9f085ebb
L
4068 if (drm_dp_dpcd_read(&intel_dp->aux, 0x000, intel_dp->dpcd,
4069 sizeof(intel_dp->dpcd)) < 0)
edb39244 4070 return false; /* aux transfer failed */
92fd8fd1 4071
a1d92652
MA
4072 intel_dp_extended_receiver_capabilities(intel_dp);
4073
a8e98153 4074 DRM_DEBUG_KMS("DPCD: %*ph\n", (int) sizeof(intel_dp->dpcd), intel_dp->dpcd);
577c7a50 4075
fe5a66f9
VS
4076 return intel_dp->dpcd[DP_DPCD_REV] != 0;
4077}
edb39244 4078
93ac092f
MN
4079static void intel_dp_get_dsc_sink_cap(struct intel_dp *intel_dp)
4080{
4081 /*
4082 * Clear the cached register set to avoid using stale values
4083 * for the sinks that do not support DSC.
4084 */
4085 memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd));
4086
08cadae8
AS
4087 /* Clear fec_capable to avoid using stale values */
4088 intel_dp->fec_capable = 0;
4089
93ac092f
MN
4090 /* Cache the DSC DPCD if eDP or DP rev >= 1.4 */
4091 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x14 ||
4092 intel_dp->edp_dpcd[0] >= DP_EDP_14) {
4093 if (drm_dp_dpcd_read(&intel_dp->aux, DP_DSC_SUPPORT,
4094 intel_dp->dsc_dpcd,
4095 sizeof(intel_dp->dsc_dpcd)) < 0)
4096 DRM_ERROR("Failed to read DPCD register 0x%x\n",
4097 DP_DSC_SUPPORT);
4098
4099 DRM_DEBUG_KMS("DSC DPCD: %*ph\n",
4100 (int)sizeof(intel_dp->dsc_dpcd),
4101 intel_dp->dsc_dpcd);
0ce611c9 4102
08cadae8 4103 /* FEC is supported only on DP 1.4 */
0ce611c9
CW
4104 if (!intel_dp_is_edp(intel_dp) &&
4105 drm_dp_dpcd_readb(&intel_dp->aux, DP_FEC_CAPABILITY,
4106 &intel_dp->fec_capable) < 0)
4107 DRM_ERROR("Failed to read FEC DPCD register\n");
08cadae8 4108
0ce611c9 4109 DRM_DEBUG_KMS("FEC CAPABILITY: %x\n", intel_dp->fec_capable);
93ac092f
MN
4110 }
4111}
4112
fe5a66f9
VS
4113static bool
4114intel_edp_init_dpcd(struct intel_dp *intel_dp)
4115{
4116 struct drm_i915_private *dev_priv =
4117 to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
30d9aa42 4118
fe5a66f9
VS
4119 /* this function is meant to be called only once */
4120 WARN_ON(intel_dp->dpcd[DP_DPCD_REV] != 0);
30d9aa42 4121
fe5a66f9 4122 if (!intel_dp_read_dpcd(intel_dp))
30d9aa42
SS
4123 return false;
4124
84c36753
JN
4125 drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
4126 drm_dp_is_branch(intel_dp->dpcd));
12a47a42 4127
fe5a66f9
VS
4128 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
4129 dev_priv->no_aux_handshake = intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
4130 DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
474d1ec4 4131
7c838e2a
JN
4132 /*
4133 * Read the eDP display control registers.
4134 *
4135 * Do this independent of DP_DPCD_DISPLAY_CONTROL_CAPABLE bit in
4136 * DP_EDP_CONFIGURATION_CAP, because some buggy displays do not have it
4137 * set, but require eDP 1.4+ detection (e.g. for supported link rates
4138 * method). The display control registers should read zero if they're
4139 * not supported anyway.
4140 */
4141 if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
f7170e2e
DC
4142 intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
4143 sizeof(intel_dp->edp_dpcd))
e6ed2a1b 4144 DRM_DEBUG_KMS("eDP DPCD: %*ph\n", (int) sizeof(intel_dp->edp_dpcd),
fe5a66f9 4145 intel_dp->edp_dpcd);
06ea66b6 4146
84bb2916
DP
4147 /*
4148 * This has to be called after intel_dp->edp_dpcd is filled, PSR checks
4149 * for SET_POWER_CAPABLE bit in intel_dp->edp_dpcd[1]
4150 */
4151 intel_psr_init_dpcd(intel_dp);
4152
e6ed2a1b
JN
4153 /* Read the eDP 1.4+ supported link rates. */
4154 if (intel_dp->edp_dpcd[0] >= DP_EDP_14) {
94ca719e 4155 __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
ea2d8a42
VS
4156 int i;
4157
9f085ebb
L
4158 drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES,
4159 sink_rates, sizeof(sink_rates));
ea2d8a42 4160
94ca719e
VS
4161 for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
4162 int val = le16_to_cpu(sink_rates[i]);
ea2d8a42
VS
4163
4164 if (val == 0)
4165 break;
4166
fd81c44e
DP
4167 /* Value read multiplied by 200kHz gives the per-lane
4168 * link rate in kHz. The source rates are, however,
4169 * stored in terms of LS_Clk kHz. The full conversion
4170 * back to symbols is
4171 * (val * 200kHz)*(8/10 ch. encoding)*(1/8 bit to Byte)
4172 */
af77b974 4173 intel_dp->sink_rates[i] = (val * 200) / 10;
ea2d8a42 4174 }
94ca719e 4175 intel_dp->num_sink_rates = i;
fc0f8e25 4176 }
0336400e 4177
e6ed2a1b
JN
4178 /*
4179 * Use DP_LINK_RATE_SET if DP_SUPPORTED_LINK_RATES are available,
4180 * default to DP_MAX_LINK_RATE and DP_LINK_BW_SET otherwise.
4181 */
68f357cb
JN
4182 if (intel_dp->num_sink_rates)
4183 intel_dp->use_rate_select = true;
4184 else
4185 intel_dp_set_sink_rates(intel_dp);
4186
975ee5fc
JN
4187 intel_dp_set_common_rates(intel_dp);
4188
93ac092f
MN
4189 /* Read the eDP DSC DPCD registers */
4190 if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
4191 intel_dp_get_dsc_sink_cap(intel_dp);
4192
fe5a66f9
VS
4193 return true;
4194}
4195
4196
4197static bool
4198intel_dp_get_dpcd(struct intel_dp *intel_dp)
4199{
4200 if (!intel_dp_read_dpcd(intel_dp))
4201 return false;
4202
68f357cb 4203 /* Don't clobber cached eDP rates. */
1853a9da 4204 if (!intel_dp_is_edp(intel_dp)) {
68f357cb 4205 intel_dp_set_sink_rates(intel_dp);
975ee5fc
JN
4206 intel_dp_set_common_rates(intel_dp);
4207 }
68f357cb 4208
fe5a66f9 4209 /*
2bb06265
JRS
4210 * Some eDP panels do not set a valid value for sink count, that is why
4211 * it don't care about read it here and in intel_edp_init_dpcd().
fe5a66f9 4212 */
2bb06265
JRS
4213 if (!intel_dp_is_edp(intel_dp)) {
4214 u8 count;
4215 ssize_t r;
fe5a66f9 4216
2bb06265
JRS
4217 r = drm_dp_dpcd_readb(&intel_dp->aux, DP_SINK_COUNT, &count);
4218 if (r < 1)
4219 return false;
4220
4221 /*
4222 * Sink count can change between short pulse hpd hence
4223 * a member variable in intel_dp will track any changes
4224 * between short pulse interrupts.
4225 */
4226 intel_dp->sink_count = DP_GET_SINK_COUNT(count);
4227
4228 /*
4229 * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that
4230 * a dongle is present but no display. Unless we require to know
4231 * if a dongle is present or not, we don't need to update
4232 * downstream port information. So, an early return here saves
4233 * time from performing other operations which are not required.
4234 */
4235 if (!intel_dp->sink_count)
4236 return false;
4237 }
0336400e 4238
c726ad01 4239 if (!drm_dp_is_branch(intel_dp->dpcd))
edb39244
AJ
4240 return true; /* native DP sink */
4241
4242 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
4243 return true; /* no per-port downstream info */
4244
9f085ebb
L
4245 if (drm_dp_dpcd_read(&intel_dp->aux, DP_DOWNSTREAM_PORT_0,
4246 intel_dp->downstream_ports,
4247 DP_MAX_DOWNSTREAM_PORTS) < 0)
edb39244
AJ
4248 return false; /* downstream port status fetch failed */
4249
4250 return true;
92fd8fd1
KP
4251}
4252
0e32b39c 4253static bool
9dbf5a4e 4254intel_dp_sink_can_mst(struct intel_dp *intel_dp)
0e32b39c 4255{
010b9b39 4256 u8 mstm_cap;
0e32b39c 4257
0e32b39c
DA
4258 if (intel_dp->dpcd[DP_DPCD_REV] < 0x12)
4259 return false;
4260
010b9b39 4261 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_MSTM_CAP, &mstm_cap) != 1)
c4e3170a 4262 return false;
0e32b39c 4263
010b9b39 4264 return mstm_cap & DP_MST_CAP;
c4e3170a
VS
4265}
4266
9dbf5a4e
VS
4267static bool
4268intel_dp_can_mst(struct intel_dp *intel_dp)
4269{
4270 return i915_modparams.enable_dp_mst &&
4271 intel_dp->can_mst &&
4272 intel_dp_sink_can_mst(intel_dp);
4273}
4274
c4e3170a
VS
4275static void
4276intel_dp_configure_mst(struct intel_dp *intel_dp)
4277{
9dbf5a4e
VS
4278 struct intel_encoder *encoder =
4279 &dp_to_dig_port(intel_dp)->base;
4280 bool sink_can_mst = intel_dp_sink_can_mst(intel_dp);
4281
4282 DRM_DEBUG_KMS("MST support? port %c: %s, sink: %s, modparam: %s\n",
4283 port_name(encoder->port), yesno(intel_dp->can_mst),
4284 yesno(sink_can_mst), yesno(i915_modparams.enable_dp_mst));
c4e3170a
VS
4285
4286 if (!intel_dp->can_mst)
4287 return;
4288
9dbf5a4e
VS
4289 intel_dp->is_mst = sink_can_mst &&
4290 i915_modparams.enable_dp_mst;
c4e3170a
VS
4291
4292 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
4293 intel_dp->is_mst);
0e32b39c
DA
4294}
4295
0e32b39c
DA
4296static bool
4297intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4298{
e8b2577c
PD
4299 return drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI,
4300 sink_irq_vector, DP_DPRX_ESI_LEN) ==
4301 DP_DPRX_ESI_LEN;
0e32b39c
DA
4302}
4303
830de422 4304u16 intel_dp_dsc_get_output_bpp(int link_clock, u8 lane_count,
d9218c8f
MN
4305 int mode_clock, int mode_hdisplay)
4306{
4307 u16 bits_per_pixel, max_bpp_small_joiner_ram;
4308 int i;
4309
4310 /*
4311 * Available Link Bandwidth(Kbits/sec) = (NumberOfLanes)*
4312 * (LinkSymbolClock)* 8 * ((100-FECOverhead)/100)*(TimeSlotsPerMTP)
4313 * FECOverhead = 2.4%, for SST -> TimeSlotsPerMTP is 1,
4314 * for MST -> TimeSlotsPerMTP has to be calculated
4315 */
4316 bits_per_pixel = (link_clock * lane_count * 8 *
4317 DP_DSC_FEC_OVERHEAD_FACTOR) /
4318 mode_clock;
4319
4320 /* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */
4321 max_bpp_small_joiner_ram = DP_DSC_MAX_SMALL_JOINER_RAM_BUFFER /
4322 mode_hdisplay;
4323
4324 /*
4325 * Greatest allowed DSC BPP = MIN (output BPP from avaialble Link BW
4326 * check, output bpp from small joiner RAM check)
4327 */
4328 bits_per_pixel = min(bits_per_pixel, max_bpp_small_joiner_ram);
4329
4330 /* Error out if the max bpp is less than smallest allowed valid bpp */
4331 if (bits_per_pixel < valid_dsc_bpp[0]) {
4332 DRM_DEBUG_KMS("Unsupported BPP %d\n", bits_per_pixel);
4333 return 0;
4334 }
4335
4336 /* Find the nearest match in the array of known BPPs from VESA */
4337 for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) {
4338 if (bits_per_pixel < valid_dsc_bpp[i + 1])
4339 break;
4340 }
4341 bits_per_pixel = valid_dsc_bpp[i];
4342
4343 /*
4344 * Compressed BPP in U6.4 format so multiply by 16, for Gen 11,
4345 * fractional part is 0
4346 */
4347 return bits_per_pixel << 4;
4348}
4349
4350u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
4351 int mode_clock,
4352 int mode_hdisplay)
4353{
4354 u8 min_slice_count, i;
4355 int max_slice_width;
4356
4357 if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE)
4358 min_slice_count = DIV_ROUND_UP(mode_clock,
4359 DP_DSC_MAX_ENC_THROUGHPUT_0);
4360 else
4361 min_slice_count = DIV_ROUND_UP(mode_clock,
4362 DP_DSC_MAX_ENC_THROUGHPUT_1);
4363
4364 max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd);
4365 if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) {
4366 DRM_DEBUG_KMS("Unsupported slice width %d by DP DSC Sink device\n",
4367 max_slice_width);
4368 return 0;
4369 }
4370 /* Also take into account max slice width */
830de422 4371 min_slice_count = min_t(u8, min_slice_count,
d9218c8f
MN
4372 DIV_ROUND_UP(mode_hdisplay,
4373 max_slice_width));
4374
4375 /* Find the closest match to the valid slice count values */
4376 for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) {
4377 if (valid_dsc_slicecount[i] >
4378 drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
4379 false))
4380 break;
4381 if (min_slice_count <= valid_dsc_slicecount[i])
4382 return valid_dsc_slicecount[i];
4383 }
4384
4385 DRM_DEBUG_KMS("Unsupported Slice Count %d\n", min_slice_count);
4386 return 0;
4387}
4388
830de422 4389static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp)
c5d5ab7a 4390{
da15f7cb 4391 int status = 0;
140ef138 4392 int test_link_rate;
830de422 4393 u8 test_lane_count, test_link_bw;
da15f7cb
MN
4394 /* (DP CTS 1.2)
4395 * 4.3.1.11
4396 */
4397 /* Read the TEST_LANE_COUNT and TEST_LINK_RTAE fields (DP CTS 3.1.4) */
4398 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LANE_COUNT,
4399 &test_lane_count);
4400
4401 if (status <= 0) {
4402 DRM_DEBUG_KMS("Lane count read failed\n");
4403 return DP_TEST_NAK;
4404 }
4405 test_lane_count &= DP_MAX_LANE_COUNT_MASK;
da15f7cb
MN
4406
4407 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LINK_RATE,
4408 &test_link_bw);
4409 if (status <= 0) {
4410 DRM_DEBUG_KMS("Link Rate read failed\n");
4411 return DP_TEST_NAK;
4412 }
da15f7cb 4413 test_link_rate = drm_dp_bw_code_to_link_rate(test_link_bw);
140ef138
MN
4414
4415 /* Validate the requested link rate and lane count */
4416 if (!intel_dp_link_params_valid(intel_dp, test_link_rate,
4417 test_lane_count))
da15f7cb
MN
4418 return DP_TEST_NAK;
4419
4420 intel_dp->compliance.test_lane_count = test_lane_count;
4421 intel_dp->compliance.test_link_rate = test_link_rate;
4422
4423 return DP_TEST_ACK;
c5d5ab7a
TP
4424}
4425
/*
 * Handle a TEST_PATTERN automated-test request (DP CTS 3.1.5).
 *
 * Only a color-ramp pattern in RGB, non-CEA dynamic range, at 6 or 8 bpc
 * is accepted; anything else is NAKed. On success the requested pattern,
 * geometry and bit depth are recorded in intel_dp->compliance.test_data
 * and the test is flagged active so userspace doesn't disturb it.
 */
static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
{
	u8 test_pattern;
	u8 test_misc;
	__be16 h_width, v_height;	/* read big-endian from the DPCD */
	int status = 0;

	/* Read the TEST_PATTERN (DP CTS 3.1.5) */
	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_PATTERN,
				   &test_pattern);
	if (status <= 0) {
		DRM_DEBUG_KMS("Test pattern read failed\n");
		return DP_TEST_NAK;
	}
	if (test_pattern != DP_COLOR_RAMP)
		return DP_TEST_NAK;

	/* Requested width, two bytes starting at the HI register. */
	status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_H_WIDTH_HI,
				  &h_width, 2);
	if (status <= 0) {
		DRM_DEBUG_KMS("H Width read failed\n");
		return DP_TEST_NAK;
	}

	/* Requested height, same layout as the width. */
	status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_V_HEIGHT_HI,
				  &v_height, 2);
	if (status <= 0) {
		DRM_DEBUG_KMS("V Height read failed\n");
		return DP_TEST_NAK;
	}

	/* Color format, dynamic range and bit depth all live in MISC0. */
	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_MISC0,
				   &test_misc);
	if (status <= 0) {
		DRM_DEBUG_KMS("TEST MISC read failed\n");
		return DP_TEST_NAK;
	}
	if ((test_misc & DP_TEST_COLOR_FORMAT_MASK) != DP_COLOR_FORMAT_RGB)
		return DP_TEST_NAK;
	if (test_misc & DP_TEST_DYNAMIC_RANGE_CEA)
		return DP_TEST_NAK;
	switch (test_misc & DP_TEST_BIT_DEPTH_MASK) {
	case DP_TEST_BIT_DEPTH_6:
		intel_dp->compliance.test_data.bpc = 6;
		break;
	case DP_TEST_BIT_DEPTH_8:
		intel_dp->compliance.test_data.bpc = 8;
		break;
	default:
		return DP_TEST_NAK;
	}

	intel_dp->compliance.test_data.video_pattern = test_pattern;
	intel_dp->compliance.test_data.hdisplay = be16_to_cpu(h_width);
	intel_dp->compliance.test_data.vdisplay = be16_to_cpu(v_height);
	/* Set test active flag here so userspace doesn't interrupt things */
	intel_dp->compliance.test_active = 1;

	return DP_TEST_ACK;
}
4486
/*
 * Handle an EDID-read automated-test request (DP CTS 1.2 Core r1.1,
 * 4.2.2.4 - 4.2.2.6).
 *
 * If the previous EDID read failed (NACKs, too many DEFERs, corruption)
 * request the failsafe resolution; otherwise write the checksum of the
 * last EDID block back to the sink and request the preferred resolution.
 * Always marks the compliance test active.
 */
static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp)
{
	u8 test_result = DP_TEST_ACK;
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct drm_connector *connector = &intel_connector->base;

	if (intel_connector->detect_edid == NULL ||
	    connector->edid_corrupt ||
	    intel_dp->aux.i2c_defer_count > 6) {
		/* Check EDID read for NACKs, DEFERs and corruption
		 * (DP CTS 1.2 Core r1.1)
		 * 4.2.2.4 : Failed EDID read, I2C_NAK
		 * 4.2.2.5 : Failed EDID read, I2C_DEFER
		 * 4.2.2.6 : EDID corruption detected
		 * Use failsafe mode for all cases
		 */
		if (intel_dp->aux.i2c_nack_count > 0 ||
			intel_dp->aux.i2c_defer_count > 0)
			DRM_DEBUG_KMS("EDID read had %d NACKs, %d DEFERs\n",
				      intel_dp->aux.i2c_nack_count,
				      intel_dp->aux.i2c_defer_count);
		intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_FAILSAFE;
	} else {
		struct edid *block = intel_connector->detect_edid;

		/* We have to write the checksum
		 * of the last block read
		 */
		block += intel_connector->detect_edid->extensions;

		if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_EDID_CHECKSUM,
				       block->checksum) <= 0)
			DRM_DEBUG_KMS("Failed to write EDID checksum\n");

		test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
		intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_PREFERRED;
	}

	/* Set test active flag here so userspace doesn't interrupt things */
	intel_dp->compliance.test_active = 1;

	return test_result;
}
4530
830de422 4531static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
a60f0e38 4532{
830de422 4533 u8 test_result = DP_TEST_NAK;
c5d5ab7a
TP
4534 return test_result;
4535}
4536
/*
 * Service an automated-test IRQ: read DP_TEST_REQUEST from the sink,
 * dispatch to the matching autotest handler and write the ACK/NAK back
 * to DP_TEST_RESPONSE. On ACK the requested test type is remembered in
 * intel_dp->compliance so later code (e.g. short-pulse handling) can
 * react to it.
 */
static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
{
	u8 response = DP_TEST_NAK;
	u8 request = 0;
	int status;

	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_REQUEST, &request);
	if (status <= 0) {
		DRM_DEBUG_KMS("Could not read test request from sink\n");
		goto update_status;
	}

	switch (request) {
	case DP_TEST_LINK_TRAINING:
		DRM_DEBUG_KMS("LINK_TRAINING test requested\n");
		response = intel_dp_autotest_link_training(intel_dp);
		break;
	case DP_TEST_LINK_VIDEO_PATTERN:
		DRM_DEBUG_KMS("TEST_PATTERN test requested\n");
		response = intel_dp_autotest_video_pattern(intel_dp);
		break;
	case DP_TEST_LINK_EDID_READ:
		DRM_DEBUG_KMS("EDID test requested\n");
		response = intel_dp_autotest_edid(intel_dp);
		break;
	case DP_TEST_LINK_PHY_TEST_PATTERN:
		DRM_DEBUG_KMS("PHY_PATTERN test requested\n");
		response = intel_dp_autotest_phy_pattern(intel_dp);
		break;
	default:
		DRM_DEBUG_KMS("Invalid test request '%02x'\n", request);
		break;
	}

	/* Remember the accepted test type for the rest of the driver. */
	if (response & DP_TEST_ACK)
		intel_dp->compliance.test_type = request;

update_status:
	/* The response is written even when the request read failed (NAK). */
	status = drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, response);
	if (status <= 0)
		DRM_DEBUG_KMS("Could not write test response to sink\n");
}
4579
/*
 * Service an MST sink interrupt: read the ESI (event status indicator)
 * vector, retrain if channel EQ has dropped, hand the events to the MST
 * topology manager, and ack handled events back to the sink, looping as
 * long as new events keep arriving. If the ESI read fails the sink is
 * assumed gone and the MST topology is torn down.
 *
 * Returns the drm_dp_mst_hpd_irq() result (0 if nothing was handled),
 * or -EINVAL when not in MST mode or when the sink stopped responding.
 */
static int
intel_dp_check_mst_status(struct intel_dp *intel_dp)
{
	bool bret;

	if (intel_dp->is_mst) {
		u8 esi[DP_DPRX_ESI_LEN] = { 0 };
		int ret = 0;
		int retry;
		bool handled;

		WARN_ON_ONCE(intel_dp->active_mst_links < 0);
		bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
go_again:
		if (bret == true) {

			/* check link status - esi[10] = 0x200c */
			if (intel_dp->active_mst_links > 0 &&
			    !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
				DRM_DEBUG_KMS("channel EQ not ok, retraining\n");
				intel_dp_start_link_train(intel_dp);
				intel_dp_stop_link_train(intel_dp);
			}

			DRM_DEBUG_KMS("got esi %3ph\n", esi);
			ret = drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);

			if (handled) {
				/* Ack the serviced events; retry the 3-byte
				 * write a few times in case of AUX flakiness. */
				for (retry = 0; retry < 3; retry++) {
					int wret;
					wret = drm_dp_dpcd_write(&intel_dp->aux,
								 DP_SINK_COUNT_ESI+1,
								 &esi[1], 3);
					if (wret == 3) {
						break;
					}
				}

				/* More events may have been raised meanwhile. */
				bret = intel_dp_get_sink_irq_esi(intel_dp, esi);
				if (bret == true) {
					DRM_DEBUG_KMS("got esi2 %3ph\n", esi);
					goto go_again;
				}
			} else
				ret = 0;

			return ret;
		} else {
			DRM_DEBUG_KMS("failed to get ESI - device may have failed\n");
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
							intel_dp->is_mst);
		}
	}
	return -EINVAL;
}
4636
/*
 * Decide whether the link needs retraining: the link must have been
 * trained before, PSR must not own the main link, the cached link
 * parameters must still be valid, and the DPCD link status must show
 * clock recovery or channel EQ out of spec.
 */
static bool
intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
{
	u8 link_status[DP_LINK_STATUS_SIZE];

	/* Never trained yet — nothing to retrain. */
	if (!intel_dp->link_trained)
		return false;

	/*
	 * While PSR source HW is enabled, it will control main-link sending
	 * frames, enabling and disabling it so trying to do a retrain will fail
	 * as the link would or not be on or it could mix training patterns
	 * and frame data at the same time causing retrain to fail.
	 * Also when exiting PSR, HW will retrain the link anyways fixing
	 * any link status error.
	 */
	if (intel_psr_enabled(intel_dp))
		return false;

	if (!intel_dp_get_link_status(intel_dp, link_status))
		return false;

	/*
	 * Validate the cached values of intel_dp->link_rate and
	 * intel_dp->lane_count before attempting to retrain.
	 */
	if (!intel_dp_link_params_valid(intel_dp, intel_dp->link_rate,
					intel_dp->lane_count))
		return false;

	/* Retrain if Channel EQ or CR not ok */
	return !drm_dp_channel_eq_ok(link_status, intel_dp->lane_count);
}
4670
/*
 * Retrain the DP link on @encoder if it has dropped out of spec.
 *
 * Takes connection_mutex and the crtc mutex through @ctx; may return
 * -EDEADLK, in which case the caller must drm_modeset_backoff() and
 * retry. Returns 0 when there is nothing to do (not connected, no crtc,
 * crtc inactive, commit still in flight, or link already good).
 *
 * FIFO underrun reporting is suppressed around the retrain because
 * retraining disturbs the main link, and re-enabled only after waiting
 * a vblank for things to settle.
 */
int intel_dp_retrain_link(struct intel_encoder *encoder,
			  struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct intel_connector *connector = intel_dp->attached_connector;
	struct drm_connector_state *conn_state;
	struct intel_crtc_state *crtc_state;
	struct intel_crtc *crtc;
	int ret;

	/* FIXME handle the MST connectors as well */

	if (!connector || connector->base.status != connector_status_connected)
		return 0;

	ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
			       ctx);
	if (ret)
		return ret;

	conn_state = connector->base.state;

	crtc = to_intel_crtc(conn_state->crtc);
	if (!crtc)
		return 0;

	ret = drm_modeset_lock(&crtc->base.mutex, ctx);
	if (ret)
		return ret;

	crtc_state = to_intel_crtc_state(crtc->base.state);

	WARN_ON(!intel_crtc_has_dp_encoder(crtc_state));

	if (!crtc_state->base.active)
		return 0;

	/* Don't race with a commit that is still being applied. */
	if (conn_state->commit &&
	    !try_wait_for_completion(&conn_state->commit->hw_done))
		return 0;

	if (!intel_dp_needs_link_retrain(intel_dp))
		return 0;

	/* Suppress underruns caused by re-training */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
	if (crtc_state->has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev_priv,
						      intel_crtc_pch_transcoder(crtc), false);

	intel_dp_start_link_train(intel_dp);
	intel_dp_stop_link_train(intel_dp);

	/* Keep underrun reporting disabled until things are stable */
	intel_wait_for_vblank(dev_priv, crtc->pipe);

	intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
	if (crtc_state->has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev_priv,
						      intel_crtc_pch_transcoder(crtc), true);

	return 0;
}
4735
c85d200e
VS
4736/*
4737 * If display is now connected check links status,
4738 * there has been known issues of link loss triggering
4739 * long pulse.
4740 *
4741 * Some sinks (eg. ASUS PB287Q) seem to perform some
4742 * weird HPD ping pong during modesets. So we can apparently
4743 * end up with HPD going low during a modeset, and then
4744 * going back up soon after. And once that happens we must
4745 * retrain the link to get a picture. That's in case no
4746 * userspace component reacted to intermittent HPD dip.
4747 */
4748static bool intel_dp_hotplug(struct intel_encoder *encoder,
4749 struct intel_connector *connector)
5c9114d0 4750{
c85d200e
VS
4751 struct drm_modeset_acquire_ctx ctx;
4752 bool changed;
4753 int ret;
5c9114d0 4754
c85d200e 4755 changed = intel_encoder_hotplug(encoder, connector);
5c9114d0 4756
c85d200e 4757 drm_modeset_acquire_init(&ctx, 0);
42e5e657 4758
c85d200e
VS
4759 for (;;) {
4760 ret = intel_dp_retrain_link(encoder, &ctx);
5c9114d0 4761
c85d200e
VS
4762 if (ret == -EDEADLK) {
4763 drm_modeset_backoff(&ctx);
4764 continue;
4765 }
5c9114d0 4766
c85d200e
VS
4767 break;
4768 }
d4cb3fd9 4769
c85d200e
VS
4770 drm_modeset_drop_locks(&ctx);
4771 drm_modeset_acquire_fini(&ctx);
4772 WARN(ret, "Acquiring modeset locks failed with %i\n", ret);
bfd02b3c 4773
c85d200e 4774 return changed;
5c9114d0
SS
4775}
4776
/*
 * Read and ack DP_DEVICE_SERVICE_IRQ_VECTOR (DPCD 1.1+ only), then
 * dispatch the individual IRQ sources: automated-test requests, HDCP
 * content-protection IRQs, and (unhandled) sink-specific IRQs.
 */
static void intel_dp_check_service_irq(struct intel_dp *intel_dp)
{
	u8 val;

	/* The service IRQ vector only exists from DPCD revision 1.1 on. */
	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
		return;

	if (drm_dp_dpcd_readb(&intel_dp->aux,
			      DP_DEVICE_SERVICE_IRQ_VECTOR, &val) != 1 || !val)
		return;

	/* Write the value back to ack the IRQs before handling them. */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR, val);

	if (val & DP_AUTOMATED_TEST_REQUEST)
		intel_dp_handle_test_request(intel_dp);

	if (val & DP_CP_IRQ)
		intel_hdcp_handle_cp_irq(intel_dp->attached_connector);

	if (val & DP_SINK_SPECIFIC_IRQ)
		DRM_DEBUG_DRIVER("Sink specific irq unhandled\n");
}
4799
/*
 * According to DP spec
 * 5.1.2:
 * 1. Read DPCD
 * 2. Configure link according to Receiver Capabilities
 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
 * 4. Check link status on receipt of hot-plug interrupt
 *
 * intel_dp_short_pulse - handles short pulse interrupts
 * when full detection is not required.
 * Returns %true if short pulse is handled and full detection
 * is NOT required and %false otherwise.
 */
static bool
intel_dp_short_pulse(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u8 old_sink_count = intel_dp->sink_count;
	bool ret;

	/*
	 * Clearing compliance test variables to allow capturing
	 * of values for next automated test request.
	 */
	memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));

	/*
	 * Now read the DPCD to see if it's actually running
	 * If the current value of sink count doesn't match with
	 * the value that was stored earlier or dpcd read failed
	 * we need to do full detection
	 */
	ret = intel_dp_get_dpcd(intel_dp);

	if ((old_sink_count != intel_dp->sink_count) || !ret) {
		/* No need to proceed if we are going to do full detect */
		return false;
	}

	/* Service any pending automated-test / HDCP / sink IRQs. */
	intel_dp_check_service_irq(intel_dp);

	/* Handle CEC interrupts, if any */
	drm_dp_cec_irq(&intel_dp->aux);

	/* defer to the hotplug work for link retraining if needed */
	if (intel_dp_needs_link_retrain(intel_dp))
		return false;

	intel_psr_short_pulse(intel_dp);

	/* test_type was set by intel_dp_handle_test_request() above. */
	if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
		DRM_DEBUG_KMS("Link Training Compliance Test requested\n");
		/* Send a Hotplug Uevent to userspace to start modeset */
		drm_kms_helper_hotplug_event(&dev_priv->drm);
	}

	return true;
}
a4fc5ed6 4858
/* XXX this is probably wrong for multiple downstream ports */
/*
 * Determine connector status from the DPCD: resume an active LSPCON
 * first, then treat eDP and non-branch devices as connected, and for
 * branch devices fall back to SINK_COUNT (if HPD-aware), MST capability,
 * a gentle DDC probe, and finally the downstream port type to decide
 * between unknown and disconnected.
 */
static enum drm_connector_status
intel_dp_detect_dpcd(struct intel_dp *intel_dp)
{
	struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
	u8 *dpcd = intel_dp->dpcd;
	u8 type;

	if (lspcon->active)
		lspcon_resume(lspcon);

	if (!intel_dp_get_dpcd(intel_dp))
		return connector_status_disconnected;

	/* eDP panels are always reported as connected. */
	if (intel_dp_is_edp(intel_dp))
		return connector_status_connected;

	/* if there's no downstream port, we're done */
	if (!drm_dp_is_branch(dpcd))
		return connector_status_connected;

	/* If we're HPD-aware, SINK_COUNT changes dynamically */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {

		return intel_dp->sink_count ?
		connector_status_connected : connector_status_disconnected;
	}

	if (intel_dp_can_mst(intel_dp))
		return connector_status_connected;

	/* If no HPD, poke DDC gently */
	if (drm_probe_ddc(&intel_dp->aux.ddc))
		return connector_status_connected;

	/* Well we tried, say unknown for unreliable port types */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
		type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
		if (type == DP_DS_PORT_TYPE_VGA ||
		    type == DP_DS_PORT_TYPE_NON_EDID)
			return connector_status_unknown;
	} else {
		/* Pre-1.1 DPCD only has the coarse downstream-port field. */
		type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
			DP_DWN_STRM_PORT_TYPE_MASK;
		if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
		    type == DP_DWN_STRM_PORT_TYPE_OTHER)
			return connector_status_unknown;
	}

	/* Anything else is out of spec, warn and ignore */
	DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
	return connector_status_disconnected;
}
4913
d410b56d
CW
4914static enum drm_connector_status
4915edp_detect(struct intel_dp *intel_dp)
4916{
b93b41af 4917 return connector_status_connected;
d410b56d
CW
4918}
4919
7533eb4f 4920static bool ibx_digital_port_connected(struct intel_encoder *encoder)
5eb08b69 4921{
7533eb4f 4922 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
b93433cc 4923 u32 bit;
01cb9ea6 4924
7533eb4f
RV
4925 switch (encoder->hpd_pin) {
4926 case HPD_PORT_B:
0df53b77
JN
4927 bit = SDE_PORTB_HOTPLUG;
4928 break;
7533eb4f 4929 case HPD_PORT_C:
0df53b77
JN
4930 bit = SDE_PORTC_HOTPLUG;
4931 break;
7533eb4f 4932 case HPD_PORT_D:
0df53b77
JN
4933 bit = SDE_PORTD_HOTPLUG;
4934 break;
4935 default:
7533eb4f 4936 MISSING_CASE(encoder->hpd_pin);
0df53b77
JN
4937 return false;
4938 }
4939
4940 return I915_READ(SDEISR) & bit;
4941}
4942
7533eb4f 4943static bool cpt_digital_port_connected(struct intel_encoder *encoder)
0df53b77 4944{
7533eb4f 4945 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
0df53b77
JN
4946 u32 bit;
4947
7533eb4f
RV
4948 switch (encoder->hpd_pin) {
4949 case HPD_PORT_B:
0df53b77
JN
4950 bit = SDE_PORTB_HOTPLUG_CPT;
4951 break;
7533eb4f 4952 case HPD_PORT_C:
0df53b77
JN
4953 bit = SDE_PORTC_HOTPLUG_CPT;
4954 break;
7533eb4f 4955 case HPD_PORT_D:
0df53b77
JN
4956 bit = SDE_PORTD_HOTPLUG_CPT;
4957 break;
93e5f0b6 4958 default:
7533eb4f 4959 MISSING_CASE(encoder->hpd_pin);
93e5f0b6
VS
4960 return false;
4961 }
4962
4963 return I915_READ(SDEISR) & bit;
4964}
4965
7533eb4f 4966static bool spt_digital_port_connected(struct intel_encoder *encoder)
93e5f0b6 4967{
7533eb4f 4968 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
93e5f0b6
VS
4969 u32 bit;
4970
7533eb4f
RV
4971 switch (encoder->hpd_pin) {
4972 case HPD_PORT_A:
93e5f0b6
VS
4973 bit = SDE_PORTA_HOTPLUG_SPT;
4974 break;
7533eb4f 4975 case HPD_PORT_E:
a78695d3
JN
4976 bit = SDE_PORTE_HOTPLUG_SPT;
4977 break;
0df53b77 4978 default:
7533eb4f 4979 return cpt_digital_port_connected(encoder);
b93433cc 4980 }
1b469639 4981
b93433cc 4982 return I915_READ(SDEISR) & bit;
5eb08b69
ZW
4983}
4984
7533eb4f 4985static bool g4x_digital_port_connected(struct intel_encoder *encoder)
a4fc5ed6 4986{
7533eb4f 4987 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
9642c81c 4988 u32 bit;
5eb08b69 4989
7533eb4f
RV
4990 switch (encoder->hpd_pin) {
4991 case HPD_PORT_B:
9642c81c
JN
4992 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
4993 break;
7533eb4f 4994 case HPD_PORT_C:
9642c81c
JN
4995 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
4996 break;
7533eb4f 4997 case HPD_PORT_D:
9642c81c
JN
4998 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
4999 break;
5000 default:
7533eb4f 5001 MISSING_CASE(encoder->hpd_pin);
9642c81c
JN
5002 return false;
5003 }
5004
5005 return I915_READ(PORT_HOTPLUG_STAT) & bit;
5006}
5007
7533eb4f 5008static bool gm45_digital_port_connected(struct intel_encoder *encoder)
9642c81c 5009{
7533eb4f 5010 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
9642c81c
JN
5011 u32 bit;
5012
7533eb4f
RV
5013 switch (encoder->hpd_pin) {
5014 case HPD_PORT_B:
0780cd36 5015 bit = PORTB_HOTPLUG_LIVE_STATUS_GM45;
9642c81c 5016 break;
7533eb4f 5017 case HPD_PORT_C:
0780cd36 5018 bit = PORTC_HOTPLUG_LIVE_STATUS_GM45;
9642c81c 5019 break;
7533eb4f 5020 case HPD_PORT_D:
0780cd36 5021 bit = PORTD_HOTPLUG_LIVE_STATUS_GM45;
9642c81c
JN
5022 break;
5023 default:
7533eb4f 5024 MISSING_CASE(encoder->hpd_pin);
9642c81c 5025 return false;
a4fc5ed6
KP
5026 }
5027
1d245987 5028 return I915_READ(PORT_HOTPLUG_STAT) & bit;
2a592bec
DA
5029}
5030
7533eb4f 5031static bool ilk_digital_port_connected(struct intel_encoder *encoder)
93e5f0b6 5032{
7533eb4f
RV
5033 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5034
5035 if (encoder->hpd_pin == HPD_PORT_A)
93e5f0b6
VS
5036 return I915_READ(DEISR) & DE_DP_A_HOTPLUG;
5037 else
7533eb4f 5038 return ibx_digital_port_connected(encoder);
93e5f0b6
VS
5039}
5040
7533eb4f 5041static bool snb_digital_port_connected(struct intel_encoder *encoder)
93e5f0b6 5042{
7533eb4f
RV
5043 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5044
5045 if (encoder->hpd_pin == HPD_PORT_A)
93e5f0b6
VS
5046 return I915_READ(DEISR) & DE_DP_A_HOTPLUG;
5047 else
7533eb4f 5048 return cpt_digital_port_connected(encoder);
93e5f0b6
VS
5049}
5050
7533eb4f 5051static bool ivb_digital_port_connected(struct intel_encoder *encoder)
93e5f0b6 5052{
7533eb4f
RV
5053 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5054
5055 if (encoder->hpd_pin == HPD_PORT_A)
93e5f0b6
VS
5056 return I915_READ(DEISR) & DE_DP_A_HOTPLUG_IVB;
5057 else
7533eb4f 5058 return cpt_digital_port_connected(encoder);
93e5f0b6
VS
5059}
5060
7533eb4f 5061static bool bdw_digital_port_connected(struct intel_encoder *encoder)
93e5f0b6 5062{
7533eb4f
RV
5063 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5064
5065 if (encoder->hpd_pin == HPD_PORT_A)
93e5f0b6
VS
5066 return I915_READ(GEN8_DE_PORT_ISR) & GEN8_PORT_DP_A_HOTPLUG;
5067 else
7533eb4f 5068 return cpt_digital_port_connected(encoder);
93e5f0b6
VS
5069}
5070
7533eb4f 5071static bool bxt_digital_port_connected(struct intel_encoder *encoder)
e464bfde 5072{
7533eb4f 5073 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
e464bfde
JN
5074 u32 bit;
5075
7533eb4f
RV
5076 switch (encoder->hpd_pin) {
5077 case HPD_PORT_A:
e464bfde
JN
5078 bit = BXT_DE_PORT_HP_DDIA;
5079 break;
7533eb4f 5080 case HPD_PORT_B:
e464bfde
JN
5081 bit = BXT_DE_PORT_HP_DDIB;
5082 break;
7533eb4f 5083 case HPD_PORT_C:
e464bfde
JN
5084 bit = BXT_DE_PORT_HP_DDIC;
5085 break;
5086 default:
7533eb4f 5087 MISSING_CASE(encoder->hpd_pin);
e464bfde
JN
5088 return false;
5089 }
5090
5091 return I915_READ(GEN8_DE_PORT_ISR) & bit;
5092}
5093
b9fcddab
PZ
5094static bool icl_combo_port_connected(struct drm_i915_private *dev_priv,
5095 struct intel_digital_port *intel_dig_port)
5096{
5097 enum port port = intel_dig_port->base.port;
5098
5099 return I915_READ(SDEISR) & SDE_DDI_HOTPLUG_ICP(port);
5100}
5101
/* Human-readable name of a Type-C port type, for debug logging. */
static const char *tc_type_name(enum tc_port_type type)
{
	static const char * const names[] = {
		[TC_PORT_UNKNOWN] = "unknown",
		[TC_PORT_LEGACY] = "legacy",
		[TC_PORT_TYPEC] = "typec",
		[TC_PORT_TBT] = "tbt",
	};

	/* Out-of-range values warn and map to "unknown". */
	if (WARN_ON(type >= ARRAY_SIZE(names)))
		type = TC_PORT_UNKNOWN;

	return names[type];
}
5116
/*
 * Record the detected Type-C port type (legacy / typec / tbt) on the
 * digital port. Exactly one of the three flags must be set; the type is
 * not supposed to change at runtime once known, which is enforced with
 * a WARN.
 */
static void icl_update_tc_port_type(struct drm_i915_private *dev_priv,
				    struct intel_digital_port *intel_dig_port,
				    bool is_legacy, bool is_typec, bool is_tbt)
{
	enum port port = intel_dig_port->base.port;
	enum tc_port_type old_type = intel_dig_port->tc_type;

	WARN_ON(is_legacy + is_typec + is_tbt != 1);

	if (is_legacy)
		intel_dig_port->tc_type = TC_PORT_LEGACY;
	else if (is_typec)
		intel_dig_port->tc_type = TC_PORT_TYPEC;
	else if (is_tbt)
		intel_dig_port->tc_type = TC_PORT_TBT;
	else
		return;

	/* Types are not supposed to be changed at runtime. */
	WARN_ON(old_type != TC_PORT_UNKNOWN &&
		old_type != intel_dig_port->tc_type);

	if (old_type != intel_dig_port->tc_type)
		DRM_DEBUG_KMS("Port %c has TC type %s\n", port_name(port),
			      tc_type_name(intel_dig_port->tc_type));
}
5143
/*
 * This function implements the first part of the Connect Flow described by our
 * specification, Gen11 TypeC Programming chapter. The rest of the flow (reading
 * lanes, EDID, etc) is done as needed in the typical places.
 *
 * Unlike the other ports, type-C ports are not available to use as soon as we
 * get a hotplug. The type-C PHYs can be shared between multiple controllers:
 * display, USB, etc. As a result, handshaking through FIA is required around
 * connect and disconnect to cleanly transfer ownership with the controller and
 * set the type-C power state.
 *
 * We could opt to only do the connect flow when we actually try to use the AUX
 * channels or do a modeset, then immediately run the disconnect flow after
 * usage, but there are some implications on this for a dynamic environment:
 * things may go away or change behind our backs. So for now our driver is
 * always trying to acquire ownership of the controller as soon as it gets an
 * interrupt (or polls state and sees a port is connected) and only gives it
 * back when it sees a disconnect. Implementation of a more fine-grained model
 * will require a lot of coordination with user space and thorough testing for
 * the extra possible cases.
 *
 * Returns true if the PHY is (already or now) owned and usable; false if
 * the PHY is not ready or the port disconnected mid-flow.
 */
static bool icl_tc_phy_connect(struct drm_i915_private *dev_priv,
			       struct intel_digital_port *dig_port)
{
	enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port);
	u32 val;

	/* Only legacy and typec ports need the FIA handshake. */
	if (dig_port->tc_type != TC_PORT_LEGACY &&
	    dig_port->tc_type != TC_PORT_TYPEC)
		return true;

	val = I915_READ(PORT_TX_DFLEXDPPMS);
	if (!(val & DP_PHY_MODE_STATUS_COMPLETED(tc_port))) {
		DRM_DEBUG_KMS("DP PHY for TC port %d not ready\n", tc_port);
		WARN_ON(dig_port->tc_legacy_port);
		return false;
	}

	/*
	 * This function may be called many times in a row without an HPD event
	 * in between, so try to avoid the write when we can.
	 */
	val = I915_READ(PORT_TX_DFLEXDPCSSS);
	if (!(val & DP_PHY_MODE_STATUS_NOT_SAFE(tc_port))) {
		val |= DP_PHY_MODE_STATUS_NOT_SAFE(tc_port);
		I915_WRITE(PORT_TX_DFLEXDPCSSS, val);
	}

	/*
	 * Now we have to re-check the live state, in case the port recently
	 * became disconnected. Not necessary for legacy mode.
	 */
	if (dig_port->tc_type == TC_PORT_TYPEC &&
	    !(I915_READ(PORT_TX_DFLEXDPSP) & TC_LIVE_STATE_TC(tc_port))) {
		DRM_DEBUG_KMS("TC PHY %d sudden disconnect.\n", tc_port);
		icl_tc_phy_disconnect(dev_priv, dig_port);
		return false;
	}

	return true;
}
5205
5206/*
5207 * See the comment at the connect function. This implements the Disconnect
5208 * Flow.
5209 */
f6bff60e
ID
5210void icl_tc_phy_disconnect(struct drm_i915_private *dev_priv,
5211 struct intel_digital_port *dig_port)
39d1e234
PZ
5212{
5213 enum tc_port tc_port = intel_port_to_tc(dev_priv, dig_port->base.port);
39d1e234 5214
b298ba5f 5215 if (dig_port->tc_type == TC_PORT_UNKNOWN)
39d1e234
PZ
5216 return;
5217
5218 /*
b298ba5f
JRS
5219 * TBT disconnection flow is read the live status, what was done in
5220 * caller.
39d1e234 5221 */
b298ba5f
JRS
5222 if (dig_port->tc_type == TC_PORT_TYPEC ||
5223 dig_port->tc_type == TC_PORT_LEGACY) {
5224 u32 val;
5225
5226 val = I915_READ(PORT_TX_DFLEXDPCSSS);
39d1e234
PZ
5227 val &= ~DP_PHY_MODE_STATUS_NOT_SAFE(tc_port);
5228 I915_WRITE(PORT_TX_DFLEXDPCSSS, val);
5229 }
b298ba5f 5230
f0236a85
ID
5231 DRM_DEBUG_KMS("Port %c TC type %s disconnected\n",
5232 port_name(dig_port->base.port),
5233 tc_type_name(dig_port->tc_type));
5234
b298ba5f 5235 dig_port->tc_type = TC_PORT_UNKNOWN;
39d1e234
PZ
5236}
5237
5238/*
5239 * The type-C ports are different because even when they are connected, they may
5240 * not be available/usable by the graphics driver: see the comment on
5241 * icl_tc_phy_connect(). So in our driver instead of adding the additional
5242 * concept of "usable" and make everything check for "connected and usable" we
5243 * define a port as "connected" when it is not only connected, but also when it
5244 * is usable by the rest of the driver. That maintains the old assumption that
5245 * connected ports are usable, and avoids exposing to the users objects they
5246 * can't really use.
5247 */
b9fcddab
PZ
5248static bool icl_tc_port_connected(struct drm_i915_private *dev_priv,
5249 struct intel_digital_port *intel_dig_port)
5250{
5251 enum port port = intel_dig_port->base.port;
5252 enum tc_port tc_port = intel_port_to_tc(dev_priv, port);
5253 bool is_legacy, is_typec, is_tbt;
5254 u32 dpsp;
5255
2a041c97
ID
5256 /*
5257 * WARN if we got a legacy port HPD, but VBT didn't mark the port as
5258 * legacy. Treat the port as legacy from now on.
5259 */
5260 if (WARN_ON(!intel_dig_port->tc_legacy_port &&
5261 I915_READ(SDEISR) & SDE_TC_HOTPLUG_ICP(tc_port)))
5262 intel_dig_port->tc_legacy_port = true;
5263 is_legacy = intel_dig_port->tc_legacy_port;
b9fcddab
PZ
5264
5265 /*
5266 * The spec says we shouldn't be using the ISR bits for detecting
5267 * between TC and TBT. We should use DFLEXDPSP.
5268 */
5269 dpsp = I915_READ(PORT_TX_DFLEXDPSP);
5270 is_typec = dpsp & TC_LIVE_STATE_TC(tc_port);
5271 is_tbt = dpsp & TC_LIVE_STATE_TBT(tc_port);
5272
39d1e234
PZ
5273 if (!is_legacy && !is_typec && !is_tbt) {
5274 icl_tc_phy_disconnect(dev_priv, intel_dig_port);
f6bff60e 5275
6075546f 5276 return false;
39d1e234 5277 }
6075546f
PZ
5278
5279 icl_update_tc_port_type(dev_priv, intel_dig_port, is_legacy, is_typec,
5280 is_tbt);
b9fcddab 5281
39d1e234
PZ
5282 if (!icl_tc_phy_connect(dev_priv, intel_dig_port))
5283 return false;
5284
6075546f 5285 return true;
b9fcddab
PZ
5286}
5287
5288static bool icl_digital_port_connected(struct intel_encoder *encoder)
5289{
5290 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
5291 struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
5292
c0aa8344 5293 if (intel_port_is_combophy(dev_priv, encoder->port))
b9fcddab 5294 return icl_combo_port_connected(dev_priv, dig_port);
c0aa8344 5295 else if (intel_port_is_tc(dev_priv, encoder->port))
b9fcddab 5296 return icl_tc_port_connected(dev_priv, dig_port);
c0aa8344 5297 else
b9fcddab 5298 MISSING_CASE(encoder->hpd_pin);
c0aa8344
MK
5299
5300 return false;
b9fcddab
PZ
5301}
5302
/*
 * intel_digital_port_connected - is the specified port connected?
 * @encoder: intel_encoder
 *
 * In cases where there's a connector physically connected but it can't be used
 * by our hardware we also return false, since the rest of the driver should
 * pretty much treat the port as disconnected. This is relevant for type-C
 * (starting on ICL) where there's ownership involved.
 *
 * Return %true if port is connected, %false otherwise.
 */
bool intel_digital_port_connected(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);

	/* GMCH platforms are checked first; they have their own registers. */
	if (HAS_GMCH(dev_priv)) {
		if (IS_GM45(dev_priv))
			return gm45_digital_port_connected(encoder);
		else
			return g4x_digital_port_connected(encoder);
	}

	/* The order matters: newest platform checks come first. */
	if (INTEL_GEN(dev_priv) >= 11)
		return icl_digital_port_connected(encoder);
	else if (IS_GEN(dev_priv, 10) || IS_GEN9_BC(dev_priv))
		return spt_digital_port_connected(encoder);
	else if (IS_GEN9_LP(dev_priv))
		return bxt_digital_port_connected(encoder);
	else if (IS_GEN(dev_priv, 8))
		return bdw_digital_port_connected(encoder);
	else if (IS_GEN(dev_priv, 7))
		return ivb_digital_port_connected(encoder);
	else if (IS_GEN(dev_priv, 6))
		return snb_digital_port_connected(encoder);
	else if (IS_GEN(dev_priv, 5))
		return ilk_digital_port_connected(encoder);

	MISSING_CASE(INTEL_GEN(dev_priv));
	return false;
}
5343
8c241fef 5344static struct edid *
beb60608 5345intel_dp_get_edid(struct intel_dp *intel_dp)
8c241fef 5346{
beb60608 5347 struct intel_connector *intel_connector = intel_dp->attached_connector;
d6f24d0f 5348
9cd300e0
JN
5349 /* use cached edid if we have one */
5350 if (intel_connector->edid) {
9cd300e0
JN
5351 /* invalid edid */
5352 if (IS_ERR(intel_connector->edid))
d6f24d0f
JB
5353 return NULL;
5354
55e9edeb 5355 return drm_edid_duplicate(intel_connector->edid);
beb60608
CW
5356 } else
5357 return drm_get_edid(&intel_connector->base,
5358 &intel_dp->aux.ddc);
5359}
8c241fef 5360
beb60608
CW
5361static void
5362intel_dp_set_edid(struct intel_dp *intel_dp)
5363{
5364 struct intel_connector *intel_connector = intel_dp->attached_connector;
5365 struct edid *edid;
8c241fef 5366
f21a2198 5367 intel_dp_unset_edid(intel_dp);
beb60608
CW
5368 edid = intel_dp_get_edid(intel_dp);
5369 intel_connector->detect_edid = edid;
5370
e6b72c94 5371 intel_dp->has_audio = drm_detect_monitor_audio(edid);
82e00d11 5372 drm_dp_cec_set_edid(&intel_dp->aux, edid);
8c241fef
KP
5373}
5374
beb60608
CW
5375static void
5376intel_dp_unset_edid(struct intel_dp *intel_dp)
8c241fef 5377{
beb60608 5378 struct intel_connector *intel_connector = intel_dp->attached_connector;
8c241fef 5379
82e00d11 5380 drm_dp_cec_unset_edid(&intel_dp->aux);
beb60608
CW
5381 kfree(intel_connector->detect_edid);
5382 intel_connector->detect_edid = NULL;
9cd300e0 5383
beb60608
CW
5384 intel_dp->has_audio = false;
5385}
d6f24d0f 5386
6c5ed5ae 5387static int
cbfa8ac8
DP
5388intel_dp_detect(struct drm_connector *connector,
5389 struct drm_modeset_acquire_ctx *ctx,
5390 bool force)
a9756bb5 5391{
cbfa8ac8
DP
5392 struct drm_i915_private *dev_priv = to_i915(connector->dev);
5393 struct intel_dp *intel_dp = intel_attached_dp(connector);
337837ac
ID
5394 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5395 struct intel_encoder *encoder = &dig_port->base;
a9756bb5 5396 enum drm_connector_status status;
337837ac
ID
5397 enum intel_display_power_domain aux_domain =
5398 intel_aux_power_domain(dig_port);
0e6e0be4 5399 intel_wakeref_t wakeref;
a9756bb5 5400
cbfa8ac8
DP
5401 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
5402 connector->base.id, connector->name);
2f773477 5403 WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
6c5ed5ae 5404
0e6e0be4 5405 wakeref = intel_display_power_get(dev_priv, aux_domain);
a9756bb5 5406
b93b41af 5407 /* Can't disconnect eDP */
1853a9da 5408 if (intel_dp_is_edp(intel_dp))
d410b56d 5409 status = edp_detect(intel_dp);
d5acd97f 5410 else if (intel_digital_port_connected(encoder))
c555a81d 5411 status = intel_dp_detect_dpcd(intel_dp);
a9756bb5 5412 else
c555a81d
ACO
5413 status = connector_status_disconnected;
5414
5cb651a7 5415 if (status == connector_status_disconnected) {
c1617abc 5416 memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));
93ac092f 5417 memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd));
4df6960e 5418
0e505a08 5419 if (intel_dp->is_mst) {
5420 DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
5421 intel_dp->is_mst,
5422 intel_dp->mst_mgr.mst_state);
5423 intel_dp->is_mst = false;
5424 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
5425 intel_dp->is_mst);
5426 }
5427
c8c8fb33 5428 goto out;
4df6960e 5429 }
a9756bb5 5430
d7e8ef02 5431 if (intel_dp->reset_link_params) {
540b0b7f
JN
5432 /* Initial max link lane count */
5433 intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp);
f482984a 5434
540b0b7f
JN
5435 /* Initial max link rate */
5436 intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
d7e8ef02
MN
5437
5438 intel_dp->reset_link_params = false;
5439 }
f482984a 5440
fe5a66f9
VS
5441 intel_dp_print_rates(intel_dp);
5442
93ac092f
MN
5443 /* Read DP Sink DSC Cap DPCD regs for DP v1.4 */
5444 if (INTEL_GEN(dev_priv) >= 11)
5445 intel_dp_get_dsc_sink_cap(intel_dp);
5446
84c36753
JN
5447 drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
5448 drm_dp_is_branch(intel_dp->dpcd));
0e390a33 5449
c4e3170a
VS
5450 intel_dp_configure_mst(intel_dp);
5451
5452 if (intel_dp->is_mst) {
f21a2198
SS
5453 /*
5454 * If we are in MST mode then this connector
5455 * won't appear connected or have anything
5456 * with EDID on it
5457 */
0e32b39c
DA
5458 status = connector_status_disconnected;
5459 goto out;
f24f6eb9
DP
5460 }
5461
5462 /*
5463 * Some external monitors do not signal loss of link synchronization
5464 * with an IRQ_HPD, so force a link status check.
5465 */
47658556
DP
5466 if (!intel_dp_is_edp(intel_dp)) {
5467 int ret;
5468
5469 ret = intel_dp_retrain_link(encoder, ctx);
5470 if (ret) {
0e6e0be4 5471 intel_display_power_put(dev_priv, aux_domain, wakeref);
47658556
DP
5472 return ret;
5473 }
5474 }
0e32b39c 5475
4df6960e
SS
5476 /*
5477 * Clearing NACK and defer counts to get their exact values
5478 * while reading EDID which are required by Compliance tests
5479 * 4.2.2.4 and 4.2.2.5
5480 */
5481 intel_dp->aux.i2c_nack_count = 0;
5482 intel_dp->aux.i2c_defer_count = 0;
5483
beb60608 5484 intel_dp_set_edid(intel_dp);
cbfa8ac8
DP
5485 if (intel_dp_is_edp(intel_dp) ||
5486 to_intel_connector(connector)->detect_edid)
5cb651a7 5487 status = connector_status_connected;
c8c8fb33 5488
9844bc87 5489 intel_dp_check_service_irq(intel_dp);
09b1eb13 5490
c8c8fb33 5491out:
5cb651a7 5492 if (status != connector_status_connected && !intel_dp->is_mst)
f21a2198 5493 intel_dp_unset_edid(intel_dp);
7d23e3c3 5494
0e6e0be4 5495 intel_display_power_put(dev_priv, aux_domain, wakeref);
5cb651a7 5496 return status;
f21a2198
SS
5497}
5498
beb60608
CW
5499static void
5500intel_dp_force(struct drm_connector *connector)
a4fc5ed6 5501{
df0e9248 5502 struct intel_dp *intel_dp = intel_attached_dp(connector);
337837ac
ID
5503 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
5504 struct intel_encoder *intel_encoder = &dig_port->base;
25f78f58 5505 struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
337837ac
ID
5506 enum intel_display_power_domain aux_domain =
5507 intel_aux_power_domain(dig_port);
0e6e0be4 5508 intel_wakeref_t wakeref;
a4fc5ed6 5509
beb60608
CW
5510 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
5511 connector->base.id, connector->name);
5512 intel_dp_unset_edid(intel_dp);
a4fc5ed6 5513
beb60608
CW
5514 if (connector->status != connector_status_connected)
5515 return;
671dedd2 5516
0e6e0be4 5517 wakeref = intel_display_power_get(dev_priv, aux_domain);
beb60608
CW
5518
5519 intel_dp_set_edid(intel_dp);
5520
0e6e0be4 5521 intel_display_power_put(dev_priv, aux_domain, wakeref);
beb60608
CW
5522}
5523
5524static int intel_dp_get_modes(struct drm_connector *connector)
5525{
5526 struct intel_connector *intel_connector = to_intel_connector(connector);
5527 struct edid *edid;
5528
5529 edid = intel_connector->detect_edid;
5530 if (edid) {
5531 int ret = intel_connector_update_modes(connector, edid);
5532 if (ret)
5533 return ret;
5534 }
32f9d658 5535
f8779fda 5536 /* if eDP has no EDID, fall back to fixed mode */
1853a9da 5537 if (intel_dp_is_edp(intel_attached_dp(connector)) &&
beb60608 5538 intel_connector->panel.fixed_mode) {
f8779fda 5539 struct drm_display_mode *mode;
beb60608
CW
5540
5541 mode = drm_mode_duplicate(connector->dev,
dd06f90e 5542 intel_connector->panel.fixed_mode);
f8779fda 5543 if (mode) {
32f9d658
ZW
5544 drm_mode_probed_add(connector, mode);
5545 return 1;
5546 }
5547 }
beb60608 5548
32f9d658 5549 return 0;
a4fc5ed6
KP
5550}
5551
7a418e34
CW
5552static int
5553intel_dp_connector_register(struct drm_connector *connector)
5554{
5555 struct intel_dp *intel_dp = intel_attached_dp(connector);
82e00d11 5556 struct drm_device *dev = connector->dev;
1ebaa0b9
CW
5557 int ret;
5558
5559 ret = intel_connector_register(connector);
5560 if (ret)
5561 return ret;
7a418e34
CW
5562
5563 i915_debugfs_connector_add(connector);
5564
5565 DRM_DEBUG_KMS("registering %s bus for %s\n",
5566 intel_dp->aux.name, connector->kdev->kobj.name);
5567
5568 intel_dp->aux.dev = connector->kdev;
82e00d11
HV
5569 ret = drm_dp_aux_register(&intel_dp->aux);
5570 if (!ret)
5571 drm_dp_cec_register_connector(&intel_dp->aux,
5572 connector->name, dev->dev);
5573 return ret;
7a418e34
CW
5574}
5575
c191eca1
CW
5576static void
5577intel_dp_connector_unregister(struct drm_connector *connector)
5578{
82e00d11
HV
5579 struct intel_dp *intel_dp = intel_attached_dp(connector);
5580
5581 drm_dp_cec_unregister_connector(&intel_dp->aux);
5582 drm_dp_aux_unregister(&intel_dp->aux);
c191eca1
CW
5583 intel_connector_unregister(connector);
5584}
5585
f6bff60e 5586void intel_dp_encoder_flush_work(struct drm_encoder *encoder)
24d05927 5587{
da63a9f2
PZ
5588 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
5589 struct intel_dp *intel_dp = &intel_dig_port->dp;
24d05927 5590
0e32b39c 5591 intel_dp_mst_encoder_cleanup(intel_dig_port);
1853a9da 5592 if (intel_dp_is_edp(intel_dp)) {
69d93820
CW
5593 intel_wakeref_t wakeref;
5594
bd943159 5595 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
951468f3
VS
5596 /*
5597 * vdd might still be enabled do to the delayed vdd off.
5598 * Make sure vdd is actually turned off here.
5599 */
69d93820
CW
5600 with_pps_lock(intel_dp, wakeref)
5601 edp_panel_vdd_off_sync(intel_dp);
773538e8 5602
01527b31
CT
5603 if (intel_dp->edp_notifier.notifier_call) {
5604 unregister_reboot_notifier(&intel_dp->edp_notifier);
5605 intel_dp->edp_notifier.notifier_call = NULL;
5606 }
bd943159 5607 }
99681886
CW
5608
5609 intel_dp_aux_fini(intel_dp);
f6bff60e
ID
5610}
5611
5612static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
5613{
5614 intel_dp_encoder_flush_work(encoder);
99681886 5615
c8bd0e49 5616 drm_encoder_cleanup(encoder);
f6bff60e 5617 kfree(enc_to_dig_port(encoder));
24d05927
DV
5618}
5619
bf93ba67 5620void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
07f9cd0b
ID
5621{
5622 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
69d93820 5623 intel_wakeref_t wakeref;
07f9cd0b 5624
1853a9da 5625 if (!intel_dp_is_edp(intel_dp))
07f9cd0b
ID
5626 return;
5627
951468f3
VS
5628 /*
5629 * vdd might still be enabled do to the delayed vdd off.
5630 * Make sure vdd is actually turned off here.
5631 */
afa4e53a 5632 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
69d93820
CW
5633 with_pps_lock(intel_dp, wakeref)
5634 edp_panel_vdd_off_sync(intel_dp);
07f9cd0b
ID
5635}
5636
cf9cb35f
R
5637static void intel_dp_hdcp_wait_for_cp_irq(struct intel_hdcp *hdcp, int timeout)
5638{
5639 long ret;
5640
5641#define C (hdcp->cp_irq_count_cached != atomic_read(&hdcp->cp_irq_count))
5642 ret = wait_event_interruptible_timeout(hdcp->cp_irq_queue, C,
5643 msecs_to_jiffies(timeout));
5644
5645 if (!ret)
5646 DRM_DEBUG_KMS("Timedout at waiting for CP_IRQ\n");
5647}
5648
20f24d77
SP
5649static
5650int intel_dp_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
5651 u8 *an)
5652{
5653 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_dig_port->base.base);
32078b72
VS
5654 static const struct drm_dp_aux_msg msg = {
5655 .request = DP_AUX_NATIVE_WRITE,
5656 .address = DP_AUX_HDCP_AKSV,
5657 .size = DRM_HDCP_KSV_LEN,
5658 };
830de422 5659 u8 txbuf[HEADER_SIZE + DRM_HDCP_KSV_LEN] = {}, rxbuf[2], reply = 0;
20f24d77
SP
5660 ssize_t dpcd_ret;
5661 int ret;
5662
5663 /* Output An first, that's easy */
5664 dpcd_ret = drm_dp_dpcd_write(&intel_dig_port->dp.aux, DP_AUX_HDCP_AN,
5665 an, DRM_HDCP_AN_LEN);
5666 if (dpcd_ret != DRM_HDCP_AN_LEN) {
3aae21fc
R
5667 DRM_DEBUG_KMS("Failed to write An over DP/AUX (%zd)\n",
5668 dpcd_ret);
20f24d77
SP
5669 return dpcd_ret >= 0 ? -EIO : dpcd_ret;
5670 }
5671
5672 /*
5673 * Since Aksv is Oh-So-Secret, we can't access it in software. So in
5674 * order to get it on the wire, we need to create the AUX header as if
5675 * we were writing the data, and then tickle the hardware to output the
5676 * data once the header is sent out.
5677 */
32078b72 5678 intel_dp_aux_header(txbuf, &msg);
20f24d77 5679
32078b72 5680 ret = intel_dp_aux_xfer(intel_dp, txbuf, HEADER_SIZE + msg.size,
8159c796
VS
5681 rxbuf, sizeof(rxbuf),
5682 DP_AUX_CH_CTL_AUX_AKSV_SELECT);
20f24d77 5683 if (ret < 0) {
3aae21fc 5684 DRM_DEBUG_KMS("Write Aksv over DP/AUX failed (%d)\n", ret);
20f24d77
SP
5685 return ret;
5686 } else if (ret == 0) {
3aae21fc 5687 DRM_DEBUG_KMS("Aksv write over DP/AUX was empty\n");
20f24d77
SP
5688 return -EIO;
5689 }
5690
5691 reply = (rxbuf[0] >> 4) & DP_AUX_NATIVE_REPLY_MASK;
4cf74aaf
R
5692 if (reply != DP_AUX_NATIVE_REPLY_ACK) {
5693 DRM_DEBUG_KMS("Aksv write: no DP_AUX_NATIVE_REPLY_ACK %x\n",
5694 reply);
5695 return -EIO;
5696 }
5697 return 0;
20f24d77
SP
5698}
5699
5700static int intel_dp_hdcp_read_bksv(struct intel_digital_port *intel_dig_port,
5701 u8 *bksv)
5702{
5703 ssize_t ret;
5704 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BKSV, bksv,
5705 DRM_HDCP_KSV_LEN);
5706 if (ret != DRM_HDCP_KSV_LEN) {
3aae21fc 5707 DRM_DEBUG_KMS("Read Bksv from DP/AUX failed (%zd)\n", ret);
20f24d77
SP
5708 return ret >= 0 ? -EIO : ret;
5709 }
5710 return 0;
5711}
5712
5713static int intel_dp_hdcp_read_bstatus(struct intel_digital_port *intel_dig_port,
5714 u8 *bstatus)
5715{
5716 ssize_t ret;
5717 /*
5718 * For some reason the HDMI and DP HDCP specs call this register
5719 * definition by different names. In the HDMI spec, it's called BSTATUS,
5720 * but in DP it's called BINFO.
5721 */
5722 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BINFO,
5723 bstatus, DRM_HDCP_BSTATUS_LEN);
5724 if (ret != DRM_HDCP_BSTATUS_LEN) {
3aae21fc 5725 DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
20f24d77
SP
5726 return ret >= 0 ? -EIO : ret;
5727 }
5728 return 0;
5729}
5730
5731static
791a98dd
R
5732int intel_dp_hdcp_read_bcaps(struct intel_digital_port *intel_dig_port,
5733 u8 *bcaps)
20f24d77
SP
5734{
5735 ssize_t ret;
791a98dd 5736
20f24d77 5737 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BCAPS,
791a98dd 5738 bcaps, 1);
20f24d77 5739 if (ret != 1) {
3aae21fc 5740 DRM_DEBUG_KMS("Read bcaps from DP/AUX failed (%zd)\n", ret);
20f24d77
SP
5741 return ret >= 0 ? -EIO : ret;
5742 }
791a98dd
R
5743
5744 return 0;
5745}
5746
5747static
5748int intel_dp_hdcp_repeater_present(struct intel_digital_port *intel_dig_port,
5749 bool *repeater_present)
5750{
5751 ssize_t ret;
5752 u8 bcaps;
5753
5754 ret = intel_dp_hdcp_read_bcaps(intel_dig_port, &bcaps);
5755 if (ret)
5756 return ret;
5757
20f24d77
SP
5758 *repeater_present = bcaps & DP_BCAPS_REPEATER_PRESENT;
5759 return 0;
5760}
5761
5762static
5763int intel_dp_hdcp_read_ri_prime(struct intel_digital_port *intel_dig_port,
5764 u8 *ri_prime)
5765{
5766 ssize_t ret;
5767 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_RI_PRIME,
5768 ri_prime, DRM_HDCP_RI_LEN);
5769 if (ret != DRM_HDCP_RI_LEN) {
3aae21fc 5770 DRM_DEBUG_KMS("Read Ri' from DP/AUX failed (%zd)\n", ret);
20f24d77
SP
5771 return ret >= 0 ? -EIO : ret;
5772 }
5773 return 0;
5774}
5775
5776static
5777int intel_dp_hdcp_read_ksv_ready(struct intel_digital_port *intel_dig_port,
5778 bool *ksv_ready)
5779{
5780 ssize_t ret;
5781 u8 bstatus;
5782 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
5783 &bstatus, 1);
5784 if (ret != 1) {
3aae21fc 5785 DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
20f24d77
SP
5786 return ret >= 0 ? -EIO : ret;
5787 }
5788 *ksv_ready = bstatus & DP_BSTATUS_READY;
5789 return 0;
5790}
5791
5792static
5793int intel_dp_hdcp_read_ksv_fifo(struct intel_digital_port *intel_dig_port,
5794 int num_downstream, u8 *ksv_fifo)
5795{
5796 ssize_t ret;
5797 int i;
5798
5799 /* KSV list is read via 15 byte window (3 entries @ 5 bytes each) */
5800 for (i = 0; i < num_downstream; i += 3) {
5801 size_t len = min(num_downstream - i, 3) * DRM_HDCP_KSV_LEN;
5802 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
5803 DP_AUX_HDCP_KSV_FIFO,
5804 ksv_fifo + i * DRM_HDCP_KSV_LEN,
5805 len);
5806 if (ret != len) {
3aae21fc
R
5807 DRM_DEBUG_KMS("Read ksv[%d] from DP/AUX failed (%zd)\n",
5808 i, ret);
20f24d77
SP
5809 return ret >= 0 ? -EIO : ret;
5810 }
5811 }
5812 return 0;
5813}
5814
5815static
5816int intel_dp_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port,
5817 int i, u32 *part)
5818{
5819 ssize_t ret;
5820
5821 if (i >= DRM_HDCP_V_PRIME_NUM_PARTS)
5822 return -EINVAL;
5823
5824 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
5825 DP_AUX_HDCP_V_PRIME(i), part,
5826 DRM_HDCP_V_PRIME_PART_LEN);
5827 if (ret != DRM_HDCP_V_PRIME_PART_LEN) {
3aae21fc 5828 DRM_DEBUG_KMS("Read v'[%d] from DP/AUX failed (%zd)\n", i, ret);
20f24d77
SP
5829 return ret >= 0 ? -EIO : ret;
5830 }
5831 return 0;
5832}
5833
5834static
5835int intel_dp_hdcp_toggle_signalling(struct intel_digital_port *intel_dig_port,
5836 bool enable)
5837{
5838 /* Not used for single stream DisplayPort setups */
5839 return 0;
5840}
5841
5842static
5843bool intel_dp_hdcp_check_link(struct intel_digital_port *intel_dig_port)
5844{
5845 ssize_t ret;
5846 u8 bstatus;
b7fc1a9b 5847
20f24d77
SP
5848 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, DP_AUX_HDCP_BSTATUS,
5849 &bstatus, 1);
5850 if (ret != 1) {
3aae21fc 5851 DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
b7fc1a9b 5852 return false;
20f24d77 5853 }
b7fc1a9b 5854
20f24d77
SP
5855 return !(bstatus & (DP_BSTATUS_LINK_FAILURE | DP_BSTATUS_REAUTH_REQ));
5856}
5857
791a98dd
R
5858static
5859int intel_dp_hdcp_capable(struct intel_digital_port *intel_dig_port,
5860 bool *hdcp_capable)
5861{
5862 ssize_t ret;
5863 u8 bcaps;
5864
5865 ret = intel_dp_hdcp_read_bcaps(intel_dig_port, &bcaps);
5866 if (ret)
5867 return ret;
5868
5869 *hdcp_capable = bcaps & DP_BCAPS_HDCP_CAPABLE;
5870 return 0;
5871}
5872
238d3a9e
R
5873struct hdcp2_dp_errata_stream_type {
5874 u8 msg_id;
5875 u8 stream_type;
5876} __packed;
5877
5878static struct hdcp2_dp_msg_data {
5879 u8 msg_id;
5880 u32 offset;
5881 bool msg_detectable;
5882 u32 timeout;
5883 u32 timeout2; /* Added for non_paired situation */
5884 } hdcp2_msg_data[] = {
5885 {HDCP_2_2_AKE_INIT, DP_HDCP_2_2_AKE_INIT_OFFSET, false, 0, 0},
5886 {HDCP_2_2_AKE_SEND_CERT, DP_HDCP_2_2_AKE_SEND_CERT_OFFSET,
5887 false, HDCP_2_2_CERT_TIMEOUT_MS, 0},
5888 {HDCP_2_2_AKE_NO_STORED_KM, DP_HDCP_2_2_AKE_NO_STORED_KM_OFFSET,
5889 false, 0, 0},
5890 {HDCP_2_2_AKE_STORED_KM, DP_HDCP_2_2_AKE_STORED_KM_OFFSET,
5891 false, 0, 0},
5892 {HDCP_2_2_AKE_SEND_HPRIME, DP_HDCP_2_2_AKE_SEND_HPRIME_OFFSET,
5893 true, HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS,
5894 HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS},
5895 {HDCP_2_2_AKE_SEND_PAIRING_INFO,
5896 DP_HDCP_2_2_AKE_SEND_PAIRING_INFO_OFFSET, true,
5897 HDCP_2_2_PAIRING_TIMEOUT_MS, 0},
5898 {HDCP_2_2_LC_INIT, DP_HDCP_2_2_LC_INIT_OFFSET, false, 0, 0},
5899 {HDCP_2_2_LC_SEND_LPRIME, DP_HDCP_2_2_LC_SEND_LPRIME_OFFSET,
5900 false, HDCP_2_2_DP_LPRIME_TIMEOUT_MS, 0},
5901 {HDCP_2_2_SKE_SEND_EKS, DP_HDCP_2_2_SKE_SEND_EKS_OFFSET, false,
5902 0, 0},
5903 {HDCP_2_2_REP_SEND_RECVID_LIST,
5904 DP_HDCP_2_2_REP_SEND_RECVID_LIST_OFFSET, true,
5905 HDCP_2_2_RECVID_LIST_TIMEOUT_MS, 0},
5906 {HDCP_2_2_REP_SEND_ACK, DP_HDCP_2_2_REP_SEND_ACK_OFFSET, false,
5907 0, 0},
5908 {HDCP_2_2_REP_STREAM_MANAGE,
5909 DP_HDCP_2_2_REP_STREAM_MANAGE_OFFSET, false,
5910 0, 0},
5911 {HDCP_2_2_REP_STREAM_READY, DP_HDCP_2_2_REP_STREAM_READY_OFFSET,
5912 false, HDCP_2_2_STREAM_READY_TIMEOUT_MS, 0},
5913/* local define to shovel this through the write_2_2 interface */
5914#define HDCP_2_2_ERRATA_DP_STREAM_TYPE 50
5915 {HDCP_2_2_ERRATA_DP_STREAM_TYPE,
5916 DP_HDCP_2_2_REG_STREAM_TYPE_OFFSET, false,
5917 0, 0},
5918 };
5919
5920static inline
5921int intel_dp_hdcp2_read_rx_status(struct intel_digital_port *intel_dig_port,
5922 u8 *rx_status)
5923{
5924 ssize_t ret;
5925
5926 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
5927 DP_HDCP_2_2_REG_RXSTATUS_OFFSET, rx_status,
5928 HDCP_2_2_DP_RXSTATUS_LEN);
5929 if (ret != HDCP_2_2_DP_RXSTATUS_LEN) {
5930 DRM_DEBUG_KMS("Read bstatus from DP/AUX failed (%zd)\n", ret);
5931 return ret >= 0 ? -EIO : ret;
5932 }
5933
5934 return 0;
5935}
5936
5937static
5938int hdcp2_detect_msg_availability(struct intel_digital_port *intel_dig_port,
5939 u8 msg_id, bool *msg_ready)
5940{
5941 u8 rx_status;
5942 int ret;
5943
5944 *msg_ready = false;
5945 ret = intel_dp_hdcp2_read_rx_status(intel_dig_port, &rx_status);
5946 if (ret < 0)
5947 return ret;
5948
5949 switch (msg_id) {
5950 case HDCP_2_2_AKE_SEND_HPRIME:
5951 if (HDCP_2_2_DP_RXSTATUS_H_PRIME(rx_status))
5952 *msg_ready = true;
5953 break;
5954 case HDCP_2_2_AKE_SEND_PAIRING_INFO:
5955 if (HDCP_2_2_DP_RXSTATUS_PAIRING(rx_status))
5956 *msg_ready = true;
5957 break;
5958 case HDCP_2_2_REP_SEND_RECVID_LIST:
5959 if (HDCP_2_2_DP_RXSTATUS_READY(rx_status))
5960 *msg_ready = true;
5961 break;
5962 default:
5963 DRM_ERROR("Unidentified msg_id: %d\n", msg_id);
5964 return -EINVAL;
5965 }
5966
5967 return 0;
5968}
5969
5970static ssize_t
5971intel_dp_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port,
5972 struct hdcp2_dp_msg_data *hdcp2_msg_data)
5973{
5974 struct intel_dp *dp = &intel_dig_port->dp;
5975 struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
5976 u8 msg_id = hdcp2_msg_data->msg_id;
5977 int ret, timeout;
5978 bool msg_ready = false;
5979
5980 if (msg_id == HDCP_2_2_AKE_SEND_HPRIME && !hdcp->is_paired)
5981 timeout = hdcp2_msg_data->timeout2;
5982 else
5983 timeout = hdcp2_msg_data->timeout;
5984
5985 /*
5986 * There is no way to detect the CERT, LPRIME and STREAM_READY
5987 * availability. So Wait for timeout and read the msg.
5988 */
5989 if (!hdcp2_msg_data->msg_detectable) {
5990 mdelay(timeout);
5991 ret = 0;
5992 } else {
cf9cb35f
R
5993 /*
5994 * As we want to check the msg availability at timeout, Ignoring
5995 * the timeout at wait for CP_IRQ.
5996 */
5997 intel_dp_hdcp_wait_for_cp_irq(hdcp, timeout);
5998 ret = hdcp2_detect_msg_availability(intel_dig_port,
5999 msg_id, &msg_ready);
238d3a9e
R
6000 if (!msg_ready)
6001 ret = -ETIMEDOUT;
6002 }
6003
6004 if (ret)
6005 DRM_DEBUG_KMS("msg_id %d, ret %d, timeout(mSec): %d\n",
6006 hdcp2_msg_data->msg_id, ret, timeout);
6007
6008 return ret;
6009}
6010
6011static struct hdcp2_dp_msg_data *get_hdcp2_dp_msg_data(u8 msg_id)
6012{
6013 int i;
6014
6015 for (i = 0; i < ARRAY_SIZE(hdcp2_msg_data); i++)
6016 if (hdcp2_msg_data[i].msg_id == msg_id)
6017 return &hdcp2_msg_data[i];
6018
6019 return NULL;
6020}
6021
6022static
6023int intel_dp_hdcp2_write_msg(struct intel_digital_port *intel_dig_port,
6024 void *buf, size_t size)
6025{
cf9cb35f
R
6026 struct intel_dp *dp = &intel_dig_port->dp;
6027 struct intel_hdcp *hdcp = &dp->attached_connector->hdcp;
238d3a9e
R
6028 unsigned int offset;
6029 u8 *byte = buf;
6030 ssize_t ret, bytes_to_write, len;
6031 struct hdcp2_dp_msg_data *hdcp2_msg_data;
6032
6033 hdcp2_msg_data = get_hdcp2_dp_msg_data(*byte);
6034 if (!hdcp2_msg_data)
6035 return -EINVAL;
6036
6037 offset = hdcp2_msg_data->offset;
6038
6039 /* No msg_id in DP HDCP2.2 msgs */
6040 bytes_to_write = size - 1;
6041 byte++;
6042
cf9cb35f
R
6043 hdcp->cp_irq_count_cached = atomic_read(&hdcp->cp_irq_count);
6044
238d3a9e
R
6045 while (bytes_to_write) {
6046 len = bytes_to_write > DP_AUX_MAX_PAYLOAD_BYTES ?
6047 DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_write;
6048
6049 ret = drm_dp_dpcd_write(&intel_dig_port->dp.aux,
6050 offset, (void *)byte, len);
6051 if (ret < 0)
6052 return ret;
6053
6054 bytes_to_write -= ret;
6055 byte += ret;
6056 offset += ret;
6057 }
6058
6059 return size;
6060}
6061
6062static
6063ssize_t get_receiver_id_list_size(struct intel_digital_port *intel_dig_port)
6064{
6065 u8 rx_info[HDCP_2_2_RXINFO_LEN];
6066 u32 dev_cnt;
6067 ssize_t ret;
6068
6069 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
6070 DP_HDCP_2_2_REG_RXINFO_OFFSET,
6071 (void *)rx_info, HDCP_2_2_RXINFO_LEN);
6072 if (ret != HDCP_2_2_RXINFO_LEN)
6073 return ret >= 0 ? -EIO : ret;
6074
6075 dev_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 |
6076 HDCP_2_2_DEV_COUNT_LO(rx_info[1]));
6077
6078 if (dev_cnt > HDCP_2_2_MAX_DEVICE_COUNT)
6079 dev_cnt = HDCP_2_2_MAX_DEVICE_COUNT;
6080
6081 ret = sizeof(struct hdcp2_rep_send_receiverid_list) -
6082 HDCP_2_2_RECEIVER_IDS_MAX_LEN +
6083 (dev_cnt * HDCP_2_2_RECEIVER_ID_LEN);
6084
6085 return ret;
6086}
6087
6088static
6089int intel_dp_hdcp2_read_msg(struct intel_digital_port *intel_dig_port,
6090 u8 msg_id, void *buf, size_t size)
6091{
6092 unsigned int offset;
6093 u8 *byte = buf;
6094 ssize_t ret, bytes_to_recv, len;
6095 struct hdcp2_dp_msg_data *hdcp2_msg_data;
6096
6097 hdcp2_msg_data = get_hdcp2_dp_msg_data(msg_id);
6098 if (!hdcp2_msg_data)
6099 return -EINVAL;
6100 offset = hdcp2_msg_data->offset;
6101
6102 ret = intel_dp_hdcp2_wait_for_msg(intel_dig_port, hdcp2_msg_data);
6103 if (ret < 0)
6104 return ret;
6105
6106 if (msg_id == HDCP_2_2_REP_SEND_RECVID_LIST) {
6107 ret = get_receiver_id_list_size(intel_dig_port);
6108 if (ret < 0)
6109 return ret;
6110
6111 size = ret;
6112 }
6113 bytes_to_recv = size - 1;
6114
6115 /* DP adaptation msgs has no msg_id */
6116 byte++;
6117
6118 while (bytes_to_recv) {
6119 len = bytes_to_recv > DP_AUX_MAX_PAYLOAD_BYTES ?
6120 DP_AUX_MAX_PAYLOAD_BYTES : bytes_to_recv;
6121
6122 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux, offset,
6123 (void *)byte, len);
6124 if (ret < 0) {
6125 DRM_DEBUG_KMS("msg_id %d, ret %zd\n", msg_id, ret);
6126 return ret;
6127 }
6128
6129 bytes_to_recv -= ret;
6130 byte += ret;
6131 offset += ret;
6132 }
6133 byte = buf;
6134 *byte = msg_id;
6135
6136 return size;
6137}
6138
6139static
6140int intel_dp_hdcp2_config_stream_type(struct intel_digital_port *intel_dig_port,
6141 bool is_repeater, u8 content_type)
6142{
6143 struct hdcp2_dp_errata_stream_type stream_type_msg;
6144
6145 if (is_repeater)
6146 return 0;
6147
6148 /*
6149 * Errata for DP: As Stream type is used for encryption, Receiver
6150 * should be communicated with stream type for the decryption of the
6151 * content.
6152 * Repeater will be communicated with stream type as a part of it's
6153 * auth later in time.
6154 */
6155 stream_type_msg.msg_id = HDCP_2_2_ERRATA_DP_STREAM_TYPE;
6156 stream_type_msg.stream_type = content_type;
6157
6158 return intel_dp_hdcp2_write_msg(intel_dig_port, &stream_type_msg,
6159 sizeof(stream_type_msg));
6160}
6161
6162static
6163int intel_dp_hdcp2_check_link(struct intel_digital_port *intel_dig_port)
6164{
6165 u8 rx_status;
6166 int ret;
6167
6168 ret = intel_dp_hdcp2_read_rx_status(intel_dig_port, &rx_status);
6169 if (ret)
6170 return ret;
6171
6172 if (HDCP_2_2_DP_RXSTATUS_REAUTH_REQ(rx_status))
6173 ret = HDCP_REAUTH_REQUEST;
6174 else if (HDCP_2_2_DP_RXSTATUS_LINK_FAILED(rx_status))
6175 ret = HDCP_LINK_INTEGRITY_FAILURE;
6176 else if (HDCP_2_2_DP_RXSTATUS_READY(rx_status))
6177 ret = HDCP_TOPOLOGY_CHANGE;
6178
6179 return ret;
6180}
6181
6182static
6183int intel_dp_hdcp2_capable(struct intel_digital_port *intel_dig_port,
6184 bool *capable)
6185{
6186 u8 rx_caps[3];
6187 int ret;
6188
6189 *capable = false;
6190 ret = drm_dp_dpcd_read(&intel_dig_port->dp.aux,
6191 DP_HDCP_2_2_REG_RX_CAPS_OFFSET,
6192 rx_caps, HDCP_2_2_RXCAPS_LEN);
6193 if (ret != HDCP_2_2_RXCAPS_LEN)
6194 return ret >= 0 ? -EIO : ret;
6195
6196 if (rx_caps[0] == HDCP_2_2_RX_CAPS_VERSION_VAL &&
6197 HDCP_2_2_DP_HDCP_CAPABLE(rx_caps[2]))
6198 *capable = true;
6199
6200 return 0;
6201}
6202
20f24d77
SP
6203static const struct intel_hdcp_shim intel_dp_hdcp_shim = {
6204 .write_an_aksv = intel_dp_hdcp_write_an_aksv,
6205 .read_bksv = intel_dp_hdcp_read_bksv,
6206 .read_bstatus = intel_dp_hdcp_read_bstatus,
6207 .repeater_present = intel_dp_hdcp_repeater_present,
6208 .read_ri_prime = intel_dp_hdcp_read_ri_prime,
6209 .read_ksv_ready = intel_dp_hdcp_read_ksv_ready,
6210 .read_ksv_fifo = intel_dp_hdcp_read_ksv_fifo,
6211 .read_v_prime_part = intel_dp_hdcp_read_v_prime_part,
6212 .toggle_signalling = intel_dp_hdcp_toggle_signalling,
6213 .check_link = intel_dp_hdcp_check_link,
791a98dd 6214 .hdcp_capable = intel_dp_hdcp_capable,
238d3a9e
R
6215 .write_2_2_msg = intel_dp_hdcp2_write_msg,
6216 .read_2_2_msg = intel_dp_hdcp2_read_msg,
6217 .config_stream_type = intel_dp_hdcp2_config_stream_type,
6218 .check_2_2_link = intel_dp_hdcp2_check_link,
6219 .hdcp_2_2_capable = intel_dp_hdcp2_capable,
6220 .protocol = HDCP_PROTOCOL_DP,
20f24d77
SP
6221};
6222
49e6bc51
VS
6223static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
6224{
de25eb7f 6225 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
337837ac 6226 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
49e6bc51
VS
6227
6228 lockdep_assert_held(&dev_priv->pps_mutex);
6229
6230 if (!edp_have_panel_vdd(intel_dp))
6231 return;
6232
6233 /*
6234 * The VDD bit needs a power domain reference, so if the bit is
6235 * already enabled when we boot or resume, grab this reference and
6236 * schedule a vdd off, so we don't hold on to the reference
6237 * indefinitely.
6238 */
6239 DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
337837ac 6240 intel_display_power_get(dev_priv, intel_aux_power_domain(dig_port));
49e6bc51
VS
6241
6242 edp_panel_vdd_schedule_off(intel_dp);
6243}
6244
9f2bdb00
VS
6245static enum pipe vlv_active_pipe(struct intel_dp *intel_dp)
6246{
de25eb7f 6247 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
59b74c49
VS
6248 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
6249 enum pipe pipe;
9f2bdb00 6250
59b74c49
VS
6251 if (intel_dp_port_enabled(dev_priv, intel_dp->output_reg,
6252 encoder->port, &pipe))
6253 return pipe;
9f2bdb00 6254
59b74c49 6255 return INVALID_PIPE;
9f2bdb00
VS
6256}
6257
bf93ba67 6258void intel_dp_encoder_reset(struct drm_encoder *encoder)
6d93c0c4 6259{
64989ca4 6260 struct drm_i915_private *dev_priv = to_i915(encoder->dev);
dd75f6dd
ID
6261 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
6262 struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);
69d93820 6263 intel_wakeref_t wakeref;
64989ca4
VS
6264
6265 if (!HAS_DDI(dev_priv))
6266 intel_dp->DP = I915_READ(intel_dp->output_reg);
49e6bc51 6267
dd75f6dd 6268 if (lspcon->active)
910530c0
SS
6269 lspcon_resume(lspcon);
6270
d7e8ef02
MN
6271 intel_dp->reset_link_params = true;
6272
69d93820
CW
6273 with_pps_lock(intel_dp, wakeref) {
6274 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
6275 intel_dp->active_pipe = vlv_active_pipe(intel_dp);
49e6bc51 6276
69d93820
CW
6277 if (intel_dp_is_edp(intel_dp)) {
6278 /*
6279 * Reinit the power sequencer, in case BIOS did
6280 * something nasty with it.
6281 */
6282 intel_dp_pps_init(intel_dp);
6283 intel_edp_panel_vdd_sanitize(intel_dp);
6284 }
9f2bdb00 6285 }
6d93c0c4
ID
6286}
6287
a4fc5ed6 6288static const struct drm_connector_funcs intel_dp_connector_funcs = {
beb60608 6289 .force = intel_dp_force,
a4fc5ed6 6290 .fill_modes = drm_helper_probe_single_connector_modes,
8f647a01
ML
6291 .atomic_get_property = intel_digital_connector_atomic_get_property,
6292 .atomic_set_property = intel_digital_connector_atomic_set_property,
7a418e34 6293 .late_register = intel_dp_connector_register,
c191eca1 6294 .early_unregister = intel_dp_connector_unregister,
d4b26e4f 6295 .destroy = intel_connector_destroy,
c6f95f27 6296 .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
8f647a01 6297 .atomic_duplicate_state = intel_digital_connector_duplicate_state,
a4fc5ed6
KP
6298};
6299
6300static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
6c5ed5ae 6301 .detect_ctx = intel_dp_detect,
a4fc5ed6
KP
6302 .get_modes = intel_dp_get_modes,
6303 .mode_valid = intel_dp_mode_valid,
8f647a01 6304 .atomic_check = intel_digital_connector_atomic_check,
a4fc5ed6
KP
6305};
6306
a4fc5ed6 6307static const struct drm_encoder_funcs intel_dp_enc_funcs = {
6d93c0c4 6308 .reset = intel_dp_encoder_reset,
24d05927 6309 .destroy = intel_dp_encoder_destroy,
a4fc5ed6
KP
6310};
6311
/*
 * Handle a hotplug (HPD) pulse on a DP digital port.
 *
 * Long pulses on eDP are ignored (see comment below); other long pulses
 * just flag the link parameters for re-computation and let the caller's
 * full detect path run. Short pulses are serviced here: MST sink events
 * via intel_dp_check_mst_status(), SST sinks via intel_dp_short_pulse().
 * AUX power is held (wakeref) for the duration of the short-pulse handling.
 *
 * Returns IRQ_HANDLED if the pulse was consumed here, IRQ_NONE otherwise.
 */
enum irqreturn
intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
{
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	enum irqreturn ret = IRQ_NONE;
	intel_wakeref_t wakeref;

	if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
		/*
		 * vdd off can generate a long pulse on eDP which
		 * would require vdd on to handle it, and thus we
		 * would end up in an endless cycle of
		 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
		 */
		DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
			      port_name(intel_dig_port->base.port));
		return IRQ_HANDLED;
	}

	DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
		      port_name(intel_dig_port->base.port),
		      long_hpd ? "long" : "short");

	if (long_hpd) {
		/* Full re-detect will happen elsewhere; start link params fresh. */
		intel_dp->reset_link_params = true;
		return IRQ_NONE;
	}

	/* Short-pulse handling needs AUX; hold the power domain until put_power. */
	wakeref = intel_display_power_get(dev_priv,
					  intel_aux_power_domain(intel_dig_port));

	if (intel_dp->is_mst) {
		if (intel_dp_check_mst_status(intel_dp) == -EINVAL) {
			/*
			 * If we were in MST mode, and device is not
			 * there, get out of MST mode
			 */
			DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
				      intel_dp->is_mst, intel_dp->mst_mgr.mst_state);
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
							intel_dp->is_mst);
			goto put_power;
		}
	}

	if (!intel_dp->is_mst) {
		bool handled;

		handled = intel_dp_short_pulse(intel_dp);

		if (!handled)
			goto put_power;
	}

	ret = IRQ_HANDLED;

put_power:
	intel_display_power_put(dev_priv,
				intel_aux_power_domain(intel_dig_port),
				wakeref);

	return ret;
}
6377
477ec328 6378/* check the VBT to see whether the eDP is on another port */
7b91bf7f 6379bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
36e83a18 6380{
53ce81a7
VS
6381 /*
6382 * eDP not supported on g4x. so bail out early just
6383 * for a bit extra safety in case the VBT is bonkers.
6384 */
dd11bc10 6385 if (INTEL_GEN(dev_priv) < 5)
53ce81a7
VS
6386 return false;
6387
a98d9c1d 6388 if (INTEL_GEN(dev_priv) < 9 && port == PORT_A)
3b32a35b
VS
6389 return true;
6390
951d9efe 6391 return intel_bios_is_port_edp(dev_priv, port);
36e83a18
ZY
6392}
6393
/*
 * Attach the connector properties a DP/eDP connector supports:
 * force-audio (not on g4x or port A), broadcast RGB, max bpc
 * (platform-dependent upper bound), and for eDP the scaling-mode
 * property with ASPECT as the default.
 */
static void
intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
{
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	enum port port = dp_to_dig_port(intel_dp)->base.port;

	if (!IS_G4X(dev_priv) && port != PORT_A)
		intel_attach_force_audio_property(connector);

	intel_attach_broadcast_rgb_property(connector);
	/* GMCH platforms top out at 10 bpc, gen5+ at 12 bpc. */
	if (HAS_GMCH(dev_priv))
		drm_connector_attach_max_bpc_property(connector, 6, 10);
	else if (INTEL_GEN(dev_priv) >= 5)
		drm_connector_attach_max_bpc_property(connector, 6, 12);

	if (intel_dp_is_edp(intel_dp)) {
		u32 allowed_scalers;

		allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT) | BIT(DRM_MODE_SCALE_FULLSCREEN);
		/* CENTER scaling is only available off GMCH platforms. */
		if (!HAS_GMCH(dev_priv))
			allowed_scalers |= BIT(DRM_MODE_SCALE_CENTER);

		drm_connector_attach_scaling_mode_property(connector, allowed_scalers);

		connector->state->scaling_mode = DRM_MODE_SCALE_ASPECT;

	}
}
6422
/*
 * Seed the panel power-sequencing timestamps as if the panel had just
 * been powered off and the backlight just toggled "now", so the delay
 * bookkeeping starts from a conservative baseline.
 */
static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
{
	/* boottime so suspended time counts toward the power-cycle delay */
	intel_dp->panel_power_off_time = ktime_get_boottime();
	intel_dp->last_power_on = jiffies;
	intel_dp->last_backlight_off = jiffies;
}
6429
/*
 * Read the current panel power sequencer delays out of the PPS registers
 * into @seq (units of 100 usec, matching the HW fields; t11_t12 is scaled
 * from the register's 100 msec units). On platforms without a separate
 * PP_DIV register the power-cycle delay lives in PP_CONTROL instead.
 */
static void
intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 pp_on, pp_off, pp_ctl;
	struct pps_registers regs;

	intel_pps_get_registers(intel_dp, &regs);

	pp_ctl = ironlake_get_pp_control(intel_dp);

	/* Ensure PPS is unlocked */
	if (!HAS_DDI(dev_priv))
		I915_WRITE(regs.pp_ctrl, pp_ctl);

	pp_on = I915_READ(regs.pp_on);
	pp_off = I915_READ(regs.pp_off);

	/* Pull timing values out of registers */
	seq->t1_t3 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, pp_on);
	seq->t8 = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, pp_on);
	seq->t9 = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, pp_off);
	seq->t10 = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, pp_off);

	if (i915_mmio_reg_valid(regs.pp_div)) {
		u32 pp_div;

		pp_div = I915_READ(regs.pp_div);

		/* convert 100 msec units to the 100 usec units used above */
		seq->t11_t12 = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, pp_div) * 1000;
	} else {
		seq->t11_t12 = REG_FIELD_GET(BXT_POWER_CYCLE_DELAY_MASK, pp_ctl) * 1000;
	}
}
6464
/* Log one edp_power_seq (all fields in 100 usec units) tagged with @state_name. */
static void
intel_pps_dump_state(const char *state_name, const struct edp_power_seq *seq)
{
	DRM_DEBUG_KMS("%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      state_name,
		      seq->t1_t3, seq->t8, seq->t9, seq->t10, seq->t11_t12);
}
6472
/*
 * Cross-check the software copy of the PPS delays against what the
 * hardware registers currently hold; log both on any mismatch.
 */
static void
intel_pps_verify_state(struct intel_dp *intel_dp)
{
	struct edp_power_seq hw;
	struct edp_power_seq *sw = &intel_dp->pps_delays;

	intel_pps_readout_hw_state(intel_dp, &hw);

	if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 ||
	    hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) {
		DRM_ERROR("PPS state mismatch\n");
		intel_pps_dump_state("sw", sw);
		intel_pps_dump_state("hw", &hw);
	}
}
6488
/*
 * Compute the final panel power sequencer delays (intel_dp->pps_delays)
 * by taking the max of the current HW values and the VBT values for each
 * field, falling back to the eDP spec limits when both are zero. Also
 * derives the millisecond delay values used by the software waits.
 * Must be called with pps_mutex held; a nonzero t11_t12 marks the
 * structure as already initialized.
 */
static void
intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct edp_power_seq cur, vbt, spec,
		*final = &intel_dp->pps_delays;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* already initialized? */
	if (final->t11_t12 != 0)
		return;

	intel_pps_readout_hw_state(intel_dp, &cur);

	intel_pps_dump_state("cur", &cur);

	vbt = dev_priv->vbt.edp.pps;
	/* On Toshiba Satellite P50-C-18C system the VBT T12 delay
	 * of 500ms appears to be too short. Occasionally the panel
	 * just fails to power back on. Increasing the delay to 800ms
	 * seems sufficient to avoid this problem.
	 */
	if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) {
		vbt.t11_t12 = max_t(u16, vbt.t11_t12, 1300 * 10);
		DRM_DEBUG_KMS("Increasing T12 panel delay as per the quirk to %d\n",
			      vbt.t11_t12);
	}
	/* T11_T12 delay is special and actually in units of 100ms, but zero
	 * based in the hw (so we need to add 100 ms). But the sw vbt
	 * table multiplies it with 1000 to make it in units of 100usec,
	 * too. */
	vbt.t11_t12 += 100 * 10;

	/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
	 * our hw here, which are all in 100usec. */
	spec.t1_t3 = 210 * 10;
	spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
	spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
	spec.t10 = 500 * 10;
	/* This one is special and actually in units of 100ms, but zero
	 * based in the hw (so we need to add 100 ms). But the sw vbt
	 * table multiplies it with 1000 to make it in units of 100usec,
	 * too. */
	spec.t11_t12 = (510 + 100) * 10;

	intel_pps_dump_state("vbt", &vbt);

	/* Use the max of the register settings and vbt. If both are
	 * unset, fall back to the spec limits. */
#define assign_final(field)	final->field = (max(cur.field, vbt.field) == 0 ? \
				       spec.field : \
				       max(cur.field, vbt.field))
	assign_final(t1_t3);
	assign_final(t8);
	assign_final(t9);
	assign_final(t10);
	assign_final(t11_t12);
#undef assign_final

	/* Convert 100 usec units to whole milliseconds, rounding up. */
#define get_delay(field)	(DIV_ROUND_UP(final->field, 10))
	intel_dp->panel_power_up_delay = get_delay(t1_t3);
	intel_dp->backlight_on_delay = get_delay(t8);
	intel_dp->backlight_off_delay = get_delay(t9);
	intel_dp->panel_power_down_delay = get_delay(t10);
	intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
#undef get_delay

	DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
		      intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
		      intel_dp->panel_power_cycle_delay);

	DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
		      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);

	/*
	 * We override the HW backlight delays to 1 because we do manual waits
	 * on them. For T8, even BSpec recommends doing it. For T9, if we
	 * don't do this, we'll end up waiting for the backlight off delay
	 * twice: once when we do the manual sleep, and once when we disable
	 * the panel and wait for the PP_STATUS bit to become zero.
	 */
	final->t8 = 1;
	final->t9 = 1;

	/*
	 * HW has only a 100msec granularity for t11_t12 so round it up
	 * accordingly.
	 */
	final->t11_t12 = roundup(final->t11_t12, 100 * 10);
}
6580
/*
 * Program the panel power sequencer registers from the previously computed
 * delays in intel_dp->pps_delays: PP_ON/PP_OFF timing fields, the port
 * select bits (VLV/CHV and IBX/CPT only), and the power-cycle delay plus
 * reference divider (via PP_DIV where it exists, else PP_CONTROL).
 *
 * @force_disable_vdd: clear a BIOS-left EDP_FORCE_VDD first (see comment
 * below for why this matters on some VLV machines).
 *
 * Must be called with pps_mutex held.
 */
static void
intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
					      bool force_disable_vdd)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 pp_on, pp_off, port_sel = 0;
	int div = dev_priv->rawclk_freq / 1000;
	struct pps_registers regs;
	enum port port = dp_to_dig_port(intel_dp)->base.port;
	const struct edp_power_seq *seq = &intel_dp->pps_delays;

	lockdep_assert_held(&dev_priv->pps_mutex);

	intel_pps_get_registers(intel_dp, &regs);

	/*
	 * On some VLV machines the BIOS can leave the VDD
	 * enabled even on power sequencers which aren't
	 * hooked up to any port. This would mess up the
	 * power domain tracking the first time we pick
	 * one of these power sequencers for use since
	 * edp_panel_vdd_on() would notice that the VDD was
	 * already on and therefore wouldn't grab the power
	 * domain reference. Disable VDD first to avoid this.
	 * This also avoids spuriously turning the VDD on as
	 * soon as the new power sequencer gets initialized.
	 */
	if (force_disable_vdd) {
		u32 pp = ironlake_get_pp_control(intel_dp);

		WARN(pp & PANEL_POWER_ON, "Panel power already on\n");

		if (pp & EDP_FORCE_VDD)
			DRM_DEBUG_KMS("VDD already on, disabling first\n");

		pp &= ~EDP_FORCE_VDD;

		I915_WRITE(regs.pp_ctrl, pp);
	}

	pp_on = REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, seq->t1_t3) |
		REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, seq->t8);
	pp_off = REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, seq->t9) |
		 REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, seq->t10);

	/* Haswell doesn't have any port selection bits for the panel
	 * power sequencer any more. */
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		port_sel = PANEL_PORT_SELECT_VLV(port);
	} else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
		switch (port) {
		case PORT_A:
			port_sel = PANEL_PORT_SELECT_DPA;
			break;
		case PORT_C:
			port_sel = PANEL_PORT_SELECT_DPC;
			break;
		case PORT_D:
			port_sel = PANEL_PORT_SELECT_DPD;
			break;
		default:
			MISSING_CASE(port);
			break;
		}
	}

	pp_on |= port_sel;

	I915_WRITE(regs.pp_on, pp_on);
	I915_WRITE(regs.pp_off, pp_off);

	/*
	 * Compute the divisor for the pp clock, simply match the Bspec formula.
	 */
	if (i915_mmio_reg_valid(regs.pp_div)) {
		I915_WRITE(regs.pp_div,
			   REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, (100 * div) / 2 - 1) |
			   REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000)));
	} else {
		u32 pp_ctl;

		pp_ctl = I915_READ(regs.pp_ctrl);
		pp_ctl &= ~BXT_POWER_CYCLE_DELAY_MASK;
		pp_ctl |= REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000));
		I915_WRITE(regs.pp_ctrl, pp_ctl);
	}

	DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
		      I915_READ(regs.pp_on),
		      I915_READ(regs.pp_off),
		      i915_mmio_reg_valid(regs.pp_div) ?
		      I915_READ(regs.pp_div) :
		      (I915_READ(regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK));
}
6675
46bd8383 6676static void intel_dp_pps_init(struct intel_dp *intel_dp)
335f752b 6677{
de25eb7f 6678 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
920a14b2
TU
6679
6680 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
335f752b
ID
6681 vlv_initial_power_sequencer_setup(intel_dp);
6682 } else {
46bd8383
VS
6683 intel_dp_init_panel_power_sequencer(intel_dp);
6684 intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
335f752b
ID
6685 }
6686}
6687
b33a2815
VK
6688/**
6689 * intel_dp_set_drrs_state - program registers for RR switch to take effect
5423adf1 6690 * @dev_priv: i915 device
e896402c 6691 * @crtc_state: a pointer to the active intel_crtc_state
b33a2815
VK
6692 * @refresh_rate: RR to be programmed
6693 *
6694 * This function gets called when refresh rate (RR) has to be changed from
6695 * one frequency to another. Switches can be between high and low RR
6696 * supported by the panel or to any other RR based on media playback (in
6697 * this case, RR value needs to be passed from user space).
6698 *
6699 * The caller of this function needs to take a lock on dev_priv->drrs.
6700 */
85cb48a1 6701static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
5f88a9c6 6702 const struct intel_crtc_state *crtc_state,
85cb48a1 6703 int refresh_rate)
439d7ac0 6704{
439d7ac0 6705 struct intel_encoder *encoder;
96178eeb
VK
6706 struct intel_digital_port *dig_port = NULL;
6707 struct intel_dp *intel_dp = dev_priv->drrs.dp;
85cb48a1 6708 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
96178eeb 6709 enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
439d7ac0
PB
6710
6711 if (refresh_rate <= 0) {
6712 DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
6713 return;
6714 }
6715
96178eeb
VK
6716 if (intel_dp == NULL) {
6717 DRM_DEBUG_KMS("DRRS not supported.\n");
439d7ac0
PB
6718 return;
6719 }
6720
96178eeb
VK
6721 dig_port = dp_to_dig_port(intel_dp);
6722 encoder = &dig_port->base;
439d7ac0
PB
6723
6724 if (!intel_crtc) {
6725 DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
6726 return;
6727 }
6728
96178eeb 6729 if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
439d7ac0
PB
6730 DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
6731 return;
6732 }
6733
96178eeb
VK
6734 if (intel_dp->attached_connector->panel.downclock_mode->vrefresh ==
6735 refresh_rate)
439d7ac0
PB
6736 index = DRRS_LOW_RR;
6737
96178eeb 6738 if (index == dev_priv->drrs.refresh_rate_type) {
439d7ac0
PB
6739 DRM_DEBUG_KMS(
6740 "DRRS requested for previously set RR...ignoring\n");
6741 return;
6742 }
6743
85cb48a1 6744 if (!crtc_state->base.active) {
439d7ac0
PB
6745 DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
6746 return;
6747 }
6748
85cb48a1 6749 if (INTEL_GEN(dev_priv) >= 8 && !IS_CHERRYVIEW(dev_priv)) {
a4c30b1d
VK
6750 switch (index) {
6751 case DRRS_HIGH_RR:
4c354754 6752 intel_dp_set_m_n(crtc_state, M1_N1);
a4c30b1d
VK
6753 break;
6754 case DRRS_LOW_RR:
4c354754 6755 intel_dp_set_m_n(crtc_state, M2_N2);
a4c30b1d
VK
6756 break;
6757 case DRRS_MAX_RR:
6758 default:
6759 DRM_ERROR("Unsupported refreshrate type\n");
6760 }
85cb48a1
ML
6761 } else if (INTEL_GEN(dev_priv) > 6) {
6762 i915_reg_t reg = PIPECONF(crtc_state->cpu_transcoder);
649636ef 6763 u32 val;
a4c30b1d 6764
649636ef 6765 val = I915_READ(reg);
439d7ac0 6766 if (index > DRRS_HIGH_RR) {
85cb48a1 6767 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
6fa7aec1
VK
6768 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
6769 else
6770 val |= PIPECONF_EDP_RR_MODE_SWITCH;
439d7ac0 6771 } else {
85cb48a1 6772 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
6fa7aec1
VK
6773 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
6774 else
6775 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
439d7ac0
PB
6776 }
6777 I915_WRITE(reg, val);
6778 }
6779
4e9ac947
VK
6780 dev_priv->drrs.refresh_rate_type = index;
6781
6782 DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
6783}
6784
/**
 * intel_edp_drrs_enable - init drrs struct if supported
 * @intel_dp: DP struct
 * @crtc_state: A pointer to the active crtc state.
 *
 * Initializes frontbuffer_bits and drrs.dp. Bails out if the crtc has
 * no DRRS support, if PSR is active (the two are mutually exclusive
 * here), or if DRRS is already enabled for some DP.
 */
void intel_edp_drrs_enable(struct intel_dp *intel_dp,
			   const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!crtc_state->has_drrs) {
		DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
		return;
	}

	if (dev_priv->psr.enabled) {
		DRM_DEBUG_KMS("PSR enabled. Not enabling DRRS.\n");
		return;
	}

	mutex_lock(&dev_priv->drrs.mutex);
	if (dev_priv->drrs.dp) {
		DRM_DEBUG_KMS("DRRS already enabled\n");
		goto unlock;
	}

	dev_priv->drrs.busy_frontbuffer_bits = 0;

	dev_priv->drrs.dp = intel_dp;

unlock:
	mutex_unlock(&dev_priv->drrs.mutex);
}
6820
/**
 * intel_edp_drrs_disable - Disable DRRS
 * @intel_dp: DP struct
 * @old_crtc_state: Pointer to old crtc_state.
 *
 * If currently running at the low refresh rate, switch back to the
 * panel's fixed (high) rate first, then clear drrs.dp and cancel any
 * pending downclock work.
 */
void intel_edp_drrs_disable(struct intel_dp *intel_dp,
			    const struct intel_crtc_state *old_crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	if (!old_crtc_state->has_drrs)
		return;

	mutex_lock(&dev_priv->drrs.mutex);
	if (!dev_priv->drrs.dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	/* Restore the fixed mode's refresh rate before tearing down. */
	if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv, old_crtc_state,
			intel_dp->attached_connector->panel.fixed_mode->vrefresh);

	dev_priv->drrs.dp = NULL;
	mutex_unlock(&dev_priv->drrs.mutex);

	/* Outside the mutex: the work itself takes drrs.mutex. */
	cancel_delayed_work_sync(&dev_priv->drrs.work);
}
6850
/*
 * Delayed work that drops to the low refresh rate once the screen has
 * been idle (no busy frontbuffer bits) for the scheduled timeout.
 */
static void intel_edp_drrs_downclock_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), drrs.work.work);
	struct intel_dp *intel_dp;

	mutex_lock(&dev_priv->drrs.mutex);

	intel_dp = dev_priv->drrs.dp;

	if (!intel_dp)
		goto unlock;

	/*
	 * The delayed work can race with an invalidate hence we need to
	 * recheck.
	 */

	if (dev_priv->drrs.busy_frontbuffer_bits)
		goto unlock;

	if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR) {
		struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;

		intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
			intel_dp->attached_connector->panel.downclock_mode->vrefresh);
	}

unlock:
	mutex_unlock(&dev_priv->drrs.mutex);
}
6882
/**
 * intel_edp_drrs_invalidate - Disable Idleness DRRS
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * This function gets called every time rendering on the given planes starts.
 * Hence DRRS needs to be Upclocked, i.e. (LOW_RR -> HIGH_RR).
 *
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
 */
void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
			       unsigned int frontbuffer_bits)
{
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
		return;

	/* A pending downclock must not fire while the screen is busy. */
	cancel_delayed_work(&dev_priv->drrs.work);

	mutex_lock(&dev_priv->drrs.mutex);
	if (!dev_priv->drrs.dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	/* Only track bits belonging to the DRRS pipe. */
	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;

	/* invalidate means busy screen hence upclock */
	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
			dev_priv->drrs.dp->attached_connector->panel.fixed_mode->vrefresh);

	mutex_unlock(&dev_priv->drrs.mutex);
}
6923
/**
 * intel_edp_drrs_flush - Restart Idleness DRRS
 * @dev_priv: i915 device
 * @frontbuffer_bits: frontbuffer plane tracking bits
 *
 * This function gets called every time rendering on the given planes has
 * completed or flip on a crtc is completed. So DRRS should be upclocked
 * (LOW_RR -> HIGH_RR). And also Idleness detection should be started again,
 * if no other planes are dirty.
 *
 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
 */
void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
			  unsigned int frontbuffer_bits)
{
	struct drm_crtc *crtc;
	enum pipe pipe;

	if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
		return;

	/* Will be rescheduled below if the screen went fully idle. */
	cancel_delayed_work(&dev_priv->drrs.work);

	mutex_lock(&dev_priv->drrs.mutex);
	if (!dev_priv->drrs.dp) {
		mutex_unlock(&dev_priv->drrs.mutex);
		return;
	}

	crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc;
	pipe = to_intel_crtc(crtc)->pipe;

	/* Only track bits belonging to the DRRS pipe. */
	frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
	dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;

	/* flush means busy screen hence upclock */
	if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
		intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
			dev_priv->drrs.dp->attached_connector->panel.fixed_mode->vrefresh);

	/*
	 * flush also means no more activity hence schedule downclock, if all
	 * other fbs are quiescent too
	 */
	if (!dev_priv->drrs.busy_frontbuffer_bits)
		schedule_delayed_work(&dev_priv->drrs.work,
				      msecs_to_jiffies(1000));
	mutex_unlock(&dev_priv->drrs.mutex);
}
6973
b33a2815
VK
6974/**
6975 * DOC: Display Refresh Rate Switching (DRRS)
6976 *
6977 * Display Refresh Rate Switching (DRRS) is a power conservation feature
 * which enables switching between low and high refresh rates,
6979 * dynamically, based on the usage scenario. This feature is applicable
6980 * for internal panels.
6981 *
6982 * Indication that the panel supports DRRS is given by the panel EDID, which
6983 * would list multiple refresh rates for one resolution.
6984 *
6985 * DRRS is of 2 types - static and seamless.
6986 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
6987 * (may appear as a blink on screen) and is used in dock-undock scenario.
6988 * Seamless DRRS involves changing RR without any visual effect to the user
6989 * and can be used during normal system usage. This is done by programming
6990 * certain registers.
6991 *
6992 * Support for static/seamless DRRS may be indicated in the VBT based on
6993 * inputs from the panel spec.
6994 *
6995 * DRRS saves power by switching to low RR based on usage scenarios.
6996 *
2e7a5701
DV
6997 * The implementation is based on frontbuffer tracking implementation. When
6998 * there is a disturbance on the screen triggered by user activity or a periodic
6999 * system activity, DRRS is disabled (RR is changed to high RR). When there is
7000 * no movement on screen, after a timeout of 1 second, a switch to low RR is
7001 * made.
7002 *
7003 * For integration with frontbuffer tracking code, intel_edp_drrs_invalidate()
7004 * and intel_edp_drrs_flush() are called.
b33a2815
VK
7005 *
7006 * DRRS can be further extended to support other internal panels and also
7007 * the scenario of video playback wherein RR is set based on the rate
7008 * requested by userspace.
7009 */
7010
/**
 * intel_dp_drrs_init - Init basic DRRS work and mutex.
 * @connector: eDP connector
 * @fixed_mode: preferred mode of panel
 *
 * This function is called only once at driver load to initialize basic
 * DRRS stuff.
 *
 * Returns:
 * Downclock mode if panel supports it, else return NULL.
 * DRRS support is determined by the presence of downclock mode (apart
 * from VBT setting).
 */
static struct drm_display_mode *
intel_dp_drrs_init(struct intel_connector *connector,
		   struct drm_display_mode *fixed_mode)
{
	struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
	struct drm_display_mode *downclock_mode = NULL;

	/* Work/mutex are set up even when support checks below fail. */
	INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
	mutex_init(&dev_priv->drrs.mutex);

	if (INTEL_GEN(dev_priv) <= 6) {
		DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
		return NULL;
	}

	if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
		DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
		return NULL;
	}

	downclock_mode = intel_panel_edid_downclock_mode(connector, fixed_mode);
	if (!downclock_mode) {
		DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
		return NULL;
	}

	dev_priv->drrs.type = dev_priv->vbt.drrs_type;

	dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
	DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
	return downclock_mode;
}
7056
/*
 * One-time eDP connector setup: initialize the panel power sequencer,
 * read DPCD and EDID, pick the fixed (and possibly downclock/DRRS) mode,
 * and set up the backlight. Returns true on success or when the port is
 * not eDP at all; returns false (with VDD torn down) when the panel
 * cannot be initialized, in which case the connector should not be
 * registered as eDP.
 */
static bool intel_edp_init_connector(struct intel_dp *intel_dp,
				     struct intel_connector *intel_connector)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct drm_device *dev = &dev_priv->drm;
	struct drm_connector *connector = &intel_connector->base;
	struct drm_display_mode *fixed_mode = NULL;
	struct drm_display_mode *downclock_mode = NULL;
	bool has_dpcd;
	enum pipe pipe = INVALID_PIPE;
	intel_wakeref_t wakeref;
	struct edid *edid;

	if (!intel_dp_is_edp(intel_dp))
		return true;

	INIT_DELAYED_WORK(&intel_dp->panel_vdd_work, edp_panel_vdd_work);

	/*
	 * On IBX/CPT we may get here with LVDS already registered. Since the
	 * driver uses the only internal power sequencer available for both
	 * eDP and LVDS bail out early in this case to prevent interfering
	 * with an already powered-on LVDS power sequencer.
	 */
	if (intel_get_lvds_encoder(dev_priv)) {
		WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
		DRM_INFO("LVDS was detected, not registering eDP\n");

		return false;
	}

	with_pps_lock(intel_dp, wakeref) {
		intel_dp_init_panel_power_timestamps(intel_dp);
		intel_dp_pps_init(intel_dp);
		intel_edp_panel_vdd_sanitize(intel_dp);
	}

	/* Cache DPCD and EDID for edp. */
	has_dpcd = intel_edp_init_dpcd(intel_dp);

	if (!has_dpcd) {
		/* if this fails, presume the device is a ghost */
		DRM_INFO("failed to retrieve link info, disabling eDP\n");
		goto out_vdd_off;
	}

	mutex_lock(&dev->mode_config.mutex);
	edid = drm_get_edid(connector, &intel_dp->aux.ddc);
	if (edid) {
		if (drm_add_edid_modes(connector, edid)) {
			drm_connector_update_edid_property(connector,
							   edid);
		} else {
			/* EDID present but unusable: keep an ERR_PTR marker. */
			kfree(edid);
			edid = ERR_PTR(-EINVAL);
		}
	} else {
		edid = ERR_PTR(-ENOENT);
	}
	intel_connector->edid = edid;

	fixed_mode = intel_panel_edid_fixed_mode(intel_connector);
	if (fixed_mode)
		downclock_mode = intel_dp_drrs_init(intel_connector, fixed_mode);

	/* fallback to VBT if available for eDP */
	if (!fixed_mode)
		fixed_mode = intel_panel_vbt_fixed_mode(intel_connector);
	mutex_unlock(&dev->mode_config.mutex);

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		intel_dp->edp_notifier.notifier_call = edp_notify_handler;
		register_reboot_notifier(&intel_dp->edp_notifier);

		/*
		 * Figure out the current pipe for the initial backlight setup.
		 * If the current pipe isn't valid, try the PPS pipe, and if that
		 * fails just assume pipe A.
		 */
		pipe = vlv_active_pipe(intel_dp);

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = intel_dp->pps_pipe;

		if (pipe != PIPE_A && pipe != PIPE_B)
			pipe = PIPE_A;

		DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
			      pipe_name(pipe));
	}

	intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
	intel_connector->panel.backlight.power = intel_edp_backlight_power;
	intel_panel_setup_backlight(connector, pipe);

	if (fixed_mode)
		drm_connector_init_panel_orientation_property(
			connector, fixed_mode->hdisplay, fixed_mode->vdisplay);

	return true;

out_vdd_off:
	cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
	/*
	 * vdd might still be enabled due to the delayed vdd off.
	 * Make sure vdd is actually turned off here.
	 */
	with_pps_lock(intel_dp, wakeref)
		edp_panel_vdd_off_sync(intel_dp);

	return false;
}
7169
/*
 * Work item scheduled after a link-training failure: mark the connector's
 * link status BAD and send a hotplug uevent so userspace reprobes and
 * performs a new modeset (possibly at reduced link parameters).
 */
static void intel_dp_modeset_retry_work_fn(struct work_struct *work)
{
	struct intel_connector *intel_connector;
	struct drm_connector *connector;

	intel_connector = container_of(work, typeof(*intel_connector),
				       modeset_retry_work);
	connector = &intel_connector->base;
	DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
		      connector->name);

	/* Grab the locks before changing connector property */
	mutex_lock(&connector->dev->mode_config.mutex);
	/* Set connector link status to BAD and send a Uevent to notify
	 * userspace to do a modeset.
	 */
	drm_connector_set_link_status_property(connector,
					       DRM_MODE_LINK_STATUS_BAD);
	mutex_unlock(&connector->dev->mode_config.mutex);
	/* Send Hotplug uevent so userspace can reprobe */
	drm_kms_helper_hotplug_event(connector->dev);
}
7192
/*
 * Initialize the DRM connector for a DP/eDP digital port.
 *
 * Sets up the intel_dp state hanging off @intel_dig_port, registers the
 * connector with DRM, initializes AUX, optionally MST and HDCP, and runs
 * the eDP panel setup.  Returns true on success; on failure the connector
 * is cleaned up and false is returned (the caller owns the encoder/port
 * allocations).
 */
bool
intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
			struct intel_connector *intel_connector)
{
	struct drm_connector *connector = &intel_connector->base;
	struct intel_dp *intel_dp = &intel_dig_port->dp;
	struct intel_encoder *intel_encoder = &intel_dig_port->base;
	struct drm_device *dev = intel_encoder->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum port port = intel_encoder->port;
	int type;

	/* Initialize the work for modeset in case of link train failure */
	INIT_WORK(&intel_connector->modeset_retry_work,
		  intel_dp_modeset_retry_work_fn);

	/* A port with zero lanes cannot carry DP; refuse early. */
	if (WARN(intel_dig_port->max_lanes < 1,
		 "Not enough lanes (%d) for DP on port %c\n",
		 intel_dig_port->max_lanes, port_name(port)))
		return false;

	intel_dp_set_source_rates(intel_dp);

	intel_dp->reset_link_params = true;
	/* Pipes are discovered below / on first use; start out invalid. */
	intel_dp->pps_pipe = INVALID_PIPE;
	intel_dp->active_pipe = INVALID_PIPE;

	/* intel_dp vfuncs */
	if (HAS_DDI(dev_priv))
		intel_dp->prepare_link_retrain = intel_ddi_prepare_link_retrain;

	/* Preserve the current hw state. */
	intel_dp->DP = I915_READ(intel_dp->output_reg);
	intel_dp->attached_connector = intel_connector;

	if (intel_dp_is_port_edp(dev_priv, port))
		type = DRM_MODE_CONNECTOR_eDP;
	else
		type = DRM_MODE_CONNECTOR_DisplayPort;

	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		intel_dp->active_pipe = vlv_active_pipe(intel_dp);

	/*
	 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
	 * for DP the encoder type can be set by the caller to
	 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
	 */
	if (type == DRM_MODE_CONNECTOR_eDP)
		intel_encoder->type = INTEL_OUTPUT_EDP;

	/* eDP only on port B and/or C on vlv/chv */
	if (WARN_ON((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
		    intel_dp_is_edp(intel_dp) &&
		    port != PORT_B && port != PORT_C))
		return false;

	DRM_DEBUG_KMS("Adding %s connector on port %c\n",
		      type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
		      port_name(port));

	drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
	drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);

	/* GMCH platforms don't support interlaced output here. */
	if (!HAS_GMCH(dev_priv))
		connector->interlace_allowed = true;
	connector->doublescan_allowed = 0;

	intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port);

	intel_dp_aux_init(intel_dp);

	intel_connector_attach_encoder(intel_connector, intel_encoder);

	/* DDI platforms read connector hw state via the DDI helper. */
	if (HAS_DDI(dev_priv))
		intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
	else
		intel_connector->get_hw_state = intel_connector_get_hw_state;

	/* init MST on ports that can support it */
	if (HAS_DP_MST(dev_priv) && !intel_dp_is_edp(intel_dp) &&
	    (port == PORT_B || port == PORT_C ||
	     port == PORT_D || port == PORT_F))
		intel_dp_mst_encoder_init(intel_dig_port,
					  intel_connector->base.base.id);

	/* eDP panel setup; on failure unwind AUX and MST before bailing. */
	if (!intel_edp_init_connector(intel_dp, intel_connector)) {
		intel_dp_aux_fini(intel_dp);
		intel_dp_mst_encoder_cleanup(intel_dig_port);
		goto fail;
	}

	intel_dp_add_properties(intel_dp, connector);

	/* HDCP is set up for external DP only, never for eDP panels. */
	if (is_hdcp_supported(dev_priv, port) && !intel_dp_is_edp(intel_dp)) {
		int ret = intel_hdcp_init(intel_connector, &intel_dp_hdcp_shim);
		if (ret)
			DRM_DEBUG_KMS("HDCP init failed, skipping.\n");
	}

	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
	 * 0xd. Failure to do so will result in spurious interrupts being
	 * generated on the port when a cable is not attached.
	 */
	if (IS_G45(dev_priv)) {
		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
	}

	return true;

fail:
	drm_connector_cleanup(connector);

	return false;
}
f0fec3f2 7309
c39055b0 7310bool intel_dp_init(struct drm_i915_private *dev_priv,
457c52d8
CW
7311 i915_reg_t output_reg,
7312 enum port port)
f0fec3f2
PZ
7313{
7314 struct intel_digital_port *intel_dig_port;
7315 struct intel_encoder *intel_encoder;
7316 struct drm_encoder *encoder;
7317 struct intel_connector *intel_connector;
7318
b14c5679 7319 intel_dig_port = kzalloc(sizeof(*intel_dig_port), GFP_KERNEL);
f0fec3f2 7320 if (!intel_dig_port)
457c52d8 7321 return false;
f0fec3f2 7322
08d9bc92 7323 intel_connector = intel_connector_alloc();
11aee0f6
SM
7324 if (!intel_connector)
7325 goto err_connector_alloc;
f0fec3f2
PZ
7326
7327 intel_encoder = &intel_dig_port->base;
7328 encoder = &intel_encoder->base;
7329
c39055b0
ACO
7330 if (drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
7331 &intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS,
7332 "DP %c", port_name(port)))
893da0c9 7333 goto err_encoder_init;
f0fec3f2 7334
c85d200e 7335 intel_encoder->hotplug = intel_dp_hotplug;
5bfe2ac0 7336 intel_encoder->compute_config = intel_dp_compute_config;
00c09d70 7337 intel_encoder->get_hw_state = intel_dp_get_hw_state;
045ac3b5 7338 intel_encoder->get_config = intel_dp_get_config;
63a23d24 7339 intel_encoder->update_pipe = intel_panel_update_backlight;
07f9cd0b 7340 intel_encoder->suspend = intel_dp_encoder_suspend;
920a14b2 7341 if (IS_CHERRYVIEW(dev_priv)) {
9197c88b 7342 intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
e4a1d846
CML
7343 intel_encoder->pre_enable = chv_pre_enable_dp;
7344 intel_encoder->enable = vlv_enable_dp;
1a8ff607 7345 intel_encoder->disable = vlv_disable_dp;
580d3811 7346 intel_encoder->post_disable = chv_post_disable_dp;
d6db995f 7347 intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
11a914c2 7348 } else if (IS_VALLEYVIEW(dev_priv)) {
ecff4f3b 7349 intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
ab1f90f9
JN
7350 intel_encoder->pre_enable = vlv_pre_enable_dp;
7351 intel_encoder->enable = vlv_enable_dp;
1a8ff607 7352 intel_encoder->disable = vlv_disable_dp;
49277c31 7353 intel_encoder->post_disable = vlv_post_disable_dp;
ab1f90f9 7354 } else {
ecff4f3b
JN
7355 intel_encoder->pre_enable = g4x_pre_enable_dp;
7356 intel_encoder->enable = g4x_enable_dp;
1a8ff607 7357 intel_encoder->disable = g4x_disable_dp;
51a9f6df 7358 intel_encoder->post_disable = g4x_post_disable_dp;
ab1f90f9 7359 }
f0fec3f2 7360
f0fec3f2 7361 intel_dig_port->dp.output_reg = output_reg;
ccb1a831 7362 intel_dig_port->max_lanes = 4;
f0fec3f2 7363
cca0502b 7364 intel_encoder->type = INTEL_OUTPUT_DP;
79f255a0 7365 intel_encoder->power_domain = intel_port_to_power_domain(port);
920a14b2 7366 if (IS_CHERRYVIEW(dev_priv)) {
882ec384
VS
7367 if (port == PORT_D)
7368 intel_encoder->crtc_mask = 1 << 2;
7369 else
7370 intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
7371 } else {
7372 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
7373 }
bc079e8b 7374 intel_encoder->cloneable = 0;
03cdc1d4 7375 intel_encoder->port = port;
f0fec3f2 7376
13cf5504 7377 intel_dig_port->hpd_pulse = intel_dp_hpd_pulse;
13cf5504 7378
385e4de0
VS
7379 if (port != PORT_A)
7380 intel_infoframe_init(intel_dig_port);
7381
39053089 7382 intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
11aee0f6
SM
7383 if (!intel_dp_init_connector(intel_dig_port, intel_connector))
7384 goto err_init_connector;
7385
457c52d8 7386 return true;
11aee0f6
SM
7387
7388err_init_connector:
7389 drm_encoder_cleanup(encoder);
893da0c9 7390err_encoder_init:
11aee0f6
SM
7391 kfree(intel_connector);
7392err_connector_alloc:
7393 kfree(intel_dig_port);
457c52d8 7394 return false;
f0fec3f2 7395}
0e32b39c 7396
1a4313d1 7397void intel_dp_mst_suspend(struct drm_i915_private *dev_priv)
0e32b39c 7398{
1a4313d1
VS
7399 struct intel_encoder *encoder;
7400
7401 for_each_intel_encoder(&dev_priv->drm, encoder) {
7402 struct intel_dp *intel_dp;
0e32b39c 7403
1a4313d1
VS
7404 if (encoder->type != INTEL_OUTPUT_DDI)
7405 continue;
5aa56969 7406
1a4313d1 7407 intel_dp = enc_to_intel_dp(&encoder->base);
5aa56969 7408
1a4313d1 7409 if (!intel_dp->can_mst)
0e32b39c
DA
7410 continue;
7411
1a4313d1
VS
7412 if (intel_dp->is_mst)
7413 drm_dp_mst_topology_mgr_suspend(&intel_dp->mst_mgr);
0e32b39c
DA
7414 }
7415}
7416
1a4313d1 7417void intel_dp_mst_resume(struct drm_i915_private *dev_priv)
0e32b39c 7418{
1a4313d1 7419 struct intel_encoder *encoder;
0e32b39c 7420
1a4313d1
VS
7421 for_each_intel_encoder(&dev_priv->drm, encoder) {
7422 struct intel_dp *intel_dp;
5aa56969 7423 int ret;
0e32b39c 7424
1a4313d1
VS
7425 if (encoder->type != INTEL_OUTPUT_DDI)
7426 continue;
7427
7428 intel_dp = enc_to_intel_dp(&encoder->base);
7429
7430 if (!intel_dp->can_mst)
5aa56969 7431 continue;
0e32b39c 7432
1a4313d1 7433 ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr);
6be1cf96
LP
7434 if (ret) {
7435 intel_dp->is_mst = false;
7436 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
7437 false);
7438 }
0e32b39c
DA
7439 }
7440}