]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blame - drivers/gpu/drm/i915/display/intel_dp.c
drm/i915: Init lspcon after HPD in intel_dp_detect()
[mirror_ubuntu-jammy-kernel.git] / drivers / gpu / drm / i915 / display / intel_dp.c
CommitLineData
a4fc5ed6
KP
1/*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Keith Packard <keithp@keithp.com>
25 *
26 */
27
2d1a8a48 28#include <linux/export.h>
331c201a 29#include <linux/i2c.h>
01527b31
CT
30#include <linux/notifier.h>
31#include <linux/reboot.h>
331c201a
JN
32#include <linux/slab.h>
33#include <linux/types.h>
56c5098f 34
611032bf 35#include <asm/byteorder.h>
331c201a 36
c6f95f27 37#include <drm/drm_atomic_helper.h>
760285e7 38#include <drm/drm_crtc.h>
20f24d77 39#include <drm/drm_dp_helper.h>
760285e7 40#include <drm/drm_edid.h>
fcd70cd3 41#include <drm/drm_probe_helper.h>
331c201a 42
2126d3e9 43#include "i915_debugfs.h"
a4fc5ed6 44#include "i915_drv.h"
a09d9a80 45#include "i915_trace.h"
12392a74 46#include "intel_atomic.h"
331c201a 47#include "intel_audio.h"
ec7f29ff 48#include "intel_connector.h"
fdc24cf3 49#include "intel_ddi.h"
1d455f8d 50#include "intel_display_types.h"
27fec1f9 51#include "intel_dp.h"
e075094f 52#include "intel_dp_link_training.h"
46f2066e 53#include "intel_dp_mst.h"
b1ad4c39 54#include "intel_dpio_phy.h"
8834e365 55#include "intel_fifo_underrun.h"
408bd917 56#include "intel_hdcp.h"
0550691d 57#include "intel_hdmi.h"
dbeb38d9 58#include "intel_hotplug.h"
f3e18947 59#include "intel_lspcon.h"
42406fdc 60#include "intel_lvds.h"
44c1220a 61#include "intel_panel.h"
55367a27 62#include "intel_psr.h"
56c5098f 63#include "intel_sideband.h"
bc85328f 64#include "intel_tc.h"
b375d0ef 65#include "intel_vdsc.h"
a4fc5ed6 66
e8b2577c 67#define DP_DPRX_ESI_LEN 14
a4fc5ed6 68
d9218c8f
MN
69/* DP DSC throughput values used for slice count calculations KPixels/s */
70#define DP_DSC_PEAK_PIXEL_RATE 2720000
71#define DP_DSC_MAX_ENC_THROUGHPUT_0 340000
72#define DP_DSC_MAX_ENC_THROUGHPUT_1 400000
73
ed06efb8
ML
74/* DP DSC FEC Overhead factor = 1/(0.972261) */
75#define DP_DSC_FEC_OVERHEAD_FACTOR 972261
d9218c8f 76
559be30c
TP
77/* Compliance test status bits */
78#define INTEL_DP_RESOLUTION_SHIFT_MASK 0
79#define INTEL_DP_RESOLUTION_PREFERRED (1 << INTEL_DP_RESOLUTION_SHIFT_MASK)
80#define INTEL_DP_RESOLUTION_STANDARD (2 << INTEL_DP_RESOLUTION_SHIFT_MASK)
81#define INTEL_DP_RESOLUTION_FAILSAFE (3 << INTEL_DP_RESOLUTION_SHIFT_MASK)
82
9dd4ffdf 83struct dp_link_dpll {
840b32b7 84 int clock;
9dd4ffdf
CML
85 struct dpll dpll;
86};
87
45101e93 88static const struct dp_link_dpll g4x_dpll[] = {
840b32b7 89 { 162000,
9dd4ffdf 90 { .p1 = 2, .p2 = 10, .n = 2, .m1 = 23, .m2 = 8 } },
840b32b7 91 { 270000,
9dd4ffdf
CML
92 { .p1 = 1, .p2 = 10, .n = 1, .m1 = 14, .m2 = 2 } }
93};
94
95static const struct dp_link_dpll pch_dpll[] = {
840b32b7 96 { 162000,
9dd4ffdf 97 { .p1 = 2, .p2 = 10, .n = 1, .m1 = 12, .m2 = 9 } },
840b32b7 98 { 270000,
9dd4ffdf
CML
99 { .p1 = 1, .p2 = 10, .n = 2, .m1 = 14, .m2 = 8 } }
100};
101
65ce4bf5 102static const struct dp_link_dpll vlv_dpll[] = {
840b32b7 103 { 162000,
58f6e632 104 { .p1 = 3, .p2 = 2, .n = 5, .m1 = 3, .m2 = 81 } },
840b32b7 105 { 270000,
65ce4bf5
CML
106 { .p1 = 2, .p2 = 2, .n = 1, .m1 = 2, .m2 = 27 } }
107};
108
ef9348c8
CML
109/*
110 * CHV supports eDP 1.4 that have more link rates.
111 * Below only provides the fixed rate but exclude variable rate.
112 */
113static const struct dp_link_dpll chv_dpll[] = {
114 /*
115 * CHV requires to program fractional division for m2.
116 * m2 is stored in fixed point format using formula below
117 * (m2_int << 22) | m2_fraction
118 */
840b32b7 119 { 162000, /* m2_int = 32, m2_fraction = 1677722 */
ef9348c8 120 { .p1 = 4, .p2 = 2, .n = 1, .m1 = 2, .m2 = 0x819999a } },
840b32b7 121 { 270000, /* m2_int = 27, m2_fraction = 0 */
ef9348c8 122 { .p1 = 4, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } },
ef9348c8 123};
637a9c63 124
d9218c8f
MN
125/* Constants for DP DSC configurations */
126static const u8 valid_dsc_bpp[] = {6, 8, 10, 12, 15};
127
128/* With Single pipe configuration, HW is capable of supporting maximum
129 * of 4 slices per line.
130 */
131static const u8 valid_dsc_slicecount[] = {1, 2, 4};
132
cfcb0fc9 133/**
1853a9da 134 * intel_dp_is_edp - is the given port attached to an eDP panel (either CPU or PCH)
cfcb0fc9
JB
135 * @intel_dp: DP struct
136 *
137 * If a CPU or PCH DP output is attached to an eDP panel, this function
138 * will return true, and false otherwise.
139 */
1853a9da 140bool intel_dp_is_edp(struct intel_dp *intel_dp)
cfcb0fc9 141{
7801f3b7 142 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
da63a9f2 143
7801f3b7 144 return dig_port->base.type == INTEL_OUTPUT_EDP;
cfcb0fc9
JB
145}
146
adc10304
VS
147static void intel_dp_link_down(struct intel_encoder *encoder,
148 const struct intel_crtc_state *old_crtc_state);
1e0560e0 149static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
4be73780 150static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);
adc10304
VS
151static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
152 const struct intel_crtc_state *crtc_state);
46bd8383 153static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
a8c3344e 154 enum pipe pipe);
f21a2198 155static void intel_dp_unset_edid(struct intel_dp *intel_dp);
a4fc5ed6 156
68f357cb
JN
157/* update sink rates from dpcd */
158static void intel_dp_set_sink_rates(struct intel_dp *intel_dp)
159{
229675d5 160 static const int dp_rates[] = {
c71b53cc 161 162000, 270000, 540000, 810000
229675d5 162 };
a8a08886 163 int i, max_rate;
68f357cb 164
639e0db2
MK
165 if (drm_dp_has_quirk(&intel_dp->desc, 0,
166 DP_DPCD_QUIRK_CAN_DO_MAX_LINK_RATE_3_24_GBPS)) {
167 /* Needed, e.g., for Apple MBP 2017, 15 inch eDP Retina panel */
168 static const int quirk_rates[] = { 162000, 270000, 324000 };
169
170 memcpy(intel_dp->sink_rates, quirk_rates, sizeof(quirk_rates));
171 intel_dp->num_sink_rates = ARRAY_SIZE(quirk_rates);
172
173 return;
174 }
175
a8a08886 176 max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]);
68f357cb 177
229675d5
JN
178 for (i = 0; i < ARRAY_SIZE(dp_rates); i++) {
179 if (dp_rates[i] > max_rate)
a8a08886 180 break;
229675d5 181 intel_dp->sink_rates[i] = dp_rates[i];
a8a08886 182 }
68f357cb 183
a8a08886 184 intel_dp->num_sink_rates = i;
68f357cb
JN
185}
186
/* Get length of rates array potentially limited by max_rate. */
static int intel_dp_rate_limit_len(const int *rates, int len, int max_rate)
{
	int n;

	/* rates[] is sorted ascending; trim entries above max_rate */
	for (n = len; n > 0; n--) {
		if (rates[n - 1] <= max_rate)
			return n;
	}

	return 0;
}
200
201/* Get length of common rates array potentially limited by max_rate. */
202static int intel_dp_common_len_rate_limit(const struct intel_dp *intel_dp,
203 int max_rate)
204{
205 return intel_dp_rate_limit_len(intel_dp->common_rates,
206 intel_dp->num_common_rates, max_rate);
207}
208
540b0b7f
JN
209/* Theoretical max between source and sink */
210static int intel_dp_max_common_rate(struct intel_dp *intel_dp)
a4fc5ed6 211{
540b0b7f 212 return intel_dp->common_rates[intel_dp->num_common_rates - 1];
a4fc5ed6
KP
213}
214
540b0b7f
JN
215/* Theoretical max between source and sink */
216static int intel_dp_max_common_lane_count(struct intel_dp *intel_dp)
eeb6324d 217{
7801f3b7
LDM
218 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
219 int source_max = dig_port->max_lanes;
540b0b7f 220 int sink_max = drm_dp_max_lane_count(intel_dp->dpcd);
7801f3b7 221 int fia_max = intel_tc_port_fia_max_lane_count(dig_port);
eeb6324d 222
db7295c2 223 return min3(source_max, sink_max, fia_max);
eeb6324d
PZ
224}
225
3d65a735 226int intel_dp_max_lane_count(struct intel_dp *intel_dp)
540b0b7f
JN
227{
228 return intel_dp->max_link_lane_count;
229}
230
int
intel_dp_link_required(int pixel_clock, int bpp)
{
	/* pixel_clock is in kHz, divide bpp by 8 for bit to Byte conversion */
	return DIV_ROUND_UP(pixel_clock * bpp, 8);
}
237
int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	/* max_link_clock is the link symbol clock (LS_Clk) in kHz and not the
	 * link rate that is generally expressed in Gbps. Since, 8 bits of data
	 * is transmitted every LS_Clk per lane, there is no need to account for
	 * the channel encoding that is done in the PHY layer here.
	 */
	return max_link_clock * max_lanes;
}
249
4ba285d4 250static int cnl_max_source_rate(struct intel_dp *intel_dp)
53ddb3cd
RV
251{
252 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
253 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
254 enum port port = dig_port->base.port;
255
b4e33881 256 u32 voltage = intel_de_read(dev_priv, CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
53ddb3cd
RV
257
258 /* Low voltage SKUs are limited to max of 5.4G */
259 if (voltage == VOLTAGE_INFO_0_85V)
4ba285d4 260 return 540000;
53ddb3cd
RV
261
262 /* For this SKU 8.1G is supported in all ports */
263 if (IS_CNL_WITH_PORT_F(dev_priv))
4ba285d4 264 return 810000;
53ddb3cd 265
3758d968 266 /* For other SKUs, max rate on ports A and D is 5.4G */
53ddb3cd 267 if (port == PORT_A || port == PORT_D)
4ba285d4 268 return 540000;
53ddb3cd 269
4ba285d4 270 return 810000;
53ddb3cd
RV
271}
272
46b527d1
MN
273static int icl_max_source_rate(struct intel_dp *intel_dp)
274{
275 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
b265a2a6 276 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
d8fe2ab6 277 enum phy phy = intel_port_to_phy(dev_priv, dig_port->base.port);
46b527d1 278
d8fe2ab6 279 if (intel_phy_is_combo(dev_priv, phy) &&
b7143860 280 !IS_ELKHARTLAKE(dev_priv) &&
b265a2a6 281 !intel_dp_is_edp(intel_dp))
46b527d1
MN
282 return 540000;
283
284 return 810000;
285}
286
55cfc580
JN
287static void
288intel_dp_set_source_rates(struct intel_dp *intel_dp)
40dba341 289{
229675d5
JN
290 /* The values must be in increasing order */
291 static const int cnl_rates[] = {
292 162000, 216000, 270000, 324000, 432000, 540000, 648000, 810000
293 };
294 static const int bxt_rates[] = {
295 162000, 216000, 243000, 270000, 324000, 432000, 540000
296 };
297 static const int skl_rates[] = {
298 162000, 216000, 270000, 324000, 432000, 540000
299 };
300 static const int hsw_rates[] = {
301 162000, 270000, 540000
302 };
303 static const int g4x_rates[] = {
304 162000, 270000
305 };
40dba341 306 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
f83acdab 307 struct intel_encoder *encoder = &dig_port->base;
40dba341 308 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
55cfc580 309 const int *source_rates;
f83acdab 310 int size, max_rate = 0, vbt_max_rate;
40dba341 311
55cfc580 312 /* This should only be done once */
eb020ca3
PB
313 drm_WARN_ON(&dev_priv->drm,
314 intel_dp->source_rates || intel_dp->num_source_rates);
55cfc580 315
46b527d1 316 if (INTEL_GEN(dev_priv) >= 10) {
d907b665 317 source_rates = cnl_rates;
4ba285d4 318 size = ARRAY_SIZE(cnl_rates);
cf819eff 319 if (IS_GEN(dev_priv, 10))
46b527d1
MN
320 max_rate = cnl_max_source_rate(intel_dp);
321 else
322 max_rate = icl_max_source_rate(intel_dp);
ba1c06a5
MN
323 } else if (IS_GEN9_LP(dev_priv)) {
324 source_rates = bxt_rates;
325 size = ARRAY_SIZE(bxt_rates);
b976dc53 326 } else if (IS_GEN9_BC(dev_priv)) {
55cfc580 327 source_rates = skl_rates;
40dba341 328 size = ARRAY_SIZE(skl_rates);
fc603ca7
JN
329 } else if ((IS_HASWELL(dev_priv) && !IS_HSW_ULX(dev_priv)) ||
330 IS_BROADWELL(dev_priv)) {
229675d5
JN
331 source_rates = hsw_rates;
332 size = ARRAY_SIZE(hsw_rates);
fc603ca7 333 } else {
229675d5
JN
334 source_rates = g4x_rates;
335 size = ARRAY_SIZE(g4x_rates);
40dba341
NM
336 }
337
f83acdab 338 vbt_max_rate = intel_bios_dp_max_link_rate(encoder);
99b91bda
JN
339 if (max_rate && vbt_max_rate)
340 max_rate = min(max_rate, vbt_max_rate);
341 else if (vbt_max_rate)
342 max_rate = vbt_max_rate;
343
4ba285d4
JN
344 if (max_rate)
345 size = intel_dp_rate_limit_len(source_rates, size, max_rate);
346
55cfc580
JN
347 intel_dp->source_rates = source_rates;
348 intel_dp->num_source_rates = size;
40dba341
NM
349}
350
351static int intersect_rates(const int *source_rates, int source_len,
352 const int *sink_rates, int sink_len,
353 int *common_rates)
354{
355 int i = 0, j = 0, k = 0;
356
357 while (i < source_len && j < sink_len) {
358 if (source_rates[i] == sink_rates[j]) {
359 if (WARN_ON(k >= DP_MAX_SUPPORTED_RATES))
360 return k;
361 common_rates[k] = source_rates[i];
362 ++k;
363 ++i;
364 ++j;
365 } else if (source_rates[i] < sink_rates[j]) {
366 ++i;
367 } else {
368 ++j;
369 }
370 }
371 return k;
372}
373
/* return index of rate in rates array, or -1 if not found */
static int intel_dp_rate_index(const int *rates, int len, int rate)
{
	int idx = 0;

	while (idx < len) {
		if (rates[idx] == rate)
			return idx;
		idx++;
	}

	return -1;
}
385
975ee5fc 386static void intel_dp_set_common_rates(struct intel_dp *intel_dp)
40dba341 387{
4f360482
PB
388 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
389
390 drm_WARN_ON(&i915->drm,
391 !intel_dp->num_source_rates || !intel_dp->num_sink_rates);
40dba341 392
975ee5fc
JN
393 intel_dp->num_common_rates = intersect_rates(intel_dp->source_rates,
394 intel_dp->num_source_rates,
395 intel_dp->sink_rates,
396 intel_dp->num_sink_rates,
397 intel_dp->common_rates);
398
399 /* Paranoia, there should always be something in common. */
4f360482 400 if (drm_WARN_ON(&i915->drm, intel_dp->num_common_rates == 0)) {
229675d5 401 intel_dp->common_rates[0] = 162000;
975ee5fc
JN
402 intel_dp->num_common_rates = 1;
403 }
404}
405
1a92c70e 406static bool intel_dp_link_params_valid(struct intel_dp *intel_dp, int link_rate,
830de422 407 u8 lane_count)
14c562c0
MN
408{
409 /*
410 * FIXME: we need to synchronize the current link parameters with
411 * hardware readout. Currently fast link training doesn't work on
412 * boot-up.
413 */
1a92c70e
MN
414 if (link_rate == 0 ||
415 link_rate > intel_dp->max_link_rate)
14c562c0
MN
416 return false;
417
1a92c70e
MN
418 if (lane_count == 0 ||
419 lane_count > intel_dp_max_lane_count(intel_dp))
14c562c0
MN
420 return false;
421
422 return true;
423}
424
1e712535
MN
425static bool intel_dp_can_link_train_fallback_for_edp(struct intel_dp *intel_dp,
426 int link_rate,
830de422 427 u8 lane_count)
1e712535
MN
428{
429 const struct drm_display_mode *fixed_mode =
430 intel_dp->attached_connector->panel.fixed_mode;
431 int mode_rate, max_rate;
432
433 mode_rate = intel_dp_link_required(fixed_mode->clock, 18);
434 max_rate = intel_dp_max_data_rate(link_rate, lane_count);
435 if (mode_rate > max_rate)
436 return false;
437
438 return true;
439}
440
fdb14d33 441int intel_dp_get_link_train_fallback_values(struct intel_dp *intel_dp,
830de422 442 int link_rate, u8 lane_count)
fdb14d33 443{
af67009c 444 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
b1810a74 445 int index;
fdb14d33 446
80a8cecf
ID
447 /*
448 * TODO: Enable fallback on MST links once MST link compute can handle
449 * the fallback params.
450 */
451 if (intel_dp->is_mst) {
452 drm_err(&i915->drm, "Link Training Unsuccessful\n");
453 return -1;
454 }
455
b1810a74
JN
456 index = intel_dp_rate_index(intel_dp->common_rates,
457 intel_dp->num_common_rates,
458 link_rate);
459 if (index > 0) {
1e712535
MN
460 if (intel_dp_is_edp(intel_dp) &&
461 !intel_dp_can_link_train_fallback_for_edp(intel_dp,
462 intel_dp->common_rates[index - 1],
463 lane_count)) {
af67009c
JN
464 drm_dbg_kms(&i915->drm,
465 "Retrying Link training for eDP with same parameters\n");
1e712535
MN
466 return 0;
467 }
e6c0c64a
JN
468 intel_dp->max_link_rate = intel_dp->common_rates[index - 1];
469 intel_dp->max_link_lane_count = lane_count;
fdb14d33 470 } else if (lane_count > 1) {
1e712535
MN
471 if (intel_dp_is_edp(intel_dp) &&
472 !intel_dp_can_link_train_fallback_for_edp(intel_dp,
473 intel_dp_max_common_rate(intel_dp),
474 lane_count >> 1)) {
af67009c
JN
475 drm_dbg_kms(&i915->drm,
476 "Retrying Link training for eDP with same parameters\n");
1e712535
MN
477 return 0;
478 }
540b0b7f 479 intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);
e6c0c64a 480 intel_dp->max_link_lane_count = lane_count >> 1;
fdb14d33 481 } else {
af67009c 482 drm_err(&i915->drm, "Link Training Unsuccessful\n");
fdb14d33
MN
483 return -1;
484 }
485
486 return 0;
487}
488
ed06efb8
ML
489u32 intel_dp_mode_to_fec_clock(u32 mode_clock)
490{
491 return div_u64(mul_u32_u32(mode_clock, 1000000U),
492 DP_DSC_FEC_OVERHEAD_FACTOR);
493}
494
/* Small joiner RAM size in bits: 7680 bytes on Gen11+, 6144 before. */
static int
small_joiner_ram_size_bits(struct drm_i915_private *i915)
{
	return (INTEL_GEN(i915) >= 11 ? 7680 : 6144) * 8;
}
503
504static u16 intel_dp_dsc_get_output_bpp(struct drm_i915_private *i915,
505 u32 link_clock, u32 lane_count,
ed06efb8
ML
506 u32 mode_clock, u32 mode_hdisplay)
507{
508 u32 bits_per_pixel, max_bpp_small_joiner_ram;
509 int i;
510
511 /*
512 * Available Link Bandwidth(Kbits/sec) = (NumberOfLanes)*
513 * (LinkSymbolClock)* 8 * (TimeSlotsPerMTP)
514 * for SST -> TimeSlotsPerMTP is 1,
515 * for MST -> TimeSlotsPerMTP has to be calculated
516 */
517 bits_per_pixel = (link_clock * lane_count * 8) /
518 intel_dp_mode_to_fec_clock(mode_clock);
bdc6114e 519 drm_dbg_kms(&i915->drm, "Max link bpp: %u\n", bits_per_pixel);
ed06efb8
ML
520
521 /* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */
45d3c5cd
MR
522 max_bpp_small_joiner_ram = small_joiner_ram_size_bits(i915) /
523 mode_hdisplay;
bdc6114e
WK
524 drm_dbg_kms(&i915->drm, "Max small joiner bpp: %u\n",
525 max_bpp_small_joiner_ram);
ed06efb8
ML
526
527 /*
528 * Greatest allowed DSC BPP = MIN (output BPP from available Link BW
529 * check, output bpp from small joiner RAM check)
530 */
531 bits_per_pixel = min(bits_per_pixel, max_bpp_small_joiner_ram);
532
533 /* Error out if the max bpp is less than smallest allowed valid bpp */
534 if (bits_per_pixel < valid_dsc_bpp[0]) {
bdc6114e
WK
535 drm_dbg_kms(&i915->drm, "Unsupported BPP %u, min %u\n",
536 bits_per_pixel, valid_dsc_bpp[0]);
ed06efb8
ML
537 return 0;
538 }
539
540 /* Find the nearest match in the array of known BPPs from VESA */
541 for (i = 0; i < ARRAY_SIZE(valid_dsc_bpp) - 1; i++) {
542 if (bits_per_pixel < valid_dsc_bpp[i + 1])
543 break;
544 }
545 bits_per_pixel = valid_dsc_bpp[i];
546
547 /*
548 * Compressed BPP in U6.4 format so multiply by 16, for Gen 11,
549 * fractional part is 0
550 */
551 return bits_per_pixel << 4;
552}
553
554static u8 intel_dp_dsc_get_slice_count(struct intel_dp *intel_dp,
555 int mode_clock, int mode_hdisplay)
556{
af67009c 557 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
ed06efb8
ML
558 u8 min_slice_count, i;
559 int max_slice_width;
560
561 if (mode_clock <= DP_DSC_PEAK_PIXEL_RATE)
562 min_slice_count = DIV_ROUND_UP(mode_clock,
563 DP_DSC_MAX_ENC_THROUGHPUT_0);
564 else
565 min_slice_count = DIV_ROUND_UP(mode_clock,
566 DP_DSC_MAX_ENC_THROUGHPUT_1);
567
568 max_slice_width = drm_dp_dsc_sink_max_slice_width(intel_dp->dsc_dpcd);
569 if (max_slice_width < DP_DSC_MIN_SLICE_WIDTH_VALUE) {
af67009c
JN
570 drm_dbg_kms(&i915->drm,
571 "Unsupported slice width %d by DP DSC Sink device\n",
572 max_slice_width);
ed06efb8
ML
573 return 0;
574 }
575 /* Also take into account max slice width */
576 min_slice_count = min_t(u8, min_slice_count,
577 DIV_ROUND_UP(mode_hdisplay,
578 max_slice_width));
579
580 /* Find the closest match to the valid slice count values */
581 for (i = 0; i < ARRAY_SIZE(valid_dsc_slicecount); i++) {
582 if (valid_dsc_slicecount[i] >
583 drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
584 false))
585 break;
586 if (min_slice_count <= valid_dsc_slicecount[i])
587 return valid_dsc_slicecount[i];
588 }
589
af67009c
JN
590 drm_dbg_kms(&i915->drm, "Unsupported Slice Count %d\n",
591 min_slice_count);
ed06efb8
ML
592 return 0;
593}
594
773bd825
VS
595static enum intel_output_format
596intel_dp_output_format(struct drm_connector *connector,
597 const struct drm_display_mode *mode)
598{
599 struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
600 const struct drm_display_info *info = &connector->display_info;
601
602 if (!drm_mode_is_420_only(info, mode))
603 return INTEL_OUTPUT_FORMAT_RGB;
604
605 if (intel_dp->dfp.ycbcr_444_to_420)
606 return INTEL_OUTPUT_FORMAT_YCBCR444;
607 else
608 return INTEL_OUTPUT_FORMAT_YCBCR420;
609}
610
0bf8dedc
VS
611int intel_dp_min_bpp(enum intel_output_format output_format)
612{
613 if (output_format == INTEL_OUTPUT_FORMAT_RGB)
614 return 6 * 3;
615 else
616 return 8 * 3;
617}
618
619static int intel_dp_output_bpp(enum intel_output_format output_format, int bpp)
620{
621 /*
622 * bpp value was assumed to RGB format. And YCbCr 4:2:0 output
623 * format of the number of bytes per pixel will be half the number
624 * of bytes of RGB pixel.
625 */
626 if (output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
627 bpp /= 2;
628
629 return bpp;
630}
631
632static int
633intel_dp_mode_min_output_bpp(struct drm_connector *connector,
634 const struct drm_display_mode *mode)
635{
636 enum intel_output_format output_format =
637 intel_dp_output_format(connector, mode);
638
639 return intel_dp_output_bpp(output_format, intel_dp_min_bpp(output_format));
640}
641
98c93394
VS
642static bool intel_dp_hdisplay_bad(struct drm_i915_private *dev_priv,
643 int hdisplay)
644{
645 /*
646 * Older platforms don't like hdisplay==4096 with DP.
647 *
648 * On ILK/SNB/IVB the pipe seems to be somewhat running (scanline
649 * and frame counter increment), but we don't get vblank interrupts,
650 * and the pipe underruns immediately. The link also doesn't seem
651 * to get trained properly.
652 *
653 * On CHV the vblank interrupts don't seem to disappear but
654 * otherwise the symptoms are similar.
655 *
656 * TODO: confirm the behaviour on HSW+
657 */
658 return hdisplay == 4096 && !HAS_DDI(dev_priv);
659}
660
fe7cf496
VS
661static enum drm_mode_status
662intel_dp_mode_valid_downstream(struct intel_connector *connector,
3977cd1c 663 const struct drm_display_mode *mode,
fe7cf496
VS
664 int target_clock)
665{
666 struct intel_dp *intel_dp = intel_attached_dp(connector);
3977cd1c
VS
667 const struct drm_display_info *info = &connector->base.display_info;
668 int tmds_clock;
fe7cf496
VS
669
670 if (intel_dp->dfp.max_dotclock &&
671 target_clock > intel_dp->dfp.max_dotclock)
672 return MODE_CLOCK_HIGH;
673
3977cd1c
VS
674 /* Assume 8bpc for the DP++/HDMI/DVI TMDS clock check */
675 tmds_clock = target_clock;
676 if (drm_mode_is_420_only(info, mode))
677 tmds_clock /= 2;
678
679 if (intel_dp->dfp.min_tmds_clock &&
680 tmds_clock < intel_dp->dfp.min_tmds_clock)
681 return MODE_CLOCK_LOW;
682 if (intel_dp->dfp.max_tmds_clock &&
683 tmds_clock > intel_dp->dfp.max_tmds_clock)
684 return MODE_CLOCK_HIGH;
685
fe7cf496
VS
686 return MODE_OK;
687}
688
c19de8eb 689static enum drm_mode_status
a4fc5ed6
KP
690intel_dp_mode_valid(struct drm_connector *connector,
691 struct drm_display_mode *mode)
692{
43a6d19c 693 struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
dd06f90e
JN
694 struct intel_connector *intel_connector = to_intel_connector(connector);
695 struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
6cfd04b0 696 struct drm_i915_private *dev_priv = to_i915(connector->dev);
36008365
DV
697 int target_clock = mode->clock;
698 int max_rate, mode_rate, max_lanes, max_link_clock;
fe7cf496 699 int max_dotclk = dev_priv->max_dotclk_freq;
6cfd04b0
MN
700 u16 dsc_max_output_bpp = 0;
701 u8 dsc_slice_count = 0;
fe7cf496 702 enum drm_mode_status status;
70ec0645 703
e4dd27aa
VS
704 if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
705 return MODE_NO_DBLESCAN;
706
1853a9da 707 if (intel_dp_is_edp(intel_dp) && fixed_mode) {
dd06f90e 708 if (mode->hdisplay > fixed_mode->hdisplay)
7de56f43
ZY
709 return MODE_PANEL;
710
dd06f90e 711 if (mode->vdisplay > fixed_mode->vdisplay)
7de56f43 712 return MODE_PANEL;
03afc4a2
DV
713
714 target_clock = fixed_mode->clock;
7de56f43
ZY
715 }
716
50fec21a 717 max_link_clock = intel_dp_max_link_rate(intel_dp);
eeb6324d 718 max_lanes = intel_dp_max_lane_count(intel_dp);
36008365
DV
719
720 max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
0bf8dedc
VS
721 mode_rate = intel_dp_link_required(target_clock,
722 intel_dp_mode_min_output_bpp(connector, mode));
36008365 723
98c93394
VS
724 if (intel_dp_hdisplay_bad(dev_priv, mode->hdisplay))
725 return MODE_H_ILLEGAL;
726
6cfd04b0
MN
727 /*
728 * Output bpp is stored in 6.4 format so right shift by 4 to get the
729 * integer value since we support only integer values of bpp.
730 */
731 if ((INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) &&
732 drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)) {
733 if (intel_dp_is_edp(intel_dp)) {
734 dsc_max_output_bpp =
735 drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4;
736 dsc_slice_count =
737 drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
738 true);
240999cf 739 } else if (drm_dp_sink_supports_fec(intel_dp->fec_capable)) {
6cfd04b0 740 dsc_max_output_bpp =
45d3c5cd
MR
741 intel_dp_dsc_get_output_bpp(dev_priv,
742 max_link_clock,
6cfd04b0
MN
743 max_lanes,
744 target_clock,
745 mode->hdisplay) >> 4;
746 dsc_slice_count =
747 intel_dp_dsc_get_slice_count(intel_dp,
748 target_clock,
749 mode->hdisplay);
750 }
751 }
752
753 if ((mode_rate > max_rate && !(dsc_max_output_bpp && dsc_slice_count)) ||
754 target_clock > max_dotclk)
c4867936 755 return MODE_CLOCK_HIGH;
a4fc5ed6
KP
756
757 if (mode->clock < 10000)
758 return MODE_CLOCK_LOW;
759
0af78a2b
DV
760 if (mode->flags & DRM_MODE_FLAG_DBLCLK)
761 return MODE_H_ILLEGAL;
762
3977cd1c
VS
763 status = intel_dp_mode_valid_downstream(intel_connector,
764 mode, target_clock);
fe7cf496
VS
765 if (status != MODE_OK)
766 return status;
767
2d20411e 768 return intel_mode_valid_max_plane_size(dev_priv, mode);
a4fc5ed6
KP
769}
770
830de422 771u32 intel_dp_pack_aux(const u8 *src, int src_bytes)
a4fc5ed6 772{
830de422
JN
773 int i;
774 u32 v = 0;
a4fc5ed6
KP
775
776 if (src_bytes > 4)
777 src_bytes = 4;
778 for (i = 0; i < src_bytes; i++)
830de422 779 v |= ((u32)src[i]) << ((3 - i) * 8);
a4fc5ed6
KP
780 return v;
781}
782
830de422 783static void intel_dp_unpack_aux(u32 src, u8 *dst, int dst_bytes)
a4fc5ed6
KP
784{
785 int i;
786 if (dst_bytes > 4)
787 dst_bytes = 4;
788 for (i = 0; i < dst_bytes; i++)
789 dst[i] = src >> ((3-i) * 8);
790}
791
bf13e81b 792static void
46bd8383 793intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp);
bf13e81b 794static void
46bd8383 795intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
5d5ab2d2 796 bool force_disable_vdd);
335f752b 797static void
46bd8383 798intel_dp_pps_init(struct intel_dp *intel_dp);
bf13e81b 799
69d93820
CW
800static intel_wakeref_t
801pps_lock(struct intel_dp *intel_dp)
773538e8 802{
de25eb7f 803 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
69d93820 804 intel_wakeref_t wakeref;
773538e8
VS
805
806 /*
40c7ae45 807 * See intel_power_sequencer_reset() why we need
773538e8
VS
808 * a power domain reference here.
809 */
69d93820
CW
810 wakeref = intel_display_power_get(dev_priv,
811 intel_aux_power_domain(dp_to_dig_port(intel_dp)));
773538e8
VS
812
813 mutex_lock(&dev_priv->pps_mutex);
69d93820
CW
814
815 return wakeref;
773538e8
VS
816}
817
69d93820
CW
818static intel_wakeref_t
819pps_unlock(struct intel_dp *intel_dp, intel_wakeref_t wakeref)
773538e8 820{
de25eb7f 821 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
773538e8
VS
822
823 mutex_unlock(&dev_priv->pps_mutex);
69d93820
CW
824 intel_display_power_put(dev_priv,
825 intel_aux_power_domain(dp_to_dig_port(intel_dp)),
826 wakeref);
827 return 0;
773538e8
VS
828}
829
/*
 * Run the loop body exactly once with the pps lock held; pps_unlock()
 * returns 0, which terminates the for-loop.
 */
#define with_pps_lock(dp, wf)						\
	for ((wf) = pps_lock(dp); (wf); (wf) = pps_unlock((dp), (wf)))
832
961a0db0
VS
833static void
834vlv_power_sequencer_kick(struct intel_dp *intel_dp)
835{
de25eb7f 836 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
7801f3b7 837 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
961a0db0 838 enum pipe pipe = intel_dp->pps_pipe;
0047eedc
VS
839 bool pll_enabled, release_cl_override = false;
840 enum dpio_phy phy = DPIO_PHY(pipe);
841 enum dpio_channel ch = vlv_pipe_to_channel(pipe);
830de422 842 u32 DP;
961a0db0 843
eb020ca3
PB
844 if (drm_WARN(&dev_priv->drm,
845 intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN,
846 "skipping pipe %c power sequencer kick due to [ENCODER:%d:%s] being active\n",
7801f3b7
LDM
847 pipe_name(pipe), dig_port->base.base.base.id,
848 dig_port->base.base.name))
961a0db0
VS
849 return;
850
bdc6114e
WK
851 drm_dbg_kms(&dev_priv->drm,
852 "kicking pipe %c power sequencer for [ENCODER:%d:%s]\n",
7801f3b7
LDM
853 pipe_name(pipe), dig_port->base.base.base.id,
854 dig_port->base.base.name);
961a0db0
VS
855
856 /* Preserve the BIOS-computed detected bit. This is
857 * supposed to be read-only.
858 */
b4e33881 859 DP = intel_de_read(dev_priv, intel_dp->output_reg) & DP_DETECTED;
961a0db0
VS
860 DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
861 DP |= DP_PORT_WIDTH(1);
862 DP |= DP_LINK_TRAIN_PAT_1;
863
920a14b2 864 if (IS_CHERRYVIEW(dev_priv))
59b74c49
VS
865 DP |= DP_PIPE_SEL_CHV(pipe);
866 else
867 DP |= DP_PIPE_SEL(pipe);
961a0db0 868
b4e33881 869 pll_enabled = intel_de_read(dev_priv, DPLL(pipe)) & DPLL_VCO_ENABLE;
d288f65f
VS
870
871 /*
872 * The DPLL for the pipe must be enabled for this to work.
873 * So enable temporarily it if it's not already enabled.
874 */
0047eedc 875 if (!pll_enabled) {
920a14b2 876 release_cl_override = IS_CHERRYVIEW(dev_priv) &&
0047eedc
VS
877 !chv_phy_powergate_ch(dev_priv, phy, ch, true);
878
30ad9814 879 if (vlv_force_pll_on(dev_priv, pipe, IS_CHERRYVIEW(dev_priv) ?
3f36b937 880 &chv_dpll[0].dpll : &vlv_dpll[0].dpll)) {
bdc6114e
WK
881 drm_err(&dev_priv->drm,
882 "Failed to force on pll for pipe %c!\n",
883 pipe_name(pipe));
3f36b937
TU
884 return;
885 }
0047eedc 886 }
d288f65f 887
961a0db0
VS
888 /*
889 * Similar magic as in intel_dp_enable_port().
890 * We _must_ do this port enable + disable trick
e7f2af78 891 * to make this power sequencer lock onto the port.
961a0db0
VS
892 * Otherwise even VDD force bit won't work.
893 */
b4e33881
JN
894 intel_de_write(dev_priv, intel_dp->output_reg, DP);
895 intel_de_posting_read(dev_priv, intel_dp->output_reg);
961a0db0 896
b4e33881
JN
897 intel_de_write(dev_priv, intel_dp->output_reg, DP | DP_PORT_EN);
898 intel_de_posting_read(dev_priv, intel_dp->output_reg);
961a0db0 899
b4e33881
JN
900 intel_de_write(dev_priv, intel_dp->output_reg, DP & ~DP_PORT_EN);
901 intel_de_posting_read(dev_priv, intel_dp->output_reg);
d288f65f 902
0047eedc 903 if (!pll_enabled) {
30ad9814 904 vlv_force_pll_off(dev_priv, pipe);
0047eedc
VS
905
906 if (release_cl_override)
907 chv_phy_powergate_ch(dev_priv, phy, ch, false);
908 }
961a0db0
VS
909}
910
9f2bdb00
VS
911static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
912{
913 struct intel_encoder *encoder;
914 unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
915
916 /*
917 * We don't have power sequencer currently.
918 * Pick one that's not used by other ports.
919 */
14aa521c 920 for_each_intel_dp(&dev_priv->drm, encoder) {
b7d02c3a 921 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
9f2bdb00
VS
922
923 if (encoder->type == INTEL_OUTPUT_EDP) {
eb020ca3
PB
924 drm_WARN_ON(&dev_priv->drm,
925 intel_dp->active_pipe != INVALID_PIPE &&
926 intel_dp->active_pipe !=
927 intel_dp->pps_pipe);
9f2bdb00
VS
928
929 if (intel_dp->pps_pipe != INVALID_PIPE)
930 pipes &= ~(1 << intel_dp->pps_pipe);
931 } else {
eb020ca3
PB
932 drm_WARN_ON(&dev_priv->drm,
933 intel_dp->pps_pipe != INVALID_PIPE);
9f2bdb00
VS
934
935 if (intel_dp->active_pipe != INVALID_PIPE)
936 pipes &= ~(1 << intel_dp->active_pipe);
937 }
938 }
939
940 if (pipes == 0)
941 return INVALID_PIPE;
942
943 return ffs(pipes) - 1;
944}
945
bf13e81b
JN
/*
 * Return the pipe whose power sequencer this eDP port should use,
 * assigning (and kicking) a free one if none is bound yet.
 * Must be called with pps_mutex held; eDP ports only.
 */
static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum pipe pipe;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* We should never land here with regular DP ports */
	drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));

	drm_WARN_ON(&dev_priv->drm, intel_dp->active_pipe != INVALID_PIPE &&
		    intel_dp->active_pipe != intel_dp->pps_pipe);

	/* Fast path: a sequencer is already bound to this port. */
	if (intel_dp->pps_pipe != INVALID_PIPE)
		return intel_dp->pps_pipe;

	pipe = vlv_find_free_pps(dev_priv);

	/*
	 * Didn't find one. This should not happen since there
	 * are two power sequencers and up to two eDP ports.
	 */
	if (drm_WARN_ON(&dev_priv->drm, pipe == INVALID_PIPE))
		pipe = PIPE_A;

	/* Detach the chosen sequencer from whoever held it before. */
	vlv_steal_power_sequencer(dev_priv, pipe);
	intel_dp->pps_pipe = pipe;

	drm_dbg_kms(&dev_priv->drm,
		    "picked pipe %c power sequencer for [ENCODER:%d:%s]\n",
		    pipe_name(intel_dp->pps_pipe),
		    dig_port->base.base.base.id,
		    dig_port->base.base.name);

	/* init power sequencer on this pipe and port */
	intel_dp_init_panel_power_sequencer(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(intel_dp, true);

	/*
	 * Even vdd force doesn't work until we've made
	 * the power sequencer lock in on the port.
	 */
	vlv_power_sequencer_kick(intel_dp);

	return intel_dp->pps_pipe;
}
994
78597996
ID
995static int
996bxt_power_sequencer_idx(struct intel_dp *intel_dp)
997{
de25eb7f 998 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
73c0fcac 999 int backlight_controller = dev_priv->vbt.backlight.controller;
78597996
ID
1000
1001 lockdep_assert_held(&dev_priv->pps_mutex);
1002
1003 /* We should never land here with regular DP ports */
eb020ca3 1004 drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));
78597996 1005
78597996 1006 if (!intel_dp->pps_reset)
73c0fcac 1007 return backlight_controller;
78597996
ID
1008
1009 intel_dp->pps_reset = false;
1010
1011 /*
1012 * Only the HW needs to be reprogrammed, the SW state is fixed and
1013 * has been setup during connector init.
1014 */
46bd8383 1015 intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
78597996 1016
73c0fcac 1017 return backlight_controller;
78597996
ID
1018}
1019
6491ab27
VS
1020typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
1021 enum pipe pipe);
1022
1023static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
1024 enum pipe pipe)
1025{
b4e33881 1026 return intel_de_read(dev_priv, PP_STATUS(pipe)) & PP_ON;
6491ab27
VS
1027}
1028
1029static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
1030 enum pipe pipe)
1031{
b4e33881 1032 return intel_de_read(dev_priv, PP_CONTROL(pipe)) & EDP_FORCE_VDD;
6491ab27
VS
1033}
1034
1035static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
1036 enum pipe pipe)
1037{
1038 return true;
1039}
bf13e81b 1040
a4a5d2f8 1041static enum pipe
6491ab27
VS
1042vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
1043 enum port port,
1044 vlv_pipe_check pipe_check)
a4a5d2f8
VS
1045{
1046 enum pipe pipe;
bf13e81b 1047
bf13e81b 1048 for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
b4e33881 1049 u32 port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(pipe)) &
bf13e81b 1050 PANEL_PORT_SELECT_MASK;
a4a5d2f8
VS
1051
1052 if (port_sel != PANEL_PORT_SELECT_VLV(port))
1053 continue;
1054
6491ab27
VS
1055 if (!pipe_check(dev_priv, pipe))
1056 continue;
1057
a4a5d2f8 1058 return pipe;
bf13e81b
JN
1059 }
1060
a4a5d2f8
VS
1061 return INVALID_PIPE;
1062}
1063
/*
 * Adopt the PPS instance (pipe A or B) that the BIOS already associated
 * with this port, preferring one that is actively powering the panel.
 * Called with pps_mutex held during connector init.
 */
static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	enum port port = dig_port->base.port;

	lockdep_assert_held(&dev_priv->pps_mutex);

	/* try to find a pipe with this port selected */
	/* first pick one where the panel is on */
	intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
						  vlv_pipe_has_pp_on);
	/* didn't find one? pick one where vdd is on */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_has_vdd_on);
	/* didn't find one? pick one with just the correct port */
	if (intel_dp->pps_pipe == INVALID_PIPE)
		intel_dp->pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
							  vlv_pipe_any);

	/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
	if (intel_dp->pps_pipe == INVALID_PIPE) {
		drm_dbg_kms(&dev_priv->drm,
			    "no initial power sequencer for [ENCODER:%d:%s]\n",
			    dig_port->base.base.base.id,
			    dig_port->base.base.name);
		return;
	}

	drm_dbg_kms(&dev_priv->drm,
		    "initial power sequencer for [ENCODER:%d:%s]: pipe %c\n",
		    dig_port->base.base.base.id,
		    dig_port->base.base.name,
		    pipe_name(intel_dp->pps_pipe));

	/* Sync SW state and reprogram the HW instance (preserving BIOS setup). */
	intel_dp_init_panel_power_sequencer(intel_dp);
	intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
}
1104
/*
 * Invalidate all PPS <-> pipe/port associations, e.g. after a power well
 * cycle may have clobbered the PPS hardware state.  On BXT/GLK only the
 * HW instance needs reprogramming (pps_reset); on VLV/CHV the pipe
 * binding itself is forgotten.
 */
void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
	struct intel_encoder *encoder;

	/* Only platforms with reassignable power sequencers get here. */
	if (drm_WARN_ON(&dev_priv->drm,
			!(IS_VALLEYVIEW(dev_priv) ||
			  IS_CHERRYVIEW(dev_priv) ||
			  IS_GEN9_LP(dev_priv))))
		return;

	/*
	 * We can't grab pps_mutex here due to deadlock with power_domain
	 * mutex when power_domain functions are called while holding pps_mutex.
	 * That also means that in order to use pps_pipe the code needs to
	 * hold both a power domain reference and pps_mutex, and the power domain
	 * reference get/put must be done while _not_ holding pps_mutex.
	 * pps_{lock,unlock}() do these steps in the correct order, so one
	 * should use them always.
	 */

	for_each_intel_dp(&dev_priv->drm, encoder) {
		struct intel_dp *intel_dp = enc_to_intel_dp(encoder);

		/* No port should be actively driving a pipe at reset time. */
		drm_WARN_ON(&dev_priv->drm,
			    intel_dp->active_pipe != INVALID_PIPE);

		if (encoder->type != INTEL_OUTPUT_EDP)
			continue;

		if (IS_GEN9_LP(dev_priv))
			intel_dp->pps_reset = true;
		else
			intel_dp->pps_pipe = INVALID_PIPE;
	}
}
1140
8e8232d5
ID
/*
 * Register offsets for one panel power sequencer instance.
 * pp_div is INVALID_MMIO_REG on platforms where the cycle delay lives
 * in PP_CONTROL rather than a separate divisor register.
 */
struct pps_registers {
	i915_reg_t pp_ctrl;	/* PP_CONTROL */
	i915_reg_t pp_stat;	/* PP_STATUS */
	i915_reg_t pp_on;	/* PP_ON_DELAYS */
	i915_reg_t pp_off;	/* PP_OFF_DELAYS */
	i915_reg_t pp_div;	/* PP_DIVISOR, if present */
};
1148
46bd8383 1149static void intel_pps_get_registers(struct intel_dp *intel_dp,
8e8232d5
ID
1150 struct pps_registers *regs)
1151{
de25eb7f 1152 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
44cb734c
ID
1153 int pps_idx = 0;
1154
8e8232d5
ID
1155 memset(regs, 0, sizeof(*regs));
1156
cc3f90f0 1157 if (IS_GEN9_LP(dev_priv))
44cb734c
ID
1158 pps_idx = bxt_power_sequencer_idx(intel_dp);
1159 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
1160 pps_idx = vlv_power_sequencer_pipe(intel_dp);
8e8232d5 1161
44cb734c
ID
1162 regs->pp_ctrl = PP_CONTROL(pps_idx);
1163 regs->pp_stat = PP_STATUS(pps_idx);
1164 regs->pp_on = PP_ON_DELAYS(pps_idx);
1165 regs->pp_off = PP_OFF_DELAYS(pps_idx);
ab3517c1
JN
1166
1167 /* Cycle delay moved from PP_DIVISOR to PP_CONTROL */
c6c30b91 1168 if (IS_GEN9_LP(dev_priv) || INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
ab3517c1
JN
1169 regs->pp_div = INVALID_MMIO_REG;
1170 else
44cb734c 1171 regs->pp_div = PP_DIVISOR(pps_idx);
8e8232d5
ID
1172}
1173
f0f59a00
VS
1174static i915_reg_t
1175_pp_ctrl_reg(struct intel_dp *intel_dp)
bf13e81b 1176{
8e8232d5 1177 struct pps_registers regs;
bf13e81b 1178
46bd8383 1179 intel_pps_get_registers(intel_dp, &regs);
8e8232d5
ID
1180
1181 return regs.pp_ctrl;
bf13e81b
JN
1182}
1183
f0f59a00
VS
1184static i915_reg_t
1185_pp_stat_reg(struct intel_dp *intel_dp)
bf13e81b 1186{
8e8232d5 1187 struct pps_registers regs;
bf13e81b 1188
46bd8383 1189 intel_pps_get_registers(intel_dp, &regs);
8e8232d5
ID
1190
1191 return regs.pp_stat;
bf13e81b
JN
1192}
1193
/* Reboot notifier handler to shutdown panel power to guarantee T12 timing
   This function only applicable when panel PM state is not to be tracked */
static int edp_notify_handler(struct notifier_block *this, unsigned long code,
			      void *unused)
{
	struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
						 edp_notifier);
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	intel_wakeref_t wakeref;

	/* Only eDP panels, and only on an actual restart, need this. */
	if (!intel_dp_is_edp(intel_dp) || code != SYS_RESTART)
		return 0;

	with_pps_lock(intel_dp, wakeref) {
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
			enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
			i915_reg_t pp_ctrl_reg, pp_div_reg;
			u32 pp_div;

			pp_ctrl_reg = PP_CONTROL(pipe);
			pp_div_reg  = PP_DIVISOR(pipe);
			pp_div = intel_de_read(dev_priv, pp_div_reg);
			pp_div &= PP_REFERENCE_DIVIDER_MASK;

			/* 0x1F write to PP_DIV_REG sets max cycle delay */
			intel_de_write(dev_priv, pp_div_reg, pp_div | 0x1F);
			intel_de_write(dev_priv, pp_ctrl_reg,
				       PANEL_UNLOCK_REGS);
			/* Block the reboot until the panel power cycle (T12) elapses. */
			msleep(intel_dp->panel_power_cycle_delay);
		}
	}

	return 0;
}
1228
4be73780 1229static bool edp_have_panel_power(struct intel_dp *intel_dp)
ebf33b18 1230{
de25eb7f 1231 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
ebf33b18 1232
e39b999a
VS
1233 lockdep_assert_held(&dev_priv->pps_mutex);
1234
920a14b2 1235 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
9a42356b
VS
1236 intel_dp->pps_pipe == INVALID_PIPE)
1237 return false;
1238
b4e33881 1239 return (intel_de_read(dev_priv, _pp_stat_reg(intel_dp)) & PP_ON) != 0;
ebf33b18
KP
1240}
1241
4be73780 1242static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
ebf33b18 1243{
de25eb7f 1244 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
ebf33b18 1245
e39b999a
VS
1246 lockdep_assert_held(&dev_priv->pps_mutex);
1247
920a14b2 1248 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
9a42356b
VS
1249 intel_dp->pps_pipe == INVALID_PIPE)
1250 return false;
1251
b4e33881 1252 return intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
ebf33b18
KP
1253}
1254
9b984dae
KP
1255static void
1256intel_dp_check_edp(struct intel_dp *intel_dp)
1257{
de25eb7f 1258 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
ebf33b18 1259
1853a9da 1260 if (!intel_dp_is_edp(intel_dp))
9b984dae 1261 return;
453c5420 1262
4be73780 1263 if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
eb020ca3
PB
1264 drm_WARN(&dev_priv->drm, 1,
1265 "eDP powered off while attempting aux channel communication.\n");
bdc6114e 1266 drm_dbg_kms(&dev_priv->drm, "Status 0x%08x Control 0x%08x\n",
b4e33881
JN
1267 intel_de_read(dev_priv, _pp_stat_reg(intel_dp)),
1268 intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)));
9b984dae
KP
1269 }
1270}
1271
/*
 * Wait (up to 10ms, woken via gmbus_wait_queue) for the AUX engine's
 * SEND_BUSY bit to clear.  Returns the final AUX_CH_CTL status value;
 * logs an error on timeout.  Note the C macro both polls the register
 * and updates 'status' as a side effect.
 */
static u32
intel_dp_aux_wait_done(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
	const unsigned int timeout_ms = 10;
	u32 status;
	bool done;

#define C (((status = intel_uncore_read_notrace(&i915->uncore, ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	done = wait_event_timeout(i915->gmbus_wait_queue, C,
				  msecs_to_jiffies_timeout(timeout_ms));

	/* just trace the final value */
	trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);

	if (!done)
		drm_err(&i915->drm,
			"%s: did not complete or timeout within %ums (status 0x%08x)\n",
			intel_dp->aux.name, timeout_ms, status);
#undef C

	return status;
}
1296
830de422 1297static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
a4fc5ed6 1298{
de25eb7f 1299 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
9ee32fea 1300
a457f54b
VS
1301 if (index)
1302 return 0;
1303
ec5b01dd
DL
1304 /*
1305 * The clock divider is based off the hrawclk, and would like to run at
a457f54b 1306 * 2MHz. So, take the hrawclk value and divide by 2000 and use that
a4fc5ed6 1307 */
b04002f4 1308 return DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq, 2000);
ec5b01dd
DL
1309}
1310
830de422 1311static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
ec5b01dd 1312{
de25eb7f 1313 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
563d22a0 1314 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
b04002f4 1315 u32 freq;
ec5b01dd
DL
1316
1317 if (index)
1318 return 0;
1319
a457f54b
VS
1320 /*
1321 * The clock divider is based off the cdclk or PCH rawclk, and would
1322 * like to run at 2MHz. So, take the cdclk or PCH rawclk value and
1323 * divide by 2000 and use that
1324 */
563d22a0 1325 if (dig_port->aux_ch == AUX_CH_A)
b04002f4 1326 freq = dev_priv->cdclk.hw.cdclk;
e7dc33f3 1327 else
b04002f4
CW
1328 freq = RUNTIME_INFO(dev_priv)->rawclk_freq;
1329 return DIV_ROUND_CLOSEST(freq, 2000);
ec5b01dd
DL
1330}
1331
830de422 1332static u32 hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
ec5b01dd 1333{
de25eb7f 1334 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
563d22a0 1335 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
ec5b01dd 1336
563d22a0 1337 if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(dev_priv)) {
2c55c336 1338 /* Workaround for non-ULT HSW */
bc86625a
CW
1339 switch (index) {
1340 case 0: return 63;
1341 case 1: return 72;
1342 default: return 0;
1343 }
2c55c336 1344 }
a457f54b
VS
1345
1346 return ilk_get_aux_clock_divider(intel_dp, index);
b84a1cf8
RV
1347}
1348
830de422 1349static u32 skl_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
b6b5e383
DL
1350{
1351 /*
1352 * SKL doesn't need us to program the AUX clock divider (Hardware will
1353 * derive the clock from CDCLK automatically). We still implement the
1354 * get_aux_clock_divider vfunc to plug-in into the existing code.
1355 */
1356 return index ? 0 : 1;
1357}
1358
830de422
JN
1359static u32 g4x_get_aux_send_ctl(struct intel_dp *intel_dp,
1360 int send_bytes,
1361 u32 aux_clock_divider)
5ed12a19 1362{
7801f3b7 1363 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
8652744b 1364 struct drm_i915_private *dev_priv =
7801f3b7 1365 to_i915(dig_port->base.base.dev);
830de422 1366 u32 precharge, timeout;
5ed12a19 1367
cf819eff 1368 if (IS_GEN(dev_priv, 6))
5ed12a19
DL
1369 precharge = 3;
1370 else
1371 precharge = 5;
1372
8f5f63d5 1373 if (IS_BROADWELL(dev_priv))
5ed12a19
DL
1374 timeout = DP_AUX_CH_CTL_TIME_OUT_600us;
1375 else
1376 timeout = DP_AUX_CH_CTL_TIME_OUT_400us;
1377
1378 return DP_AUX_CH_CTL_SEND_BUSY |
788d4433 1379 DP_AUX_CH_CTL_DONE |
8a29c778 1380 DP_AUX_CH_CTL_INTERRUPT |
788d4433 1381 DP_AUX_CH_CTL_TIME_OUT_ERROR |
5ed12a19 1382 timeout |
788d4433 1383 DP_AUX_CH_CTL_RECEIVE_ERROR |
5ed12a19
DL
1384 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
1385 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
788d4433 1386 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT);
5ed12a19
DL
1387}
1388
830de422
JN
1389static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
1390 int send_bytes,
1391 u32 unused)
b9ca5fad 1392{
7801f3b7 1393 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
49748264 1394 struct drm_i915_private *i915 =
7801f3b7
LDM
1395 to_i915(dig_port->base.base.dev);
1396 enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
830de422 1397 u32 ret;
6f211ed4
AS
1398
1399 ret = DP_AUX_CH_CTL_SEND_BUSY |
1400 DP_AUX_CH_CTL_DONE |
1401 DP_AUX_CH_CTL_INTERRUPT |
1402 DP_AUX_CH_CTL_TIME_OUT_ERROR |
1403 DP_AUX_CH_CTL_TIME_OUT_MAX |
1404 DP_AUX_CH_CTL_RECEIVE_ERROR |
1405 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
1406 DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(32) |
1407 DP_AUX_CH_CTL_SYNC_PULSE_SKL(32);
1408
49748264 1409 if (intel_phy_is_tc(i915, phy) &&
7801f3b7 1410 dig_port->tc_mode == TC_PORT_TBT_ALT)
6f211ed4
AS
1411 ret |= DP_AUX_CH_CTL_TBT_IO;
1412
1413 return ret;
b9ca5fad
DL
1414}
1415
/*
 * Perform one raw AUX channel transfer: write @send_bytes from @send,
 * then read up to @recv_size bytes into @recv.  Handles power/PPS/TC
 * locking, busy-waiting, the divider/retry loops and error decoding.
 * Returns the number of bytes received, or a negative errno.
 */
static int
intel_dp_aux_xfer(struct intel_dp *intel_dp,
		  const u8 *send, int send_bytes,
		  u8 *recv, int recv_size,
		  u32 aux_send_ctl_flags)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *i915 =
			to_i915(dig_port->base.base.dev);
	struct intel_uncore *uncore = &i915->uncore;
	enum phy phy = intel_port_to_phy(i915, dig_port->base.port);
	bool is_tc_port = intel_phy_is_tc(i915, phy);
	i915_reg_t ch_ctl, ch_data[5];
	u32 aux_clock_divider;
	enum intel_display_power_domain aux_domain;
	intel_wakeref_t aux_wakeref;
	intel_wakeref_t pps_wakeref;
	int i, ret, recv_bytes;
	int try, clock = 0;
	u32 status;
	bool vdd;

	ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
	for (i = 0; i < ARRAY_SIZE(ch_data); i++)
		ch_data[i] = intel_dp->aux_ch_data_reg(intel_dp, i);

	/* Type-C ports need their PHY mode held stable during the transfer. */
	if (is_tc_port)
		intel_tc_port_lock(dig_port);

	aux_domain = intel_aux_power_domain(dig_port);

	aux_wakeref = intel_display_power_get(i915, aux_domain);
	pps_wakeref = pps_lock(intel_dp);

	/*
	 * We will be called with VDD already enabled for dpcd/edid/oui reads.
	 * In such cases we want to leave VDD enabled and it's up to upper layers
	 * to turn it off. But for eg. i2c-dev access we need to turn it on/off
	 * ourselves.
	 */
	vdd = edp_panel_vdd_on(intel_dp);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	cpu_latency_qos_update_request(&i915->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = intel_uncore_read_notrace(uncore, ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}
	/* just trace the final value */
	trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);

	if (try == 3) {
		const u32 status = intel_uncore_read(uncore, ch_ctl);

		/* Only warn once per distinct stuck-busy status value. */
		if (status != intel_dp->aux_busy_last_status) {
			drm_WARN(&i915->drm, 1,
				 "%s: not started (status 0x%08x)\n",
				 intel_dp->aux.name, status);
			intel_dp->aux_busy_last_status = status;
		}

		ret = -EBUSY;
		goto out;
	}

	/* Only 5 data registers! */
	if (drm_WARN_ON(&i915->drm, send_bytes > 20 || recv_size > 20)) {
		ret = -E2BIG;
		goto out;
	}

	/* Outer loop walks the platform's divider candidates (0 terminates). */
	while ((aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, clock++))) {
		u32 send_ctl = intel_dp->get_aux_send_ctl(intel_dp,
							  send_bytes,
							  aux_clock_divider);

		send_ctl |= aux_send_ctl_flags;

		/* Must try at least 3 times according to DP spec */
		for (try = 0; try < 5; try++) {
			/* Load the send data into the aux channel data registers */
			for (i = 0; i < send_bytes; i += 4)
				intel_uncore_write(uncore,
						   ch_data[i >> 2],
						   intel_dp_pack_aux(send + i,
								     send_bytes - i));

			/* Send the command and wait for it to complete */
			intel_uncore_write(uncore, ch_ctl, send_ctl);

			status = intel_dp_aux_wait_done(intel_dp);

			/* Clear done status and any errors */
			intel_uncore_write(uncore,
					   ch_ctl,
					   status |
					   DP_AUX_CH_CTL_DONE |
					   DP_AUX_CH_CTL_TIME_OUT_ERROR |
					   DP_AUX_CH_CTL_RECEIVE_ERROR);

			/* DP CTS 1.2 Core Rev 1.1, 4.2.1.1 & 4.2.1.2
			 * 400us delay required for errors and timeouts
			 * Timeout errors from the HW already meet this
			 * requirement so skip to next iteration
			 */
			if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR)
				continue;

			if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
				usleep_range(400, 500);
				continue;
			}
			if (status & DP_AUX_CH_CTL_DONE)
				goto done;
		}
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		drm_err(&i915->drm, "%s: not done (status 0x%08x)\n",
			intel_dp->aux.name, status);
		ret = -EBUSY;
		goto out;
	}

done:
	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		drm_err(&i915->drm, "%s: receive error (status 0x%08x)\n",
			intel_dp->aux.name, status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		drm_dbg_kms(&i915->drm, "%s: timeout (status 0x%08x)\n",
			    intel_dp->aux.name, status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);

	/*
	 * By BSpec: "Message sizes of 0 or >20 are not allowed."
	 * We have no idea of what happened so we return -EBUSY so
	 * drm layer takes care for the necessary retries.
	 */
	if (recv_bytes == 0 || recv_bytes > 20) {
		drm_dbg_kms(&i915->drm,
			    "%s: Forbidden recv_bytes = %d on aux transaction\n",
			    intel_dp->aux.name, recv_bytes);
		ret = -EBUSY;
		goto out;
	}

	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		intel_dp_unpack_aux(intel_uncore_read(uncore, ch_data[i >> 2]),
				    recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	cpu_latency_qos_update_request(&i915->pm_qos, PM_QOS_DEFAULT_VALUE);

	if (vdd)
		edp_panel_vdd_off(intel_dp, false);

	pps_unlock(intel_dp, pps_wakeref);
	intel_display_power_put_async(i915, aux_domain, aux_wakeref);

	if (is_tc_port)
		intel_tc_port_unlock(dig_port);

	return ret;
}
1608
/* An AUX request header: 3 address/request bytes plus one length byte. */
#define BARE_ADDRESS_SIZE 3
#define HEADER_SIZE (BARE_ADDRESS_SIZE + 1)

/*
 * Pack the AUX request header into @txbuf: request nibble, 20-bit
 * address, and (size - 1), as defined by the DP AUX transaction syntax.
 */
static void
intel_dp_aux_header(u8 txbuf[HEADER_SIZE],
		    const struct drm_dp_aux_msg *msg)
{
	txbuf[0] = (msg->request << 4) | ((msg->address >> 16) & 0xf);
	txbuf[1] = (msg->address >> 8) & 0xff;
	txbuf[2] = msg->address & 0xff;
	txbuf[3] = msg->size - 1;
}
1621
58418f0c
SP
1622static u32 intel_dp_aux_xfer_flags(const struct drm_dp_aux_msg *msg)
1623{
1624 /*
1625 * If we're trying to send the HDCP Aksv, we need to set a the Aksv
1626 * select bit to inform the hardware to send the Aksv after our header
1627 * since we can't access that data from software.
1628 */
1629 if ((msg->request & ~DP_AUX_I2C_MOT) == DP_AUX_NATIVE_WRITE &&
1630 msg->address == DP_AUX_HDCP_AKSV)
1631 return DP_AUX_CH_CTL_AUX_AKSV_SELECT;
1632
1633 return 0;
1634}
1635
/*
 * drm_dp_aux.transfer hook: marshal a drm_dp_aux_msg into the raw
 * tx/rx buffers and run it through intel_dp_aux_xfer().  Returns the
 * payload size on success or a negative errno; msg->reply carries the
 * sink's native/i2c ack/nack/defer code.
 */
static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
	struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 txbuf[20], rxbuf[20];
	size_t txsize, rxsize;
	u32 flags = intel_dp_aux_xfer_flags(msg);
	int ret;

	intel_dp_aux_header(txbuf, msg);

	switch (msg->request & ~DP_AUX_I2C_MOT) {
	case DP_AUX_NATIVE_WRITE:
	case DP_AUX_I2C_WRITE:
	case DP_AUX_I2C_WRITE_STATUS_UPDATE:
		/* A zero-size message is an address-only transaction. */
		txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
		rxsize = 2; /* 0 or 1 data bytes */

		if (drm_WARN_ON(&i915->drm, txsize > 20))
			return -E2BIG;

		/* buffer and size must be both set or both unset */
		drm_WARN_ON(&i915->drm, !msg->buffer != !msg->size);

		if (msg->buffer)
			memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);

		ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
					rxbuf, rxsize, flags);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;

			if (ret > 1) {
				/* Number of bytes written in a short write. */
				ret = clamp_t(int, rxbuf[1], 0, msg->size);
			} else {
				/* Return payload size. */
				ret = msg->size;
			}
		}
		break;

	case DP_AUX_NATIVE_READ:
	case DP_AUX_I2C_READ:
		txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
		rxsize = msg->size + 1;

		if (drm_WARN_ON(&i915->drm, rxsize > 20))
			return -E2BIG;

		ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
					rxbuf, rxsize, flags);
		if (ret > 0) {
			msg->reply = rxbuf[0] >> 4;
			/*
			 * Assume happy day, and copy the data. The caller is
			 * expected to check msg->reply before touching it.
			 *
			 * Return payload size.
			 */
			ret--;
			memcpy(msg->buffer, rxbuf + 1, ret);
		}
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
1708
8f7ce038 1709
4904fa66 1710static i915_reg_t g4x_aux_ctl_reg(struct intel_dp *intel_dp)
da00bdcf 1711{
de25eb7f 1712 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
563d22a0
ID
1713 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1714 enum aux_ch aux_ch = dig_port->aux_ch;
4904fa66 1715
bdabdb63
VS
1716 switch (aux_ch) {
1717 case AUX_CH_B:
1718 case AUX_CH_C:
1719 case AUX_CH_D:
1720 return DP_AUX_CH_CTL(aux_ch);
da00bdcf 1721 default:
bdabdb63
VS
1722 MISSING_CASE(aux_ch);
1723 return DP_AUX_CH_CTL(AUX_CH_B);
da00bdcf
VS
1724 }
1725}
1726
4904fa66 1727static i915_reg_t g4x_aux_data_reg(struct intel_dp *intel_dp, int index)
330e20ec 1728{
de25eb7f 1729 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
563d22a0
ID
1730 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1731 enum aux_ch aux_ch = dig_port->aux_ch;
4904fa66 1732
bdabdb63
VS
1733 switch (aux_ch) {
1734 case AUX_CH_B:
1735 case AUX_CH_C:
1736 case AUX_CH_D:
1737 return DP_AUX_CH_DATA(aux_ch, index);
330e20ec 1738 default:
bdabdb63
VS
1739 MISSING_CASE(aux_ch);
1740 return DP_AUX_CH_DATA(AUX_CH_B, index);
330e20ec
VS
1741 }
1742}
1743
4904fa66 1744static i915_reg_t ilk_aux_ctl_reg(struct intel_dp *intel_dp)
bdabdb63 1745{
de25eb7f 1746 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
563d22a0
ID
1747 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1748 enum aux_ch aux_ch = dig_port->aux_ch;
4904fa66 1749
bdabdb63
VS
1750 switch (aux_ch) {
1751 case AUX_CH_A:
1752 return DP_AUX_CH_CTL(aux_ch);
1753 case AUX_CH_B:
1754 case AUX_CH_C:
1755 case AUX_CH_D:
1756 return PCH_DP_AUX_CH_CTL(aux_ch);
da00bdcf 1757 default:
bdabdb63
VS
1758 MISSING_CASE(aux_ch);
1759 return DP_AUX_CH_CTL(AUX_CH_A);
da00bdcf
VS
1760 }
1761}
1762
4904fa66 1763static i915_reg_t ilk_aux_data_reg(struct intel_dp *intel_dp, int index)
bdabdb63 1764{
de25eb7f 1765 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
563d22a0
ID
1766 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1767 enum aux_ch aux_ch = dig_port->aux_ch;
4904fa66 1768
bdabdb63
VS
1769 switch (aux_ch) {
1770 case AUX_CH_A:
1771 return DP_AUX_CH_DATA(aux_ch, index);
1772 case AUX_CH_B:
1773 case AUX_CH_C:
1774 case AUX_CH_D:
1775 return PCH_DP_AUX_CH_DATA(aux_ch, index);
330e20ec 1776 default:
bdabdb63
VS
1777 MISSING_CASE(aux_ch);
1778 return DP_AUX_CH_DATA(AUX_CH_A, index);
330e20ec
VS
1779 }
1780}
1781
4904fa66 1782static i915_reg_t skl_aux_ctl_reg(struct intel_dp *intel_dp)
bdabdb63 1783{
de25eb7f 1784 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
563d22a0
ID
1785 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1786 enum aux_ch aux_ch = dig_port->aux_ch;
4904fa66 1787
bdabdb63
VS
1788 switch (aux_ch) {
1789 case AUX_CH_A:
1790 case AUX_CH_B:
1791 case AUX_CH_C:
1792 case AUX_CH_D:
bb187e93 1793 case AUX_CH_E:
bdabdb63 1794 case AUX_CH_F:
eb8de23c 1795 case AUX_CH_G:
bdabdb63 1796 return DP_AUX_CH_CTL(aux_ch);
da00bdcf 1797 default:
bdabdb63
VS
1798 MISSING_CASE(aux_ch);
1799 return DP_AUX_CH_CTL(AUX_CH_A);
da00bdcf
VS
1800 }
1801}
1802
4904fa66 1803static i915_reg_t skl_aux_data_reg(struct intel_dp *intel_dp, int index)
bdabdb63 1804{
de25eb7f 1805 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
563d22a0
ID
1806 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1807 enum aux_ch aux_ch = dig_port->aux_ch;
4904fa66 1808
bdabdb63
VS
1809 switch (aux_ch) {
1810 case AUX_CH_A:
1811 case AUX_CH_B:
1812 case AUX_CH_C:
1813 case AUX_CH_D:
bb187e93 1814 case AUX_CH_E:
bdabdb63 1815 case AUX_CH_F:
eb8de23c 1816 case AUX_CH_G:
bdabdb63 1817 return DP_AUX_CH_DATA(aux_ch, index);
330e20ec 1818 default:
bdabdb63
VS
1819 MISSING_CASE(aux_ch);
1820 return DP_AUX_CH_DATA(AUX_CH_A, index);
330e20ec
VS
1821 }
1822}
1823
/* Release AUX resources: frees the name string kasprintf'd in intel_dp_aux_init(). */
static void
intel_dp_aux_fini(struct intel_dp *intel_dp)
{
	kfree(intel_dp->aux.name);
}
1829
/*
 * One-time AUX channel setup: pick the platform-specific register lookup,
 * clock-divider and send-control vfuncs, then initialize the drm_dp_aux
 * helper and its transfer hook.
 */
static void
intel_dp_aux_init(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;

	/* Register lookup: gen9+ uses the unified block, PCH-split boards
	 * split A vs B/C/D between CPU and PCH, everything else is g4x. */
	if (INTEL_GEN(dev_priv) >= 9) {
		intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = skl_aux_data_reg;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		intel_dp->aux_ch_ctl_reg = ilk_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = ilk_aux_data_reg;
	} else {
		intel_dp->aux_ch_ctl_reg = g4x_aux_ctl_reg;
		intel_dp->aux_ch_data_reg = g4x_aux_data_reg;
	}

	/* Clock divider: note HSW/BDW are checked before the PCH-split case. */
	if (INTEL_GEN(dev_priv) >= 9)
		intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
	else if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
		intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
	else if (HAS_PCH_SPLIT(dev_priv))
		intel_dp->get_aux_clock_divider = ilk_get_aux_clock_divider;
	else
		intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;

	/* Send-control word builder differs only between gen9+ and older. */
	if (INTEL_GEN(dev_priv) >= 9)
		intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
	else
		intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;

	drm_dp_aux_init(&intel_dp->aux);

	/* Failure to allocate our preferred name is not critical */
	intel_dp->aux.name = kasprintf(GFP_KERNEL, "AUX %c/port %c",
				       aux_ch_name(dig_port->aux_ch),
				       port_name(encoder->port));
	intel_dp->aux.transfer = intel_dp_aux_transfer;
}
1870
e588fa18 1871bool intel_dp_source_supports_hbr2(struct intel_dp *intel_dp)
ed63baaf 1872{
fc603ca7 1873 int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];
e588fa18 1874
fc603ca7 1875 return max_rate >= 540000;
ed63baaf
TS
1876}
1877
2edd5327
MN
1878bool intel_dp_source_supports_hbr3(struct intel_dp *intel_dp)
1879{
1880 int max_rate = intel_dp->source_rates[intel_dp->num_source_rates - 1];
1881
1882 return max_rate >= 810000;
1883}
1884
c6bb3538
DV
1885static void
1886intel_dp_set_clock(struct intel_encoder *encoder,
840b32b7 1887 struct intel_crtc_state *pipe_config)
c6bb3538 1888{
2f773477 1889 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
9dd4ffdf
CML
1890 const struct dp_link_dpll *divisor = NULL;
1891 int i, count = 0;
c6bb3538 1892
9beb5fea 1893 if (IS_G4X(dev_priv)) {
45101e93
VS
1894 divisor = g4x_dpll;
1895 count = ARRAY_SIZE(g4x_dpll);
6e266956 1896 } else if (HAS_PCH_SPLIT(dev_priv)) {
9dd4ffdf
CML
1897 divisor = pch_dpll;
1898 count = ARRAY_SIZE(pch_dpll);
920a14b2 1899 } else if (IS_CHERRYVIEW(dev_priv)) {
ef9348c8
CML
1900 divisor = chv_dpll;
1901 count = ARRAY_SIZE(chv_dpll);
11a914c2 1902 } else if (IS_VALLEYVIEW(dev_priv)) {
65ce4bf5
CML
1903 divisor = vlv_dpll;
1904 count = ARRAY_SIZE(vlv_dpll);
c6bb3538 1905 }
9dd4ffdf
CML
1906
1907 if (divisor && count) {
1908 for (i = 0; i < count; i++) {
840b32b7 1909 if (pipe_config->port_clock == divisor[i].clock) {
9dd4ffdf
CML
1910 pipe_config->dpll = divisor[i].dpll;
1911 pipe_config->clock_set = true;
1912 break;
1913 }
1914 }
c6bb3538
DV
1915 }
1916}
1917
/*
 * Format an integer array into str as a comma-separated list
 * ("1, 2, 3"). The output is always NUL-terminated (when len > 0) and
 * silently truncated if the buffer is too small.
 */
static void snprintf_int_array(char *str, size_t len,
			       const int *array, int nelem)
{
	int i;

	/* A zero-sized buffer can hold nothing, not even the terminator. */
	if (len == 0)
		return;

	str[0] = '\0';

	for (i = 0; i < nelem; i++) {
		int r = snprintf(str, len, "%s%d", i ? ", " : "", array[i]);

		/*
		 * Stop on output error (r < 0) or truncation. The explicit
		 * r < 0 check replaces the old implicit reliance on a
		 * negative int converting to a huge size_t in "r >= len".
		 */
		if (r < 0 || (size_t)r >= len)
			return;
		str += r;
		len -= r;
	}
}
1933
1934static void intel_dp_print_rates(struct intel_dp *intel_dp)
1935{
af67009c 1936 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
0336400e
VS
1937 char str[128]; /* FIXME: too big for stack? */
1938
bdbf43d7 1939 if (!drm_debug_enabled(DRM_UT_KMS))
0336400e
VS
1940 return;
1941
55cfc580
JN
1942 snprintf_int_array(str, sizeof(str),
1943 intel_dp->source_rates, intel_dp->num_source_rates);
af67009c 1944 drm_dbg_kms(&i915->drm, "source rates: %s\n", str);
0336400e 1945
68f357cb
JN
1946 snprintf_int_array(str, sizeof(str),
1947 intel_dp->sink_rates, intel_dp->num_sink_rates);
af67009c 1948 drm_dbg_kms(&i915->drm, "sink rates: %s\n", str);
0336400e 1949
975ee5fc
JN
1950 snprintf_int_array(str, sizeof(str),
1951 intel_dp->common_rates, intel_dp->num_common_rates);
af67009c 1952 drm_dbg_kms(&i915->drm, "common rates: %s\n", str);
0336400e
VS
1953}
1954
50fec21a
VS
1955int
1956intel_dp_max_link_rate(struct intel_dp *intel_dp)
1957{
4f360482 1958 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
50fec21a
VS
1959 int len;
1960
e6c0c64a 1961 len = intel_dp_common_len_rate_limit(intel_dp, intel_dp->max_link_rate);
4f360482 1962 if (drm_WARN_ON(&i915->drm, len <= 0))
50fec21a
VS
1963 return 162000;
1964
975ee5fc 1965 return intel_dp->common_rates[len - 1];
50fec21a
VS
1966}
1967
ed4e9c1d
VS
1968int intel_dp_rate_select(struct intel_dp *intel_dp, int rate)
1969{
4f360482 1970 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
8001b754
JN
1971 int i = intel_dp_rate_index(intel_dp->sink_rates,
1972 intel_dp->num_sink_rates, rate);
b5c72b20 1973
4f360482 1974 if (drm_WARN_ON(&i915->drm, i < 0))
b5c72b20
JN
1975 i = 0;
1976
1977 return i;
ed4e9c1d
VS
1978}
1979
94223d04 1980void intel_dp_compute_rate(struct intel_dp *intel_dp, int port_clock,
830de422 1981 u8 *link_bw, u8 *rate_select)
04a60f9f 1982{
68f357cb
JN
1983 /* eDP 1.4 rate select method. */
1984 if (intel_dp->use_rate_select) {
04a60f9f
VS
1985 *link_bw = 0;
1986 *rate_select =
1987 intel_dp_rate_select(intel_dp, port_clock);
1988 } else {
1989 *link_bw = drm_dp_link_rate_to_bw_code(port_clock);
1990 *rate_select = 0;
1991 }
1992}
1993
240999cf 1994static bool intel_dp_source_supports_fec(struct intel_dp *intel_dp,
a4a15777
MN
1995 const struct intel_crtc_state *pipe_config)
1996{
1997 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
1998
9770f220
MTP
1999 /* On TGL, FEC is supported on all Pipes */
2000 if (INTEL_GEN(dev_priv) >= 12)
2001 return true;
2002
2003 if (IS_GEN(dev_priv, 11) && pipe_config->cpu_transcoder != TRANSCODER_A)
2004 return true;
2005
2006 return false;
240999cf
AS
2007}
2008
2009static bool intel_dp_supports_fec(struct intel_dp *intel_dp,
2010 const struct intel_crtc_state *pipe_config)
2011{
2012 return intel_dp_source_supports_fec(intel_dp, pipe_config) &&
2013 drm_dp_sink_supports_fec(intel_dp->fec_capable);
2014}
2015
a4a15777 2016static bool intel_dp_supports_dsc(struct intel_dp *intel_dp,
deaaff49 2017 const struct intel_crtc_state *crtc_state)
a4a15777 2018{
deaaff49
JN
2019 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
2020
2021 if (!intel_dp_is_edp(intel_dp) && !crtc_state->fec_enable)
240999cf
AS
2022 return false;
2023
deaaff49 2024 return intel_dsc_source_support(encoder, crtc_state) &&
a4a15777
MN
2025 drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd);
2026}
2027
bc7ca6a6
VS
2028static bool intel_dp_hdmi_ycbcr420(struct intel_dp *intel_dp,
2029 const struct intel_crtc_state *crtc_state)
2030{
181567aa
VS
2031 return crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
2032 (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444 &&
2033 intel_dp->dfp.ycbcr_444_to_420);
bc7ca6a6
VS
2034}
2035
2036static int intel_dp_hdmi_tmds_clock(struct intel_dp *intel_dp,
2037 const struct intel_crtc_state *crtc_state, int bpc)
2038{
2039 int clock = crtc_state->hw.adjusted_mode.crtc_clock * bpc / 8;
2040
2041 if (intel_dp_hdmi_ycbcr420(intel_dp, crtc_state))
2042 clock /= 2;
2043
2044 return clock;
2045}
2046
2047static bool intel_dp_hdmi_tmds_clock_valid(struct intel_dp *intel_dp,
2048 const struct intel_crtc_state *crtc_state, int bpc)
2049{
2050 int tmds_clock = intel_dp_hdmi_tmds_clock(intel_dp, crtc_state, bpc);
2051
2052 if (intel_dp->dfp.min_tmds_clock &&
2053 tmds_clock < intel_dp->dfp.min_tmds_clock)
2054 return false;
2055
2056 if (intel_dp->dfp.max_tmds_clock &&
2057 tmds_clock > intel_dp->dfp.max_tmds_clock)
2058 return false;
2059
2060 return true;
2061}
2062
2063static bool intel_dp_hdmi_deep_color_possible(struct intel_dp *intel_dp,
2064 const struct intel_crtc_state *crtc_state,
2065 int bpc)
2066{
bc7ca6a6 2067
181567aa
VS
2068 return intel_hdmi_deep_color_possible(crtc_state, bpc,
2069 intel_dp->has_hdmi_sink,
2070 intel_dp_hdmi_ycbcr420(intel_dp, crtc_state)) &&
bc7ca6a6
VS
2071 intel_dp_hdmi_tmds_clock_valid(intel_dp, crtc_state, bpc);
2072}
2073
/*
 * Compute the maximum link bpp for this crtc state: start from the pipe
 * bpp, clamp to the downstream facing port's max bpc (walking down in
 * steps of 2 bpc until the TMDS clock fits), and finally clamp eDP
 * panels to a VBT-provided bpp when the EDID reports none.
 */
static int intel_dp_max_bpp(struct intel_dp *intel_dp,
			    const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	int bpp, bpc;

	bpc = crtc_state->pipe_bpp / 3;

	if (intel_dp->dfp.max_bpc)
		bpc = min_t(int, bpc, intel_dp->dfp.max_bpc);

	if (intel_dp->dfp.min_tmds_clock) {
		/* Step down 12 -> 10 bpc while deep color doesn't fit;
		 * falls through to 8 bpc if neither works. */
		for (; bpc >= 10; bpc -= 2) {
			if (intel_dp_hdmi_deep_color_possible(intel_dp, crtc_state, bpc))
				break;
		}
	}

	bpp = bpc * 3;
	if (intel_dp_is_edp(intel_dp)) {
		/* Get bpp from vbt only for panels that dont have bpp in edid */
		if (intel_connector->base.display_info.bpc == 0 &&
		    dev_priv->vbt.edp.bpp && dev_priv->vbt.edp.bpp < bpp) {
			drm_dbg_kms(&dev_priv->drm,
				    "clamping bpp for eDP panel to BIOS-provided %i\n",
				    dev_priv->vbt.edp.bpp);
			bpp = dev_priv->vbt.edp.bpp;
		}
	}

	return bpp;
}
2107
/* Adjust link config limits based on compliance test requests. */
void
intel_dp_adjust_compliance_config(struct intel_dp *intel_dp,
				  struct intel_crtc_state *pipe_config,
				  struct link_config_limits *limits)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	/* For DP Compliance we override the computed bpp for the pipe */
	if (intel_dp->compliance.test_data.bpc != 0) {
		int bpp = 3 * intel_dp->compliance.test_data.bpc;

		/* Pin both limits so only the requested bpp can be chosen. */
		limits->min_bpp = limits->max_bpp = bpp;
		/* 18 bpp (6 bpc) must not be dithered per the test spec. */
		pipe_config->dither_force_disable = bpp == 6 * 3;

		drm_dbg_kms(&i915->drm, "Setting pipe_bpp to %d\n", bpp);
	}

	/* Use values requested by Compliance Test Request */
	if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
		int index;

		/* Validate the compliance test data since max values
		 * might have changed due to link train fallback.
		 */
		if (intel_dp_link_params_valid(intel_dp, intel_dp->compliance.test_link_rate,
					       intel_dp->compliance.test_lane_count)) {
			index = intel_dp_rate_index(intel_dp->common_rates,
						    intel_dp->num_common_rates,
						    intel_dp->compliance.test_link_rate);
			if (index >= 0)
				limits->min_clock = limits->max_clock = index;
			limits->min_lane_count = limits->max_lane_count =
				intel_dp->compliance.test_lane_count;
		}
	}
}
2145
/* Optimize link config in order: max bpp, min clock, min lanes */
static int
intel_dp_compute_link_config_wide(struct intel_dp *intel_dp,
				  struct intel_crtc_state *pipe_config,
				  const struct link_config_limits *limits)
{
	struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	int bpp, clock, lane_count;
	int mode_rate, link_clock, link_avail;

	/*
	 * Loop nesting is the policy: highest bpp is tried first, then the
	 * slowest link clock, then the fewest lanes (doubling each step).
	 * The first combination whose bandwidth fits the mode wins.
	 */
	for (bpp = limits->max_bpp; bpp >= limits->min_bpp; bpp -= 2 * 3) {
		int output_bpp = intel_dp_output_bpp(pipe_config->output_format, bpp);

		mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
						   output_bpp);

		for (clock = limits->min_clock; clock <= limits->max_clock; clock++) {
			for (lane_count = limits->min_lane_count;
			     lane_count <= limits->max_lane_count;
			     lane_count <<= 1) {
				link_clock = intel_dp->common_rates[clock];
				link_avail = intel_dp_max_data_rate(link_clock,
								    lane_count);

				if (mode_rate <= link_avail) {
					pipe_config->lane_count = lane_count;
					pipe_config->pipe_bpp = bpp;
					pipe_config->port_clock = link_clock;

					return 0;
				}
			}
		}
	}

	/* No bpp/clock/lane combination can carry this mode. */
	return -EINVAL;
}
2183
a4a15777
MN
2184static int intel_dp_dsc_compute_bpp(struct intel_dp *intel_dp, u8 dsc_max_bpc)
2185{
2186 int i, num_bpc;
2187 u8 dsc_bpc[3] = {0};
2188
2189 num_bpc = drm_dp_dsc_sink_supported_input_bpcs(intel_dp->dsc_dpcd,
2190 dsc_bpc);
2191 for (i = 0; i < num_bpc; i++) {
2192 if (dsc_max_bpc >= dsc_bpc[i])
2193 return dsc_bpc[i] * 3;
2194 }
2195
2196 return 0;
2197}
2198
/* Lowest DSC version we negotiate down to when the sink supports more. */
#define DSC_SUPPORTED_VERSION_MIN		1

/*
 * Fill the VDSC config for this crtc state from the sink's DSC DPCD:
 * slice height, DSC version, RGB conversion capability, line buffer
 * depth and block prediction, then compute the rate-control parameters.
 * Returns 0 on success or a negative error code.
 */
static int intel_dp_dsc_compute_params(struct intel_encoder *encoder,
				       struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
	u8 line_buf_depth;
	int ret;

	ret = intel_dsc_compute_params(encoder, crtc_state);
	if (ret)
		return ret;

	/*
	 * Slice Height of 8 works for all currently available panels. So start
	 * with that if pic_height is an integral multiple of 8. Eventually add
	 * logic to try multiple slice heights.
	 */
	if (vdsc_cfg->pic_height % 8 == 0)
		vdsc_cfg->slice_height = 8;
	else if (vdsc_cfg->pic_height % 4 == 0)
		vdsc_cfg->slice_height = 4;
	else
		vdsc_cfg->slice_height = 2;

	/* Extract the sink's DSC major.minor version from the DPCD. */
	vdsc_cfg->dsc_version_major =
		(intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] &
		 DP_DSC_MAJOR_MASK) >> DP_DSC_MAJOR_SHIFT;
	vdsc_cfg->dsc_version_minor =
		min(DSC_SUPPORTED_VERSION_MIN,
		    (intel_dp->dsc_dpcd[DP_DSC_REV - DP_DSC_SUPPORT] &
		     DP_DSC_MINOR_MASK) >> DP_DSC_MINOR_SHIFT);

	vdsc_cfg->convert_rgb = intel_dp->dsc_dpcd[DP_DSC_DEC_COLOR_FORMAT_CAP - DP_DSC_SUPPORT] &
		DP_DSC_RGB;

	line_buf_depth = drm_dp_dsc_sink_line_buf_depth(intel_dp->dsc_dpcd);
	if (!line_buf_depth) {
		drm_dbg_kms(&i915->drm,
			    "DSC Sink Line Buffer Depth invalid\n");
		return -EINVAL;
	}

	/* DSC 1.2 and 1.1 encode the maximum line-buffer depth differently. */
	if (vdsc_cfg->dsc_version_minor == 2)
		vdsc_cfg->line_buf_depth = (line_buf_depth == DSC_1_2_MAX_LINEBUF_DEPTH_BITS) ?
			DSC_1_2_MAX_LINEBUF_DEPTH_VAL : line_buf_depth;
	else
		vdsc_cfg->line_buf_depth = (line_buf_depth > DSC_1_1_MAX_LINEBUF_DEPTH_BITS) ?
			DSC_1_1_MAX_LINEBUF_DEPTH_BITS : line_buf_depth;

	vdsc_cfg->block_pred_enable =
		intel_dp->dsc_dpcd[DP_DSC_BLK_PREDICTION_SUPPORT - DP_DSC_SUPPORT] &
		DP_DSC_BLK_PREDICTION_IS_SUPPORTED;

	return drm_dsc_compute_rc_parameters(vdsc_cfg);
}
2257
/*
 * Configure DSC for this crtc state: decide FEC, validate DSC support,
 * pick an input bpp, program max link rate/lane count, compute the
 * compressed bpp and slice count (eDP vs DP paths differ), decide
 * whether two VDSC engines are needed, and finally compute the DSC
 * parameters. Returns 0 on success or a negative error code.
 */
static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
				       struct intel_crtc_state *pipe_config,
				       struct drm_connector_state *conn_state,
				       struct link_config_limits *limits)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	const struct drm_display_mode *adjusted_mode =
		&pipe_config->hw.adjusted_mode;
	u8 dsc_max_bpc;
	int pipe_bpp;
	int ret;

	/* FEC is required for DSC on non-eDP links. */
	pipe_config->fec_enable = !intel_dp_is_edp(intel_dp) &&
		intel_dp_supports_fec(intel_dp, pipe_config);

	if (!intel_dp_supports_dsc(intel_dp, pipe_config))
		return -EINVAL;

	/* Max DSC Input BPC for ICL is 10 and for TGL+ is 12 */
	if (INTEL_GEN(dev_priv) >= 12)
		dsc_max_bpc = min_t(u8, 12, conn_state->max_requested_bpc);
	else
		dsc_max_bpc = min_t(u8, 10,
				    conn_state->max_requested_bpc);

	pipe_bpp = intel_dp_dsc_compute_bpp(intel_dp, dsc_max_bpc);

	/* Min Input BPC for ICL+ is 8 */
	if (pipe_bpp < 8 * 3) {
		drm_dbg_kms(&dev_priv->drm,
			    "No DSC support for less than 8bpc\n");
		return -EINVAL;
	}

	/*
	 * For now enable DSC for max bpp, max link rate, max lane count.
	 * Optimize this later for the minimum possible link rate/lane count
	 * with DSC enabled for the requested mode.
	 */
	pipe_config->pipe_bpp = pipe_bpp;
	pipe_config->port_clock = intel_dp->common_rates[limits->max_clock];
	pipe_config->lane_count = limits->max_lane_count;

	if (intel_dp_is_edp(intel_dp)) {
		/* eDP: the sink reports its output bpp directly (4.4 fixed
		 * point in the DPCD, hence the >> 4). */
		pipe_config->dsc.compressed_bpp =
			min_t(u16, drm_edp_dsc_sink_output_bpp(intel_dp->dsc_dpcd) >> 4,
			      pipe_config->pipe_bpp);
		pipe_config->dsc.slice_count =
			drm_dp_dsc_sink_max_slice_count(intel_dp->dsc_dpcd,
							true);
	} else {
		u16 dsc_max_output_bpp;
		u8 dsc_dp_slice_count;

		/* DP: derive the achievable compressed bpp and slice count
		 * from the link and mode parameters. */
		dsc_max_output_bpp =
			intel_dp_dsc_get_output_bpp(dev_priv,
						    pipe_config->port_clock,
						    pipe_config->lane_count,
						    adjusted_mode->crtc_clock,
						    adjusted_mode->crtc_hdisplay);
		dsc_dp_slice_count =
			intel_dp_dsc_get_slice_count(intel_dp,
						     adjusted_mode->crtc_clock,
						     adjusted_mode->crtc_hdisplay);
		if (!dsc_max_output_bpp || !dsc_dp_slice_count) {
			drm_dbg_kms(&dev_priv->drm,
				    "Compressed BPP/Slice Count not supported\n");
			return -EINVAL;
		}
		pipe_config->dsc.compressed_bpp = min_t(u16,
							dsc_max_output_bpp >> 4,
							pipe_config->pipe_bpp);
		pipe_config->dsc.slice_count = dsc_dp_slice_count;
	}
	/*
	 * VDSC engine operates at 1 Pixel per clock, so if peak pixel rate
	 * is greater than the maximum Cdclock and if slice count is even
	 * then we need to use 2 VDSC instances.
	 */
	if (adjusted_mode->crtc_clock > dev_priv->max_cdclk_freq) {
		if (pipe_config->dsc.slice_count > 1) {
			pipe_config->dsc.dsc_split = true;
		} else {
			drm_dbg_kms(&dev_priv->drm,
				    "Cannot split stream to use 2 VDSC instances\n");
			return -EINVAL;
		}
	}

	ret = intel_dp_dsc_compute_params(&dig_port->base, pipe_config);
	if (ret < 0) {
		drm_dbg_kms(&dev_priv->drm,
			    "Cannot compute valid DSC parameters for Input Bpp = %d "
			    "Compressed BPP = %d\n",
			    pipe_config->pipe_bpp,
			    pipe_config->dsc.compressed_bpp);
		return ret;
	}

	pipe_config->dsc.compression_enable = true;
	drm_dbg_kms(&dev_priv->drm, "DP DSC computed with Input Bpp = %d "
		    "Compressed Bpp = %d Slice Count = %d\n",
		    pipe_config->pipe_bpp,
		    pipe_config->dsc.compressed_bpp,
		    pipe_config->dsc.slice_count);

	return 0;
}
2367
204474a6 2368static int
981a63eb 2369intel_dp_compute_link_config(struct intel_encoder *encoder,
a4a15777
MN
2370 struct intel_crtc_state *pipe_config,
2371 struct drm_connector_state *conn_state)
a4fc5ed6 2372{
af67009c 2373 struct drm_i915_private *i915 = to_i915(encoder->base.dev);
0c1abaa7
VS
2374 const struct drm_display_mode *adjusted_mode =
2375 &pipe_config->hw.adjusted_mode;
b7d02c3a 2376 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
7c2781e4 2377 struct link_config_limits limits;
94ca719e 2378 int common_len;
204474a6 2379 int ret;
7c2781e4 2380
975ee5fc 2381 common_len = intel_dp_common_len_rate_limit(intel_dp,
e6c0c64a 2382 intel_dp->max_link_rate);
a8f3ef61
SJ
2383
2384 /* No common link rates between source and sink */
3a47ae20 2385 drm_WARN_ON(encoder->base.dev, common_len <= 0);
a8f3ef61 2386
7c2781e4
JN
2387 limits.min_clock = 0;
2388 limits.max_clock = common_len - 1;
2389
2390 limits.min_lane_count = 1;
2391 limits.max_lane_count = intel_dp_max_lane_count(intel_dp);
2392
f1bce832 2393 limits.min_bpp = intel_dp_min_bpp(pipe_config->output_format);
bc7ca6a6 2394 limits.max_bpp = intel_dp_max_bpp(intel_dp, pipe_config);
a4fc5ed6 2395
f11cb1c1 2396 if (intel_dp_is_edp(intel_dp)) {
344c5bbc
JN
2397 /*
2398 * Use the maximum clock and number of lanes the eDP panel
f11cb1c1
JN
2399 * advertizes being capable of. The panels are generally
2400 * designed to support only a single clock and lane
2401 * configuration, and typically these values correspond to the
2402 * native resolution of the panel.
344c5bbc 2403 */
7c2781e4
JN
2404 limits.min_lane_count = limits.max_lane_count;
2405 limits.min_clock = limits.max_clock;
7984211e 2406 }
657445fe 2407
a4971453
JN
2408 intel_dp_adjust_compliance_config(intel_dp, pipe_config, &limits);
2409
af67009c
JN
2410 drm_dbg_kms(&i915->drm, "DP link computation with max lane count %i "
2411 "max rate %d max bpp %d pixel clock %iKHz\n",
2412 limits.max_lane_count,
2413 intel_dp->common_rates[limits.max_clock],
2414 limits.max_bpp, adjusted_mode->crtc_clock);
7c2781e4 2415
f11cb1c1
JN
2416 /*
2417 * Optimize for slow and wide. This is the place to add alternative
2418 * optimization policy.
2419 */
2420 ret = intel_dp_compute_link_config_wide(intel_dp, pipe_config, &limits);
a4a15777
MN
2421
2422 /* enable compression if the mode doesn't fit available BW */
af67009c 2423 drm_dbg_kms(&i915->drm, "Force DSC en = %d\n", intel_dp->force_dsc_en);
204474a6
LP
2424 if (ret || intel_dp->force_dsc_en) {
2425 ret = intel_dp_dsc_compute_config(intel_dp, pipe_config,
2426 conn_state, &limits);
2427 if (ret < 0)
2428 return ret;
7769db58 2429 }
981a63eb 2430
010663a6 2431 if (pipe_config->dsc.compression_enable) {
af67009c
JN
2432 drm_dbg_kms(&i915->drm,
2433 "DP lane count %d clock %d Input bpp %d Compressed bpp %d\n",
2434 pipe_config->lane_count, pipe_config->port_clock,
2435 pipe_config->pipe_bpp,
2436 pipe_config->dsc.compressed_bpp);
2437
2438 drm_dbg_kms(&i915->drm,
2439 "DP link rate required %i available %i\n",
2440 intel_dp_link_required(adjusted_mode->crtc_clock,
2441 pipe_config->dsc.compressed_bpp),
2442 intel_dp_max_data_rate(pipe_config->port_clock,
2443 pipe_config->lane_count));
a4a15777 2444 } else {
af67009c
JN
2445 drm_dbg_kms(&i915->drm, "DP lane count %d clock %d bpp %d\n",
2446 pipe_config->lane_count, pipe_config->port_clock,
2447 pipe_config->pipe_bpp);
a4a15777 2448
af67009c
JN
2449 drm_dbg_kms(&i915->drm,
2450 "DP link rate required %i available %i\n",
2451 intel_dp_link_required(adjusted_mode->crtc_clock,
2452 pipe_config->pipe_bpp),
2453 intel_dp_max_data_rate(pipe_config->port_clock,
2454 pipe_config->lane_count));
a4a15777 2455 }
204474a6 2456 return 0;
981a63eb
JN
2457}
2458
8e9d645c 2459static int
773bd825 2460intel_dp_ycbcr420_config(struct intel_crtc_state *crtc_state,
4cecc7c0 2461 const struct drm_connector_state *conn_state)
8e9d645c 2462{
4cecc7c0 2463 struct drm_connector *connector = conn_state->connector;
8e9d645c 2464 const struct drm_display_mode *adjusted_mode =
1326a92c 2465 &crtc_state->hw.adjusted_mode;
8e9d645c 2466
181567aa 2467 if (!connector->ycbcr_420_allowed)
8e9d645c
GM
2468 return 0;
2469
773bd825 2470 crtc_state->output_format = intel_dp_output_format(connector, adjusted_mode);
181567aa 2471
773bd825 2472 if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_YCBCR420)
181567aa 2473 return 0;
8e9d645c 2474
d7ff281c 2475 return intel_pch_panel_fitting(crtc_state, conn_state);
8e9d645c
GM
2476}
2477
37aa52bf
VS
2478bool intel_dp_limited_color_range(const struct intel_crtc_state *crtc_state,
2479 const struct drm_connector_state *conn_state)
2480{
2481 const struct intel_digital_connector_state *intel_conn_state =
2482 to_intel_digital_connector_state(conn_state);
2483 const struct drm_display_mode *adjusted_mode =
1326a92c 2484 &crtc_state->hw.adjusted_mode;
37aa52bf 2485
cae154fc
VS
2486 /*
2487 * Our YCbCr output is always limited range.
2488 * crtc_state->limited_color_range only applies to RGB,
2489 * and it must never be set for YCbCr or we risk setting
2490 * some conflicting bits in PIPECONF which will mess up
2491 * the colors on the monitor.
2492 */
2493 if (crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
2494 return false;
2495
37aa52bf
VS
2496 if (intel_conn_state->broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
2497 /*
2498 * See:
2499 * CEA-861-E - 5.1 Default Encoding Parameters
2500 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
2501 */
2502 return crtc_state->pipe_bpp != 18 &&
2503 drm_default_rgb_quant_range(adjusted_mode) ==
2504 HDMI_QUANTIZATION_RANGE_LIMITED;
2505 } else {
2506 return intel_conn_state->broadcast_rgb ==
2507 INTEL_BROADCAST_RGB_LIMITED;
2508 }
2509}
2510
07130981
KV
2511static bool intel_dp_port_has_audio(struct drm_i915_private *dev_priv,
2512 enum port port)
2513{
2514 if (IS_G4X(dev_priv))
2515 return false;
2516 if (INTEL_GEN(dev_priv) < 12 && port == PORT_A)
2517 return false;
2518
2519 return true;
2520}
2521
/*
 * Fill the pixel-encoding/colorimetry fields of a VSC SDP from the crtc
 * and connector state, per the DP 1.4a spec tables (2-118/2-120).
 */
static void intel_dp_compute_vsc_colorimetry(const struct intel_crtc_state *crtc_state,
					     const struct drm_connector_state *conn_state,
					     struct drm_dp_vsc_sdp *vsc)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/*
	 * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118
	 * VSC SDP supporting 3D stereo, PSR2, and Pixel Encoding/
	 * Colorimetry Format indication.
	 */
	vsc->revision = 0x5;
	vsc->length = 0x13;

	/* DP 1.4a spec, Table 2-120 */
	switch (crtc_state->output_format) {
	case INTEL_OUTPUT_FORMAT_YCBCR444:
		vsc->pixelformat = DP_PIXELFORMAT_YUV444;
		break;
	case INTEL_OUTPUT_FORMAT_YCBCR420:
		vsc->pixelformat = DP_PIXELFORMAT_YUV420;
		break;
	case INTEL_OUTPUT_FORMAT_RGB:
	default:
		vsc->pixelformat = DP_PIXELFORMAT_RGB;
	}

	/* Map the connector's colorspace property to the DP colorimetry code. */
	switch (conn_state->colorspace) {
	case DRM_MODE_COLORIMETRY_BT709_YCC:
		vsc->colorimetry = DP_COLORIMETRY_BT709_YCC;
		break;
	case DRM_MODE_COLORIMETRY_XVYCC_601:
		vsc->colorimetry = DP_COLORIMETRY_XVYCC_601;
		break;
	case DRM_MODE_COLORIMETRY_XVYCC_709:
		vsc->colorimetry = DP_COLORIMETRY_XVYCC_709;
		break;
	case DRM_MODE_COLORIMETRY_SYCC_601:
		vsc->colorimetry = DP_COLORIMETRY_SYCC_601;
		break;
	case DRM_MODE_COLORIMETRY_OPYCC_601:
		vsc->colorimetry = DP_COLORIMETRY_OPYCC_601;
		break;
	case DRM_MODE_COLORIMETRY_BT2020_CYCC:
		vsc->colorimetry = DP_COLORIMETRY_BT2020_CYCC;
		break;
	case DRM_MODE_COLORIMETRY_BT2020_RGB:
		vsc->colorimetry = DP_COLORIMETRY_BT2020_RGB;
		break;
	case DRM_MODE_COLORIMETRY_BT2020_YCC:
		vsc->colorimetry = DP_COLORIMETRY_BT2020_YCC;
		break;
	case DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65:
	case DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER:
		vsc->colorimetry = DP_COLORIMETRY_DCI_P3_RGB;
		break;
	default:
		/*
		 * RGB->YCBCR color conversion uses the BT.709
		 * color space.
		 */
		if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
			vsc->colorimetry = DP_COLORIMETRY_BT709_YCC;
		else
			vsc->colorimetry = DP_COLORIMETRY_DEFAULT;
		break;
	}

	vsc->bpc = crtc_state->pipe_bpp / 3;

	/* only RGB pixelformat supports 6 bpc */
	drm_WARN_ON(&dev_priv->drm,
		    vsc->bpc == 6 && vsc->pixelformat != DP_PIXELFORMAT_RGB);

	/* all YCbCr are always limited range */
	vsc->dynamic_range = DP_DYNAMIC_RANGE_CTA;
	vsc->content_type = DP_CONTENT_TYPE_NOT_DEFINED;
}
2601
2602static void intel_dp_compute_vsc_sdp(struct intel_dp *intel_dp,
2603 struct intel_crtc_state *crtc_state,
2604 const struct drm_connector_state *conn_state)
2605{
2606 struct drm_dp_vsc_sdp *vsc = &crtc_state->infoframes.vsc;
2607
cafac5a9
GM
2608 /* When a crtc state has PSR, VSC SDP will be handled by PSR routine */
2609 if (crtc_state->has_psr)
9799c4c3
GM
2610 return;
2611
2612 if (!intel_dp_needs_vsc_sdp(crtc_state, conn_state))
2613 return;
2614
2615 crtc_state->infoframes.enable |= intel_hdmi_infoframe_enable(DP_SDP_VSC);
2616 vsc->sdp_type = DP_SDP_VSC;
2617 intel_dp_compute_vsc_colorimetry(crtc_state, conn_state,
2618 &crtc_state->infoframes.vsc);
2619}
2620
/*
 * Build the PSR flavour of the VSC SDP: the header revision/length
 * depend on whether PSR2 is active and whether colorimetry data must be
 * carried alongside it.
 */
void intel_dp_compute_psr_vsc_sdp(struct intel_dp *intel_dp,
				  const struct intel_crtc_state *crtc_state,
				  const struct drm_connector_state *conn_state,
				  struct drm_dp_vsc_sdp *vsc)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	vsc->sdp_type = DP_SDP_VSC;

	if (dev_priv->psr.psr2_enabled) {
		if (dev_priv->psr.colorimetry_support &&
		    intel_dp_needs_vsc_sdp(crtc_state, conn_state)) {
			/* [PSR2, +Colorimetry] */
			intel_dp_compute_vsc_colorimetry(crtc_state, conn_state,
							 vsc);
		} else {
			/*
			 * [PSR2, -Colorimetry]
			 * Prepare VSC Header for SU as per eDP 1.4 spec, Table 6-11
			 * 3D stereo + PSR/PSR2 + Y-coordinate.
			 */
			vsc->revision = 0x4;
			vsc->length = 0xe;
		}
	} else {
		/*
		 * [PSR1]
		 * Prepare VSC Header for SU as per DP 1.4 spec, Table 2-118
		 * VSC SDP supporting 3D stereo + PSR (applies to eDP v1.3 or
		 * higher).
		 */
		vsc->revision = 0x2;
		vsc->length = 0x8;
	}
}
2656
d1eed96d
GM
2657static void
2658intel_dp_compute_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp,
2659 struct intel_crtc_state *crtc_state,
2660 const struct drm_connector_state *conn_state)
2661{
2662 int ret;
2663 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
2664 struct hdmi_drm_infoframe *drm_infoframe = &crtc_state->infoframes.drm.drm;
2665
2666 if (!conn_state->hdr_output_metadata)
2667 return;
2668
2669 ret = drm_hdmi_infoframe_set_hdr_metadata(drm_infoframe, conn_state);
2670
2671 if (ret) {
2672 drm_dbg_kms(&dev_priv->drm, "couldn't set HDR metadata in infoframe\n");
2673 return;
2674 }
2675
2676 crtc_state->infoframes.enable |=
2677 intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GAMUT_METADATA);
2678}
2679
/*
 * Enable DRRS (seamless dynamic refresh rate switching) in the crtc
 * state when the panel provides a downclock mode, and compute the M/N
 * values for it. Must run after intel_psr_compute_config() (see below).
 */
static void
intel_dp_drrs_compute_config(struct intel_dp *intel_dp,
			     struct intel_crtc_state *pipe_config,
			     int output_bpp, bool constant_n)
{
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	/*
	 * DRRS and PSR can't be enable together, so giving preference to PSR
	 * as it allows more power-savings by complete shutting down display,
	 * so to guarantee this, intel_dp_drrs_compute_config() must be called
	 * after intel_psr_compute_config().
	 */
	if (pipe_config->has_psr)
		return;

	if (!intel_connector->panel.downclock_mode ||
	    dev_priv->drrs.type != SEAMLESS_DRRS_SUPPORT)
		return;

	pipe_config->has_drrs = true;
	/* Second M/N pair (dp_m2_n2) carries the downclocked link timing. */
	intel_link_compute_m_n(output_bpp, pipe_config->lane_count,
			       intel_connector->panel.downclock_mode->clock,
			       pipe_config->port_clock, &pipe_config->dp_m2_n2,
			       constant_n, pipe_config->fec_enable);
}
2707
/*
 * Compute the full crtc/encoder state for a DP output: output format,
 * audio, panel fitting (eDP), link configuration (rates/lanes/bpp/DSC),
 * color range, link M/N values and the PSR/DRRS/SDP metadata.
 *
 * Returns 0 on success or a negative errno if the requested mode cannot
 * be supported on this port.  NOTE: the call ordering below matters -
 * DRRS must be computed after PSR (see intel_dp_drrs_compute_config()).
 */
int
intel_dp_compute_config(struct intel_encoder *encoder,
			struct intel_crtc_state *pipe_config,
			struct drm_connector_state *conn_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_lspcon *lspcon = enc_to_intel_lspcon(encoder);
	enum port port = encoder->port;
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct intel_digital_connector_state *intel_conn_state =
		to_intel_digital_connector_state(conn_state);
	bool constant_n = drm_dp_has_quirk(&intel_dp->desc, 0,
					   DP_DPCD_QUIRK_CONSTANT_N);
	int ret = 0, output_bpp;

	/* Non-DDI PCH platforms route all ports except A through the PCH. */
	if (HAS_PCH_SPLIT(dev_priv) && !HAS_DDI(dev_priv) && port != PORT_A)
		pipe_config->has_pch_encoder = true;

	pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;

	/* YCbCr 4:2:0 is configured by the LSPCON when one is active. */
	if (lspcon->active)
		lspcon_ycbcr420_config(&intel_connector->base, pipe_config);
	else
		ret = intel_dp_ycbcr420_config(pipe_config, conn_state);
	if (ret)
		return ret;

	if (!intel_dp_port_has_audio(dev_priv, port))
		pipe_config->has_audio = false;
	else if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO)
		pipe_config->has_audio = intel_dp->has_audio;
	else
		pipe_config->has_audio = intel_conn_state->force_audio == HDMI_AUDIO_ON;

	/* eDP: force the panel's fixed mode and apply panel fitting. */
	if (intel_dp_is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
				       adjusted_mode);

		if (HAS_GMCH(dev_priv))
			ret = intel_gmch_panel_fitting(pipe_config, conn_state);
		else
			ret = intel_pch_panel_fitting(pipe_config, conn_state);
		if (ret)
			return ret;
	}

	/* Mode flags this hardware cannot drive over DP. */
	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
		return -EINVAL;

	if (HAS_GMCH(dev_priv) &&
	    adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
		return -EINVAL;

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
		return -EINVAL;

	if (intel_dp_hdisplay_bad(dev_priv, adjusted_mode->crtc_hdisplay))
		return -EINVAL;

	ret = intel_dp_compute_link_config(encoder, pipe_config, conn_state);
	if (ret < 0)
		return ret;

	pipe_config->limited_color_range =
		intel_dp_limited_color_range(pipe_config, conn_state);

	/* Link bandwidth is based on the compressed bpp when DSC is on. */
	if (pipe_config->dsc.compression_enable)
		output_bpp = pipe_config->dsc.compressed_bpp;
	else
		output_bpp = intel_dp_output_bpp(pipe_config->output_format,
						 pipe_config->pipe_bpp);

	intel_link_compute_m_n(output_bpp,
			       pipe_config->lane_count,
			       adjusted_mode->crtc_clock,
			       pipe_config->port_clock,
			       &pipe_config->dp_m_n,
			       constant_n, pipe_config->fec_enable);

	if (!HAS_DDI(dev_priv))
		intel_dp_set_clock(encoder, pipe_config);

	/* PSR first, then DRRS - DRRS yields to PSR (see helper comment). */
	intel_psr_compute_config(intel_dp, pipe_config);
	intel_dp_drrs_compute_config(intel_dp, pipe_config, output_bpp,
				     constant_n);
	intel_dp_compute_vsc_sdp(intel_dp, pipe_config, conn_state);
	intel_dp_compute_hdr_metadata_infoframe_sdp(intel_dp, pipe_config, conn_state);

	return 0;
}
2800
/*
 * Record the link rate and lane count to be used for the next link
 * training, and mark the link as untrained so training actually runs.
 */
void intel_dp_set_link_params(struct intel_dp *intel_dp,
			      int link_rate, int lane_count)
{
	intel_dp->link_trained = false;
	intel_dp->link_rate = link_rate;
	intel_dp->lane_count = lane_count;
}
2808
/*
 * Pre-compute the DP port register value (intel_dp->DP) for the upcoming
 * modeset: link parameters, sync polarity, enhanced framing, pipe select
 * and (CPT PCH) the TRANS_DP_CTL bits.  Nothing is enabled here; the
 * value is written/armed by the enable path.
 */
static void intel_dp_prepare(struct intel_encoder *encoder,
			     const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	enum port port = encoder->port;
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
	const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;

	intel_dp_set_link_params(intel_dp,
				 pipe_config->port_clock,
				 pipe_config->lane_count);

	/*
	 * There are four kinds of DP registers:
	 *
	 *	IBX PCH
	 *	SNB CPU
	 *	IVB CPU
	 *	CPT PCH
	 *
	 * IBX PCH and CPU are the same for almost everything,
	 * except that the CPU DP PLL is configured in this
	 * register
	 *
	 * CPT PCH is quite different, having many bits moved
	 * to the TRANS_DP_CTL register instead. That
	 * configuration happens (oddly) in ilk_pch_enable
	 */

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg) & DP_DETECTED;

	/* Handle DP bits in common between all three register formats */
	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	intel_dp->DP |= DP_PORT_WIDTH(pipe_config->lane_count);

	/* Split out the IBX/CPU vs CPT settings */

	if (IS_IVYBRIDGE(dev_priv) && port == PORT_A) {
		/* IVB CPU port A: sync polarity and pipe select in DP reg. */
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		intel_dp->DP |= DP_PIPE_SEL_IVB(crtc->pipe);
	} else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
		/* CPT PCH: enhanced framing lives in TRANS_DP_CTL instead. */
		u32 trans_dp;

		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		trans_dp = intel_de_read(dev_priv, TRANS_DP_CTL(crtc->pipe));
		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			trans_dp |= TRANS_DP_ENH_FRAMING;
		else
			trans_dp &= ~TRANS_DP_ENH_FRAMING;
		intel_de_write(dev_priv, TRANS_DP_CTL(crtc->pipe), trans_dp);
	} else {
		/* IBX PCH / CPU (g4x, VLV, CHV) register layout. */
		if (IS_G4X(dev_priv) && pipe_config->limited_color_range)
			intel_dp->DP |= DP_COLOR_RANGE_16_235;

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF;

		if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		if (IS_CHERRYVIEW(dev_priv))
			intel_dp->DP |= DP_PIPE_SEL_CHV(crtc->pipe);
		else
			intel_dp->DP |= DP_PIPE_SEL(crtc->pipe);
	}
}
2891
/* PP_STATUS mask/value pairs describing the panel power sequencer states
 * waited for below: fully on and idle, fully off, and ready for a new
 * power cycle. */
#define IDLE_ON_MASK		(PP_ON | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
#define IDLE_ON_VALUE		(PP_ON | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)

#define IDLE_OFF_MASK		(PP_ON | PP_SEQUENCE_MASK | 0 | 0)
#define IDLE_OFF_VALUE		(0     | PP_SEQUENCE_NONE | 0 | 0)

#define IDLE_CYCLE_MASK		(PP_ON | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE	(0     | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)

static void intel_pps_verify_state(struct intel_dp *intel_dp);

/*
 * Poll the panel power status register until (status & mask) == value,
 * with a 5 second timeout.  Caller must hold pps_mutex.
 */
static void wait_panel_status(struct intel_dp *intel_dp,
			      u32 mask,
			      u32 value)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	i915_reg_t pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	intel_pps_verify_state(intel_dp);

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	drm_dbg_kms(&dev_priv->drm,
		    "mask %08x value %08x status %08x control %08x\n",
		    mask, value,
		    intel_de_read(dev_priv, pp_stat_reg),
		    intel_de_read(dev_priv, pp_ctrl_reg));

	/* 5000 ms timeout; a timeout is logged but not fatal. */
	if (intel_de_wait_for_register(dev_priv, pp_stat_reg,
				       mask, value, 5000))
		drm_err(&dev_priv->drm,
			"Panel status timeout: status %08x control %08x\n",
			intel_de_read(dev_priv, pp_stat_reg),
			intel_de_read(dev_priv, pp_ctrl_reg));

	drm_dbg_kms(&dev_priv->drm, "Wait complete\n");
}
32ce697c 2932
4be73780 2933static void wait_panel_on(struct intel_dp *intel_dp)
99ea7127 2934{
af67009c
JN
2935 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
2936
2937 drm_dbg_kms(&i915->drm, "Wait for panel power on\n");
4be73780 2938 wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
bd943159
KP
2939}
2940
4be73780 2941static void wait_panel_off(struct intel_dp *intel_dp)
99ea7127 2942{
af67009c
JN
2943 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
2944
2945 drm_dbg_kms(&i915->drm, "Wait for panel power off time\n");
4be73780 2946 wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
99ea7127
KP
2947}
2948
/*
 * Enforce the panel's minimum power-cycle (t11_t12) delay: if the panel
 * was switched off less than panel_power_cycle_delay ms ago, sleep out
 * the remainder, then wait for the sequencer to be idle and ready.
 */
static void wait_panel_power_cycle(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	ktime_t panel_power_on_time;
	s64 panel_power_off_duration;

	drm_dbg_kms(&i915->drm, "Wait for panel power cycle\n");

	/* take the difference of current time and panel power off time
	 * and then make panel wait for t11_t12 if needed. */
	panel_power_on_time = ktime_get_boottime();
	panel_power_off_duration = ktime_ms_delta(panel_power_on_time, intel_dp->panel_power_off_time);

	/* When we disable the VDD override bit last we have to do the manual
	 * wait. */
	if (panel_power_off_duration < (s64)intel_dp->panel_power_cycle_delay)
		wait_remaining_ms_from_jiffies(jiffies,
					       intel_dp->panel_power_cycle_delay - panel_power_off_duration);

	wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}
2970
/* Honour the panel's power-on -> backlight-on delay (T8). */
static void wait_backlight_on(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_power_on,
				       intel_dp->backlight_on_delay);
}
2976
/* Honour the panel's backlight-off -> power-off delay (T9). */
static void edp_wait_backlight_off(struct intel_dp *intel_dp)
{
	wait_remaining_ms_from_jiffies(intel_dp->last_backlight_off,
				       intel_dp->backlight_off_delay);
}
99ea7127 2982
/* Read the current pp_control value, unlocking the register if it
 * is locked
 */

static u32 ilk_get_pp_control(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 control;

	lockdep_assert_held(&dev_priv->pps_mutex);

	control = intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp));
	/*
	 * Replace the write-protect key with the unlock value so a
	 * subsequent write of this value takes effect.  On DDI platforms
	 * the register has no lock, hence the WARN condition.
	 */
	if (drm_WARN_ON(&dev_priv->drm, !HAS_DDI(dev_priv) &&
			(control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) {
		control &= ~PANEL_UNLOCK_MASK;
		control |= PANEL_UNLOCK_REGS;
	}
	return control;
}
3002
/*
 * Must be paired with edp_panel_vdd_off().
 * Must hold pps_mutex around the whole on/off sequence.
 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
 *
 * Force panel VDD on so the AUX channel can be used before full panel
 * power-up.  Returns true if VDD was not already requested, i.e. the
 * caller owes a matching vdd-off.
 */
static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	u32 pp;
	i915_reg_t pp_stat_reg, pp_ctrl_reg;
	bool need_to_disable = !intel_dp->want_panel_vdd;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!intel_dp_is_edp(intel_dp))
		return false;

	/* A pending deferred vdd-off must not race with this request. */
	cancel_delayed_work(&intel_dp->panel_vdd_work);
	intel_dp->want_panel_vdd = true;

	/* VDD already up (e.g. still on from a scheduled-off window). */
	if (edp_have_panel_vdd(intel_dp))
		return need_to_disable;

	/* Reference released in edp_panel_vdd_off_sync()/edp_panel_off(). */
	intel_display_power_get(dev_priv,
				intel_aux_power_domain(dig_port));

	drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD on\n",
		    dig_port->base.base.base.id,
		    dig_port->base.base.name);

	if (!edp_have_panel_power(intel_dp))
		wait_panel_power_cycle(intel_dp);

	pp = ilk_get_pp_control(intel_dp);
	pp |= EDP_FORCE_VDD;

	pp_stat_reg = _pp_stat_reg(intel_dp);
	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	intel_de_write(dev_priv, pp_ctrl_reg, pp);
	intel_de_posting_read(dev_priv, pp_ctrl_reg);
	drm_dbg_kms(&dev_priv->drm, "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
		    intel_de_read(dev_priv, pp_stat_reg),
		    intel_de_read(dev_priv, pp_ctrl_reg));
	/*
	 * If the panel wasn't on, delay before accessing aux channel
	 */
	if (!edp_have_panel_power(intel_dp)) {
		drm_dbg_kms(&dev_priv->drm,
			    "[ENCODER:%d:%s] panel power wasn't enabled\n",
			    dig_port->base.base.base.id,
			    dig_port->base.base.name);
		msleep(intel_dp->panel_power_up_delay);
	}

	return need_to_disable;
}
3061
/*
 * Must be paired with intel_edp_panel_vdd_off() or
 * intel_edp_panel_off().
 * Nested calls to these functions are not allowed since
 * we drop the lock. Caller must use some higher level
 * locking to prevent nested calls from other threads.
 */
void intel_edp_panel_vdd_on(struct intel_dp *intel_dp)
{
	intel_wakeref_t wakeref;
	bool vdd;

	if (!intel_dp_is_edp(intel_dp))
		return;

	vdd = false;
	with_pps_lock(intel_dp, wakeref)
		vdd = edp_panel_vdd_on(intel_dp);
	/* At this level VDD must not have been requested already. */
	I915_STATE_WARN(!vdd, "[ENCODER:%d:%s] VDD already requested on\n",
			dp_to_dig_port(intel_dp)->base.base.base.id,
			dp_to_dig_port(intel_dp)->base.base.name);
}
3084
/*
 * Actually drop the VDD force bit and release the AUX power reference.
 * Only legal once nobody wants VDD any more (want_panel_vdd cleared).
 * Caller must hold pps_mutex.
 */
static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port =
		dp_to_dig_port(intel_dp);
	u32 pp;
	i915_reg_t pp_stat_reg, pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	drm_WARN_ON(&dev_priv->drm, intel_dp->want_panel_vdd);

	if (!edp_have_panel_vdd(intel_dp))
		return;

	drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD off\n",
		    dig_port->base.base.base.id,
		    dig_port->base.base.name);

	pp = ilk_get_pp_control(intel_dp);
	pp &= ~EDP_FORCE_VDD;

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp_stat_reg = _pp_stat_reg(intel_dp);

	intel_de_write(dev_priv, pp_ctrl_reg, pp);
	intel_de_posting_read(dev_priv, pp_ctrl_reg);

	/* Make sure sequencer is idle before allowing subsequent activity */
	drm_dbg_kms(&dev_priv->drm, "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
		    intel_de_read(dev_priv, pp_stat_reg),
		    intel_de_read(dev_priv, pp_ctrl_reg));

	/* Dropping VDD with the panel off starts the t11_t12 window. */
	if ((pp & PANEL_POWER_ON) == 0)
		intel_dp->panel_power_off_time = ktime_get_boottime();

	/* Release the reference taken in edp_panel_vdd_on(). */
	intel_display_power_put_unchecked(dev_priv,
					  intel_aux_power_domain(dig_port));
}
5d613501 3124
4be73780 3125static void edp_panel_vdd_work(struct work_struct *__work)
bd943159 3126{
69d93820
CW
3127 struct intel_dp *intel_dp =
3128 container_of(to_delayed_work(__work),
3129 struct intel_dp, panel_vdd_work);
3130 intel_wakeref_t wakeref;
bd943159 3131
69d93820
CW
3132 with_pps_lock(intel_dp, wakeref) {
3133 if (!intel_dp->want_panel_vdd)
3134 edp_panel_vdd_off_sync(intel_dp);
3135 }
bd943159
KP
3136}
3137
aba86890
ID
3138static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
3139{
3140 unsigned long delay;
3141
3142 /*
3143 * Queue the timer to fire a long time from now (relative to the power
3144 * down delay) to keep the panel power up across a sequence of
3145 * operations.
3146 */
3147 delay = msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5);
3148 schedule_delayed_work(&intel_dp->panel_vdd_work, delay);
3149}
3150
/*
 * Must be paired with edp_panel_vdd_on().
 * Must hold pps_mutex around the whole on/off sequence.
 * Can be nested with intel_edp_panel_vdd_{on,off}() calls.
 *
 * Drop this caller's VDD request; turn VDD off immediately (sync) or via
 * the delayed work, which keeps VDD up across bursts of AUX traffic.
 */
static void edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!intel_dp_is_edp(intel_dp))
		return;

	I915_STATE_WARN(!intel_dp->want_panel_vdd, "[ENCODER:%d:%s] VDD not forced on",
			dp_to_dig_port(intel_dp)->base.base.base.id,
			dp_to_dig_port(intel_dp)->base.base.name);

	intel_dp->want_panel_vdd = false;

	if (sync)
		edp_panel_vdd_off_sync(intel_dp);
	else
		edp_panel_vdd_schedule_off(intel_dp);
}
3176
/*
 * Switch panel power on via the power sequencer and wait for the panel
 * to come up.  Caller must hold pps_mutex.  No-op on non-eDP or when
 * the panel is already powered.
 */
static void edp_panel_on(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 pp;
	i915_reg_t pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!intel_dp_is_edp(intel_dp))
		return;

	drm_dbg_kms(&dev_priv->drm, "Turn [ENCODER:%d:%s] panel power on\n",
		    dp_to_dig_port(intel_dp)->base.base.base.id,
		    dp_to_dig_port(intel_dp)->base.base.name);

	if (drm_WARN(&dev_priv->drm, edp_have_panel_power(intel_dp),
		     "[ENCODER:%d:%s] panel power already on\n",
		     dp_to_dig_port(intel_dp)->base.base.base.id,
		     dp_to_dig_port(intel_dp)->base.base.name))
		return;

	wait_panel_power_cycle(intel_dp);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
	pp = ilk_get_pp_control(intel_dp);
	if (IS_GEN(dev_priv, 5)) {
		/* ILK workaround: disable reset around power sequence */
		pp &= ~PANEL_POWER_RESET;
		intel_de_write(dev_priv, pp_ctrl_reg, pp);
		intel_de_posting_read(dev_priv, pp_ctrl_reg);
	}

	pp |= PANEL_POWER_ON;
	if (!IS_GEN(dev_priv, 5))
		pp |= PANEL_POWER_RESET;

	intel_de_write(dev_priv, pp_ctrl_reg, pp);
	intel_de_posting_read(dev_priv, pp_ctrl_reg);

	wait_panel_on(intel_dp);
	/* Timestamp used for the T8 backlight-on delay. */
	intel_dp->last_power_on = jiffies;

	if (IS_GEN(dev_priv, 5)) {
		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
		intel_de_write(dev_priv, pp_ctrl_reg, pp);
		intel_de_posting_read(dev_priv, pp_ctrl_reg);
	}
}
e39b999a 3225
/* Public wrapper: take the PPS lock and power the eDP panel on. */
void intel_edp_panel_on(struct intel_dp *intel_dp)
{
	intel_wakeref_t wakeref;

	if (!intel_dp_is_edp(intel_dp))
		return;

	with_pps_lock(intel_dp, wakeref)
		edp_panel_on(intel_dp);
}
3236
/*
 * Switch panel power off via the power sequencer, clearing the VDD force
 * bit at the same time, and release the AUX power reference taken when
 * VDD was enabled.  Caller must hold pps_mutex and own a VDD request.
 */
static void edp_panel_off(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	u32 pp;
	i915_reg_t pp_ctrl_reg;

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!intel_dp_is_edp(intel_dp))
		return;

	drm_dbg_kms(&dev_priv->drm, "Turn [ENCODER:%d:%s] panel power off\n",
		    dig_port->base.base.base.id, dig_port->base.base.name);

	drm_WARN(&dev_priv->drm, !intel_dp->want_panel_vdd,
		 "Need [ENCODER:%d:%s] VDD to turn off panel\n",
		 dig_port->base.base.base.id, dig_port->base.base.name);

	pp = ilk_get_pp_control(intel_dp);
	/* We need to switch off panel power _and_ force vdd, for otherwise some
	 * panels get very unhappy and cease to work. */
	pp &= ~(PANEL_POWER_ON | PANEL_POWER_RESET | EDP_FORCE_VDD |
		EDP_BLC_ENABLE);

	pp_ctrl_reg = _pp_ctrl_reg(intel_dp);

	intel_dp->want_panel_vdd = false;

	intel_de_write(dev_priv, pp_ctrl_reg, pp);
	intel_de_posting_read(dev_priv, pp_ctrl_reg);

	wait_panel_off(intel_dp);
	/* Timestamp starting the t11_t12 power-cycle window. */
	intel_dp->panel_power_off_time = ktime_get_boottime();

	/* We got a reference when we enabled the VDD. */
	intel_display_power_put_unchecked(dev_priv, intel_aux_power_domain(dig_port));
}
e39b999a 3276
/* Public wrapper: take the PPS lock and power the eDP panel off. */
void intel_edp_panel_off(struct intel_dp *intel_dp)
{
	intel_wakeref_t wakeref;

	if (!intel_dp_is_edp(intel_dp))
		return;

	with_pps_lock(intel_dp, wakeref)
		edp_panel_off(intel_dp);
}
3287
/* Enable backlight in the panel power control. */
static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	intel_wakeref_t wakeref;

	/*
	 * If we enable the backlight right away following a panel power
	 * on, we may see slight flicker as the panel syncs with the eDP
	 * link. So delay a bit to make sure the image is solid before
	 * allowing it to appear.
	 */
	wait_backlight_on(intel_dp);

	with_pps_lock(intel_dp, wakeref) {
		i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
		u32 pp;

		pp = ilk_get_pp_control(intel_dp);
		pp |= EDP_BLC_ENABLE;

		intel_de_write(dev_priv, pp_ctrl_reg, pp);
		intel_de_posting_read(dev_priv, pp_ctrl_reg);
	}
}
3313
/* Enable backlight PWM and backlight PP control. */
void intel_edp_backlight_on(const struct intel_crtc_state *crtc_state,
			    const struct drm_connector_state *conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(conn_state->best_encoder));
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	if (!intel_dp_is_edp(intel_dp))
		return;

	drm_dbg_kms(&i915->drm, "\n");

	/* PWM first, then the PP control backlight-enable bit. */
	intel_panel_enable_backlight(crtc_state, conn_state);
	_intel_edp_backlight_on(intel_dp);
}
3329
3330/* Disable backlight in the panel power control. */
3331static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
32f9d658 3332{
de25eb7f 3333 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
69d93820 3334 intel_wakeref_t wakeref;
32f9d658 3335
1853a9da 3336 if (!intel_dp_is_edp(intel_dp))
f01eca2e
KP
3337 return;
3338
69d93820
CW
3339 with_pps_lock(intel_dp, wakeref) {
3340 i915_reg_t pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
3341 u32 pp;
e39b999a 3342
9eae5e27 3343 pp = ilk_get_pp_control(intel_dp);
69d93820 3344 pp &= ~EDP_BLC_ENABLE;
453c5420 3345
b4e33881
JN
3346 intel_de_write(dev_priv, pp_ctrl_reg, pp);
3347 intel_de_posting_read(dev_priv, pp_ctrl_reg);
69d93820 3348 }
e39b999a
VS
3349
3350 intel_dp->last_backlight_off = jiffies;
f7d2323c 3351 edp_wait_backlight_off(intel_dp);
1250d107 3352}
f7d2323c 3353
/* Disable backlight PP control and backlight PWM. */
void intel_edp_backlight_off(const struct drm_connector_state *old_conn_state)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(old_conn_state->best_encoder));
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	if (!intel_dp_is_edp(intel_dp))
		return;

	drm_dbg_kms(&i915->drm, "\n");

	/* PP control bit first, then the PWM - reverse of the enable path. */
	_intel_edp_backlight_off(intel_dp);
	intel_panel_disable_backlight(old_conn_state);
}
a4fc5ed6 3368
/*
 * Hook for controlling the panel power control backlight through the bl_power
 * sysfs attribute. Take care to handle multiple calls.
 */
static void intel_edp_backlight_power(struct intel_connector *connector,
				      bool enable)
{
	struct drm_i915_private *i915 = to_i915(connector->base.dev);
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	intel_wakeref_t wakeref;
	bool is_enabled;

	is_enabled = false;
	with_pps_lock(intel_dp, wakeref)
		is_enabled = ilk_get_pp_control(intel_dp) & EDP_BLC_ENABLE;
	/* Already in the requested state: avoid redundant transitions. */
	if (is_enabled == enable)
		return;

	drm_dbg_kms(&i915->drm, "panel power control backlight %s\n",
		    enable ? "enable" : "disable");

	if (enable)
		_intel_edp_backlight_on(intel_dp);
	else
		_intel_edp_backlight_off(intel_dp);
}
3395
/* Warn if the DP port enable bit does not match the expected state. */
static void assert_dp_port(struct intel_dp *intel_dp, bool state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
	bool cur_state = intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN;

	I915_STATE_WARN(cur_state != state,
			"[ENCODER:%d:%s] state assertion failure (expected %s, current %s)\n",
			dig_port->base.base.base.id, dig_port->base.base.name,
			onoff(state), onoff(cur_state));
}
#define assert_dp_port_disabled(d) assert_dp_port((d), false)
3408
/* Warn if the eDP PLL enable bit (DP_A) does not match the expected state. */
static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
{
	bool cur_state = intel_de_read(dev_priv, DP_A) & DP_PLL_ENABLE;

	I915_STATE_WARN(cur_state != state,
			"eDP PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_edp_pll_enabled(d) assert_edp_pll((d), true)
#define assert_edp_pll_disabled(d) assert_edp_pll((d), false)
3419
/*
 * Enable the CPU eDP PLL (DP_A) for the given port clock.  Must be called
 * with the pipe disabled, the DP port disabled and the PLL currently off.
 */
static void ilk_edp_pll_on(struct intel_dp *intel_dp,
			   const struct intel_crtc_state *pipe_config)
{
	struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);
	assert_dp_port_disabled(intel_dp);
	assert_edp_pll_disabled(dev_priv);

	drm_dbg_kms(&dev_priv->drm, "enabling eDP PLL for clock %d\n",
		    pipe_config->port_clock);

	/* Program the PLL frequency before enabling it. */
	intel_dp->DP &= ~DP_PLL_FREQ_MASK;

	if (pipe_config->port_clock == 162000)
		intel_dp->DP |= DP_PLL_FREQ_162MHZ;
	else
		intel_dp->DP |= DP_PLL_FREQ_270MHZ;

	intel_de_write(dev_priv, DP_A, intel_dp->DP);
	intel_de_posting_read(dev_priv, DP_A);
	udelay(500);

	/*
	 * [DevILK] Work around required when enabling DP PLL
	 * while a pipe is enabled going to FDI:
	 * 1. Wait for the start of vertical blank on the enabled pipe going to FDI
	 * 2. Program DP PLL enable
	 */
	if (IS_GEN(dev_priv, 5))
		intel_wait_for_vblank_if_active(dev_priv, !crtc->pipe);

	intel_dp->DP |= DP_PLL_ENABLE;

	intel_de_write(dev_priv, DP_A, intel_dp->DP);
	intel_de_posting_read(dev_priv, DP_A);
	udelay(200);
}
3459
/*
 * Disable the CPU eDP PLL (DP_A).  Must be called with the pipe disabled,
 * the DP port disabled and the PLL currently on.
 */
static void ilk_edp_pll_off(struct intel_dp *intel_dp,
			    const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	assert_pipe_disabled(dev_priv, old_crtc_state->cpu_transcoder);
	assert_dp_port_disabled(intel_dp);
	assert_edp_pll_enabled(dev_priv);

	drm_dbg_kms(&dev_priv->drm, "disabling eDP PLL\n");

	intel_dp->DP &= ~DP_PLL_ENABLE;

	intel_de_write(dev_priv, DP_A, intel_dp->DP);
	intel_de_posting_read(dev_priv, DP_A);
	udelay(200);
}
3478
857c416e
VS
3479static bool downstream_hpd_needs_d0(struct intel_dp *intel_dp)
3480{
3481 /*
3482 * DPCD 1.2+ should support BRANCH_DEVICE_CTRL, and thus
3483 * be capable of signalling downstream hpd with a long pulse.
3484 * Whether or not that means D3 is safe to use is not clear,
3485 * but let's assume so until proven otherwise.
3486 *
3487 * FIXME should really check all downstream ports...
3488 */
3489 return intel_dp->dpcd[DP_DPCD_REV] == 0x11 &&
b4c32073 3490 drm_dp_is_branch(intel_dp->dpcd) &&
857c416e
VS
3491 intel_dp->downstream_ports[0] & DP_DS_PORT_HPD;
3492}
3493
/*
 * Enable or disable DSC decompression in the sink via DP_DSC_ENABLE.
 * No-op unless the crtc state has DSC compression enabled; a failed
 * DPCD write is only logged.
 */
void intel_dp_sink_set_decompression_state(struct intel_dp *intel_dp,
					   const struct intel_crtc_state *crtc_state,
					   bool enable)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int ret;

	if (!crtc_state->dsc.compression_enable)
		return;

	ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_DSC_ENABLE,
				 enable ? DP_DECOMPRESSION_EN : 0);
	if (ret < 0)
		drm_dbg_kms(&i915->drm,
			    "Failed to %s sink decompression state\n",
			    enable ? "enable" : "disable");
}
3511
/* If the sink supports it, try to set the power state appropriately */
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	int ret, i;

	/* Should have a valid DPCD by this point */
	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
		return;

	if (mode != DRM_MODE_DPMS_ON) {
		/* Keep D0 when a branch device relies on it for HPD. */
		if (downstream_hpd_needs_d0(intel_dp))
			return;

		ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
					 DP_SET_POWER_D3);
	} else {
		struct intel_lspcon *lspcon = dp_to_lspcon(intel_dp);

		/*
		 * When turning on, we need to retry for 1ms to give the sink
		 * time to wake up.
		 */
		for (i = 0; i < 3; i++) {
			ret = drm_dp_dpcd_writeb(&intel_dp->aux, DP_SET_POWER,
						 DP_SET_POWER_D0);
			if (ret == 1)
				break;
			msleep(1);
		}

		if (ret == 1 && lspcon->active)
			lspcon_wait_pcon_mode(lspcon);
	}

	/* drm_dp_dpcd_writeb() returns the number of bytes written (1). */
	if (ret != 1)
		drm_dbg_kms(&i915->drm, "failed to %s sink power state\n",
			    mode == DRM_MODE_DPMS_ON ? "enable" : "disable");
}
3551
59b74c49
VS
3552static bool cpt_dp_port_selected(struct drm_i915_private *dev_priv,
3553 enum port port, enum pipe *pipe)
3554{
3555 enum pipe p;
3556
3557 for_each_pipe(dev_priv, p) {
b4e33881 3558 u32 val = intel_de_read(dev_priv, TRANS_DP_CTL(p));
59b74c49
VS
3559
3560 if ((val & TRANS_DP_PORT_SEL_MASK) == TRANS_DP_PORT_SEL(port)) {
3561 *pipe = p;
3562 return true;
3563 }
3564 }
3565
bdc6114e
WK
3566 drm_dbg_kms(&dev_priv->drm, "No pipe for DP port %c found\n",
3567 port_name(port));
59b74c49
VS
3568
3569 /* must initialize pipe to something for the asserts */
3570 *pipe = PIPE_A;
3571
3572 return false;
3573}
3574
3575bool intel_dp_port_enabled(struct drm_i915_private *dev_priv,
3576 i915_reg_t dp_reg, enum port port,
3577 enum pipe *pipe)
3578{
3579 bool ret;
3580 u32 val;
3581
b4e33881 3582 val = intel_de_read(dev_priv, dp_reg);
59b74c49
VS
3583
3584 ret = val & DP_PORT_EN;
3585
3586 /* asserts want to know the pipe even if the port is disabled */
3587 if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
3588 *pipe = (val & DP_PIPE_SEL_MASK_IVB) >> DP_PIPE_SEL_SHIFT_IVB;
3589 else if (HAS_PCH_CPT(dev_priv) && port != PORT_A)
3590 ret &= cpt_dp_port_selected(dev_priv, port, pipe);
3591 else if (IS_CHERRYVIEW(dev_priv))
3592 *pipe = (val & DP_PIPE_SEL_MASK_CHV) >> DP_PIPE_SEL_SHIFT_CHV;
3593 else
3594 *pipe = (val & DP_PIPE_SEL_MASK) >> DP_PIPE_SEL_SHIFT;
3595
3596 return ret;
3597}
3598
19d8fe15
DV
3599static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
3600 enum pipe *pipe)
d240f20f 3601{
2f773477 3602 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
b7d02c3a 3603 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
0e6e0be4 3604 intel_wakeref_t wakeref;
6fa9a5ec 3605 bool ret;
6d129bea 3606
0e6e0be4
CW
3607 wakeref = intel_display_power_get_if_enabled(dev_priv,
3608 encoder->power_domain);
3609 if (!wakeref)
6d129bea
ID
3610 return false;
3611
59b74c49
VS
3612 ret = intel_dp_port_enabled(dev_priv, intel_dp->output_reg,
3613 encoder->port, pipe);
6fa9a5ec 3614
0e6e0be4 3615 intel_display_power_put(dev_priv, encoder->power_domain, wakeref);
6fa9a5ec
ID
3616
3617 return ret;
19d8fe15 3618}
d240f20f 3619
045ac3b5 3620static void intel_dp_get_config(struct intel_encoder *encoder,
5cec258b 3621 struct intel_crtc_state *pipe_config)
045ac3b5 3622{
2f773477 3623 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
b7d02c3a 3624 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
045ac3b5 3625 u32 tmp, flags = 0;
8f4f2797 3626 enum port port = encoder->port;
2225f3c6 3627 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
045ac3b5 3628
e1214b95
VS
3629 if (encoder->type == INTEL_OUTPUT_EDP)
3630 pipe_config->output_types |= BIT(INTEL_OUTPUT_EDP);
3631 else
3632 pipe_config->output_types |= BIT(INTEL_OUTPUT_DP);
045ac3b5 3633
b4e33881 3634 tmp = intel_de_read(dev_priv, intel_dp->output_reg);
9fcb1704
JN
3635
3636 pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
9ed109a7 3637
6e266956 3638 if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
b4e33881
JN
3639 u32 trans_dp = intel_de_read(dev_priv,
3640 TRANS_DP_CTL(crtc->pipe));
b81e34c2
VS
3641
3642 if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
63000ef6
XZ
3643 flags |= DRM_MODE_FLAG_PHSYNC;
3644 else
3645 flags |= DRM_MODE_FLAG_NHSYNC;
045ac3b5 3646
b81e34c2 3647 if (trans_dp & TRANS_DP_VSYNC_ACTIVE_HIGH)
63000ef6
XZ
3648 flags |= DRM_MODE_FLAG_PVSYNC;
3649 else
3650 flags |= DRM_MODE_FLAG_NVSYNC;
3651 } else {
39e5fa88 3652 if (tmp & DP_SYNC_HS_HIGH)
63000ef6
XZ
3653 flags |= DRM_MODE_FLAG_PHSYNC;
3654 else
3655 flags |= DRM_MODE_FLAG_NHSYNC;
045ac3b5 3656
39e5fa88 3657 if (tmp & DP_SYNC_VS_HIGH)
63000ef6
XZ
3658 flags |= DRM_MODE_FLAG_PVSYNC;
3659 else
3660 flags |= DRM_MODE_FLAG_NVSYNC;
3661 }
045ac3b5 3662
1326a92c 3663 pipe_config->hw.adjusted_mode.flags |= flags;
f1f644dc 3664
c99f53f7 3665 if (IS_G4X(dev_priv) && tmp & DP_COLOR_RANGE_16_235)
8c875fca
VS
3666 pipe_config->limited_color_range = true;
3667
90a6b7b0
VS
3668 pipe_config->lane_count =
3669 ((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1;
3670
eb14cb74
VS
3671 intel_dp_get_m_n(crtc, pipe_config);
3672
18442d08 3673 if (port == PORT_A) {
b4e33881 3674 if ((intel_de_read(dev_priv, DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
f1f644dc
JB
3675 pipe_config->port_clock = 162000;
3676 else
3677 pipe_config->port_clock = 270000;
3678 }
18442d08 3679
1326a92c 3680 pipe_config->hw.adjusted_mode.crtc_clock =
e3b247da
VS
3681 intel_dotclock_calculate(pipe_config->port_clock,
3682 &pipe_config->dp_m_n);
7f16e5c1 3683
1853a9da 3684 if (intel_dp_is_edp(intel_dp) && dev_priv->vbt.edp.bpp &&
6aa23e65 3685 pipe_config->pipe_bpp > dev_priv->vbt.edp.bpp) {
c6cd2ee2
JN
3686 /*
3687 * This is a big fat ugly hack.
3688 *
3689 * Some machines in UEFI boot mode provide us a VBT that has 18
3690 * bpp and 1.62 GHz link bandwidth for eDP, which for reasons
3691 * unknown we fail to light up. Yet the same BIOS boots up with
3692 * 24 bpp and 2.7 GHz link. Use the same bpp as the BIOS uses as
3693 * max, not what it tells us to use.
3694 *
3695 * Note: This will still be broken if the eDP panel is not lit
3696 * up by the BIOS, and thus we can't get the mode at module
3697 * load.
3698 */
bdc6114e
WK
3699 drm_dbg_kms(&dev_priv->drm,
3700 "pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
3701 pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp);
6aa23e65 3702 dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp;
c6cd2ee2 3703 }
045ac3b5
JB
3704}
3705
ede9771d
VS
3706static void intel_disable_dp(struct intel_atomic_state *state,
3707 struct intel_encoder *encoder,
5f88a9c6
VS
3708 const struct intel_crtc_state *old_crtc_state,
3709 const struct drm_connector_state *old_conn_state)
d240f20f 3710{
b7d02c3a 3711 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
495a5bb8 3712
edb2e530
VS
3713 intel_dp->link_trained = false;
3714
85cb48a1 3715 if (old_crtc_state->has_audio)
8ec47de2
VS
3716 intel_audio_codec_disable(encoder,
3717 old_crtc_state, old_conn_state);
6cb49835
DV
3718
3719 /* Make sure the panel is off before trying to change the mode. But also
3720 * ensure that we have vdd while we switch off the panel. */
24f3e092 3721 intel_edp_panel_vdd_on(intel_dp);
b037d58f 3722 intel_edp_backlight_off(old_conn_state);
fdbc3b1f 3723 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
4be73780 3724 intel_edp_panel_off(intel_dp);
1a8ff607
VS
3725}
3726
ede9771d
VS
3727static void g4x_disable_dp(struct intel_atomic_state *state,
3728 struct intel_encoder *encoder,
1a8ff607
VS
3729 const struct intel_crtc_state *old_crtc_state,
3730 const struct drm_connector_state *old_conn_state)
1a8ff607 3731{
ede9771d 3732 intel_disable_dp(state, encoder, old_crtc_state, old_conn_state);
1a8ff607
VS
3733}
3734
ede9771d
VS
3735static void vlv_disable_dp(struct intel_atomic_state *state,
3736 struct intel_encoder *encoder,
1a8ff607
VS
3737 const struct intel_crtc_state *old_crtc_state,
3738 const struct drm_connector_state *old_conn_state)
3739{
ede9771d 3740 intel_disable_dp(state, encoder, old_crtc_state, old_conn_state);
d240f20f
JB
3741}
3742
ede9771d
VS
3743static void g4x_post_disable_dp(struct intel_atomic_state *state,
3744 struct intel_encoder *encoder,
5f88a9c6
VS
3745 const struct intel_crtc_state *old_crtc_state,
3746 const struct drm_connector_state *old_conn_state)
d240f20f 3747{
b7d02c3a 3748 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
adc10304 3749 enum port port = encoder->port;
2bd2ad64 3750
51a9f6df
VS
3751 /*
3752 * Bspec does not list a specific disable sequence for g4x DP.
3753 * Follow the ilk+ sequence (disable pipe before the port) for
3754 * g4x DP as it does not suffer from underruns like the normal
3755 * g4x modeset sequence (disable pipe after the port).
3756 */
adc10304 3757 intel_dp_link_down(encoder, old_crtc_state);
abfce949
VS
3758
3759 /* Only ilk+ has port A */
08aff3fe 3760 if (port == PORT_A)
9eae5e27 3761 ilk_edp_pll_off(intel_dp, old_crtc_state);
49277c31
VS
3762}
3763
ede9771d
VS
3764static void vlv_post_disable_dp(struct intel_atomic_state *state,
3765 struct intel_encoder *encoder,
5f88a9c6
VS
3766 const struct intel_crtc_state *old_crtc_state,
3767 const struct drm_connector_state *old_conn_state)
49277c31 3768{
adc10304 3769 intel_dp_link_down(encoder, old_crtc_state);
2bd2ad64
DV
3770}
3771
ede9771d
VS
3772static void chv_post_disable_dp(struct intel_atomic_state *state,
3773 struct intel_encoder *encoder,
5f88a9c6
VS
3774 const struct intel_crtc_state *old_crtc_state,
3775 const struct drm_connector_state *old_conn_state)
a8f327fb 3776{
adc10304 3777 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
97fd4d5c 3778
adc10304 3779 intel_dp_link_down(encoder, old_crtc_state);
a8f327fb 3780
221c7862 3781 vlv_dpio_get(dev_priv);
a8f327fb
VS
3782
3783 /* Assert data lane reset */
2e1029c6 3784 chv_data_lane_soft_reset(encoder, old_crtc_state, true);
580d3811 3785
221c7862 3786 vlv_dpio_put(dev_priv);
580d3811
VS
3787}
3788
7b13b58a 3789static void
eee3f911 3790cpt_set_link_train(struct intel_dp *intel_dp,
a621860a 3791 const struct intel_crtc_state *crtc_state,
eee3f911 3792 u8 dp_train_pat)
7b13b58a 3793{
de25eb7f 3794 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
eee3f911 3795 u32 *DP = &intel_dp->DP;
8b0878a0 3796
eee3f911 3797 *DP &= ~DP_LINK_TRAIN_MASK_CPT;
7b13b58a 3798
eee3f911
VS
3799 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
3800 case DP_TRAINING_PATTERN_DISABLE:
3801 *DP |= DP_LINK_TRAIN_OFF_CPT;
3802 break;
3803 case DP_TRAINING_PATTERN_1:
3804 *DP |= DP_LINK_TRAIN_PAT_1_CPT;
3805 break;
3806 case DP_TRAINING_PATTERN_2:
3807 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
3808 break;
3809 case DP_TRAINING_PATTERN_3:
3810 drm_dbg_kms(&dev_priv->drm,
3811 "TPS3 not supported, using TPS2 instead\n");
3812 *DP |= DP_LINK_TRAIN_PAT_2_CPT;
3813 break;
3814 }
7b13b58a 3815
eee3f911
VS
3816 intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
3817 intel_de_posting_read(dev_priv, intel_dp->output_reg);
3818}
7b13b58a 3819
eee3f911
VS
3820static void
3821g4x_set_link_train(struct intel_dp *intel_dp,
a621860a 3822 const struct intel_crtc_state *crtc_state,
eee3f911
VS
3823 u8 dp_train_pat)
3824{
3825 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
3826 u32 *DP = &intel_dp->DP;
7b13b58a 3827
eee3f911 3828 *DP &= ~DP_LINK_TRAIN_MASK;
7b13b58a 3829
eee3f911
VS
3830 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
3831 case DP_TRAINING_PATTERN_DISABLE:
3832 *DP |= DP_LINK_TRAIN_OFF;
3833 break;
3834 case DP_TRAINING_PATTERN_1:
3835 *DP |= DP_LINK_TRAIN_PAT_1;
3836 break;
3837 case DP_TRAINING_PATTERN_2:
3838 *DP |= DP_LINK_TRAIN_PAT_2;
3839 break;
3840 case DP_TRAINING_PATTERN_3:
3841 drm_dbg_kms(&dev_priv->drm,
3842 "TPS3 not supported, using TPS2 instead\n");
3843 *DP |= DP_LINK_TRAIN_PAT_2;
3844 break;
7b13b58a 3845 }
eee3f911
VS
3846
3847 intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
3848 intel_de_posting_read(dev_priv, intel_dp->output_reg);
7b13b58a
VS
3849}
3850
85cb48a1 3851static void intel_dp_enable_port(struct intel_dp *intel_dp,
95cef532 3852 const struct intel_crtc_state *crtc_state)
7b13b58a 3853{
de25eb7f 3854 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
7b13b58a 3855
7b13b58a 3856 /* enable with pattern 1 (as per spec) */
7b13b58a 3857
a621860a
VS
3858 intel_dp_program_link_training_pattern(intel_dp, crtc_state,
3859 DP_TRAINING_PATTERN_1);
7b713f50
VS
3860
3861 /*
3862 * Magic for VLV/CHV. We _must_ first set up the register
3863 * without actually enabling the port, and then do another
3864 * write to enable the port. Otherwise link training will
3865 * fail when the power sequencer is freshly used for this port.
3866 */
3867 intel_dp->DP |= DP_PORT_EN;
95cef532 3868 if (crtc_state->has_audio)
6fec7662 3869 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
7b713f50 3870
b4e33881
JN
3871 intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
3872 intel_de_posting_read(dev_priv, intel_dp->output_reg);
580d3811
VS
3873}
3874
b7feffd5
VS
3875void intel_dp_configure_protocol_converter(struct intel_dp *intel_dp)
3876{
3877 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
3878 u8 tmp;
3879
3880 if (intel_dp->dpcd[DP_DPCD_REV] < 0x13)
3881 return;
3882
3883 if (!drm_dp_is_branch(intel_dp->dpcd))
3884 return;
3885
3886 tmp = intel_dp->has_hdmi_sink ?
3887 DP_HDMI_DVI_OUTPUT_CONFIG : 0;
3888
3889 if (drm_dp_dpcd_writeb(&intel_dp->aux,
181567aa 3890 DP_PROTOCOL_CONVERTER_CONTROL_0, tmp) != 1)
b7feffd5
VS
3891 drm_dbg_kms(&i915->drm, "Failed to set protocol converter HDMI mode to %s\n",
3892 enableddisabled(intel_dp->has_hdmi_sink));
3893
181567aa
VS
3894 tmp = intel_dp->dfp.ycbcr_444_to_420 ?
3895 DP_CONVERSION_TO_YCBCR420_ENABLE : 0;
3896
3897 if (drm_dp_dpcd_writeb(&intel_dp->aux,
3898 DP_PROTOCOL_CONVERTER_CONTROL_1, tmp) != 1)
3899 drm_dbg_kms(&i915->drm,
3900 "Failed to set protocol converter YCbCr 4:2:0 conversion mode to %s\n",
3901 enableddisabled(intel_dp->dfp.ycbcr_444_to_420));
3902
3903 tmp = 0;
3904
3905 if (drm_dp_dpcd_writeb(&intel_dp->aux,
3906 DP_PROTOCOL_CONVERTER_CONTROL_2, tmp) <= 0)
3907 drm_dbg_kms(&i915->drm,
3908 "Failed to set protocol converter YCbCr 4:2:2 conversion mode to %s\n",
3909 enableddisabled(false));
b7feffd5
VS
3910}
3911
ede9771d
VS
3912static void intel_enable_dp(struct intel_atomic_state *state,
3913 struct intel_encoder *encoder,
5f88a9c6
VS
3914 const struct intel_crtc_state *pipe_config,
3915 const struct drm_connector_state *conn_state)
d240f20f 3916{
2f773477 3917 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
b7d02c3a 3918 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2225f3c6 3919 struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
b4e33881 3920 u32 dp_reg = intel_de_read(dev_priv, intel_dp->output_reg);
d6fbdd15 3921 enum pipe pipe = crtc->pipe;
69d93820 3922 intel_wakeref_t wakeref;
5d613501 3923
eb020ca3 3924 if (drm_WARN_ON(&dev_priv->drm, dp_reg & DP_PORT_EN))
0c33d8d7 3925 return;
5d613501 3926
69d93820
CW
3927 with_pps_lock(intel_dp, wakeref) {
3928 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
3929 vlv_init_panel_power_sequencer(encoder, pipe_config);
093e3f13 3930
69d93820 3931 intel_dp_enable_port(intel_dp, pipe_config);
093e3f13 3932
69d93820
CW
3933 edp_panel_vdd_on(intel_dp);
3934 edp_panel_on(intel_dp);
3935 edp_panel_vdd_off(intel_dp, true);
3936 }
093e3f13 3937
920a14b2 3938 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
e0fce78f
VS
3939 unsigned int lane_mask = 0x0;
3940
920a14b2 3941 if (IS_CHERRYVIEW(dev_priv))
85cb48a1 3942 lane_mask = intel_dp_unused_lane_mask(pipe_config->lane_count);
e0fce78f 3943
9b6de0a1
VS
3944 vlv_wait_port_ready(dev_priv, dp_to_dig_port(intel_dp),
3945 lane_mask);
e0fce78f 3946 }
61234fa5 3947
f01eca2e 3948 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
b7feffd5 3949 intel_dp_configure_protocol_converter(intel_dp);
a621860a
VS
3950 intel_dp_start_link_train(intel_dp, pipe_config);
3951 intel_dp_stop_link_train(intel_dp, pipe_config);
c1dec79a 3952
85cb48a1 3953 if (pipe_config->has_audio) {
bdc6114e
WK
3954 drm_dbg(&dev_priv->drm, "Enabling DP audio on pipe %c\n",
3955 pipe_name(pipe));
bbf35e9d 3956 intel_audio_codec_enable(encoder, pipe_config, conn_state);
c1dec79a 3957 }
ab1f90f9 3958}
89b667f8 3959
ede9771d
VS
3960static void g4x_enable_dp(struct intel_atomic_state *state,
3961 struct intel_encoder *encoder,
5f88a9c6
VS
3962 const struct intel_crtc_state *pipe_config,
3963 const struct drm_connector_state *conn_state)
ecff4f3b 3964{
ede9771d 3965 intel_enable_dp(state, encoder, pipe_config, conn_state);
b037d58f 3966 intel_edp_backlight_on(pipe_config, conn_state);
ab1f90f9 3967}
89b667f8 3968
ede9771d
VS
3969static void vlv_enable_dp(struct intel_atomic_state *state,
3970 struct intel_encoder *encoder,
5f88a9c6
VS
3971 const struct intel_crtc_state *pipe_config,
3972 const struct drm_connector_state *conn_state)
ab1f90f9 3973{
b037d58f 3974 intel_edp_backlight_on(pipe_config, conn_state);
d240f20f
JB
3975}
3976
ede9771d
VS
3977static void g4x_pre_enable_dp(struct intel_atomic_state *state,
3978 struct intel_encoder *encoder,
5f88a9c6
VS
3979 const struct intel_crtc_state *pipe_config,
3980 const struct drm_connector_state *conn_state)
ab1f90f9 3981{
b7d02c3a 3982 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
8f4f2797 3983 enum port port = encoder->port;
ab1f90f9 3984
85cb48a1 3985 intel_dp_prepare(encoder, pipe_config);
8ac33ed3 3986
d41f1efb 3987 /* Only ilk+ has port A */
abfce949 3988 if (port == PORT_A)
9eae5e27 3989 ilk_edp_pll_on(intel_dp, pipe_config);
ab1f90f9
JN
3990}
3991
83b84597
VS
3992static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
3993{
7801f3b7
LDM
3994 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
3995 struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
83b84597 3996 enum pipe pipe = intel_dp->pps_pipe;
44cb734c 3997 i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe);
83b84597 3998
eb020ca3 3999 drm_WARN_ON(&dev_priv->drm, intel_dp->active_pipe != INVALID_PIPE);
9f2bdb00 4000
eb020ca3 4001 if (drm_WARN_ON(&dev_priv->drm, pipe != PIPE_A && pipe != PIPE_B))
d158694f
VS
4002 return;
4003
83b84597
VS
4004 edp_panel_vdd_off_sync(intel_dp);
4005
4006 /*
e7f2af78 4007 * VLV seems to get confused when multiple power sequencers
83b84597
VS
4008 * have the same port selected (even if only one has power/vdd
4009 * enabled). The failure manifests as vlv_wait_port_ready() failing
4010 * CHV on the other hand doesn't seem to mind having the same port
e7f2af78 4011 * selected in multiple power sequencers, but let's clear the
83b84597
VS
4012 * port select always when logically disconnecting a power sequencer
4013 * from a port.
4014 */
bdc6114e
WK
4015 drm_dbg_kms(&dev_priv->drm,
4016 "detaching pipe %c power sequencer from [ENCODER:%d:%s]\n",
7801f3b7
LDM
4017 pipe_name(pipe), dig_port->base.base.base.id,
4018 dig_port->base.base.name);
b4e33881
JN
4019 intel_de_write(dev_priv, pp_on_reg, 0);
4020 intel_de_posting_read(dev_priv, pp_on_reg);
83b84597
VS
4021
4022 intel_dp->pps_pipe = INVALID_PIPE;
4023}
4024
46bd8383 4025static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
a4a5d2f8
VS
4026 enum pipe pipe)
4027{
a4a5d2f8
VS
4028 struct intel_encoder *encoder;
4029
4030 lockdep_assert_held(&dev_priv->pps_mutex);
4031
14aa521c 4032 for_each_intel_dp(&dev_priv->drm, encoder) {
b7d02c3a 4033 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
a4a5d2f8 4034
eb020ca3
PB
4035 drm_WARN(&dev_priv->drm, intel_dp->active_pipe == pipe,
4036 "stealing pipe %c power sequencer from active [ENCODER:%d:%s]\n",
4037 pipe_name(pipe), encoder->base.base.id,
4038 encoder->base.name);
9f2bdb00 4039
a4a5d2f8
VS
4040 if (intel_dp->pps_pipe != pipe)
4041 continue;
4042
bdc6114e
WK
4043 drm_dbg_kms(&dev_priv->drm,
4044 "stealing pipe %c power sequencer from [ENCODER:%d:%s]\n",
4045 pipe_name(pipe), encoder->base.base.id,
4046 encoder->base.name);
a4a5d2f8
VS
4047
4048 /* make sure vdd is off before we steal it */
83b84597 4049 vlv_detach_power_sequencer(intel_dp);
a4a5d2f8
VS
4050 }
4051}
4052
adc10304
VS
4053static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
4054 const struct intel_crtc_state *crtc_state)
a4a5d2f8 4055{
46bd8383 4056 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
b7d02c3a 4057 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2225f3c6 4058 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
a4a5d2f8
VS
4059
4060 lockdep_assert_held(&dev_priv->pps_mutex);
4061
eb020ca3 4062 drm_WARN_ON(&dev_priv->drm, intel_dp->active_pipe != INVALID_PIPE);
093e3f13 4063
9f2bdb00
VS
4064 if (intel_dp->pps_pipe != INVALID_PIPE &&
4065 intel_dp->pps_pipe != crtc->pipe) {
4066 /*
4067 * If another power sequencer was being used on this
4068 * port previously make sure to turn off vdd there while
4069 * we still have control of it.
4070 */
83b84597 4071 vlv_detach_power_sequencer(intel_dp);
9f2bdb00 4072 }
a4a5d2f8
VS
4073
4074 /*
4075 * We may be stealing the power
4076 * sequencer from another port.
4077 */
46bd8383 4078 vlv_steal_power_sequencer(dev_priv, crtc->pipe);
a4a5d2f8 4079
9f2bdb00
VS
4080 intel_dp->active_pipe = crtc->pipe;
4081
1853a9da 4082 if (!intel_dp_is_edp(intel_dp))
9f2bdb00
VS
4083 return;
4084
a4a5d2f8
VS
4085 /* now it's all ours */
4086 intel_dp->pps_pipe = crtc->pipe;
4087
bdc6114e
WK
4088 drm_dbg_kms(&dev_priv->drm,
4089 "initializing pipe %c power sequencer for [ENCODER:%d:%s]\n",
4090 pipe_name(intel_dp->pps_pipe), encoder->base.base.id,
4091 encoder->base.name);
a4a5d2f8
VS
4092
4093 /* init power sequencer on this pipe and port */
46bd8383
VS
4094 intel_dp_init_panel_power_sequencer(intel_dp);
4095 intel_dp_init_panel_power_sequencer_registers(intel_dp, true);
a4a5d2f8
VS
4096}
4097
ede9771d
VS
4098static void vlv_pre_enable_dp(struct intel_atomic_state *state,
4099 struct intel_encoder *encoder,
5f88a9c6
VS
4100 const struct intel_crtc_state *pipe_config,
4101 const struct drm_connector_state *conn_state)
a4fc5ed6 4102{
2e1029c6 4103 vlv_phy_pre_encoder_enable(encoder, pipe_config);
ab1f90f9 4104
ede9771d 4105 intel_enable_dp(state, encoder, pipe_config, conn_state);
89b667f8
JB
4106}
4107
ede9771d
VS
4108static void vlv_dp_pre_pll_enable(struct intel_atomic_state *state,
4109 struct intel_encoder *encoder,
5f88a9c6
VS
4110 const struct intel_crtc_state *pipe_config,
4111 const struct drm_connector_state *conn_state)
89b667f8 4112{
85cb48a1 4113 intel_dp_prepare(encoder, pipe_config);
8ac33ed3 4114
2e1029c6 4115 vlv_phy_pre_pll_enable(encoder, pipe_config);
a4fc5ed6
KP
4116}
4117
ede9771d
VS
4118static void chv_pre_enable_dp(struct intel_atomic_state *state,
4119 struct intel_encoder *encoder,
5f88a9c6
VS
4120 const struct intel_crtc_state *pipe_config,
4121 const struct drm_connector_state *conn_state)
e4a1d846 4122{
2e1029c6 4123 chv_phy_pre_encoder_enable(encoder, pipe_config);
e4a1d846 4124
ede9771d 4125 intel_enable_dp(state, encoder, pipe_config, conn_state);
b0b33846
VS
4126
4127 /* Second common lane will stay alive on its own now */
e7d2a717 4128 chv_phy_release_cl2_override(encoder);
e4a1d846
CML
4129}
4130
ede9771d
VS
4131static void chv_dp_pre_pll_enable(struct intel_atomic_state *state,
4132 struct intel_encoder *encoder,
5f88a9c6
VS
4133 const struct intel_crtc_state *pipe_config,
4134 const struct drm_connector_state *conn_state)
9197c88b 4135{
85cb48a1 4136 intel_dp_prepare(encoder, pipe_config);
625695f8 4137
2e1029c6 4138 chv_phy_pre_pll_enable(encoder, pipe_config);
9197c88b
VS
4139}
4140
ede9771d
VS
4141static void chv_dp_post_pll_disable(struct intel_atomic_state *state,
4142 struct intel_encoder *encoder,
2e1029c6
VS
4143 const struct intel_crtc_state *old_crtc_state,
4144 const struct drm_connector_state *old_conn_state)
d6db995f 4145{
2e1029c6 4146 chv_phy_post_pll_disable(encoder, old_crtc_state);
d6db995f
VS
4147}
4148
a4fc5ed6
KP
4149/*
4150 * Fetch AUX CH registers 0x202 - 0x207 which contain
4151 * link status information
4152 */
94223d04 4153bool
830de422 4154intel_dp_get_link_status(struct intel_dp *intel_dp, u8 link_status[DP_LINK_STATUS_SIZE])
a4fc5ed6 4155{
9f085ebb
L
4156 return drm_dp_dpcd_read(&intel_dp->aux, DP_LANE0_1_STATUS, link_status,
4157 DP_LINK_STATUS_SIZE) == DP_LINK_STATUS_SIZE;
a4fc5ed6
KP
4158}
4159
a621860a
VS
4160static u8 intel_dp_voltage_max_2(struct intel_dp *intel_dp,
4161 const struct intel_crtc_state *crtc_state)
a4fc5ed6 4162{
53de0a20
VS
4163 return DP_TRAIN_VOLTAGE_SWING_LEVEL_2;
4164}
1a2eb460 4165
a621860a
VS
4166static u8 intel_dp_voltage_max_3(struct intel_dp *intel_dp,
4167 const struct intel_crtc_state *crtc_state)
53de0a20
VS
4168{
4169 return DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
1a2eb460
KP
4170}
4171
6694d2be 4172static u8 intel_dp_preemph_max_2(struct intel_dp *intel_dp)
1a2eb460 4173{
53de0a20
VS
4174 return DP_TRAIN_PRE_EMPH_LEVEL_2;
4175}
1a2eb460 4176
6694d2be 4177static u8 intel_dp_preemph_max_3(struct intel_dp *intel_dp)
53de0a20
VS
4178{
4179 return DP_TRAIN_PRE_EMPH_LEVEL_3;
a4fc5ed6
KP
4180}
4181
a621860a
VS
4182static void vlv_set_signal_levels(struct intel_dp *intel_dp,
4183 const struct intel_crtc_state *crtc_state)
e2fa6fba 4184{
53d98725 4185 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
e2fa6fba
P
4186 unsigned long demph_reg_value, preemph_reg_value,
4187 uniqtranscale_reg_value;
830de422 4188 u8 train_set = intel_dp->train_set[0];
e2fa6fba
P
4189
4190 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 4191 case DP_TRAIN_PRE_EMPH_LEVEL_0:
e2fa6fba
P
4192 preemph_reg_value = 0x0004000;
4193 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 4194 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
4195 demph_reg_value = 0x2B405555;
4196 uniqtranscale_reg_value = 0x552AB83A;
4197 break;
bd60018a 4198 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
4199 demph_reg_value = 0x2B404040;
4200 uniqtranscale_reg_value = 0x5548B83A;
4201 break;
bd60018a 4202 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e2fa6fba
P
4203 demph_reg_value = 0x2B245555;
4204 uniqtranscale_reg_value = 0x5560B83A;
4205 break;
bd60018a 4206 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e2fa6fba
P
4207 demph_reg_value = 0x2B405555;
4208 uniqtranscale_reg_value = 0x5598DA3A;
4209 break;
4210 default:
fb83f72c 4211 return;
e2fa6fba
P
4212 }
4213 break;
bd60018a 4214 case DP_TRAIN_PRE_EMPH_LEVEL_1:
e2fa6fba
P
4215 preemph_reg_value = 0x0002000;
4216 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 4217 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
4218 demph_reg_value = 0x2B404040;
4219 uniqtranscale_reg_value = 0x5552B83A;
4220 break;
bd60018a 4221 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
4222 demph_reg_value = 0x2B404848;
4223 uniqtranscale_reg_value = 0x5580B83A;
4224 break;
bd60018a 4225 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e2fa6fba
P
4226 demph_reg_value = 0x2B404040;
4227 uniqtranscale_reg_value = 0x55ADDA3A;
4228 break;
4229 default:
fb83f72c 4230 return;
e2fa6fba
P
4231 }
4232 break;
bd60018a 4233 case DP_TRAIN_PRE_EMPH_LEVEL_2:
e2fa6fba
P
4234 preemph_reg_value = 0x0000000;
4235 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 4236 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
4237 demph_reg_value = 0x2B305555;
4238 uniqtranscale_reg_value = 0x5570B83A;
4239 break;
bd60018a 4240 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e2fa6fba
P
4241 demph_reg_value = 0x2B2B4040;
4242 uniqtranscale_reg_value = 0x55ADDA3A;
4243 break;
4244 default:
fb83f72c 4245 return;
e2fa6fba
P
4246 }
4247 break;
bd60018a 4248 case DP_TRAIN_PRE_EMPH_LEVEL_3:
e2fa6fba
P
4249 preemph_reg_value = 0x0006000;
4250 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 4251 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e2fa6fba
P
4252 demph_reg_value = 0x1B405555;
4253 uniqtranscale_reg_value = 0x55ADDA3A;
4254 break;
4255 default:
fb83f72c 4256 return;
e2fa6fba
P
4257 }
4258 break;
4259 default:
fb83f72c 4260 return;
e2fa6fba
P
4261 }
4262
a621860a
VS
4263 vlv_set_phy_signal_level(encoder, crtc_state,
4264 demph_reg_value, preemph_reg_value,
53d98725 4265 uniqtranscale_reg_value, 0);
e2fa6fba
P
4266}
4267
a621860a
VS
4268static void chv_set_signal_levels(struct intel_dp *intel_dp,
4269 const struct intel_crtc_state *crtc_state)
e4a1d846 4270{
b7fa22d8
ACO
4271 struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
4272 u32 deemph_reg_value, margin_reg_value;
4273 bool uniq_trans_scale = false;
830de422 4274 u8 train_set = intel_dp->train_set[0];
e4a1d846
CML
4275
4276 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 4277 case DP_TRAIN_PRE_EMPH_LEVEL_0:
e4a1d846 4278 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 4279 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
4280 deemph_reg_value = 128;
4281 margin_reg_value = 52;
4282 break;
bd60018a 4283 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
4284 deemph_reg_value = 128;
4285 margin_reg_value = 77;
4286 break;
bd60018a 4287 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e4a1d846
CML
4288 deemph_reg_value = 128;
4289 margin_reg_value = 102;
4290 break;
bd60018a 4291 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
e4a1d846
CML
4292 deemph_reg_value = 128;
4293 margin_reg_value = 154;
b7fa22d8 4294 uniq_trans_scale = true;
e4a1d846
CML
4295 break;
4296 default:
fb83f72c 4297 return;
e4a1d846
CML
4298 }
4299 break;
bd60018a 4300 case DP_TRAIN_PRE_EMPH_LEVEL_1:
e4a1d846 4301 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 4302 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
4303 deemph_reg_value = 85;
4304 margin_reg_value = 78;
4305 break;
bd60018a 4306 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
4307 deemph_reg_value = 85;
4308 margin_reg_value = 116;
4309 break;
bd60018a 4310 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
e4a1d846
CML
4311 deemph_reg_value = 85;
4312 margin_reg_value = 154;
4313 break;
4314 default:
fb83f72c 4315 return;
e4a1d846
CML
4316 }
4317 break;
bd60018a 4318 case DP_TRAIN_PRE_EMPH_LEVEL_2:
e4a1d846 4319 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 4320 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
4321 deemph_reg_value = 64;
4322 margin_reg_value = 104;
4323 break;
bd60018a 4324 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
e4a1d846
CML
4325 deemph_reg_value = 64;
4326 margin_reg_value = 154;
4327 break;
4328 default:
fb83f72c 4329 return;
e4a1d846
CML
4330 }
4331 break;
bd60018a 4332 case DP_TRAIN_PRE_EMPH_LEVEL_3:
e4a1d846 4333 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 4334 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
e4a1d846
CML
4335 deemph_reg_value = 43;
4336 margin_reg_value = 154;
4337 break;
4338 default:
fb83f72c 4339 return;
e4a1d846
CML
4340 }
4341 break;
4342 default:
fb83f72c 4343 return;
e4a1d846
CML
4344 }
4345
a621860a
VS
4346 chv_set_phy_signal_level(encoder, crtc_state,
4347 deemph_reg_value, margin_reg_value,
4348 uniq_trans_scale);
e4a1d846
CML
4349}
4350
fb83f72c 4351static u32 g4x_signal_levels(u8 train_set)
a4fc5ed6 4352{
830de422 4353 u32 signal_levels = 0;
a4fc5ed6 4354
3cf2efb1 4355 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
bd60018a 4356 case DP_TRAIN_VOLTAGE_SWING_LEVEL_0:
a4fc5ed6
KP
4357 default:
4358 signal_levels |= DP_VOLTAGE_0_4;
4359 break;
bd60018a 4360 case DP_TRAIN_VOLTAGE_SWING_LEVEL_1:
a4fc5ed6
KP
4361 signal_levels |= DP_VOLTAGE_0_6;
4362 break;
bd60018a 4363 case DP_TRAIN_VOLTAGE_SWING_LEVEL_2:
a4fc5ed6
KP
4364 signal_levels |= DP_VOLTAGE_0_8;
4365 break;
bd60018a 4366 case DP_TRAIN_VOLTAGE_SWING_LEVEL_3:
a4fc5ed6
KP
4367 signal_levels |= DP_VOLTAGE_1_2;
4368 break;
4369 }
3cf2efb1 4370 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
bd60018a 4371 case DP_TRAIN_PRE_EMPH_LEVEL_0:
a4fc5ed6
KP
4372 default:
4373 signal_levels |= DP_PRE_EMPHASIS_0;
4374 break;
bd60018a 4375 case DP_TRAIN_PRE_EMPH_LEVEL_1:
a4fc5ed6
KP
4376 signal_levels |= DP_PRE_EMPHASIS_3_5;
4377 break;
bd60018a 4378 case DP_TRAIN_PRE_EMPH_LEVEL_2:
a4fc5ed6
KP
4379 signal_levels |= DP_PRE_EMPHASIS_6;
4380 break;
bd60018a 4381 case DP_TRAIN_PRE_EMPH_LEVEL_3:
a4fc5ed6
KP
4382 signal_levels |= DP_PRE_EMPHASIS_9_5;
4383 break;
4384 }
4385 return signal_levels;
4386}
4387
fb83f72c 4388static void
a621860a
VS
4389g4x_set_signal_levels(struct intel_dp *intel_dp,
4390 const struct intel_crtc_state *crtc_state)
fb83f72c
VS
4391{
4392 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
4393 u8 train_set = intel_dp->train_set[0];
4394 u32 signal_levels;
4395
4396 signal_levels = g4x_signal_levels(train_set);
4397
4398 drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
4399 signal_levels);
4400
4401 intel_dp->DP &= ~(DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK);
4402 intel_dp->DP |= signal_levels;
4403
4404 intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
4405 intel_de_posting_read(dev_priv, intel_dp->output_reg);
4406}
4407
/* SNB CPU eDP voltage swing and pre-emphasis control */
static u32 snb_cpu_edp_signal_levels(u8 train_set)
{
	/* Combined swing|pre-emphasis key for the lookup below. */
	u8 signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					DP_TRAIN_PRE_EMPHASIS_MASK);

	/*
	 * SNB only supports a subset of swing/pre-emphasis combinations;
	 * several DPCD combinations map onto the same register value.
	 */
	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
		return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_3 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
	default:
		/* Unsupported request: log and fall back to the safest level. */
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
	}
}
4435
fb83f72c 4436static void
a621860a
VS
4437snb_cpu_edp_set_signal_levels(struct intel_dp *intel_dp,
4438 const struct intel_crtc_state *crtc_state)
fb83f72c
VS
4439{
4440 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
4441 u8 train_set = intel_dp->train_set[0];
4442 u32 signal_levels;
4443
4444 signal_levels = snb_cpu_edp_signal_levels(train_set);
4445
4446 drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
4447 signal_levels);
4448
4449 intel_dp->DP &= ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
4450 intel_dp->DP |= signal_levels;
4451
4452 intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
4453 intel_de_posting_read(dev_priv, intel_dp->output_reg);
4454}
4455
/* IVB CPU eDP voltage swing and pre-emphasis control */
static u32 ivb_cpu_edp_signal_levels(u8 train_set)
{
	/* Combined swing|pre-emphasis key for the lookup below. */
	u8 signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					DP_TRAIN_PRE_EMPHASIS_MASK);

	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_400MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_0 | DP_TRAIN_PRE_EMPH_LEVEL_2:
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_2:
		return EDP_LINK_TRAIN_400MV_6DB_IVB;

	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_600MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_1 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_600MV_3_5DB_IVB;

	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_0:
		return EDP_LINK_TRAIN_800MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_LEVEL_2 | DP_TRAIN_PRE_EMPH_LEVEL_1:
		return EDP_LINK_TRAIN_800MV_3_5DB_IVB;

	default:
		/* Unsupported request: log and fall back to a safe level. */
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		return EDP_LINK_TRAIN_500MV_0DB_IVB;
	}
}
4487
fb83f72c 4488static void
a621860a
VS
4489ivb_cpu_edp_set_signal_levels(struct intel_dp *intel_dp,
4490 const struct intel_crtc_state *crtc_state)
f0a3424e 4491{
de25eb7f 4492 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
830de422 4493 u8 train_set = intel_dp->train_set[0];
fb83f72c 4494 u32 signal_levels;
f0a3424e 4495
fb83f72c
VS
4496 signal_levels = ivb_cpu_edp_signal_levels(train_set);
4497
4498 drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
4499 signal_levels);
4500
4501 intel_dp->DP &= ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
4502 intel_dp->DP |= signal_levels;
f0a3424e 4503
fb83f72c
VS
4504 intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
4505 intel_de_posting_read(dev_priv, intel_dp->output_reg);
4506}
4507
/*
 * Log the requested vswing/pre-emphasis levels and dispatch to the
 * platform-specific set_signal_levels() hook to program the hardware.
 */
void intel_dp_set_signal_levels(struct intel_dp *intel_dp,
				const struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	/* All lanes share the same levels; lane 0 is representative. */
	u8 train_set = intel_dp->train_set[0];

	drm_dbg_kms(&dev_priv->drm, "Using vswing level %d%s\n",
		    train_set & DP_TRAIN_VOLTAGE_SWING_MASK,
		    train_set & DP_TRAIN_MAX_SWING_REACHED ? " (max)" : "");
	drm_dbg_kms(&dev_priv->drm, "Using pre-emphasis level %d%s\n",
		    (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
		    DP_TRAIN_PRE_EMPHASIS_SHIFT,
		    train_set & DP_TRAIN_MAX_PRE_EMPHASIS_REACHED ?
		    " (max)" : "");

	intel_dp->set_signal_levels(intel_dp, crtc_state);
}
4525
94223d04 4526void
e9c176d5 4527intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
a621860a 4528 const struct intel_crtc_state *crtc_state,
830de422 4529 u8 dp_train_pat)
a4fc5ed6 4530{
eee3f911
VS
4531 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
4532 u8 train_pat_mask = drm_dp_training_pattern_mask(intel_dp->dpcd);
a4fc5ed6 4533
eee3f911
VS
4534 if (dp_train_pat & train_pat_mask)
4535 drm_dbg_kms(&dev_priv->drm,
4536 "Using DP training pattern TPS%d\n",
4537 dp_train_pat & train_pat_mask);
47ea7542 4538
a621860a 4539 intel_dp->set_link_train(intel_dp, crtc_state, dp_train_pat);
e9c176d5
ACO
4540}
4541
a621860a
VS
4542void intel_dp_set_idle_link_train(struct intel_dp *intel_dp,
4543 const struct intel_crtc_state *crtc_state)
3ab9c637 4544{
8fdda385 4545 if (intel_dp->set_idle_link_train)
a621860a 4546 intel_dp->set_idle_link_train(intel_dp, crtc_state);
3ab9c637
ID
4547}
4548
a4fc5ed6 4549static void
adc10304
VS
4550intel_dp_link_down(struct intel_encoder *encoder,
4551 const struct intel_crtc_state *old_crtc_state)
a4fc5ed6 4552{
adc10304 4553 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
b7d02c3a 4554 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
2225f3c6 4555 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
adc10304 4556 enum port port = encoder->port;
830de422 4557 u32 DP = intel_dp->DP;
a4fc5ed6 4558
eb020ca3
PB
4559 if (drm_WARN_ON(&dev_priv->drm,
4560 (intel_de_read(dev_priv, intel_dp->output_reg) &
4561 DP_PORT_EN) == 0))
1b39d6f3
CW
4562 return;
4563
bdc6114e 4564 drm_dbg_kms(&dev_priv->drm, "\n");
32f9d658 4565
b752e995 4566 if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
6e266956 4567 (HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
e3421a18 4568 DP &= ~DP_LINK_TRAIN_MASK_CPT;
1612c8bd 4569 DP |= DP_LINK_TRAIN_PAT_IDLE_CPT;
e3421a18 4570 } else {
3b358cda 4571 DP &= ~DP_LINK_TRAIN_MASK;
1612c8bd 4572 DP |= DP_LINK_TRAIN_PAT_IDLE;
e3421a18 4573 }
b4e33881
JN
4574 intel_de_write(dev_priv, intel_dp->output_reg, DP);
4575 intel_de_posting_read(dev_priv, intel_dp->output_reg);
5eb08b69 4576
1612c8bd 4577 DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
b4e33881
JN
4578 intel_de_write(dev_priv, intel_dp->output_reg, DP);
4579 intel_de_posting_read(dev_priv, intel_dp->output_reg);
1612c8bd
VS
4580
4581 /*
4582 * HW workaround for IBX, we need to move the port
4583 * to transcoder A after disabling it to allow the
4584 * matching HDMI port to be enabled on transcoder A.
4585 */
6e266956 4586 if (HAS_PCH_IBX(dev_priv) && crtc->pipe == PIPE_B && port != PORT_A) {
0c241d5b
VS
4587 /*
4588 * We get CPU/PCH FIFO underruns on the other pipe when
4589 * doing the workaround. Sweep them under the rug.
4590 */
4591 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, false);
4592 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
4593
1612c8bd 4594 /* always enable with pattern 1 (as per spec) */
59b74c49
VS
4595 DP &= ~(DP_PIPE_SEL_MASK | DP_LINK_TRAIN_MASK);
4596 DP |= DP_PORT_EN | DP_PIPE_SEL(PIPE_A) |
4597 DP_LINK_TRAIN_PAT_1;
b4e33881
JN
4598 intel_de_write(dev_priv, intel_dp->output_reg, DP);
4599 intel_de_posting_read(dev_priv, intel_dp->output_reg);
1612c8bd
VS
4600
4601 DP &= ~DP_PORT_EN;
b4e33881
JN
4602 intel_de_write(dev_priv, intel_dp->output_reg, DP);
4603 intel_de_posting_read(dev_priv, intel_dp->output_reg);
0c241d5b 4604
0f0f74bc 4605 intel_wait_for_vblank_if_active(dev_priv, PIPE_A);
0c241d5b
VS
4606 intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
4607 intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
5bddd17f
EA
4608 }
4609
f01eca2e 4610 msleep(intel_dp->panel_power_down_delay);
6fec7662
VS
4611
4612 intel_dp->DP = DP;
9f2bdb00
VS
4613
4614 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
69d93820
CW
4615 intel_wakeref_t wakeref;
4616
4617 with_pps_lock(intel_dp, wakeref)
4618 intel_dp->active_pipe = INVALID_PIPE;
9f2bdb00 4619 }
a4fc5ed6
KP
4620}
4621
8e9d645c
GM
4622bool intel_dp_get_colorimetry_status(struct intel_dp *intel_dp)
4623{
4624 u8 dprx = 0;
4625
4626 if (drm_dp_dpcd_readb(&intel_dp->aux, DP_DPRX_FEATURE_ENUMERATION_LIST,
4627 &dprx) != 1)
4628 return false;
4629 return dprx & DP_VSC_SDP_EXT_FOR_COLORIMETRY_SUPPORTED;
4630}
4631
/*
 * Read and cache the sink's DSC and FEC capabilities from DPCD.
 * Caches are cleared first so sinks without DSC/FEC don't retain
 * stale values from a previously connected sink.
 */
static void intel_dp_get_dsc_sink_cap(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);

	/*
	 * Clear the cached register set to avoid using stale values
	 * for the sinks that do not support DSC.
	 */
	memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd));

	/* Clear fec_capable to avoid using stale values */
	intel_dp->fec_capable = 0;

	/* Cache the DSC DPCD if eDP or DP rev >= 1.4 */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x14 ||
	    intel_dp->edp_dpcd[0] >= DP_EDP_14) {
		if (drm_dp_dpcd_read(&intel_dp->aux, DP_DSC_SUPPORT,
				     intel_dp->dsc_dpcd,
				     sizeof(intel_dp->dsc_dpcd)) < 0)
			drm_err(&i915->drm,
				"Failed to read DPCD register 0x%x\n",
				DP_DSC_SUPPORT);

		drm_dbg_kms(&i915->drm, "DSC DPCD: %*ph\n",
			    (int)sizeof(intel_dp->dsc_dpcd),
			    intel_dp->dsc_dpcd);

		/* FEC is supported only on DP 1.4 */
		if (!intel_dp_is_edp(intel_dp) &&
		    drm_dp_dpcd_readb(&intel_dp->aux, DP_FEC_CAPABILITY,
				      &intel_dp->fec_capable) < 0)
			drm_err(&i915->drm,
				"Failed to read FEC DPCD register\n");

		drm_dbg_kms(&i915->drm, "FEC CAPABILITY: %x\n",
			    intel_dp->fec_capable);
	}
}
4670
/*
 * One-time eDP DPCD initialization: read the base caps, the eDP display
 * control registers, PSR caps, and the eDP 1.4+ link rate table.
 * Returns false if the base DPCD read fails.
 */
static bool
intel_edp_init_dpcd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv =
		to_i915(dp_to_dig_port(intel_dp)->base.base.dev);

	/* this function is meant to be called only once */
	drm_WARN_ON(&dev_priv->drm, intel_dp->dpcd[DP_DPCD_REV] != 0);

	if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd) != 0)
		return false;

	drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
			 drm_dp_is_branch(intel_dp->dpcd));

	/*
	 * Read the eDP display control registers.
	 *
	 * Do this independent of DP_DPCD_DISPLAY_CONTROL_CAPABLE bit in
	 * DP_EDP_CONFIGURATION_CAP, because some buggy displays do not have it
	 * set, but require eDP 1.4+ detection (e.g. for supported link rates
	 * method). The display control registers should read zero if they're
	 * not supported anyway.
	 */
	if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
			     intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
	    sizeof(intel_dp->edp_dpcd))
		drm_dbg_kms(&dev_priv->drm, "eDP DPCD: %*ph\n",
			    (int)sizeof(intel_dp->edp_dpcd),
			    intel_dp->edp_dpcd);

	/*
	 * This has to be called after intel_dp->edp_dpcd is filled, PSR checks
	 * for SET_POWER_CAPABLE bit in intel_dp->edp_dpcd[1]
	 */
	intel_psr_init_dpcd(intel_dp);

	/* Read the eDP 1.4+ supported link rates. */
	if (intel_dp->edp_dpcd[0] >= DP_EDP_14) {
		__le16 sink_rates[DP_MAX_SUPPORTED_RATES];
		int i;

		drm_dp_dpcd_read(&intel_dp->aux, DP_SUPPORTED_LINK_RATES,
				 sink_rates, sizeof(sink_rates));

		/* Table is zero-terminated; i ends up as the rate count. */
		for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
			int val = le16_to_cpu(sink_rates[i]);

			if (val == 0)
				break;

			/* Value read multiplied by 200kHz gives the per-lane
			 * link rate in kHz. The source rates are, however,
			 * stored in terms of LS_Clk kHz. The full conversion
			 * back to symbols is
			 * (val * 200kHz)*(8/10 ch. encoding)*(1/8 bit to Byte)
			 */
			intel_dp->sink_rates[i] = (val * 200) / 10;
		}
		intel_dp->num_sink_rates = i;
	}

	/*
	 * Use DP_LINK_RATE_SET if DP_SUPPORTED_LINK_RATES are available,
	 * default to DP_MAX_LINK_RATE and DP_LINK_BW_SET otherwise.
	 */
	if (intel_dp->num_sink_rates)
		intel_dp->use_rate_select = true;
	else
		intel_dp_set_sink_rates(intel_dp);

	intel_dp_set_common_rates(intel_dp);

	/* Read the eDP DSC DPCD registers */
	if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
		intel_dp_get_dsc_sink_cap(intel_dp);

	return true;
}
4750
693c3ec5
LP
4751static bool
4752intel_dp_has_sink_count(struct intel_dp *intel_dp)
4753{
4754 if (!intel_dp->attached_connector)
4755 return false;
4756
4757 return drm_dp_read_sink_count_cap(&intel_dp->attached_connector->base,
4758 intel_dp->dpcd,
4759 &intel_dp->desc);
4760}
/*
 * Re-read the sink's DPCD on (re)detect / hotplug. Refreshes the base
 * caps, sink rates (non-eDP only), sink count, and downstream port info.
 * Returns false when the sink is gone or reads fail.
 */
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
	int ret;

	if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd))
		return false;

	/*
	 * Don't clobber cached eDP rates. Also skip re-reading
	 * the OUI/ID since we know it won't change.
	 */
	if (!intel_dp_is_edp(intel_dp)) {
		drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
				 drm_dp_is_branch(intel_dp->dpcd));

		intel_dp_set_sink_rates(intel_dp);
		intel_dp_set_common_rates(intel_dp);
	}

	if (intel_dp_has_sink_count(intel_dp)) {
		ret = drm_dp_read_sink_count(&intel_dp->aux);
		if (ret < 0)
			return false;

		/*
		 * Sink count can change between short pulse hpd hence
		 * a member variable in intel_dp will track any changes
		 * between short pulse interrupts.
		 */
		intel_dp->sink_count = ret;

		/*
		 * SINK_COUNT == 0 and DOWNSTREAM_PORT_PRESENT == 1 implies that
		 * a dongle is present but no display. Unless we require to know
		 * if a dongle is present or not, we don't need to update
		 * downstream port information. So, an early return here saves
		 * time from performing other operations which are not required.
		 */
		if (!intel_dp->sink_count)
			return false;
	}

	return drm_dp_read_downstream_info(&intel_dp->aux, intel_dp->dpcd,
					   intel_dp->downstream_ports) == 0;
}
4808
9dbf5a4e
VS
4809static bool
4810intel_dp_can_mst(struct intel_dp *intel_dp)
4811{
8a25c4be
JN
4812 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
4813
4814 return i915->params.enable_dp_mst &&
9dbf5a4e 4815 intel_dp->can_mst &&
4b465912 4816 drm_dp_read_mst_cap(&intel_dp->aux, intel_dp->dpcd);
9dbf5a4e
VS
4817}
4818
/*
 * Decide whether to run this port in MST mode (port capability, sink
 * capability, and modparam must all agree) and propagate the decision
 * to the MST topology manager.
 */
static void
intel_dp_configure_mst(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_encoder *encoder =
		&dp_to_dig_port(intel_dp)->base;
	bool sink_can_mst = drm_dp_read_mst_cap(&intel_dp->aux, intel_dp->dpcd);

	drm_dbg_kms(&i915->drm,
		    "[ENCODER:%d:%s] MST support: port: %s, sink: %s, modparam: %s\n",
		    encoder->base.base.id, encoder->base.name,
		    yesno(intel_dp->can_mst), yesno(sink_can_mst),
		    yesno(i915->params.enable_dp_mst));

	/* Port can't do MST at all: leave the topology manager untouched. */
	if (!intel_dp->can_mst)
		return;

	intel_dp->is_mst = sink_can_mst &&
		i915->params.enable_dp_mst;

	drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
					intel_dp->is_mst);
}
4842
0e32b39c
DA
4843static bool
4844intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector)
4845{
e8b2577c
PD
4846 return drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI,
4847 sink_irq_vector, DP_DPRX_ESI_LEN) ==
4848 DP_DPRX_ESI_LEN;
0e32b39c
DA
4849}
4850
0c06fa15
GM
4851bool
4852intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state,
4853 const struct drm_connector_state *conn_state)
4854{
4855 /*
4856 * As per DP 1.4a spec section 2.2.4.3 [MSA Field for Indication
4857 * of Color Encoding Format and Content Color Gamut], in order to
4858 * sending YCBCR 420 or HDR BT.2020 signals we should use DP VSC SDP.
4859 */
4860 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
4861 return true;
4862
4863 switch (conn_state->colorspace) {
4864 case DRM_MODE_COLORIMETRY_SYCC_601:
4865 case DRM_MODE_COLORIMETRY_OPYCC_601:
4866 case DRM_MODE_COLORIMETRY_BT2020_YCC:
4867 case DRM_MODE_COLORIMETRY_BT2020_RGB:
4868 case DRM_MODE_COLORIMETRY_BT2020_CYCC:
4869 return true;
4870 default:
4871 break;
4872 }
4873
4874 return false;
4875}
4876
/*
 * Pack a drm_dp_vsc_sdp into the wire-format dp_sdp buffer.
 * Returns the packed length (sizeof(struct dp_sdp)) or -ENOSPC if
 * @size is too small.
 */
static ssize_t intel_dp_vsc_sdp_pack(const struct drm_dp_vsc_sdp *vsc,
				     struct dp_sdp *sdp, size_t size)
{
	size_t length = sizeof(struct dp_sdp);

	if (size < length)
		return -ENOSPC;

	memset(sdp, 0, size);

	/*
	 * Prepare VSC Header for SU as per DP 1.4a spec, Table 2-119
	 * VSC SDP Header Bytes
	 */
	sdp->sdp_header.HB0 = 0; /* Secondary-Data Packet ID = 0 */
	sdp->sdp_header.HB1 = vsc->sdp_type; /* Secondary-data Packet Type */
	sdp->sdp_header.HB2 = vsc->revision; /* Revision Number */
	sdp->sdp_header.HB3 = vsc->length; /* Number of Valid Data Bytes */

	/*
	 * Only revision 0x5 supports Pixel Encoding/Colorimetry Format as
	 * per DP 1.4a spec.
	 */
	if (vsc->revision != 0x5)
		goto out;

	/* VSC SDP Payload for DB16 through DB18 */
	/* Pixel Encoding and Colorimetry Formats */
	sdp->db[16] = (vsc->pixelformat & 0xf) << 4; /* DB16[7:4] */
	sdp->db[16] |= vsc->colorimetry & 0xf; /* DB16[3:0] */

	switch (vsc->bpc) {
	case 6:
		/* 6bpc: 0x0 */
		break;
	case 8:
		sdp->db[17] = 0x1; /* DB17[3:0] */
		break;
	case 10:
		sdp->db[17] = 0x2;
		break;
	case 12:
		sdp->db[17] = 0x3;
		break;
	case 16:
		sdp->db[17] = 0x4;
		break;
	default:
		MISSING_CASE(vsc->bpc);
		break;
	}
	/* Dynamic Range and Component Bit Depth */
	if (vsc->dynamic_range == DP_DYNAMIC_RANGE_CTA)
		sdp->db[17] |= 0x80; /* DB17[7] */

	/* Content Type */
	sdp->db[18] = vsc->content_type & 0x7;

out:
	return length;
}
4938
/*
 * Pack an HDMI DRM (HDR static metadata) infoframe into a DP SDP.
 * Returns the number of bytes to send or -ENOSPC on a size mismatch.
 */
static ssize_t
intel_dp_hdr_metadata_infoframe_sdp_pack(const struct hdmi_drm_infoframe *drm_infoframe,
					 struct dp_sdp *sdp,
					 size_t size)
{
	size_t length = sizeof(struct dp_sdp);
	const int infoframe_size = HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE;
	unsigned char buf[HDMI_INFOFRAME_HEADER_SIZE + HDMI_DRM_INFOFRAME_SIZE];
	ssize_t len;

	if (size < length)
		return -ENOSPC;

	memset(sdp, 0, size);

	/* Pack to HDMI wire format first, then re-wrap as a DP SDP. */
	len = hdmi_drm_infoframe_pack_only(drm_infoframe, buf, sizeof(buf));
	if (len < 0) {
		DRM_DEBUG_KMS("buffer size is smaller than hdr metadata infoframe\n");
		return -ENOSPC;
	}

	if (len != infoframe_size) {
		DRM_DEBUG_KMS("wrong static hdr metadata size\n");
		return -ENOSPC;
	}

	/*
	 * Set up the infoframe sdp packet for HDR static metadata.
	 * Prepare VSC Header for SU as per DP 1.4a spec,
	 * Table 2-100 and Table 2-101
	 */

	/* Secondary-Data Packet ID, 00h for non-Audio INFOFRAME */
	sdp->sdp_header.HB0 = 0;
	/*
	 * Packet Type 80h + Non-audio INFOFRAME Type value
	 * HDMI_INFOFRAME_TYPE_DRM: 0x87
	 * - 80h + Non-audio INFOFRAME Type value
	 * - InfoFrame Type: 0x07
	 * [CTA-861-G Table-42 Dynamic Range and Mastering InfoFrame]
	 */
	sdp->sdp_header.HB1 = drm_infoframe->type;
	/*
	 * Least Significant Eight Bits of (Data Byte Count – 1)
	 * infoframe_size - 1
	 */
	sdp->sdp_header.HB2 = 0x1D;
	/* INFOFRAME SDP Version Number */
	sdp->sdp_header.HB3 = (0x13 << 2);
	/* CTA Header Byte 2 (INFOFRAME Version Number) */
	sdp->db[0] = drm_infoframe->version;
	/* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */
	sdp->db[1] = drm_infoframe->length;
	/*
	 * Copy HDMI_DRM_INFOFRAME_SIZE size from a buffer after
	 * HDMI_INFOFRAME_HEADER_SIZE
	 */
	BUILD_BUG_ON(sizeof(sdp->db) < HDMI_DRM_INFOFRAME_SIZE + 2);
	memcpy(&sdp->db[2], &buf[HDMI_INFOFRAME_HEADER_SIZE],
	       HDMI_DRM_INFOFRAME_SIZE);

	/*
	 * Size of DP infoframe sdp packet for HDR static metadata consists of
	 * - DP SDP Header(struct dp_sdp_header): 4 bytes
	 * - Two Data Blocks: 2 bytes
	 *    CTA Header Byte2 (INFOFRAME Version Number)
	 *    CTA Header Byte3 (Length of INFOFRAME)
	 * - HDMI_DRM_INFOFRAME_SIZE: 26 bytes
	 *
	 * Prior to GEN11's GMP register size is identical to DP HDR static metadata
	 * infoframe size. But GEN11+ has larger than that size, write_infoframe
	 * will pad rest of the size.
	 */
	return sizeof(struct dp_sdp_header) + 2 + HDMI_DRM_INFOFRAME_SIZE;
}
/*
 * Pack and write a single DP SDP of @type (VSC or HDR gamut metadata)
 * to the hardware, if enabled in the crtc_state infoframe mask.
 */
static void intel_write_dp_sdp(struct intel_encoder *encoder,
			       const struct intel_crtc_state *crtc_state,
			       unsigned int type)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct dp_sdp sdp = {};
	ssize_t len;

	/* Nothing to send if this SDP type isn't enabled for the state. */
	if ((crtc_state->infoframes.enable &
	     intel_hdmi_infoframe_enable(type)) == 0)
		return;

	switch (type) {
	case DP_SDP_VSC:
		len = intel_dp_vsc_sdp_pack(&crtc_state->infoframes.vsc, &sdp,
					    sizeof(sdp));
		break;
	case HDMI_PACKET_TYPE_GAMUT_METADATA:
		len = intel_dp_hdr_metadata_infoframe_sdp_pack(&crtc_state->infoframes.drm.drm,
							       &sdp, sizeof(sdp));
		break;
	default:
		MISSING_CASE(type);
		return;
	}

	if (drm_WARN_ON(&dev_priv->drm, len < 0))
		return;

	dig_port->write_infoframe(encoder, crtc_state, type, &sdp, len);
}
5047
/*
 * Pack and write an explicitly supplied VSC SDP (used e.g. by callers
 * that maintain their own VSC state rather than the crtc_state copy).
 */
void intel_write_dp_vsc_sdp(struct intel_encoder *encoder,
			    const struct intel_crtc_state *crtc_state,
			    struct drm_dp_vsc_sdp *vsc)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct dp_sdp sdp = {};
	ssize_t len;

	len = intel_dp_vsc_sdp_pack(vsc, &sdp, sizeof(sdp));

	if (drm_WARN_ON(&dev_priv->drm, len < 0))
		return;

	dig_port->write_infoframe(encoder, crtc_state, DP_SDP_VSC,
				  &sdp, len);
}
5065
/*
 * Enable or disable all DP SDP/infoframe transmission for this
 * transcoder, then (re)send the VSC and HDR metadata SDPs when enabling.
 */
void intel_dp_set_infoframes(struct intel_encoder *encoder,
			     bool enable,
			     const struct intel_crtc_state *crtc_state,
			     const struct drm_connector_state *conn_state)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	i915_reg_t reg = HSW_TVIDEO_DIP_CTL(crtc_state->cpu_transcoder);
	u32 dip_enable = VIDEO_DIP_ENABLE_AVI_HSW | VIDEO_DIP_ENABLE_GCP_HSW |
			 VIDEO_DIP_ENABLE_VS_HSW | VIDEO_DIP_ENABLE_GMP_HSW |
			 VIDEO_DIP_ENABLE_SPD_HSW | VIDEO_DIP_ENABLE_DRM_GLK;
	u32 val = intel_de_read(dev_priv, reg);

	/* TODO: Add DSC case (DIP_ENABLE_PPS) */
	/* When PSR is enabled, this routine doesn't disable VSC DIP */
	if (intel_psr_enabled(intel_dp))
		val &= ~dip_enable;
	else
		val &= ~(dip_enable | VIDEO_DIP_ENABLE_VSC_HSW);

	if (!enable) {
		intel_de_write(dev_priv, reg, val);
		intel_de_posting_read(dev_priv, reg);
		return;
	}

	intel_de_write(dev_priv, reg, val);
	intel_de_posting_read(dev_priv, reg);

	/* When PSR is enabled, VSC SDP is handled by PSR routine */
	if (!intel_psr_enabled(intel_dp))
		intel_write_dp_sdp(encoder, crtc_state, DP_SDP_VSC);

	intel_write_dp_sdp(encoder, crtc_state, HDMI_PACKET_TYPE_GAMUT_METADATA);
}
5101
1b404b7d
GM
5102static int intel_dp_vsc_sdp_unpack(struct drm_dp_vsc_sdp *vsc,
5103 const void *buffer, size_t size)
5104{
5105 const struct dp_sdp *sdp = buffer;
5106
5107 if (size < sizeof(struct dp_sdp))
5108 return -EINVAL;
5109
5110 memset(vsc, 0, size);
5111
5112 if (sdp->sdp_header.HB0 != 0)
5113 return -EINVAL;
5114
5115 if (sdp->sdp_header.HB1 != DP_SDP_VSC)
5116 return -EINVAL;
5117
5118 vsc->sdp_type = sdp->sdp_header.HB1;
5119 vsc->revision = sdp->sdp_header.HB2;
5120 vsc->length = sdp->sdp_header.HB3;
5121
5122 if ((sdp->sdp_header.HB2 == 0x2 && sdp->sdp_header.HB3 == 0x8) ||
5123 (sdp->sdp_header.HB2 == 0x4 && sdp->sdp_header.HB3 == 0xe)) {
5124 /*
5125 * - HB2 = 0x2, HB3 = 0x8
5126 * VSC SDP supporting 3D stereo + PSR
5127 * - HB2 = 0x4, HB3 = 0xe
5128 * VSC SDP supporting 3D stereo + PSR2 with Y-coordinate of
5129 * first scan line of the SU region (applies to eDP v1.4b
5130 * and higher).
5131 */
5132 return 0;
5133 } else if (sdp->sdp_header.HB2 == 0x5 && sdp->sdp_header.HB3 == 0x13) {
5134 /*
5135 * - HB2 = 0x5, HB3 = 0x13
5136 * VSC SDP supporting 3D stereo + PSR2 + Pixel Encoding/Colorimetry
5137 * Format.
5138 */
5139 vsc->pixelformat = (sdp->db[16] >> 4) & 0xf;
5140 vsc->colorimetry = sdp->db[16] & 0xf;
5141 vsc->dynamic_range = (sdp->db[17] >> 7) & 0x1;
5142
5143 switch (sdp->db[17] & 0x7) {
5144 case 0x0:
5145 vsc->bpc = 6;
5146 break;
5147 case 0x1:
5148 vsc->bpc = 8;
5149 break;
5150 case 0x2:
5151 vsc->bpc = 10;
5152 break;
5153 case 0x3:
5154 vsc->bpc = 12;
5155 break;
5156 case 0x4:
5157 vsc->bpc = 16;
5158 break;
5159 default:
5160 MISSING_CASE(sdp->db[17] & 0x7);
5161 return -EINVAL;
5162 }
5163
5164 vsc->content_type = sdp->db[18] & 0x7;
5165 } else {
5166 return -EINVAL;
5167 }
5168
5169 return 0;
5170}
5171
/*
 * Validate a DP SDP carrying an HDMI DRM (HDR static metadata) infoframe
 * and unpack its payload into @drm_infoframe.
 * Returns 0 on success, -EINVAL on any header/field mismatch.
 */
static int
intel_dp_hdr_metadata_infoframe_sdp_unpack(struct hdmi_drm_infoframe *drm_infoframe,
					   const void *buffer, size_t size)
{
	int ret;

	const struct dp_sdp *sdp = buffer;

	if (size < sizeof(struct dp_sdp))
		return -EINVAL;

	if (sdp->sdp_header.HB0 != 0)
		return -EINVAL;

	if (sdp->sdp_header.HB1 != HDMI_INFOFRAME_TYPE_DRM)
		return -EINVAL;

	/*
	 * Least Significant Eight Bits of (Data Byte Count – 1)
	 * 1Dh (i.e., Data Byte Count = 30 bytes).
	 */
	if (sdp->sdp_header.HB2 != 0x1D)
		return -EINVAL;

	/* Most Significant Two Bits of (Data Byte Count – 1), Clear to 00b. */
	if ((sdp->sdp_header.HB3 & 0x3) != 0)
		return -EINVAL;

	/* INFOFRAME SDP Version Number */
	if (((sdp->sdp_header.HB3 >> 2) & 0x3f) != 0x13)
		return -EINVAL;

	/* CTA Header Byte 2 (INFOFRAME Version Number) */
	if (sdp->db[0] != 1)
		return -EINVAL;

	/* CTA Header Byte 3 (Length of INFOFRAME): HDMI_DRM_INFOFRAME_SIZE */
	if (sdp->db[1] != HDMI_DRM_INFOFRAME_SIZE)
		return -EINVAL;

	ret = hdmi_drm_infoframe_unpack_only(drm_infoframe, &sdp->db[2],
					     HDMI_DRM_INFOFRAME_SIZE);

	return ret;
}
5217
/*
 * Read the VSC SDP back from the hardware and unpack it into @vsc
 * (used for state readout/verification). Skipped when PSR owns the VSC
 * SDP or the SDP isn't enabled in @crtc_state.
 */
static void intel_read_dp_vsc_sdp(struct intel_encoder *encoder,
				  struct intel_crtc_state *crtc_state,
				  struct drm_dp_vsc_sdp *vsc)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	unsigned int type = DP_SDP_VSC;
	struct dp_sdp sdp = {};
	int ret;

	/* When PSR is enabled, VSC SDP is handled by PSR routine */
	if (intel_psr_enabled(intel_dp))
		return;

	if ((crtc_state->infoframes.enable &
	     intel_hdmi_infoframe_enable(type)) == 0)
		return;

	dig_port->read_infoframe(encoder, crtc_state, type, &sdp, sizeof(sdp));

	ret = intel_dp_vsc_sdp_unpack(vsc, &sdp, sizeof(sdp));

	if (ret)
		drm_dbg_kms(&dev_priv->drm, "Failed to unpack DP VSC SDP\n");
}
5244
/*
 * Read the HDR static metadata infoframe SDP back from the hardware and
 * unpack it into @drm_infoframe (used for state readout/verification).
 */
static void intel_read_dp_hdr_metadata_infoframe_sdp(struct intel_encoder *encoder,
						     struct intel_crtc_state *crtc_state,
						     struct hdmi_drm_infoframe *drm_infoframe)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	unsigned int type = HDMI_PACKET_TYPE_GAMUT_METADATA;
	struct dp_sdp sdp = {};
	int ret;

	/* Nothing to read if this SDP type isn't enabled for the state. */
	if ((crtc_state->infoframes.enable &
	     intel_hdmi_infoframe_enable(type)) == 0)
		return;

	dig_port->read_infoframe(encoder, crtc_state, type, &sdp,
				 sizeof(sdp));

	ret = intel_dp_hdr_metadata_infoframe_sdp_unpack(drm_infoframe, &sdp,
							 sizeof(sdp));

	if (ret)
		drm_dbg_kms(&dev_priv->drm,
			    "Failed to unpack DP HDR Metadata Infoframe SDP\n");
}
5269
5270void intel_read_dp_sdp(struct intel_encoder *encoder,
5271 struct intel_crtc_state *crtc_state,
5272 unsigned int type)
5273{
22da5d84
CW
5274 if (encoder->type != INTEL_OUTPUT_DDI)
5275 return;
5276
1b404b7d
GM
5277 switch (type) {
5278 case DP_SDP_VSC:
5279 intel_read_dp_vsc_sdp(encoder, crtc_state,
5280 &crtc_state->infoframes.vsc);
5281 break;
5282 case HDMI_PACKET_TYPE_GAMUT_METADATA:
5283 intel_read_dp_hdr_metadata_infoframe_sdp(encoder, crtc_state,
5284 &crtc_state->infoframes.drm.drm);
5285 break;
5286 default:
5287 MISSING_CASE(type);
5288 break;
5289 }
5290}
5291
830de422 5292static u8 intel_dp_autotest_link_training(struct intel_dp *intel_dp)
c5d5ab7a 5293{
af67009c 5294 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
da15f7cb 5295 int status = 0;
140ef138 5296 int test_link_rate;
830de422 5297 u8 test_lane_count, test_link_bw;
da15f7cb
MN
5298 /* (DP CTS 1.2)
5299 * 4.3.1.11
5300 */
5301 /* Read the TEST_LANE_COUNT and TEST_LINK_RTAE fields (DP CTS 3.1.4) */
5302 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LANE_COUNT,
5303 &test_lane_count);
5304
5305 if (status <= 0) {
af67009c 5306 drm_dbg_kms(&i915->drm, "Lane count read failed\n");
da15f7cb
MN
5307 return DP_TEST_NAK;
5308 }
5309 test_lane_count &= DP_MAX_LANE_COUNT_MASK;
da15f7cb
MN
5310
5311 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_LINK_RATE,
5312 &test_link_bw);
5313 if (status <= 0) {
af67009c 5314 drm_dbg_kms(&i915->drm, "Link Rate read failed\n");
da15f7cb
MN
5315 return DP_TEST_NAK;
5316 }
da15f7cb 5317 test_link_rate = drm_dp_bw_code_to_link_rate(test_link_bw);
140ef138
MN
5318
5319 /* Validate the requested link rate and lane count */
5320 if (!intel_dp_link_params_valid(intel_dp, test_link_rate,
5321 test_lane_count))
da15f7cb
MN
5322 return DP_TEST_NAK;
5323
5324 intel_dp->compliance.test_lane_count = test_lane_count;
5325 intel_dp->compliance.test_link_rate = test_link_rate;
5326
5327 return DP_TEST_ACK;
c5d5ab7a
TP
5328}
5329
830de422 5330static u8 intel_dp_autotest_video_pattern(struct intel_dp *intel_dp)
c5d5ab7a 5331{
af67009c 5332 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
830de422
JN
5333 u8 test_pattern;
5334 u8 test_misc;
611032bf
MN
5335 __be16 h_width, v_height;
5336 int status = 0;
5337
5338 /* Read the TEST_PATTERN (DP CTS 3.1.5) */
010b9b39
JN
5339 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_PATTERN,
5340 &test_pattern);
611032bf 5341 if (status <= 0) {
af67009c 5342 drm_dbg_kms(&i915->drm, "Test pattern read failed\n");
611032bf
MN
5343 return DP_TEST_NAK;
5344 }
5345 if (test_pattern != DP_COLOR_RAMP)
5346 return DP_TEST_NAK;
5347
5348 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_H_WIDTH_HI,
5349 &h_width, 2);
5350 if (status <= 0) {
af67009c 5351 drm_dbg_kms(&i915->drm, "H Width read failed\n");
611032bf
MN
5352 return DP_TEST_NAK;
5353 }
5354
5355 status = drm_dp_dpcd_read(&intel_dp->aux, DP_TEST_V_HEIGHT_HI,
5356 &v_height, 2);
5357 if (status <= 0) {
af67009c 5358 drm_dbg_kms(&i915->drm, "V Height read failed\n");
611032bf
MN
5359 return DP_TEST_NAK;
5360 }
5361
010b9b39
JN
5362 status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_MISC0,
5363 &test_misc);
611032bf 5364 if (status <= 0) {
af67009c 5365 drm_dbg_kms(&i915->drm, "TEST MISC read failed\n");
611032bf
MN
5366 return DP_TEST_NAK;
5367 }
5368 if ((test_misc & DP_TEST_COLOR_FORMAT_MASK) != DP_COLOR_FORMAT_RGB)
5369 return DP_TEST_NAK;
5370 if (test_misc & DP_TEST_DYNAMIC_RANGE_CEA)
5371 return DP_TEST_NAK;
5372 switch (test_misc & DP_TEST_BIT_DEPTH_MASK) {
5373 case DP_TEST_BIT_DEPTH_6:
5374 intel_dp->compliance.test_data.bpc = 6;
5375 break;
5376 case DP_TEST_BIT_DEPTH_8:
5377 intel_dp->compliance.test_data.bpc = 8;
5378 break;
5379 default:
5380 return DP_TEST_NAK;
5381 }
5382
5383 intel_dp->compliance.test_data.video_pattern = test_pattern;
5384 intel_dp->compliance.test_data.hdisplay = be16_to_cpu(h_width);
5385 intel_dp->compliance.test_data.vdisplay = be16_to_cpu(v_height);
5386 /* Set test active flag here so userspace doesn't interrupt things */
dd93cecf 5387 intel_dp->compliance.test_active = true;
611032bf
MN
5388
5389 return DP_TEST_ACK;
c5d5ab7a
TP
5390}
5391
/*
 * Handle DP_TEST_LINK_EDID_READ: report the outcome of the preceding EDID
 * read back to the sink. On a failed/corrupt read the failsafe resolution
 * is selected; on success the checksum of the last EDID block is written to
 * DP_TEST_EDID_CHECKSUM and the preferred resolution is selected.
 */
static u8 intel_dp_autotest_edid(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 test_result = DP_TEST_ACK;
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	struct drm_connector *connector = &intel_connector->base;

	if (intel_connector->detect_edid == NULL ||
	    connector->edid_corrupt ||
	    intel_dp->aux.i2c_defer_count > 6) {
		/* Check EDID read for NACKs, DEFERs and corruption
		 * (DP CTS 1.2 Core r1.1)
		 *    4.2.2.4 : Failed EDID read, I2C_NAK
		 *    4.2.2.5 : Failed EDID read, I2C_DEFER
		 *    4.2.2.6 : EDID corruption detected
		 * Use failsafe mode for all cases
		 */
		if (intel_dp->aux.i2c_nack_count > 0 ||
		    intel_dp->aux.i2c_defer_count > 0)
			drm_dbg_kms(&i915->drm,
				    "EDID read had %d NACKs, %d DEFERs\n",
				    intel_dp->aux.i2c_nack_count,
				    intel_dp->aux.i2c_defer_count);
		intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_FAILSAFE;
	} else {
		struct edid *block = intel_connector->detect_edid;

		/* We have to write the checksum
		 * of the last block read
		 */
		block += intel_connector->detect_edid->extensions;

		if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_EDID_CHECKSUM,
				       block->checksum) <= 0)
			drm_dbg_kms(&i915->drm,
				    "Failed to write EDID checksum\n");

		test_result = DP_TEST_ACK | DP_TEST_EDID_CHECKSUM_WRITE;
		intel_dp->compliance.test_data.edid = INTEL_DP_RESOLUTION_PREFERRED;
	}

	/* Set test active flag here so userspace doesn't interrupt things */
	intel_dp->compliance.test_active = true;

	return test_result;
}
5438
a621860a
VS
5439static void intel_dp_phy_pattern_update(struct intel_dp *intel_dp,
5440 const struct intel_crtc_state *crtc_state)
8cdf7271
AM
5441{
5442 struct drm_i915_private *dev_priv =
5443 to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
8cdf7271
AM
5444 struct drm_dp_phy_test_params *data =
5445 &intel_dp->compliance.test_data.phytest;
a621860a 5446 struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
8cdf7271
AM
5447 enum pipe pipe = crtc->pipe;
5448 u32 pattern_val;
5449
5450 switch (data->phy_pattern) {
5451 case DP_PHY_TEST_PATTERN_NONE:
5452 DRM_DEBUG_KMS("Disable Phy Test Pattern\n");
5453 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 0x0);
5454 break;
5455 case DP_PHY_TEST_PATTERN_D10_2:
5456 DRM_DEBUG_KMS("Set D10.2 Phy Test Pattern\n");
5457 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
5458 DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_D10_2);
5459 break;
5460 case DP_PHY_TEST_PATTERN_ERROR_COUNT:
5461 DRM_DEBUG_KMS("Set Error Count Phy Test Pattern\n");
5462 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
5463 DDI_DP_COMP_CTL_ENABLE |
5464 DDI_DP_COMP_CTL_SCRAMBLED_0);
5465 break;
5466 case DP_PHY_TEST_PATTERN_PRBS7:
5467 DRM_DEBUG_KMS("Set PRBS7 Phy Test Pattern\n");
5468 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
5469 DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_PRBS7);
5470 break;
5471 case DP_PHY_TEST_PATTERN_80BIT_CUSTOM:
5472 /*
5473 * FIXME: Ideally pattern should come from DPCD 0x250. As
5474 * current firmware of DPR-100 could not set it, so hardcoding
5475 * now for complaince test.
5476 */
5477 DRM_DEBUG_KMS("Set 80Bit Custom Phy Test Pattern 0x3e0f83e0 0x0f83e0f8 0x0000f83e\n");
5478 pattern_val = 0x3e0f83e0;
5479 intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 0), pattern_val);
5480 pattern_val = 0x0f83e0f8;
5481 intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 1), pattern_val);
5482 pattern_val = 0x0000f83e;
5483 intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 2), pattern_val);
5484 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
5485 DDI_DP_COMP_CTL_ENABLE |
5486 DDI_DP_COMP_CTL_CUSTOM80);
5487 break;
5488 case DP_PHY_TEST_PATTERN_CP2520:
5489 /*
5490 * FIXME: Ideally pattern should come from DPCD 0x24A. As
5491 * current firmware of DPR-100 could not set it, so hardcoding
5492 * now for complaince test.
5493 */
5494 DRM_DEBUG_KMS("Set HBR2 compliance Phy Test Pattern\n");
5495 pattern_val = 0xFB;
5496 intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe),
5497 DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_HBR2 |
5498 pattern_val);
5499 break;
5500 default:
5501 WARN(1, "Invalid Phy Test Pattern\n");
5502 }
5503}
5504
/*
 * Tear down the transcoder/pipe/DP_TP enables for the PHY compliance test
 * by direct register manipulation, bypassing the normal modeset paths.
 * The read-modify-write order (pipe first, then DDI function, then DP_TP)
 * mirrors the inverse of the enable sequence below.
 * NOTE(review): @crtc_state is unused here; the crtc is taken from the
 * encoder's legacy ->crtc pointer instead — presumably equivalent for the
 * compliance-test case, but worth confirming.
 */
static void
intel_dp_autotest_phy_ddi_disable(struct intel_dp *intel_dp,
				  const struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
	enum pipe pipe = crtc->pipe;
	u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value;

	trans_ddi_func_ctl_value = intel_de_read(dev_priv,
						 TRANS_DDI_FUNC_CTL(pipe));
	trans_conf_value = intel_de_read(dev_priv, PIPECONF(pipe));
	dp_tp_ctl_value = intel_de_read(dev_priv, TGL_DP_TP_CTL(pipe));

	/* Clear the enable bits while preserving the other fields. */
	trans_ddi_func_ctl_value &= ~(TRANS_DDI_FUNC_ENABLE |
				      TGL_TRANS_DDI_PORT_MASK);
	trans_conf_value &= ~PIPECONF_ENABLE;
	dp_tp_ctl_value &= ~DP_TP_CTL_ENABLE;

	intel_de_write(dev_priv, PIPECONF(pipe), trans_conf_value);
	intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(pipe),
		       trans_ddi_func_ctl_value);
	intel_de_write(dev_priv, TGL_DP_TP_CTL(pipe), dp_tp_ctl_value);
}
5531
/*
 * Re-enable the transcoder/pipe/DP_TP for the PHY compliance test by direct
 * register manipulation — the counterpart of
 * intel_dp_autotest_phy_ddi_disable(). The DDI function control is written
 * last so the port selection takes effect only after pipe and DP_TP are up.
 * NOTE(review): @crtc_state is unused; the crtc comes from the encoder's
 * legacy ->crtc pointer — confirm this matches the state's crtc.
 */
static void
intel_dp_autotest_phy_ddi_enable(struct intel_dp *intel_dp,
				 const struct intel_crtc_state *crtc_state)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum port port = dig_port->base.port;
	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
	enum pipe pipe = crtc->pipe;
	u32 trans_ddi_func_ctl_value, trans_conf_value, dp_tp_ctl_value;

	trans_ddi_func_ctl_value = intel_de_read(dev_priv,
						 TRANS_DDI_FUNC_CTL(pipe));
	trans_conf_value = intel_de_read(dev_priv, PIPECONF(pipe));
	dp_tp_ctl_value = intel_de_read(dev_priv, TGL_DP_TP_CTL(pipe));

	/* Set the enable bits and (re)select the port on the transcoder. */
	trans_ddi_func_ctl_value |= TRANS_DDI_FUNC_ENABLE |
				    TGL_TRANS_DDI_SELECT_PORT(port);
	trans_conf_value |= PIPECONF_ENABLE;
	dp_tp_ctl_value |= DP_TP_CTL_ENABLE;

	intel_de_write(dev_priv, PIPECONF(pipe), trans_conf_value);
	intel_de_write(dev_priv, TGL_DP_TP_CTL(pipe), dp_tp_ctl_value);
	intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(pipe),
		       trans_ddi_func_ctl_value);
}
5559
a621860a
VS
5560static void intel_dp_process_phy_request(struct intel_dp *intel_dp,
5561 const struct intel_crtc_state *crtc_state)
8cdf7271
AM
5562{
5563 struct drm_dp_phy_test_params *data =
5564 &intel_dp->compliance.test_data.phytest;
5565 u8 link_status[DP_LINK_STATUS_SIZE];
5566
5567 if (!intel_dp_get_link_status(intel_dp, link_status)) {
5568 DRM_DEBUG_KMS("failed to get link status\n");
5569 return;
5570 }
5571
5572 /* retrieve vswing & pre-emphasis setting */
a621860a 5573 intel_dp_get_adjust_train(intel_dp, crtc_state, link_status);
8cdf7271 5574
a621860a 5575 intel_dp_autotest_phy_ddi_disable(intel_dp, crtc_state);
8cdf7271 5576
a621860a 5577 intel_dp_set_signal_levels(intel_dp, crtc_state);
8cdf7271 5578
a621860a 5579 intel_dp_phy_pattern_update(intel_dp, crtc_state);
8cdf7271 5580
a621860a 5581 intel_dp_autotest_phy_ddi_enable(intel_dp, crtc_state);
8cdf7271
AM
5582
5583 drm_dp_set_phy_test_pattern(&intel_dp->aux, data,
5584 link_status[DP_DPCD_REV]);
5585}
5586
830de422 5587static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp)
a60f0e38 5588{
193af12c
VS
5589 struct drm_dp_phy_test_params *data =
5590 &intel_dp->compliance.test_data.phytest;
88afbfdb 5591
193af12c
VS
5592 if (drm_dp_get_phy_test_pattern(&intel_dp->aux, data)) {
5593 DRM_DEBUG_KMS("DP Phy Test pattern AUX read failure\n");
5594 return DP_TEST_NAK;
5595 }
88afbfdb 5596
193af12c
VS
5597 /* Set test active flag here so userspace doesn't interrupt things */
5598 intel_dp->compliance.test_active = true;
8cdf7271 5599
193af12c 5600 return DP_TEST_ACK;
c5d5ab7a
TP
5601}
5602
/*
 * Service an automated-test interrupt: read DP_TEST_REQUEST from the sink,
 * dispatch to the matching autotest handler, remember the ACKed test type,
 * and write the ACK/NAK response back to DP_TEST_RESPONSE.
 */
static void intel_dp_handle_test_request(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 response = DP_TEST_NAK;
	u8 request = 0;
	int status;

	status = drm_dp_dpcd_readb(&intel_dp->aux, DP_TEST_REQUEST, &request);
	if (status <= 0) {
		drm_dbg_kms(&i915->drm,
			    "Could not read test request from sink\n");
		goto update_status;
	}

	switch (request) {
	case DP_TEST_LINK_TRAINING:
		drm_dbg_kms(&i915->drm, "LINK_TRAINING test requested\n");
		response = intel_dp_autotest_link_training(intel_dp);
		break;
	case DP_TEST_LINK_VIDEO_PATTERN:
		drm_dbg_kms(&i915->drm, "TEST_PATTERN test requested\n");
		response = intel_dp_autotest_video_pattern(intel_dp);
		break;
	case DP_TEST_LINK_EDID_READ:
		drm_dbg_kms(&i915->drm, "EDID test requested\n");
		response = intel_dp_autotest_edid(intel_dp);
		break;
	case DP_TEST_LINK_PHY_TEST_PATTERN:
		drm_dbg_kms(&i915->drm, "PHY_PATTERN test requested\n");
		response = intel_dp_autotest_phy_pattern(intel_dp);
		break;
	default:
		drm_dbg_kms(&i915->drm, "Invalid test request '%02x'\n",
			    request);
		break;
	}

	/* Only record the test type if the handler ACKed it. */
	if (response & DP_TEST_ACK)
		intel_dp->compliance.test_type = request;

update_status:
	status = drm_dp_dpcd_writeb(&intel_dp->aux, DP_TEST_RESPONSE, response);
	if (status <= 0)
		drm_dbg_kms(&i915->drm,
			    "Could not write test response to sink\n");
}
5649
8d712a7e
ID
/**
 * intel_dp_check_mst_status - service any pending MST interrupts, check link status
 * @intel_dp: Intel DP struct
 *
 * Read any pending MST interrupts, call MST core to handle these and ack the
 * interrupts. Check if the main and AUX link state is ok.
 *
 * Returns:
 * - %true if pending interrupts were serviced (or no interrupts were
 *   pending) w/o detecting an error condition.
 * - %false if an error condition - like AUX failure or a loss of link - is
 *   detected, which needs servicing from the hotplug work.
 */
static bool
intel_dp_check_mst_status(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	bool link_ok = true;

	drm_WARN_ON_ONCE(&i915->drm, intel_dp->active_mst_links < 0);

	/* Keep draining ESI events until the MST core has nothing to handle. */
	for (;;) {
		u8 esi[DP_DPRX_ESI_LEN] = {};
		bool handled;
		int retry;

		if (!intel_dp_get_sink_irq_esi(intel_dp, esi)) {
			drm_dbg_kms(&i915->drm,
				    "failed to get ESI - device may have failed\n");
			link_ok = false;

			break;
		}

		/* check link status - esi[10] = 0x200c */
		if (intel_dp->active_mst_links > 0 && link_ok &&
		    !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) {
			drm_dbg_kms(&i915->drm,
				    "channel EQ not ok, retraining\n");
			link_ok = false;
		}

		drm_dbg_kms(&i915->drm, "got esi %3ph\n", esi);

		drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled);
		if (!handled)
			break;

		/* Ack the serviced events; retry the 3-byte write a few times. */
		for (retry = 0; retry < 3; retry++) {
			int wret;

			wret = drm_dp_dpcd_write(&intel_dp->aux,
						 DP_SINK_COUNT_ESI+1,
						 &esi[1], 3);
			if (wret == 3)
				break;
		}
	}

	return link_ok;
}
5711
c85d200e
VS
/*
 * Decide whether the link needs retraining: only consider links we trained
 * ourselves, skip while PSR owns the main link, and re-check channel EQ/CR
 * from the live link status against the cached link parameters.
 */
static bool
intel_dp_needs_link_retrain(struct intel_dp *intel_dp)
{
	u8 link_status[DP_LINK_STATUS_SIZE];

	if (!intel_dp->link_trained)
		return false;

	/*
	 * While PSR source HW is enabled, it will control main-link sending
	 * frames, enabling and disabling it so trying to do a retrain will fail
	 * as the link would or not be on or it could mix training patterns
	 * and frame data at the same time causing retrain to fail.
	 * Also when exiting PSR, HW will retrain the link anyways fixing
	 * any link status error.
	 */
	if (intel_psr_enabled(intel_dp))
		return false;

	if (!intel_dp_get_link_status(intel_dp, link_status))
		return false;

	/*
	 * Validate the cached values of intel_dp->link_rate and
	 * intel_dp->lane_count before attempting to retrain.
	 *
	 * FIXME would be nice to use the crtc state here, but since
	 * we need to call this from the short HPD handler that seems
	 * a bit hard.
	 */
	if (!intel_dp_link_params_valid(intel_dp, intel_dp->link_rate,
					intel_dp->lane_count))
		return false;

	/* Retrain if Channel EQ or CR not ok */
	return !drm_dp_channel_eq_ok(link_status, intel_dp->lane_count);
}
5749
f0617ff0
VS
5750static bool intel_dp_has_connector(struct intel_dp *intel_dp,
5751 const struct drm_connector_state *conn_state)
5752{
5753 struct drm_i915_private *i915 = dp_to_i915(intel_dp);
5754 struct intel_encoder *encoder;
5755 enum pipe pipe;
5756
5757 if (!conn_state->best_encoder)
5758 return false;
5759
5760 /* SST */
5761 encoder = &dp_to_dig_port(intel_dp)->base;
5762 if (conn_state->best_encoder == &encoder->base)
5763 return true;
5764
5765 /* MST */
5766 for_each_pipe(i915, pipe) {
5767 encoder = &intel_dp->mst_encoders[pipe]->base;
5768 if (conn_state->best_encoder == &encoder->base)
5769 return true;
5770 }
5771
5772 return false;
5773}
5774
/*
 * Prepare for link retraining: if retraining is needed, walk all connectors
 * driven by this DP port, lock their crtcs via @ctx, and collect the mask of
 * active, committed crtcs into *@crtc_mask. Returns 0 or a drm_modeset_lock
 * error (-EDEADLK must be handled by the caller's backoff loop).
 */
static int intel_dp_prep_link_retrain(struct intel_dp *intel_dp,
				      struct drm_modeset_acquire_ctx *ctx,
				      u32 *crtc_mask)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct drm_connector_list_iter conn_iter;
	struct intel_connector *connector;
	int ret = 0;

	*crtc_mask = 0;

	if (!intel_dp_needs_link_retrain(intel_dp))
		return 0;

	drm_connector_list_iter_begin(&i915->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *conn_state =
			connector->base.state;
		struct intel_crtc_state *crtc_state;
		struct intel_crtc *crtc;

		if (!intel_dp_has_connector(intel_dp, conn_state))
			continue;

		crtc = to_intel_crtc(conn_state->crtc);
		if (!crtc)
			continue;

		ret = drm_modeset_lock(&crtc->base.mutex, ctx);
		if (ret)
			break;

		crtc_state = to_intel_crtc_state(crtc->base.state);

		drm_WARN_ON(&i915->drm, !intel_crtc_has_dp_encoder(crtc_state));

		if (!crtc_state->hw.active)
			continue;

		/* Skip crtcs with a commit still in flight. */
		if (conn_state->commit &&
		    !try_wait_for_completion(&conn_state->commit->hw_done))
			continue;

		*crtc_mask |= drm_crtc_mask(&crtc->base);
	}
	drm_connector_list_iter_end(&conn_iter);

	/* Re-check: the link may have recovered while we iterated. */
	if (!intel_dp_needs_link_retrain(intel_dp))
		*crtc_mask = 0;

	return ret;
}
5827
5828static bool intel_dp_is_connected(struct intel_dp *intel_dp)
5829{
5830 struct intel_connector *connector = intel_dp->attached_connector;
5831
5832 return connector->base.status == connector_status_connected ||
5833 intel_dp->is_mst;
5834}
5835
c85d200e
VS
/*
 * Retrain the link on all active crtcs driven by @encoder in three phases:
 * 1) suppress FIFO underrun reporting, 2) retrain once (on the MST master
 * transcoder for gen12+ MST), 3) wait a vblank and re-enable underrun
 * reporting. Returns 0, or a locking error (-EDEADLK handled by caller).
 */
int intel_dp_retrain_link(struct intel_encoder *encoder,
			  struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_crtc *crtc;
	u32 crtc_mask;
	int ret;

	if (!intel_dp_is_connected(intel_dp))
		return 0;

	ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
			       ctx);
	if (ret)
		return ret;

	ret = intel_dp_prep_link_retrain(intel_dp, ctx, &crtc_mask);
	if (ret)
		return ret;

	if (crtc_mask == 0)
		return 0;

	drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] retraining link\n",
		    encoder->base.base.id, encoder->base.name);

	for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) {
		const struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		/* Suppress underruns caused by re-training */
		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
		if (crtc_state->has_pch_encoder)
			intel_set_pch_fifo_underrun_reporting(dev_priv,
							      intel_crtc_pch_transcoder(crtc), false);
	}

	for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) {
		const struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		/* retrain on the MST master transcoder */
		if (INTEL_GEN(dev_priv) >= 12 &&
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST) &&
		    !intel_dp_mst_is_master_trans(crtc_state))
			continue;

		/* A single retrain covers the whole port. */
		intel_dp_start_link_train(intel_dp, crtc_state);
		intel_dp_stop_link_train(intel_dp, crtc_state);
		break;
	}

	for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) {
		const struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		/* Keep underrun reporting disabled until things are stable */
		intel_wait_for_vblank(dev_priv, crtc->pipe);

		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
		if (crtc_state->has_pch_encoder)
			intel_set_pch_fifo_underrun_reporting(dev_priv,
							      intel_crtc_pch_transcoder(crtc), true);
	}

	return 0;
}
5904
193af12c
VS
/*
 * Collect the crtcs to run the PHY compliance test on: like
 * intel_dp_prep_link_retrain() but unconditional (no retrain-needed check).
 * Locks each matching crtc via @ctx and fills *@crtc_mask with the active,
 * committed crtcs of this DP port. Returns 0 or a locking error.
 */
static int intel_dp_prep_phy_test(struct intel_dp *intel_dp,
				  struct drm_modeset_acquire_ctx *ctx,
				  u32 *crtc_mask)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct drm_connector_list_iter conn_iter;
	struct intel_connector *connector;
	int ret = 0;

	*crtc_mask = 0;

	drm_connector_list_iter_begin(&i915->drm, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *conn_state =
			connector->base.state;
		struct intel_crtc_state *crtc_state;
		struct intel_crtc *crtc;

		if (!intel_dp_has_connector(intel_dp, conn_state))
			continue;

		crtc = to_intel_crtc(conn_state->crtc);
		if (!crtc)
			continue;

		ret = drm_modeset_lock(&crtc->base.mutex, ctx);
		if (ret)
			break;

		crtc_state = to_intel_crtc_state(crtc->base.state);

		drm_WARN_ON(&i915->drm, !intel_crtc_has_dp_encoder(crtc_state));

		if (!crtc_state->hw.active)
			continue;

		/* Skip crtcs with a commit still in flight. */
		if (conn_state->commit &&
		    !try_wait_for_completion(&conn_state->commit->hw_done))
			continue;

		*crtc_mask |= drm_crtc_mask(&crtc->base);
	}
	drm_connector_list_iter_end(&conn_iter);

	return ret;
}
5951
/*
 * Run the pending PHY compliance test under the modeset locks: collect the
 * affected crtcs and process the PHY request once, on the MST master
 * transcoder for gen12+ MST. Returns 0 or a locking error (-EDEADLK is
 * handled by intel_dp_phy_test()'s backoff loop).
 */
static int intel_dp_do_phy_test(struct intel_encoder *encoder,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct intel_crtc *crtc;
	u32 crtc_mask;
	int ret;

	ret = drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex,
			       ctx);
	if (ret)
		return ret;

	ret = intel_dp_prep_phy_test(intel_dp, ctx, &crtc_mask);
	if (ret)
		return ret;

	if (crtc_mask == 0)
		return 0;

	drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] PHY test\n",
		    encoder->base.base.id, encoder->base.name);

	for_each_intel_crtc_mask(&dev_priv->drm, crtc, crtc_mask) {
		const struct intel_crtc_state *crtc_state =
			to_intel_crtc_state(crtc->base.state);

		/* test on the MST master transcoder */
		if (INTEL_GEN(dev_priv) >= 12 &&
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST) &&
		    !intel_dp_mst_is_master_trans(crtc_state))
			continue;

		/* One pass covers the whole port. */
		intel_dp_process_phy_request(intel_dp, crtc_state);
		break;
	}

	return 0;
}
5992
5993static void intel_dp_phy_test(struct intel_encoder *encoder)
5994{
5995 struct drm_modeset_acquire_ctx ctx;
5996 int ret;
5997
5998 drm_modeset_acquire_init(&ctx, 0);
5999
6000 for (;;) {
6001 ret = intel_dp_do_phy_test(encoder, &ctx);
6002
6003 if (ret == -EDEADLK) {
6004 drm_modeset_backoff(&ctx);
6005 continue;
6006 }
6007
6008 break;
6009 }
6010
6011 drm_modeset_drop_locks(&ctx);
6012 drm_modeset_acquire_fini(&ctx);
6013 drm_WARN(encoder->base.dev, ret,
6014 "Acquiring modeset locks failed with %i\n", ret);
6015}
6016
c85d200e
VS
/*
 * If display is now connected check links status,
 * there has been known issues of link loss triggering
 * long pulse.
 *
 * Some sinks (eg. ASUS PB287Q) seem to perform some
 * weird HPD ping pong during modesets. So we can apparently
 * end up with HPD going low during a modeset, and then
 * going back up soon after. And once that happens we must
 * retrain the link to get a picture. That's in case no
 * userspace component reacted to intermittent HPD dip.
 */
static enum intel_hotplug_state
intel_dp_hotplug(struct intel_encoder *encoder,
		 struct intel_connector *connector)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	struct drm_modeset_acquire_ctx ctx;
	enum intel_hotplug_state state;
	int ret;

	/* An active PHY compliance test short-circuits normal hotplug. */
	if (intel_dp->compliance.test_active &&
	    intel_dp->compliance.test_type == DP_TEST_LINK_PHY_TEST_PATTERN) {
		intel_dp_phy_test(encoder);
		/* just do the PHY test and nothing else */
		return INTEL_HOTPLUG_UNCHANGED;
	}

	state = intel_encoder_hotplug(encoder, connector);

	/* Retrain under modeset locks, retrying on -EDEADLK. */
	drm_modeset_acquire_init(&ctx, 0);

	for (;;) {
		ret = intel_dp_retrain_link(encoder, &ctx);

		if (ret == -EDEADLK) {
			drm_modeset_backoff(&ctx);
			continue;
		}

		break;
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	drm_WARN(encoder->base.dev, ret,
		 "Acquiring modeset locks failed with %i\n", ret);

	/*
	 * Keeping it consistent with intel_ddi_hotplug() and
	 * intel_hdmi_hotplug().
	 */
	if (state == INTEL_HOTPLUG_UNCHANGED && !connector->hotplug_retries)
		state = INTEL_HOTPLUG_RETRY;

	return state;
}
6074
9844bc87
DP
/*
 * Read and ack DP_DEVICE_SERVICE_IRQ_VECTOR (DPCD >= 1.1 only), then
 * dispatch automated-test, content-protection and sink-specific IRQs.
 */
static void intel_dp_check_service_irq(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	u8 val;

	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
		return;

	if (drm_dp_dpcd_readb(&intel_dp->aux,
			      DP_DEVICE_SERVICE_IRQ_VECTOR, &val) != 1 || !val)
		return;

	/* Ack the IRQs by writing the vector back. */
	drm_dp_dpcd_writeb(&intel_dp->aux, DP_DEVICE_SERVICE_IRQ_VECTOR, val);

	if (val & DP_AUTOMATED_TEST_REQUEST)
		intel_dp_handle_test_request(intel_dp);

	if (val & DP_CP_IRQ)
		intel_hdcp_handle_cp_irq(intel_dp->attached_connector);

	if (val & DP_SINK_SPECIFIC_IRQ)
		drm_dbg_kms(&i915->drm, "Sink specific irq unhandled\n");
}
6098
a4fc5ed6
KP
/*
 * According to DP spec
 * 5.1.2:
 *  1. Read DPCD
 *  2. Configure link according to Receiver Capabilities
 *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
 *  4. Check link status on receipt of hot-plug interrupt
 *
 * intel_dp_short_pulse -  handles short pulse interrupts
 * when full detection is not required.
 * Returns %true if short pulse is handled and full detection
 * is NOT required and %false otherwise.
 */
static bool
intel_dp_short_pulse(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u8 old_sink_count = intel_dp->sink_count;
	bool ret;

	/*
	 * Clearing compliance test variables to allow capturing
	 * of values for next automated test request.
	 */
	memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));

	/*
	 * Now read the DPCD to see if it's actually running
	 * If the current value of sink count doesn't match with
	 * the value that was stored earlier or dpcd read failed
	 * we need to do full detection
	 */
	ret = intel_dp_get_dpcd(intel_dp);

	if ((old_sink_count != intel_dp->sink_count) || !ret) {
		/* No need to proceed if we are going to do full detect */
		return false;
	}

	intel_dp_check_service_irq(intel_dp);

	/* Handle CEC interrupts, if any */
	drm_dp_cec_irq(&intel_dp->aux);

	/* defer to the hotplug work for link retraining if needed */
	if (intel_dp_needs_link_retrain(intel_dp))
		return false;

	intel_psr_short_pulse(intel_dp);

	/* A test may have been requested by intel_dp_check_service_irq(). */
	switch (intel_dp->compliance.test_type) {
	case DP_TEST_LINK_TRAINING:
		drm_dbg_kms(&dev_priv->drm,
			    "Link Training Compliance Test requested\n");
		/* Send a Hotplug Uevent to userspace to start modeset */
		drm_kms_helper_hotplug_event(&dev_priv->drm);
		break;
	case DP_TEST_LINK_PHY_TEST_PATTERN:
		drm_dbg_kms(&dev_priv->drm,
			    "PHY test pattern Compliance Test requested\n");
		/*
		 * Schedule long hpd to do the test
		 *
		 * FIXME get rid of the ad-hoc phy test modeset code
		 * and properly incorporate it into the normal modeset.
		 */
		return false;
	}

	return true;
}
a4fc5ed6 6170
/* XXX this is probably wrong for multiple downstream ports */
/*
 * Determine connector status from the DPCD alone: resume the LSPCON, read
 * the DPCD, then — for branch devices — use SINK_COUNT (if HPD-aware), MST
 * capability, or a gentle DDC probe; otherwise classify unreliable port
 * types as unknown.
 */
static enum drm_connector_status
intel_dp_detect_dpcd(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	u8 *dpcd = intel_dp->dpcd;
	u8 type;

	/* eDP detection is handled by edp_detect(), never here. */
	if (drm_WARN_ON(&i915->drm, intel_dp_is_edp(intel_dp)))
		return connector_status_connected;

	lspcon_resume(dig_port);

	if (!intel_dp_get_dpcd(intel_dp))
		return connector_status_disconnected;

	/* if there's no downstream port, we're done */
	if (!drm_dp_is_branch(dpcd))
		return connector_status_connected;

	/* If we're HPD-aware, SINK_COUNT changes dynamically */
	if (intel_dp_has_sink_count(intel_dp) &&
	    intel_dp->downstream_ports[0] & DP_DS_PORT_HPD) {
		return intel_dp->sink_count ?
			connector_status_connected : connector_status_disconnected;
	}

	if (intel_dp_can_mst(intel_dp))
		return connector_status_connected;

	/* If no HPD, poke DDC gently */
	if (drm_probe_ddc(&intel_dp->aux.ddc))
		return connector_status_connected;

	/* Well we tried, say unknown for unreliable port types */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
		type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
		if (type == DP_DS_PORT_TYPE_VGA ||
		    type == DP_DS_PORT_TYPE_NON_EDID)
			return connector_status_unknown;
	} else {
		/* DPCD 1.0: only the coarse legacy downstream-port type. */
		type = intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
			DP_DWN_STRM_PORT_TYPE_MASK;
		if (type == DP_DWN_STRM_PORT_TYPE_ANALOG ||
		    type == DP_DWN_STRM_PORT_TYPE_OTHER)
			return connector_status_unknown;
	}

	/* Anything else is out of spec, warn and ignore */
	drm_dbg_kms(&i915->drm, "Broken DP branch device, ignoring\n");
	return connector_status_disconnected;
}
6224
d410b56d
CW
/*
 * eDP "detection": an eDP panel is internal and cannot be unplugged,
 * so always report it as connected.
 */
static enum drm_connector_status
edp_detect(struct intel_dp *intel_dp)
{
	return connector_status_connected;
}
6230
7533eb4f 6231static bool ibx_digital_port_connected(struct intel_encoder *encoder)
5eb08b69 6232{
7533eb4f 6233 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
c7e8a3d6 6234 u32 bit = dev_priv->hotplug.pch_hpd[encoder->hpd_pin];
93e5f0b6 6235
b4e33881 6236 return intel_de_read(dev_priv, SDEISR) & bit;
93e5f0b6
VS
6237}
6238
7533eb4f 6239static bool g4x_digital_port_connected(struct intel_encoder *encoder)
a4fc5ed6 6240{
7533eb4f 6241 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
9642c81c 6242 u32 bit;
5eb08b69 6243
7533eb4f
RV
6244 switch (encoder->hpd_pin) {
6245 case HPD_PORT_B:
9642c81c
JN
6246 bit = PORTB_HOTPLUG_LIVE_STATUS_G4X;
6247 break;
7533eb4f 6248 case HPD_PORT_C:
9642c81c
JN
6249 bit = PORTC_HOTPLUG_LIVE_STATUS_G4X;
6250 break;
7533eb4f 6251 case HPD_PORT_D:
9642c81c
JN
6252 bit = PORTD_HOTPLUG_LIVE_STATUS_G4X;
6253 break;
6254 default:
7533eb4f 6255 MISSING_CASE(encoder->hpd_pin);
9642c81c
JN
6256 return false;
6257 }
6258
b4e33881 6259 return intel_de_read(dev_priv, PORT_HOTPLUG_STAT) & bit;
9642c81c
JN
6260}
6261
7533eb4f 6262static bool gm45_digital_port_connected(struct intel_encoder *encoder)
9642c81c 6263{
7533eb4f 6264 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
9642c81c
JN
6265 u32 bit;
6266
7533eb4f
RV
6267 switch (encoder->hpd_pin) {
6268 case HPD_PORT_B:
0780cd36 6269 bit = PORTB_HOTPLUG_LIVE_STATUS_GM45;
9642c81c 6270 break;
7533eb4f 6271 case HPD_PORT_C:
0780cd36 6272 bit = PORTC_HOTPLUG_LIVE_STATUS_GM45;
9642c81c 6273 break;
7533eb4f 6274 case HPD_PORT_D:
0780cd36 6275 bit = PORTD_HOTPLUG_LIVE_STATUS_GM45;
9642c81c
JN
6276 break;
6277 default:
7533eb4f 6278 MISSING_CASE(encoder->hpd_pin);
9642c81c 6279 return false;
a4fc5ed6
KP
6280 }
6281
b4e33881 6282 return intel_de_read(dev_priv, PORT_HOTPLUG_STAT) & bit;
2a592bec
DA
6283}
6284
7533eb4f 6285static bool ilk_digital_port_connected(struct intel_encoder *encoder)
93e5f0b6 6286{
7533eb4f 6287 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
c7e8a3d6 6288 u32 bit = dev_priv->hotplug.hpd[encoder->hpd_pin];
7533eb4f 6289
c7e8a3d6 6290 return intel_de_read(dev_priv, DEISR) & bit;
b9fcddab
PZ
6291}
6292
/*
 * intel_digital_port_connected - is the specified port connected?
 * @encoder: intel_encoder
 *
 * In cases where there's a connector physically connected but it can't be used
 * by our hardware we also return false, since the rest of the driver should
 * pretty much treat the port as disconnected. This is relevant for type-C
 * (starting on ICL) where there's ownership involved.
 *
 * Return %true if port is connected, %false otherwise.
 */
bool intel_digital_port_connected(struct intel_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
	struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
	bool is_connected = false;
	intel_wakeref_t wakeref;

	/*
	 * The per-platform ->connected() hook reads hardware registers,
	 * so hold a display-core power reference around the call.
	 */
	with_intel_display_power(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref)
		is_connected = dig_port->connected(encoder);

	return is_connected;
}
6316
/*
 * Return the sink's EDID: a duplicate of the connector's cached EDID if
 * one exists, otherwise a fresh read over the AUX DDC channel.
 * Caller owns (and must free) the returned EDID; returns NULL if the
 * cached EDID is an error pointer.
 */
static struct edid *
intel_dp_get_edid(struct intel_dp *intel_dp)
{
	struct intel_connector *intel_connector = intel_dp->attached_connector;

	/* use cached edid if we have one */
	if (intel_connector->edid) {
		/* invalid edid */
		if (IS_ERR(intel_connector->edid))
			return NULL;

		return drm_edid_duplicate(intel_connector->edid);
	} else
		return drm_get_edid(&intel_connector->base,
				    &intel_dp->aux.ddc);
}
8c241fef 6333
/*
 * Refresh the cached downstream facing port (DFP) limits — max bpc,
 * max dotclock and the TMDS clock range — from the branch device's
 * DPCD and the sink's EDID.
 */
static void
intel_dp_update_dfp(struct intel_dp *intel_dp,
		    const struct edid *edid)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_connector *connector = intel_dp->attached_connector;

	intel_dp->dfp.max_bpc =
		drm_dp_downstream_max_bpc(intel_dp->dpcd,
					  intel_dp->downstream_ports, edid);

	intel_dp->dfp.max_dotclock =
		drm_dp_downstream_max_dotclock(intel_dp->dpcd,
					       intel_dp->downstream_ports);

	intel_dp->dfp.min_tmds_clock =
		drm_dp_downstream_min_tmds_clock(intel_dp->dpcd,
						 intel_dp->downstream_ports,
						 edid);
	intel_dp->dfp.max_tmds_clock =
		drm_dp_downstream_max_tmds_clock(intel_dp->dpcd,
						 intel_dp->downstream_ports,
						 edid);

	drm_dbg_kms(&i915->drm,
		    "[CONNECTOR:%d:%s] DFP max bpc %d, max dotclock %d, TMDS clock %d-%d\n",
		    connector->base.base.id, connector->base.name,
		    intel_dp->dfp.max_bpc,
		    intel_dp->dfp.max_dotclock,
		    intel_dp->dfp.min_tmds_clock,
		    intel_dp->dfp.max_tmds_clock);
}
6366
/*
 * Work out whether YCbCr 4:2:0 output is possible on this connector,
 * either via branch-device passthrough or 4:4:4->4:2:0 conversion, and
 * update the connector/DFP state accordingly.
 */
static void
intel_dp_update_420(struct intel_dp *intel_dp)
{
	struct drm_i915_private *i915 = dp_to_i915(intel_dp);
	struct intel_connector *connector = intel_dp->attached_connector;
	bool is_branch, ycbcr_420_passthrough, ycbcr_444_to_420;

	/* No YCbCr output support on gmch platforms */
	if (HAS_GMCH(i915))
		return;

	/*
	 * ILK doesn't seem capable of DP YCbCr output. The
	 * displayed image is severly corrupted. SNB+ is fine.
	 */
	if (IS_GEN(i915, 5))
		return;

	is_branch = drm_dp_is_branch(intel_dp->dpcd);
	ycbcr_420_passthrough =
		drm_dp_downstream_420_passthrough(intel_dp->dpcd,
						  intel_dp->downstream_ports);
	ycbcr_444_to_420 =
		drm_dp_downstream_444_to_420_conversion(intel_dp->dpcd,
							intel_dp->downstream_ports);

	if (INTEL_GEN(i915) >= 11) {
		/* Prefer 4:2:0 passthrough over 4:4:4->4:2:0 conversion */
		intel_dp->dfp.ycbcr_444_to_420 =
			ycbcr_444_to_420 && !ycbcr_420_passthrough;

		connector->base.ycbcr_420_allowed =
			!is_branch || ycbcr_444_to_420 || ycbcr_420_passthrough;
	} else {
		/* 4:4:4->4:2:0 conversion is the only way */
		intel_dp->dfp.ycbcr_444_to_420 = ycbcr_444_to_420;

		connector->base.ycbcr_420_allowed = ycbcr_444_to_420;
	}

	drm_dbg_kms(&i915->drm,
		    "[CONNECTOR:%d:%s] YCbCr 4:2:0 allowed? %s, YCbCr 4:4:4->4:2:0 conversion? %s\n",
		    connector->base.base.id, connector->base.name,
		    yesno(connector->base.ycbcr_420_allowed),
		    yesno(intel_dp->dfp.ycbcr_444_to_420));
}
6413
/*
 * Read and cache the sink's EDID on the connector, then derive the
 * dependent state from it: DFP limits, 4:2:0 capability, HDMI/audio
 * sink flags, CEC and EDID quirks.
 */
static void
intel_dp_set_edid(struct intel_dp *intel_dp)
{
	struct intel_connector *connector = intel_dp->attached_connector;
	struct edid *edid;

	/* Drop any previously cached EDID-derived state first. */
	intel_dp_unset_edid(intel_dp);
	edid = intel_dp_get_edid(intel_dp);
	connector->detect_edid = edid;

	intel_dp_update_dfp(intel_dp, edid);
	intel_dp_update_420(intel_dp);

	if (edid && edid->input & DRM_EDID_INPUT_DIGITAL) {
		intel_dp->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
		intel_dp->has_audio = drm_detect_monitor_audio(edid);
	}

	/* CEC and quirk helpers accept a NULL EDID. */
	drm_dp_cec_set_edid(&intel_dp->aux, edid);
	intel_dp->edid_quirks = drm_dp_get_edid_quirks(edid);
}
6435
beb60608
CW
/*
 * Free the cached EDID and reset every piece of state that was derived
 * from it (HDMI/audio flags, quirks, DFP limits, 4:2:0 capability).
 * Counterpart of intel_dp_set_edid().
 */
static void
intel_dp_unset_edid(struct intel_dp *intel_dp)
{
	struct intel_connector *connector = intel_dp->attached_connector;

	drm_dp_cec_unset_edid(&intel_dp->aux);
	kfree(connector->detect_edid);
	connector->detect_edid = NULL;

	intel_dp->has_hdmi_sink = false;
	intel_dp->has_audio = false;
	intel_dp->edid_quirks = 0;

	intel_dp->dfp.max_bpc = 0;
	intel_dp->dfp.max_dotclock = 0;
	intel_dp->dfp.min_tmds_clock = 0;
	intel_dp->dfp.max_tmds_clock = 0;

	intel_dp->dfp.ycbcr_444_to_420 = false;
	connector->base.ycbcr_420_allowed = false;
}
d6f24d0f 6457
/*
 * Connector ->detect_ctx hook: determine whether a sink is connected.
 *
 * On disconnect, compliance/DSC state is cleared and a stale MST
 * topology is torn down. On connect, DSC caps are read (gen11+), MST is
 * (re)configured, link params are reset if needed, the link is
 * retrained for non-eDP, and the EDID is (re)read.
 *
 * Returns a connector_status_* value, or a negative error from link
 * retraining. Requires connection_mutex to be held (asserted below).
 */
static int
intel_dp_detect(struct drm_connector *connector,
		struct drm_modeset_acquire_ctx *ctx,
		bool force)
{
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *encoder = &dig_port->base;
	enum drm_connector_status status;

	drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
		    connector->base.id, connector->name);
	drm_WARN_ON(&dev_priv->drm,
		    !drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));

	if (!INTEL_DISPLAY_ENABLED(dev_priv))
		return connector_status_disconnected;

	/* Can't disconnect eDP */
	if (intel_dp_is_edp(intel_dp))
		status = edp_detect(intel_dp);
	else if (intel_digital_port_connected(encoder))
		status = intel_dp_detect_dpcd(intel_dp);
	else
		status = connector_status_disconnected;

	if (status == connector_status_disconnected) {
		memset(&intel_dp->compliance, 0, sizeof(intel_dp->compliance));
		memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd));

		if (intel_dp->is_mst) {
			drm_dbg_kms(&dev_priv->drm,
				    "MST device may have disappeared %d vs %d\n",
				    intel_dp->is_mst,
				    intel_dp->mst_mgr.mst_state);
			intel_dp->is_mst = false;
			drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
							intel_dp->is_mst);
		}

		goto out;
	}

	/* Read DP Sink DSC Cap DPCD regs for DP v1.4 */
	if (INTEL_GEN(dev_priv) >= 11)
		intel_dp_get_dsc_sink_cap(intel_dp);

	intel_dp_configure_mst(intel_dp);

	/*
	 * TODO: Reset link params when switching to MST mode, until MST
	 * supports link training fallback params.
	 */
	if (intel_dp->reset_link_params || intel_dp->is_mst) {
		/* Initial max link lane count */
		intel_dp->max_link_lane_count = intel_dp_max_common_lane_count(intel_dp);

		/* Initial max link rate */
		intel_dp->max_link_rate = intel_dp_max_common_rate(intel_dp);

		intel_dp->reset_link_params = false;
	}

	intel_dp_print_rates(intel_dp);

	if (intel_dp->is_mst) {
		/*
		 * If we are in MST mode then this connector
		 * won't appear connected or have anything
		 * with EDID on it
		 */
		status = connector_status_disconnected;
		goto out;
	}

	/*
	 * Some external monitors do not signal loss of link synchronization
	 * with an IRQ_HPD, so force a link status check.
	 */
	if (!intel_dp_is_edp(intel_dp)) {
		int ret;

		ret = intel_dp_retrain_link(encoder, ctx);
		if (ret)
			return ret;
	}

	/*
	 * Clearing NACK and defer counts to get their exact values
	 * while reading EDID which are required by Compliance tests
	 * 4.2.2.4 and 4.2.2.5
	 */
	intel_dp->aux.i2c_nack_count = 0;
	intel_dp->aux.i2c_defer_count = 0;

	intel_dp_set_edid(intel_dp);
	if (intel_dp_is_edp(intel_dp) ||
	    to_intel_connector(connector)->detect_edid)
		status = connector_status_connected;

	intel_dp_check_service_irq(intel_dp);

out:
	if (status != connector_status_connected && !intel_dp->is_mst)
		intel_dp_unset_edid(intel_dp);

	/*
	 * Make sure the refs for power wells enabled during detect are
	 * dropped to avoid a new detect cycle triggered by HPD polling.
	 */
	intel_display_power_flush_work(dev_priv);

	if (!intel_dp_is_edp(intel_dp))
		drm_dp_set_subconnector_property(connector,
						 status,
						 intel_dp->dpcd,
						 intel_dp->downstream_ports);
	return status;
}
6578
beb60608
CW
/*
 * Connector ->force hook: re-read the EDID for a connector that is
 * already marked connected, holding the AUX power domain for the read.
 * A connector not marked connected only has its cached EDID dropped.
 */
static void
intel_dp_force(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_encoder *intel_encoder = &dig_port->base;
	struct drm_i915_private *dev_priv = to_i915(intel_encoder->base.dev);
	enum intel_display_power_domain aux_domain =
		intel_aux_power_domain(dig_port);
	intel_wakeref_t wakeref;

	drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
		    connector->base.id, connector->name);
	intel_dp_unset_edid(intel_dp);

	if (connector->status != connector_status_connected)
		return;

	/* The EDID read goes over AUX, so power that domain up. */
	wakeref = intel_display_power_get(dev_priv, aux_domain);

	intel_dp_set_edid(intel_dp);

	intel_display_power_put(dev_priv, aux_domain, wakeref);
}
6603
/*
 * Connector ->get_modes hook: populate modes from the cached detect
 * EDID; fall back to the fixed panel mode for eDP, and finally to a
 * downstream-port derived mode when no EDID is available.
 * Returns the number of modes added.
 */
static int intel_dp_get_modes(struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct edid *edid;

	edid = intel_connector->detect_edid;
	if (edid) {
		int ret = intel_connector_update_modes(connector, edid);
		if (ret)
			return ret;
	}

	/* if eDP has no EDID, fall back to fixed mode */
	if (intel_dp_is_edp(intel_attached_dp(intel_connector)) &&
	    intel_connector->panel.fixed_mode) {
		struct drm_display_mode *mode;

		mode = drm_mode_duplicate(connector->dev,
					  intel_connector->panel.fixed_mode);
		if (mode) {
			drm_mode_probed_add(connector, mode);
			return 1;
		}
	}

	if (!edid) {
		struct intel_dp *intel_dp = intel_attached_dp(intel_connector);
		struct drm_display_mode *mode;

		mode = drm_dp_downstream_mode(connector->dev,
					      intel_dp->dpcd,
					      intel_dp->downstream_ports);
		if (mode) {
			drm_mode_probed_add(connector, mode);
			return 1;
		}
	}

	return 0;
}
6644
7a418e34
CW
/*
 * Connector ->late_register hook: register the base connector, then the
 * DP AUX channel (parented to the connector's kdev) and, on success,
 * the CEC adapter. Returns 0 or a negative error code.
 */
static int
intel_dp_connector_register(struct drm_connector *connector)
{
	struct drm_i915_private *i915 = to_i915(connector->dev);
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));
	int ret;

	ret = intel_connector_register(connector);
	if (ret)
		return ret;

	drm_dbg_kms(&i915->drm, "registering %s bus for %s\n",
		    intel_dp->aux.name, connector->kdev->kobj.name);

	intel_dp->aux.dev = connector->kdev;
	ret = drm_dp_aux_register(&intel_dp->aux);
	if (!ret)
		drm_dp_cec_register_connector(&intel_dp->aux, connector);
	return ret;
}
6665
c191eca1
CW
/*
 * Connector ->early_unregister hook: tear down CEC, the AUX channel and
 * the base connector, in reverse order of registration.
 */
static void
intel_dp_connector_unregister(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(to_intel_connector(connector));

	drm_dp_cec_unregister_connector(&intel_dp->aux);
	drm_dp_aux_unregister(&intel_dp->aux);
	intel_connector_unregister(connector);
}
6675
/*
 * Flush pending work before encoder teardown: clean up MST, and for eDP
 * cancel the delayed VDD-off work, force VDD off under the pps lock and
 * unregister the reboot notifier. Finally tears down the AUX channel.
 */
void intel_dp_encoder_flush_work(struct drm_encoder *encoder)
{
	struct intel_digital_port *dig_port = enc_to_dig_port(to_intel_encoder(encoder));
	struct intel_dp *intel_dp = &dig_port->dp;

	intel_dp_mst_encoder_cleanup(dig_port);
	if (intel_dp_is_edp(intel_dp)) {
		intel_wakeref_t wakeref;

		cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
		/*
		 * vdd might still be enabled due to the delayed vdd off.
		 * Make sure vdd is actually turned off here.
		 */
		with_pps_lock(intel_dp, wakeref)
			edp_panel_vdd_off_sync(intel_dp);

		if (intel_dp->edp_notifier.notifier_call) {
			unregister_reboot_notifier(&intel_dp->edp_notifier);
			intel_dp->edp_notifier.notifier_call = NULL;
		}
	}

	intel_dp_aux_fini(intel_dp);
}
6701
/*
 * Encoder ->destroy hook: flush outstanding work, clean up the DRM
 * encoder, and free the containing digital port.
 */
static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
{
	intel_dp_encoder_flush_work(encoder);

	drm_encoder_cleanup(encoder);
	kfree(enc_to_dig_port(to_intel_encoder(encoder)));
}
6709
/*
 * Suspend an eDP encoder: cancel the delayed VDD-off work and force VDD
 * off under the pps lock. No-op for non-eDP outputs.
 */
void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);
	intel_wakeref_t wakeref;

	if (!intel_dp_is_edp(intel_dp))
		return;

	/*
	 * vdd might still be enabled due to the delayed vdd off.
	 * Make sure vdd is actually turned off here.
	 */
	cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
	with_pps_lock(intel_dp, wakeref)
		edp_panel_vdd_off_sync(intel_dp);
}
6726
49e6bc51
VS
/*
 * If BIOS left panel VDD enabled, adopt the state: take the matching
 * AUX power domain reference and schedule a delayed VDD off so the
 * reference is not held forever. Caller must hold pps_mutex (asserted).
 */
static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);

	lockdep_assert_held(&dev_priv->pps_mutex);

	if (!edp_have_panel_vdd(intel_dp))
		return;

	/*
	 * The VDD bit needs a power domain reference, so if the bit is
	 * already enabled when we boot or resume, grab this reference and
	 * schedule a vdd off, so we don't hold on to the reference
	 * indefinitely.
	 */
	drm_dbg_kms(&dev_priv->drm,
		    "VDD left on by BIOS, adjusting state tracking\n");
	intel_display_power_get(dev_priv, intel_aux_power_domain(dig_port));

	edp_panel_vdd_schedule_off(intel_dp);
}
6749
9f2bdb00
VS
/*
 * Return the pipe currently driving this DP port on VLV/CHV, or
 * INVALID_PIPE if the port is not enabled.
 */
static enum pipe vlv_active_pipe(struct intel_dp *intel_dp)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
	enum pipe pipe;

	if (intel_dp_port_enabled(dev_priv, intel_dp->output_reg,
				  encoder->port, &pipe))
		return pipe;

	return INVALID_PIPE;
}
6762
/*
 * Encoder ->reset hook (boot/resume): re-read the port register on
 * non-DDI platforms, resume the LSPCON, force link-parameter reset, and
 * on VLV/CHV/eDP re-sync power sequencer state under the pps lock.
 */
void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
	struct drm_i915_private *dev_priv = to_i915(encoder->dev);
	struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(encoder));
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	intel_wakeref_t wakeref;

	if (!HAS_DDI(dev_priv))
		intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg);

	lspcon_resume(dig_port);

	intel_dp->reset_link_params = true;

	/* Only VLV/CHV ports and eDP need the PPS fixups below. */
	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
	    !intel_dp_is_edp(intel_dp))
		return;

	with_pps_lock(intel_dp, wakeref) {
		if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
			intel_dp->active_pipe = vlv_active_pipe(intel_dp);

		if (intel_dp_is_edp(intel_dp)) {
			/*
			 * Reinit the power sequencer, in case BIOS did
			 * something nasty with it.
			 */
			intel_dp_pps_init(intel_dp);
			intel_edp_panel_vdd_sanitize(intel_dp);
		}
	}
}
6795
e24bcd34
MN
/*
 * Pull every connector belonging to @tile_group_id into the atomic
 * state and mark its CRTC (plus affected planes) for a modeset.
 * Returns 0 or a negative error code.
 */
static int intel_modeset_tile_group(struct intel_atomic_state *state,
				    int tile_group_id)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct drm_connector_list_iter conn_iter;
	struct drm_connector *connector;
	int ret = 0;

	drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		struct drm_connector_state *conn_state;
		struct intel_crtc_state *crtc_state;
		struct intel_crtc *crtc;

		if (!connector->has_tile ||
		    connector->tile_group->id != tile_group_id)
			continue;

		conn_state = drm_atomic_get_connector_state(&state->base,
							    connector);
		if (IS_ERR(conn_state)) {
			ret = PTR_ERR(conn_state);
			break;
		}

		crtc = to_intel_crtc(conn_state->crtc);

		/* Connector may not be bound to a CRTC. */
		if (!crtc)
			continue;

		crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
		crtc_state->uapi.mode_changed = true;

		ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
		if (ret)
			break;
	}
	drm_connector_list_iter_end(&conn_iter);

	return ret;
}
6837
/*
 * Mark every enabled CRTC whose transcoder is in the @transcoders
 * bitmask for a modeset, pulling its connectors and planes into the
 * atomic state. Warns if any requested transcoder was not found.
 * Returns 0 or a negative error code.
 */
static int intel_modeset_affected_transcoders(struct intel_atomic_state *state, u8 transcoders)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	struct intel_crtc *crtc;

	if (transcoders == 0)
		return 0;

	for_each_intel_crtc(&dev_priv->drm, crtc) {
		struct intel_crtc_state *crtc_state;
		int ret;

		crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);

		if (!crtc_state->hw.enable)
			continue;

		if (!(transcoders & BIT(crtc_state->cpu_transcoder)))
			continue;

		crtc_state->uapi.mode_changed = true;

		ret = drm_atomic_add_affected_connectors(&state->base, &crtc->base);
		if (ret)
			return ret;

		ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
		if (ret)
			return ret;

		/* Clear handled transcoders so leftovers can be detected. */
		transcoders &= ~BIT(crtc_state->cpu_transcoder);
	}

	drm_WARN_ON(&dev_priv->drm, transcoders != 0);

	return 0;
}
6877
/*
 * For a connector on a port-sync'd CRTC, collect the transcoders of the
 * sync group (slaves plus master, if any) from the old CRTC state and
 * mark them all for a modeset. Returns 0 or a negative error code.
 */
static int intel_modeset_synced_crtcs(struct intel_atomic_state *state,
				      struct drm_connector *connector)
{
	const struct drm_connector_state *old_conn_state =
		drm_atomic_get_old_connector_state(&state->base, connector);
	const struct intel_crtc_state *old_crtc_state;
	struct intel_crtc *crtc;
	u8 transcoders;

	crtc = to_intel_crtc(old_conn_state->crtc);
	if (!crtc)
		return 0;

	old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);

	if (!old_crtc_state->hw.active)
		return 0;

	transcoders = old_crtc_state->sync_mode_slaves_mask;
	if (old_crtc_state->master_transcoder != INVALID_TRANSCODER)
		transcoders |= BIT(old_crtc_state->master_transcoder);

	return intel_modeset_affected_transcoders(state,
						  transcoders);
}
6903
/*
 * Connector ->atomic_check hook: run the common digital-connector
 * check, then (gen9+) extend the state to cover the tile group and any
 * port-sync'd CRTCs when this connector needs a modeset.
 * Returns 0 or a negative error code.
 */
static int intel_dp_connector_atomic_check(struct drm_connector *conn,
					   struct drm_atomic_state *_state)
{
	struct drm_i915_private *dev_priv = to_i915(conn->dev);
	struct intel_atomic_state *state = to_intel_atomic_state(_state);
	int ret;

	ret = intel_digital_connector_atomic_check(conn, &state->base);
	if (ret)
		return ret;

	/*
	 * We don't enable port sync on BDW due to missing w/as and
	 * due to not having adjusted the modeset sequence appropriately.
	 */
	if (INTEL_GEN(dev_priv) < 9)
		return 0;

	if (!intel_connector_needs_modeset(state, conn))
		return 0;

	if (conn->has_tile) {
		ret = intel_modeset_tile_group(state, conn->tile_group->id);
		if (ret)
			return ret;
	}

	return intel_modeset_synced_crtcs(state, conn);
}
6933
/* DRM connector vtable for DP connectors. */
static const struct drm_connector_funcs intel_dp_connector_funcs = {
	.force = intel_dp_force,
	.fill_modes = drm_helper_probe_single_connector_modes,
	.atomic_get_property = intel_digital_connector_atomic_get_property,
	.atomic_set_property = intel_digital_connector_atomic_set_property,
	.late_register = intel_dp_connector_register,
	.early_unregister = intel_dp_connector_unregister,
	.destroy = intel_connector_destroy,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
	.atomic_duplicate_state = intel_digital_connector_duplicate_state,
};
6945
/* DRM connector helper vtable for DP connectors. */
static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
	.detect_ctx = intel_dp_detect,
	.get_modes = intel_dp_get_modes,
	.mode_valid = intel_dp_mode_valid,
	.atomic_check = intel_dp_connector_atomic_check,
};
6952
/* DRM encoder vtable for DP encoders. */
static const struct drm_encoder_funcs intel_dp_enc_funcs = {
	.reset = intel_dp_encoder_reset,
	.destroy = intel_dp_encoder_destroy,
};
6957
13ea6db2
AG
/*
 * Return true if the eDP panel currently has both panel power and VDD
 * enabled, sampled under the pps lock.
 */
static bool intel_edp_have_power(struct intel_dp *intel_dp)
{
	intel_wakeref_t wakeref;
	bool have_power = false;

	with_pps_lock(intel_dp, wakeref) {
		have_power = edp_have_panel_power(intel_dp) &&
			     edp_have_panel_vdd(intel_dp);
	}

	return have_power;
}
6970
/*
 * Handle a hot plug pulse on a DP port.
 *
 * eDP pulses while the panel is unpowered are ignored (handling them
 * would require VDD on, creating an endless vdd-off/hpd cycle). A long
 * pulse requests a link-parameter reset and returns IRQ_NONE so the
 * caller performs full detection; a short pulse is serviced in place
 * (MST sideband or single-stream short-pulse handling).
 */
enum irqreturn
intel_dp_hpd_pulse(struct intel_digital_port *dig_port, bool long_hpd)
{
	struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
	struct intel_dp *intel_dp = &dig_port->dp;

	if (dig_port->base.type == INTEL_OUTPUT_EDP &&
	    (long_hpd || !intel_edp_have_power(intel_dp))) {
		/*
		 * vdd off can generate a long/short pulse on eDP which
		 * would require vdd on to handle it, and thus we
		 * would end up in an endless cycle of
		 * "vdd off -> long/short hpd -> vdd on -> detect -> vdd off -> ..."
		 */
		drm_dbg_kms(&i915->drm,
			    "ignoring %s hpd on eDP [ENCODER:%d:%s]\n",
			    long_hpd ? "long" : "short",
			    dig_port->base.base.base.id,
			    dig_port->base.base.name);
		return IRQ_HANDLED;
	}

	drm_dbg_kms(&i915->drm, "got hpd irq on [ENCODER:%d:%s] - %s\n",
		    dig_port->base.base.base.id,
		    dig_port->base.base.name,
		    long_hpd ? "long" : "short");

	if (long_hpd) {
		intel_dp->reset_link_params = true;
		return IRQ_NONE;
	}

	if (intel_dp->is_mst) {
		if (!intel_dp_check_mst_status(intel_dp))
			return IRQ_NONE;
	} else if (!intel_dp_short_pulse(intel_dp)) {
		return IRQ_NONE;
	}

	return IRQ_HANDLED;
}
7012
/*
 * check the VBT to see whether the eDP is on another port
 *
 * Returns true if @port carries an eDP panel: never on gen < 5, always
 * PORT_A on gen < 9, otherwise whatever the VBT says.
 */
bool intel_dp_is_port_edp(struct drm_i915_private *dev_priv, enum port port)
{
	/*
	 * eDP not supported on g4x. so bail out early just
	 * for a bit extra safety in case the VBT is bonkers.
	 */
	if (INTEL_GEN(dev_priv) < 5)
		return false;

	if (INTEL_GEN(dev_priv) < 9 && port == PORT_A)
		return true;

	return intel_bios_is_port_edp(dev_priv, port);
}
7028
/*
 * Attach the DRM properties a DP connector supports: subconnector
 * (non-eDP), force-audio, broadcast RGB, max bpc (range depends on
 * platform), colorspace, HDR metadata (GLK/gen11+), and the scaling
 * mode for eDP panels.
 */
static void
intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
{
	struct drm_i915_private *dev_priv = to_i915(connector->dev);
	enum port port = dp_to_dig_port(intel_dp)->base.port;

	if (!intel_dp_is_edp(intel_dp))
		drm_connector_attach_dp_subconnector_property(connector);

	if (!IS_G4X(dev_priv) && port != PORT_A)
		intel_attach_force_audio_property(connector);

	intel_attach_broadcast_rgb_property(connector);
	if (HAS_GMCH(dev_priv))
		drm_connector_attach_max_bpc_property(connector, 6, 10);
	else if (INTEL_GEN(dev_priv) >= 5)
		drm_connector_attach_max_bpc_property(connector, 6, 12);

	intel_attach_colorspace_property(connector);

	if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 11)
		drm_object_attach_property(&connector->base,
					   connector->dev->mode_config.hdr_output_metadata_property,
					   0);

	if (intel_dp_is_edp(intel_dp)) {
		u32 allowed_scalers;

		allowed_scalers = BIT(DRM_MODE_SCALE_ASPECT) | BIT(DRM_MODE_SCALE_FULLSCREEN);
		if (!HAS_GMCH(dev_priv))
			allowed_scalers |= BIT(DRM_MODE_SCALE_CENTER);

		drm_connector_attach_scaling_mode_property(connector, allowed_scalers);

		connector->state->scaling_mode = DRM_MODE_SCALE_ASPECT;

	}
}
7067
dada1a9f
ID
/*
 * Seed the panel power-sequencing timestamps so the delay bookkeeping
 * starts from "now" rather than from zero.
 */
static void intel_dp_init_panel_power_timestamps(struct intel_dp *intel_dp)
{
	intel_dp->panel_power_off_time = ktime_get_boottime();
	intel_dp->last_power_on = jiffies;
	intel_dp->last_backlight_off = jiffies;
}
7074
/*
 * Read the panel power sequencer delays (t1_t3, t8, t9, t10, t11_t12)
 * out of the PPS registers into @seq. t11_t12 comes from the dedicated
 * divisor register when it exists, otherwise from the control register
 * (BXT-style layout); both are scaled by 1000 to the common unit.
 */
static void
intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
{
	struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
	u32 pp_on, pp_off, pp_ctl;
	struct pps_registers regs;

	intel_pps_get_registers(intel_dp, &regs);

	pp_ctl = ilk_get_pp_control(intel_dp);

	/* Ensure PPS is unlocked */
	if (!HAS_DDI(dev_priv))
		intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl);

	pp_on = intel_de_read(dev_priv, regs.pp_on);
	pp_off = intel_de_read(dev_priv, regs.pp_off);

	/* Pull timing values out of registers */
	seq->t1_t3 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, pp_on);
	seq->t8 = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, pp_on);
	seq->t9 = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, pp_off);
	seq->t10 = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, pp_off);

	if (i915_mmio_reg_valid(regs.pp_div)) {
		u32 pp_div;

		pp_div = intel_de_read(dev_priv, regs.pp_div);

		seq->t11_t12 = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, pp_div) * 1000;
	} else {
		seq->t11_t12 = REG_FIELD_GET(BXT_POWER_CYCLE_DELAY_MASK, pp_ctl) * 1000;
	}
}
7109
de9c1b6b
ID
/* Log one set of PPS delays, tagged with @state_name (e.g. "sw"/"hw"). */
static void
intel_pps_dump_state(const char *state_name, const struct edp_power_seq *seq)
{
	DRM_DEBUG_KMS("%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
		      state_name,
		      seq->t1_t3, seq->t8, seq->t9, seq->t10, seq->t11_t12);
}
7117
7118static void
46bd8383 7119intel_pps_verify_state(struct intel_dp *intel_dp)
de9c1b6b
ID
7120{
7121 struct edp_power_seq hw;
7122 struct edp_power_seq *sw = &intel_dp->pps_delays;
7123
46bd8383 7124 intel_pps_readout_hw_state(intel_dp, &hw);
de9c1b6b
ID
7125
7126 if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 ||
7127 hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) {
7128 DRM_ERROR("PPS state mismatch\n");
7129 intel_pps_dump_state("sw", sw);
7130 intel_pps_dump_state("hw", &hw);
7131 }
7132}
7133
54648618 7134static void
46bd8383 7135intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp)
54648618 7136{
de25eb7f 7137 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
54648618
ID
7138 struct edp_power_seq cur, vbt, spec,
7139 *final = &intel_dp->pps_delays;
7140
7141 lockdep_assert_held(&dev_priv->pps_mutex);
7142
7143 /* already initialized? */
7144 if (final->t11_t12 != 0)
7145 return;
7146
46bd8383 7147 intel_pps_readout_hw_state(intel_dp, &cur);
67a54566 7148
de9c1b6b 7149 intel_pps_dump_state("cur", &cur);
67a54566 7150
6aa23e65 7151 vbt = dev_priv->vbt.edp.pps;
c99a259b
MN
7152 /* On Toshiba Satellite P50-C-18C system the VBT T12 delay
7153 * of 500ms appears to be too short. Ocassionally the panel
7154 * just fails to power back on. Increasing the delay to 800ms
7155 * seems sufficient to avoid this problem.
7156 */
7157 if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) {
7313f5a9 7158 vbt.t11_t12 = max_t(u16, vbt.t11_t12, 1300 * 10);
bdc6114e
WK
7159 drm_dbg_kms(&dev_priv->drm,
7160 "Increasing T12 panel delay as per the quirk to %d\n",
7161 vbt.t11_t12);
c99a259b 7162 }
770a17a5
MN
7163 /* T11_T12 delay is special and actually in units of 100ms, but zero
7164 * based in the hw (so we need to add 100 ms). But the sw vbt
7165 * table multiplies it with 1000 to make it in units of 100usec,
7166 * too. */
7167 vbt.t11_t12 += 100 * 10;
67a54566
DV
7168
7169 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
7170 * our hw here, which are all in 100usec. */
7171 spec.t1_t3 = 210 * 10;
7172 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
7173 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
7174 spec.t10 = 500 * 10;
7175 /* This one is special and actually in units of 100ms, but zero
7176 * based in the hw (so we need to add 100 ms). But the sw vbt
7177 * table multiplies it with 1000 to make it in units of 100usec,
7178 * too. */
7179 spec.t11_t12 = (510 + 100) * 10;
7180
de9c1b6b 7181 intel_pps_dump_state("vbt", &vbt);
67a54566
DV
7182
7183 /* Use the max of the register settings and vbt. If both are
7184 * unset, fall back to the spec limits. */
36b5f425 7185#define assign_final(field) final->field = (max(cur.field, vbt.field) == 0 ? \
67a54566
DV
7186 spec.field : \
7187 max(cur.field, vbt.field))
7188 assign_final(t1_t3);
7189 assign_final(t8);
7190 assign_final(t9);
7191 assign_final(t10);
7192 assign_final(t11_t12);
7193#undef assign_final
7194
36b5f425 7195#define get_delay(field) (DIV_ROUND_UP(final->field, 10))
67a54566
DV
7196 intel_dp->panel_power_up_delay = get_delay(t1_t3);
7197 intel_dp->backlight_on_delay = get_delay(t8);
7198 intel_dp->backlight_off_delay = get_delay(t9);
7199 intel_dp->panel_power_down_delay = get_delay(t10);
7200 intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
7201#undef get_delay
7202
bdc6114e
WK
7203 drm_dbg_kms(&dev_priv->drm,
7204 "panel power up delay %d, power down delay %d, power cycle delay %d\n",
7205 intel_dp->panel_power_up_delay,
7206 intel_dp->panel_power_down_delay,
7207 intel_dp->panel_power_cycle_delay);
f30d26e4 7208
bdc6114e
WK
7209 drm_dbg_kms(&dev_priv->drm, "backlight on delay %d, off delay %d\n",
7210 intel_dp->backlight_on_delay,
7211 intel_dp->backlight_off_delay);
de9c1b6b
ID
7212
7213 /*
7214 * We override the HW backlight delays to 1 because we do manual waits
7215 * on them. For T8, even BSpec recommends doing it. For T9, if we
7216 * don't do this, we'll end up waiting for the backlight off delay
7217 * twice: once when we do the manual sleep, and once when we disable
7218 * the panel and wait for the PP_STATUS bit to become zero.
7219 */
7220 final->t8 = 1;
7221 final->t9 = 1;
5643205c
ID
7222
7223 /*
7224 * HW has only a 100msec granularity for t11_t12 so round it up
7225 * accordingly.
7226 */
7227 final->t11_t12 = roundup(final->t11_t12, 100 * 10);
f30d26e4
JN
7228}
7229
7230static void
46bd8383 7231intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
5d5ab2d2 7232 bool force_disable_vdd)
f30d26e4 7233{
de25eb7f 7234 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
ab3517c1 7235 u32 pp_on, pp_off, port_sel = 0;
b04002f4 7236 int div = RUNTIME_INFO(dev_priv)->rawclk_freq / 1000;
8e8232d5 7237 struct pps_registers regs;
8f4f2797 7238 enum port port = dp_to_dig_port(intel_dp)->base.port;
36b5f425 7239 const struct edp_power_seq *seq = &intel_dp->pps_delays;
453c5420 7240
e39b999a 7241 lockdep_assert_held(&dev_priv->pps_mutex);
453c5420 7242
46bd8383 7243 intel_pps_get_registers(intel_dp, &regs);
453c5420 7244
5d5ab2d2
VS
7245 /*
7246 * On some VLV machines the BIOS can leave the VDD
e7f2af78 7247 * enabled even on power sequencers which aren't
5d5ab2d2
VS
7248 * hooked up to any port. This would mess up the
7249 * power domain tracking the first time we pick
7250 * one of these power sequencers for use since
7251 * edp_panel_vdd_on() would notice that the VDD was
7252 * already on and therefore wouldn't grab the power
7253 * domain reference. Disable VDD first to avoid this.
7254 * This also avoids spuriously turning the VDD on as
e7f2af78 7255 * soon as the new power sequencer gets initialized.
5d5ab2d2
VS
7256 */
7257 if (force_disable_vdd) {
9eae5e27 7258 u32 pp = ilk_get_pp_control(intel_dp);
5d5ab2d2 7259
eb020ca3
PB
7260 drm_WARN(&dev_priv->drm, pp & PANEL_POWER_ON,
7261 "Panel power already on\n");
5d5ab2d2
VS
7262
7263 if (pp & EDP_FORCE_VDD)
bdc6114e
WK
7264 drm_dbg_kms(&dev_priv->drm,
7265 "VDD already on, disabling first\n");
5d5ab2d2
VS
7266
7267 pp &= ~EDP_FORCE_VDD;
7268
b4e33881 7269 intel_de_write(dev_priv, regs.pp_ctrl, pp);
5d5ab2d2
VS
7270 }
7271
78b36b10
JN
7272 pp_on = REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, seq->t1_t3) |
7273 REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, seq->t8);
7274 pp_off = REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, seq->t9) |
7275 REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, seq->t10);
67a54566
DV
7276
7277 /* Haswell doesn't have any port selection bits for the panel
7278 * power sequencer any more. */
920a14b2 7279 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
ad933b56 7280 port_sel = PANEL_PORT_SELECT_VLV(port);
6e266956 7281 } else if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
05bf51d3
VS
7282 switch (port) {
7283 case PORT_A:
a24c144c 7284 port_sel = PANEL_PORT_SELECT_DPA;
05bf51d3
VS
7285 break;
7286 case PORT_C:
7287 port_sel = PANEL_PORT_SELECT_DPC;
7288 break;
7289 case PORT_D:
a24c144c 7290 port_sel = PANEL_PORT_SELECT_DPD;
05bf51d3
VS
7291 break;
7292 default:
7293 MISSING_CASE(port);
7294 break;
7295 }
67a54566
DV
7296 }
7297
453c5420
JB
7298 pp_on |= port_sel;
7299
b4e33881
JN
7300 intel_de_write(dev_priv, regs.pp_on, pp_on);
7301 intel_de_write(dev_priv, regs.pp_off, pp_off);
ab3517c1
JN
7302
7303 /*
7304 * Compute the divisor for the pp clock, simply match the Bspec formula.
7305 */
7306 if (i915_mmio_reg_valid(regs.pp_div)) {
b4e33881
JN
7307 intel_de_write(dev_priv, regs.pp_div,
7308 REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, (100 * div) / 2 - 1) | REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000)));
ab3517c1
JN
7309 } else {
7310 u32 pp_ctl;
7311
b4e33881 7312 pp_ctl = intel_de_read(dev_priv, regs.pp_ctrl);
ab3517c1 7313 pp_ctl &= ~BXT_POWER_CYCLE_DELAY_MASK;
78b36b10 7314 pp_ctl |= REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000));
b4e33881 7315 intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl);
ab3517c1 7316 }
67a54566 7317
bdc6114e
WK
7318 drm_dbg_kms(&dev_priv->drm,
7319 "panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
b4e33881
JN
7320 intel_de_read(dev_priv, regs.pp_on),
7321 intel_de_read(dev_priv, regs.pp_off),
bdc6114e 7322 i915_mmio_reg_valid(regs.pp_div) ?
b4e33881
JN
7323 intel_de_read(dev_priv, regs.pp_div) :
7324 (intel_de_read(dev_priv, regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK));
f684960e
CW
7325}
7326
46bd8383 7327static void intel_dp_pps_init(struct intel_dp *intel_dp)
335f752b 7328{
de25eb7f 7329 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
920a14b2
TU
7330
7331 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
335f752b
ID
7332 vlv_initial_power_sequencer_setup(intel_dp);
7333 } else {
46bd8383
VS
7334 intel_dp_init_panel_power_sequencer(intel_dp);
7335 intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
335f752b
ID
7336 }
7337}
7338
b33a2815
VK
7339/**
7340 * intel_dp_set_drrs_state - program registers for RR switch to take effect
5423adf1 7341 * @dev_priv: i915 device
e896402c 7342 * @crtc_state: a pointer to the active intel_crtc_state
b33a2815
VK
7343 * @refresh_rate: RR to be programmed
7344 *
7345 * This function gets called when refresh rate (RR) has to be changed from
7346 * one frequency to another. Switches can be between high and low RR
7347 * supported by the panel or to any other RR based on media playback (in
7348 * this case, RR value needs to be passed from user space).
7349 *
7350 * The caller of this function needs to take a lock on dev_priv->drrs.
7351 */
85cb48a1 7352static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
5f88a9c6 7353 const struct intel_crtc_state *crtc_state,
85cb48a1 7354 int refresh_rate)
439d7ac0 7355{
96178eeb 7356 struct intel_dp *intel_dp = dev_priv->drrs.dp;
2225f3c6 7357 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
96178eeb 7358 enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
439d7ac0
PB
7359
7360 if (refresh_rate <= 0) {
bdc6114e
WK
7361 drm_dbg_kms(&dev_priv->drm,
7362 "Refresh rate should be positive non-zero.\n");
439d7ac0
PB
7363 return;
7364 }
7365
96178eeb 7366 if (intel_dp == NULL) {
bdc6114e 7367 drm_dbg_kms(&dev_priv->drm, "DRRS not supported.\n");
439d7ac0
PB
7368 return;
7369 }
7370
439d7ac0 7371 if (!intel_crtc) {
bdc6114e
WK
7372 drm_dbg_kms(&dev_priv->drm,
7373 "DRRS: intel_crtc not initialized\n");
439d7ac0
PB
7374 return;
7375 }
7376
96178eeb 7377 if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
bdc6114e 7378 drm_dbg_kms(&dev_priv->drm, "Only Seamless DRRS supported.\n");
439d7ac0
PB
7379 return;
7380 }
7381
0425662f 7382 if (drm_mode_vrefresh(intel_dp->attached_connector->panel.downclock_mode) ==
96178eeb 7383 refresh_rate)
439d7ac0
PB
7384 index = DRRS_LOW_RR;
7385
96178eeb 7386 if (index == dev_priv->drrs.refresh_rate_type) {
bdc6114e
WK
7387 drm_dbg_kms(&dev_priv->drm,
7388 "DRRS requested for previously set RR...ignoring\n");
439d7ac0
PB
7389 return;
7390 }
7391
1326a92c 7392 if (!crtc_state->hw.active) {
bdc6114e
WK
7393 drm_dbg_kms(&dev_priv->drm,
7394 "eDP encoder disabled. CRTC not Active\n");
439d7ac0
PB
7395 return;
7396 }
7397
85cb48a1 7398 if (INTEL_GEN(dev_priv) >= 8 && !IS_CHERRYVIEW(dev_priv)) {
a4c30b1d
VK
7399 switch (index) {
7400 case DRRS_HIGH_RR:
4c354754 7401 intel_dp_set_m_n(crtc_state, M1_N1);
a4c30b1d
VK
7402 break;
7403 case DRRS_LOW_RR:
4c354754 7404 intel_dp_set_m_n(crtc_state, M2_N2);
a4c30b1d
VK
7405 break;
7406 case DRRS_MAX_RR:
7407 default:
bdc6114e
WK
7408 drm_err(&dev_priv->drm,
7409 "Unsupported refreshrate type\n");
a4c30b1d 7410 }
85cb48a1
ML
7411 } else if (INTEL_GEN(dev_priv) > 6) {
7412 i915_reg_t reg = PIPECONF(crtc_state->cpu_transcoder);
649636ef 7413 u32 val;
a4c30b1d 7414
b4e33881 7415 val = intel_de_read(dev_priv, reg);
439d7ac0 7416 if (index > DRRS_HIGH_RR) {
85cb48a1 7417 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
6fa7aec1
VK
7418 val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
7419 else
7420 val |= PIPECONF_EDP_RR_MODE_SWITCH;
439d7ac0 7421 } else {
85cb48a1 7422 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
6fa7aec1
VK
7423 val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV;
7424 else
7425 val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
439d7ac0 7426 }
b4e33881 7427 intel_de_write(dev_priv, reg, val);
439d7ac0
PB
7428 }
7429
4e9ac947
VK
7430 dev_priv->drrs.refresh_rate_type = index;
7431
bdc6114e
WK
7432 drm_dbg_kms(&dev_priv->drm, "eDP Refresh Rate set to : %dHz\n",
7433 refresh_rate);
4e9ac947
VK
7434}
7435
8040fefa
JRS
7436static void
7437intel_edp_drrs_enable_locked(struct intel_dp *intel_dp)
7438{
7439 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
7440
7441 dev_priv->drrs.busy_frontbuffer_bits = 0;
7442 dev_priv->drrs.dp = intel_dp;
7443}
7444
b33a2815
VK
7445/**
7446 * intel_edp_drrs_enable - init drrs struct if supported
7447 * @intel_dp: DP struct
5423adf1 7448 * @crtc_state: A pointer to the active crtc state.
b33a2815
VK
7449 *
7450 * Initializes frontbuffer_bits and drrs.dp
7451 */
85cb48a1 7452void intel_edp_drrs_enable(struct intel_dp *intel_dp,
5f88a9c6 7453 const struct intel_crtc_state *crtc_state)
c395578e 7454{
de25eb7f 7455 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
c395578e 7456
be2dd718 7457 if (!crtc_state->has_drrs)
c395578e 7458 return;
c395578e 7459
be2dd718 7460 drm_dbg_kms(&dev_priv->drm, "Enabling DRRS\n");
da83ef85 7461
c395578e 7462 mutex_lock(&dev_priv->drrs.mutex);
8040fefa 7463
f69a0d71 7464 if (dev_priv->drrs.dp) {
8040fefa 7465 drm_warn(&dev_priv->drm, "DRRS already enabled\n");
c395578e
VK
7466 goto unlock;
7467 }
7468
8040fefa 7469 intel_edp_drrs_enable_locked(intel_dp);
c395578e
VK
7470
7471unlock:
7472 mutex_unlock(&dev_priv->drrs.mutex);
7473}
7474
8040fefa
JRS
7475static void
7476intel_edp_drrs_disable_locked(struct intel_dp *intel_dp,
7477 const struct intel_crtc_state *crtc_state)
7478{
7479 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
7480
7481 if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) {
7482 int refresh;
7483
7484 refresh = drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode);
7485 intel_dp_set_drrs_state(dev_priv, crtc_state, refresh);
7486 }
7487
7488 dev_priv->drrs.dp = NULL;
7489}
7490
b33a2815
VK
7491/**
7492 * intel_edp_drrs_disable - Disable DRRS
7493 * @intel_dp: DP struct
5423adf1 7494 * @old_crtc_state: Pointer to old crtc_state.
b33a2815
VK
7495 *
7496 */
85cb48a1 7497void intel_edp_drrs_disable(struct intel_dp *intel_dp,
5f88a9c6 7498 const struct intel_crtc_state *old_crtc_state)
c395578e 7499{
de25eb7f 7500 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
c395578e 7501
85cb48a1 7502 if (!old_crtc_state->has_drrs)
c395578e
VK
7503 return;
7504
7505 mutex_lock(&dev_priv->drrs.mutex);
7506 if (!dev_priv->drrs.dp) {
7507 mutex_unlock(&dev_priv->drrs.mutex);
7508 return;
7509 }
7510
8040fefa 7511 intel_edp_drrs_disable_locked(intel_dp, old_crtc_state);
c395578e
VK
7512 mutex_unlock(&dev_priv->drrs.mutex);
7513
7514 cancel_delayed_work_sync(&dev_priv->drrs.work);
7515}
7516
8040fefa
JRS
7517/**
7518 * intel_edp_drrs_update - Update DRRS state
7519 * @intel_dp: Intel DP
7520 * @crtc_state: new CRTC state
7521 *
7522 * This function will update DRRS states, disabling or enabling DRRS when
7523 * executing fastsets. For full modeset, intel_edp_drrs_disable() and
7524 * intel_edp_drrs_enable() should be called instead.
7525 */
7526void
7527intel_edp_drrs_update(struct intel_dp *intel_dp,
7528 const struct intel_crtc_state *crtc_state)
7529{
7530 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
7531
7532 if (dev_priv->drrs.type != SEAMLESS_DRRS_SUPPORT)
7533 return;
7534
7535 mutex_lock(&dev_priv->drrs.mutex);
7536
7537 /* New state matches current one? */
7538 if (crtc_state->has_drrs == !!dev_priv->drrs.dp)
7539 goto unlock;
7540
7541 if (crtc_state->has_drrs)
7542 intel_edp_drrs_enable_locked(intel_dp);
7543 else
7544 intel_edp_drrs_disable_locked(intel_dp, crtc_state);
7545
7546unlock:
7547 mutex_unlock(&dev_priv->drrs.mutex);
7548}
7549
4e9ac947
VK
7550static void intel_edp_drrs_downclock_work(struct work_struct *work)
7551{
7552 struct drm_i915_private *dev_priv =
7553 container_of(work, typeof(*dev_priv), drrs.work.work);
7554 struct intel_dp *intel_dp;
7555
7556 mutex_lock(&dev_priv->drrs.mutex);
7557
7558 intel_dp = dev_priv->drrs.dp;
7559
7560 if (!intel_dp)
7561 goto unlock;
7562
439d7ac0 7563 /*
4e9ac947
VK
7564 * The delayed work can race with an invalidate hence we need to
7565 * recheck.
439d7ac0
PB
7566 */
7567
4e9ac947
VK
7568 if (dev_priv->drrs.busy_frontbuffer_bits)
7569 goto unlock;
439d7ac0 7570
85cb48a1
ML
7571 if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR) {
7572 struct drm_crtc *crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
7573
7574 intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
0425662f 7575 drm_mode_vrefresh(intel_dp->attached_connector->panel.downclock_mode));
85cb48a1 7576 }
439d7ac0 7577
4e9ac947 7578unlock:
4e9ac947 7579 mutex_unlock(&dev_priv->drrs.mutex);
439d7ac0
PB
7580}
7581
b33a2815 7582/**
0ddfd203 7583 * intel_edp_drrs_invalidate - Disable Idleness DRRS
5748b6a1 7584 * @dev_priv: i915 device
b33a2815
VK
7585 * @frontbuffer_bits: frontbuffer plane tracking bits
7586 *
0ddfd203
R
7587 * This function gets called everytime rendering on the given planes start.
7588 * Hence DRRS needs to be Upclocked, i.e. (LOW_RR -> HIGH_RR).
b33a2815
VK
7589 *
7590 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
7591 */
5748b6a1
CW
7592void intel_edp_drrs_invalidate(struct drm_i915_private *dev_priv,
7593 unsigned int frontbuffer_bits)
a93fad0f 7594{
6770ef33 7595 struct intel_dp *intel_dp;
a93fad0f
VK
7596 struct drm_crtc *crtc;
7597 enum pipe pipe;
7598
9da7d693 7599 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
a93fad0f
VK
7600 return;
7601
88f933a8 7602 cancel_delayed_work(&dev_priv->drrs.work);
3954e733 7603
a93fad0f 7604 mutex_lock(&dev_priv->drrs.mutex);
6770ef33
VS
7605
7606 intel_dp = dev_priv->drrs.dp;
7607 if (!intel_dp) {
9da7d693
DV
7608 mutex_unlock(&dev_priv->drrs.mutex);
7609 return;
7610 }
7611
6770ef33 7612 crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
a93fad0f
VK
7613 pipe = to_intel_crtc(crtc)->pipe;
7614
c1d038c6
DV
7615 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
7616 dev_priv->drrs.busy_frontbuffer_bits |= frontbuffer_bits;
7617
0ddfd203 7618 /* invalidate means busy screen hence upclock */
c1d038c6 7619 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
85cb48a1 7620 intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
0425662f 7621 drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode));
a93fad0f 7622
a93fad0f
VK
7623 mutex_unlock(&dev_priv->drrs.mutex);
7624}
7625
b33a2815 7626/**
0ddfd203 7627 * intel_edp_drrs_flush - Restart Idleness DRRS
5748b6a1 7628 * @dev_priv: i915 device
b33a2815
VK
7629 * @frontbuffer_bits: frontbuffer plane tracking bits
7630 *
0ddfd203
R
7631 * This function gets called every time rendering on the given planes has
7632 * completed or flip on a crtc is completed. So DRRS should be upclocked
7633 * (LOW_RR -> HIGH_RR). And also Idleness detection should be started again,
7634 * if no other planes are dirty.
b33a2815
VK
7635 *
7636 * Dirty frontbuffers relevant to DRRS are tracked in busy_frontbuffer_bits.
7637 */
5748b6a1
CW
7638void intel_edp_drrs_flush(struct drm_i915_private *dev_priv,
7639 unsigned int frontbuffer_bits)
a93fad0f 7640{
6770ef33 7641 struct intel_dp *intel_dp;
a93fad0f
VK
7642 struct drm_crtc *crtc;
7643 enum pipe pipe;
7644
9da7d693 7645 if (dev_priv->drrs.type == DRRS_NOT_SUPPORTED)
a93fad0f
VK
7646 return;
7647
88f933a8 7648 cancel_delayed_work(&dev_priv->drrs.work);
3954e733 7649
a93fad0f 7650 mutex_lock(&dev_priv->drrs.mutex);
6770ef33
VS
7651
7652 intel_dp = dev_priv->drrs.dp;
7653 if (!intel_dp) {
9da7d693
DV
7654 mutex_unlock(&dev_priv->drrs.mutex);
7655 return;
7656 }
7657
6770ef33 7658 crtc = dp_to_dig_port(intel_dp)->base.base.crtc;
a93fad0f 7659 pipe = to_intel_crtc(crtc)->pipe;
c1d038c6
DV
7660
7661 frontbuffer_bits &= INTEL_FRONTBUFFER_ALL_MASK(pipe);
a93fad0f
VK
7662 dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits;
7663
0ddfd203 7664 /* flush means busy screen hence upclock */
c1d038c6 7665 if (frontbuffer_bits && dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR)
85cb48a1 7666 intel_dp_set_drrs_state(dev_priv, to_intel_crtc(crtc)->config,
0425662f 7667 drm_mode_vrefresh(intel_dp->attached_connector->panel.fixed_mode));
0ddfd203
R
7668
7669 /*
7670 * flush also means no more activity hence schedule downclock, if all
7671 * other fbs are quiescent too
7672 */
7673 if (!dev_priv->drrs.busy_frontbuffer_bits)
a93fad0f
VK
7674 schedule_delayed_work(&dev_priv->drrs.work,
7675 msecs_to_jiffies(1000));
7676 mutex_unlock(&dev_priv->drrs.mutex);
7677}
7678
b33a2815
VK
7679/**
7680 * DOC: Display Refresh Rate Switching (DRRS)
7681 *
7682 * Display Refresh Rate Switching (DRRS) is a power conservation feature
7683 * which enables swtching between low and high refresh rates,
7684 * dynamically, based on the usage scenario. This feature is applicable
7685 * for internal panels.
7686 *
7687 * Indication that the panel supports DRRS is given by the panel EDID, which
7688 * would list multiple refresh rates for one resolution.
7689 *
7690 * DRRS is of 2 types - static and seamless.
7691 * Static DRRS involves changing refresh rate (RR) by doing a full modeset
7692 * (may appear as a blink on screen) and is used in dock-undock scenario.
7693 * Seamless DRRS involves changing RR without any visual effect to the user
7694 * and can be used during normal system usage. This is done by programming
7695 * certain registers.
7696 *
7697 * Support for static/seamless DRRS may be indicated in the VBT based on
7698 * inputs from the panel spec.
7699 *
7700 * DRRS saves power by switching to low RR based on usage scenarios.
7701 *
2e7a5701
DV
7702 * The implementation is based on frontbuffer tracking implementation. When
7703 * there is a disturbance on the screen triggered by user activity or a periodic
7704 * system activity, DRRS is disabled (RR is changed to high RR). When there is
7705 * no movement on screen, after a timeout of 1 second, a switch to low RR is
7706 * made.
7707 *
7708 * For integration with frontbuffer tracking code, intel_edp_drrs_invalidate()
7709 * and intel_edp_drrs_flush() are called.
b33a2815
VK
7710 *
7711 * DRRS can be further extended to support other internal panels and also
7712 * the scenario of video playback wherein RR is set based on the rate
7713 * requested by userspace.
7714 */
7715
7716/**
7717 * intel_dp_drrs_init - Init basic DRRS work and mutex.
2f773477 7718 * @connector: eDP connector
b33a2815
VK
7719 * @fixed_mode: preferred mode of panel
7720 *
7721 * This function is called only once at driver load to initialize basic
7722 * DRRS stuff.
7723 *
7724 * Returns:
7725 * Downclock mode if panel supports it, else return NULL.
7726 * DRRS support is determined by the presence of downclock mode (apart
7727 * from VBT setting).
7728 */
4f9db5b5 7729static struct drm_display_mode *
2f773477
VS
7730intel_dp_drrs_init(struct intel_connector *connector,
7731 struct drm_display_mode *fixed_mode)
4f9db5b5 7732{
2f773477 7733 struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
4f9db5b5
PB
7734 struct drm_display_mode *downclock_mode = NULL;
7735
9da7d693
DV
7736 INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_edp_drrs_downclock_work);
7737 mutex_init(&dev_priv->drrs.mutex);
7738
dd11bc10 7739 if (INTEL_GEN(dev_priv) <= 6) {
bdc6114e
WK
7740 drm_dbg_kms(&dev_priv->drm,
7741 "DRRS supported for Gen7 and above\n");
4f9db5b5
PB
7742 return NULL;
7743 }
7744
7745 if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
bdc6114e 7746 drm_dbg_kms(&dev_priv->drm, "VBT doesn't support DRRS\n");
4f9db5b5
PB
7747 return NULL;
7748 }
7749
abf1aae8 7750 downclock_mode = intel_panel_edid_downclock_mode(connector, fixed_mode);
4f9db5b5 7751 if (!downclock_mode) {
bdc6114e
WK
7752 drm_dbg_kms(&dev_priv->drm,
7753 "Downclock mode is not found. DRRS not supported\n");
4f9db5b5
PB
7754 return NULL;
7755 }
7756
96178eeb 7757 dev_priv->drrs.type = dev_priv->vbt.drrs_type;
4f9db5b5 7758
96178eeb 7759 dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
bdc6114e
WK
7760 drm_dbg_kms(&dev_priv->drm,
7761 "seamless DRRS supported for eDP panel.\n");
4f9db5b5
PB
7762 return downclock_mode;
7763}
7764
ed92f0b2 7765static bool intel_edp_init_connector(struct intel_dp *intel_dp,
36b5f425 7766 struct intel_connector *intel_connector)
ed92f0b2 7767{
de25eb7f
RV
7768 struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
7769 struct drm_device *dev = &dev_priv->drm;
2f773477 7770 struct drm_connector *connector = &intel_connector->base;
ed92f0b2 7771 struct drm_display_mode *fixed_mode = NULL;
4f9db5b5 7772 struct drm_display_mode *downclock_mode = NULL;
ed92f0b2 7773 bool has_dpcd;
6517d273 7774 enum pipe pipe = INVALID_PIPE;
69d93820
CW
7775 intel_wakeref_t wakeref;
7776 struct edid *edid;
ed92f0b2 7777
1853a9da 7778 if (!intel_dp_is_edp(intel_dp))
ed92f0b2
PZ
7779 return true;
7780
36b80aa3
JRS
7781 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work, edp_panel_vdd_work);
7782
97a824e1
ID
7783 /*
7784 * On IBX/CPT we may get here with LVDS already registered. Since the
7785 * driver uses the only internal power sequencer available for both
7786 * eDP and LVDS bail out early in this case to prevent interfering
7787 * with an already powered-on LVDS power sequencer.
7788 */
17be4942 7789 if (intel_get_lvds_encoder(dev_priv)) {
eb020ca3
PB
7790 drm_WARN_ON(dev,
7791 !(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
bdc6114e
WK
7792 drm_info(&dev_priv->drm,
7793 "LVDS was detected, not registering eDP\n");
97a824e1
ID
7794
7795 return false;
7796 }
7797
69d93820
CW
7798 with_pps_lock(intel_dp, wakeref) {
7799 intel_dp_init_panel_power_timestamps(intel_dp);
7800 intel_dp_pps_init(intel_dp);
7801 intel_edp_panel_vdd_sanitize(intel_dp);
7802 }
63635217 7803
ed92f0b2 7804 /* Cache DPCD and EDID for edp. */
fe5a66f9 7805 has_dpcd = intel_edp_init_dpcd(intel_dp);
ed92f0b2 7806
fe5a66f9 7807 if (!has_dpcd) {
ed92f0b2 7808 /* if this fails, presume the device is a ghost */
bdc6114e
WK
7809 drm_info(&dev_priv->drm,
7810 "failed to retrieve link info, disabling eDP\n");
b4d06ede 7811 goto out_vdd_off;
ed92f0b2
PZ
7812 }
7813
060c8778 7814 mutex_lock(&dev->mode_config.mutex);
0b99836f 7815 edid = drm_get_edid(connector, &intel_dp->aux.ddc);
ed92f0b2
PZ
7816 if (edid) {
7817 if (drm_add_edid_modes(connector, edid)) {
0883ce81
LP
7818 drm_connector_update_edid_property(connector, edid);
7819 intel_dp->edid_quirks = drm_dp_get_edid_quirks(edid);
ed92f0b2
PZ
7820 } else {
7821 kfree(edid);
7822 edid = ERR_PTR(-EINVAL);
7823 }
7824 } else {
7825 edid = ERR_PTR(-ENOENT);
7826 }
7827 intel_connector->edid = edid;
7828
0dc927eb
VS
7829 fixed_mode = intel_panel_edid_fixed_mode(intel_connector);
7830 if (fixed_mode)
7831 downclock_mode = intel_dp_drrs_init(intel_connector, fixed_mode);
ed92f0b2
PZ
7832
7833 /* fallback to VBT if available for eDP */
325710d3
VS
7834 if (!fixed_mode)
7835 fixed_mode = intel_panel_vbt_fixed_mode(intel_connector);
060c8778 7836 mutex_unlock(&dev->mode_config.mutex);
ed92f0b2 7837
920a14b2 7838 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
01527b31
CT
7839 intel_dp->edp_notifier.notifier_call = edp_notify_handler;
7840 register_reboot_notifier(&intel_dp->edp_notifier);
6517d273
VS
7841
7842 /*
7843 * Figure out the current pipe for the initial backlight setup.
7844 * If the current pipe isn't valid, try the PPS pipe, and if that
7845 * fails just assume pipe A.
7846 */
9f2bdb00 7847 pipe = vlv_active_pipe(intel_dp);
6517d273
VS
7848
7849 if (pipe != PIPE_A && pipe != PIPE_B)
7850 pipe = intel_dp->pps_pipe;
7851
7852 if (pipe != PIPE_A && pipe != PIPE_B)
7853 pipe = PIPE_A;
7854
bdc6114e
WK
7855 drm_dbg_kms(&dev_priv->drm,
7856 "using pipe %c for initial backlight setup\n",
7857 pipe_name(pipe));
01527b31
CT
7858 }
7859
d93fa1b4 7860 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
5507faeb 7861 intel_connector->panel.backlight.power = intel_edp_backlight_power;
6517d273 7862 intel_panel_setup_backlight(connector, pipe);
ed92f0b2 7863
69654c63 7864 if (fixed_mode) {
69654c63 7865 drm_connector_set_panel_orientation_with_quirk(connector,
0dd5b133 7866 dev_priv->vbt.orientation,
69654c63
DB
7867 fixed_mode->hdisplay, fixed_mode->vdisplay);
7868 }
9531221d 7869
ed92f0b2 7870 return true;
b4d06ede
ID
7871
7872out_vdd_off:
7873 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
7874 /*
7875 * vdd might still be enabled do to the delayed vdd off.
7876 * Make sure vdd is actually turned off here.
7877 */
69d93820
CW
7878 with_pps_lock(intel_dp, wakeref)
7879 edp_panel_vdd_off_sync(intel_dp);
b4d06ede
ID
7880
7881 return false;
ed92f0b2
PZ
7882}
7883
9301397a
MN
7884static void intel_dp_modeset_retry_work_fn(struct work_struct *work)
7885{
7886 struct intel_connector *intel_connector;
7887 struct drm_connector *connector;
7888
7889 intel_connector = container_of(work, typeof(*intel_connector),
7890 modeset_retry_work);
7891 connector = &intel_connector->base;
7892 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id,
7893 connector->name);
7894
7895 /* Grab the locks before changing connector property*/
7896 mutex_lock(&connector->dev->mode_config.mutex);
7897 /* Set connector link status to BAD and send a Uevent to notify
7898 * userspace to do a modeset.
7899 */
97e14fbe
DV
7900 drm_connector_set_link_status_property(connector,
7901 DRM_MODE_LINK_STATUS_BAD);
9301397a
MN
7902 mutex_unlock(&connector->dev->mode_config.mutex);
7903 /* Send Hotplug uevent so userspace can reprobe */
7904 drm_kms_helper_hotplug_event(connector->dev);
7905}
7906
16c25533 7907bool
7801f3b7 7908intel_dp_init_connector(struct intel_digital_port *dig_port,
f0fec3f2 7909 struct intel_connector *intel_connector)
a4fc5ed6 7910{
f0fec3f2 7911 struct drm_connector *connector = &intel_connector->base;
7801f3b7
LDM
7912 struct intel_dp *intel_dp = &dig_port->dp;
7913 struct intel_encoder *intel_encoder = &dig_port->base;
f0fec3f2 7914 struct drm_device *dev = intel_encoder->base.dev;
fac5e23e 7915 struct drm_i915_private *dev_priv = to_i915(dev);
8f4f2797 7916 enum port port = intel_encoder->port;
d8fe2ab6 7917 enum phy phy = intel_port_to_phy(dev_priv, port);
7a418e34 7918 int type;
a4fc5ed6 7919
9301397a
MN
7920 /* Initialize the work for modeset in case of link train failure */
7921 INIT_WORK(&intel_connector->modeset_retry_work,
7922 intel_dp_modeset_retry_work_fn);
7923
7801f3b7 7924 if (drm_WARN(dev, dig_port->max_lanes < 1,
eb020ca3 7925 "Not enough lanes (%d) for DP on [ENCODER:%d:%s]\n",
7801f3b7 7926 dig_port->max_lanes, intel_encoder->base.base.id,
eb020ca3 7927 intel_encoder->base.name))
ccb1a831
VS
7928 return false;
7929
d3913019
MA
7930 intel_dp_set_source_rates(intel_dp);
7931
d7e8ef02 7932 intel_dp->reset_link_params = true;
a4a5d2f8 7933 intel_dp->pps_pipe = INVALID_PIPE;
9f2bdb00 7934 intel_dp->active_pipe = INVALID_PIPE;
a4a5d2f8 7935
0767935e 7936 /* Preserve the current hw state. */
b4e33881 7937 intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg);
dd06f90e 7938 intel_dp->attached_connector = intel_connector;
3d3dc149 7939
4e309baf
ID
7940 if (intel_dp_is_port_edp(dev_priv, port)) {
7941 /*
7942 * Currently we don't support eDP on TypeC ports, although in
7943 * theory it could work on TypeC legacy ports.
7944 */
eb020ca3 7945 drm_WARN_ON(dev, intel_phy_is_tc(dev_priv, phy));
b329530c 7946 type = DRM_MODE_CONNECTOR_eDP;
4e309baf 7947 } else {
3b32a35b 7948 type = DRM_MODE_CONNECTOR_DisplayPort;
4e309baf 7949 }
b329530c 7950
9f2bdb00
VS
7951 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
7952 intel_dp->active_pipe = vlv_active_pipe(intel_dp);
7953
d3913019
MA
7954 /*
7955 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
7956 * for DP the encoder type can be set by the caller to
7957 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
7958 */
7959 if (type == DRM_MODE_CONNECTOR_eDP)
7960 intel_encoder->type = INTEL_OUTPUT_EDP;
7961
7962 /* eDP only on port B and/or C on vlv/chv */
7963 if (drm_WARN_ON(dev, (IS_VALLEYVIEW(dev_priv) ||
7964 IS_CHERRYVIEW(dev_priv)) &&
7965 intel_dp_is_edp(intel_dp) &&
7966 port != PORT_B && port != PORT_C))
7967 return false;
7968
bdc6114e
WK
7969 drm_dbg_kms(&dev_priv->drm,
7970 "Adding %s connector on [ENCODER:%d:%s]\n",
7971 type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
7972 intel_encoder->base.base.id, intel_encoder->base.name);
e7281eab 7973
b329530c 7974 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
a4fc5ed6
KP
7975 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
7976
b2ae318a 7977 if (!HAS_GMCH(dev_priv))
05021389 7978 connector->interlace_allowed = true;
a4fc5ed6
KP
7979 connector->doublescan_allowed = 0;
7980
5fb908eb 7981 intel_connector->polled = DRM_CONNECTOR_POLL_HPD;
5432fcaf 7982
b6339585 7983 intel_dp_aux_init(intel_dp);
7a418e34 7984
df0e9248 7985 intel_connector_attach_encoder(intel_connector, intel_encoder);
a4fc5ed6 7986
4f8036a2 7987 if (HAS_DDI(dev_priv))
bcbc889b
PZ
7988 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
7989 else
7990 intel_connector->get_hw_state = intel_connector_get_hw_state;
7991
0e32b39c 7992 /* init MST on ports that can support it */
7801f3b7 7993 intel_dp_mst_encoder_init(dig_port,
10d987fd 7994 intel_connector->base.base.id);
0e32b39c 7995
36b5f425 7996 if (!intel_edp_init_connector(intel_dp, intel_connector)) {
a121f4e5 7997 intel_dp_aux_fini(intel_dp);
7801f3b7 7998 intel_dp_mst_encoder_cleanup(dig_port);
a121f4e5 7999 goto fail;
b2f246a8 8000 }
32f9d658 8001
f684960e 8002 intel_dp_add_properties(intel_dp, connector);
20f24d77 8003
fdddd08c 8004 if (is_hdcp_supported(dev_priv, port) && !intel_dp_is_edp(intel_dp)) {
d079b7e4 8005 int ret = intel_dp_init_hdcp(dig_port, intel_connector);
20f24d77 8006 if (ret)
bdc6114e
WK
8007 drm_dbg_kms(&dev_priv->drm,
8008 "HDCP init failed, skipping.\n");
20f24d77 8009 }
f684960e 8010
a4fc5ed6
KP
8011 /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
8012 * 0xd. Failure to do so will result in spurious interrupts being
8013 * generated on the port when a cable is not attached.
8014 */
1c0f1b3d 8015 if (IS_G45(dev_priv)) {
b4e33881
JN
8016 u32 temp = intel_de_read(dev_priv, PEG_BAND_GAP_DATA);
8017 intel_de_write(dev_priv, PEG_BAND_GAP_DATA,
8018 (temp & ~0xf) | 0xd);
a4fc5ed6 8019 }
16c25533
PZ
8020
8021 return true;
a121f4e5
VS
8022
8023fail:
a121f4e5
VS
8024 drm_connector_cleanup(connector);
8025
8026 return false;
a4fc5ed6 8027}
f0fec3f2 8028
/*
 * intel_dp_init - allocate and register a DisplayPort encoder + connector
 * @dev_priv: i915 device
 * @output_reg: the port's DP control register
 * @port: port (PORT_A..) this encoder sits on
 *
 * Allocates the digital port and connector, registers the DRM encoder and
 * fills in the platform-specific modeset, link-training and hotplug vfuncs
 * before handing off to intel_dp_init_connector().  Returns true on
 * success; on failure everything allocated here is unwound and false is
 * returned.
 */
bool intel_dp_init(struct drm_i915_private *dev_priv,
		   i915_reg_t output_reg,
		   enum port port)
{
	struct intel_digital_port *dig_port;
	struct intel_encoder *intel_encoder;
	struct drm_encoder *encoder;
	struct intel_connector *intel_connector;

	dig_port = kzalloc(sizeof(*dig_port), GFP_KERNEL);
	if (!dig_port)
		return false;

	intel_connector = intel_connector_alloc();
	if (!intel_connector)
		goto err_connector_alloc;

	intel_encoder = &dig_port->base;
	encoder = &intel_encoder->base;

	/* Serializes HDCP operations on this port (used by intel_hdcp). */
	mutex_init(&dig_port->hdcp_mutex);

	if (drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
			     &intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS,
			     "DP %c", port_name(port)))
		goto err_encoder_init;

	intel_encoder->hotplug = intel_dp_hotplug;
	intel_encoder->compute_config = intel_dp_compute_config;
	intel_encoder->get_hw_state = intel_dp_get_hw_state;
	intel_encoder->get_config = intel_dp_get_config;
	intel_encoder->update_pipe = intel_panel_update_backlight;
	intel_encoder->suspend = intel_dp_encoder_suspend;
	/* Platform-specific enable/disable sequence hooks. */
	if (IS_CHERRYVIEW(dev_priv)) {
		intel_encoder->pre_pll_enable = chv_dp_pre_pll_enable;
		intel_encoder->pre_enable = chv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
		intel_encoder->disable = vlv_disable_dp;
		intel_encoder->post_disable = chv_post_disable_dp;
		intel_encoder->post_pll_disable = chv_dp_post_pll_disable;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		intel_encoder->pre_pll_enable = vlv_dp_pre_pll_enable;
		intel_encoder->pre_enable = vlv_pre_enable_dp;
		intel_encoder->enable = vlv_enable_dp;
		intel_encoder->disable = vlv_disable_dp;
		intel_encoder->post_disable = vlv_post_disable_dp;
	} else {
		intel_encoder->pre_enable = g4x_pre_enable_dp;
		intel_encoder->enable = g4x_enable_dp;
		intel_encoder->disable = g4x_disable_dp;
		intel_encoder->post_disable = g4x_post_disable_dp;
	}

	/*
	 * CPT-style link-train register layout applies to IVB CPU eDP
	 * (port A) and to all non-A ports behind a CPT PCH.
	 */
	if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
	    (HAS_PCH_CPT(dev_priv) && port != PORT_A))
		dig_port->dp.set_link_train = cpt_set_link_train;
	else
		dig_port->dp.set_link_train = g4x_set_link_train;

	/* Vswing/pre-emphasis programming differs per platform/port. */
	if (IS_CHERRYVIEW(dev_priv))
		dig_port->dp.set_signal_levels = chv_set_signal_levels;
	else if (IS_VALLEYVIEW(dev_priv))
		dig_port->dp.set_signal_levels = vlv_set_signal_levels;
	else if (IS_IVYBRIDGE(dev_priv) && port == PORT_A)
		dig_port->dp.set_signal_levels = ivb_cpu_edp_set_signal_levels;
	else if (IS_GEN(dev_priv, 6) && port == PORT_A)
		dig_port->dp.set_signal_levels = snb_cpu_edp_set_signal_levels;
	else
		dig_port->dp.set_signal_levels = g4x_set_signal_levels;

	/* Max supported vswing/pre-emphasis level: 3 vs 2 by platform. */
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv) ||
	    (HAS_PCH_SPLIT(dev_priv) && port != PORT_A)) {
		dig_port->dp.preemph_max = intel_dp_preemph_max_3;
		dig_port->dp.voltage_max = intel_dp_voltage_max_3;
	} else {
		dig_port->dp.preemph_max = intel_dp_preemph_max_2;
		dig_port->dp.voltage_max = intel_dp_voltage_max_2;
	}

	dig_port->dp.output_reg = output_reg;
	dig_port->max_lanes = 4;

	intel_encoder->type = INTEL_OUTPUT_DP;
	intel_encoder->power_domain = intel_port_to_power_domain(port);
	if (IS_CHERRYVIEW(dev_priv)) {
		/* On CHV, port D can only drive pipe C; B/C drive A/B. */
		if (port == PORT_D)
			intel_encoder->pipe_mask = BIT(PIPE_C);
		else
			intel_encoder->pipe_mask = BIT(PIPE_A) | BIT(PIPE_B);
	} else {
		intel_encoder->pipe_mask = ~0;
	}
	intel_encoder->cloneable = 0;
	intel_encoder->port = port;
	intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port);

	dig_port->hpd_pulse = intel_dp_hpd_pulse;

	/* Live-status (hotplug detect) probing, per chipset generation. */
	if (HAS_GMCH(dev_priv)) {
		if (IS_GM45(dev_priv))
			dig_port->connected = gm45_digital_port_connected;
		else
			dig_port->connected = g4x_digital_port_connected;
	} else {
		if (port == PORT_A)
			dig_port->connected = ilk_digital_port_connected;
		else
			dig_port->connected = ibx_digital_port_connected;
	}

	/* No HDMI/DP infoframes on port A (eDP, presumably) — TODO confirm */
	if (port != PORT_A)
		intel_infoframe_init(dig_port);

	dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
	if (!intel_dp_init_connector(dig_port, intel_connector))
		goto err_init_connector;

	return true;

	/* Unwind in reverse order of the setup above. */
err_init_connector:
	drm_encoder_cleanup(encoder);
err_encoder_init:
	kfree(intel_connector);
err_connector_alloc:
	kfree(dig_port);
	return false;
}
0e32b39c 8156
1a4313d1 8157void intel_dp_mst_suspend(struct drm_i915_private *dev_priv)
0e32b39c 8158{
1a4313d1
VS
8159 struct intel_encoder *encoder;
8160
8161 for_each_intel_encoder(&dev_priv->drm, encoder) {
8162 struct intel_dp *intel_dp;
0e32b39c 8163
1a4313d1
VS
8164 if (encoder->type != INTEL_OUTPUT_DDI)
8165 continue;
5aa56969 8166
b7d02c3a 8167 intel_dp = enc_to_intel_dp(encoder);
5aa56969 8168
1a4313d1 8169 if (!intel_dp->can_mst)
0e32b39c
DA
8170 continue;
8171
1a4313d1
VS
8172 if (intel_dp->is_mst)
8173 drm_dp_mst_topology_mgr_suspend(&intel_dp->mst_mgr);
0e32b39c
DA
8174 }
8175}
8176
1a4313d1 8177void intel_dp_mst_resume(struct drm_i915_private *dev_priv)
0e32b39c 8178{
1a4313d1 8179 struct intel_encoder *encoder;
0e32b39c 8180
1a4313d1
VS
8181 for_each_intel_encoder(&dev_priv->drm, encoder) {
8182 struct intel_dp *intel_dp;
5aa56969 8183 int ret;
0e32b39c 8184
1a4313d1
VS
8185 if (encoder->type != INTEL_OUTPUT_DDI)
8186 continue;
8187
b7d02c3a 8188 intel_dp = enc_to_intel_dp(encoder);
1a4313d1
VS
8189
8190 if (!intel_dp->can_mst)
5aa56969 8191 continue;
0e32b39c 8192
6f85f738
LP
8193 ret = drm_dp_mst_topology_mgr_resume(&intel_dp->mst_mgr,
8194 true);
6be1cf96
LP
8195 if (ret) {
8196 intel_dp->is_mst = false;
8197 drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
8198 false);
8199 }
0e32b39c
DA
8200 }
8201}