1 /*
2 * Copyright © 2014-2016 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 */
23
24 #include "intel_drv.h"
25
26 /**
27 * DOC: DPIO
28 *
29 * VLV, CHV and BXT have slightly peculiar display PHYs for driving DP/HDMI
30 * ports. DPIO is the name given to such a display PHY. These PHYs
31 * don't follow the standard programming model using direct MMIO
32 * registers, and instead their registers must be accessed through IOSF
33 * sideband. VLV has one such PHY for driving ports B and C, and CHV
34 * adds another PHY for driving port D. Each PHY responds to a specific
35 * IOSF-SB port.
36 *
37 * Each display PHY is made up of one or two channels. Each channel
38 * houses a common lane part which contains the PLL and other common
39 * logic. CH0 common lane also contains the IOSF-SB logic for the
40 * Common Register Interface (CRI) ie. the DPIO registers. CRI clock
41 * must be running when any DPIO registers are accessed.
42 *
43 * In addition to having their own registers, the PHYs are also
44 * controlled through some dedicated signals from the display
45 * controller. These include PLL reference clock enable, PLL enable,
46 * and CRI clock selection, for example.
47 *
48 * Each channel also has two splines (also called data lanes), and
49 * each spline is made up of one Physical Access Coding Sub-Layer
50 * (PCS) block and two TX lanes. So each channel has two PCS blocks
51 * and four TX lanes. The TX lanes are used as DP lanes or TMDS
52 * data/clock pairs depending on the output type.
53 *
54 * Additionally the PHY also contains an AUX lane with AUX blocks
55 * for each channel. This is used for DP AUX communication, but
56 * this fact isn't really relevant for the driver since AUX is
57 * controlled from the display controller side. No DPIO registers
58 * need to be accessed during AUX communication.
59 *
60 * Generally on VLV/CHV the common lane corresponds to the pipe and
61 * the spline (PCS/TX) corresponds to the port.
62 *
63 * For dual channel PHY (VLV/CHV):
64 *
65 * pipe A == CMN/PLL/REF CH0
66 *
67 * pipe B == CMN/PLL/REF CH1
68 *
69 * port B == PCS/TX CH0
70 *
71 * port C == PCS/TX CH1
72 *
73 * This is especially important when we cross the streams
74 * ie. drive port B with pipe B, or port C with pipe A.
75 *
76 * For single channel PHY (CHV):
77 *
78 * pipe C == CMN/PLL/REF CH0
79 *
80 * port D == PCS/TX CH0
81 *
82 * On BXT the entire PHY channel corresponds to the port. That means
83 * the PLL is also now associated with the port rather than the pipe,
84 * and so the clock needs to be routed to the appropriate transcoder.
85 * Port A PLL is directly connected to transcoder EDP and port B/C
86 * PLLs can be routed to any transcoder A/B/C.
87 *
88 * Note: DDI0 is digital port B, DDI1 is digital port C, and DDI2 is
89 * digital port D (CHV) or port A (BXT). ::
90 *
91 *
92 * Dual channel PHY (VLV/CHV/BXT)
93 * ---------------------------------
94 * | CH0 | CH1 |
95 * | CMN/PLL/REF | CMN/PLL/REF |
96 * |---------------|---------------| Display PHY
97 * | PCS01 | PCS23 | PCS01 | PCS23 |
98 * |-------|-------|-------|-------|
99 * |TX0|TX1|TX2|TX3|TX0|TX1|TX2|TX3|
100 * ---------------------------------
101 * | DDI0 | DDI1 | DP/HDMI ports
102 * ---------------------------------
103 *
104 * Single channel PHY (CHV/BXT)
105 * -----------------
106 * | CH0 |
107 * | CMN/PLL/REF |
108 * |---------------| Display PHY
109 * | PCS01 | PCS23 |
110 * |-------|-------|
111 * |TX0|TX1|TX2|TX3|
112 * -----------------
113 * | DDI2 | DP/HDMI port
114 * -----------------
115 */
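
/*
 * Illustrative sketch (not part of the driver code): resolving which PHY
 * and channel drive a given port on BXT/GLK via bxt_port_to_phy_channel()
 * and the mapping tables defined below.
 *
 *	enum dpio_phy phy;
 *	enum dpio_channel ch;
 *
 *	bxt_port_to_phy_channel(dev_priv, PORT_C, &phy, &ch);
 *	// BXT: phy == DPIO_PHY0, ch == DPIO_CH1
 *	// GLK: phy == DPIO_PHY2, ch == DPIO_CH0
 */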
116
117 /**
118 * struct bxt_ddi_phy_info - Hold info for a Broxton DDI phy
119 */
120 struct bxt_ddi_phy_info {
121 /**
122 * @dual_channel: true if this phy has a second channel.
123 */
124 bool dual_channel;
125
126 /**
127 * @rcomp_phy: If -1, indicates this phy has its own rcomp resistor.
128 * Otherwise the GRC value will be copied from the phy indicated by
129 * this field.
130 */
131 enum dpio_phy rcomp_phy;
132
133 /**
134 * @reset_delay: delay in us to wait before setting the common reset
135 * bit in BXT_PHY_CTL_FAMILY, which effectively enables the phy.
136 */
137 int reset_delay;
138
139 /**
140 * @pwron_mask: Mask with the appropriate bit set that would cause the
141 * punit to power this phy if written to BXT_P_CR_GT_DISP_PWRON.
142 */
143 u32 pwron_mask;
144
145 /**
146 * @channel: struct containing per channel information.
147 */
148 struct {
149 /**
150 * @port: which port maps to this channel.
151 */
152 enum port port;
153 } channel[2];
154 };
155
156 static const struct bxt_ddi_phy_info bxt_ddi_phy_info[] = {
157 [DPIO_PHY0] = {
158 .dual_channel = true,
159 .rcomp_phy = DPIO_PHY1,
160 .pwron_mask = BIT(0),
161
162 .channel = {
163 [DPIO_CH0] = { .port = PORT_B },
164 [DPIO_CH1] = { .port = PORT_C },
165 }
166 },
167 [DPIO_PHY1] = {
168 .dual_channel = false,
169 .rcomp_phy = -1,
170 .pwron_mask = BIT(1),
171
172 .channel = {
173 [DPIO_CH0] = { .port = PORT_A },
174 }
175 },
176 };
177
178 static const struct bxt_ddi_phy_info glk_ddi_phy_info[] = {
179 [DPIO_PHY0] = {
180 .dual_channel = false,
181 .rcomp_phy = DPIO_PHY1,
182 .pwron_mask = BIT(0),
183 .reset_delay = 20,
184
185 .channel = {
186 [DPIO_CH0] = { .port = PORT_B },
187 }
188 },
189 [DPIO_PHY1] = {
190 .dual_channel = false,
191 .rcomp_phy = -1,
192 .pwron_mask = BIT(3),
193 .reset_delay = 20,
194
195 .channel = {
196 [DPIO_CH0] = { .port = PORT_A },
197 }
198 },
199 [DPIO_PHY2] = {
200 .dual_channel = false,
201 .rcomp_phy = DPIO_PHY1,
202 .pwron_mask = BIT(1),
203 .reset_delay = 20,
204
205 .channel = {
206 [DPIO_CH0] = { .port = PORT_C },
207 }
208 },
209 };
210
211 static const struct bxt_ddi_phy_info *
212 bxt_get_phy_list(struct drm_i915_private *dev_priv, int *count)
213 {
214 if (IS_GEMINILAKE(dev_priv)) {
215 *count = ARRAY_SIZE(glk_ddi_phy_info);
216 return glk_ddi_phy_info;
217 } else {
218 *count = ARRAY_SIZE(bxt_ddi_phy_info);
219 return bxt_ddi_phy_info;
220 }
221 }
222
223 static const struct bxt_ddi_phy_info *
224 bxt_get_phy_info(struct drm_i915_private *dev_priv, enum dpio_phy phy)
225 {
226 int count;
227 const struct bxt_ddi_phy_info *phy_list =
228 bxt_get_phy_list(dev_priv, &count);
229
230 return &phy_list[phy];
231 }
232
233 void bxt_port_to_phy_channel(struct drm_i915_private *dev_priv, enum port port,
234 enum dpio_phy *phy, enum dpio_channel *ch)
235 {
236 const struct bxt_ddi_phy_info *phy_info, *phys;
237 int i, count;
238
239 phys = bxt_get_phy_list(dev_priv, &count);
240
241 for (i = 0; i < count; i++) {
242 phy_info = &phys[i];
243
244 if (port == phy_info->channel[DPIO_CH0].port) {
245 *phy = i;
246 *ch = DPIO_CH0;
247 return;
248 }
249
250 if (phy_info->dual_channel &&
251 port == phy_info->channel[DPIO_CH1].port) {
252 *phy = i;
253 *ch = DPIO_CH1;
254 return;
255 }
256 }
257
258 WARN(1, "PHY not found for PORT %c", port_name(port));
259 *phy = DPIO_PHY0;
260 *ch = DPIO_CH0;
261 }
262
263 void bxt_ddi_phy_set_signal_level(struct drm_i915_private *dev_priv,
264 enum port port, u32 margin, u32 scale,
265 u32 enable, u32 deemphasis)
266 {
267 u32 val;
268 enum dpio_phy phy;
269 enum dpio_channel ch;
270
271 bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
272
273 /*
274 * While we write to the group register to program all lanes at once, we
275 * can only read back individual lane registers, so we pick lanes 0/1 for that.
276 */
277 val = I915_READ(BXT_PORT_PCS_DW10_LN01(phy, ch));
278 val &= ~(TX2_SWING_CALC_INIT | TX1_SWING_CALC_INIT);
279 I915_WRITE(BXT_PORT_PCS_DW10_GRP(phy, ch), val);
280
281 val = I915_READ(BXT_PORT_TX_DW2_LN0(phy, ch));
282 val &= ~(MARGIN_000 | UNIQ_TRANS_SCALE);
283 val |= margin << MARGIN_000_SHIFT | scale << UNIQ_TRANS_SCALE_SHIFT;
284 I915_WRITE(BXT_PORT_TX_DW2_GRP(phy, ch), val);
285
286 val = I915_READ(BXT_PORT_TX_DW3_LN0(phy, ch));
287 val &= ~SCALE_DCOMP_METHOD;
288 if (enable)
289 val |= SCALE_DCOMP_METHOD;
290
291 if ((val & UNIQUE_TRANGE_EN_METHOD) && !(val & SCALE_DCOMP_METHOD))
292 DRM_ERROR("Disabled scaling while unique transition range method was set");
293
294 I915_WRITE(BXT_PORT_TX_DW3_GRP(phy, ch), val);
295
296 val = I915_READ(BXT_PORT_TX_DW4_LN0(phy, ch));
297 val &= ~DE_EMPHASIS;
298 val |= deemphasis << DEEMPH_SHIFT;
299 I915_WRITE(BXT_PORT_TX_DW4_GRP(phy, ch), val);
300
301 val = I915_READ(BXT_PORT_PCS_DW10_LN01(phy, ch));
302 val |= TX2_SWING_CALC_INIT | TX1_SWING_CALC_INIT;
303 I915_WRITE(BXT_PORT_PCS_DW10_GRP(phy, ch), val);
304 }
305
306 bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
307 enum dpio_phy phy)
308 {
309 const struct bxt_ddi_phy_info *phy_info;
310
311 phy_info = bxt_get_phy_info(dev_priv, phy);
312
313 if (!(I915_READ(BXT_P_CR_GT_DISP_PWRON) & phy_info->pwron_mask))
314 return false;
315
316 if ((I915_READ(BXT_PORT_CL1CM_DW0(phy)) &
317 (PHY_POWER_GOOD | PHY_RESERVED)) != PHY_POWER_GOOD) {
318 DRM_DEBUG_DRIVER("DDI PHY %d powered, but power hasn't settled\n",
319 phy);
320
321 return false;
322 }
323
324 if (!(I915_READ(BXT_PHY_CTL_FAMILY(phy)) & COMMON_RESET_DIS)) {
325 DRM_DEBUG_DRIVER("DDI PHY %d powered, but still in reset\n",
326 phy);
327
328 return false;
329 }
330
331 return true;
332 }
333
334 static u32 bxt_get_grc(struct drm_i915_private *dev_priv, enum dpio_phy phy)
335 {
336 u32 val = I915_READ(BXT_PORT_REF_DW6(phy));
337
338 return (val & GRC_CODE_MASK) >> GRC_CODE_SHIFT;
339 }
340
341 static void bxt_phy_wait_grc_done(struct drm_i915_private *dev_priv,
342 enum dpio_phy phy)
343 {
344 if (intel_wait_for_register(dev_priv,
345 BXT_PORT_REF_DW3(phy),
346 GRC_DONE, GRC_DONE,
347 10))
348 DRM_ERROR("timeout waiting for PHY%d GRC\n", phy);
349 }
350
351 static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv,
352 enum dpio_phy phy)
353 {
354 const struct bxt_ddi_phy_info *phy_info;
355 u32 val;
356
357 phy_info = bxt_get_phy_info(dev_priv, phy);
358
359 if (bxt_ddi_phy_is_enabled(dev_priv, phy)) {
360 /* Still read out the GRC value for state verification */
361 if (phy_info->rcomp_phy != -1)
362 dev_priv->bxt_phy_grc = bxt_get_grc(dev_priv, phy);
363
364 if (bxt_ddi_phy_verify_state(dev_priv, phy)) {
365 DRM_DEBUG_DRIVER("DDI PHY %d already enabled, "
366 "won't reprogram it\n", phy);
367 return;
368 }
369
370 DRM_DEBUG_DRIVER("DDI PHY %d enabled with invalid state, "
371 "force reprogramming it\n", phy);
372 }
373
374 val = I915_READ(BXT_P_CR_GT_DISP_PWRON);
375 val |= phy_info->pwron_mask;
376 I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val);
377
378 /*
379 * The PHY registers start out inaccessible and respond to reads with
380 * all 1s. Eventually they become accessible as they power up, then
381 * the reserved bit will give the default 0. Poll on the reserved bit
382 * becoming 0 to find when the PHY is accessible.
383 * HW team confirmed that the time to reach phypowergood status is
384 * anywhere between 50 us and 100 us.
385 */
386 if (wait_for_us(((I915_READ(BXT_PORT_CL1CM_DW0(phy)) &
387 (PHY_RESERVED | PHY_POWER_GOOD)) == PHY_POWER_GOOD), 100)) {
388 DRM_ERROR("timeout during PHY%d power on\n", phy);
389 }
390
391 /* Program PLL Rcomp code offset */
392 val = I915_READ(BXT_PORT_CL1CM_DW9(phy));
393 val &= ~IREF0RC_OFFSET_MASK;
394 val |= 0xE4 << IREF0RC_OFFSET_SHIFT;
395 I915_WRITE(BXT_PORT_CL1CM_DW9(phy), val);
396
397 val = I915_READ(BXT_PORT_CL1CM_DW10(phy));
398 val &= ~IREF1RC_OFFSET_MASK;
399 val |= 0xE4 << IREF1RC_OFFSET_SHIFT;
400 I915_WRITE(BXT_PORT_CL1CM_DW10(phy), val);
401
402 /* Program power gating */
403 val = I915_READ(BXT_PORT_CL1CM_DW28(phy));
404 val |= OCL1_POWER_DOWN_EN | DW28_OLDO_DYN_PWR_DOWN_EN |
405 SUS_CLK_CONFIG;
406 I915_WRITE(BXT_PORT_CL1CM_DW28(phy), val);
407
408 if (phy_info->dual_channel) {
409 val = I915_READ(BXT_PORT_CL2CM_DW6(phy));
410 val |= DW6_OLDO_DYN_PWR_DOWN_EN;
411 I915_WRITE(BXT_PORT_CL2CM_DW6(phy), val);
412 }
413
414 if (phy_info->rcomp_phy != -1) {
415 uint32_t grc_code;
416
417 bxt_phy_wait_grc_done(dev_priv, phy_info->rcomp_phy);
418
419 /*
420 * PHY0 isn't connected to an RCOMP resistor so copy over
421 * the corresponding calibrated value from PHY1, and disable
422 * the automatic calibration on PHY0.
423 */
424 val = dev_priv->bxt_phy_grc = bxt_get_grc(dev_priv,
425 phy_info->rcomp_phy);
426 grc_code = val << GRC_CODE_FAST_SHIFT |
427 val << GRC_CODE_SLOW_SHIFT |
428 val;
429 I915_WRITE(BXT_PORT_REF_DW6(phy), grc_code);
430
431 val = I915_READ(BXT_PORT_REF_DW8(phy));
432 val |= GRC_DIS | GRC_RDY_OVRD;
433 I915_WRITE(BXT_PORT_REF_DW8(phy), val);
434 }
435
436 if (phy_info->reset_delay)
437 udelay(phy_info->reset_delay);
438
439 val = I915_READ(BXT_PHY_CTL_FAMILY(phy));
440 val |= COMMON_RESET_DIS;
441 I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val);
442 }
443
444 void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy)
445 {
446 const struct bxt_ddi_phy_info *phy_info;
447 uint32_t val;
448
449 phy_info = bxt_get_phy_info(dev_priv, phy);
450
451 val = I915_READ(BXT_PHY_CTL_FAMILY(phy));
452 val &= ~COMMON_RESET_DIS;
453 I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val);
454
455 val = I915_READ(BXT_P_CR_GT_DISP_PWRON);
456 val &= ~phy_info->pwron_mask;
457 I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val);
458 }
459
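/*
 * Bring up @phy. If it borrows its GRC calibration from another PHY
 * (rcomp_phy != -1), that PHY is powered up first so the calibrated value
 * can be copied over, and is powered back down afterwards if it wasn't
 * already enabled. Callers hold dev_priv->power_domains.lock, as the
 * lockdep assertion below checks.
 */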
460 void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy)
461 {
462 const struct bxt_ddi_phy_info *phy_info =
463 bxt_get_phy_info(dev_priv, phy);
464 enum dpio_phy rcomp_phy = phy_info->rcomp_phy;
465 bool was_enabled;
466
467 lockdep_assert_held(&dev_priv->power_domains.lock);
468
469 if (rcomp_phy != -1) {
470 was_enabled = bxt_ddi_phy_is_enabled(dev_priv, rcomp_phy);
471
472 /*
473 * We need to copy the GRC calibration value from rcomp_phy,
474 * so make sure it's powered up.
475 */
476 if (!was_enabled)
477 _bxt_ddi_phy_init(dev_priv, rcomp_phy);
478 }
479
480 _bxt_ddi_phy_init(dev_priv, phy);
481
482 if (rcomp_phy != -1 && !was_enabled)
483 bxt_ddi_phy_uninit(dev_priv, phy_info->rcomp_phy);
484 }
485
486 static bool __printf(6, 7)
487 __phy_reg_verify_state(struct drm_i915_private *dev_priv, enum dpio_phy phy,
488 i915_reg_t reg, u32 mask, u32 expected,
489 const char *reg_fmt, ...)
490 {
491 struct va_format vaf;
492 va_list args;
493 u32 val;
494
495 val = I915_READ(reg);
496 if ((val & mask) == expected)
497 return true;
498
499 va_start(args, reg_fmt);
500 vaf.fmt = reg_fmt;
501 vaf.va = &args;
502
503 DRM_DEBUG_DRIVER("DDI PHY %d reg %pV [%08x] state mismatch: "
504 "current %08x, expected %08x (mask %08x)\n",
505 phy, &vaf, reg.reg, val, (val & ~mask) | expected,
506 mask);
507
508 va_end(args);
509
510 return false;
511 }
512
513 bool bxt_ddi_phy_verify_state(struct drm_i915_private *dev_priv,
514 enum dpio_phy phy)
515 {
516 const struct bxt_ddi_phy_info *phy_info;
517 uint32_t mask;
518 bool ok;
519
520 phy_info = bxt_get_phy_info(dev_priv, phy);
521
522 #define _CHK(reg, mask, exp, fmt, ...) \
523 __phy_reg_verify_state(dev_priv, phy, reg, mask, exp, fmt, \
524 ## __VA_ARGS__)
525
526 if (!bxt_ddi_phy_is_enabled(dev_priv, phy))
527 return false;
528
529 ok = true;
530
531 /* PLL Rcomp code offset */
532 ok &= _CHK(BXT_PORT_CL1CM_DW9(phy),
533 IREF0RC_OFFSET_MASK, 0xe4 << IREF0RC_OFFSET_SHIFT,
534 "BXT_PORT_CL1CM_DW9(%d)", phy);
535 ok &= _CHK(BXT_PORT_CL1CM_DW10(phy),
536 IREF1RC_OFFSET_MASK, 0xe4 << IREF1RC_OFFSET_SHIFT,
537 "BXT_PORT_CL1CM_DW10(%d)", phy);
538
539 /* Power gating */
540 mask = OCL1_POWER_DOWN_EN | DW28_OLDO_DYN_PWR_DOWN_EN | SUS_CLK_CONFIG;
541 ok &= _CHK(BXT_PORT_CL1CM_DW28(phy), mask, mask,
542 "BXT_PORT_CL1CM_DW28(%d)", phy);
543
544 if (phy_info->dual_channel)
545 ok &= _CHK(BXT_PORT_CL2CM_DW6(phy),
546 DW6_OLDO_DYN_PWR_DOWN_EN, DW6_OLDO_DYN_PWR_DOWN_EN,
547 "BXT_PORT_CL2CM_DW6(%d)", phy);
548
549 if (phy_info->rcomp_phy != -1) {
550 u32 grc_code = dev_priv->bxt_phy_grc;
551
552 grc_code = grc_code << GRC_CODE_FAST_SHIFT |
553 grc_code << GRC_CODE_SLOW_SHIFT |
554 grc_code;
555 mask = GRC_CODE_FAST_MASK | GRC_CODE_SLOW_MASK |
556 GRC_CODE_NOM_MASK;
557 ok &= _CHK(BXT_PORT_REF_DW6(phy), mask, grc_code,
558 "BXT_PORT_REF_DW6(%d)", phy);
559
560 mask = GRC_DIS | GRC_RDY_OVRD;
561 ok &= _CHK(BXT_PORT_REF_DW8(phy), mask, mask,
562 "BXT_PORT_REF_DW8(%d)", phy);
563 }
564
565 return ok;
566 #undef _CHK
567 }
568
569 uint8_t
570 bxt_ddi_phy_calc_lane_lat_optim_mask(struct intel_encoder *encoder,
571 uint8_t lane_count)
572 {
573 switch (lane_count) {
574 case 1:
575 return 0;
576 case 2:
577 return BIT(2) | BIT(0);
578 case 4:
579 return BIT(3) | BIT(2) | BIT(0);
580 default:
581 MISSING_CASE(lane_count);
582
583 return 0;
584 }
585 }
586
587 void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder,
588 uint8_t lane_lat_optim_mask)
589 {
590 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
591 struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
592 enum port port = dport->port;
593 enum dpio_phy phy;
594 enum dpio_channel ch;
595 int lane;
596
597 bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
598
599 for (lane = 0; lane < 4; lane++) {
600 u32 val = I915_READ(BXT_PORT_TX_DW14_LN(phy, ch, lane));
601
602 /*
603 * Note that on CHV this flag is called UPAR, but has
604 * the same function.
605 */
606 val &= ~LATENCY_OPTIM;
607 if (lane_lat_optim_mask & BIT(lane))
608 val |= LATENCY_OPTIM;
609
610 I915_WRITE(BXT_PORT_TX_DW14_LN(phy, ch, lane), val);
611 }
612 }
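
/*
 * Illustrative usage sketch (assumption: the DDI port code is the caller):
 * compute the per-lane latency optimization mask for a given lane count,
 * program it, and read it back for state verification.
 *
 *	uint8_t mask = bxt_ddi_phy_calc_lane_lat_optim_mask(encoder, 4);
 *
 *	bxt_ddi_phy_set_lane_optim_mask(encoder, mask);
 *	WARN_ON(bxt_ddi_phy_get_lane_lat_optim_mask(encoder) != mask);
 */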
613
614 uint8_t
615 bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder)
616 {
617 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
618 struct drm_i915_private *dev_priv = to_i915(dport->base.base.dev);
619 enum port port = dport->port;
620 enum dpio_phy phy;
621 enum dpio_channel ch;
622 int lane;
623 uint8_t mask;
624
625 bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
626
627 mask = 0;
628 for (lane = 0; lane < 4; lane++) {
629 u32 val = I915_READ(BXT_PORT_TX_DW14_LN(phy, ch, lane));
630
631 if (val & LATENCY_OPTIM)
632 mask |= BIT(lane);
633 }
634
635 return mask;
636 }
637
638
639 void chv_set_phy_signal_level(struct intel_encoder *encoder,
640 u32 deemph_reg_value, u32 margin_reg_value,
641 bool uniq_trans_scale)
642 {
643 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
644 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
645 struct intel_crtc *intel_crtc = to_intel_crtc(dport->base.base.crtc);
646 enum dpio_channel ch = vlv_dport_to_channel(dport);
647 enum pipe pipe = intel_crtc->pipe;
648 u32 val;
649 int i;
650
651 mutex_lock(&dev_priv->sb_lock);
652
653 /* Clear calc init */
654 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
655 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
656 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
657 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
658 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
659
660 if (intel_crtc->config->lane_count > 2) {
661 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
662 val &= ~(DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3);
663 val &= ~(DPIO_PCS_TX1DEEMP_MASK | DPIO_PCS_TX2DEEMP_MASK);
664 val |= DPIO_PCS_TX1DEEMP_9P5 | DPIO_PCS_TX2DEEMP_9P5;
665 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
666 }
667
668 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW9(ch));
669 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
670 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
671 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW9(ch), val);
672
673 if (intel_crtc->config->lane_count > 2) {
674 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW9(ch));
675 val &= ~(DPIO_PCS_TX1MARGIN_MASK | DPIO_PCS_TX2MARGIN_MASK);
676 val |= DPIO_PCS_TX1MARGIN_000 | DPIO_PCS_TX2MARGIN_000;
677 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW9(ch), val);
678 }
679
680 /* Program swing deemph */
681 for (i = 0; i < intel_crtc->config->lane_count; i++) {
682 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW4(ch, i));
683 val &= ~DPIO_SWING_DEEMPH9P5_MASK;
684 val |= deemph_reg_value << DPIO_SWING_DEEMPH9P5_SHIFT;
685 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW4(ch, i), val);
686 }
687
688 /* Program swing margin */
689 for (i = 0; i < intel_crtc->config->lane_count; i++) {
690 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW2(ch, i));
691
692 val &= ~DPIO_SWING_MARGIN000_MASK;
693 val |= margin_reg_value << DPIO_SWING_MARGIN000_SHIFT;
694
695 /*
696 * Supposedly this value shouldn't matter when unique transition
697 * scale is disabled, but in fact it does matter. Let's just
698 * always program the same value and hope it's OK.
699 */
700 val &= ~(0xff << DPIO_UNIQ_TRANS_SCALE_SHIFT);
701 val |= 0x9a << DPIO_UNIQ_TRANS_SCALE_SHIFT;
702
703 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW2(ch, i), val);
704 }
705
706 /*
707 * The document said it needs to set bit 27 for ch0 and bit 26
708 * for ch1. Might be a typo in the doc.
709 * For now, for this unique transition scale selection, set bit
710 * 27 for ch0 and ch1.
711 */
712 for (i = 0; i < intel_crtc->config->lane_count; i++) {
713 val = vlv_dpio_read(dev_priv, pipe, CHV_TX_DW3(ch, i));
714 if (uniq_trans_scale)
715 val |= DPIO_TX_UNIQ_TRANS_SCALE_EN;
716 else
717 val &= ~DPIO_TX_UNIQ_TRANS_SCALE_EN;
718 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW3(ch, i), val);
719 }
720
721 /* Start swing calculation */
722 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW10(ch));
723 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
724 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW10(ch), val);
725
726 if (intel_crtc->config->lane_count > 2) {
727 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW10(ch));
728 val |= DPIO_PCS_SWING_CALC_TX0_TX2 | DPIO_PCS_SWING_CALC_TX1_TX3;
729 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW10(ch), val);
730 }
731
732 mutex_unlock(&dev_priv->sb_lock);
733
734 }
735
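/*
 * Assert (reset == true) or deassert the TX lane and clock soft resets for
 * the PCS blocks of the channel driving this encoder. PCS01 is always
 * handled; PCS23 only when more than two lanes are in use.
 */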
736 void chv_data_lane_soft_reset(struct intel_encoder *encoder,
737 bool reset)
738 {
739 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
740 enum dpio_channel ch = vlv_dport_to_channel(enc_to_dig_port(&encoder->base));
741 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
742 enum pipe pipe = crtc->pipe;
743 uint32_t val;
744
745 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW0(ch));
746 if (reset)
747 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
748 else
749 val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
750 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW0(ch), val);
751
752 if (crtc->config->lane_count > 2) {
753 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW0(ch));
754 if (reset)
755 val &= ~(DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET);
756 else
757 val |= DPIO_PCS_TX_LANE2_RESET | DPIO_PCS_TX_LANE1_RESET;
758 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW0(ch), val);
759 }
760
761 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW1(ch));
762 val |= CHV_PCS_REQ_SOFTRESET_EN;
763 if (reset)
764 val &= ~DPIO_PCS_CLK_SOFT_RESET;
765 else
766 val |= DPIO_PCS_CLK_SOFT_RESET;
767 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW1(ch), val);
768
769 if (crtc->config->lane_count > 2) {
770 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW1(ch));
771 val |= CHV_PCS_REQ_SOFTRESET_EN;
772 if (reset)
773 val &= ~DPIO_PCS_CLK_SOFT_RESET;
774 else
775 val |= DPIO_PCS_CLK_SOFT_RESET;
776 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW1(ch), val);
777 }
778 }
779
780 void chv_phy_pre_pll_enable(struct intel_encoder *encoder)
781 {
782 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
783 struct drm_device *dev = encoder->base.dev;
784 struct drm_i915_private *dev_priv = to_i915(dev);
785 struct intel_crtc *intel_crtc =
786 to_intel_crtc(encoder->base.crtc);
787 enum dpio_channel ch = vlv_dport_to_channel(dport);
788 enum pipe pipe = intel_crtc->pipe;
789 unsigned int lane_mask =
790 intel_dp_unused_lane_mask(intel_crtc->config->lane_count);
791 u32 val;
792
793 /*
794 * Must trick the second common lane into life.
795 * Otherwise we can't even access the PLL.
796 */
797 if (ch == DPIO_CH0 && pipe == PIPE_B)
798 dport->release_cl2_override =
799 !chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, true);
800
801 chv_phy_powergate_lanes(encoder, true, lane_mask);
802
803 mutex_lock(&dev_priv->sb_lock);
804
805 /* Assert data lane reset */
806 chv_data_lane_soft_reset(encoder, true);
807
808 /* program left/right clock distribution */
809 if (pipe != PIPE_B) {
810 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
811 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
812 if (ch == DPIO_CH0)
813 val |= CHV_BUFLEFTENA1_FORCE;
814 if (ch == DPIO_CH1)
815 val |= CHV_BUFRIGHTENA1_FORCE;
816 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
817 } else {
818 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
819 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
820 if (ch == DPIO_CH0)
821 val |= CHV_BUFLEFTENA2_FORCE;
822 if (ch == DPIO_CH1)
823 val |= CHV_BUFRIGHTENA2_FORCE;
824 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
825 }
826
827 /* program clock channel usage */
828 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(ch));
829 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
830 if (pipe != PIPE_B)
831 val &= ~CHV_PCS_USEDCLKCHANNEL;
832 else
833 val |= CHV_PCS_USEDCLKCHANNEL;
834 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW8(ch), val);
835
836 if (intel_crtc->config->lane_count > 2) {
837 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW8(ch));
838 val |= CHV_PCS_USEDCLKCHANNEL_OVRRIDE;
839 if (pipe != PIPE_B)
840 val &= ~CHV_PCS_USEDCLKCHANNEL;
841 else
842 val |= CHV_PCS_USEDCLKCHANNEL;
843 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW8(ch), val);
844 }
845
846 /*
847 * This is a bit weird since generally CL
848 * matches the pipe, but here we need to
849 * pick the CL based on the port.
850 */
851 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW19(ch));
852 if (pipe != PIPE_B)
853 val &= ~CHV_CMN_USEDCLKCHANNEL;
854 else
855 val |= CHV_CMN_USEDCLKCHANNEL;
856 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW19(ch), val);
857
858 mutex_unlock(&dev_priv->sb_lock);
859 }
860
861 void chv_phy_pre_encoder_enable(struct intel_encoder *encoder)
862 {
863 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
864 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
865 struct drm_device *dev = encoder->base.dev;
866 struct drm_i915_private *dev_priv = to_i915(dev);
867 struct intel_crtc *intel_crtc =
868 to_intel_crtc(encoder->base.crtc);
869 enum dpio_channel ch = vlv_dport_to_channel(dport);
870 int pipe = intel_crtc->pipe;
871 int data, i, stagger;
872 u32 val;
873
874 mutex_lock(&dev_priv->sb_lock);
875
876 /* allow hardware to manage TX FIFO reset source */
877 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
878 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
879 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
880
881 if (intel_crtc->config->lane_count > 2) {
882 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
883 val &= ~DPIO_LANEDESKEW_STRAP_OVRD;
884 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
885 }
886
887 /* Program Tx lane latency optimal setting */
888 for (i = 0; i < intel_crtc->config->lane_count; i++) {
889 /* Set the upar bit */
890 if (intel_crtc->config->lane_count == 1)
891 data = 0x0;
892 else
893 data = (i == 1) ? 0x0 : 0x1;
894 vlv_dpio_write(dev_priv, pipe, CHV_TX_DW14(ch, i),
895 data << DPIO_UPAR_SHIFT);
896 }
897
898 /* Data lane stagger programming */
899 if (intel_crtc->config->port_clock > 270000)
900 stagger = 0x18;
901 else if (intel_crtc->config->port_clock > 135000)
902 stagger = 0xd;
903 else if (intel_crtc->config->port_clock > 67500)
904 stagger = 0x7;
905 else if (intel_crtc->config->port_clock > 33750)
906 stagger = 0x4;
907 else
908 stagger = 0x2;
909
910 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW11(ch));
911 val |= DPIO_TX2_STAGGER_MASK(0x1f);
912 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW11(ch), val);
913
914 if (intel_crtc->config->lane_count > 2) {
915 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS23_DW11(ch));
916 val |= DPIO_TX2_STAGGER_MASK(0x1f);
917 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW11(ch), val);
918 }
919
920 vlv_dpio_write(dev_priv, pipe, VLV_PCS01_DW12(ch),
921 DPIO_LANESTAGGER_STRAP(stagger) |
922 DPIO_LANESTAGGER_STRAP_OVRD |
923 DPIO_TX1_STAGGER_MASK(0x1f) |
924 DPIO_TX1_STAGGER_MULT(6) |
925 DPIO_TX2_STAGGER_MULT(0));
926
927 if (intel_crtc->config->lane_count > 2) {
928 vlv_dpio_write(dev_priv, pipe, VLV_PCS23_DW12(ch),
929 DPIO_LANESTAGGER_STRAP(stagger) |
930 DPIO_LANESTAGGER_STRAP_OVRD |
931 DPIO_TX1_STAGGER_MASK(0x1f) |
932 DPIO_TX1_STAGGER_MULT(7) |
933 DPIO_TX2_STAGGER_MULT(5));
934 }
935
936 /* Deassert data lane reset */
937 chv_data_lane_soft_reset(encoder, false);
938
939 mutex_unlock(&dev_priv->sb_lock);
940 }
941
942 void chv_phy_release_cl2_override(struct intel_encoder *encoder)
943 {
944 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
945 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
946
947 if (dport->release_cl2_override) {
948 chv_phy_powergate_ch(dev_priv, DPIO_PHY0, DPIO_CH1, false);
949 dport->release_cl2_override = false;
950 }
951 }
952
953 void chv_phy_post_pll_disable(struct intel_encoder *encoder)
954 {
955 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
956 enum pipe pipe = to_intel_crtc(encoder->base.crtc)->pipe;
957 u32 val;
958
959 mutex_lock(&dev_priv->sb_lock);
960
961 /* disable left/right clock distribution */
962 if (pipe != PIPE_B) {
963 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW5_CH0);
964 val &= ~(CHV_BUFLEFTENA1_MASK | CHV_BUFRIGHTENA1_MASK);
965 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW5_CH0, val);
966 } else {
967 val = vlv_dpio_read(dev_priv, pipe, _CHV_CMN_DW1_CH1);
968 val &= ~(CHV_BUFLEFTENA2_MASK | CHV_BUFRIGHTENA2_MASK);
969 vlv_dpio_write(dev_priv, pipe, _CHV_CMN_DW1_CH1, val);
970 }
971
972 mutex_unlock(&dev_priv->sb_lock);
973
974 /*
975 * Leave the power down bit cleared for at least one
976 * lane so that chv_powergate_phy_ch() will power
977 * on something when the channel is otherwise unused.
978 * When the port is off and the override is removed
979 * the lanes power down anyway, so otherwise it doesn't
980 * really matter what the state of power down bits is
981 * after this.
982 */
983 chv_phy_powergate_lanes(encoder, false, 0x0);
984 }
985
986 void vlv_set_phy_signal_level(struct intel_encoder *encoder,
987 u32 demph_reg_value, u32 preemph_reg_value,
988 u32 uniqtranscale_reg_value, u32 tx3_demph)
989 {
990 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
991 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
992 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
993 enum dpio_channel port = vlv_dport_to_channel(dport);
994 int pipe = intel_crtc->pipe;
995
996 mutex_lock(&dev_priv->sb_lock);
997 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), 0x00000000);
998 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW4(port), demph_reg_value);
999 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW2(port),
1000 uniqtranscale_reg_value);
1001 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW3(port), 0x0C782040);
1002
1003 if (tx3_demph)
1004 vlv_dpio_write(dev_priv, pipe, VLV_TX3_DW4(port), tx3_demph);
1005
1006 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW11(port), 0x00030000);
1007 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW9(port), preemph_reg_value);
1008 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW5(port), DPIO_TX_OCALINIT_EN);
1009 mutex_unlock(&dev_priv->sb_lock);
1010 }
1011
1012 void vlv_phy_pre_pll_enable(struct intel_encoder *encoder)
1013 {
1014 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
1015 struct drm_device *dev = encoder->base.dev;
1016 struct drm_i915_private *dev_priv = to_i915(dev);
1017 struct intel_crtc *intel_crtc =
1018 to_intel_crtc(encoder->base.crtc);
1019 enum dpio_channel port = vlv_dport_to_channel(dport);
1020 int pipe = intel_crtc->pipe;
1021
1022 /* Program Tx lane resets to default */
1023 mutex_lock(&dev_priv->sb_lock);
1024 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port),
1025 DPIO_PCS_TX_LANE2_RESET |
1026 DPIO_PCS_TX_LANE1_RESET);
1027 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port),
1028 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
1029 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
1030 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
1031 DPIO_PCS_CLK_SOFT_RESET);
1032
1033 /* Fix up inter-pair skew failure */
1034 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW12(port), 0x00750f00);
1035 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW11(port), 0x00001500);
1036 vlv_dpio_write(dev_priv, pipe, VLV_TX_DW14(port), 0x40400000);
1037 mutex_unlock(&dev_priv->sb_lock);
1038 }
1039
1040 void vlv_phy_pre_encoder_enable(struct intel_encoder *encoder)
1041 {
1042 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1043 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
1044 struct drm_device *dev = encoder->base.dev;
1045 struct drm_i915_private *dev_priv = to_i915(dev);
1046 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->base.crtc);
1047 enum dpio_channel port = vlv_dport_to_channel(dport);
1048 int pipe = intel_crtc->pipe;
1049 u32 val;
1050
1051 mutex_lock(&dev_priv->sb_lock);
1052
1053 /* Enable clock channels for this port */
1054 val = vlv_dpio_read(dev_priv, pipe, VLV_PCS01_DW8(port));
1055 val = 0;
1056 if (pipe)
1057 val |= (1<<21);
1058 else
1059 val &= ~(1<<21);
1060 val |= 0x001000c4;
1061 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW8(port), val);
1062
1063 /* Program lane clock */
1064 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW14(port), 0x00760018);
1065 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW23(port), 0x00400888);
1066
1067 mutex_unlock(&dev_priv->sb_lock);
1068 }
1069
1070 void vlv_phy_reset_lanes(struct intel_encoder *encoder)
1071 {
1072 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
1073 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
1074 struct intel_crtc *intel_crtc =
1075 to_intel_crtc(encoder->base.crtc);
1076 enum dpio_channel port = vlv_dport_to_channel(dport);
1077 int pipe = intel_crtc->pipe;
1078
1079 mutex_lock(&dev_priv->sb_lock);
1080 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW0(port), 0x00000000);
1081 vlv_dpio_write(dev_priv, pipe, VLV_PCS_DW1(port), 0x00e00060);
1082 mutex_unlock(&dev_priv->sb_lock);
1083 }