]> git.proxmox.com Git - mirror_ubuntu-zesty-kernel.git/blame - drivers/gpu/drm/i915/intel_dp.c
drm/i915: Hook PSR functionality
[mirror_ubuntu-zesty-kernel.git] / drivers / gpu / drm / i915 / intel_dp.c
CommitLineData
a4fc5ed6
KP
1/*
2 * Copyright © 2008 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Keith Packard <keithp@keithp.com>
25 *
26 */
27
28#include <linux/i2c.h>
5a0e3ad6 29#include <linux/slab.h>
2d1a8a48 30#include <linux/export.h>
760285e7
DH
31#include <drm/drmP.h>
32#include <drm/drm_crtc.h>
33#include <drm/drm_crtc_helper.h>
34#include <drm/drm_edid.h>
a4fc5ed6 35#include "intel_drv.h"
760285e7 36#include <drm/i915_drm.h>
a4fc5ed6 37#include "i915_drv.h"
a4fc5ed6 38
a4fc5ed6
KP
39#define DP_LINK_CHECK_TIMEOUT (10 * 1000)
40
cfcb0fc9
JB
41/**
42 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
43 * @intel_dp: DP struct
44 *
45 * If a CPU or PCH DP output is attached to an eDP panel, this function
46 * will return true, and false otherwise.
47 */
48static bool is_edp(struct intel_dp *intel_dp)
49{
da63a9f2
PZ
50 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
51
52 return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
cfcb0fc9
JB
53}
54
68b4d824 55static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
cfcb0fc9 56{
68b4d824
ID
57 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
58
59 return intel_dig_port->base.base.dev;
cfcb0fc9
JB
60}
61
df0e9248
CW
62static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
63{
fa90ecef 64 return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
df0e9248
CW
65}
66
ea5b213a 67static void intel_dp_link_down(struct intel_dp *intel_dp);
a4fc5ed6 68
a4fc5ed6 69static int
ea5b213a 70intel_dp_max_link_bw(struct intel_dp *intel_dp)
a4fc5ed6 71{
7183dc29 72 int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];
a4fc5ed6
KP
73
74 switch (max_link_bw) {
75 case DP_LINK_BW_1_62:
76 case DP_LINK_BW_2_7:
77 break;
78 default:
79 max_link_bw = DP_LINK_BW_1_62;
80 break;
81 }
82 return max_link_bw;
83}
84
cd9dde44
AJ
85/*
86 * The units on the numbers in the next two are... bizarre. Examples will
87 * make it clearer; this one parallels an example in the eDP spec.
88 *
89 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
90 *
91 * 270000 * 1 * 8 / 10 == 216000
92 *
93 * The actual data capacity of that configuration is 2.16Gbit/s, so the
94 * units are decakilobits. ->clock in a drm_display_mode is in kilohertz -
95 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
96 * 119000. At 18bpp that's 2142000 kilobits per second.
97 *
98 * Thus the strange-looking division by 10 in intel_dp_link_required, to
99 * get the result in decakilobits instead of kilobits.
100 */
101
a4fc5ed6 102static int
c898261c 103intel_dp_link_required(int pixel_clock, int bpp)
a4fc5ed6 104{
cd9dde44 105 return (pixel_clock * bpp + 9) / 10;
a4fc5ed6
KP
106}
107
fe27d53e
DA
108static int
109intel_dp_max_data_rate(int max_link_clock, int max_lanes)
110{
111 return (max_link_clock * max_lanes * 8) / 10;
112}
113
a4fc5ed6
KP
114static int
115intel_dp_mode_valid(struct drm_connector *connector,
116 struct drm_display_mode *mode)
117{
df0e9248 118 struct intel_dp *intel_dp = intel_attached_dp(connector);
dd06f90e
JN
119 struct intel_connector *intel_connector = to_intel_connector(connector);
120 struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
36008365
DV
121 int target_clock = mode->clock;
122 int max_rate, mode_rate, max_lanes, max_link_clock;
a4fc5ed6 123
dd06f90e
JN
124 if (is_edp(intel_dp) && fixed_mode) {
125 if (mode->hdisplay > fixed_mode->hdisplay)
7de56f43
ZY
126 return MODE_PANEL;
127
dd06f90e 128 if (mode->vdisplay > fixed_mode->vdisplay)
7de56f43 129 return MODE_PANEL;
03afc4a2
DV
130
131 target_clock = fixed_mode->clock;
7de56f43
ZY
132 }
133
36008365
DV
134 max_link_clock = drm_dp_bw_code_to_link_rate(intel_dp_max_link_bw(intel_dp));
135 max_lanes = drm_dp_max_lane_count(intel_dp->dpcd);
136
137 max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
138 mode_rate = intel_dp_link_required(target_clock, 18);
139
140 if (mode_rate > max_rate)
c4867936 141 return MODE_CLOCK_HIGH;
a4fc5ed6
KP
142
143 if (mode->clock < 10000)
144 return MODE_CLOCK_LOW;
145
0af78a2b
DV
146 if (mode->flags & DRM_MODE_FLAG_DBLCLK)
147 return MODE_H_ILLEGAL;
148
a4fc5ed6
KP
149 return MODE_OK;
150}
151
152static uint32_t
153pack_aux(uint8_t *src, int src_bytes)
154{
155 int i;
156 uint32_t v = 0;
157
158 if (src_bytes > 4)
159 src_bytes = 4;
160 for (i = 0; i < src_bytes; i++)
161 v |= ((uint32_t) src[i]) << ((3-i) * 8);
162 return v;
163}
164
165static void
166unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
167{
168 int i;
169 if (dst_bytes > 4)
170 dst_bytes = 4;
171 for (i = 0; i < dst_bytes; i++)
172 dst[i] = src >> ((3-i) * 8);
173}
174
fb0f8fbf
KP
175/* hrawclock is 1/4 the FSB frequency */
176static int
177intel_hrawclk(struct drm_device *dev)
178{
179 struct drm_i915_private *dev_priv = dev->dev_private;
180 uint32_t clkcfg;
181
9473c8f4
VP
182 /* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
183 if (IS_VALLEYVIEW(dev))
184 return 200;
185
fb0f8fbf
KP
186 clkcfg = I915_READ(CLKCFG);
187 switch (clkcfg & CLKCFG_FSB_MASK) {
188 case CLKCFG_FSB_400:
189 return 100;
190 case CLKCFG_FSB_533:
191 return 133;
192 case CLKCFG_FSB_667:
193 return 166;
194 case CLKCFG_FSB_800:
195 return 200;
196 case CLKCFG_FSB_1067:
197 return 266;
198 case CLKCFG_FSB_1333:
199 return 333;
200 /* these two are just a guess; one of them might be right */
201 case CLKCFG_FSB_1600:
202 case CLKCFG_FSB_1600_ALT:
203 return 400;
204 default:
205 return 133;
206 }
207}
208
ebf33b18
KP
209static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp)
210{
30add22d 211 struct drm_device *dev = intel_dp_to_dev(intel_dp);
ebf33b18 212 struct drm_i915_private *dev_priv = dev->dev_private;
453c5420 213 u32 pp_stat_reg;
ebf33b18 214
453c5420
JB
215 pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
216 return (I915_READ(pp_stat_reg) & PP_ON) != 0;
ebf33b18
KP
217}
218
219static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp)
220{
30add22d 221 struct drm_device *dev = intel_dp_to_dev(intel_dp);
ebf33b18 222 struct drm_i915_private *dev_priv = dev->dev_private;
453c5420 223 u32 pp_ctrl_reg;
ebf33b18 224
453c5420
JB
225 pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
226 return (I915_READ(pp_ctrl_reg) & EDP_FORCE_VDD) != 0;
ebf33b18
KP
227}
228
9b984dae
KP
229static void
230intel_dp_check_edp(struct intel_dp *intel_dp)
231{
30add22d 232 struct drm_device *dev = intel_dp_to_dev(intel_dp);
9b984dae 233 struct drm_i915_private *dev_priv = dev->dev_private;
453c5420 234 u32 pp_stat_reg, pp_ctrl_reg;
ebf33b18 235
9b984dae
KP
236 if (!is_edp(intel_dp))
237 return;
453c5420
JB
238
239 pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
240 pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
241
ebf33b18 242 if (!ironlake_edp_have_panel_power(intel_dp) && !ironlake_edp_have_panel_vdd(intel_dp)) {
9b984dae
KP
243 WARN(1, "eDP powered off while attempting aux channel communication.\n");
244 DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
453c5420
JB
245 I915_READ(pp_stat_reg),
246 I915_READ(pp_ctrl_reg));
9b984dae
KP
247 }
248}
249
9ee32fea
DV
250static uint32_t
251intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
252{
253 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
254 struct drm_device *dev = intel_dig_port->base.base.dev;
255 struct drm_i915_private *dev_priv = dev->dev_private;
9ed35ab1 256 uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
9ee32fea
DV
257 uint32_t status;
258 bool done;
259
ef04f00d 260#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
9ee32fea 261 if (has_aux_irq)
b18ac466 262 done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
3598706b 263 msecs_to_jiffies_timeout(10));
9ee32fea
DV
264 else
265 done = wait_for_atomic(C, 10) == 0;
266 if (!done)
267 DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
268 has_aux_irq);
269#undef C
270
271 return status;
272}
273
b84a1cf8 274static uint32_t get_aux_clock_divider(struct intel_dp *intel_dp)
a4fc5ed6 275{
174edf1f
PZ
276 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
277 struct drm_device *dev = intel_dig_port->base.base.dev;
a4fc5ed6 278 struct drm_i915_private *dev_priv = dev->dev_private;
9ee32fea 279
a4fc5ed6 280 /* The clock divider is based off the hrawclk,
fb0f8fbf
KP
281 * and would like to run at 2MHz. So, take the
282 * hrawclk value and divide by 2 and use that
6176b8f9
JB
283 *
284 * Note that PCH attached eDP panels should use a 125MHz input
285 * clock divider.
a4fc5ed6 286 */
a62d0834 287 if (IS_VALLEYVIEW(dev)) {
b84a1cf8 288 return 100;
a62d0834 289 } else if (intel_dig_port->port == PORT_A) {
affa9354 290 if (HAS_DDI(dev))
b84a1cf8 291 return DIV_ROUND_CLOSEST(
b2b877ff 292 intel_ddi_get_cdclk_freq(dev_priv), 2000);
9473c8f4 293 else if (IS_GEN6(dev) || IS_GEN7(dev))
b84a1cf8 294 return 200; /* SNB & IVB eDP input clock at 400Mhz */
e3421a18 295 else
b84a1cf8 296 return 225; /* eDP input clock at 450Mhz */
2c55c336
JN
297 } else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
298 /* Workaround for non-ULT HSW */
b84a1cf8 299 return 74;
2c55c336 300 } else if (HAS_PCH_SPLIT(dev)) {
b84a1cf8 301 return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
2c55c336 302 } else {
b84a1cf8 303 return intel_hrawclk(dev) / 2;
2c55c336 304 }
b84a1cf8
RV
305}
306
307static int
308intel_dp_aux_ch(struct intel_dp *intel_dp,
309 uint8_t *send, int send_bytes,
310 uint8_t *recv, int recv_size)
311{
312 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
313 struct drm_device *dev = intel_dig_port->base.base.dev;
314 struct drm_i915_private *dev_priv = dev->dev_private;
315 uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
316 uint32_t ch_data = ch_ctl + 4;
317 int i, ret, recv_bytes;
318 uint32_t status;
319 uint32_t aux_clock_divider = get_aux_clock_divider(intel_dp);
320 int try, precharge;
321 bool has_aux_irq = INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev);
322
323 /* dp aux is extremely sensitive to irq latency, hence request the
324 * lowest possible wakeup latency and so prevent the cpu from going into
325 * deep sleep states.
326 */
327 pm_qos_update_request(&dev_priv->pm_qos, 0);
328
329 intel_dp_check_edp(intel_dp);
5eb08b69 330
6b4e0a93
DV
331 if (IS_GEN6(dev))
332 precharge = 3;
333 else
334 precharge = 5;
335
11bee43e
JB
336 /* Try to wait for any previous AUX channel activity */
337 for (try = 0; try < 3; try++) {
ef04f00d 338 status = I915_READ_NOTRACE(ch_ctl);
11bee43e
JB
339 if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
340 break;
341 msleep(1);
342 }
343
344 if (try == 3) {
345 WARN(1, "dp_aux_ch not started status 0x%08x\n",
346 I915_READ(ch_ctl));
9ee32fea
DV
347 ret = -EBUSY;
348 goto out;
4f7f7b7e
CW
349 }
350
fb0f8fbf
KP
351 /* Must try at least 3 times according to DP spec */
352 for (try = 0; try < 5; try++) {
353 /* Load the send data into the aux channel data registers */
4f7f7b7e
CW
354 for (i = 0; i < send_bytes; i += 4)
355 I915_WRITE(ch_data + i,
356 pack_aux(send + i, send_bytes - i));
0206e353 357
fb0f8fbf 358 /* Send the command and wait for it to complete */
4f7f7b7e
CW
359 I915_WRITE(ch_ctl,
360 DP_AUX_CH_CTL_SEND_BUSY |
9ee32fea 361 (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
4f7f7b7e
CW
362 DP_AUX_CH_CTL_TIME_OUT_400us |
363 (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
364 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
365 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
366 DP_AUX_CH_CTL_DONE |
367 DP_AUX_CH_CTL_TIME_OUT_ERROR |
368 DP_AUX_CH_CTL_RECEIVE_ERROR);
9ee32fea
DV
369
370 status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);
0206e353 371
fb0f8fbf 372 /* Clear done status and any errors */
4f7f7b7e
CW
373 I915_WRITE(ch_ctl,
374 status |
375 DP_AUX_CH_CTL_DONE |
376 DP_AUX_CH_CTL_TIME_OUT_ERROR |
377 DP_AUX_CH_CTL_RECEIVE_ERROR);
d7e96fea
AJ
378
379 if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
380 DP_AUX_CH_CTL_RECEIVE_ERROR))
381 continue;
4f7f7b7e 382 if (status & DP_AUX_CH_CTL_DONE)
a4fc5ed6
KP
383 break;
384 }
385
a4fc5ed6 386 if ((status & DP_AUX_CH_CTL_DONE) == 0) {
1ae8c0a5 387 DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
9ee32fea
DV
388 ret = -EBUSY;
389 goto out;
a4fc5ed6
KP
390 }
391
392 /* Check for timeout or receive error.
393 * Timeouts occur when the sink is not connected
394 */
a5b3da54 395 if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
1ae8c0a5 396 DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
9ee32fea
DV
397 ret = -EIO;
398 goto out;
a5b3da54 399 }
1ae8c0a5
KP
400
401 /* Timeouts occur when the device isn't connected, so they're
402 * "normal" -- don't fill the kernel log with these */
a5b3da54 403 if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
28c97730 404 DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
9ee32fea
DV
405 ret = -ETIMEDOUT;
406 goto out;
a4fc5ed6
KP
407 }
408
409 /* Unload any bytes sent back from the other side */
410 recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
411 DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
a4fc5ed6
KP
412 if (recv_bytes > recv_size)
413 recv_bytes = recv_size;
0206e353 414
4f7f7b7e
CW
415 for (i = 0; i < recv_bytes; i += 4)
416 unpack_aux(I915_READ(ch_data + i),
417 recv + i, recv_bytes - i);
a4fc5ed6 418
9ee32fea
DV
419 ret = recv_bytes;
420out:
421 pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);
422
423 return ret;
a4fc5ed6
KP
424}
425
426/* Write data to the aux channel in native mode */
427static int
ea5b213a 428intel_dp_aux_native_write(struct intel_dp *intel_dp,
a4fc5ed6
KP
429 uint16_t address, uint8_t *send, int send_bytes)
430{
431 int ret;
432 uint8_t msg[20];
433 int msg_bytes;
434 uint8_t ack;
435
9b984dae 436 intel_dp_check_edp(intel_dp);
a4fc5ed6
KP
437 if (send_bytes > 16)
438 return -1;
439 msg[0] = AUX_NATIVE_WRITE << 4;
440 msg[1] = address >> 8;
eebc863e 441 msg[2] = address & 0xff;
a4fc5ed6
KP
442 msg[3] = send_bytes - 1;
443 memcpy(&msg[4], send, send_bytes);
444 msg_bytes = send_bytes + 4;
445 for (;;) {
ea5b213a 446 ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, &ack, 1);
a4fc5ed6
KP
447 if (ret < 0)
448 return ret;
449 if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
450 break;
451 else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
452 udelay(100);
453 else
a5b3da54 454 return -EIO;
a4fc5ed6
KP
455 }
456 return send_bytes;
457}
458
459/* Write a single byte to the aux channel in native mode */
460static int
ea5b213a 461intel_dp_aux_native_write_1(struct intel_dp *intel_dp,
a4fc5ed6
KP
462 uint16_t address, uint8_t byte)
463{
ea5b213a 464 return intel_dp_aux_native_write(intel_dp, address, &byte, 1);
a4fc5ed6
KP
465}
466
467/* read bytes from a native aux channel */
468static int
ea5b213a 469intel_dp_aux_native_read(struct intel_dp *intel_dp,
a4fc5ed6
KP
470 uint16_t address, uint8_t *recv, int recv_bytes)
471{
472 uint8_t msg[4];
473 int msg_bytes;
474 uint8_t reply[20];
475 int reply_bytes;
476 uint8_t ack;
477 int ret;
478
9b984dae 479 intel_dp_check_edp(intel_dp);
a4fc5ed6
KP
480 msg[0] = AUX_NATIVE_READ << 4;
481 msg[1] = address >> 8;
482 msg[2] = address & 0xff;
483 msg[3] = recv_bytes - 1;
484
485 msg_bytes = 4;
486 reply_bytes = recv_bytes + 1;
487
488 for (;;) {
ea5b213a 489 ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes,
a4fc5ed6 490 reply, reply_bytes);
a5b3da54
KP
491 if (ret == 0)
492 return -EPROTO;
493 if (ret < 0)
a4fc5ed6
KP
494 return ret;
495 ack = reply[0];
496 if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) {
497 memcpy(recv, reply + 1, ret - 1);
498 return ret - 1;
499 }
500 else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
501 udelay(100);
502 else
a5b3da54 503 return -EIO;
a4fc5ed6
KP
504 }
505}
506
507static int
ab2c0672
DA
508intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
509 uint8_t write_byte, uint8_t *read_byte)
a4fc5ed6 510{
ab2c0672 511 struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
ea5b213a
CW
512 struct intel_dp *intel_dp = container_of(adapter,
513 struct intel_dp,
514 adapter);
ab2c0672
DA
515 uint16_t address = algo_data->address;
516 uint8_t msg[5];
517 uint8_t reply[2];
8316f337 518 unsigned retry;
ab2c0672
DA
519 int msg_bytes;
520 int reply_bytes;
521 int ret;
522
9b984dae 523 intel_dp_check_edp(intel_dp);
ab2c0672
DA
524 /* Set up the command byte */
525 if (mode & MODE_I2C_READ)
526 msg[0] = AUX_I2C_READ << 4;
527 else
528 msg[0] = AUX_I2C_WRITE << 4;
529
530 if (!(mode & MODE_I2C_STOP))
531 msg[0] |= AUX_I2C_MOT << 4;
a4fc5ed6 532
ab2c0672
DA
533 msg[1] = address >> 8;
534 msg[2] = address;
535
536 switch (mode) {
537 case MODE_I2C_WRITE:
538 msg[3] = 0;
539 msg[4] = write_byte;
540 msg_bytes = 5;
541 reply_bytes = 1;
542 break;
543 case MODE_I2C_READ:
544 msg[3] = 0;
545 msg_bytes = 4;
546 reply_bytes = 2;
547 break;
548 default:
549 msg_bytes = 3;
550 reply_bytes = 1;
551 break;
552 }
553
8316f337
DF
554 for (retry = 0; retry < 5; retry++) {
555 ret = intel_dp_aux_ch(intel_dp,
556 msg, msg_bytes,
557 reply, reply_bytes);
ab2c0672 558 if (ret < 0) {
3ff99164 559 DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
ab2c0672
DA
560 return ret;
561 }
8316f337
DF
562
563 switch (reply[0] & AUX_NATIVE_REPLY_MASK) {
564 case AUX_NATIVE_REPLY_ACK:
565 /* I2C-over-AUX Reply field is only valid
566 * when paired with AUX ACK.
567 */
568 break;
569 case AUX_NATIVE_REPLY_NACK:
570 DRM_DEBUG_KMS("aux_ch native nack\n");
571 return -EREMOTEIO;
572 case AUX_NATIVE_REPLY_DEFER:
573 udelay(100);
574 continue;
575 default:
576 DRM_ERROR("aux_ch invalid native reply 0x%02x\n",
577 reply[0]);
578 return -EREMOTEIO;
579 }
580
ab2c0672
DA
581 switch (reply[0] & AUX_I2C_REPLY_MASK) {
582 case AUX_I2C_REPLY_ACK:
583 if (mode == MODE_I2C_READ) {
584 *read_byte = reply[1];
585 }
586 return reply_bytes - 1;
587 case AUX_I2C_REPLY_NACK:
8316f337 588 DRM_DEBUG_KMS("aux_i2c nack\n");
ab2c0672
DA
589 return -EREMOTEIO;
590 case AUX_I2C_REPLY_DEFER:
8316f337 591 DRM_DEBUG_KMS("aux_i2c defer\n");
ab2c0672
DA
592 udelay(100);
593 break;
594 default:
8316f337 595 DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]);
ab2c0672
DA
596 return -EREMOTEIO;
597 }
598 }
8316f337
DF
599
600 DRM_ERROR("too many retries, giving up\n");
601 return -EREMOTEIO;
a4fc5ed6
KP
602}
603
604static int
ea5b213a 605intel_dp_i2c_init(struct intel_dp *intel_dp,
55f78c43 606 struct intel_connector *intel_connector, const char *name)
a4fc5ed6 607{
0b5c541b
KP
608 int ret;
609
d54e9d28 610 DRM_DEBUG_KMS("i2c_init %s\n", name);
ea5b213a
CW
611 intel_dp->algo.running = false;
612 intel_dp->algo.address = 0;
613 intel_dp->algo.aux_ch = intel_dp_i2c_aux_ch;
614
0206e353 615 memset(&intel_dp->adapter, '\0', sizeof(intel_dp->adapter));
ea5b213a
CW
616 intel_dp->adapter.owner = THIS_MODULE;
617 intel_dp->adapter.class = I2C_CLASS_DDC;
0206e353 618 strncpy(intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1);
ea5b213a
CW
619 intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0';
620 intel_dp->adapter.algo_data = &intel_dp->algo;
621 intel_dp->adapter.dev.parent = &intel_connector->base.kdev;
622
0b5c541b
KP
623 ironlake_edp_panel_vdd_on(intel_dp);
624 ret = i2c_dp_aux_add_bus(&intel_dp->adapter);
bd943159 625 ironlake_edp_panel_vdd_off(intel_dp, false);
0b5c541b 626 return ret;
a4fc5ed6
KP
627}
628
c6bb3538
DV
629static void
630intel_dp_set_clock(struct intel_encoder *encoder,
631 struct intel_crtc_config *pipe_config, int link_bw)
632{
633 struct drm_device *dev = encoder->base.dev;
634
635 if (IS_G4X(dev)) {
636 if (link_bw == DP_LINK_BW_1_62) {
637 pipe_config->dpll.p1 = 2;
638 pipe_config->dpll.p2 = 10;
639 pipe_config->dpll.n = 2;
640 pipe_config->dpll.m1 = 23;
641 pipe_config->dpll.m2 = 8;
642 } else {
643 pipe_config->dpll.p1 = 1;
644 pipe_config->dpll.p2 = 10;
645 pipe_config->dpll.n = 1;
646 pipe_config->dpll.m1 = 14;
647 pipe_config->dpll.m2 = 2;
648 }
649 pipe_config->clock_set = true;
650 } else if (IS_HASWELL(dev)) {
651 /* Haswell has special-purpose DP DDI clocks. */
652 } else if (HAS_PCH_SPLIT(dev)) {
653 if (link_bw == DP_LINK_BW_1_62) {
654 pipe_config->dpll.n = 1;
655 pipe_config->dpll.p1 = 2;
656 pipe_config->dpll.p2 = 10;
657 pipe_config->dpll.m1 = 12;
658 pipe_config->dpll.m2 = 9;
659 } else {
660 pipe_config->dpll.n = 2;
661 pipe_config->dpll.p1 = 1;
662 pipe_config->dpll.p2 = 10;
663 pipe_config->dpll.m1 = 14;
664 pipe_config->dpll.m2 = 8;
665 }
666 pipe_config->clock_set = true;
667 } else if (IS_VALLEYVIEW(dev)) {
668 /* FIXME: Need to figure out optimized DP clocks for vlv. */
669 }
670}
671
00c09d70 672bool
5bfe2ac0
DV
673intel_dp_compute_config(struct intel_encoder *encoder,
674 struct intel_crtc_config *pipe_config)
a4fc5ed6 675{
5bfe2ac0 676 struct drm_device *dev = encoder->base.dev;
36008365 677 struct drm_i915_private *dev_priv = dev->dev_private;
5bfe2ac0 678 struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
5bfe2ac0 679 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 680 enum port port = dp_to_dig_port(intel_dp)->port;
2dd24552 681 struct intel_crtc *intel_crtc = encoder->new_crtc;
dd06f90e 682 struct intel_connector *intel_connector = intel_dp->attached_connector;
a4fc5ed6 683 int lane_count, clock;
397fe157 684 int max_lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
ea5b213a 685 int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
083f9560 686 int bpp, mode_rate;
a4fc5ed6 687 static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
ff9a6750 688 int link_avail, link_clock;
a4fc5ed6 689
bc7d38a4 690 if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
5bfe2ac0
DV
691 pipe_config->has_pch_encoder = true;
692
03afc4a2 693 pipe_config->has_dp_encoder = true;
a4fc5ed6 694
dd06f90e
JN
695 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
696 intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
697 adjusted_mode);
2dd24552
JB
698 if (!HAS_PCH_SPLIT(dev))
699 intel_gmch_panel_fitting(intel_crtc, pipe_config,
700 intel_connector->panel.fitting_mode);
701 else
b074cec8
JB
702 intel_pch_panel_fitting(intel_crtc, pipe_config,
703 intel_connector->panel.fitting_mode);
0d3a1bee
ZY
704 }
705
cb1793ce 706 if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
0af78a2b
DV
707 return false;
708
083f9560
DV
709 DRM_DEBUG_KMS("DP link computation with max lane count %i "
710 "max bw %02x pixel clock %iKHz\n",
71244653 711 max_lane_count, bws[max_clock], adjusted_mode->clock);
083f9560 712
36008365
DV
713 /* Walk through all bpp values. Luckily they're all nicely spaced with 2
714 * bpc in between. */
3e7ca985 715 bpp = pipe_config->pipe_bpp;
e1b73cba
DV
716 if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp)
717 bpp = min_t(int, bpp, dev_priv->vbt.edp_bpp);
657445fe 718
36008365 719 for (; bpp >= 6*3; bpp -= 2*3) {
ff9a6750 720 mode_rate = intel_dp_link_required(adjusted_mode->clock, bpp);
36008365
DV
721
722 for (clock = 0; clock <= max_clock; clock++) {
723 for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
724 link_clock = drm_dp_bw_code_to_link_rate(bws[clock]);
725 link_avail = intel_dp_max_data_rate(link_clock,
726 lane_count);
727
728 if (mode_rate <= link_avail) {
729 goto found;
730 }
731 }
732 }
733 }
c4867936 734
36008365 735 return false;
3685a8f3 736
36008365 737found:
55bc60db
VS
738 if (intel_dp->color_range_auto) {
739 /*
740 * See:
741 * CEA-861-E - 5.1 Default Encoding Parameters
742 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
743 */
18316c8c 744 if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1)
55bc60db
VS
745 intel_dp->color_range = DP_COLOR_RANGE_16_235;
746 else
747 intel_dp->color_range = 0;
748 }
749
3685a8f3 750 if (intel_dp->color_range)
50f3b016 751 pipe_config->limited_color_range = true;
a4fc5ed6 752
36008365
DV
753 intel_dp->link_bw = bws[clock];
754 intel_dp->lane_count = lane_count;
657445fe 755 pipe_config->pipe_bpp = bpp;
ff9a6750 756 pipe_config->port_clock = drm_dp_bw_code_to_link_rate(intel_dp->link_bw);
a4fc5ed6 757
36008365
DV
758 DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
759 intel_dp->link_bw, intel_dp->lane_count,
ff9a6750 760 pipe_config->port_clock, bpp);
36008365
DV
761 DRM_DEBUG_KMS("DP link bw required %i available %i\n",
762 mode_rate, link_avail);
a4fc5ed6 763
03afc4a2 764 intel_link_compute_m_n(bpp, lane_count,
ff9a6750 765 adjusted_mode->clock, pipe_config->port_clock,
03afc4a2 766 &pipe_config->dp_m_n);
9d1a455b 767
c6bb3538
DV
768 intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);
769
03afc4a2 770 return true;
a4fc5ed6
KP
771}
772
247d89f6
PZ
773void intel_dp_init_link_config(struct intel_dp *intel_dp)
774{
775 memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
776 intel_dp->link_configuration[0] = intel_dp->link_bw;
777 intel_dp->link_configuration[1] = intel_dp->lane_count;
778 intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B;
779 /*
780 * Check for DPCD version > 1.1 and enhanced framing support
781 */
782 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
783 (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) {
784 intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
785 }
786}
787
7c62a164 788static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
ea9b6006 789{
7c62a164
DV
790 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
791 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
792 struct drm_device *dev = crtc->base.dev;
ea9b6006
DV
793 struct drm_i915_private *dev_priv = dev->dev_private;
794 u32 dpa_ctl;
795
ff9a6750 796 DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", crtc->config.port_clock);
ea9b6006
DV
797 dpa_ctl = I915_READ(DP_A);
798 dpa_ctl &= ~DP_PLL_FREQ_MASK;
799
ff9a6750 800 if (crtc->config.port_clock == 162000) {
1ce17038
DV
801 /* For a long time we've carried around a ILK-DevA w/a for the
802 * 160MHz clock. If we're really unlucky, it's still required.
803 */
804 DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
ea9b6006 805 dpa_ctl |= DP_PLL_FREQ_160MHZ;
7c62a164 806 intel_dp->DP |= DP_PLL_FREQ_160MHZ;
ea9b6006
DV
807 } else {
808 dpa_ctl |= DP_PLL_FREQ_270MHZ;
7c62a164 809 intel_dp->DP |= DP_PLL_FREQ_270MHZ;
ea9b6006 810 }
1ce17038 811
ea9b6006
DV
812 I915_WRITE(DP_A, dpa_ctl);
813
814 POSTING_READ(DP_A);
815 udelay(500);
816}
817
a4fc5ed6
KP
818static void
819intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
820 struct drm_display_mode *adjusted_mode)
821{
e3421a18 822 struct drm_device *dev = encoder->dev;
417e822d 823 struct drm_i915_private *dev_priv = dev->dev_private;
ea5b213a 824 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
bc7d38a4 825 enum port port = dp_to_dig_port(intel_dp)->port;
7c62a164 826 struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);
a4fc5ed6 827
417e822d 828 /*
1a2eb460 829 * There are four kinds of DP registers:
417e822d
KP
830 *
831 * IBX PCH
1a2eb460
KP
832 * SNB CPU
833 * IVB CPU
417e822d
KP
834 * CPT PCH
835 *
836 * IBX PCH and CPU are the same for almost everything,
837 * except that the CPU DP PLL is configured in this
838 * register
839 *
840 * CPT PCH is quite different, having many bits moved
841 * to the TRANS_DP_CTL register instead. That
842 * configuration happens (oddly) in ironlake_pch_enable
843 */
9c9e7927 844
417e822d
KP
845 /* Preserve the BIOS-computed detected bit. This is
846 * supposed to be read-only.
847 */
848 intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
a4fc5ed6 849
417e822d 850 /* Handle DP bits in common between all three register formats */
417e822d 851 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
17aa6be9 852 intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);
a4fc5ed6 853
e0dac65e
WF
854 if (intel_dp->has_audio) {
855 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
7c62a164 856 pipe_name(crtc->pipe));
ea5b213a 857 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
e0dac65e
WF
858 intel_write_eld(encoder, adjusted_mode);
859 }
247d89f6
PZ
860
861 intel_dp_init_link_config(intel_dp);
a4fc5ed6 862
417e822d 863 /* Split out the IBX/CPU vs CPT settings */
32f9d658 864
bc7d38a4 865 if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
1a2eb460
KP
866 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
867 intel_dp->DP |= DP_SYNC_HS_HIGH;
868 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
869 intel_dp->DP |= DP_SYNC_VS_HIGH;
870 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
871
872 if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
873 intel_dp->DP |= DP_ENHANCED_FRAMING;
874
7c62a164 875 intel_dp->DP |= crtc->pipe << 29;
bc7d38a4 876 } else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
b2634017 877 if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
3685a8f3 878 intel_dp->DP |= intel_dp->color_range;
417e822d
KP
879
880 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
881 intel_dp->DP |= DP_SYNC_HS_HIGH;
882 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
883 intel_dp->DP |= DP_SYNC_VS_HIGH;
884 intel_dp->DP |= DP_LINK_TRAIN_OFF;
885
886 if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
887 intel_dp->DP |= DP_ENHANCED_FRAMING;
888
7c62a164 889 if (crtc->pipe == 1)
417e822d 890 intel_dp->DP |= DP_PIPEB_SELECT;
417e822d
KP
891 } else {
892 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
32f9d658 893 }
ea9b6006 894
bc7d38a4 895 if (port == PORT_A && !IS_VALLEYVIEW(dev))
7c62a164 896 ironlake_set_pll_cpu_edp(intel_dp);
a4fc5ed6
KP
897}
898
99ea7127
KP
899#define IDLE_ON_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
900#define IDLE_ON_VALUE (PP_ON | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
901
902#define IDLE_OFF_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
903#define IDLE_OFF_VALUE (0 | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
904
905#define IDLE_CYCLE_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
906#define IDLE_CYCLE_VALUE (0 | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
907
908static void ironlake_wait_panel_status(struct intel_dp *intel_dp,
909 u32 mask,
910 u32 value)
bd943159 911{
30add22d 912 struct drm_device *dev = intel_dp_to_dev(intel_dp);
99ea7127 913 struct drm_i915_private *dev_priv = dev->dev_private;
453c5420
JB
914 u32 pp_stat_reg, pp_ctrl_reg;
915
916 pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
917 pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
32ce697c 918
99ea7127 919 DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
453c5420
JB
920 mask, value,
921 I915_READ(pp_stat_reg),
922 I915_READ(pp_ctrl_reg));
32ce697c 923
453c5420 924 if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
99ea7127 925 DRM_ERROR("Panel status timeout: status %08x control %08x\n",
453c5420
JB
926 I915_READ(pp_stat_reg),
927 I915_READ(pp_ctrl_reg));
32ce697c 928 }
99ea7127 929}
32ce697c 930
99ea7127
KP
931static void ironlake_wait_panel_on(struct intel_dp *intel_dp)
932{
933 DRM_DEBUG_KMS("Wait for panel power on\n");
934 ironlake_wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
bd943159
KP
935}
936
99ea7127
KP
937static void ironlake_wait_panel_off(struct intel_dp *intel_dp)
938{
939 DRM_DEBUG_KMS("Wait for panel power off time\n");
940 ironlake_wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
941}
942
943static void ironlake_wait_panel_power_cycle(struct intel_dp *intel_dp)
944{
945 DRM_DEBUG_KMS("Wait for panel power cycle\n");
946 ironlake_wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
947}
948
949
832dd3c1
KP
950/* Read the current pp_control value, unlocking the register if it
951 * is locked
952 */
953
453c5420 954static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
832dd3c1 955{
453c5420
JB
956 struct drm_device *dev = intel_dp_to_dev(intel_dp);
957 struct drm_i915_private *dev_priv = dev->dev_private;
958 u32 control;
959 u32 pp_ctrl_reg;
960
961 pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
962 control = I915_READ(pp_ctrl_reg);
832dd3c1
KP
963
964 control &= ~PANEL_UNLOCK_MASK;
965 control |= PANEL_UNLOCK_REGS;
966 return control;
bd943159
KP
967}
968
82a4d9c0 969void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
5d613501 970{
30add22d 971 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5d613501
JB
972 struct drm_i915_private *dev_priv = dev->dev_private;
973 u32 pp;
453c5420 974 u32 pp_stat_reg, pp_ctrl_reg;
5d613501 975
97af61f5
KP
976 if (!is_edp(intel_dp))
977 return;
f01eca2e 978 DRM_DEBUG_KMS("Turn eDP VDD on\n");
5d613501 979
bd943159
KP
980 WARN(intel_dp->want_panel_vdd,
981 "eDP VDD already requested on\n");
982
983 intel_dp->want_panel_vdd = true;
99ea7127 984
bd943159
KP
985 if (ironlake_edp_have_panel_vdd(intel_dp)) {
986 DRM_DEBUG_KMS("eDP VDD already on\n");
987 return;
988 }
989
99ea7127
KP
990 if (!ironlake_edp_have_panel_power(intel_dp))
991 ironlake_wait_panel_power_cycle(intel_dp);
992
453c5420 993 pp = ironlake_get_pp_control(intel_dp);
5d613501 994 pp |= EDP_FORCE_VDD;
ebf33b18 995
453c5420
JB
996 pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
997 pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
998
999 I915_WRITE(pp_ctrl_reg, pp);
1000 POSTING_READ(pp_ctrl_reg);
1001 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1002 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
ebf33b18
KP
1003 /*
1004 * If the panel wasn't on, delay before accessing aux channel
1005 */
1006 if (!ironlake_edp_have_panel_power(intel_dp)) {
bd943159 1007 DRM_DEBUG_KMS("eDP was not running\n");
f01eca2e 1008 msleep(intel_dp->panel_power_up_delay);
f01eca2e 1009 }
5d613501
JB
1010}
1011
bd943159 1012static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
5d613501 1013{
30add22d 1014 struct drm_device *dev = intel_dp_to_dev(intel_dp);
5d613501
JB
1015 struct drm_i915_private *dev_priv = dev->dev_private;
1016 u32 pp;
453c5420 1017 u32 pp_stat_reg, pp_ctrl_reg;
5d613501 1018
a0e99e68
DV
1019 WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
1020
bd943159 1021 if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) {
453c5420 1022 pp = ironlake_get_pp_control(intel_dp);
bd943159 1023 pp &= ~EDP_FORCE_VDD;
bd943159 1024
453c5420
JB
1025 pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
1026 pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
1027
1028 I915_WRITE(pp_ctrl_reg, pp);
1029 POSTING_READ(pp_ctrl_reg);
99ea7127 1030
453c5420
JB
1031 /* Make sure sequencer is idle before allowing subsequent activity */
1032 DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
1033 I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
99ea7127 1034 msleep(intel_dp->panel_power_down_delay);
bd943159
KP
1035 }
1036}
5d613501 1037
bd943159
KP
1038static void ironlake_panel_vdd_work(struct work_struct *__work)
1039{
1040 struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
1041 struct intel_dp, panel_vdd_work);
30add22d 1042 struct drm_device *dev = intel_dp_to_dev(intel_dp);
bd943159 1043
627f7675 1044 mutex_lock(&dev->mode_config.mutex);
bd943159 1045 ironlake_panel_vdd_off_sync(intel_dp);
627f7675 1046 mutex_unlock(&dev->mode_config.mutex);
bd943159
KP
1047}
1048
82a4d9c0 1049void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
bd943159 1050{
97af61f5
KP
1051 if (!is_edp(intel_dp))
1052 return;
5d613501 1053
bd943159
KP
1054 DRM_DEBUG_KMS("Turn eDP VDD off %d\n", intel_dp->want_panel_vdd);
1055 WARN(!intel_dp->want_panel_vdd, "eDP VDD not forced on");
f2e8b18a 1056
bd943159
KP
1057 intel_dp->want_panel_vdd = false;
1058
1059 if (sync) {
1060 ironlake_panel_vdd_off_sync(intel_dp);
1061 } else {
1062 /*
1063 * Queue the timer to fire a long
1064 * time from now (relative to the power down delay)
1065 * to keep the panel power up across a sequence of operations
1066 */
1067 schedule_delayed_work(&intel_dp->panel_vdd_work,
1068 msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
1069 }
5d613501
JB
1070}
1071
82a4d9c0 1072void ironlake_edp_panel_on(struct intel_dp *intel_dp)
9934c132 1073{
30add22d 1074 struct drm_device *dev = intel_dp_to_dev(intel_dp);
9934c132 1075 struct drm_i915_private *dev_priv = dev->dev_private;
99ea7127 1076 u32 pp;
453c5420 1077 u32 pp_ctrl_reg;
9934c132 1078
97af61f5 1079 if (!is_edp(intel_dp))
bd943159 1080 return;
99ea7127
KP
1081
1082 DRM_DEBUG_KMS("Turn eDP power on\n");
1083
1084 if (ironlake_edp_have_panel_power(intel_dp)) {
1085 DRM_DEBUG_KMS("eDP power already on\n");
7d639f35 1086 return;
99ea7127 1087 }
9934c132 1088
99ea7127 1089 ironlake_wait_panel_power_cycle(intel_dp);
37c6c9b0 1090
453c5420 1091 pp = ironlake_get_pp_control(intel_dp);
05ce1a49
KP
1092 if (IS_GEN5(dev)) {
1093 /* ILK workaround: disable reset around power sequence */
1094 pp &= ~PANEL_POWER_RESET;
1095 I915_WRITE(PCH_PP_CONTROL, pp);
1096 POSTING_READ(PCH_PP_CONTROL);
1097 }
37c6c9b0 1098
1c0ae80a 1099 pp |= POWER_TARGET_ON;
99ea7127
KP
1100 if (!IS_GEN5(dev))
1101 pp |= PANEL_POWER_RESET;
1102
453c5420
JB
1103 pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
1104
1105 I915_WRITE(pp_ctrl_reg, pp);
1106 POSTING_READ(pp_ctrl_reg);
9934c132 1107
99ea7127 1108 ironlake_wait_panel_on(intel_dp);
9934c132 1109
05ce1a49
KP
1110 if (IS_GEN5(dev)) {
1111 pp |= PANEL_POWER_RESET; /* restore panel reset bit */
1112 I915_WRITE(PCH_PP_CONTROL, pp);
1113 POSTING_READ(PCH_PP_CONTROL);
1114 }
9934c132
JB
1115}
1116
82a4d9c0 1117void ironlake_edp_panel_off(struct intel_dp *intel_dp)
9934c132 1118{
30add22d 1119 struct drm_device *dev = intel_dp_to_dev(intel_dp);
9934c132 1120 struct drm_i915_private *dev_priv = dev->dev_private;
99ea7127 1121 u32 pp;
453c5420 1122 u32 pp_ctrl_reg;
9934c132 1123
97af61f5
KP
1124 if (!is_edp(intel_dp))
1125 return;
37c6c9b0 1126
99ea7127 1127 DRM_DEBUG_KMS("Turn eDP power off\n");
37c6c9b0 1128
6cb49835 1129 WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");
37c6c9b0 1130
453c5420 1131 pp = ironlake_get_pp_control(intel_dp);
35a38556
DV
1132 /* We need to switch off panel power _and_ force vdd, for otherwise some
1133 * panels get very unhappy and cease to work. */
1134 pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);
453c5420
JB
1135
1136 pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
1137
1138 I915_WRITE(pp_ctrl_reg, pp);
1139 POSTING_READ(pp_ctrl_reg);
9934c132 1140
35a38556
DV
1141 intel_dp->want_panel_vdd = false;
1142
99ea7127 1143 ironlake_wait_panel_off(intel_dp);
9934c132
JB
1144}
1145
d6c50ff8 1146void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
32f9d658 1147{
da63a9f2
PZ
1148 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1149 struct drm_device *dev = intel_dig_port->base.base.dev;
32f9d658 1150 struct drm_i915_private *dev_priv = dev->dev_private;
da63a9f2 1151 int pipe = to_intel_crtc(intel_dig_port->base.base.crtc)->pipe;
32f9d658 1152 u32 pp;
453c5420 1153 u32 pp_ctrl_reg;
32f9d658 1154
f01eca2e
KP
1155 if (!is_edp(intel_dp))
1156 return;
1157
28c97730 1158 DRM_DEBUG_KMS("\n");
01cb9ea6
JB
1159 /*
1160 * If we enable the backlight right away following a panel power
1161 * on, we may see slight flicker as the panel syncs with the eDP
1162 * link. So delay a bit to make sure the image is solid before
1163 * allowing it to appear.
1164 */
f01eca2e 1165 msleep(intel_dp->backlight_on_delay);
453c5420 1166 pp = ironlake_get_pp_control(intel_dp);
32f9d658 1167 pp |= EDP_BLC_ENABLE;
453c5420
JB
1168
1169 pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
1170
1171 I915_WRITE(pp_ctrl_reg, pp);
1172 POSTING_READ(pp_ctrl_reg);
035aa3de
DV
1173
1174 intel_panel_enable_backlight(dev, pipe);
32f9d658
ZW
1175}
1176
d6c50ff8 1177void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
32f9d658 1178{
30add22d 1179 struct drm_device *dev = intel_dp_to_dev(intel_dp);
32f9d658
ZW
1180 struct drm_i915_private *dev_priv = dev->dev_private;
1181 u32 pp;
453c5420 1182 u32 pp_ctrl_reg;
32f9d658 1183
f01eca2e
KP
1184 if (!is_edp(intel_dp))
1185 return;
1186
035aa3de
DV
1187 intel_panel_disable_backlight(dev);
1188
28c97730 1189 DRM_DEBUG_KMS("\n");
453c5420 1190 pp = ironlake_get_pp_control(intel_dp);
32f9d658 1191 pp &= ~EDP_BLC_ENABLE;
453c5420
JB
1192
1193 pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
1194
1195 I915_WRITE(pp_ctrl_reg, pp);
1196 POSTING_READ(pp_ctrl_reg);
f01eca2e 1197 msleep(intel_dp->backlight_off_delay);
32f9d658 1198}
a4fc5ed6 1199
2bd2ad64 1200static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
d240f20f 1201{
da63a9f2
PZ
1202 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1203 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
1204 struct drm_device *dev = crtc->dev;
d240f20f
JB
1205 struct drm_i915_private *dev_priv = dev->dev_private;
1206 u32 dpa_ctl;
1207
2bd2ad64
DV
1208 assert_pipe_disabled(dev_priv,
1209 to_intel_crtc(crtc)->pipe);
1210
d240f20f
JB
1211 DRM_DEBUG_KMS("\n");
1212 dpa_ctl = I915_READ(DP_A);
0767935e
DV
1213 WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
1214 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
1215
1216 /* We don't adjust intel_dp->DP while tearing down the link, to
1217 * facilitate link retraining (e.g. after hotplug). Hence clear all
1218 * enable bits here to ensure that we don't enable too much. */
1219 intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
1220 intel_dp->DP |= DP_PLL_ENABLE;
1221 I915_WRITE(DP_A, intel_dp->DP);
298b0b39
JB
1222 POSTING_READ(DP_A);
1223 udelay(200);
d240f20f
JB
1224}
1225
2bd2ad64 1226static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
d240f20f 1227{
da63a9f2
PZ
1228 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
1229 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
1230 struct drm_device *dev = crtc->dev;
d240f20f
JB
1231 struct drm_i915_private *dev_priv = dev->dev_private;
1232 u32 dpa_ctl;
1233
2bd2ad64
DV
1234 assert_pipe_disabled(dev_priv,
1235 to_intel_crtc(crtc)->pipe);
1236
d240f20f 1237 dpa_ctl = I915_READ(DP_A);
0767935e
DV
1238 WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
1239 "dp pll off, should be on\n");
1240 WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");
1241
1242 /* We can't rely on the value tracked for the DP register in
1243 * intel_dp->DP because link_down must not change that (otherwise link
1244 * re-training will fail. */
298b0b39 1245 dpa_ctl &= ~DP_PLL_ENABLE;
d240f20f 1246 I915_WRITE(DP_A, dpa_ctl);
1af5fa1b 1247 POSTING_READ(DP_A);
d240f20f
JB
1248 udelay(200);
1249}
1250
c7ad3810 1251/* If the sink supports it, try to set the power state appropriately */
c19b0669 1252void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
c7ad3810
JB
1253{
1254 int ret, i;
1255
1256 /* Should have a valid DPCD by this point */
1257 if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
1258 return;
1259
1260 if (mode != DRM_MODE_DPMS_ON) {
1261 ret = intel_dp_aux_native_write_1(intel_dp, DP_SET_POWER,
1262 DP_SET_POWER_D3);
1263 if (ret != 1)
1264 DRM_DEBUG_DRIVER("failed to write sink power state\n");
1265 } else {
1266 /*
1267 * When turning on, we need to retry for 1ms to give the sink
1268 * time to wake up.
1269 */
1270 for (i = 0; i < 3; i++) {
1271 ret = intel_dp_aux_native_write_1(intel_dp,
1272 DP_SET_POWER,
1273 DP_SET_POWER_D0);
1274 if (ret == 1)
1275 break;
1276 msleep(1);
1277 }
1278 }
1279}
1280
19d8fe15
DV
1281static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
1282 enum pipe *pipe)
d240f20f 1283{
19d8fe15 1284 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 1285 enum port port = dp_to_dig_port(intel_dp)->port;
19d8fe15
DV
1286 struct drm_device *dev = encoder->base.dev;
1287 struct drm_i915_private *dev_priv = dev->dev_private;
1288 u32 tmp = I915_READ(intel_dp->output_reg);
1289
1290 if (!(tmp & DP_PORT_EN))
1291 return false;
1292
bc7d38a4 1293 if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
19d8fe15 1294 *pipe = PORT_TO_PIPE_CPT(tmp);
bc7d38a4 1295 } else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
19d8fe15
DV
1296 *pipe = PORT_TO_PIPE(tmp);
1297 } else {
1298 u32 trans_sel;
1299 u32 trans_dp;
1300 int i;
1301
1302 switch (intel_dp->output_reg) {
1303 case PCH_DP_B:
1304 trans_sel = TRANS_DP_PORT_SEL_B;
1305 break;
1306 case PCH_DP_C:
1307 trans_sel = TRANS_DP_PORT_SEL_C;
1308 break;
1309 case PCH_DP_D:
1310 trans_sel = TRANS_DP_PORT_SEL_D;
1311 break;
1312 default:
1313 return true;
1314 }
1315
1316 for_each_pipe(i) {
1317 trans_dp = I915_READ(TRANS_DP_CTL(i));
1318 if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) {
1319 *pipe = i;
1320 return true;
1321 }
1322 }
19d8fe15 1323
4a0833ec
DV
1324 DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
1325 intel_dp->output_reg);
1326 }
d240f20f 1327
19d8fe15
DV
1328 return true;
1329}
d240f20f 1330
045ac3b5
JB
1331static void intel_dp_get_config(struct intel_encoder *encoder,
1332 struct intel_crtc_config *pipe_config)
1333{
1334 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
045ac3b5 1335 u32 tmp, flags = 0;
63000ef6
XZ
1336 struct drm_device *dev = encoder->base.dev;
1337 struct drm_i915_private *dev_priv = dev->dev_private;
1338 enum port port = dp_to_dig_port(intel_dp)->port;
1339 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
045ac3b5 1340
63000ef6
XZ
1341 if ((port == PORT_A) || !HAS_PCH_CPT(dev)) {
1342 tmp = I915_READ(intel_dp->output_reg);
1343 if (tmp & DP_SYNC_HS_HIGH)
1344 flags |= DRM_MODE_FLAG_PHSYNC;
1345 else
1346 flags |= DRM_MODE_FLAG_NHSYNC;
045ac3b5 1347
63000ef6
XZ
1348 if (tmp & DP_SYNC_VS_HIGH)
1349 flags |= DRM_MODE_FLAG_PVSYNC;
1350 else
1351 flags |= DRM_MODE_FLAG_NVSYNC;
1352 } else {
1353 tmp = I915_READ(TRANS_DP_CTL(crtc->pipe));
1354 if (tmp & TRANS_DP_HSYNC_ACTIVE_HIGH)
1355 flags |= DRM_MODE_FLAG_PHSYNC;
1356 else
1357 flags |= DRM_MODE_FLAG_NHSYNC;
045ac3b5 1358
63000ef6
XZ
1359 if (tmp & TRANS_DP_VSYNC_ACTIVE_HIGH)
1360 flags |= DRM_MODE_FLAG_PVSYNC;
1361 else
1362 flags |= DRM_MODE_FLAG_NVSYNC;
1363 }
045ac3b5
JB
1364
1365 pipe_config->adjusted_mode.flags |= flags;
f1f644dc
JB
1366
1367 if (dp_to_dig_port(intel_dp)->port == PORT_A) {
1368 if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
1369 pipe_config->port_clock = 162000;
1370 else
1371 pipe_config->port_clock = 270000;
1372 }
045ac3b5
JB
1373}
1374
2293bb5c
SK
1375static bool is_edp_psr(struct intel_dp *intel_dp)
1376{
1377 return is_edp(intel_dp) &&
1378 intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED;
1379}
1380
2b28bb1b
RV
1381static bool intel_edp_is_psr_enabled(struct drm_device *dev)
1382{
1383 struct drm_i915_private *dev_priv = dev->dev_private;
1384
1385 if (!IS_HASWELL(dev))
1386 return false;
1387
1388 return I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
1389}
1390
1391static void intel_edp_psr_write_vsc(struct intel_dp *intel_dp,
1392 struct edp_vsc_psr *vsc_psr)
1393{
1394 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1395 struct drm_device *dev = dig_port->base.base.dev;
1396 struct drm_i915_private *dev_priv = dev->dev_private;
1397 struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
1398 u32 ctl_reg = HSW_TVIDEO_DIP_CTL(crtc->config.cpu_transcoder);
1399 u32 data_reg = HSW_TVIDEO_DIP_VSC_DATA(crtc->config.cpu_transcoder);
1400 uint32_t *data = (uint32_t *) vsc_psr;
1401 unsigned int i;
1402
1403 /* As per BSPec (Pipe Video Data Island Packet), we need to disable
1404 the video DIP being updated before program video DIP data buffer
1405 registers for DIP being updated. */
1406 I915_WRITE(ctl_reg, 0);
1407 POSTING_READ(ctl_reg);
1408
1409 for (i = 0; i < VIDEO_DIP_VSC_DATA_SIZE; i += 4) {
1410 if (i < sizeof(struct edp_vsc_psr))
1411 I915_WRITE(data_reg + i, *data++);
1412 else
1413 I915_WRITE(data_reg + i, 0);
1414 }
1415
1416 I915_WRITE(ctl_reg, VIDEO_DIP_ENABLE_VSC_HSW);
1417 POSTING_READ(ctl_reg);
1418}
1419
1420static void intel_edp_psr_setup(struct intel_dp *intel_dp)
1421{
1422 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1423 struct drm_i915_private *dev_priv = dev->dev_private;
1424 struct edp_vsc_psr psr_vsc;
1425
1426 if (intel_dp->psr_setup_done)
1427 return;
1428
1429 /* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
1430 memset(&psr_vsc, 0, sizeof(psr_vsc));
1431 psr_vsc.sdp_header.HB0 = 0;
1432 psr_vsc.sdp_header.HB1 = 0x7;
1433 psr_vsc.sdp_header.HB2 = 0x2;
1434 psr_vsc.sdp_header.HB3 = 0x8;
1435 intel_edp_psr_write_vsc(intel_dp, &psr_vsc);
1436
1437 /* Avoid continuous PSR exit by masking memup and hpd */
1438 I915_WRITE(EDP_PSR_DEBUG_CTL, EDP_PSR_DEBUG_MASK_MEMUP |
1439 EDP_PSR_DEBUG_MASK_HPD);
1440
1441 intel_dp->psr_setup_done = true;
1442}
1443
1444static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp)
1445{
1446 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1447 struct drm_i915_private *dev_priv = dev->dev_private;
1448 uint32_t aux_clock_divider = get_aux_clock_divider(intel_dp);
1449 int precharge = 0x3;
1450 int msg_size = 5; /* Header(4) + Message(1) */
1451
1452 /* Enable PSR in sink */
1453 if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT)
1454 intel_dp_aux_native_write_1(intel_dp, DP_PSR_EN_CFG,
1455 DP_PSR_ENABLE &
1456 ~DP_PSR_MAIN_LINK_ACTIVE);
1457 else
1458 intel_dp_aux_native_write_1(intel_dp, DP_PSR_EN_CFG,
1459 DP_PSR_ENABLE |
1460 DP_PSR_MAIN_LINK_ACTIVE);
1461
1462 /* Setup AUX registers */
1463 I915_WRITE(EDP_PSR_AUX_DATA1, EDP_PSR_DPCD_COMMAND);
1464 I915_WRITE(EDP_PSR_AUX_DATA2, EDP_PSR_DPCD_NORMAL_OPERATION);
1465 I915_WRITE(EDP_PSR_AUX_CTL,
1466 DP_AUX_CH_CTL_TIME_OUT_400us |
1467 (msg_size << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
1468 (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
1469 (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT));
1470}
1471
1472static void intel_edp_psr_enable_source(struct intel_dp *intel_dp)
1473{
1474 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1475 struct drm_i915_private *dev_priv = dev->dev_private;
1476 uint32_t max_sleep_time = 0x1f;
1477 uint32_t idle_frames = 1;
1478 uint32_t val = 0x0;
1479
1480 if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT) {
1481 val |= EDP_PSR_LINK_STANDBY;
1482 val |= EDP_PSR_TP2_TP3_TIME_0us;
1483 val |= EDP_PSR_TP1_TIME_0us;
1484 val |= EDP_PSR_SKIP_AUX_EXIT;
1485 } else
1486 val |= EDP_PSR_LINK_DISABLE;
1487
1488 I915_WRITE(EDP_PSR_CTL, val |
1489 EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES |
1490 max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
1491 idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
1492 EDP_PSR_ENABLE);
1493}
1494
3f51e471
RV
1495static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
1496{
1497 struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
1498 struct drm_device *dev = dig_port->base.base.dev;
1499 struct drm_i915_private *dev_priv = dev->dev_private;
1500 struct drm_crtc *crtc = dig_port->base.base.crtc;
1501 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1502 struct drm_i915_gem_object *obj = to_intel_framebuffer(crtc->fb)->obj;
1503 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
1504
1505 if (!IS_HASWELL(dev)) {
1506 DRM_DEBUG_KMS("PSR not supported on this platform\n");
1507 dev_priv->no_psr_reason = PSR_NO_SOURCE;
1508 return false;
1509 }
1510
1511 if ((intel_encoder->type != INTEL_OUTPUT_EDP) ||
1512 (dig_port->port != PORT_A)) {
1513 DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n");
1514 dev_priv->no_psr_reason = PSR_HSW_NOT_DDIA;
1515 return false;
1516 }
1517
1518 if (!is_edp_psr(intel_dp)) {
1519 DRM_DEBUG_KMS("PSR not supported by this panel\n");
1520 dev_priv->no_psr_reason = PSR_NO_SINK;
1521 return false;
1522 }
1523
105b7c11
RV
1524 if (!i915_enable_psr) {
1525 DRM_DEBUG_KMS("PSR disable by flag\n");
1526 dev_priv->no_psr_reason = PSR_MODULE_PARAM;
1527 return false;
1528 }
1529
3f51e471
RV
1530 if (!intel_crtc->active || !crtc->fb || !crtc->mode.clock) {
1531 DRM_DEBUG_KMS("crtc not active for PSR\n");
1532 dev_priv->no_psr_reason = PSR_CRTC_NOT_ACTIVE;
1533 return false;
1534 }
1535
1536 if (obj->tiling_mode != I915_TILING_X ||
1537 obj->fence_reg == I915_FENCE_REG_NONE) {
1538 DRM_DEBUG_KMS("PSR condition failed: fb not tiled or fenced\n");
1539 dev_priv->no_psr_reason = PSR_NOT_TILED;
1540 return false;
1541 }
1542
1543 if (I915_READ(SPRCTL(intel_crtc->pipe)) & SPRITE_ENABLE) {
1544 DRM_DEBUG_KMS("PSR condition failed: Sprite is Enabled\n");
1545 dev_priv->no_psr_reason = PSR_SPRITE_ENABLED;
1546 return false;
1547 }
1548
1549 if (I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) &
1550 S3D_ENABLE) {
1551 DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
1552 dev_priv->no_psr_reason = PSR_S3D_ENABLED;
1553 return false;
1554 }
1555
1556 if (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) {
1557 DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
1558 dev_priv->no_psr_reason = PSR_INTERLACED_ENABLED;
1559 return false;
1560 }
1561
1562 return true;
1563}
1564
3d739d92 1565static void intel_edp_psr_do_enable(struct intel_dp *intel_dp)
2b28bb1b
RV
1566{
1567 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1568
3f51e471
RV
1569 if (!intel_edp_psr_match_conditions(intel_dp) ||
1570 intel_edp_is_psr_enabled(dev))
2b28bb1b
RV
1571 return;
1572
1573 /* Setup PSR once */
1574 intel_edp_psr_setup(intel_dp);
1575
1576 /* Enable PSR on the panel */
1577 intel_edp_psr_enable_sink(intel_dp);
1578
1579 /* Enable PSR on the host */
1580 intel_edp_psr_enable_source(intel_dp);
1581}
1582
3d739d92
RV
1583void intel_edp_psr_enable(struct intel_dp *intel_dp)
1584{
1585 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1586
1587 if (intel_edp_psr_match_conditions(intel_dp) &&
1588 !intel_edp_is_psr_enabled(dev))
1589 intel_edp_psr_do_enable(intel_dp);
1590}
1591
2b28bb1b
RV
1592void intel_edp_psr_disable(struct intel_dp *intel_dp)
1593{
1594 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1595 struct drm_i915_private *dev_priv = dev->dev_private;
1596
1597 if (!intel_edp_is_psr_enabled(dev))
1598 return;
1599
1600 I915_WRITE(EDP_PSR_CTL, I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE);
1601
1602 /* Wait till PSR is idle */
1603 if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL) &
1604 EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
1605 DRM_ERROR("Timed out waiting for PSR Idle State\n");
1606}
1607
3d739d92
RV
1608void intel_edp_psr_update(struct drm_device *dev)
1609{
1610 struct intel_encoder *encoder;
1611 struct intel_dp *intel_dp = NULL;
1612
1613 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head)
1614 if (encoder->type == INTEL_OUTPUT_EDP) {
1615 intel_dp = enc_to_intel_dp(&encoder->base);
1616
1617 if (!is_edp_psr(intel_dp))
1618 return;
1619
1620 if (!intel_edp_psr_match_conditions(intel_dp))
1621 intel_edp_psr_disable(intel_dp);
1622 else
1623 if (!intel_edp_is_psr_enabled(dev))
1624 intel_edp_psr_do_enable(intel_dp);
1625 }
1626}
1627
e8cb4558 1628static void intel_disable_dp(struct intel_encoder *encoder)
d240f20f 1629{
e8cb4558 1630 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
982a3866
ID
1631 enum port port = dp_to_dig_port(intel_dp)->port;
1632 struct drm_device *dev = encoder->base.dev;
6cb49835
DV
1633
1634 /* Make sure the panel is off before trying to change the mode. But also
1635 * ensure that we have vdd while we switch off the panel. */
1636 ironlake_edp_panel_vdd_on(intel_dp);
21264c63 1637 ironlake_edp_backlight_off(intel_dp);
c7ad3810 1638 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
35a38556 1639 ironlake_edp_panel_off(intel_dp);
3739850b
DV
1640
1641 /* cpu edp my only be disable _after_ the cpu pipe/plane is disabled. */
982a3866 1642 if (!(port == PORT_A || IS_VALLEYVIEW(dev)))
3739850b 1643 intel_dp_link_down(intel_dp);
d240f20f
JB
1644}
1645
2bd2ad64 1646static void intel_post_disable_dp(struct intel_encoder *encoder)
d240f20f 1647{
2bd2ad64 1648 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
982a3866 1649 enum port port = dp_to_dig_port(intel_dp)->port;
b2634017 1650 struct drm_device *dev = encoder->base.dev;
2bd2ad64 1651
982a3866 1652 if (port == PORT_A || IS_VALLEYVIEW(dev)) {
3739850b 1653 intel_dp_link_down(intel_dp);
b2634017
JB
1654 if (!IS_VALLEYVIEW(dev))
1655 ironlake_edp_pll_off(intel_dp);
3739850b 1656 }
2bd2ad64
DV
1657}
1658
e8cb4558 1659static void intel_enable_dp(struct intel_encoder *encoder)
d240f20f 1660{
e8cb4558
DV
1661 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1662 struct drm_device *dev = encoder->base.dev;
1663 struct drm_i915_private *dev_priv = dev->dev_private;
1664 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
5d613501 1665
0c33d8d7
DV
1666 if (WARN_ON(dp_reg & DP_PORT_EN))
1667 return;
5d613501 1668
97af61f5 1669 ironlake_edp_panel_vdd_on(intel_dp);
f01eca2e 1670 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
33a34e4e 1671 intel_dp_start_link_train(intel_dp);
97af61f5 1672 ironlake_edp_panel_on(intel_dp);
bd943159 1673 ironlake_edp_panel_vdd_off(intel_dp, true);
33a34e4e 1674 intel_dp_complete_link_train(intel_dp);
3ab9c637 1675 intel_dp_stop_link_train(intel_dp);
f01eca2e 1676 ironlake_edp_backlight_on(intel_dp);
89b667f8
JB
1677
1678 if (IS_VALLEYVIEW(dev)) {
1679 struct intel_digital_port *dport =
1680 enc_to_dig_port(&encoder->base);
1681 int channel = vlv_dport_to_channel(dport);
1682
1683 vlv_wait_port_ready(dev_priv, channel);
1684 }
d240f20f
JB
1685}
1686
2bd2ad64 1687static void intel_pre_enable_dp(struct intel_encoder *encoder)
a4fc5ed6 1688{
2bd2ad64 1689 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 1690 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
b2634017 1691 struct drm_device *dev = encoder->base.dev;
89b667f8 1692 struct drm_i915_private *dev_priv = dev->dev_private;
a4fc5ed6 1693
bc7d38a4 1694 if (dport->port == PORT_A && !IS_VALLEYVIEW(dev))
2bd2ad64 1695 ironlake_edp_pll_on(intel_dp);
89b667f8
JB
1696
1697 if (IS_VALLEYVIEW(dev)) {
89b667f8
JB
1698 struct intel_crtc *intel_crtc =
1699 to_intel_crtc(encoder->base.crtc);
1700 int port = vlv_dport_to_channel(dport);
1701 int pipe = intel_crtc->pipe;
1702 u32 val;
1703
ae99258f 1704 val = vlv_dpio_read(dev_priv, DPIO_DATA_LANE_A(port));
89b667f8
JB
1705 val = 0;
1706 if (pipe)
1707 val |= (1<<21);
1708 else
1709 val &= ~(1<<21);
1710 val |= 0x001000c4;
ae99258f 1711 vlv_dpio_write(dev_priv, DPIO_DATA_CHANNEL(port), val);
89b667f8 1712
ae99258f 1713 vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF0(port),
89b667f8 1714 0x00760018);
ae99258f 1715 vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF8(port),
89b667f8
JB
1716 0x00400888);
1717 }
1718}
1719
1720static void intel_dp_pre_pll_enable(struct intel_encoder *encoder)
1721{
1722 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
1723 struct drm_device *dev = encoder->base.dev;
1724 struct drm_i915_private *dev_priv = dev->dev_private;
1725 int port = vlv_dport_to_channel(dport);
1726
1727 if (!IS_VALLEYVIEW(dev))
1728 return;
1729
89b667f8 1730 /* Program Tx lane resets to default */
ae99258f 1731 vlv_dpio_write(dev_priv, DPIO_PCS_TX(port),
89b667f8
JB
1732 DPIO_PCS_TX_LANE2_RESET |
1733 DPIO_PCS_TX_LANE1_RESET);
ae99258f 1734 vlv_dpio_write(dev_priv, DPIO_PCS_CLK(port),
89b667f8
JB
1735 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
1736 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
1737 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
1738 DPIO_PCS_CLK_SOFT_RESET);
1739
1740 /* Fix up inter-pair skew failure */
ae99258f
JN
1741 vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER1(port), 0x00750f00);
1742 vlv_dpio_write(dev_priv, DPIO_TX_CTL(port), 0x00001500);
1743 vlv_dpio_write(dev_priv, DPIO_TX_LANE(port), 0x40400000);
a4fc5ed6
KP
1744}
1745
1746/*
df0c237d
JB
1747 * Native read with retry for link status and receiver capability reads for
1748 * cases where the sink may still be asleep.
a4fc5ed6
KP
1749 */
1750static bool
df0c237d
JB
1751intel_dp_aux_native_read_retry(struct intel_dp *intel_dp, uint16_t address,
1752 uint8_t *recv, int recv_bytes)
a4fc5ed6 1753{
61da5fab
JB
1754 int ret, i;
1755
df0c237d
JB
1756 /*
1757 * Sinks are *supposed* to come up within 1ms from an off state,
1758 * but we're also supposed to retry 3 times per the spec.
1759 */
61da5fab 1760 for (i = 0; i < 3; i++) {
df0c237d
JB
1761 ret = intel_dp_aux_native_read(intel_dp, address, recv,
1762 recv_bytes);
1763 if (ret == recv_bytes)
61da5fab
JB
1764 return true;
1765 msleep(1);
1766 }
a4fc5ed6 1767
61da5fab 1768 return false;
a4fc5ed6
KP
1769}
1770
1771/*
1772 * Fetch AUX CH registers 0x202 - 0x207 which contain
1773 * link status information
1774 */
1775static bool
93f62dad 1776intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
a4fc5ed6 1777{
df0c237d
JB
1778 return intel_dp_aux_native_read_retry(intel_dp,
1779 DP_LANE0_1_STATUS,
93f62dad 1780 link_status,
df0c237d 1781 DP_LINK_STATUS_SIZE);
a4fc5ed6
KP
1782}
1783
a4fc5ed6
KP
1784#if 0
1785static char *voltage_names[] = {
1786 "0.4V", "0.6V", "0.8V", "1.2V"
1787};
1788static char *pre_emph_names[] = {
1789 "0dB", "3.5dB", "6dB", "9.5dB"
1790};
1791static char *link_train_names[] = {
1792 "pattern 1", "pattern 2", "idle", "off"
1793};
1794#endif
1795
1796/*
 1797 * These are source-specific values; the maximum voltage swing and
 1798 * pre-emphasis the source supports depend on the platform (see the
 1798 * helpers below)
1799 */
a4fc5ed6
KP
1800
1801static uint8_t
1a2eb460 1802intel_dp_voltage_max(struct intel_dp *intel_dp)
a4fc5ed6 1803{
30add22d 1804 struct drm_device *dev = intel_dp_to_dev(intel_dp);
bc7d38a4 1805 enum port port = dp_to_dig_port(intel_dp)->port;
1a2eb460 1806
e2fa6fba
P
1807 if (IS_VALLEYVIEW(dev))
1808 return DP_TRAIN_VOLTAGE_SWING_1200;
bc7d38a4 1809 else if (IS_GEN7(dev) && port == PORT_A)
1a2eb460 1810 return DP_TRAIN_VOLTAGE_SWING_800;
bc7d38a4 1811 else if (HAS_PCH_CPT(dev) && port != PORT_A)
1a2eb460
KP
1812 return DP_TRAIN_VOLTAGE_SWING_1200;
1813 else
1814 return DP_TRAIN_VOLTAGE_SWING_800;
1815}
1816
1817static uint8_t
1818intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
1819{
30add22d 1820 struct drm_device *dev = intel_dp_to_dev(intel_dp);
bc7d38a4 1821 enum port port = dp_to_dig_port(intel_dp)->port;
1a2eb460 1822
22b8bf17 1823 if (HAS_DDI(dev)) {
d6c0d722
PZ
1824 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
1825 case DP_TRAIN_VOLTAGE_SWING_400:
1826 return DP_TRAIN_PRE_EMPHASIS_9_5;
1827 case DP_TRAIN_VOLTAGE_SWING_600:
1828 return DP_TRAIN_PRE_EMPHASIS_6;
1829 case DP_TRAIN_VOLTAGE_SWING_800:
1830 return DP_TRAIN_PRE_EMPHASIS_3_5;
1831 case DP_TRAIN_VOLTAGE_SWING_1200:
1832 default:
1833 return DP_TRAIN_PRE_EMPHASIS_0;
1834 }
e2fa6fba
P
1835 } else if (IS_VALLEYVIEW(dev)) {
1836 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
1837 case DP_TRAIN_VOLTAGE_SWING_400:
1838 return DP_TRAIN_PRE_EMPHASIS_9_5;
1839 case DP_TRAIN_VOLTAGE_SWING_600:
1840 return DP_TRAIN_PRE_EMPHASIS_6;
1841 case DP_TRAIN_VOLTAGE_SWING_800:
1842 return DP_TRAIN_PRE_EMPHASIS_3_5;
1843 case DP_TRAIN_VOLTAGE_SWING_1200:
1844 default:
1845 return DP_TRAIN_PRE_EMPHASIS_0;
1846 }
bc7d38a4 1847 } else if (IS_GEN7(dev) && port == PORT_A) {
1a2eb460
KP
1848 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
1849 case DP_TRAIN_VOLTAGE_SWING_400:
1850 return DP_TRAIN_PRE_EMPHASIS_6;
1851 case DP_TRAIN_VOLTAGE_SWING_600:
1852 case DP_TRAIN_VOLTAGE_SWING_800:
1853 return DP_TRAIN_PRE_EMPHASIS_3_5;
1854 default:
1855 return DP_TRAIN_PRE_EMPHASIS_0;
1856 }
1857 } else {
1858 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
1859 case DP_TRAIN_VOLTAGE_SWING_400:
1860 return DP_TRAIN_PRE_EMPHASIS_6;
1861 case DP_TRAIN_VOLTAGE_SWING_600:
1862 return DP_TRAIN_PRE_EMPHASIS_6;
1863 case DP_TRAIN_VOLTAGE_SWING_800:
1864 return DP_TRAIN_PRE_EMPHASIS_3_5;
1865 case DP_TRAIN_VOLTAGE_SWING_1200:
1866 default:
1867 return DP_TRAIN_PRE_EMPHASIS_0;
1868 }
a4fc5ed6
KP
1869 }
1870}
1871
e2fa6fba
P
1872static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
1873{
1874 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1875 struct drm_i915_private *dev_priv = dev->dev_private;
1876 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
1877 unsigned long demph_reg_value, preemph_reg_value,
1878 uniqtranscale_reg_value;
1879 uint8_t train_set = intel_dp->train_set[0];
cece5d58 1880 int port = vlv_dport_to_channel(dport);
e2fa6fba
P
1881
1882 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
1883 case DP_TRAIN_PRE_EMPHASIS_0:
1884 preemph_reg_value = 0x0004000;
1885 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
1886 case DP_TRAIN_VOLTAGE_SWING_400:
1887 demph_reg_value = 0x2B405555;
1888 uniqtranscale_reg_value = 0x552AB83A;
1889 break;
1890 case DP_TRAIN_VOLTAGE_SWING_600:
1891 demph_reg_value = 0x2B404040;
1892 uniqtranscale_reg_value = 0x5548B83A;
1893 break;
1894 case DP_TRAIN_VOLTAGE_SWING_800:
1895 demph_reg_value = 0x2B245555;
1896 uniqtranscale_reg_value = 0x5560B83A;
1897 break;
1898 case DP_TRAIN_VOLTAGE_SWING_1200:
1899 demph_reg_value = 0x2B405555;
1900 uniqtranscale_reg_value = 0x5598DA3A;
1901 break;
1902 default:
1903 return 0;
1904 }
1905 break;
1906 case DP_TRAIN_PRE_EMPHASIS_3_5:
1907 preemph_reg_value = 0x0002000;
1908 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
1909 case DP_TRAIN_VOLTAGE_SWING_400:
1910 demph_reg_value = 0x2B404040;
1911 uniqtranscale_reg_value = 0x5552B83A;
1912 break;
1913 case DP_TRAIN_VOLTAGE_SWING_600:
1914 demph_reg_value = 0x2B404848;
1915 uniqtranscale_reg_value = 0x5580B83A;
1916 break;
1917 case DP_TRAIN_VOLTAGE_SWING_800:
1918 demph_reg_value = 0x2B404040;
1919 uniqtranscale_reg_value = 0x55ADDA3A;
1920 break;
1921 default:
1922 return 0;
1923 }
1924 break;
1925 case DP_TRAIN_PRE_EMPHASIS_6:
1926 preemph_reg_value = 0x0000000;
1927 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
1928 case DP_TRAIN_VOLTAGE_SWING_400:
1929 demph_reg_value = 0x2B305555;
1930 uniqtranscale_reg_value = 0x5570B83A;
1931 break;
1932 case DP_TRAIN_VOLTAGE_SWING_600:
1933 demph_reg_value = 0x2B2B4040;
1934 uniqtranscale_reg_value = 0x55ADDA3A;
1935 break;
1936 default:
1937 return 0;
1938 }
1939 break;
1940 case DP_TRAIN_PRE_EMPHASIS_9_5:
1941 preemph_reg_value = 0x0006000;
1942 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
1943 case DP_TRAIN_VOLTAGE_SWING_400:
1944 demph_reg_value = 0x1B405555;
1945 uniqtranscale_reg_value = 0x55ADDA3A;
1946 break;
1947 default:
1948 return 0;
1949 }
1950 break;
1951 default:
1952 return 0;
1953 }
1954
ae99258f
JN
1955 vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 0x00000000);
1956 vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL4(port), demph_reg_value);
1957 vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL2(port),
e2fa6fba 1958 uniqtranscale_reg_value);
ae99258f
JN
1959 vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL3(port), 0x0C782040);
1960 vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER0(port), 0x00030000);
1961 vlv_dpio_write(dev_priv, DPIO_PCS_CTL_OVER1(port), preemph_reg_value);
1962 vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 0x80000000);
e2fa6fba
P
1963
1964 return 0;
1965}
1966
a4fc5ed6 1967static void
93f62dad 1968intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
a4fc5ed6
KP
1969{
1970 uint8_t v = 0;
1971 uint8_t p = 0;
1972 int lane;
1a2eb460
KP
1973 uint8_t voltage_max;
1974 uint8_t preemph_max;
a4fc5ed6 1975
33a34e4e 1976 for (lane = 0; lane < intel_dp->lane_count; lane++) {
0f037bde
DV
1977 uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
1978 uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
a4fc5ed6
KP
1979
1980 if (this_v > v)
1981 v = this_v;
1982 if (this_p > p)
1983 p = this_p;
1984 }
1985
1a2eb460 1986 voltage_max = intel_dp_voltage_max(intel_dp);
417e822d
KP
1987 if (v >= voltage_max)
1988 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
a4fc5ed6 1989
1a2eb460
KP
1990 preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
1991 if (p >= preemph_max)
1992 p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
a4fc5ed6
KP
1993
1994 for (lane = 0; lane < 4; lane++)
33a34e4e 1995 intel_dp->train_set[lane] = v | p;
a4fc5ed6
KP
1996}
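
The clamp in intel_get_adjust_train() is easier to see with concrete numbers: the highest per-lane request wins, then it is capped at the source limit and the max-reached flag is set so the sink stops asking for more. The following is a hedged, stand-alone sketch; the numeric levels and mask values are stand-ins, not the real DP_TRAIN_* encodings.

#include <stdio.h>
#include <stdint.h>

#define SWING_LEVEL_MASK        0x3     /* stand-in for DP_TRAIN_VOLTAGE_SWING_MASK */
#define MAX_SWING_REACHED       0x4     /* stand-in for DP_TRAIN_MAX_SWING_REACHED */

static uint8_t clamp_swing(uint8_t requested, uint8_t source_max)
{
        /* Same shape as the clamp above: cap at the source limit and flag it. */
        if (requested >= source_max)
                return source_max | MAX_SWING_REACHED;
        return requested;
}

int main(void)
{
        uint8_t per_lane_req[4] = { 1, 3, 2, 0 };       /* lane 1 asks for level 3 */
        uint8_t source_max = 2;                         /* e.g. an 800 mV cap */
        uint8_t v = 0;
        int lane;

        for (lane = 0; lane < 4; lane++)
                if (per_lane_req[lane] > v)
                        v = per_lane_req[lane];

        v = clamp_swing(v, source_max);
        printf("swing level %u, max-reached=%d\n",
               v & SWING_LEVEL_MASK, !!(v & MAX_SWING_REACHED));
        return 0;
}
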
1997
1998static uint32_t
f0a3424e 1999intel_gen4_signal_levels(uint8_t train_set)
a4fc5ed6 2000{
3cf2efb1 2001 uint32_t signal_levels = 0;
a4fc5ed6 2002
3cf2efb1 2003 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
a4fc5ed6
KP
2004 case DP_TRAIN_VOLTAGE_SWING_400:
2005 default:
2006 signal_levels |= DP_VOLTAGE_0_4;
2007 break;
2008 case DP_TRAIN_VOLTAGE_SWING_600:
2009 signal_levels |= DP_VOLTAGE_0_6;
2010 break;
2011 case DP_TRAIN_VOLTAGE_SWING_800:
2012 signal_levels |= DP_VOLTAGE_0_8;
2013 break;
2014 case DP_TRAIN_VOLTAGE_SWING_1200:
2015 signal_levels |= DP_VOLTAGE_1_2;
2016 break;
2017 }
3cf2efb1 2018 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
a4fc5ed6
KP
2019 case DP_TRAIN_PRE_EMPHASIS_0:
2020 default:
2021 signal_levels |= DP_PRE_EMPHASIS_0;
2022 break;
2023 case DP_TRAIN_PRE_EMPHASIS_3_5:
2024 signal_levels |= DP_PRE_EMPHASIS_3_5;
2025 break;
2026 case DP_TRAIN_PRE_EMPHASIS_6:
2027 signal_levels |= DP_PRE_EMPHASIS_6;
2028 break;
2029 case DP_TRAIN_PRE_EMPHASIS_9_5:
2030 signal_levels |= DP_PRE_EMPHASIS_9_5;
2031 break;
2032 }
2033 return signal_levels;
2034}
2035
e3421a18
ZW
2036/* Gen6's DP voltage swing and pre-emphasis control */
2037static uint32_t
2038intel_gen6_edp_signal_levels(uint8_t train_set)
2039{
3c5a62b5
YL
2040 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
2041 DP_TRAIN_PRE_EMPHASIS_MASK);
2042 switch (signal_levels) {
e3421a18 2043 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
3c5a62b5
YL
2044 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
2045 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
2046 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
2047 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
e3421a18 2048 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
3c5a62b5
YL
2049 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
2050 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
e3421a18 2051 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
3c5a62b5
YL
2052 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
2053 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
e3421a18 2054 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
3c5a62b5
YL
2055 case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0:
2056 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
e3421a18 2057 default:
3c5a62b5
YL
2058 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
2059 "0x%x\n", signal_levels);
2060 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
e3421a18
ZW
2061 }
2062}
2063
1a2eb460
KP
2064/* Gen7's DP voltage swing and pre-emphasis control */
2065static uint32_t
2066intel_gen7_edp_signal_levels(uint8_t train_set)
2067{
2068 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
2069 DP_TRAIN_PRE_EMPHASIS_MASK);
2070 switch (signal_levels) {
2071 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
2072 return EDP_LINK_TRAIN_400MV_0DB_IVB;
2073 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
2074 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
2075 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
2076 return EDP_LINK_TRAIN_400MV_6DB_IVB;
2077
2078 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
2079 return EDP_LINK_TRAIN_600MV_0DB_IVB;
2080 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
2081 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
2082
2083 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
2084 return EDP_LINK_TRAIN_800MV_0DB_IVB;
2085 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
2086 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
2087
2088 default:
2089 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
2090 "0x%x\n", signal_levels);
2091 return EDP_LINK_TRAIN_500MV_0DB_IVB;
2092 }
2093}
2094
d6c0d722
PZ
2095/* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
2096static uint32_t
f0a3424e 2097intel_hsw_signal_levels(uint8_t train_set)
a4fc5ed6 2098{
d6c0d722
PZ
2099 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
2100 DP_TRAIN_PRE_EMPHASIS_MASK);
2101 switch (signal_levels) {
2102 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
2103 return DDI_BUF_EMP_400MV_0DB_HSW;
2104 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
2105 return DDI_BUF_EMP_400MV_3_5DB_HSW;
2106 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
2107 return DDI_BUF_EMP_400MV_6DB_HSW;
2108 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_9_5:
2109 return DDI_BUF_EMP_400MV_9_5DB_HSW;
a4fc5ed6 2110
d6c0d722
PZ
2111 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
2112 return DDI_BUF_EMP_600MV_0DB_HSW;
2113 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
2114 return DDI_BUF_EMP_600MV_3_5DB_HSW;
2115 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
2116 return DDI_BUF_EMP_600MV_6DB_HSW;
a4fc5ed6 2117
d6c0d722
PZ
2118 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
2119 return DDI_BUF_EMP_800MV_0DB_HSW;
2120 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
2121 return DDI_BUF_EMP_800MV_3_5DB_HSW;
2122 default:
2123 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
2124 "0x%x\n", signal_levels);
2125 return DDI_BUF_EMP_400MV_0DB_HSW;
a4fc5ed6 2126 }
a4fc5ed6
KP
2127}
2128
f0a3424e
PZ
2129/* Properly updates "DP" with the correct signal levels. */
2130static void
2131intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
2132{
2133 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
bc7d38a4 2134 enum port port = intel_dig_port->port;
f0a3424e
PZ
2135 struct drm_device *dev = intel_dig_port->base.base.dev;
2136 uint32_t signal_levels, mask;
2137 uint8_t train_set = intel_dp->train_set[0];
2138
22b8bf17 2139 if (HAS_DDI(dev)) {
f0a3424e
PZ
2140 signal_levels = intel_hsw_signal_levels(train_set);
2141 mask = DDI_BUF_EMP_MASK;
e2fa6fba
P
2142 } else if (IS_VALLEYVIEW(dev)) {
2143 signal_levels = intel_vlv_signal_levels(intel_dp);
2144 mask = 0;
bc7d38a4 2145 } else if (IS_GEN7(dev) && port == PORT_A) {
f0a3424e
PZ
2146 signal_levels = intel_gen7_edp_signal_levels(train_set);
2147 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
bc7d38a4 2148 } else if (IS_GEN6(dev) && port == PORT_A) {
f0a3424e
PZ
2149 signal_levels = intel_gen6_edp_signal_levels(train_set);
2150 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
2151 } else {
2152 signal_levels = intel_gen4_signal_levels(train_set);
2153 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
2154 }
2155
2156 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
2157
2158 *DP = (*DP & ~mask) | signal_levels;
2159}
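
The assignment at the end of intel_dp_set_signal_levels() above is the usual read-modify-write idiom: clear only the signal-level field of the port register value and OR in the platform-specific bits. A tiny stand-alone illustration, with the mask and values invented for the example:

#include <stdio.h>
#include <stdint.h>

static uint32_t set_field(uint32_t reg, uint32_t mask, uint32_t value)
{
        /* Clear the field, then install the new bits; other bits untouched. */
        return (reg & ~mask) | (value & mask);
}

int main(void)
{
        uint32_t dp = 0x80000234;       /* pretend port register value */
        uint32_t mask = 0x00000f00;     /* pretend signal-level field */

        dp = set_field(dp, mask, 0x00000a00);
        printf("DP = 0x%08x\n", dp);    /* prints 0x80000a34 */
        return 0;
}
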
2160
a4fc5ed6 2161static bool
ea5b213a 2162intel_dp_set_link_train(struct intel_dp *intel_dp,
a4fc5ed6 2163 uint32_t dp_reg_value,
58e10eb9 2164 uint8_t dp_train_pat)
a4fc5ed6 2165{
174edf1f
PZ
2166 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2167 struct drm_device *dev = intel_dig_port->base.base.dev;
a4fc5ed6 2168 struct drm_i915_private *dev_priv = dev->dev_private;
174edf1f 2169 enum port port = intel_dig_port->port;
a4fc5ed6
KP
2170 int ret;
2171
22b8bf17 2172 if (HAS_DDI(dev)) {
3ab9c637 2173 uint32_t temp = I915_READ(DP_TP_CTL(port));
d6c0d722
PZ
2174
2175 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
2176 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
2177 else
2178 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
2179
2180 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2181 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2182 case DP_TRAINING_PATTERN_DISABLE:
d6c0d722
PZ
2183 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
2184
2185 break;
2186 case DP_TRAINING_PATTERN_1:
2187 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
2188 break;
2189 case DP_TRAINING_PATTERN_2:
2190 temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
2191 break;
2192 case DP_TRAINING_PATTERN_3:
2193 temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
2194 break;
2195 }
174edf1f 2196 I915_WRITE(DP_TP_CTL(port), temp);
d6c0d722 2197
bc7d38a4 2198 } else if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
47ea7542
PZ
2199 dp_reg_value &= ~DP_LINK_TRAIN_MASK_CPT;
2200
2201 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2202 case DP_TRAINING_PATTERN_DISABLE:
2203 dp_reg_value |= DP_LINK_TRAIN_OFF_CPT;
2204 break;
2205 case DP_TRAINING_PATTERN_1:
2206 dp_reg_value |= DP_LINK_TRAIN_PAT_1_CPT;
2207 break;
2208 case DP_TRAINING_PATTERN_2:
2209 dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT;
2210 break;
2211 case DP_TRAINING_PATTERN_3:
2212 DRM_ERROR("DP training pattern 3 not supported\n");
2213 dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT;
2214 break;
2215 }
2216
2217 } else {
2218 dp_reg_value &= ~DP_LINK_TRAIN_MASK;
2219
2220 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2221 case DP_TRAINING_PATTERN_DISABLE:
2222 dp_reg_value |= DP_LINK_TRAIN_OFF;
2223 break;
2224 case DP_TRAINING_PATTERN_1:
2225 dp_reg_value |= DP_LINK_TRAIN_PAT_1;
2226 break;
2227 case DP_TRAINING_PATTERN_2:
2228 dp_reg_value |= DP_LINK_TRAIN_PAT_2;
2229 break;
2230 case DP_TRAINING_PATTERN_3:
2231 DRM_ERROR("DP training pattern 3 not supported\n");
2232 dp_reg_value |= DP_LINK_TRAIN_PAT_2;
2233 break;
2234 }
2235 }
2236
ea5b213a
CW
2237 I915_WRITE(intel_dp->output_reg, dp_reg_value);
2238 POSTING_READ(intel_dp->output_reg);
a4fc5ed6 2239
ea5b213a 2240 intel_dp_aux_native_write_1(intel_dp,
a4fc5ed6
KP
2241 DP_TRAINING_PATTERN_SET,
2242 dp_train_pat);
2243
47ea7542
PZ
2244 if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) !=
2245 DP_TRAINING_PATTERN_DISABLE) {
2246 ret = intel_dp_aux_native_write(intel_dp,
2247 DP_TRAINING_LANE0_SET,
2248 intel_dp->train_set,
2249 intel_dp->lane_count);
2250 if (ret != intel_dp->lane_count)
2251 return false;
2252 }
a4fc5ed6
KP
2253
2254 return true;
2255}
2256
3ab9c637
ID
2257static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
2258{
2259 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2260 struct drm_device *dev = intel_dig_port->base.base.dev;
2261 struct drm_i915_private *dev_priv = dev->dev_private;
2262 enum port port = intel_dig_port->port;
2263 uint32_t val;
2264
2265 if (!HAS_DDI(dev))
2266 return;
2267
2268 val = I915_READ(DP_TP_CTL(port));
2269 val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2270 val |= DP_TP_CTL_LINK_TRAIN_IDLE;
2271 I915_WRITE(DP_TP_CTL(port), val);
2272
2273 /*
2274 * On PORT_A we can have only eDP in SST mode. There the only reason
2275 * we need to set idle transmission mode is to work around a HW issue
2276 * where we enable the pipe while not in idle link-training mode.
 2277 * In this case there is a requirement to wait for a minimum number of
2278 * idle patterns to be sent.
2279 */
2280 if (port == PORT_A)
2281 return;
2282
2283 if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
2284 1))
2285 DRM_ERROR("Timed out waiting for DP idle patterns\n");
2286}
2287
33a34e4e 2288/* Enable corresponding port and start training pattern 1 */
c19b0669 2289void
33a34e4e 2290intel_dp_start_link_train(struct intel_dp *intel_dp)
a4fc5ed6 2291{
da63a9f2 2292 struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
c19b0669 2293 struct drm_device *dev = encoder->dev;
a4fc5ed6
KP
2294 int i;
2295 uint8_t voltage;
2296 bool clock_recovery = false;
cdb0e95b 2297 int voltage_tries, loop_tries;
ea5b213a 2298 uint32_t DP = intel_dp->DP;
a4fc5ed6 2299
affa9354 2300 if (HAS_DDI(dev))
c19b0669
PZ
2301 intel_ddi_prepare_link_retrain(encoder);
2302
3cf2efb1
CW
2303 /* Write the link configuration data */
2304 intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
2305 intel_dp->link_configuration,
2306 DP_LINK_CONFIGURATION_SIZE);
a4fc5ed6
KP
2307
2308 DP |= DP_PORT_EN;
1a2eb460 2309
33a34e4e 2310 memset(intel_dp->train_set, 0, 4);
a4fc5ed6 2311 voltage = 0xff;
cdb0e95b
KP
2312 voltage_tries = 0;
2313 loop_tries = 0;
a4fc5ed6
KP
2314 clock_recovery = false;
2315 for (;;) {
33a34e4e 2316 /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
93f62dad 2317 uint8_t link_status[DP_LINK_STATUS_SIZE];
f0a3424e
PZ
2318
2319 intel_dp_set_signal_levels(intel_dp, &DP);
a4fc5ed6 2320
a7c9655f 2321 /* Set training pattern 1 */
47ea7542 2322 if (!intel_dp_set_link_train(intel_dp, DP,
81055854
AJ
2323 DP_TRAINING_PATTERN_1 |
2324 DP_LINK_SCRAMBLING_DISABLE))
a4fc5ed6 2325 break;
a4fc5ed6 2326
a7c9655f 2327 drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
93f62dad
KP
2328 if (!intel_dp_get_link_status(intel_dp, link_status)) {
2329 DRM_ERROR("failed to get link status\n");
a4fc5ed6 2330 break;
93f62dad 2331 }
a4fc5ed6 2332
01916270 2333 if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
93f62dad 2334 DRM_DEBUG_KMS("clock recovery OK\n");
3cf2efb1
CW
2335 clock_recovery = true;
2336 break;
2337 }
2338
2339 /* Check to see if we've tried the max voltage */
2340 for (i = 0; i < intel_dp->lane_count; i++)
2341 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
a4fc5ed6 2342 break;
3b4f819d 2343 if (i == intel_dp->lane_count) {
b06fbda3
DV
2344 ++loop_tries;
2345 if (loop_tries == 5) {
cdb0e95b
KP
2346 DRM_DEBUG_KMS("too many full retries, give up\n");
2347 break;
2348 }
2349 memset(intel_dp->train_set, 0, 4);
2350 voltage_tries = 0;
2351 continue;
2352 }
a4fc5ed6 2353
3cf2efb1 2354 /* Check to see if we've tried the same voltage 5 times */
b06fbda3 2355 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
24773670 2356 ++voltage_tries;
b06fbda3
DV
2357 if (voltage_tries == 5) {
2358 DRM_DEBUG_KMS("too many voltage retries, give up\n");
2359 break;
2360 }
2361 } else
2362 voltage_tries = 0;
2363 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
a4fc5ed6 2364
3cf2efb1 2365 /* Compute new intel_dp->train_set as requested by target */
93f62dad 2366 intel_get_adjust_train(intel_dp, link_status);
a4fc5ed6
KP
2367 }
2368
33a34e4e
JB
2369 intel_dp->DP = DP;
2370}
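
The retry bookkeeping in the clock-recovery loop above is dense, so here is a hedged, self-contained model of just that policy: give up after 5 passes at the same voltage swing, or after 5 full restarts once every lane reports maximum swing. The sink-behaviour helpers are made up for the example.

#include <stdio.h>
#include <stdbool.h>

static bool clock_recovery_ok(int step)     { return step == 9; }
static bool all_lanes_max_swing(int swing)  { return swing >= 3; }
static int  requested_swing(int step)       { return step / 4; } /* slowly rises */

int main(void)
{
        int voltage_tries = 0, loop_tries = 0;
        int prev_swing = -1, swing = 0;

        for (int step = 0; ; step++) {
                if (clock_recovery_ok(step)) {
                        printf("clock recovery OK after %d passes\n", step);
                        break;
                }

                if (all_lanes_max_swing(swing)) {
                        if (++loop_tries == 5) {
                                printf("too many full retries, give up\n");
                                break;
                        }
                        swing = 0;              /* restart from a zeroed train_set */
                        voltage_tries = 0;
                        continue;
                }

                if (swing == prev_swing) {
                        if (++voltage_tries == 5) {
                                printf("too many voltage retries, give up\n");
                                break;
                        }
                } else {
                        voltage_tries = 0;
                }
                prev_swing = swing;
                swing = requested_swing(step);  /* "adjust request" from the sink */
        }
        return 0;
}
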
2371
c19b0669 2372void
33a34e4e
JB
2373intel_dp_complete_link_train(struct intel_dp *intel_dp)
2374{
33a34e4e 2375 bool channel_eq = false;
37f80975 2376 int tries, cr_tries;
33a34e4e
JB
2377 uint32_t DP = intel_dp->DP;
2378
a4fc5ed6
KP
2379 /* channel equalization */
2380 tries = 0;
37f80975 2381 cr_tries = 0;
a4fc5ed6
KP
2382 channel_eq = false;
2383 for (;;) {
93f62dad 2384 uint8_t link_status[DP_LINK_STATUS_SIZE];
e3421a18 2385
37f80975
JB
2386 if (cr_tries > 5) {
2387 DRM_ERROR("failed to train DP, aborting\n");
2388 intel_dp_link_down(intel_dp);
2389 break;
2390 }
2391
f0a3424e 2392 intel_dp_set_signal_levels(intel_dp, &DP);
e3421a18 2393
a4fc5ed6 2394 /* channel eq pattern */
47ea7542 2395 if (!intel_dp_set_link_train(intel_dp, DP,
81055854
AJ
2396 DP_TRAINING_PATTERN_2 |
2397 DP_LINK_SCRAMBLING_DISABLE))
a4fc5ed6
KP
2398 break;
2399
a7c9655f 2400 drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
93f62dad 2401 if (!intel_dp_get_link_status(intel_dp, link_status))
a4fc5ed6 2402 break;
a4fc5ed6 2403
37f80975 2404 /* Make sure clock is still ok */
01916270 2405 if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
37f80975
JB
2406 intel_dp_start_link_train(intel_dp);
2407 cr_tries++;
2408 continue;
2409 }
2410
1ffdff13 2411 if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
3cf2efb1
CW
2412 channel_eq = true;
2413 break;
2414 }
a4fc5ed6 2415
37f80975
JB
2416 /* Try 5 times, then try clock recovery if that fails */
2417 if (tries > 5) {
2418 intel_dp_link_down(intel_dp);
2419 intel_dp_start_link_train(intel_dp);
2420 tries = 0;
2421 cr_tries++;
2422 continue;
2423 }
a4fc5ed6 2424
3cf2efb1 2425 /* Compute new intel_dp->train_set as requested by target */
93f62dad 2426 intel_get_adjust_train(intel_dp, link_status);
3cf2efb1 2427 ++tries;
869184a6 2428 }
3cf2efb1 2429
3ab9c637
ID
2430 intel_dp_set_idle_link_train(intel_dp);
2431
2432 intel_dp->DP = DP;
2433
d6c0d722 2434 if (channel_eq)
07f42258 2435 DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
d6c0d722 2436
3ab9c637
ID
2437}
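
The channel-equalization loop above layers two retry counters; this hedged stand-alone model shows the escalation only: a lost clock-recovery lock triggers a full retrain, more than 5 failed pattern-2 passes drops the link and retrains, and more than 5 retrains aborts. The sink-behaviour helpers below are invented for the example.

#include <stdio.h>
#include <stdbool.h>

/* Made-up sink behaviour: CR drops once, EQ succeeds on a later pass. */
static bool cr_still_ok(int pass)  { return pass != 1; }
static bool eq_ok(int pass)        { return pass >= 4; }

int main(void)
{
        int tries = 0, cr_tries = 0;

        for (int pass = 0; ; pass++) {
                if (cr_tries > 5) {
                        printf("failed to train DP, aborting\n");
                        break;
                }
                if (!cr_still_ok(pass)) {       /* clock recovery lost: full retrain */
                        cr_tries++;
                        continue;
                }
                if (eq_ok(pass)) {
                        printf("channel EQ done after %d passes\n", pass + 1);
                        break;
                }
                if (tries > 5) {                /* EQ stuck: drop link, retrain */
                        tries = 0;
                        cr_tries++;
                        continue;
                }
                tries++;
        }
        return 0;
}
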
2438
2439void intel_dp_stop_link_train(struct intel_dp *intel_dp)
2440{
2441 intel_dp_set_link_train(intel_dp, intel_dp->DP,
2442 DP_TRAINING_PATTERN_DISABLE);
a4fc5ed6
KP
2443}
2444
2445static void
ea5b213a 2446intel_dp_link_down(struct intel_dp *intel_dp)
a4fc5ed6 2447{
da63a9f2 2448 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
bc7d38a4 2449 enum port port = intel_dig_port->port;
da63a9f2 2450 struct drm_device *dev = intel_dig_port->base.base.dev;
a4fc5ed6 2451 struct drm_i915_private *dev_priv = dev->dev_private;
ab527efc
DV
2452 struct intel_crtc *intel_crtc =
2453 to_intel_crtc(intel_dig_port->base.base.crtc);
ea5b213a 2454 uint32_t DP = intel_dp->DP;
a4fc5ed6 2455
c19b0669
PZ
2456 /*
2457 * DDI code has a strict mode set sequence and we should try to respect
2458 * it, otherwise we might hang the machine in many different ways. So we
2459 * really should be disabling the port only on a complete crtc_disable
 2460 * sequence. This function is only called under two conditions in the DDI
 2461 * code:
 2462 * - Link train failed while doing crtc_enable, and in this case we
 2463 * really should respect the mode set sequence and wait for a
 2464 * crtc_disable.
 2465 * - Someone turned the monitor off and intel_dp_check_link_status
 2466 * called us. We don't need to disable the whole port in this case, so
2467 * when someone turns the monitor on again,
2468 * intel_ddi_prepare_link_retrain will take care of redoing the link
2469 * train.
2470 */
affa9354 2471 if (HAS_DDI(dev))
c19b0669
PZ
2472 return;
2473
0c33d8d7 2474 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
1b39d6f3
CW
2475 return;
2476
28c97730 2477 DRM_DEBUG_KMS("\n");
32f9d658 2478
bc7d38a4 2479 if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
e3421a18 2480 DP &= ~DP_LINK_TRAIN_MASK_CPT;
ea5b213a 2481 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
e3421a18
ZW
2482 } else {
2483 DP &= ~DP_LINK_TRAIN_MASK;
ea5b213a 2484 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
e3421a18 2485 }
fe255d00 2486 POSTING_READ(intel_dp->output_reg);
5eb08b69 2487
ab527efc
DV
2488 /* We don't really know why we're doing this */
2489 intel_wait_for_vblank(dev, intel_crtc->pipe);
5eb08b69 2490
493a7081 2491 if (HAS_PCH_IBX(dev) &&
1b39d6f3 2492 I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
da63a9f2 2493 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
31acbcc4 2494
5bddd17f
EA
2495 /* Hardware workaround: leaving our transcoder select
2496 * set to transcoder B while it's off will prevent the
2497 * corresponding HDMI output on transcoder A.
2498 *
2499 * Combine this with another hardware workaround:
2500 * transcoder select bit can only be cleared while the
2501 * port is enabled.
2502 */
2503 DP &= ~DP_PIPEB_SELECT;
2504 I915_WRITE(intel_dp->output_reg, DP);
2505
2506 /* Changes to enable or select take place the vblank
2507 * after being written.
2508 */
ff50afe9
DV
2509 if (WARN_ON(crtc == NULL)) {
2510 /* We should never try to disable a port without a crtc
2511 * attached. For paranoia keep the code around for a
2512 * bit. */
31acbcc4
CW
2513 POSTING_READ(intel_dp->output_reg);
2514 msleep(50);
2515 } else
ab527efc 2516 intel_wait_for_vblank(dev, intel_crtc->pipe);
5bddd17f
EA
2517 }
2518
832afda6 2519 DP &= ~DP_AUDIO_OUTPUT_ENABLE;
ea5b213a
CW
2520 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
2521 POSTING_READ(intel_dp->output_reg);
f01eca2e 2522 msleep(intel_dp->panel_power_down_delay);
a4fc5ed6
KP
2523}
2524
26d61aad
KP
2525static bool
2526intel_dp_get_dpcd(struct intel_dp *intel_dp)
92fd8fd1 2527{
577c7a50
DL
2528 char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3];
2529
92fd8fd1 2530 if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd,
edb39244
AJ
2531 sizeof(intel_dp->dpcd)) == 0)
2532 return false; /* aux transfer failed */
92fd8fd1 2533
577c7a50
DL
2534 hex_dump_to_buffer(intel_dp->dpcd, sizeof(intel_dp->dpcd),
2535 32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false);
2536 DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump);
2537
edb39244
AJ
2538 if (intel_dp->dpcd[DP_DPCD_REV] == 0)
2539 return false; /* DPCD not present */
2540
2293bb5c
SK
2541 /* Check if the panel supports PSR */
2542 memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
2543 intel_dp_aux_native_read_retry(intel_dp, DP_PSR_SUPPORT,
2544 intel_dp->psr_dpcd,
2545 sizeof(intel_dp->psr_dpcd));
2546 if (is_edp_psr(intel_dp))
2547 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
edb39244
AJ
2548 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
2549 DP_DWN_STRM_PORT_PRESENT))
2550 return true; /* native DP sink */
2551
2552 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
2553 return true; /* no per-port downstream info */
2554
2555 if (intel_dp_aux_native_read_retry(intel_dp, DP_DOWNSTREAM_PORT_0,
2556 intel_dp->downstream_ports,
2557 DP_MAX_DOWNSTREAM_PORTS) == 0)
2558 return false; /* downstream port status fetch failed */
2559
2560 return true;
92fd8fd1
KP
2561}
2562
0d198328
AJ
2563static void
2564intel_dp_probe_oui(struct intel_dp *intel_dp)
2565{
2566 u8 buf[3];
2567
2568 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
2569 return;
2570
351cfc34
DV
2571 ironlake_edp_panel_vdd_on(intel_dp);
2572
0d198328
AJ
2573 if (intel_dp_aux_native_read_retry(intel_dp, DP_SINK_OUI, buf, 3))
2574 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
2575 buf[0], buf[1], buf[2]);
2576
2577 if (intel_dp_aux_native_read_retry(intel_dp, DP_BRANCH_OUI, buf, 3))
2578 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
2579 buf[0], buf[1], buf[2]);
351cfc34
DV
2580
2581 ironlake_edp_panel_vdd_off(intel_dp, false);
0d198328
AJ
2582}
2583
a60f0e38
JB
2584static bool
2585intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
2586{
2587 int ret;
2588
2589 ret = intel_dp_aux_native_read_retry(intel_dp,
2590 DP_DEVICE_SERVICE_IRQ_VECTOR,
2591 sink_irq_vector, 1);
2592 if (!ret)
2593 return false;
2594
2595 return true;
2596}
2597
2598static void
2599intel_dp_handle_test_request(struct intel_dp *intel_dp)
2600{
2601 /* NAK by default */
9324cf7f 2602 intel_dp_aux_native_write_1(intel_dp, DP_TEST_RESPONSE, DP_TEST_NAK);
a60f0e38
JB
2603}
2604
a4fc5ed6
KP
2605/*
2606 * According to DP spec
2607 * 5.1.2:
2608 * 1. Read DPCD
2609 * 2. Configure link according to Receiver Capabilities
2610 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
2611 * 4. Check link status on receipt of hot-plug interrupt
2612 */
2613
00c09d70 2614void
ea5b213a 2615intel_dp_check_link_status(struct intel_dp *intel_dp)
a4fc5ed6 2616{
da63a9f2 2617 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
a60f0e38 2618 u8 sink_irq_vector;
93f62dad 2619 u8 link_status[DP_LINK_STATUS_SIZE];
a60f0e38 2620
da63a9f2 2621 if (!intel_encoder->connectors_active)
d2b996ac 2622 return;
59cd09e1 2623
da63a9f2 2624 if (WARN_ON(!intel_encoder->base.crtc))
a4fc5ed6
KP
2625 return;
2626
92fd8fd1 2627 /* Try to read receiver status if the link appears to be up */
93f62dad 2628 if (!intel_dp_get_link_status(intel_dp, link_status)) {
ea5b213a 2629 intel_dp_link_down(intel_dp);
a4fc5ed6
KP
2630 return;
2631 }
2632
92fd8fd1 2633 /* Now read the DPCD to see if it's actually running */
26d61aad 2634 if (!intel_dp_get_dpcd(intel_dp)) {
59cd09e1
JB
2635 intel_dp_link_down(intel_dp);
2636 return;
2637 }
2638
a60f0e38
JB
2639 /* Try to read the source of the interrupt */
2640 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
2641 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
2642 /* Clear interrupt source */
2643 intel_dp_aux_native_write_1(intel_dp,
2644 DP_DEVICE_SERVICE_IRQ_VECTOR,
2645 sink_irq_vector);
2646
2647 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
2648 intel_dp_handle_test_request(intel_dp);
2649 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
2650 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
2651 }
2652
1ffdff13 2653 if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
92fd8fd1 2654 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
da63a9f2 2655 drm_get_encoder_name(&intel_encoder->base));
33a34e4e
JB
2656 intel_dp_start_link_train(intel_dp);
2657 intel_dp_complete_link_train(intel_dp);
3ab9c637 2658 intel_dp_stop_link_train(intel_dp);
33a34e4e 2659 }
a4fc5ed6 2660}
a4fc5ed6 2661
caf9ab24 2662/* XXX this is probably wrong for multiple downstream ports */
71ba9000 2663static enum drm_connector_status
26d61aad 2664intel_dp_detect_dpcd(struct intel_dp *intel_dp)
71ba9000 2665{
caf9ab24
AJ
2666 uint8_t *dpcd = intel_dp->dpcd;
2667 bool hpd;
2668 uint8_t type;
2669
2670 if (!intel_dp_get_dpcd(intel_dp))
2671 return connector_status_disconnected;
2672
2673 /* if there's no downstream port, we're done */
2674 if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
26d61aad 2675 return connector_status_connected;
caf9ab24
AJ
2676
2677 /* If we're HPD-aware, SINK_COUNT changes dynamically */
2678 hpd = !!(intel_dp->downstream_ports[0] & DP_DS_PORT_HPD);
2679 if (hpd) {
23235177 2680 uint8_t reg;
caf9ab24 2681 if (!intel_dp_aux_native_read_retry(intel_dp, DP_SINK_COUNT,
23235177 2682 &reg, 1))
caf9ab24 2683 return connector_status_unknown;
23235177
AJ
2684 return DP_GET_SINK_COUNT(reg) ? connector_status_connected
2685 : connector_status_disconnected;
caf9ab24
AJ
2686 }
2687
2688 /* If no HPD, poke DDC gently */
2689 if (drm_probe_ddc(&intel_dp->adapter))
26d61aad 2690 return connector_status_connected;
caf9ab24
AJ
2691
2692 /* Well we tried, say unknown for unreliable port types */
2693 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
2694 if (type == DP_DS_PORT_TYPE_VGA || type == DP_DS_PORT_TYPE_NON_EDID)
2695 return connector_status_unknown;
2696
2697 /* Anything else is out of spec, warn and ignore */
2698 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
26d61aad 2699 return connector_status_disconnected;
71ba9000
AJ
2700}
2701
5eb08b69 2702static enum drm_connector_status
a9756bb5 2703ironlake_dp_detect(struct intel_dp *intel_dp)
5eb08b69 2704{
30add22d 2705 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1b469639
DL
2706 struct drm_i915_private *dev_priv = dev->dev_private;
2707 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
5eb08b69
ZW
2708 enum drm_connector_status status;
2709
fe16d949
CW
2710 /* Can't disconnect eDP, but you can close the lid... */
2711 if (is_edp(intel_dp)) {
30add22d 2712 status = intel_panel_detect(dev);
fe16d949
CW
2713 if (status == connector_status_unknown)
2714 status = connector_status_connected;
2715 return status;
2716 }
01cb9ea6 2717
1b469639
DL
2718 if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
2719 return connector_status_disconnected;
2720
26d61aad 2721 return intel_dp_detect_dpcd(intel_dp);
5eb08b69
ZW
2722}
2723
a4fc5ed6 2724static enum drm_connector_status
a9756bb5 2725g4x_dp_detect(struct intel_dp *intel_dp)
a4fc5ed6 2726{
30add22d 2727 struct drm_device *dev = intel_dp_to_dev(intel_dp);
a4fc5ed6 2728 struct drm_i915_private *dev_priv = dev->dev_private;
34f2be46 2729 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
10f76a38 2730 uint32_t bit;
5eb08b69 2731
35aad75f
JB
2732 /* Can't disconnect eDP, but you can close the lid... */
2733 if (is_edp(intel_dp)) {
2734 enum drm_connector_status status;
2735
2736 status = intel_panel_detect(dev);
2737 if (status == connector_status_unknown)
2738 status = connector_status_connected;
2739 return status;
2740 }
2741
34f2be46
VS
2742 switch (intel_dig_port->port) {
2743 case PORT_B:
26739f12 2744 bit = PORTB_HOTPLUG_LIVE_STATUS;
a4fc5ed6 2745 break;
34f2be46 2746 case PORT_C:
26739f12 2747 bit = PORTC_HOTPLUG_LIVE_STATUS;
a4fc5ed6 2748 break;
34f2be46 2749 case PORT_D:
26739f12 2750 bit = PORTD_HOTPLUG_LIVE_STATUS;
a4fc5ed6
KP
2751 break;
2752 default:
2753 return connector_status_unknown;
2754 }
2755
10f76a38 2756 if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
a4fc5ed6
KP
2757 return connector_status_disconnected;
2758
26d61aad 2759 return intel_dp_detect_dpcd(intel_dp);
a9756bb5
ZW
2760}
2761
8c241fef
KP
2762static struct edid *
2763intel_dp_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
2764{
9cd300e0 2765 struct intel_connector *intel_connector = to_intel_connector(connector);
d6f24d0f 2766
9cd300e0
JN
2767 /* use cached edid if we have one */
2768 if (intel_connector->edid) {
2769 struct edid *edid;
2770 int size;
2771
2772 /* invalid edid */
2773 if (IS_ERR(intel_connector->edid))
d6f24d0f
JB
2774 return NULL;
2775
9cd300e0 2776 size = (intel_connector->edid->extensions + 1) * EDID_LENGTH;
edbe1581 2777 edid = kmemdup(intel_connector->edid, size, GFP_KERNEL);
d6f24d0f
JB
2778 if (!edid)
2779 return NULL;
2780
d6f24d0f
JB
2781 return edid;
2782 }
8c241fef 2783
9cd300e0 2784 return drm_get_edid(connector, adapter);
8c241fef
KP
2785}
2786
2787static int
2788intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *adapter)
2789{
9cd300e0 2790 struct intel_connector *intel_connector = to_intel_connector(connector);
8c241fef 2791
9cd300e0
JN
2792 /* use cached edid if we have one */
2793 if (intel_connector->edid) {
2794 /* invalid edid */
2795 if (IS_ERR(intel_connector->edid))
2796 return 0;
2797
2798 return intel_connector_update_modes(connector,
2799 intel_connector->edid);
d6f24d0f
JB
2800 }
2801
9cd300e0 2802 return intel_ddc_get_modes(connector, adapter);
8c241fef
KP
2803}
2804
a9756bb5
ZW
2805static enum drm_connector_status
2806intel_dp_detect(struct drm_connector *connector, bool force)
2807{
2808 struct intel_dp *intel_dp = intel_attached_dp(connector);
d63885da
PZ
2809 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2810 struct intel_encoder *intel_encoder = &intel_dig_port->base;
fa90ecef 2811 struct drm_device *dev = connector->dev;
a9756bb5
ZW
2812 enum drm_connector_status status;
2813 struct edid *edid = NULL;
2814
2815 intel_dp->has_audio = false;
2816
2817 if (HAS_PCH_SPLIT(dev))
2818 status = ironlake_dp_detect(intel_dp);
2819 else
2820 status = g4x_dp_detect(intel_dp);
1b9be9d0 2821
a9756bb5
ZW
2822 if (status != connector_status_connected)
2823 return status;
2824
0d198328
AJ
2825 intel_dp_probe_oui(intel_dp);
2826
c3e5f67b
DV
2827 if (intel_dp->force_audio != HDMI_AUDIO_AUTO) {
2828 intel_dp->has_audio = (intel_dp->force_audio == HDMI_AUDIO_ON);
f684960e 2829 } else {
8c241fef 2830 edid = intel_dp_get_edid(connector, &intel_dp->adapter);
f684960e
CW
2831 if (edid) {
2832 intel_dp->has_audio = drm_detect_monitor_audio(edid);
f684960e
CW
2833 kfree(edid);
2834 }
a9756bb5
ZW
2835 }
2836
d63885da
PZ
2837 if (intel_encoder->type != INTEL_OUTPUT_EDP)
2838 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
a9756bb5 2839 return connector_status_connected;
a4fc5ed6
KP
2840}
2841
2842static int intel_dp_get_modes(struct drm_connector *connector)
2843{
df0e9248 2844 struct intel_dp *intel_dp = intel_attached_dp(connector);
dd06f90e 2845 struct intel_connector *intel_connector = to_intel_connector(connector);
fa90ecef 2846 struct drm_device *dev = connector->dev;
32f9d658 2847 int ret;
a4fc5ed6
KP
2848
2849 /* We should parse the EDID data and find out if it has an audio sink
2850 */
2851
8c241fef 2852 ret = intel_dp_get_edid_modes(connector, &intel_dp->adapter);
f8779fda 2853 if (ret)
32f9d658
ZW
2854 return ret;
2855
f8779fda 2856 /* if eDP has no EDID, fall back to fixed mode */
dd06f90e 2857 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
f8779fda 2858 struct drm_display_mode *mode;
dd06f90e
JN
2859 mode = drm_mode_duplicate(dev,
2860 intel_connector->panel.fixed_mode);
f8779fda 2861 if (mode) {
32f9d658
ZW
2862 drm_mode_probed_add(connector, mode);
2863 return 1;
2864 }
2865 }
2866 return 0;
a4fc5ed6
KP
2867}
2868
1aad7ac0
CW
2869static bool
2870intel_dp_detect_audio(struct drm_connector *connector)
2871{
2872 struct intel_dp *intel_dp = intel_attached_dp(connector);
2873 struct edid *edid;
2874 bool has_audio = false;
2875
8c241fef 2876 edid = intel_dp_get_edid(connector, &intel_dp->adapter);
1aad7ac0
CW
2877 if (edid) {
2878 has_audio = drm_detect_monitor_audio(edid);
1aad7ac0
CW
2879 kfree(edid);
2880 }
2881
2882 return has_audio;
2883}
2884
f684960e
CW
2885static int
2886intel_dp_set_property(struct drm_connector *connector,
2887 struct drm_property *property,
2888 uint64_t val)
2889{
e953fd7b 2890 struct drm_i915_private *dev_priv = connector->dev->dev_private;
53b41837 2891 struct intel_connector *intel_connector = to_intel_connector(connector);
da63a9f2
PZ
2892 struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
2893 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
f684960e
CW
2894 int ret;
2895
662595df 2896 ret = drm_object_property_set_value(&connector->base, property, val);
f684960e
CW
2897 if (ret)
2898 return ret;
2899
3f43c48d 2900 if (property == dev_priv->force_audio_property) {
1aad7ac0
CW
2901 int i = val;
2902 bool has_audio;
2903
2904 if (i == intel_dp->force_audio)
f684960e
CW
2905 return 0;
2906
1aad7ac0 2907 intel_dp->force_audio = i;
f684960e 2908
c3e5f67b 2909 if (i == HDMI_AUDIO_AUTO)
1aad7ac0
CW
2910 has_audio = intel_dp_detect_audio(connector);
2911 else
c3e5f67b 2912 has_audio = (i == HDMI_AUDIO_ON);
1aad7ac0
CW
2913
2914 if (has_audio == intel_dp->has_audio)
f684960e
CW
2915 return 0;
2916
1aad7ac0 2917 intel_dp->has_audio = has_audio;
f684960e
CW
2918 goto done;
2919 }
2920
e953fd7b 2921 if (property == dev_priv->broadcast_rgb_property) {
ae4edb80
DV
2922 bool old_auto = intel_dp->color_range_auto;
2923 uint32_t old_range = intel_dp->color_range;
2924
55bc60db
VS
2925 switch (val) {
2926 case INTEL_BROADCAST_RGB_AUTO:
2927 intel_dp->color_range_auto = true;
2928 break;
2929 case INTEL_BROADCAST_RGB_FULL:
2930 intel_dp->color_range_auto = false;
2931 intel_dp->color_range = 0;
2932 break;
2933 case INTEL_BROADCAST_RGB_LIMITED:
2934 intel_dp->color_range_auto = false;
2935 intel_dp->color_range = DP_COLOR_RANGE_16_235;
2936 break;
2937 default:
2938 return -EINVAL;
2939 }
ae4edb80
DV
2940
2941 if (old_auto == intel_dp->color_range_auto &&
2942 old_range == intel_dp->color_range)
2943 return 0;
2944
e953fd7b
CW
2945 goto done;
2946 }
2947
53b41837
YN
2948 if (is_edp(intel_dp) &&
2949 property == connector->dev->mode_config.scaling_mode_property) {
2950 if (val == DRM_MODE_SCALE_NONE) {
2951 DRM_DEBUG_KMS("no scaling not supported\n");
2952 return -EINVAL;
2953 }
2954
2955 if (intel_connector->panel.fitting_mode == val) {
2956 /* the eDP scaling property is not changed */
2957 return 0;
2958 }
2959 intel_connector->panel.fitting_mode = val;
2960
2961 goto done;
2962 }
2963
f684960e
CW
2964 return -EINVAL;
2965
2966done:
c0c36b94
CW
2967 if (intel_encoder->base.crtc)
2968 intel_crtc_restore_mode(intel_encoder->base.crtc);
f684960e
CW
2969
2970 return 0;
2971}
2972
a4fc5ed6 2973static void
73845adf 2974intel_dp_connector_destroy(struct drm_connector *connector)
a4fc5ed6 2975{
1d508706 2976 struct intel_connector *intel_connector = to_intel_connector(connector);
aaa6fd2a 2977
9cd300e0
JN
2978 if (!IS_ERR_OR_NULL(intel_connector->edid))
2979 kfree(intel_connector->edid);
2980
acd8db10
PZ
2981 /* Can't call is_edp() since the encoder may have been destroyed
2982 * already. */
2983 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
1d508706 2984 intel_panel_fini(&intel_connector->panel);
aaa6fd2a 2985
a4fc5ed6
KP
2986 drm_sysfs_connector_remove(connector);
2987 drm_connector_cleanup(connector);
55f78c43 2988 kfree(connector);
a4fc5ed6
KP
2989}
2990
00c09d70 2991void intel_dp_encoder_destroy(struct drm_encoder *encoder)
24d05927 2992{
da63a9f2
PZ
2993 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
2994 struct intel_dp *intel_dp = &intel_dig_port->dp;
bd173813 2995 struct drm_device *dev = intel_dp_to_dev(intel_dp);
24d05927
DV
2996
2997 i2c_del_adapter(&intel_dp->adapter);
2998 drm_encoder_cleanup(encoder);
bd943159
KP
2999 if (is_edp(intel_dp)) {
3000 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
bd173813 3001 mutex_lock(&dev->mode_config.mutex);
bd943159 3002 ironlake_panel_vdd_off_sync(intel_dp);
bd173813 3003 mutex_unlock(&dev->mode_config.mutex);
bd943159 3004 }
da63a9f2 3005 kfree(intel_dig_port);
24d05927
DV
3006}
3007
a4fc5ed6 3008static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
a4fc5ed6 3009 .mode_set = intel_dp_mode_set,
a4fc5ed6
KP
3010};
3011
3012static const struct drm_connector_funcs intel_dp_connector_funcs = {
2bd2ad64 3013 .dpms = intel_connector_dpms,
a4fc5ed6
KP
3014 .detect = intel_dp_detect,
3015 .fill_modes = drm_helper_probe_single_connector_modes,
f684960e 3016 .set_property = intel_dp_set_property,
73845adf 3017 .destroy = intel_dp_connector_destroy,
a4fc5ed6
KP
3018};
3019
3020static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
3021 .get_modes = intel_dp_get_modes,
3022 .mode_valid = intel_dp_mode_valid,
df0e9248 3023 .best_encoder = intel_best_encoder,
a4fc5ed6
KP
3024};
3025
a4fc5ed6 3026static const struct drm_encoder_funcs intel_dp_enc_funcs = {
24d05927 3027 .destroy = intel_dp_encoder_destroy,
a4fc5ed6
KP
3028};
3029
995b6762 3030static void
21d40d37 3031intel_dp_hot_plug(struct intel_encoder *intel_encoder)
c8110e52 3032{
fa90ecef 3033 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
c8110e52 3034
885a5014 3035 intel_dp_check_link_status(intel_dp);
c8110e52 3036}
6207937d 3037
e3421a18
ZW
3038/* Return which DP Port should be selected for Transcoder DP control */
3039int
0206e353 3040intel_trans_dp_port_sel(struct drm_crtc *crtc)
e3421a18
ZW
3041{
3042 struct drm_device *dev = crtc->dev;
fa90ecef
PZ
3043 struct intel_encoder *intel_encoder;
3044 struct intel_dp *intel_dp;
e3421a18 3045
fa90ecef
PZ
3046 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
3047 intel_dp = enc_to_intel_dp(&intel_encoder->base);
e3421a18 3048
fa90ecef
PZ
3049 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
3050 intel_encoder->type == INTEL_OUTPUT_EDP)
ea5b213a 3051 return intel_dp->output_reg;
e3421a18 3052 }
ea5b213a 3053
e3421a18
ZW
3054 return -1;
3055}
3056
36e83a18 3057/* check the VBT to see whether the eDP is on DP-D port */
cb0953d7 3058bool intel_dpd_is_edp(struct drm_device *dev)
36e83a18
ZY
3059{
3060 struct drm_i915_private *dev_priv = dev->dev_private;
3061 struct child_device_config *p_child;
3062 int i;
3063
41aa3448 3064 if (!dev_priv->vbt.child_dev_num)
36e83a18
ZY
3065 return false;
3066
41aa3448
RV
3067 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
3068 p_child = dev_priv->vbt.child_dev + i;
36e83a18
ZY
3069
3070 if (p_child->dvo_port == PORT_IDPD &&
3071 p_child->device_type == DEVICE_TYPE_eDP)
3072 return true;
3073 }
3074 return false;
3075}
3076
f684960e
CW
3077static void
3078intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
3079{
53b41837
YN
3080 struct intel_connector *intel_connector = to_intel_connector(connector);
3081
3f43c48d 3082 intel_attach_force_audio_property(connector);
e953fd7b 3083 intel_attach_broadcast_rgb_property(connector);
55bc60db 3084 intel_dp->color_range_auto = true;
53b41837
YN
3085
3086 if (is_edp(intel_dp)) {
3087 drm_mode_create_scaling_mode_property(connector->dev);
6de6d846
RC
3088 drm_object_attach_property(
3089 &connector->base,
53b41837 3090 connector->dev->mode_config.scaling_mode_property,
8e740cd1
YN
3091 DRM_MODE_SCALE_ASPECT);
3092 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
53b41837 3093 }
f684960e
CW
3094}
3095
67a54566
DV
3096static void
3097intel_dp_init_panel_power_sequencer(struct drm_device *dev,
f30d26e4
JN
3098 struct intel_dp *intel_dp,
3099 struct edp_power_seq *out)
67a54566
DV
3100{
3101 struct drm_i915_private *dev_priv = dev->dev_private;
3102 struct edp_power_seq cur, vbt, spec, final;
3103 u32 pp_on, pp_off, pp_div, pp;
453c5420
JB
3104 int pp_control_reg, pp_on_reg, pp_off_reg, pp_div_reg;
3105
3106 if (HAS_PCH_SPLIT(dev)) {
3107 pp_control_reg = PCH_PP_CONTROL;
3108 pp_on_reg = PCH_PP_ON_DELAYS;
3109 pp_off_reg = PCH_PP_OFF_DELAYS;
3110 pp_div_reg = PCH_PP_DIVISOR;
3111 } else {
3112 pp_control_reg = PIPEA_PP_CONTROL;
3113 pp_on_reg = PIPEA_PP_ON_DELAYS;
3114 pp_off_reg = PIPEA_PP_OFF_DELAYS;
3115 pp_div_reg = PIPEA_PP_DIVISOR;
3116 }
67a54566
DV
3117
3118 /* Workaround: Need to write PP_CONTROL with the unlock key as
3119 * the very first thing. */
453c5420
JB
3120 pp = ironlake_get_pp_control(intel_dp);
3121 I915_WRITE(pp_control_reg, pp);
67a54566 3122
453c5420
JB
3123 pp_on = I915_READ(pp_on_reg);
3124 pp_off = I915_READ(pp_off_reg);
3125 pp_div = I915_READ(pp_div_reg);
67a54566
DV
3126
3127 /* Pull timing values out of registers */
3128 cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
3129 PANEL_POWER_UP_DELAY_SHIFT;
3130
3131 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
3132 PANEL_LIGHT_ON_DELAY_SHIFT;
3133
3134 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
3135 PANEL_LIGHT_OFF_DELAY_SHIFT;
3136
3137 cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
3138 PANEL_POWER_DOWN_DELAY_SHIFT;
3139
3140 cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
3141 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
3142
3143 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
3144 cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
3145
41aa3448 3146 vbt = dev_priv->vbt.edp_pps;
67a54566
DV
3147
3148 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
3149 * our hw here, which are all in 100usec. */
3150 spec.t1_t3 = 210 * 10;
3151 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
3152 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
3153 spec.t10 = 500 * 10;
3154 /* This one is special and actually in units of 100ms, but zero
3155 * based in the hw (so we need to add 100 ms). But the sw vbt
 3156 * table multiplies it by 1000 to make it in units of 100usec,
3157 * too. */
3158 spec.t11_t12 = (510 + 100) * 10;
3159
3160 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
3161 vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
3162
3163 /* Use the max of the register settings and vbt. If both are
3164 * unset, fall back to the spec limits. */
3165#define assign_final(field) final.field = (max(cur.field, vbt.field) == 0 ? \
3166 spec.field : \
3167 max(cur.field, vbt.field))
3168 assign_final(t1_t3);
3169 assign_final(t8);
3170 assign_final(t9);
3171 assign_final(t10);
3172 assign_final(t11_t12);
3173#undef assign_final
3174
3175#define get_delay(field) (DIV_ROUND_UP(final.field, 10))
3176 intel_dp->panel_power_up_delay = get_delay(t1_t3);
3177 intel_dp->backlight_on_delay = get_delay(t8);
3178 intel_dp->backlight_off_delay = get_delay(t9);
3179 intel_dp->panel_power_down_delay = get_delay(t10);
3180 intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
3181#undef get_delay
3182
f30d26e4
JN
3183 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
3184 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
3185 intel_dp->panel_power_cycle_delay);
3186
3187 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
3188 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
3189
3190 if (out)
3191 *out = final;
3192}
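
A worked example of the unit handling above, assuming only plain C: the hardware fields and the spec fallbacks are in 100 usec units, and get_delay() (a DIV_ROUND_UP by 10) converts them to the millisecond values stored in intel_dp. The numbers are the spec defaults used in the function.

#include <stdio.h>

#define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

int main(void)
{
        /* Spec fallbacks from above, already in 100 usec units. */
        int t1_t3   = 210 * 10;         /* 210 ms power-up */
        int t11_t12 = (510 + 100) * 10; /* 510 ms plus the 100 ms zero base */

        /* get_delay(): 100 usec units -> milliseconds. */
        printf("panel_power_up_delay    = %d ms\n", DIV_ROUND_UP(t1_t3, 10));
        printf("panel_power_cycle_delay = %d ms\n", DIV_ROUND_UP(t11_t12, 10));
        return 0;
}
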
3193
3194static void
3195intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
3196 struct intel_dp *intel_dp,
3197 struct edp_power_seq *seq)
3198{
3199 struct drm_i915_private *dev_priv = dev->dev_private;
453c5420
JB
3200 u32 pp_on, pp_off, pp_div, port_sel = 0;
3201 int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
3202 int pp_on_reg, pp_off_reg, pp_div_reg;
3203
3204 if (HAS_PCH_SPLIT(dev)) {
3205 pp_on_reg = PCH_PP_ON_DELAYS;
3206 pp_off_reg = PCH_PP_OFF_DELAYS;
3207 pp_div_reg = PCH_PP_DIVISOR;
3208 } else {
3209 pp_on_reg = PIPEA_PP_ON_DELAYS;
3210 pp_off_reg = PIPEA_PP_OFF_DELAYS;
3211 pp_div_reg = PIPEA_PP_DIVISOR;
3212 }
3213
67a54566 3214 /* And finally store the new values in the power sequencer. */
f30d26e4
JN
3215 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
3216 (seq->t8 << PANEL_LIGHT_ON_DELAY_SHIFT);
3217 pp_off = (seq->t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
3218 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
67a54566
DV
3219 /* Compute the divisor for the pp clock, simply match the Bspec
3220 * formula. */
453c5420 3221 pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
f30d26e4 3222 pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
67a54566
DV
3223 << PANEL_POWER_CYCLE_DELAY_SHIFT);
3224
3225 /* Haswell doesn't have any port selection bits for the panel
3226 * power sequencer any more. */
bc7d38a4
ID
3227 if (IS_VALLEYVIEW(dev)) {
3228 port_sel = I915_READ(pp_on_reg) & 0xc0000000;
3229 } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
3230 if (dp_to_dig_port(intel_dp)->port == PORT_A)
453c5420 3231 port_sel = PANEL_POWER_PORT_DP_A;
67a54566 3232 else
453c5420 3233 port_sel = PANEL_POWER_PORT_DP_D;
67a54566
DV
3234 }
3235
453c5420
JB
3236 pp_on |= port_sel;
3237
3238 I915_WRITE(pp_on_reg, pp_on);
3239 I915_WRITE(pp_off_reg, pp_off);
3240 I915_WRITE(pp_div_reg, pp_div);
67a54566 3241
67a54566 3242 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
453c5420
JB
3243 I915_READ(pp_on_reg),
3244 I915_READ(pp_off_reg),
3245 I915_READ(pp_div_reg));
f684960e
CW
3246}
3247
ed92f0b2
PZ
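/*
 * Probe and initialize an eDP panel: read the DPCD (to filter out "ghost"
 * eDP encoders with no panel behind them), program the power sequencer
 * registers, cache the EDID, pick a fixed mode (the EDID preferred mode
 * first, the VBT mode as fallback) and set up the panel and backlight.
 * Returns false if no panel responds, true otherwise (including for
 * non-eDP ports, which are simply skipped).
 */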
3248static bool intel_edp_init_connector(struct intel_dp *intel_dp,
3249 struct intel_connector *intel_connector)
3250{
3251 struct drm_connector *connector = &intel_connector->base;
3252 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3253 struct drm_device *dev = intel_dig_port->base.base.dev;
3254 struct drm_i915_private *dev_priv = dev->dev_private;
3255 struct drm_display_mode *fixed_mode = NULL;
3256 struct edp_power_seq power_seq = { 0 };
3257 bool has_dpcd;
3258 struct drm_display_mode *scan;
3259 struct edid *edid;
3260
3261 if (!is_edp(intel_dp))
3262 return true;
3263
3264 intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
3265
3266	/* Cache the DPCD and EDID for eDP. */
3267 ironlake_edp_panel_vdd_on(intel_dp);
3268 has_dpcd = intel_dp_get_dpcd(intel_dp);
3269 ironlake_edp_panel_vdd_off(intel_dp, false);
3270
3271 if (has_dpcd) {
3272 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
3273 dev_priv->no_aux_handshake =
3274 intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
3275 DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
3276 } else {
3277 /* if this fails, presume the device is a ghost */
3278 DRM_INFO("failed to retrieve link info, disabling eDP\n");
ed92f0b2
PZ
3279 return false;
3280 }
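	/* Illustrative example (sink values assumed): a panel reporting DPCD
	 * rev 0x11 with DP_NO_AUX_HANDSHAKE_LINK_TRAINING set in its
	 * DP_MAX_DOWNSPREAD register causes dev_priv->no_aux_handshake to be
	 * set, recording that the sink can train the link without the AUX
	 * handshake. */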
3281
3282	/* We now know it's not a ghost; initialize the power sequencer registers. */
3283 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
3284 &power_seq);
3285
3286 ironlake_edp_panel_vdd_on(intel_dp);
3287 edid = drm_get_edid(connector, &intel_dp->adapter);
3288 if (edid) {
3289 if (drm_add_edid_modes(connector, edid)) {
3290 drm_mode_connector_update_edid_property(connector,
3291 edid);
3292 drm_edid_to_eld(connector, edid);
3293 } else {
3294 kfree(edid);
3295 edid = ERR_PTR(-EINVAL);
3296 }
3297 } else {
3298 edid = ERR_PTR(-ENOENT);
3299 }
3300 intel_connector->edid = edid;
3301
3302 /* prefer fixed mode from EDID if available */
3303 list_for_each_entry(scan, &connector->probed_modes, head) {
3304 if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
3305 fixed_mode = drm_mode_duplicate(dev, scan);
3306 break;
3307 }
3308 }
3309
3310	/* fall back to the VBT mode if available for eDP */
3311 if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
3312 fixed_mode = drm_mode_duplicate(dev,
3313 dev_priv->vbt.lfp_lvds_vbt_mode);
3314 if (fixed_mode)
3315 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
3316 }
3317
3318 ironlake_edp_panel_vdd_off(intel_dp, false);
3319
3320 intel_panel_init(&intel_connector->panel, fixed_mode);
3321 intel_panel_setup_backlight(connector);
3322
3323 return true;
3324}
3325
16c25533 3326bool
f0fec3f2
PZ
3327intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
3328 struct intel_connector *intel_connector)
a4fc5ed6 3329{
f0fec3f2
PZ
3330 struct drm_connector *connector = &intel_connector->base;
3331 struct intel_dp *intel_dp = &intel_dig_port->dp;
3332 struct intel_encoder *intel_encoder = &intel_dig_port->base;
3333 struct drm_device *dev = intel_encoder->base.dev;
a4fc5ed6 3334 struct drm_i915_private *dev_priv = dev->dev_private;
174edf1f 3335 enum port port = intel_dig_port->port;
5eb08b69 3336 const char *name = NULL;
b2a14755 3337 int type, error;
a4fc5ed6 3338
0767935e
DV
3339 /* Preserve the current hw state. */
3340 intel_dp->DP = I915_READ(intel_dp->output_reg);
dd06f90e 3341 intel_dp->attached_connector = intel_connector;
3d3dc149 3342
f7d24902 3343 type = DRM_MODE_CONNECTOR_DisplayPort;
19c03924
GB
3344 /*
3345	 * FIXME: We need to initialize built-in panels before external panels.
3346	 * For X0, DP_C is fixed as eDP. Revisit this as part of the VLV eDP cleanup.
3347 */
f7d24902
ID
3348 switch (port) {
3349 case PORT_A:
b329530c 3350 type = DRM_MODE_CONNECTOR_eDP;
f7d24902
ID
3351 break;
3352 case PORT_C:
3353 if (IS_VALLEYVIEW(dev))
3354 type = DRM_MODE_CONNECTOR_eDP;
3355 break;
3356 case PORT_D:
3357 if (HAS_PCH_SPLIT(dev) && intel_dpd_is_edp(dev))
3358 type = DRM_MODE_CONNECTOR_eDP;
3359 break;
3360 default: /* silence GCC warning */
3361 break;
b329530c
AJ
3362 }
3363
f7d24902
ID
3364 /*
3365 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
3366 * for DP the encoder type can be set by the caller to
3367 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
3368 */
3369 if (type == DRM_MODE_CONNECTOR_eDP)
3370 intel_encoder->type = INTEL_OUTPUT_EDP;
3371
e7281eab
ID
3372 DRM_DEBUG_KMS("Adding %s connector on port %c\n",
3373 type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
3374 port_name(port));
3375
b329530c 3376 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
a4fc5ed6
KP
3377 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
3378
a4fc5ed6
KP
3379 connector->interlace_allowed = true;
3380 connector->doublescan_allowed = 0;
3381
f0fec3f2
PZ
3382 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
3383 ironlake_panel_vdd_work);
a4fc5ed6 3384
df0e9248 3385 intel_connector_attach_encoder(intel_connector, intel_encoder);
a4fc5ed6
KP
3386 drm_sysfs_connector_add(connector);
3387
affa9354 3388 if (HAS_DDI(dev))
bcbc889b
PZ
3389 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
3390 else
3391 intel_connector->get_hw_state = intel_connector_get_hw_state;
3392
9ed35ab1
PZ
3393 intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
3394 if (HAS_DDI(dev)) {
3395 switch (intel_dig_port->port) {
3396 case PORT_A:
3397 intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
3398 break;
3399 case PORT_B:
3400 intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
3401 break;
3402 case PORT_C:
3403 intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
3404 break;
3405 case PORT_D:
3406 intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
3407 break;
3408 default:
3409 BUG();
3410 }
3411 }
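	/* Note on the AUX channel selection above: on non-DDI platforms the
	 * AUX channel control register sits at a fixed 0x10 offset from the
	 * port's output register, while DDI platforms use the dedicated
	 * per-port *_AUX_CH_CTL definitions instead. */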
e8cb4558 3412
a4fc5ed6 3413 /* Set up the DDC bus. */
ab9d7c30
PZ
3414 switch (port) {
3415 case PORT_A:
1d843f9d 3416 intel_encoder->hpd_pin = HPD_PORT_A;
ab9d7c30
PZ
3417 name = "DPDDC-A";
3418 break;
3419 case PORT_B:
1d843f9d 3420 intel_encoder->hpd_pin = HPD_PORT_B;
ab9d7c30
PZ
3421 name = "DPDDC-B";
3422 break;
3423 case PORT_C:
1d843f9d 3424 intel_encoder->hpd_pin = HPD_PORT_C;
ab9d7c30
PZ
3425 name = "DPDDC-C";
3426 break;
3427 case PORT_D:
1d843f9d 3428 intel_encoder->hpd_pin = HPD_PORT_D;
ab9d7c30
PZ
3429 name = "DPDDC-D";
3430 break;
3431 default:
ad1c0b19 3432 BUG();
5eb08b69
ZW
3433 }
3434
b2a14755
PZ
3435 error = intel_dp_i2c_init(intel_dp, intel_connector, name);
3436 WARN(error, "intel_dp_i2c_init failed with error %d for port %c\n",
3437 error, port_name(port));
c1f05264 3438
2b28bb1b
RV
3439 intel_dp->psr_setup_done = false;
3440
b2f246a8 3441 if (!intel_edp_init_connector(intel_dp, intel_connector)) {
15b1d171
PZ
3442 i2c_del_adapter(&intel_dp->adapter);
3443 if (is_edp(intel_dp)) {
3444 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
3445 mutex_lock(&dev->mode_config.mutex);
3446 ironlake_panel_vdd_off_sync(intel_dp);
3447 mutex_unlock(&dev->mode_config.mutex);
3448 }
b2f246a8
PZ
3449 drm_sysfs_connector_remove(connector);
3450 drm_connector_cleanup(connector);
16c25533 3451 return false;
b2f246a8 3452 }
32f9d658 3453
f684960e
CW
3454 intel_dp_add_properties(intel_dp, connector);
3455
a4fc5ed6
KP
3456	/* For the G4X desktop chip, PEG_BAND_GAP_DATA bits 3:0 must first be
3457	 * written with 0xd. Failure to do so results in spurious interrupts
3458	 * being generated on the port when a cable is not attached.
3459 */
3460 if (IS_G4X(dev) && !IS_GM45(dev)) {
3461 u32 temp = I915_READ(PEG_BAND_GAP_DATA);
3462 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
3463 }
16c25533
PZ
3464
3465 return true;
a4fc5ed6 3466}
f0fec3f2
PZ
3467
3468void
3469intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
3470{
3471 struct intel_digital_port *intel_dig_port;
3472 struct intel_encoder *intel_encoder;
3473 struct drm_encoder *encoder;
3474 struct intel_connector *intel_connector;
3475
3476 intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL);
3477 if (!intel_dig_port)
3478 return;
3479
3480 intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
3481 if (!intel_connector) {
3482 kfree(intel_dig_port);
3483 return;
3484 }
3485
3486 intel_encoder = &intel_dig_port->base;
3487 encoder = &intel_encoder->base;
3488
3489 drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
3490 DRM_MODE_ENCODER_TMDS);
00c09d70 3491 drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs);
f0fec3f2 3492
5bfe2ac0 3493 intel_encoder->compute_config = intel_dp_compute_config;
00c09d70
PZ
3494 intel_encoder->enable = intel_enable_dp;
3495 intel_encoder->pre_enable = intel_pre_enable_dp;
3496 intel_encoder->disable = intel_disable_dp;
3497 intel_encoder->post_disable = intel_post_disable_dp;
3498 intel_encoder->get_hw_state = intel_dp_get_hw_state;
045ac3b5 3499 intel_encoder->get_config = intel_dp_get_config;
89b667f8
JB
3500 if (IS_VALLEYVIEW(dev))
3501 intel_encoder->pre_pll_enable = intel_dp_pre_pll_enable;
f0fec3f2 3502
174edf1f 3503 intel_dig_port->port = port;
f0fec3f2
PZ
3504 intel_dig_port->dp.output_reg = output_reg;
3505
00c09d70 3506 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
f0fec3f2
PZ
3507 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
3508 intel_encoder->cloneable = false;
3509 intel_encoder->hot_plug = intel_dp_hot_plug;
3510
15b1d171
PZ
3511 if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
3512 drm_encoder_cleanup(encoder);
3513 kfree(intel_dig_port);
b2f246a8 3514 kfree(intel_connector);
15b1d171 3515 }
f0fec3f2 3516}