/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_edid.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"

#define DP_LINK_CHECK_TIMEOUT	(10 * 1000)

/**
 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
 * @intel_dp: DP struct
 *
 * If a CPU or PCH DP output is attached to an eDP panel, this function
 * will return true, and false otherwise.
 */
static bool is_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
}

static struct drm_device *intel_dp_to_dev(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);

	return intel_dig_port->base.base.dev;
}

static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
{
	return enc_to_intel_dp(&intel_attached_encoder(connector)->base);
}

static void intel_dp_link_down(struct intel_dp *intel_dp);

static int
intel_dp_max_link_bw(struct intel_dp *intel_dp)
{
	int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];

	switch (max_link_bw) {
	case DP_LINK_BW_1_62:
	case DP_LINK_BW_2_7:
		break;
	default:
		max_link_bw = DP_LINK_BW_1_62;
		break;
	}
	return max_link_bw;
}

/*
 * The units on the numbers in the next two are... bizarre.  Examples will
 * make it clearer; this one parallels an example in the eDP spec.
 *
 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
 *
 *     270000 * 1 * 8 / 10 == 216000
 *
 * The actual data capacity of that configuration is 2.16Gbit/s, so the
 * units are decakilobits.  ->clock in a drm_display_mode is in kilohertz -
 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
 * 119000.  At 18bpp that's 2142000 kilobits per second.
 *
 * Thus the strange-looking division by 10 in intel_dp_link_required, to
 * get the result in decakilobits instead of kilobits.
 */

static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	return (pixel_clock * bpp + 9) / 10;
}

static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	return (max_link_clock * max_lanes * 8) / 10;
}

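/*
 * Worked example (illustrative, using the numbers from the comment above):
 * the 1680x1050R mode has a 119000 kHz dot clock, so at 18bpp
 *
 *     intel_dp_link_required(119000, 18) == 214200
 *
 * while a single 2.7GHz lane provides
 *
 *     intel_dp_max_data_rate(270000, 1) == 216000
 *
 * Both values are in decakilobits, so 214200 <= 216000 and that mode just
 * fits on one 2.7GHz lane at 18bpp.
 */
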
static int
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct intel_connector *intel_connector = to_intel_connector(connector);
	struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
	int target_clock = mode->clock;
	int max_rate, mode_rate, max_lanes, max_link_clock;

	if (is_edp(intel_dp) && fixed_mode) {
		if (mode->hdisplay > fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay > fixed_mode->vdisplay)
			return MODE_PANEL;

		target_clock = fixed_mode->clock;
	}

	max_link_clock = drm_dp_bw_code_to_link_rate(intel_dp_max_link_bw(intel_dp));
	max_lanes = drm_dp_max_lane_count(intel_dp->dpcd);

	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);
	mode_rate = intel_dp_link_required(target_clock, 18);

	if (mode_rate > max_rate)
		return MODE_CLOCK_HIGH;

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	return MODE_OK;
}

static uint32_t
pack_aux(uint8_t *src, int src_bytes)
{
	int i;
	uint32_t v = 0;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= ((uint32_t) src[i]) << ((3-i) * 8);
	return v;
}

static void
unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int i;
	if (dst_bytes > 4)
		dst_bytes = 4;
	for (i = 0; i < dst_bytes; i++)
		dst[i] = src >> ((3-i) * 8);
}

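/*
 * Illustrative note: the AUX data registers take the message most
 * significant byte first.  For example, pack_aux((uint8_t []){ 0x00, 0x01,
 * 0x02 }, 3) returns 0x00010200, and unpack_aux() reverses that ordering
 * when reading replies back out of the registers.
 */
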
/* hrawclock is 1/4 the FSB frequency */
static int
intel_hrawclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t clkcfg;

	/* There is no CLKCFG reg in Valleyview. VLV hrawclk is 200 MHz */
	if (IS_VALLEYVIEW(dev))
		return 200;

	clkcfg = I915_READ(CLKCFG);
	switch (clkcfg & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_400:
		return 100;
	case CLKCFG_FSB_533:
		return 133;
	case CLKCFG_FSB_667:
		return 166;
	case CLKCFG_FSB_800:
		return 200;
	case CLKCFG_FSB_1067:
		return 266;
	case CLKCFG_FSB_1333:
		return 333;
	/* these two are just a guess; one of them might be right */
	case CLKCFG_FSB_1600:
	case CLKCFG_FSB_1600_ALT:
		return 400;
	default:
		return 133;
	}
}

static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_stat_reg;

	pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
	return (I915_READ(pp_stat_reg) & PP_ON) != 0;
}

static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_ctrl_reg;

	pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
	return (I915_READ(pp_ctrl_reg) & EDP_FORCE_VDD) != 0;
}

static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_stat_reg, pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;

	pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
	pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;

	if (!ironlake_edp_have_panel_power(intel_dp) && !ironlake_edp_have_panel_vdd(intel_dp)) {
		WARN(1, "eDP powered off while attempting aux channel communication.\n");
		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
				I915_READ(pp_stat_reg),
				I915_READ(pp_ctrl_reg));
	}
}

static uint32_t
intel_dp_aux_wait_done(struct intel_dp *intel_dp, bool has_aux_irq)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t status;
	bool done;

#define C (((status = I915_READ_NOTRACE(ch_ctl)) & DP_AUX_CH_CTL_SEND_BUSY) == 0)
	if (has_aux_irq)
		done = wait_event_timeout(dev_priv->gmbus_wait_queue, C,
					  msecs_to_jiffies_timeout(10));
	else
		done = wait_for_atomic(C, 10) == 0;
	if (!done)
		DRM_ERROR("dp aux hw did not signal timeout (has irq: %i)!\n",
			  has_aux_irq);
#undef C

	return status;
}

static uint32_t get_aux_clock_divider(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* The clock divider is based off the hrawclk,
	 * and would like to run at 2MHz.  So, take the
	 * hrawclk value and divide by 2 and use that
	 *
	 * Note that PCH attached eDP panels should use a 125MHz input
	 * clock divider.
	 */
	if (IS_VALLEYVIEW(dev)) {
		return 100;
	} else if (intel_dig_port->port == PORT_A) {
		if (HAS_DDI(dev))
			return DIV_ROUND_CLOSEST(
				intel_ddi_get_cdclk_freq(dev_priv), 2000);
		else if (IS_GEN6(dev) || IS_GEN7(dev))
			return 200; /* SNB & IVB eDP input clock at 400Mhz */
		else
			return 225; /* eDP input clock at 450Mhz */
	} else if (dev_priv->pch_id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
		/* Workaround for non-ULT HSW */
		return 74;
	} else if (HAS_PCH_SPLIT(dev)) {
		return DIV_ROUND_UP(intel_pch_rawclk(dev), 2);
	} else {
		return intel_hrawclk(dev) / 2;
	}
}

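/*
 * Illustrative note: the value returned here is programmed into the
 * DP_AUX_CH_CTL_BIT_CLOCK_2X field in intel_dp_aux_ch() below.  For
 * example, on a non-PCH-split part with an 800MHz FSB, intel_hrawclk()
 * reports 200 (MHz) and this function returns 100, dividing the raw clock
 * down to the roughly 2MHz rate the comment above targets.
 */
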
static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
		uint8_t *send, int send_bytes,
		uint8_t *recv, int recv_size)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = intel_dp->aux_ch_ctl_reg;
	uint32_t ch_data = ch_ctl + 4;
	int i, ret, recv_bytes;
	uint32_t status;
	uint32_t aux_clock_divider = get_aux_clock_divider(intel_dp);
	int try, precharge;
	bool has_aux_irq = INTEL_INFO(dev)->gen >= 5 && !IS_VALLEYVIEW(dev);

	/* dp aux is extremely sensitive to irq latency, hence request the
	 * lowest possible wakeup latency and so prevent the cpu from going into
	 * deep sleep states.
	 */
	pm_qos_update_request(&dev_priv->pm_qos, 0);

	intel_dp_check_edp(intel_dp);

	if (IS_GEN6(dev))
		precharge = 3;
	else
		precharge = 5;

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ_NOTRACE(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}

	if (try == 3) {
		WARN(1, "dp_aux_ch not started status 0x%08x\n",
		     I915_READ(ch_ctl));
		ret = -EBUSY;
		goto out;
	}

	/* Must try at least 3 times according to DP spec */
	for (try = 0; try < 5; try++) {
		/* Load the send data into the aux channel data registers */
		for (i = 0; i < send_bytes; i += 4)
			I915_WRITE(ch_data + i,
				   pack_aux(send + i, send_bytes - i));

		/* Send the command and wait for it to complete */
		I915_WRITE(ch_ctl,
			   DP_AUX_CH_CTL_SEND_BUSY |
			   (has_aux_irq ? DP_AUX_CH_CTL_INTERRUPT : 0) |
			   DP_AUX_CH_CTL_TIME_OUT_400us |
			   (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
			   (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
			   (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
			   DP_AUX_CH_CTL_DONE |
			   DP_AUX_CH_CTL_TIME_OUT_ERROR |
			   DP_AUX_CH_CTL_RECEIVE_ERROR);

		status = intel_dp_aux_wait_done(intel_dp, has_aux_irq);

		/* Clear done status and any errors */
		I915_WRITE(ch_ctl,
			   status |
			   DP_AUX_CH_CTL_DONE |
			   DP_AUX_CH_CTL_TIME_OUT_ERROR |
			   DP_AUX_CH_CTL_RECEIVE_ERROR);

		if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
			      DP_AUX_CH_CTL_RECEIVE_ERROR))
			continue;
		if (status & DP_AUX_CH_CTL_DONE)
			break;
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		ret = -EBUSY;
		goto out;
	}

	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		ret = -EIO;
		goto out;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		unpack_aux(I915_READ(ch_data + i),
			   recv + i, recv_bytes - i);

	ret = recv_bytes;
out:
	pm_qos_update_request(&dev_priv->pm_qos, PM_QOS_DEFAULT_VALUE);

	return ret;
}

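/*
 * Illustrative note on the message layout used by the native read/write
 * helpers below: the first four bytes handed to intel_dp_aux_ch() form the
 * AUX header (request type in the high nibble of byte 0, the 16-bit DPCD
 * address split across bytes 1-2, then length - 1), and any write payload
 * follows from byte 4 onwards.
 */
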
/* Write data to the aux channel in native mode */
static int
intel_dp_aux_native_write(struct intel_dp *intel_dp,
			  uint16_t address, uint8_t *send, int send_bytes)
{
	int ret;
	uint8_t	msg[20];
	int msg_bytes;
	uint8_t	ack;

	intel_dp_check_edp(intel_dp);
	if (send_bytes > 16)
		return -1;
	msg[0] = AUX_NATIVE_WRITE << 4;
	msg[1] = address >> 8;
	msg[2] = address & 0xff;
	msg[3] = send_bytes - 1;
	memcpy(&msg[4], send, send_bytes);
	msg_bytes = send_bytes + 4;
	for (;;) {
		ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, &ack, 1);
		if (ret < 0)
			return ret;
		if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
			break;
		else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
			udelay(100);
		else
			return -EIO;
	}
	return send_bytes;
}

/* Write a single byte to the aux channel in native mode */
static int
intel_dp_aux_native_write_1(struct intel_dp *intel_dp,
			    uint16_t address, uint8_t byte)
{
	return intel_dp_aux_native_write(intel_dp, address, &byte, 1);
}

/* read bytes from a native aux channel */
static int
intel_dp_aux_native_read(struct intel_dp *intel_dp,
			 uint16_t address, uint8_t *recv, int recv_bytes)
{
	uint8_t msg[4];
	int msg_bytes;
	uint8_t reply[20];
	int reply_bytes;
	uint8_t ack;
	int ret;

	intel_dp_check_edp(intel_dp);
	msg[0] = AUX_NATIVE_READ << 4;
	msg[1] = address >> 8;
	msg[2] = address & 0xff;
	msg[3] = recv_bytes - 1;

	msg_bytes = 4;
	reply_bytes = recv_bytes + 1;

	for (;;) {
		ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes,
				      reply, reply_bytes);
		if (ret == 0)
			return -EPROTO;
		if (ret < 0)
			return ret;
		ack = reply[0];
		if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) {
			memcpy(recv, reply + 1, ret - 1);
			return ret - 1;
		}
		else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
			udelay(100);
		else
			return -EIO;
	}
}

static int
intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
		    uint8_t write_byte, uint8_t *read_byte)
{
	struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
	struct intel_dp *intel_dp = container_of(adapter,
						struct intel_dp,
						adapter);
	uint16_t address = algo_data->address;
	uint8_t msg[5];
	uint8_t reply[2];
	unsigned retry;
	int msg_bytes;
	int reply_bytes;
	int ret;

	intel_dp_check_edp(intel_dp);
	/* Set up the command byte */
	if (mode & MODE_I2C_READ)
		msg[0] = AUX_I2C_READ << 4;
	else
		msg[0] = AUX_I2C_WRITE << 4;

	if (!(mode & MODE_I2C_STOP))
		msg[0] |= AUX_I2C_MOT << 4;

	msg[1] = address >> 8;
	msg[2] = address;

	switch (mode) {
	case MODE_I2C_WRITE:
		msg[3] = 0;
		msg[4] = write_byte;
		msg_bytes = 5;
		reply_bytes = 1;
		break;
	case MODE_I2C_READ:
		msg[3] = 0;
		msg_bytes = 4;
		reply_bytes = 2;
		break;
	default:
		msg_bytes = 3;
		reply_bytes = 1;
		break;
	}

	for (retry = 0; retry < 5; retry++) {
		ret = intel_dp_aux_ch(intel_dp,
				      msg, msg_bytes,
				      reply, reply_bytes);
		if (ret < 0) {
			DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
			return ret;
		}

		switch (reply[0] & AUX_NATIVE_REPLY_MASK) {
		case AUX_NATIVE_REPLY_ACK:
			/* I2C-over-AUX Reply field is only valid
			 * when paired with AUX ACK.
			 */
			break;
		case AUX_NATIVE_REPLY_NACK:
			DRM_DEBUG_KMS("aux_ch native nack\n");
			return -EREMOTEIO;
		case AUX_NATIVE_REPLY_DEFER:
			udelay(100);
			continue;
		default:
			DRM_ERROR("aux_ch invalid native reply 0x%02x\n",
				  reply[0]);
			return -EREMOTEIO;
		}

		switch (reply[0] & AUX_I2C_REPLY_MASK) {
		case AUX_I2C_REPLY_ACK:
			if (mode == MODE_I2C_READ) {
				*read_byte = reply[1];
			}
			return reply_bytes - 1;
		case AUX_I2C_REPLY_NACK:
			DRM_DEBUG_KMS("aux_i2c nack\n");
			return -EREMOTEIO;
		case AUX_I2C_REPLY_DEFER:
			DRM_DEBUG_KMS("aux_i2c defer\n");
			udelay(100);
			break;
		default:
			DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]);
			return -EREMOTEIO;
		}
	}

	DRM_ERROR("too many retries, giving up\n");
	return -EREMOTEIO;
}

static int
intel_dp_i2c_init(struct intel_dp *intel_dp,
		  struct intel_connector *intel_connector, const char *name)
{
	int ret;

	DRM_DEBUG_KMS("i2c_init %s\n", name);
	intel_dp->algo.running = false;
	intel_dp->algo.address = 0;
	intel_dp->algo.aux_ch = intel_dp_i2c_aux_ch;

	memset(&intel_dp->adapter, '\0', sizeof(intel_dp->adapter));
	intel_dp->adapter.owner = THIS_MODULE;
	intel_dp->adapter.class = I2C_CLASS_DDC;
	strncpy(intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1);
	intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0';
	intel_dp->adapter.algo_data = &intel_dp->algo;
	intel_dp->adapter.dev.parent = &intel_connector->base.kdev;

	ironlake_edp_panel_vdd_on(intel_dp);
	ret = i2c_dp_aux_add_bus(&intel_dp->adapter);
	ironlake_edp_panel_vdd_off(intel_dp, false);
	return ret;
}

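/*
 * Note: i2c_dp_aux_add_bus() registers intel_dp->adapter with the i2c core,
 * so later i2c accesses on this connector (e.g. EDID reads) are routed
 * through intel_dp_i2c_aux_ch() above as I2C-over-AUX transactions.  VDD is
 * forced on around the registration since bringing up the bus can touch the
 * AUX channel, which needs panel power/VDD on eDP panels.
 */
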
static void
intel_dp_set_clock(struct intel_encoder *encoder,
		   struct intel_crtc_config *pipe_config, int link_bw)
{
	struct drm_device *dev = encoder->base.dev;

	if (IS_G4X(dev)) {
		if (link_bw == DP_LINK_BW_1_62) {
			pipe_config->dpll.p1 = 2;
			pipe_config->dpll.p2 = 10;
			pipe_config->dpll.n = 2;
			pipe_config->dpll.m1 = 23;
			pipe_config->dpll.m2 = 8;
		} else {
			pipe_config->dpll.p1 = 1;
			pipe_config->dpll.p2 = 10;
			pipe_config->dpll.n = 1;
			pipe_config->dpll.m1 = 14;
			pipe_config->dpll.m2 = 2;
		}
		pipe_config->clock_set = true;
	} else if (IS_HASWELL(dev)) {
		/* Haswell has special-purpose DP DDI clocks. */
	} else if (HAS_PCH_SPLIT(dev)) {
		if (link_bw == DP_LINK_BW_1_62) {
			pipe_config->dpll.n = 1;
			pipe_config->dpll.p1 = 2;
			pipe_config->dpll.p2 = 10;
			pipe_config->dpll.m1 = 12;
			pipe_config->dpll.m2 = 9;
		} else {
			pipe_config->dpll.n = 2;
			pipe_config->dpll.p1 = 1;
			pipe_config->dpll.p2 = 10;
			pipe_config->dpll.m1 = 14;
			pipe_config->dpll.m2 = 8;
		}
		pipe_config->clock_set = true;
	} else if (IS_VALLEYVIEW(dev)) {
		/* FIXME: Need to figure out optimized DP clocks for vlv. */
	}
}

bool
intel_dp_compute_config(struct intel_encoder *encoder,
			struct intel_crtc_config *pipe_config)
{
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *intel_crtc = encoder->new_crtc;
	struct intel_connector *intel_connector = intel_dp->attached_connector;
	int lane_count, clock;
	int max_lane_count = drm_dp_max_lane_count(intel_dp->dpcd);
	int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
	int bpp, mode_rate;
	static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };
	int link_avail, link_clock;

	if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A)
		pipe_config->has_pch_encoder = true;

	pipe_config->has_dp_encoder = true;

	if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
		intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
				       adjusted_mode);
		if (!HAS_PCH_SPLIT(dev))
			intel_gmch_panel_fitting(intel_crtc, pipe_config,
						 intel_connector->panel.fitting_mode);
		else
			intel_pch_panel_fitting(intel_crtc, pipe_config,
						intel_connector->panel.fitting_mode);
	}

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
		return false;

	DRM_DEBUG_KMS("DP link computation with max lane count %i "
		      "max bw %02x pixel clock %iKHz\n",
		      max_lane_count, bws[max_clock], adjusted_mode->clock);

	/* Walk through all bpp values. Luckily they're all nicely spaced with 2
	 * bpc in between. */
	bpp = pipe_config->pipe_bpp;
	if (is_edp(intel_dp) && dev_priv->vbt.edp_bpp)
		bpp = min_t(int, bpp, dev_priv->vbt.edp_bpp);

	for (; bpp >= 6*3; bpp -= 2*3) {
		mode_rate = intel_dp_link_required(adjusted_mode->clock, bpp);

		for (clock = 0; clock <= max_clock; clock++) {
			for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
				link_clock = drm_dp_bw_code_to_link_rate(bws[clock]);
				link_avail = intel_dp_max_data_rate(link_clock,
								    lane_count);

				if (mode_rate <= link_avail) {
					goto found;
				}
			}
		}
	}

	return false;

found:
	if (intel_dp->color_range_auto) {
		/*
		 * See:
		 * CEA-861-E - 5.1 Default Encoding Parameters
		 * VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
		 */
		if (bpp != 18 && drm_match_cea_mode(adjusted_mode) > 1)
			intel_dp->color_range = DP_COLOR_RANGE_16_235;
		else
			intel_dp->color_range = 0;
	}

	if (intel_dp->color_range)
		pipe_config->limited_color_range = true;

	intel_dp->link_bw = bws[clock];
	intel_dp->lane_count = lane_count;
	pipe_config->pipe_bpp = bpp;
	pipe_config->port_clock = drm_dp_bw_code_to_link_rate(intel_dp->link_bw);

	DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n",
		      intel_dp->link_bw, intel_dp->lane_count,
		      pipe_config->port_clock, bpp);
	DRM_DEBUG_KMS("DP link bw required %i available %i\n",
		      mode_rate, link_avail);

	intel_link_compute_m_n(bpp, lane_count,
			       adjusted_mode->clock, pipe_config->port_clock,
			       &pipe_config->dp_m_n);

	intel_dp_set_clock(encoder, pipe_config, intel_dp->link_bw);

	return true;
}

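/*
 * Worked example (illustrative, assuming a 4-lane sink that reports
 * DP_LINK_BW_2_7): for a 1920x1080 mode with a 148500 kHz clock at 24bpp,
 * mode_rate is 148500 * 24 / 10 = 356400.  The search above tries the
 * lowest link rate first, so 1.62GHz x1 (129600) and x2 (259200) fail, but
 * 1.62GHz x4 gives 518400 >= 356400 and is selected without dropping bpp
 * or moving up to the 2.7GHz rate.
 */
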
void intel_dp_init_link_config(struct intel_dp *intel_dp)
{
	memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
	intel_dp->link_configuration[0] = intel_dp->link_bw;
	intel_dp->link_configuration[1] = intel_dp->lane_count;
	intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B;
	/*
	 * Check for DPCD version > 1.1 and enhanced framing support
	 */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) {
		intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
	}
}

static void ironlake_set_pll_cpu_edp(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", crtc->config.port_clock);
	dpa_ctl = I915_READ(DP_A);
	dpa_ctl &= ~DP_PLL_FREQ_MASK;

	if (crtc->config.port_clock == 162000) {
		/* For a long time we've carried around a ILK-DevA w/a for the
		 * 160MHz clock. If we're really unlucky, it's still required.
		 */
		DRM_DEBUG_KMS("160MHz cpu eDP clock, might need ilk devA w/a\n");
		dpa_ctl |= DP_PLL_FREQ_160MHZ;
		intel_dp->DP |= DP_PLL_FREQ_160MHZ;
	} else {
		dpa_ctl |= DP_PLL_FREQ_270MHZ;
		intel_dp->DP |= DP_PLL_FREQ_270MHZ;
	}

	I915_WRITE(DP_A, dpa_ctl);

	POSTING_READ(DP_A);
	udelay(500);
}

static void
intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
		  struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->crtc);

	/*
	 * There are four kinds of DP registers:
	 *
	 * 	IBX PCH
	 * 	SNB CPU
	 *	IVB CPU
	 * 	CPT PCH
	 *
	 * IBX PCH and CPU are the same for almost everything,
	 * except that the CPU DP PLL is configured in this
	 * register
	 *
	 * CPT PCH is quite different, having many bits moved
	 * to the TRANS_DP_CTL register instead. That
	 * configuration happens (oddly) in ironlake_pch_enable
	 */

	/* Preserve the BIOS-computed detected bit. This is
	 * supposed to be read-only.
	 */
	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;

	/* Handle DP bits in common between all three register formats */
	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
	intel_dp->DP |= DP_PORT_WIDTH(intel_dp->lane_count);

	if (intel_dp->has_audio) {
		DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
				 pipe_name(crtc->pipe));
		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
		intel_write_eld(encoder, adjusted_mode);
	}

	intel_dp_init_link_config(intel_dp);

	/* Split out the IBX/CPU vs CPT settings */

	if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;

		if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		intel_dp->DP |= crtc->pipe << 29;
	} else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
		if (!HAS_PCH_SPLIT(dev) && !IS_VALLEYVIEW(dev))
			intel_dp->DP |= intel_dp->color_range;

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			intel_dp->DP |= DP_SYNC_HS_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			intel_dp->DP |= DP_SYNC_VS_HIGH;
		intel_dp->DP |= DP_LINK_TRAIN_OFF;

		if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
			intel_dp->DP |= DP_ENHANCED_FRAMING;

		if (crtc->pipe == 1)
			intel_dp->DP |= DP_PIPEB_SELECT;
	} else {
		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
	}

	if (port == PORT_A && !IS_VALLEYVIEW(dev))
		ironlake_set_pll_cpu_edp(intel_dp);
}

#define IDLE_ON_MASK		(PP_ON | 0        | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
#define IDLE_ON_VALUE		(PP_ON | 0        | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_ON_IDLE)

#define IDLE_OFF_MASK		(PP_ON | 0        | PP_SEQUENCE_MASK | 0                     | PP_SEQUENCE_STATE_MASK)
#define IDLE_OFF_VALUE		(0     | 0        | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)

#define IDLE_CYCLE_MASK		(PP_ON | 0        | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
#define IDLE_CYCLE_VALUE	(0     | 0        | PP_SEQUENCE_NONE | 0                     | PP_SEQUENCE_STATE_OFF_IDLE)

static void ironlake_wait_panel_status(struct intel_dp *intel_dp,
				       u32 mask,
				       u32 value)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp_stat_reg, pp_ctrl_reg;

	pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
	pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;

	DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
			mask, value,
			I915_READ(pp_stat_reg),
			I915_READ(pp_ctrl_reg));

	if (_wait_for((I915_READ(pp_stat_reg) & mask) == value, 5000, 10)) {
		DRM_ERROR("Panel status timeout: status %08x control %08x\n",
				I915_READ(pp_stat_reg),
				I915_READ(pp_ctrl_reg));
	}
}

static void ironlake_wait_panel_on(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power on\n");
	ironlake_wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}

static void ironlake_wait_panel_off(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power off time\n");
	ironlake_wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}

static void ironlake_wait_panel_power_cycle(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power cycle\n");
	ironlake_wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}


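/*
 * Note: the three wrappers above all funnel into ironlake_wait_panel_status(),
 * which polls the power sequencer status register roughly every 10ms and
 * logs a DRM_ERROR if the requested state is not reached within 5 seconds
 * (_wait_for(..., 5000, 10)), so these waits are always bounded.
 */
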
/* Read the current pp_control value, unlocking the register if it
 * is locked
 */

static u32 ironlake_get_pp_control(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 control;
	u32 pp_ctrl_reg;

	pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;
	control = I915_READ(pp_ctrl_reg);

	control &= ~PANEL_UNLOCK_MASK;
	control |= PANEL_UNLOCK_REGS;
	return control;
}

void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_stat_reg, pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;
	DRM_DEBUG_KMS("Turn eDP VDD on\n");

	WARN(intel_dp->want_panel_vdd,
	     "eDP VDD already requested on\n");

	intel_dp->want_panel_vdd = true;

	if (ironlake_edp_have_panel_vdd(intel_dp)) {
		DRM_DEBUG_KMS("eDP VDD already on\n");
		return;
	}

	if (!ironlake_edp_have_panel_power(intel_dp))
		ironlake_wait_panel_power_cycle(intel_dp);

	pp = ironlake_get_pp_control(intel_dp);
	pp |= EDP_FORCE_VDD;

	pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
	pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);
	DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
			I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
	/*
	 * If the panel wasn't on, delay before accessing aux channel
	 */
	if (!ironlake_edp_have_panel_power(intel_dp)) {
		DRM_DEBUG_KMS("eDP was not running\n");
		msleep(intel_dp->panel_power_up_delay);
	}
}

static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_stat_reg, pp_ctrl_reg;

	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));

	if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) {
		pp = ironlake_get_pp_control(intel_dp);
		pp &= ~EDP_FORCE_VDD;

		pp_stat_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_STATUS : PCH_PP_STATUS;
		pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;

		I915_WRITE(pp_ctrl_reg, pp);
		POSTING_READ(pp_ctrl_reg);

		/* Make sure sequencer is idle before allowing subsequent activity */
		DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
				I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
		msleep(intel_dp->panel_power_down_delay);
	}
}

static void ironlake_panel_vdd_work(struct work_struct *__work)
{
	struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
						 struct intel_dp, panel_vdd_work);
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	mutex_lock(&dev->mode_config.mutex);
	ironlake_panel_vdd_off_sync(intel_dp);
	mutex_unlock(&dev->mode_config.mutex);
}

void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
{
	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP VDD off %d\n", intel_dp->want_panel_vdd);
	WARN(!intel_dp->want_panel_vdd, "eDP VDD not forced on");

	intel_dp->want_panel_vdd = false;

	if (sync) {
		ironlake_panel_vdd_off_sync(intel_dp);
	} else {
		/*
		 * Queue the timer to fire a long
		 * time from now (relative to the power down delay)
		 * to keep the panel power up across a sequence of operations
		 */
		schedule_delayed_work(&intel_dp->panel_vdd_work,
				      msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
	}
}

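/*
 * Typical usage of the VDD helpers above, as seen in intel_dp_i2c_init():
 *
 *	ironlake_edp_panel_vdd_on(intel_dp);
 *	... AUX channel traffic ...
 *	ironlake_edp_panel_vdd_off(intel_dp, false);
 *
 * The non-sync (false) off path defers the actual VDD drop through
 * panel_vdd_work, so a burst of AUX accesses doesn't bounce panel power on
 * and off for every transaction.
 */
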
void ironlake_edp_panel_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP power on\n");

	if (ironlake_edp_have_panel_power(intel_dp)) {
		DRM_DEBUG_KMS("eDP power already on\n");
		return;
	}

	ironlake_wait_panel_power_cycle(intel_dp);

	pp = ironlake_get_pp_control(intel_dp);
	if (IS_GEN5(dev)) {
		/* ILK workaround: disable reset around power sequence */
		pp &= ~PANEL_POWER_RESET;
		I915_WRITE(PCH_PP_CONTROL, pp);
		POSTING_READ(PCH_PP_CONTROL);
	}

	pp |= POWER_TARGET_ON;
	if (!IS_GEN5(dev))
		pp |= PANEL_POWER_RESET;

	pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	ironlake_wait_panel_on(intel_dp);

	if (IS_GEN5(dev)) {
		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
		I915_WRITE(PCH_PP_CONTROL, pp);
		POSTING_READ(PCH_PP_CONTROL);
	}
}

void ironlake_edp_panel_off(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP power off\n");

	WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");

	pp = ironlake_get_pp_control(intel_dp);
	/* We need to switch off panel power _and_ force vdd, for otherwise some
	 * panels get very unhappy and cease to work. */
	pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);

	pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	intel_dp->want_panel_vdd = false;

	ironlake_wait_panel_off(intel_dp);
}

void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = intel_dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = to_intel_crtc(intel_dig_port->base.base.crtc)->pipe;
	u32 pp;
	u32 pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("\n");
	/*
	 * If we enable the backlight right away following a panel power
	 * on, we may see slight flicker as the panel syncs with the eDP
	 * link.  So delay a bit to make sure the image is solid before
	 * allowing it to appear.
	 */
	msleep(intel_dp->backlight_on_delay);
	pp = ironlake_get_pp_control(intel_dp);
	pp |= EDP_BLC_ENABLE;

	pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);

	intel_panel_enable_backlight(dev, pipe);
}

void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;
	u32 pp_ctrl_reg;

	if (!is_edp(intel_dp))
		return;

	intel_panel_disable_backlight(dev);

	DRM_DEBUG_KMS("\n");
	pp = ironlake_get_pp_control(intel_dp);
	pp &= ~EDP_BLC_ENABLE;

	pp_ctrl_reg = IS_VALLEYVIEW(dev) ? PIPEA_PP_CONTROL : PCH_PP_CONTROL;

	I915_WRITE(pp_ctrl_reg, pp);
	POSTING_READ(pp_ctrl_reg);
	msleep(intel_dp->backlight_off_delay);
}

static void ironlake_edp_pll_on(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	assert_pipe_disabled(dev_priv,
			     to_intel_crtc(crtc)->pipe);

	DRM_DEBUG_KMS("\n");
	dpa_ctl = I915_READ(DP_A);
	WARN(dpa_ctl & DP_PLL_ENABLE, "dp pll on, should be off\n");
	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

	/* We don't adjust intel_dp->DP while tearing down the link, to
	 * facilitate link retraining (e.g. after hotplug). Hence clear all
	 * enable bits here to ensure that we don't enable too much. */
	intel_dp->DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
	intel_dp->DP |= DP_PLL_ENABLE;
	I915_WRITE(DP_A, intel_dp->DP);
	POSTING_READ(DP_A);
	udelay(200);
}

static void ironlake_edp_pll_off(struct intel_dp *intel_dp)
{
	struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
	struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	assert_pipe_disabled(dev_priv,
			     to_intel_crtc(crtc)->pipe);

	dpa_ctl = I915_READ(DP_A);
	WARN((dpa_ctl & DP_PLL_ENABLE) == 0,
	     "dp pll off, should be on\n");
	WARN(dpa_ctl & DP_PORT_EN, "dp port still on, should be off\n");

	/* We can't rely on the value tracked for the DP register in
	 * intel_dp->DP because link_down must not change that (otherwise link
	 * re-training will fail). */
	dpa_ctl &= ~DP_PLL_ENABLE;
	I915_WRITE(DP_A, dpa_ctl);
	POSTING_READ(DP_A);
	udelay(200);
}

/* If the sink supports it, try to set the power state appropriately */
void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
{
	int ret, i;

	/* Should have a valid DPCD by this point */
	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
		return;

	if (mode != DRM_MODE_DPMS_ON) {
		ret = intel_dp_aux_native_write_1(intel_dp, DP_SET_POWER,
						  DP_SET_POWER_D3);
		if (ret != 1)
			DRM_DEBUG_DRIVER("failed to write sink power state\n");
	} else {
		/*
		 * When turning on, we need to retry for 1ms to give the sink
		 * time to wake up.
		 */
		for (i = 0; i < 3; i++) {
			ret = intel_dp_aux_native_write_1(intel_dp,
							  DP_SET_POWER,
							  DP_SET_POWER_D0);
			if (ret == 1)
				break;
			msleep(1);
		}
	}
}

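/*
 * Illustrative note: DP_SET_POWER is a DPCD register (address 0x600), so
 * the writes above reach the sink over the AUX channel; D0 requests normal
 * operation and D3 a low-power state.  Sinks reporting a DPCD revision
 * below 1.1 don't implement it, hence the early return.
 */
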
static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
				  enum pipe *pipe)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp = I915_READ(intel_dp->output_reg);

	if (!(tmp & DP_PORT_EN))
		return false;

	if (port == PORT_A && IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) {
		*pipe = PORT_TO_PIPE_CPT(tmp);
	} else if (!HAS_PCH_CPT(dev) || port == PORT_A) {
		*pipe = PORT_TO_PIPE(tmp);
	} else {
		u32 trans_sel;
		u32 trans_dp;
		int i;

		switch (intel_dp->output_reg) {
		case PCH_DP_B:
			trans_sel = TRANS_DP_PORT_SEL_B;
			break;
		case PCH_DP_C:
			trans_sel = TRANS_DP_PORT_SEL_C;
			break;
		case PCH_DP_D:
			trans_sel = TRANS_DP_PORT_SEL_D;
			break;
		default:
			return true;
		}

		for_each_pipe(i) {
			trans_dp = I915_READ(TRANS_DP_CTL(i));
			if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) {
				*pipe = i;
				return true;
			}
		}

		DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n",
			      intel_dp->output_reg);
	}

	return true;
}

static void intel_dp_get_config(struct intel_encoder *encoder,
				struct intel_crtc_config *pipe_config)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	u32 tmp, flags = 0;
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);

	if ((port == PORT_A) || !HAS_PCH_CPT(dev)) {
		tmp = I915_READ(intel_dp->output_reg);
		if (tmp & DP_SYNC_HS_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (tmp & DP_SYNC_VS_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	} else {
		tmp = I915_READ(TRANS_DP_CTL(crtc->pipe));
		if (tmp & TRANS_DP_HSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PHSYNC;
		else
			flags |= DRM_MODE_FLAG_NHSYNC;

		if (tmp & TRANS_DP_VSYNC_ACTIVE_HIGH)
			flags |= DRM_MODE_FLAG_PVSYNC;
		else
			flags |= DRM_MODE_FLAG_NVSYNC;
	}

	pipe_config->adjusted_mode.flags |= flags;

	if (dp_to_dig_port(intel_dp)->port == PORT_A) {
		if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_160MHZ)
			pipe_config->port_clock = 162000;
		else
			pipe_config->port_clock = 270000;
	}
}

static bool is_edp_psr(struct intel_dp *intel_dp)
{
	return is_edp(intel_dp) &&
		intel_dp->psr_dpcd[0] & DP_PSR_IS_SUPPORTED;
}

static bool intel_edp_is_psr_enabled(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!IS_HASWELL(dev))
		return false;

	return I915_READ(EDP_PSR_CTL) & EDP_PSR_ENABLE;
}

static void intel_edp_psr_write_vsc(struct intel_dp *intel_dp,
				    struct edp_vsc_psr *vsc_psr)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc = to_intel_crtc(dig_port->base.base.crtc);
	u32 ctl_reg = HSW_TVIDEO_DIP_CTL(crtc->config.cpu_transcoder);
	u32 data_reg = HSW_TVIDEO_DIP_VSC_DATA(crtc->config.cpu_transcoder);
	uint32_t *data = (uint32_t *) vsc_psr;
	unsigned int i;

	/* As per BSpec (Pipe Video Data Island Packet), we need to disable
	   the video DIP before programming the video DIP data buffer
	   registers for the DIP being updated. */
	I915_WRITE(ctl_reg, 0);
	POSTING_READ(ctl_reg);

	for (i = 0; i < VIDEO_DIP_VSC_DATA_SIZE; i += 4) {
		if (i < sizeof(struct edp_vsc_psr))
			I915_WRITE(data_reg + i, *data++);
		else
			I915_WRITE(data_reg + i, 0);
	}

	I915_WRITE(ctl_reg, VIDEO_DIP_ENABLE_VSC_HSW);
	POSTING_READ(ctl_reg);
}

static void intel_edp_psr_setup(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct edp_vsc_psr psr_vsc;

	if (intel_dp->psr_setup_done)
		return;

	/* Prepare VSC packet as per EDP 1.3 spec, Table 3.10 */
	memset(&psr_vsc, 0, sizeof(psr_vsc));
	psr_vsc.sdp_header.HB0 = 0;
	psr_vsc.sdp_header.HB1 = 0x7;
	psr_vsc.sdp_header.HB2 = 0x2;
	psr_vsc.sdp_header.HB3 = 0x8;
	intel_edp_psr_write_vsc(intel_dp, &psr_vsc);

	/* Avoid continuous PSR exit by masking memup and hpd */
	I915_WRITE(EDP_PSR_DEBUG_CTL, EDP_PSR_DEBUG_MASK_MEMUP |
		   EDP_PSR_DEBUG_MASK_HPD);

	intel_dp->psr_setup_done = true;
}

static void intel_edp_psr_enable_sink(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t aux_clock_divider = get_aux_clock_divider(intel_dp);
	int precharge = 0x3;
	int msg_size = 5;       /* Header(4) + Message(1) */

	/* Enable PSR in sink */
	if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT)
		intel_dp_aux_native_write_1(intel_dp, DP_PSR_EN_CFG,
					    DP_PSR_ENABLE &
					    ~DP_PSR_MAIN_LINK_ACTIVE);
	else
		intel_dp_aux_native_write_1(intel_dp, DP_PSR_EN_CFG,
					    DP_PSR_ENABLE |
					    DP_PSR_MAIN_LINK_ACTIVE);

	/* Setup AUX registers */
	I915_WRITE(EDP_PSR_AUX_DATA1, EDP_PSR_DPCD_COMMAND);
	I915_WRITE(EDP_PSR_AUX_DATA2, EDP_PSR_DPCD_NORMAL_OPERATION);
	I915_WRITE(EDP_PSR_AUX_CTL,
		   DP_AUX_CH_CTL_TIME_OUT_400us |
		   (msg_size << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
		   (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
		   (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT));
}

static void intel_edp_psr_enable_source(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t max_sleep_time = 0x1f;
	uint32_t idle_frames = 1;
	uint32_t val = 0x0;

	if (intel_dp->psr_dpcd[1] & DP_PSR_NO_TRAIN_ON_EXIT) {
		val |= EDP_PSR_LINK_STANDBY;
		val |= EDP_PSR_TP2_TP3_TIME_0us;
		val |= EDP_PSR_TP1_TIME_0us;
		val |= EDP_PSR_SKIP_AUX_EXIT;
	} else
		val |= EDP_PSR_LINK_DISABLE;

	I915_WRITE(EDP_PSR_CTL, val |
		   EDP_PSR_MIN_LINK_ENTRY_TIME_8_LINES |
		   max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
		   idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
		   EDP_PSR_ENABLE);
}

static bool intel_edp_psr_match_conditions(struct intel_dp *intel_dp)
{
	struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
	struct drm_device *dev = dig_port->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dig_port->base.base.crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_i915_gem_object *obj = to_intel_framebuffer(crtc->fb)->obj;
	struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;

	if (!IS_HASWELL(dev)) {
		DRM_DEBUG_KMS("PSR not supported on this platform\n");
		dev_priv->no_psr_reason = PSR_NO_SOURCE;
		return false;
	}

	if ((intel_encoder->type != INTEL_OUTPUT_EDP) ||
	    (dig_port->port != PORT_A)) {
		DRM_DEBUG_KMS("HSW ties PSR to DDI A (eDP)\n");
		dev_priv->no_psr_reason = PSR_HSW_NOT_DDIA;
		return false;
	}

	if (!is_edp_psr(intel_dp)) {
		DRM_DEBUG_KMS("PSR not supported by this panel\n");
		dev_priv->no_psr_reason = PSR_NO_SINK;
		return false;
	}

	if (!intel_crtc->active || !crtc->fb || !crtc->mode.clock) {
		DRM_DEBUG_KMS("crtc not active for PSR\n");
		dev_priv->no_psr_reason = PSR_CRTC_NOT_ACTIVE;
		return false;
	}

	if (obj->tiling_mode != I915_TILING_X ||
	    obj->fence_reg == I915_FENCE_REG_NONE) {
		DRM_DEBUG_KMS("PSR condition failed: fb not tiled or fenced\n");
		dev_priv->no_psr_reason = PSR_NOT_TILED;
		return false;
	}

	if (I915_READ(SPRCTL(intel_crtc->pipe)) & SPRITE_ENABLE) {
		DRM_DEBUG_KMS("PSR condition failed: Sprite is Enabled\n");
		dev_priv->no_psr_reason = PSR_SPRITE_ENABLED;
		return false;
	}

	if (I915_READ(HSW_STEREO_3D_CTL(intel_crtc->config.cpu_transcoder)) &
	    S3D_ENABLE) {
		DRM_DEBUG_KMS("PSR condition failed: Stereo 3D is Enabled\n");
		dev_priv->no_psr_reason = PSR_S3D_ENABLED;
		return false;
	}

	if (crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) {
		DRM_DEBUG_KMS("PSR condition failed: Interlaced is Enabled\n");
		dev_priv->no_psr_reason = PSR_INTERLACED_ENABLED;
		return false;
	}

	return true;
}

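/*
 * Note: every check above must pass before PSR is turned on;
 * intel_edp_psr_enable() below bails out unless
 * intel_edp_psr_match_conditions() returns true and PSR isn't already
 * enabled, and only then programs the sink and the source.
 */
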
void intel_edp_psr_enable(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);

	if (!intel_edp_psr_match_conditions(intel_dp) ||
	    intel_edp_is_psr_enabled(dev))
		return;

	/* Setup PSR once */
	intel_edp_psr_setup(intel_dp);

	/* Enable PSR on the panel */
	intel_edp_psr_enable_sink(intel_dp);

	/* Enable PSR on the host */
	intel_edp_psr_enable_source(intel_dp);
}

void intel_edp_psr_disable(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp_to_dev(intel_dp);
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!intel_edp_is_psr_enabled(dev))
		return;

	I915_WRITE(EDP_PSR_CTL, I915_READ(EDP_PSR_CTL) & ~EDP_PSR_ENABLE);

	/* Wait till PSR is idle */
	if (_wait_for((I915_READ(EDP_PSR_STATUS_CTL) &
		       EDP_PSR_STATUS_STATE_MASK) == 0, 2000, 10))
		DRM_ERROR("Timed out waiting for PSR Idle State\n");
}

static void intel_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	enum port port = dp_to_dig_port(intel_dp)->port;
	struct drm_device *dev = encoder->base.dev;

	/* Make sure the panel is off before trying to change the mode. But also
	 * ensure that we have vdd while we switch off the panel. */
	ironlake_edp_panel_vdd_on(intel_dp);
	ironlake_edp_backlight_off(intel_dp);
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
	ironlake_edp_panel_off(intel_dp);

	/* cpu eDP may only be disabled _after_ the cpu pipe/plane is disabled. */
	if (!(port == PORT_A || IS_VALLEYVIEW(dev)))
		intel_dp_link_down(intel_dp);
}

1610
2bd2ad64 1611static void intel_post_disable_dp(struct intel_encoder *encoder)
d240f20f 1612{
2bd2ad64 1613 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
982a3866 1614 enum port port = dp_to_dig_port(intel_dp)->port;
b2634017 1615 struct drm_device *dev = encoder->base.dev;
2bd2ad64 1616
982a3866 1617 if (port == PORT_A || IS_VALLEYVIEW(dev)) {
3739850b 1618 intel_dp_link_down(intel_dp);
b2634017
JB
1619 if (!IS_VALLEYVIEW(dev))
1620 ironlake_edp_pll_off(intel_dp);
3739850b 1621 }
2bd2ad64
DV
1622}
1623
e8cb4558 1624static void intel_enable_dp(struct intel_encoder *encoder)
d240f20f 1625{
e8cb4558
DV
1626 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
1627 struct drm_device *dev = encoder->base.dev;
1628 struct drm_i915_private *dev_priv = dev->dev_private;
1629 uint32_t dp_reg = I915_READ(intel_dp->output_reg);
5d613501 1630
0c33d8d7
DV
1631 if (WARN_ON(dp_reg & DP_PORT_EN))
1632 return;
5d613501 1633
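	/* Ordering note: panel VDD is forced on first so the AUX channel works
	 * on eDP, the sink is woken and clock-recovery training started, the
	 * panel is powered and the VDD override dropped, then channel
	 * equalization completes and the backlight comes on last. */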
97af61f5 1634 ironlake_edp_panel_vdd_on(intel_dp);
f01eca2e 1635 intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
33a34e4e 1636 intel_dp_start_link_train(intel_dp);
97af61f5 1637 ironlake_edp_panel_on(intel_dp);
bd943159 1638 ironlake_edp_panel_vdd_off(intel_dp, true);
33a34e4e 1639 intel_dp_complete_link_train(intel_dp);
3ab9c637 1640 intel_dp_stop_link_train(intel_dp);
f01eca2e 1641 ironlake_edp_backlight_on(intel_dp);
89b667f8
JB
1642
1643 if (IS_VALLEYVIEW(dev)) {
1644 struct intel_digital_port *dport =
1645 enc_to_dig_port(&encoder->base);
1646 int channel = vlv_dport_to_channel(dport);
1647
1648 vlv_wait_port_ready(dev_priv, channel);
1649 }
d240f20f
JB
1650}
1651
2bd2ad64 1652static void intel_pre_enable_dp(struct intel_encoder *encoder)
a4fc5ed6 1653{
2bd2ad64 1654 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
bc7d38a4 1655 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
b2634017 1656 struct drm_device *dev = encoder->base.dev;
89b667f8 1657 struct drm_i915_private *dev_priv = dev->dev_private;
a4fc5ed6 1658
bc7d38a4 1659 if (dport->port == PORT_A && !IS_VALLEYVIEW(dev))
2bd2ad64 1660 ironlake_edp_pll_on(intel_dp);
89b667f8
JB
1661
1662 if (IS_VALLEYVIEW(dev)) {
89b667f8
JB
1663 struct intel_crtc *intel_crtc =
1664 to_intel_crtc(encoder->base.crtc);
1665 int port = vlv_dport_to_channel(dport);
1666 int pipe = intel_crtc->pipe;
1667 u32 val;
1668
ae99258f 1669 val = vlv_dpio_read(dev_priv, DPIO_DATA_LANE_A(port));
89b667f8
JB
1670 val = 0;
1671 if (pipe)
1672 val |= (1<<21);
1673 else
1674 val &= ~(1<<21);
1675 val |= 0x001000c4;
ae99258f 1676 vlv_dpio_write(dev_priv, DPIO_DATA_CHANNEL(port), val);
89b667f8 1677
ae99258f 1678 vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF0(port),
89b667f8 1679 0x00760018);
ae99258f 1680 vlv_dpio_write(dev_priv, DPIO_PCS_CLOCKBUF8(port),
89b667f8
JB
1681 0x00400888);
1682 }
1683}
1684
1685static void intel_dp_pre_pll_enable(struct intel_encoder *encoder)
1686{
1687 struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
1688 struct drm_device *dev = encoder->base.dev;
1689 struct drm_i915_private *dev_priv = dev->dev_private;
1690 int port = vlv_dport_to_channel(dport);
1691
1692 if (!IS_VALLEYVIEW(dev))
1693 return;
1694
89b667f8 1695 /* Program Tx lane resets to default */
ae99258f 1696 vlv_dpio_write(dev_priv, DPIO_PCS_TX(port),
89b667f8
JB
1697 DPIO_PCS_TX_LANE2_RESET |
1698 DPIO_PCS_TX_LANE1_RESET);
ae99258f 1699 vlv_dpio_write(dev_priv, DPIO_PCS_CLK(port),
89b667f8
JB
1700 DPIO_PCS_CLK_CRI_RXEB_EIOS_EN |
1701 DPIO_PCS_CLK_CRI_RXDIGFILTSG_EN |
1702 (1<<DPIO_PCS_CLK_DATAWIDTH_SHIFT) |
1703 DPIO_PCS_CLK_SOFT_RESET);
1704
1705 /* Fix up inter-pair skew failure */
ae99258f
JN
1706 vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER1(port), 0x00750f00);
1707 vlv_dpio_write(dev_priv, DPIO_TX_CTL(port), 0x00001500);
1708 vlv_dpio_write(dev_priv, DPIO_TX_LANE(port), 0x40400000);
a4fc5ed6
KP
1709}
1710
1711/*
df0c237d
JB
1712 * Native read with retry for link status and receiver capability reads for
1713 * cases where the sink may still be asleep.
a4fc5ed6
KP
1714 */
1715static bool
df0c237d
JB
1716intel_dp_aux_native_read_retry(struct intel_dp *intel_dp, uint16_t address,
1717 uint8_t *recv, int recv_bytes)
a4fc5ed6 1718{
61da5fab
JB
1719 int ret, i;
1720
df0c237d
JB
1721 /*
1722 * Sinks are *supposed* to come up within 1ms from an off state,
1723 * but we're also supposed to retry 3 times per the spec.
1724 */
61da5fab 1725 for (i = 0; i < 3; i++) {
df0c237d
JB
1726 ret = intel_dp_aux_native_read(intel_dp, address, recv,
1727 recv_bytes);
1728 if (ret == recv_bytes)
61da5fab
JB
1729 return true;
1730 msleep(1);
1731 }
a4fc5ed6 1732
61da5fab 1733 return false;
a4fc5ed6
KP
1734}
1735
1736/*
1737 * Fetch AUX CH registers 0x202 - 0x207 which contain
1738 * link status information
1739 */
1740static bool
93f62dad 1741intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
a4fc5ed6 1742{
df0c237d
JB
1743 return intel_dp_aux_native_read_retry(intel_dp,
1744 DP_LANE0_1_STATUS,
93f62dad 1745 link_status,
df0c237d 1746 DP_LINK_STATUS_SIZE);
a4fc5ed6
KP
1747}
1748
a4fc5ed6
KP
1749#if 0
1750static char *voltage_names[] = {
1751 "0.4V", "0.6V", "0.8V", "1.2V"
1752};
1753static char *pre_emph_names[] = {
1754 "0dB", "3.5dB", "6dB", "9.5dB"
1755};
1756static char *link_train_names[] = {
1757 "pattern 1", "pattern 2", "idle", "off"
1758};
1759#endif
1760
1761/*
1762 * These are source-specific values; current Intel hardware supports
1763 * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB
1764 */
a4fc5ed6
KP
1765
1766static uint8_t
1a2eb460 1767intel_dp_voltage_max(struct intel_dp *intel_dp)
a4fc5ed6 1768{
30add22d 1769 struct drm_device *dev = intel_dp_to_dev(intel_dp);
bc7d38a4 1770 enum port port = dp_to_dig_port(intel_dp)->port;
1a2eb460 1771
e2fa6fba
P
1772 if (IS_VALLEYVIEW(dev))
1773 return DP_TRAIN_VOLTAGE_SWING_1200;
bc7d38a4 1774 else if (IS_GEN7(dev) && port == PORT_A)
1a2eb460 1775 return DP_TRAIN_VOLTAGE_SWING_800;
bc7d38a4 1776 else if (HAS_PCH_CPT(dev) && port != PORT_A)
1a2eb460
KP
1777 return DP_TRAIN_VOLTAGE_SWING_1200;
1778 else
1779 return DP_TRAIN_VOLTAGE_SWING_800;
1780}
1781
1782static uint8_t
1783intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
1784{
30add22d 1785 struct drm_device *dev = intel_dp_to_dev(intel_dp);
bc7d38a4 1786 enum port port = dp_to_dig_port(intel_dp)->port;
1a2eb460 1787
22b8bf17 1788 if (HAS_DDI(dev)) {
d6c0d722
PZ
1789 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
1790 case DP_TRAIN_VOLTAGE_SWING_400:
1791 return DP_TRAIN_PRE_EMPHASIS_9_5;
1792 case DP_TRAIN_VOLTAGE_SWING_600:
1793 return DP_TRAIN_PRE_EMPHASIS_6;
1794 case DP_TRAIN_VOLTAGE_SWING_800:
1795 return DP_TRAIN_PRE_EMPHASIS_3_5;
1796 case DP_TRAIN_VOLTAGE_SWING_1200:
1797 default:
1798 return DP_TRAIN_PRE_EMPHASIS_0;
1799 }
e2fa6fba
P
1800 } else if (IS_VALLEYVIEW(dev)) {
1801 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
1802 case DP_TRAIN_VOLTAGE_SWING_400:
1803 return DP_TRAIN_PRE_EMPHASIS_9_5;
1804 case DP_TRAIN_VOLTAGE_SWING_600:
1805 return DP_TRAIN_PRE_EMPHASIS_6;
1806 case DP_TRAIN_VOLTAGE_SWING_800:
1807 return DP_TRAIN_PRE_EMPHASIS_3_5;
1808 case DP_TRAIN_VOLTAGE_SWING_1200:
1809 default:
1810 return DP_TRAIN_PRE_EMPHASIS_0;
1811 }
bc7d38a4 1812 } else if (IS_GEN7(dev) && port == PORT_A) {
1a2eb460
KP
1813 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
1814 case DP_TRAIN_VOLTAGE_SWING_400:
1815 return DP_TRAIN_PRE_EMPHASIS_6;
1816 case DP_TRAIN_VOLTAGE_SWING_600:
1817 case DP_TRAIN_VOLTAGE_SWING_800:
1818 return DP_TRAIN_PRE_EMPHASIS_3_5;
1819 default:
1820 return DP_TRAIN_PRE_EMPHASIS_0;
1821 }
1822 } else {
1823 switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
1824 case DP_TRAIN_VOLTAGE_SWING_400:
1825 return DP_TRAIN_PRE_EMPHASIS_6;
1826 case DP_TRAIN_VOLTAGE_SWING_600:
1827 return DP_TRAIN_PRE_EMPHASIS_6;
1828 case DP_TRAIN_VOLTAGE_SWING_800:
1829 return DP_TRAIN_PRE_EMPHASIS_3_5;
1830 case DP_TRAIN_VOLTAGE_SWING_1200:
1831 default:
1832 return DP_TRAIN_PRE_EMPHASIS_0;
1833 }
a4fc5ed6
KP
1834 }
1835}
1836
e2fa6fba
P
1837static uint32_t intel_vlv_signal_levels(struct intel_dp *intel_dp)
1838{
1839 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1840 struct drm_i915_private *dev_priv = dev->dev_private;
1841 struct intel_digital_port *dport = dp_to_dig_port(intel_dp);
1842 unsigned long demph_reg_value, preemph_reg_value,
1843 uniqtranscale_reg_value;
1844 uint8_t train_set = intel_dp->train_set[0];
cece5d58 1845 int port = vlv_dport_to_channel(dport);
e2fa6fba
P
1846
1847 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
1848 case DP_TRAIN_PRE_EMPHASIS_0:
1849 preemph_reg_value = 0x0004000;
1850 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
1851 case DP_TRAIN_VOLTAGE_SWING_400:
1852 demph_reg_value = 0x2B405555;
1853 uniqtranscale_reg_value = 0x552AB83A;
1854 break;
1855 case DP_TRAIN_VOLTAGE_SWING_600:
1856 demph_reg_value = 0x2B404040;
1857 uniqtranscale_reg_value = 0x5548B83A;
1858 break;
1859 case DP_TRAIN_VOLTAGE_SWING_800:
1860 demph_reg_value = 0x2B245555;
1861 uniqtranscale_reg_value = 0x5560B83A;
1862 break;
1863 case DP_TRAIN_VOLTAGE_SWING_1200:
1864 demph_reg_value = 0x2B405555;
1865 uniqtranscale_reg_value = 0x5598DA3A;
1866 break;
1867 default:
1868 return 0;
1869 }
1870 break;
1871 case DP_TRAIN_PRE_EMPHASIS_3_5:
1872 preemph_reg_value = 0x0002000;
1873 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
1874 case DP_TRAIN_VOLTAGE_SWING_400:
1875 demph_reg_value = 0x2B404040;
1876 uniqtranscale_reg_value = 0x5552B83A;
1877 break;
1878 case DP_TRAIN_VOLTAGE_SWING_600:
1879 demph_reg_value = 0x2B404848;
1880 uniqtranscale_reg_value = 0x5580B83A;
1881 break;
1882 case DP_TRAIN_VOLTAGE_SWING_800:
1883 demph_reg_value = 0x2B404040;
1884 uniqtranscale_reg_value = 0x55ADDA3A;
1885 break;
1886 default:
1887 return 0;
1888 }
1889 break;
1890 case DP_TRAIN_PRE_EMPHASIS_6:
1891 preemph_reg_value = 0x0000000;
1892 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
1893 case DP_TRAIN_VOLTAGE_SWING_400:
1894 demph_reg_value = 0x2B305555;
1895 uniqtranscale_reg_value = 0x5570B83A;
1896 break;
1897 case DP_TRAIN_VOLTAGE_SWING_600:
1898 demph_reg_value = 0x2B2B4040;
1899 uniqtranscale_reg_value = 0x55ADDA3A;
1900 break;
1901 default:
1902 return 0;
1903 }
1904 break;
1905 case DP_TRAIN_PRE_EMPHASIS_9_5:
1906 preemph_reg_value = 0x0006000;
1907 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
1908 case DP_TRAIN_VOLTAGE_SWING_400:
1909 demph_reg_value = 0x1B405555;
1910 uniqtranscale_reg_value = 0x55ADDA3A;
1911 break;
1912 default:
1913 return 0;
1914 }
1915 break;
1916 default:
1917 return 0;
1918 }
1919
ae99258f
JN
1920 vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 0x00000000);
1921 vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL4(port), demph_reg_value);
1922 vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL2(port),
e2fa6fba 1923 uniqtranscale_reg_value);
ae99258f
JN
1924 vlv_dpio_write(dev_priv, DPIO_TX_SWING_CTL3(port), 0x0C782040);
1925 vlv_dpio_write(dev_priv, DPIO_PCS_STAGGER0(port), 0x00030000);
1926 vlv_dpio_write(dev_priv, DPIO_PCS_CTL_OVER1(port), preemph_reg_value);
1927 vlv_dpio_write(dev_priv, DPIO_TX_OCALINIT(port), 0x80000000);
e2fa6fba
P
1928
1929 return 0;
1930}
1931
a4fc5ed6 1932static void
93f62dad 1933intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
a4fc5ed6
KP
1934{
1935 uint8_t v = 0;
1936 uint8_t p = 0;
1937 int lane;
1a2eb460
KP
1938 uint8_t voltage_max;
1939 uint8_t preemph_max;
a4fc5ed6 1940
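	/* Use the largest voltage swing and pre-emphasis adjustment requested
	 * by any active lane, as reported in the DPCD link status. */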
33a34e4e 1941 for (lane = 0; lane < intel_dp->lane_count; lane++) {
0f037bde
DV
1942 uint8_t this_v = drm_dp_get_adjust_request_voltage(link_status, lane);
1943 uint8_t this_p = drm_dp_get_adjust_request_pre_emphasis(link_status, lane);
a4fc5ed6
KP
1944
1945 if (this_v > v)
1946 v = this_v;
1947 if (this_p > p)
1948 p = this_p;
1949 }
1950
1a2eb460 1951 voltage_max = intel_dp_voltage_max(intel_dp);
417e822d
KP
1952 if (v >= voltage_max)
1953 v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
a4fc5ed6 1954
1a2eb460
KP
1955 preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
1956 if (p >= preemph_max)
1957 p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
a4fc5ed6
KP
1958
1959 for (lane = 0; lane < 4; lane++)
33a34e4e 1960 intel_dp->train_set[lane] = v | p;
a4fc5ed6
KP
1961}
1962
1963static uint32_t
f0a3424e 1964intel_gen4_signal_levels(uint8_t train_set)
a4fc5ed6 1965{
3cf2efb1 1966 uint32_t signal_levels = 0;
a4fc5ed6 1967
3cf2efb1 1968 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
a4fc5ed6
KP
1969 case DP_TRAIN_VOLTAGE_SWING_400:
1970 default:
1971 signal_levels |= DP_VOLTAGE_0_4;
1972 break;
1973 case DP_TRAIN_VOLTAGE_SWING_600:
1974 signal_levels |= DP_VOLTAGE_0_6;
1975 break;
1976 case DP_TRAIN_VOLTAGE_SWING_800:
1977 signal_levels |= DP_VOLTAGE_0_8;
1978 break;
1979 case DP_TRAIN_VOLTAGE_SWING_1200:
1980 signal_levels |= DP_VOLTAGE_1_2;
1981 break;
1982 }
3cf2efb1 1983 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
a4fc5ed6
KP
1984 case DP_TRAIN_PRE_EMPHASIS_0:
1985 default:
1986 signal_levels |= DP_PRE_EMPHASIS_0;
1987 break;
1988 case DP_TRAIN_PRE_EMPHASIS_3_5:
1989 signal_levels |= DP_PRE_EMPHASIS_3_5;
1990 break;
1991 case DP_TRAIN_PRE_EMPHASIS_6:
1992 signal_levels |= DP_PRE_EMPHASIS_6;
1993 break;
1994 case DP_TRAIN_PRE_EMPHASIS_9_5:
1995 signal_levels |= DP_PRE_EMPHASIS_9_5;
1996 break;
1997 }
1998 return signal_levels;
1999}
2000
e3421a18
ZW
2001/* Gen6's DP voltage swing and pre-emphasis control */
2002static uint32_t
2003intel_gen6_edp_signal_levels(uint8_t train_set)
2004{
3c5a62b5
YL
2005 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
2006 DP_TRAIN_PRE_EMPHASIS_MASK);
2007 switch (signal_levels) {
e3421a18 2008 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
3c5a62b5
YL
2009 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
2010 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
2011 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
2012 return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
e3421a18 2013 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
3c5a62b5
YL
2014 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
2015 return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
e3421a18 2016 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
3c5a62b5
YL
2017 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
2018 return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
e3421a18 2019 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
3c5a62b5
YL
2020 case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0:
2021 return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
e3421a18 2022 default:
3c5a62b5
YL
2023 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
2024 "0x%x\n", signal_levels);
2025 return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
e3421a18
ZW
2026 }
2027}
2028
1a2eb460
KP
2029/* Gen7's DP voltage swing and pre-emphasis control */
2030static uint32_t
2031intel_gen7_edp_signal_levels(uint8_t train_set)
2032{
2033 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
2034 DP_TRAIN_PRE_EMPHASIS_MASK);
2035 switch (signal_levels) {
2036 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
2037 return EDP_LINK_TRAIN_400MV_0DB_IVB;
2038 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
2039 return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
2040 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
2041 return EDP_LINK_TRAIN_400MV_6DB_IVB;
2042
2043 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
2044 return EDP_LINK_TRAIN_600MV_0DB_IVB;
2045 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
2046 return EDP_LINK_TRAIN_600MV_3_5DB_IVB;
2047
2048 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
2049 return EDP_LINK_TRAIN_800MV_0DB_IVB;
2050 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
2051 return EDP_LINK_TRAIN_800MV_3_5DB_IVB;
2052
2053 default:
2054 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
2055 "0x%x\n", signal_levels);
2056 return EDP_LINK_TRAIN_500MV_0DB_IVB;
2057 }
2058}
2059
d6c0d722
PZ
2060/* Gen7.5's (HSW) DP voltage swing and pre-emphasis control */
2061static uint32_t
f0a3424e 2062intel_hsw_signal_levels(uint8_t train_set)
a4fc5ed6 2063{
d6c0d722
PZ
2064 int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
2065 DP_TRAIN_PRE_EMPHASIS_MASK);
2066 switch (signal_levels) {
2067 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
2068 return DDI_BUF_EMP_400MV_0DB_HSW;
2069 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
2070 return DDI_BUF_EMP_400MV_3_5DB_HSW;
2071 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
2072 return DDI_BUF_EMP_400MV_6DB_HSW;
2073 case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_9_5:
2074 return DDI_BUF_EMP_400MV_9_5DB_HSW;
a4fc5ed6 2075
d6c0d722
PZ
2076 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
2077 return DDI_BUF_EMP_600MV_0DB_HSW;
2078 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
2079 return DDI_BUF_EMP_600MV_3_5DB_HSW;
2080 case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
2081 return DDI_BUF_EMP_600MV_6DB_HSW;
a4fc5ed6 2082
d6c0d722
PZ
2083 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
2084 return DDI_BUF_EMP_800MV_0DB_HSW;
2085 case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
2086 return DDI_BUF_EMP_800MV_3_5DB_HSW;
2087 default:
2088 DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
2089 "0x%x\n", signal_levels);
2090 return DDI_BUF_EMP_400MV_0DB_HSW;
a4fc5ed6 2091 }
a4fc5ed6
KP
2092}
2093
f0a3424e
PZ
2094/* Properly updates "DP" with the correct signal levels. */
2095static void
2096intel_dp_set_signal_levels(struct intel_dp *intel_dp, uint32_t *DP)
2097{
2098 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
bc7d38a4 2099 enum port port = intel_dig_port->port;
f0a3424e
PZ
2100 struct drm_device *dev = intel_dig_port->base.base.dev;
2101 uint32_t signal_levels, mask;
2102 uint8_t train_set = intel_dp->train_set[0];
2103
22b8bf17 2104 if (HAS_DDI(dev)) {
f0a3424e
PZ
2105 signal_levels = intel_hsw_signal_levels(train_set);
2106 mask = DDI_BUF_EMP_MASK;
e2fa6fba
P
2107 } else if (IS_VALLEYVIEW(dev)) {
2108 signal_levels = intel_vlv_signal_levels(intel_dp);
2109 mask = 0;
bc7d38a4 2110 } else if (IS_GEN7(dev) && port == PORT_A) {
f0a3424e
PZ
2111 signal_levels = intel_gen7_edp_signal_levels(train_set);
2112 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
bc7d38a4 2113 } else if (IS_GEN6(dev) && port == PORT_A) {
f0a3424e
PZ
2114 signal_levels = intel_gen6_edp_signal_levels(train_set);
2115 mask = EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
2116 } else {
2117 signal_levels = intel_gen4_signal_levels(train_set);
2118 mask = DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK;
2119 }
2120
2121 DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
2122
2123 *DP = (*DP & ~mask) | signal_levels;
2124}
2125
a4fc5ed6 2126static bool
ea5b213a 2127intel_dp_set_link_train(struct intel_dp *intel_dp,
a4fc5ed6 2128 uint32_t dp_reg_value,
58e10eb9 2129 uint8_t dp_train_pat)
a4fc5ed6 2130{
174edf1f
PZ
2131 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2132 struct drm_device *dev = intel_dig_port->base.base.dev;
a4fc5ed6 2133 struct drm_i915_private *dev_priv = dev->dev_private;
174edf1f 2134 enum port port = intel_dig_port->port;
a4fc5ed6
KP
2135 int ret;
2136
22b8bf17 2137 if (HAS_DDI(dev)) {
3ab9c637 2138 uint32_t temp = I915_READ(DP_TP_CTL(port));
d6c0d722
PZ
2139
2140 if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
2141 temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
2142 else
2143 temp &= ~DP_TP_CTL_SCRAMBLE_DISABLE;
2144
2145 temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2146 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2147 case DP_TRAINING_PATTERN_DISABLE:
d6c0d722
PZ
2148 temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
2149
2150 break;
2151 case DP_TRAINING_PATTERN_1:
2152 temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
2153 break;
2154 case DP_TRAINING_PATTERN_2:
2155 temp |= DP_TP_CTL_LINK_TRAIN_PAT2;
2156 break;
2157 case DP_TRAINING_PATTERN_3:
2158 temp |= DP_TP_CTL_LINK_TRAIN_PAT3;
2159 break;
2160 }
174edf1f 2161 I915_WRITE(DP_TP_CTL(port), temp);
d6c0d722 2162
bc7d38a4 2163 } else if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
47ea7542
PZ
2164 dp_reg_value &= ~DP_LINK_TRAIN_MASK_CPT;
2165
2166 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2167 case DP_TRAINING_PATTERN_DISABLE:
2168 dp_reg_value |= DP_LINK_TRAIN_OFF_CPT;
2169 break;
2170 case DP_TRAINING_PATTERN_1:
2171 dp_reg_value |= DP_LINK_TRAIN_PAT_1_CPT;
2172 break;
2173 case DP_TRAINING_PATTERN_2:
2174 dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT;
2175 break;
2176 case DP_TRAINING_PATTERN_3:
2177 DRM_ERROR("DP training pattern 3 not supported\n");
2178 dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT;
2179 break;
2180 }
2181
2182 } else {
2183 dp_reg_value &= ~DP_LINK_TRAIN_MASK;
2184
2185 switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
2186 case DP_TRAINING_PATTERN_DISABLE:
2187 dp_reg_value |= DP_LINK_TRAIN_OFF;
2188 break;
2189 case DP_TRAINING_PATTERN_1:
2190 dp_reg_value |= DP_LINK_TRAIN_PAT_1;
2191 break;
2192 case DP_TRAINING_PATTERN_2:
2193 dp_reg_value |= DP_LINK_TRAIN_PAT_2;
2194 break;
2195 case DP_TRAINING_PATTERN_3:
2196 DRM_ERROR("DP training pattern 3 not supported\n");
2197 dp_reg_value |= DP_LINK_TRAIN_PAT_2;
2198 break;
2199 }
2200 }
2201
ea5b213a
CW
2202 I915_WRITE(intel_dp->output_reg, dp_reg_value);
2203 POSTING_READ(intel_dp->output_reg);
a4fc5ed6 2204
ea5b213a 2205 intel_dp_aux_native_write_1(intel_dp,
a4fc5ed6
KP
2206 DP_TRAINING_PATTERN_SET,
2207 dp_train_pat);
2208
47ea7542
PZ
2209 if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) !=
2210 DP_TRAINING_PATTERN_DISABLE) {
2211 ret = intel_dp_aux_native_write(intel_dp,
2212 DP_TRAINING_LANE0_SET,
2213 intel_dp->train_set,
2214 intel_dp->lane_count);
2215 if (ret != intel_dp->lane_count)
2216 return false;
2217 }
a4fc5ed6
KP
2218
2219 return true;
2220}
2221
3ab9c637
ID
2222static void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
2223{
2224 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2225 struct drm_device *dev = intel_dig_port->base.base.dev;
2226 struct drm_i915_private *dev_priv = dev->dev_private;
2227 enum port port = intel_dig_port->port;
2228 uint32_t val;
2229
2230 if (!HAS_DDI(dev))
2231 return;
2232
2233 val = I915_READ(DP_TP_CTL(port));
2234 val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
2235 val |= DP_TP_CTL_LINK_TRAIN_IDLE;
2236 I915_WRITE(DP_TP_CTL(port), val);
2237
2238 /*
2239 * On PORT_A we can have only eDP in SST mode. There the only reason
2240 * we need to set idle transmission mode is to work around a HW issue
2241 * where we enable the pipe while not in idle link-training mode.
2242 * In this case there is a requirement to wait for a minimum number of
2243 * idle patterns to be sent.
2244 */
2245 if (port == PORT_A)
2246 return;
2247
2248 if (wait_for((I915_READ(DP_TP_STATUS(port)) & DP_TP_STATUS_IDLE_DONE),
2249 1))
2250 DRM_ERROR("Timed out waiting for DP idle patterns\n");
2251}
2252
33a34e4e 2253/* Enable corresponding port and start training pattern 1 */
c19b0669 2254void
33a34e4e 2255intel_dp_start_link_train(struct intel_dp *intel_dp)
a4fc5ed6 2256{
da63a9f2 2257 struct drm_encoder *encoder = &dp_to_dig_port(intel_dp)->base.base;
c19b0669 2258 struct drm_device *dev = encoder->dev;
a4fc5ed6
KP
2259 int i;
2260 uint8_t voltage;
2261 bool clock_recovery = false;
cdb0e95b 2262 int voltage_tries, loop_tries;
ea5b213a 2263 uint32_t DP = intel_dp->DP;
a4fc5ed6 2264
affa9354 2265 if (HAS_DDI(dev))
c19b0669
PZ
2266 intel_ddi_prepare_link_retrain(encoder);
2267
3cf2efb1
CW
2268 /* Write the link configuration data */
2269 intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
2270 intel_dp->link_configuration,
2271 DP_LINK_CONFIGURATION_SIZE);
a4fc5ed6
KP
2272
2273 DP |= DP_PORT_EN;
1a2eb460 2274
33a34e4e 2275 memset(intel_dp->train_set, 0, 4);
a4fc5ed6 2276 voltage = 0xff;
cdb0e95b
KP
2277 voltage_tries = 0;
2278 loop_tries = 0;
a4fc5ed6
KP
2279 clock_recovery = false;
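	/* Clock recovery loop: bump swing/pre-emphasis as the sink requests
	 * until all lanes report CR done; restart from minimum levels once
	 * every lane has hit max swing, and give up after 5 full restarts or
	 * 5 tries at the same voltage. */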
2280 for (;;) {
33a34e4e 2281 /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
93f62dad 2282 uint8_t link_status[DP_LINK_STATUS_SIZE];
f0a3424e
PZ
2283
2284 intel_dp_set_signal_levels(intel_dp, &DP);
a4fc5ed6 2285
a7c9655f 2286 /* Set training pattern 1 */
47ea7542 2287 if (!intel_dp_set_link_train(intel_dp, DP,
81055854
AJ
2288 DP_TRAINING_PATTERN_1 |
2289 DP_LINK_SCRAMBLING_DISABLE))
a4fc5ed6 2290 break;
a4fc5ed6 2291
a7c9655f 2292 drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
93f62dad
KP
2293 if (!intel_dp_get_link_status(intel_dp, link_status)) {
2294 DRM_ERROR("failed to get link status\n");
a4fc5ed6 2295 break;
93f62dad 2296 }
a4fc5ed6 2297
01916270 2298 if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
93f62dad 2299 DRM_DEBUG_KMS("clock recovery OK\n");
3cf2efb1
CW
2300 clock_recovery = true;
2301 break;
2302 }
2303
2304 /* Check to see if we've tried the max voltage */
2305 for (i = 0; i < intel_dp->lane_count; i++)
2306 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
a4fc5ed6 2307 break;
3b4f819d 2308 if (i == intel_dp->lane_count) {
b06fbda3
DV
2309 ++loop_tries;
2310 if (loop_tries == 5) {
cdb0e95b
KP
2311 DRM_DEBUG_KMS("too many full retries, give up\n");
2312 break;
2313 }
2314 memset(intel_dp->train_set, 0, 4);
2315 voltage_tries = 0;
2316 continue;
2317 }
a4fc5ed6 2318
3cf2efb1 2319 /* Check to see if we've tried the same voltage 5 times */
b06fbda3 2320 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
24773670 2321 ++voltage_tries;
b06fbda3
DV
2322 if (voltage_tries == 5) {
2323 DRM_DEBUG_KMS("too many voltage retries, give up\n");
2324 break;
2325 }
2326 } else
2327 voltage_tries = 0;
2328 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
a4fc5ed6 2329
3cf2efb1 2330 /* Compute new intel_dp->train_set as requested by target */
93f62dad 2331 intel_get_adjust_train(intel_dp, link_status);
a4fc5ed6
KP
2332 }
2333
33a34e4e
JB
2334 intel_dp->DP = DP;
2335}
2336
c19b0669 2337void
33a34e4e
JB
2338intel_dp_complete_link_train(struct intel_dp *intel_dp)
2339{
33a34e4e 2340 bool channel_eq = false;
37f80975 2341 int tries, cr_tries;
33a34e4e
JB
2342 uint32_t DP = intel_dp->DP;
2343
a4fc5ed6
KP
2344 /* channel equalization */
2345 tries = 0;
37f80975 2346 cr_tries = 0;
a4fc5ed6
KP
2347 channel_eq = false;
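	/* Channel equalization loop: go back to clock recovery if it is lost,
	 * retry pattern 2 up to 5 times before a full retrain, and abort
	 * entirely once too many clock-recovery restarts have been needed. */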
2348 for (;;) {
93f62dad 2349 uint8_t link_status[DP_LINK_STATUS_SIZE];
e3421a18 2350
37f80975
JB
2351 if (cr_tries > 5) {
2352 DRM_ERROR("failed to train DP, aborting\n");
2353 intel_dp_link_down(intel_dp);
2354 break;
2355 }
2356
f0a3424e 2357 intel_dp_set_signal_levels(intel_dp, &DP);
e3421a18 2358
a4fc5ed6 2359 /* channel eq pattern */
47ea7542 2360 if (!intel_dp_set_link_train(intel_dp, DP,
81055854
AJ
2361 DP_TRAINING_PATTERN_2 |
2362 DP_LINK_SCRAMBLING_DISABLE))
a4fc5ed6
KP
2363 break;
2364
a7c9655f 2365 drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
93f62dad 2366 if (!intel_dp_get_link_status(intel_dp, link_status))
a4fc5ed6 2367 break;
a4fc5ed6 2368
37f80975 2369 /* Make sure clock is still ok */
01916270 2370 if (!drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
37f80975
JB
2371 intel_dp_start_link_train(intel_dp);
2372 cr_tries++;
2373 continue;
2374 }
2375
1ffdff13 2376 if (drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
3cf2efb1
CW
2377 channel_eq = true;
2378 break;
2379 }
a4fc5ed6 2380
37f80975
JB
2381 /* Try 5 times, then try clock recovery if that fails */
2382 if (tries > 5) {
2383 intel_dp_link_down(intel_dp);
2384 intel_dp_start_link_train(intel_dp);
2385 tries = 0;
2386 cr_tries++;
2387 continue;
2388 }
a4fc5ed6 2389
3cf2efb1 2390 /* Compute new intel_dp->train_set as requested by target */
93f62dad 2391 intel_get_adjust_train(intel_dp, link_status);
3cf2efb1 2392 ++tries;
869184a6 2393 }
3cf2efb1 2394
3ab9c637
ID
2395 intel_dp_set_idle_link_train(intel_dp);
2396
2397 intel_dp->DP = DP;
2398
d6c0d722 2399 if (channel_eq)
07f42258 2400 DRM_DEBUG_KMS("Channel EQ done. DP Training successful\n");
d6c0d722 2401
3ab9c637
ID
2402}
2403
2404void intel_dp_stop_link_train(struct intel_dp *intel_dp)
2405{
2406 intel_dp_set_link_train(intel_dp, intel_dp->DP,
2407 DP_TRAINING_PATTERN_DISABLE);
a4fc5ed6
KP
2408}
2409
2410static void
ea5b213a 2411intel_dp_link_down(struct intel_dp *intel_dp)
a4fc5ed6 2412{
da63a9f2 2413 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
bc7d38a4 2414 enum port port = intel_dig_port->port;
da63a9f2 2415 struct drm_device *dev = intel_dig_port->base.base.dev;
a4fc5ed6 2416 struct drm_i915_private *dev_priv = dev->dev_private;
ab527efc
DV
2417 struct intel_crtc *intel_crtc =
2418 to_intel_crtc(intel_dig_port->base.base.crtc);
ea5b213a 2419 uint32_t DP = intel_dp->DP;
a4fc5ed6 2420
c19b0669
PZ
2421 /*
2422 * DDI code has a strict mode set sequence and we should try to respect
2423 * it, otherwise we might hang the machine in many different ways. So we
2424 * really should be disabling the port only on a complete crtc_disable
2425 * sequence. This function is only called under two conditions in the DDI
2426 * code:
2427 * - Link train failed while doing crtc_enable, and on this case we
2428 * really should respect the mode set sequence and wait for a
2429 * crtc_disable.
2430 * - Someone turned the monitor off and intel_dp_check_link_status
2431 * called us. We don't need to disable the whole port in this case, so
2432 * when someone turns the monitor on again,
2433 * intel_ddi_prepare_link_retrain will take care of redoing the link
2434 * train.
2435 */
affa9354 2436 if (HAS_DDI(dev))
c19b0669
PZ
2437 return;
2438
0c33d8d7 2439 if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
1b39d6f3
CW
2440 return;
2441
28c97730 2442 DRM_DEBUG_KMS("\n");
32f9d658 2443
bc7d38a4 2444 if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || port != PORT_A)) {
e3421a18 2445 DP &= ~DP_LINK_TRAIN_MASK_CPT;
ea5b213a 2446 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
e3421a18
ZW
2447 } else {
2448 DP &= ~DP_LINK_TRAIN_MASK;
ea5b213a 2449 I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
e3421a18 2450 }
fe255d00 2451 POSTING_READ(intel_dp->output_reg);
5eb08b69 2452
ab527efc
DV
2453 /* We don't really know why we're doing this */
2454 intel_wait_for_vblank(dev, intel_crtc->pipe);
5eb08b69 2455
493a7081 2456 if (HAS_PCH_IBX(dev) &&
1b39d6f3 2457 I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
da63a9f2 2458 struct drm_crtc *crtc = intel_dig_port->base.base.crtc;
31acbcc4 2459
5bddd17f
EA
2460 /* Hardware workaround: leaving our transcoder select
2461 * set to transcoder B while it's off will prevent the
2462 * corresponding HDMI output on transcoder A.
2463 *
2464 * Combine this with another hardware workaround:
2465 * transcoder select bit can only be cleared while the
2466 * port is enabled.
2467 */
2468 DP &= ~DP_PIPEB_SELECT;
2469 I915_WRITE(intel_dp->output_reg, DP);
2470
2471 /* Changes to enable or select take place the vblank
2472 * after being written.
2473 */
ff50afe9
DV
2474 if (WARN_ON(crtc == NULL)) {
2475 /* We should never try to disable a port without a crtc
2476 * attached. For paranoia keep the code around for a
2477 * bit. */
31acbcc4
CW
2478 POSTING_READ(intel_dp->output_reg);
2479 msleep(50);
2480 } else
ab527efc 2481 intel_wait_for_vblank(dev, intel_crtc->pipe);
5bddd17f
EA
2482 }
2483
832afda6 2484 DP &= ~DP_AUDIO_OUTPUT_ENABLE;
ea5b213a
CW
2485 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
2486 POSTING_READ(intel_dp->output_reg);
f01eca2e 2487 msleep(intel_dp->panel_power_down_delay);
a4fc5ed6
KP
2488}
2489
26d61aad
KP
2490static bool
2491intel_dp_get_dpcd(struct intel_dp *intel_dp)
92fd8fd1 2492{
577c7a50
DL
2493 char dpcd_hex_dump[sizeof(intel_dp->dpcd) * 3];
2494
92fd8fd1 2495 if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd,
edb39244
AJ
2496 sizeof(intel_dp->dpcd)) == 0)
2497 return false; /* aux transfer failed */
92fd8fd1 2498
577c7a50
DL
2499 hex_dump_to_buffer(intel_dp->dpcd, sizeof(intel_dp->dpcd),
2500 32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false);
2501 DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump);
2502
edb39244
AJ
2503 if (intel_dp->dpcd[DP_DPCD_REV] == 0)
2504 return false; /* DPCD not present */
2505
2293bb5c
SK
2506 /* Check if the panel supports PSR */
2507 memset(intel_dp->psr_dpcd, 0, sizeof(intel_dp->psr_dpcd));
2508 intel_dp_aux_native_read_retry(intel_dp, DP_PSR_SUPPORT,
2509 intel_dp->psr_dpcd,
2510 sizeof(intel_dp->psr_dpcd));
2511 if (is_edp_psr(intel_dp))
2512 DRM_DEBUG_KMS("Detected EDP PSR Panel.\n");
edb39244
AJ
2513 if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] &
2514 DP_DWN_STRM_PORT_PRESENT))
2515 return true; /* native DP sink */
2516
2517 if (intel_dp->dpcd[DP_DPCD_REV] == 0x10)
2518 return true; /* no per-port downstream info */
2519
2520 if (intel_dp_aux_native_read_retry(intel_dp, DP_DOWNSTREAM_PORT_0,
2521 intel_dp->downstream_ports,
2522 DP_MAX_DOWNSTREAM_PORTS) == 0)
2523 return false; /* downstream port status fetch failed */
2524
2525 return true;
92fd8fd1
KP
2526}
2527
0d198328
AJ
2528static void
2529intel_dp_probe_oui(struct intel_dp *intel_dp)
2530{
2531 u8 buf[3];
2532
2533 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
2534 return;
2535
351cfc34
DV
2536 ironlake_edp_panel_vdd_on(intel_dp);
2537
0d198328
AJ
2538 if (intel_dp_aux_native_read_retry(intel_dp, DP_SINK_OUI, buf, 3))
2539 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
2540 buf[0], buf[1], buf[2]);
2541
2542 if (intel_dp_aux_native_read_retry(intel_dp, DP_BRANCH_OUI, buf, 3))
2543 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
2544 buf[0], buf[1], buf[2]);
351cfc34
DV
2545
2546 ironlake_edp_panel_vdd_off(intel_dp, false);
0d198328
AJ
2547}
2548
a60f0e38
JB
2549static bool
2550intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
2551{
2552 int ret;
2553
2554 ret = intel_dp_aux_native_read_retry(intel_dp,
2555 DP_DEVICE_SERVICE_IRQ_VECTOR,
2556 sink_irq_vector, 1);
2557 if (!ret)
2558 return false;
2559
2560 return true;
2561}
2562
2563static void
2564intel_dp_handle_test_request(struct intel_dp *intel_dp)
2565{
2566 /* NAK by default */
9324cf7f 2567 intel_dp_aux_native_write_1(intel_dp, DP_TEST_RESPONSE, DP_TEST_NAK);
a60f0e38
JB
2568}
2569
a4fc5ed6
KP
2570/*
2571 * According to DP spec
2572 * 5.1.2:
2573 * 1. Read DPCD
2574 * 2. Configure link according to Receiver Capabilities
2575 * 3. Use Link Training from 2.5.3.3 and 3.5.1.3
2576 * 4. Check link status on receipt of hot-plug interrupt
2577 */
2578
00c09d70 2579void
ea5b213a 2580intel_dp_check_link_status(struct intel_dp *intel_dp)
a4fc5ed6 2581{
da63a9f2 2582 struct intel_encoder *intel_encoder = &dp_to_dig_port(intel_dp)->base;
a60f0e38 2583 u8 sink_irq_vector;
93f62dad 2584 u8 link_status[DP_LINK_STATUS_SIZE];
a60f0e38 2585
da63a9f2 2586 if (!intel_encoder->connectors_active)
d2b996ac 2587 return;
59cd09e1 2588
da63a9f2 2589 if (WARN_ON(!intel_encoder->base.crtc))
a4fc5ed6
KP
2590 return;
2591
92fd8fd1 2592 /* Try to read receiver status if the link appears to be up */
93f62dad 2593 if (!intel_dp_get_link_status(intel_dp, link_status)) {
ea5b213a 2594 intel_dp_link_down(intel_dp);
a4fc5ed6
KP
2595 return;
2596 }
2597
92fd8fd1 2598 /* Now read the DPCD to see if it's actually running */
26d61aad 2599 if (!intel_dp_get_dpcd(intel_dp)) {
59cd09e1
JB
2600 intel_dp_link_down(intel_dp);
2601 return;
2602 }
2603
a60f0e38
JB
2604 /* Try to read the source of the interrupt */
2605 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
2606 intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
2607 /* Clear interrupt source */
2608 intel_dp_aux_native_write_1(intel_dp,
2609 DP_DEVICE_SERVICE_IRQ_VECTOR,
2610 sink_irq_vector);
2611
2612 if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
2613 intel_dp_handle_test_request(intel_dp);
2614 if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
2615 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
2616 }
2617
1ffdff13 2618 if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) {
92fd8fd1 2619 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
da63a9f2 2620 drm_get_encoder_name(&intel_encoder->base));
33a34e4e
JB
2621 intel_dp_start_link_train(intel_dp);
2622 intel_dp_complete_link_train(intel_dp);
3ab9c637 2623 intel_dp_stop_link_train(intel_dp);
33a34e4e 2624 }
a4fc5ed6 2625}
a4fc5ed6 2626
caf9ab24 2627/* XXX this is probably wrong for multiple downstream ports */
71ba9000 2628static enum drm_connector_status
26d61aad 2629intel_dp_detect_dpcd(struct intel_dp *intel_dp)
71ba9000 2630{
caf9ab24
AJ
2631 uint8_t *dpcd = intel_dp->dpcd;
2632 bool hpd;
2633 uint8_t type;
2634
2635 if (!intel_dp_get_dpcd(intel_dp))
2636 return connector_status_disconnected;
2637
2638 /* if there's no downstream port, we're done */
2639 if (!(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
26d61aad 2640 return connector_status_connected;
caf9ab24
AJ
2641
2642 /* If we're HPD-aware, SINK_COUNT changes dynamically */
2643 hpd = !!(intel_dp->downstream_ports[0] & DP_DS_PORT_HPD);
2644 if (hpd) {
23235177 2645 uint8_t reg;
caf9ab24 2646 if (!intel_dp_aux_native_read_retry(intel_dp, DP_SINK_COUNT,
23235177 2647 &reg, 1))
caf9ab24 2648 return connector_status_unknown;
23235177
AJ
2649 return DP_GET_SINK_COUNT(reg) ? connector_status_connected
2650 : connector_status_disconnected;
caf9ab24
AJ
2651 }
2652
2653 /* If no HPD, poke DDC gently */
2654 if (drm_probe_ddc(&intel_dp->adapter))
26d61aad 2655 return connector_status_connected;
caf9ab24
AJ
2656
2657 /* Well we tried, say unknown for unreliable port types */
2658 type = intel_dp->downstream_ports[0] & DP_DS_PORT_TYPE_MASK;
2659 if (type == DP_DS_PORT_TYPE_VGA || type == DP_DS_PORT_TYPE_NON_EDID)
2660 return connector_status_unknown;
2661
2662 /* Anything else is out of spec, warn and ignore */
2663 DRM_DEBUG_KMS("Broken DP branch device, ignoring\n");
26d61aad 2664 return connector_status_disconnected;
71ba9000
AJ
2665}
2666
5eb08b69 2667static enum drm_connector_status
a9756bb5 2668ironlake_dp_detect(struct intel_dp *intel_dp)
5eb08b69 2669{
30add22d 2670 struct drm_device *dev = intel_dp_to_dev(intel_dp);
1b469639
DL
2671 struct drm_i915_private *dev_priv = dev->dev_private;
2672 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
5eb08b69
ZW
2673 enum drm_connector_status status;
2674
fe16d949
CW
2675 /* Can't disconnect eDP, but you can close the lid... */
2676 if (is_edp(intel_dp)) {
30add22d 2677 status = intel_panel_detect(dev);
fe16d949
CW
2678 if (status == connector_status_unknown)
2679 status = connector_status_connected;
2680 return status;
2681 }
01cb9ea6 2682
1b469639
DL
2683 if (!ibx_digital_port_connected(dev_priv, intel_dig_port))
2684 return connector_status_disconnected;
2685
26d61aad 2686 return intel_dp_detect_dpcd(intel_dp);
5eb08b69
ZW
2687}
2688
a4fc5ed6 2689static enum drm_connector_status
a9756bb5 2690g4x_dp_detect(struct intel_dp *intel_dp)
a4fc5ed6 2691{
30add22d 2692 struct drm_device *dev = intel_dp_to_dev(intel_dp);
a4fc5ed6 2693 struct drm_i915_private *dev_priv = dev->dev_private;
34f2be46 2694 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
10f76a38 2695 uint32_t bit;
5eb08b69 2696
35aad75f
JB
2697 /* Can't disconnect eDP, but you can close the lid... */
2698 if (is_edp(intel_dp)) {
2699 enum drm_connector_status status;
2700
2701 status = intel_panel_detect(dev);
2702 if (status == connector_status_unknown)
2703 status = connector_status_connected;
2704 return status;
2705 }
2706
34f2be46
VS
2707 switch (intel_dig_port->port) {
2708 case PORT_B:
26739f12 2709 bit = PORTB_HOTPLUG_LIVE_STATUS;
a4fc5ed6 2710 break;
34f2be46 2711 case PORT_C:
26739f12 2712 bit = PORTC_HOTPLUG_LIVE_STATUS;
a4fc5ed6 2713 break;
34f2be46 2714 case PORT_D:
26739f12 2715 bit = PORTD_HOTPLUG_LIVE_STATUS;
a4fc5ed6
KP
2716 break;
2717 default:
2718 return connector_status_unknown;
2719 }
2720
10f76a38 2721 if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
a4fc5ed6
KP
2722 return connector_status_disconnected;
2723
26d61aad 2724 return intel_dp_detect_dpcd(intel_dp);
a9756bb5
ZW
2725}
2726
8c241fef
KP
2727static struct edid *
2728intel_dp_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
2729{
9cd300e0 2730 struct intel_connector *intel_connector = to_intel_connector(connector);
d6f24d0f 2731
9cd300e0
JN
2732 /* use cached edid if we have one */
2733 if (intel_connector->edid) {
2734 struct edid *edid;
2735 int size;
2736
2737 /* invalid edid */
2738 if (IS_ERR(intel_connector->edid))
d6f24d0f
JB
2739 return NULL;
2740
9cd300e0 2741 size = (intel_connector->edid->extensions + 1) * EDID_LENGTH;
edbe1581 2742 edid = kmemdup(intel_connector->edid, size, GFP_KERNEL);
d6f24d0f
JB
2743 if (!edid)
2744 return NULL;
2745
d6f24d0f
JB
2746 return edid;
2747 }
8c241fef 2748
9cd300e0 2749 return drm_get_edid(connector, adapter);
8c241fef
KP
2750}
2751
2752static int
2753intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *adapter)
2754{
9cd300e0 2755 struct intel_connector *intel_connector = to_intel_connector(connector);
8c241fef 2756
9cd300e0
JN
2757 /* use cached edid if we have one */
2758 if (intel_connector->edid) {
2759 /* invalid edid */
2760 if (IS_ERR(intel_connector->edid))
2761 return 0;
2762
2763 return intel_connector_update_modes(connector,
2764 intel_connector->edid);
d6f24d0f
JB
2765 }
2766
9cd300e0 2767 return intel_ddc_get_modes(connector, adapter);
8c241fef
KP
2768}
2769
a9756bb5
ZW
2770static enum drm_connector_status
2771intel_dp_detect(struct drm_connector *connector, bool force)
2772{
2773 struct intel_dp *intel_dp = intel_attached_dp(connector);
d63885da
PZ
2774 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
2775 struct intel_encoder *intel_encoder = &intel_dig_port->base;
fa90ecef 2776 struct drm_device *dev = connector->dev;
a9756bb5
ZW
2777 enum drm_connector_status status;
2778 struct edid *edid = NULL;
2779
2780 intel_dp->has_audio = false;
2781
2782 if (HAS_PCH_SPLIT(dev))
2783 status = ironlake_dp_detect(intel_dp);
2784 else
2785 status = g4x_dp_detect(intel_dp);
1b9be9d0 2786
a9756bb5
ZW
2787 if (status != connector_status_connected)
2788 return status;
2789
0d198328
AJ
2790 intel_dp_probe_oui(intel_dp);
2791
c3e5f67b
DV
2792 if (intel_dp->force_audio != HDMI_AUDIO_AUTO) {
2793 intel_dp->has_audio = (intel_dp->force_audio == HDMI_AUDIO_ON);
f684960e 2794 } else {
8c241fef 2795 edid = intel_dp_get_edid(connector, &intel_dp->adapter);
f684960e
CW
2796 if (edid) {
2797 intel_dp->has_audio = drm_detect_monitor_audio(edid);
f684960e
CW
2798 kfree(edid);
2799 }
a9756bb5
ZW
2800 }
2801
d63885da
PZ
2802 if (intel_encoder->type != INTEL_OUTPUT_EDP)
2803 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
a9756bb5 2804 return connector_status_connected;
a4fc5ed6
KP
2805}
2806
2807static int intel_dp_get_modes(struct drm_connector *connector)
2808{
df0e9248 2809 struct intel_dp *intel_dp = intel_attached_dp(connector);
dd06f90e 2810 struct intel_connector *intel_connector = to_intel_connector(connector);
fa90ecef 2811 struct drm_device *dev = connector->dev;
32f9d658 2812 int ret;
a4fc5ed6
KP
2813
2814 /* We should parse the EDID data and find out if it has an audio sink
2815 */
2816
8c241fef 2817 ret = intel_dp_get_edid_modes(connector, &intel_dp->adapter);
f8779fda 2818 if (ret)
32f9d658
ZW
2819 return ret;
2820
f8779fda 2821 /* if eDP has no EDID, fall back to fixed mode */
dd06f90e 2822 if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
f8779fda 2823 struct drm_display_mode *mode;
dd06f90e
JN
2824 mode = drm_mode_duplicate(dev,
2825 intel_connector->panel.fixed_mode);
f8779fda 2826 if (mode) {
32f9d658
ZW
2827 drm_mode_probed_add(connector, mode);
2828 return 1;
2829 }
2830 }
2831 return 0;
a4fc5ed6
KP
2832}
2833
1aad7ac0
CW
2834static bool
2835intel_dp_detect_audio(struct drm_connector *connector)
2836{
2837 struct intel_dp *intel_dp = intel_attached_dp(connector);
2838 struct edid *edid;
2839 bool has_audio = false;
2840
8c241fef 2841 edid = intel_dp_get_edid(connector, &intel_dp->adapter);
1aad7ac0
CW
2842 if (edid) {
2843 has_audio = drm_detect_monitor_audio(edid);
1aad7ac0
CW
2844 kfree(edid);
2845 }
2846
2847 return has_audio;
2848}
2849
f684960e
CW
2850static int
2851intel_dp_set_property(struct drm_connector *connector,
2852 struct drm_property *property,
2853 uint64_t val)
2854{
e953fd7b 2855 struct drm_i915_private *dev_priv = connector->dev->dev_private;
53b41837 2856 struct intel_connector *intel_connector = to_intel_connector(connector);
da63a9f2
PZ
2857 struct intel_encoder *intel_encoder = intel_attached_encoder(connector);
2858 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
f684960e
CW
2859 int ret;
2860
662595df 2861 ret = drm_object_property_set_value(&connector->base, property, val);
f684960e
CW
2862 if (ret)
2863 return ret;
2864
3f43c48d 2865 if (property == dev_priv->force_audio_property) {
1aad7ac0
CW
2866 int i = val;
2867 bool has_audio;
2868
2869 if (i == intel_dp->force_audio)
f684960e
CW
2870 return 0;
2871
1aad7ac0 2872 intel_dp->force_audio = i;
f684960e 2873
c3e5f67b 2874 if (i == HDMI_AUDIO_AUTO)
1aad7ac0
CW
2875 has_audio = intel_dp_detect_audio(connector);
2876 else
c3e5f67b 2877 has_audio = (i == HDMI_AUDIO_ON);
1aad7ac0
CW
2878
2879 if (has_audio == intel_dp->has_audio)
f684960e
CW
2880 return 0;
2881
1aad7ac0 2882 intel_dp->has_audio = has_audio;
f684960e
CW
2883 goto done;
2884 }
2885
e953fd7b 2886 if (property == dev_priv->broadcast_rgb_property) {
ae4edb80
DV
2887 bool old_auto = intel_dp->color_range_auto;
2888 uint32_t old_range = intel_dp->color_range;
2889
55bc60db
VS
2890 switch (val) {
2891 case INTEL_BROADCAST_RGB_AUTO:
2892 intel_dp->color_range_auto = true;
2893 break;
2894 case INTEL_BROADCAST_RGB_FULL:
2895 intel_dp->color_range_auto = false;
2896 intel_dp->color_range = 0;
2897 break;
2898 case INTEL_BROADCAST_RGB_LIMITED:
2899 intel_dp->color_range_auto = false;
2900 intel_dp->color_range = DP_COLOR_RANGE_16_235;
2901 break;
2902 default:
2903 return -EINVAL;
2904 }
ae4edb80
DV
2905
2906 if (old_auto == intel_dp->color_range_auto &&
2907 old_range == intel_dp->color_range)
2908 return 0;
2909
e953fd7b
CW
2910 goto done;
2911 }
2912
53b41837
YN
2913 if (is_edp(intel_dp) &&
2914 property == connector->dev->mode_config.scaling_mode_property) {
2915 if (val == DRM_MODE_SCALE_NONE) {
2916 DRM_DEBUG_KMS("no scaling not supported\n");
2917 return -EINVAL;
2918 }
2919
2920 if (intel_connector->panel.fitting_mode == val) {
2921 /* the eDP scaling property is not changed */
2922 return 0;
2923 }
2924 intel_connector->panel.fitting_mode = val;
2925
2926 goto done;
2927 }
2928
f684960e
CW
2929 return -EINVAL;
2930
2931done:
c0c36b94
CW
2932 if (intel_encoder->base.crtc)
2933 intel_crtc_restore_mode(intel_encoder->base.crtc);
f684960e
CW
2934
2935 return 0;
2936}
2937
a4fc5ed6 2938static void
73845adf 2939intel_dp_connector_destroy(struct drm_connector *connector)
a4fc5ed6 2940{
1d508706 2941 struct intel_connector *intel_connector = to_intel_connector(connector);
aaa6fd2a 2942
9cd300e0
JN
2943 if (!IS_ERR_OR_NULL(intel_connector->edid))
2944 kfree(intel_connector->edid);
2945
acd8db10
PZ
2946 /* Can't call is_edp() since the encoder may have been destroyed
2947 * already. */
2948 if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
1d508706 2949 intel_panel_fini(&intel_connector->panel);
aaa6fd2a 2950
a4fc5ed6
KP
2951 drm_sysfs_connector_remove(connector);
2952 drm_connector_cleanup(connector);
55f78c43 2953 kfree(connector);
a4fc5ed6
KP
2954}
2955
00c09d70 2956void intel_dp_encoder_destroy(struct drm_encoder *encoder)
24d05927 2957{
da63a9f2
PZ
2958 struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
2959 struct intel_dp *intel_dp = &intel_dig_port->dp;
bd173813 2960 struct drm_device *dev = intel_dp_to_dev(intel_dp);
24d05927
DV
2961
2962 i2c_del_adapter(&intel_dp->adapter);
2963 drm_encoder_cleanup(encoder);
bd943159
KP
2964 if (is_edp(intel_dp)) {
2965 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
bd173813 2966 mutex_lock(&dev->mode_config.mutex);
bd943159 2967 ironlake_panel_vdd_off_sync(intel_dp);
bd173813 2968 mutex_unlock(&dev->mode_config.mutex);
bd943159 2969 }
da63a9f2 2970 kfree(intel_dig_port);
24d05927
DV
2971}
2972
a4fc5ed6 2973static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = {
a4fc5ed6 2974 .mode_set = intel_dp_mode_set,
a4fc5ed6
KP
2975};
2976
2977static const struct drm_connector_funcs intel_dp_connector_funcs = {
2bd2ad64 2978 .dpms = intel_connector_dpms,
a4fc5ed6
KP
2979 .detect = intel_dp_detect,
2980 .fill_modes = drm_helper_probe_single_connector_modes,
f684960e 2981 .set_property = intel_dp_set_property,
73845adf 2982 .destroy = intel_dp_connector_destroy,
a4fc5ed6
KP
2983};
2984
2985static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = {
2986 .get_modes = intel_dp_get_modes,
2987 .mode_valid = intel_dp_mode_valid,
df0e9248 2988 .best_encoder = intel_best_encoder,
a4fc5ed6
KP
2989};
2990
a4fc5ed6 2991static const struct drm_encoder_funcs intel_dp_enc_funcs = {
24d05927 2992 .destroy = intel_dp_encoder_destroy,
a4fc5ed6
KP
2993};
2994
995b6762 2995static void
21d40d37 2996intel_dp_hot_plug(struct intel_encoder *intel_encoder)
c8110e52 2997{
fa90ecef 2998 struct intel_dp *intel_dp = enc_to_intel_dp(&intel_encoder->base);
c8110e52 2999
885a5014 3000 intel_dp_check_link_status(intel_dp);
c8110e52 3001}
6207937d 3002
e3421a18
ZW
3003/* Return which DP Port should be selected for Transcoder DP control */
3004int
0206e353 3005intel_trans_dp_port_sel(struct drm_crtc *crtc)
e3421a18
ZW
3006{
3007 struct drm_device *dev = crtc->dev;
fa90ecef
PZ
3008 struct intel_encoder *intel_encoder;
3009 struct intel_dp *intel_dp;
e3421a18 3010
fa90ecef
PZ
3011 for_each_encoder_on_crtc(dev, crtc, intel_encoder) {
3012 intel_dp = enc_to_intel_dp(&intel_encoder->base);
e3421a18 3013
fa90ecef
PZ
3014 if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
3015 intel_encoder->type == INTEL_OUTPUT_EDP)
ea5b213a 3016 return intel_dp->output_reg;
e3421a18 3017 }
ea5b213a 3018
e3421a18
ZW
3019 return -1;
3020}
3021
36e83a18 3022/* check the VBT to see whether the eDP is on DP-D port */
cb0953d7 3023bool intel_dpd_is_edp(struct drm_device *dev)
36e83a18
ZY
3024{
3025 struct drm_i915_private *dev_priv = dev->dev_private;
3026 struct child_device_config *p_child;
3027 int i;
3028
41aa3448 3029 if (!dev_priv->vbt.child_dev_num)
36e83a18
ZY
3030 return false;
3031
41aa3448
RV
3032 for (i = 0; i < dev_priv->vbt.child_dev_num; i++) {
3033 p_child = dev_priv->vbt.child_dev + i;
36e83a18
ZY
3034
3035 if (p_child->dvo_port == PORT_IDPD &&
3036 p_child->device_type == DEVICE_TYPE_eDP)
3037 return true;
3038 }
3039 return false;
3040}
3041
f684960e
CW
3042static void
3043intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector)
3044{
53b41837
YN
3045 struct intel_connector *intel_connector = to_intel_connector(connector);
3046
3f43c48d 3047 intel_attach_force_audio_property(connector);
e953fd7b 3048 intel_attach_broadcast_rgb_property(connector);
55bc60db 3049 intel_dp->color_range_auto = true;
53b41837
YN
3050
3051 if (is_edp(intel_dp)) {
3052 drm_mode_create_scaling_mode_property(connector->dev);
6de6d846
RC
3053 drm_object_attach_property(
3054 &connector->base,
53b41837 3055 connector->dev->mode_config.scaling_mode_property,
8e740cd1
YN
3056 DRM_MODE_SCALE_ASPECT);
3057 intel_connector->panel.fitting_mode = DRM_MODE_SCALE_ASPECT;
53b41837 3058 }
f684960e
CW
3059}
3060
67a54566
DV
3061static void
3062intel_dp_init_panel_power_sequencer(struct drm_device *dev,
f30d26e4
JN
3063 struct intel_dp *intel_dp,
3064 struct edp_power_seq *out)
67a54566
DV
3065{
3066 struct drm_i915_private *dev_priv = dev->dev_private;
3067 struct edp_power_seq cur, vbt, spec, final;
3068 u32 pp_on, pp_off, pp_div, pp;
453c5420
JB
3069 int pp_control_reg, pp_on_reg, pp_off_reg, pp_div_reg;
3070
3071 if (HAS_PCH_SPLIT(dev)) {
3072 pp_control_reg = PCH_PP_CONTROL;
3073 pp_on_reg = PCH_PP_ON_DELAYS;
3074 pp_off_reg = PCH_PP_OFF_DELAYS;
3075 pp_div_reg = PCH_PP_DIVISOR;
3076 } else {
3077 pp_control_reg = PIPEA_PP_CONTROL;
3078 pp_on_reg = PIPEA_PP_ON_DELAYS;
3079 pp_off_reg = PIPEA_PP_OFF_DELAYS;
3080 pp_div_reg = PIPEA_PP_DIVISOR;
3081 }
67a54566
DV
3082
3083 /* Workaround: Need to write PP_CONTROL with the unlock key as
3084 * the very first thing. */
453c5420
JB
3085 pp = ironlake_get_pp_control(intel_dp);
3086 I915_WRITE(pp_control_reg, pp);
67a54566 3087
453c5420
JB
3088 pp_on = I915_READ(pp_on_reg);
3089 pp_off = I915_READ(pp_off_reg);
3090 pp_div = I915_READ(pp_div_reg);
67a54566
DV
3091
3092 /* Pull timing values out of registers */
3093 cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
3094 PANEL_POWER_UP_DELAY_SHIFT;
3095
3096 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
3097 PANEL_LIGHT_ON_DELAY_SHIFT;
3098
3099 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
3100 PANEL_LIGHT_OFF_DELAY_SHIFT;
3101
3102 cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >>
3103 PANEL_POWER_DOWN_DELAY_SHIFT;
3104
3105 cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >>
3106 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000;
3107
3108 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
3109 cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12);
3110
41aa3448 3111 vbt = dev_priv->vbt.edp_pps;
67a54566
DV
3112
3113 /* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
3114 * our hw here, which are all in 100usec. */
3115 spec.t1_t3 = 210 * 10;
3116 spec.t8 = 50 * 10; /* no limit for t8, use t7 instead */
3117 spec.t9 = 50 * 10; /* no limit for t9, make it symmetric with t8 */
3118 spec.t10 = 500 * 10;
3119 /* This one is special and actually in units of 100ms, but zero
3120 * based in the hw (so we need to add 100 ms). But the sw vbt
3121 * table multiplies it by 1000 to make it in units of 100usec,
3122 * too. */
3123 spec.t11_t12 = (510 + 100) * 10;
3124
3125 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
3126 vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12);
3127
3128 /* Use the max of the register settings and vbt. If both are
3129 * unset, fall back to the spec limits. */
3130#define assign_final(field) final.field = (max(cur.field, vbt.field) == 0 ? \
3131 spec.field : \
3132 max(cur.field, vbt.field))
3133 assign_final(t1_t3);
3134 assign_final(t8);
3135 assign_final(t9);
3136 assign_final(t10);
3137 assign_final(t11_t12);
3138#undef assign_final
3139
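/* The delays accumulated above are in 100us units; DIV_ROUND_UP(x, 10)
 * converts them to milliseconds for the msleep()-based waits used later
 * in this file. */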
3140#define get_delay(field) (DIV_ROUND_UP(final.field, 10))
3141 intel_dp->panel_power_up_delay = get_delay(t1_t3);
3142 intel_dp->backlight_on_delay = get_delay(t8);
3143 intel_dp->backlight_off_delay = get_delay(t9);
3144 intel_dp->panel_power_down_delay = get_delay(t10);
3145 intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
3146#undef get_delay
3147
f30d26e4
JN
3148 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
3149 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
3150 intel_dp->panel_power_cycle_delay);
3151
3152 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
3153 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
3154
3155 if (out)
3156 *out = final;
3157}
3158
3159static void
3160intel_dp_init_panel_power_sequencer_registers(struct drm_device *dev,
3161 struct intel_dp *intel_dp,
3162 struct edp_power_seq *seq)
3163{
3164 struct drm_i915_private *dev_priv = dev->dev_private;
453c5420
JB
3165 u32 pp_on, pp_off, pp_div, port_sel = 0;
3166 int div = HAS_PCH_SPLIT(dev) ? intel_pch_rawclk(dev) : intel_hrawclk(dev);
3167 int pp_on_reg, pp_off_reg, pp_div_reg;
3168
3169 if (HAS_PCH_SPLIT(dev)) {
3170 pp_on_reg = PCH_PP_ON_DELAYS;
3171 pp_off_reg = PCH_PP_OFF_DELAYS;
3172 pp_div_reg = PCH_PP_DIVISOR;
3173 } else {
3174 pp_on_reg = PIPEA_PP_ON_DELAYS;
3175 pp_off_reg = PIPEA_PP_OFF_DELAYS;
3176 pp_div_reg = PIPEA_PP_DIVISOR;
3177 }
3178
67a54566 3179 /* And finally store the new values in the power sequencer. */
f30d26e4
JN
3180 pp_on = (seq->t1_t3 << PANEL_POWER_UP_DELAY_SHIFT) |
3181 (seq->t8 << PANEL_LIGHT_ON_DELAY_SHIFT);
3182 pp_off = (seq->t9 << PANEL_LIGHT_OFF_DELAY_SHIFT) |
3183 (seq->t10 << PANEL_POWER_DOWN_DELAY_SHIFT);
67a54566
DV
3184 /* Compute the divisor for the pp clock, simply match the Bspec
3185 * formula. */
453c5420 3186 pp_div = ((100 * div)/2 - 1) << PP_REFERENCE_DIVIDER_SHIFT;
f30d26e4 3187 pp_div |= (DIV_ROUND_UP(seq->t11_t12, 1000)
67a54566
DV
3188 << PANEL_POWER_CYCLE_DELAY_SHIFT);
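	/* seq->t11_t12 is kept in 100us units while the hardware's power
	 * cycle delay field counts 100ms steps, hence the divide by 1000. */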
3189
3190 /* Haswell doesn't have any port selection bits for the panel
3191 * power sequencer any more. */
bc7d38a4
ID
3192 if (IS_VALLEYVIEW(dev)) {
3193 port_sel = I915_READ(pp_on_reg) & 0xc0000000;
3194 } else if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
3195 if (dp_to_dig_port(intel_dp)->port == PORT_A)
453c5420 3196 port_sel = PANEL_POWER_PORT_DP_A;
67a54566 3197 else
453c5420 3198 port_sel = PANEL_POWER_PORT_DP_D;
67a54566
DV
3199 }
3200
453c5420
JB
3201 pp_on |= port_sel;
3202
3203 I915_WRITE(pp_on_reg, pp_on);
3204 I915_WRITE(pp_off_reg, pp_off);
3205 I915_WRITE(pp_div_reg, pp_div);
67a54566 3206
67a54566 3207 DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
453c5420
JB
3208 I915_READ(pp_on_reg),
3209 I915_READ(pp_off_reg),
3210 I915_READ(pp_div_reg));
f684960e
CW
3211}
3212
ed92f0b2
PZ
3213static bool intel_edp_init_connector(struct intel_dp *intel_dp,
3214 struct intel_connector *intel_connector)
3215{
3216 struct drm_connector *connector = &intel_connector->base;
3217 struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
3218 struct drm_device *dev = intel_dig_port->base.base.dev;
3219 struct drm_i915_private *dev_priv = dev->dev_private;
3220 struct drm_display_mode *fixed_mode = NULL;
3221 struct edp_power_seq power_seq = { 0 };
3222 bool has_dpcd;
3223 struct drm_display_mode *scan;
3224 struct edid *edid;
3225
3226 if (!is_edp(intel_dp))
3227 return true;
3228
3229 intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
3230
3231 /* Cache the DPCD and EDID for eDP. */
3232 ironlake_edp_panel_vdd_on(intel_dp);
3233 has_dpcd = intel_dp_get_dpcd(intel_dp);
3234 ironlake_edp_panel_vdd_off(intel_dp, false);
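	/*
	 * The AUX channel of an eDP panel is only usable while the panel's
	 * VDD is up, hence the vdd_on/vdd_off bracket around the DPCD read
	 * above; passing false to vdd_off defers the actual power-off to the
	 * delayed vdd work rather than doing it synchronously.
	 */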
3235
3236 if (has_dpcd) {
3237 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
3238 dev_priv->no_aux_handshake =
3239 intel_dp->dpcd[DP_MAX_DOWNSPREAD] &
3240 DP_NO_AUX_HANDSHAKE_LINK_TRAINING;
3241 } else {
3242 /* if this fails, presume the device is a ghost */
3243 DRM_INFO("failed to retrieve link info, disabling eDP\n");
3244 return false;
3245 }
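	/*
	 * Sinks with a DPCD revision of 1.1 or later can advertise, via the
	 * DP_NO_AUX_HANDSHAKE_LINK_TRAINING bit in DP_MAX_DOWNSPREAD, that
	 * they may be link trained without the full AUX handshake; that
	 * capability is cached in dev_priv->no_aux_handshake above.  A failed
	 * DPCD read means the VBT advertised a panel that is not actually
	 * there, hence the "ghost" bail-out.
	 */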
3246
3247 /* We now know it's not a ghost, init power sequence regs. */
3248 intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
3249 &power_seq);
3250
3251 ironlake_edp_panel_vdd_on(intel_dp);
3252 edid = drm_get_edid(connector, &intel_dp->adapter);
3253 if (edid) {
3254 if (drm_add_edid_modes(connector, edid)) {
3255 drm_mode_connector_update_edid_property(connector,
3256 edid);
3257 drm_edid_to_eld(connector, edid);
3258 } else {
3259 kfree(edid);
3260 edid = ERR_PTR(-EINVAL);
3261 }
3262 } else {
3263 edid = ERR_PTR(-ENOENT);
3264 }
3265 intel_connector->edid = edid;
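	/*
	 * The EDID (or an ERR_PTR-encoded error) is cached on the connector
	 * so that later detect/get_modes calls need not go back out over DDC:
	 * -EINVAL marks an EDID that was read but yielded no modes, -ENOENT
	 * marks a panel with no EDID at all.
	 */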
3266
3267 /* prefer fixed mode from EDID if available */
3268 list_for_each_entry(scan, &connector->probed_modes, head) {
3269 if ((scan->type & DRM_MODE_TYPE_PREFERRED)) {
3270 fixed_mode = drm_mode_duplicate(dev, scan);
3271 break;
3272 }
3273 }
3274
3275 /* fallback to VBT if available for eDP */
3276 if (!fixed_mode && dev_priv->vbt.lfp_lvds_vbt_mode) {
3277 fixed_mode = drm_mode_duplicate(dev,
3278 dev_priv->vbt.lfp_lvds_vbt_mode);
3279 if (fixed_mode)
3280 fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
3281 }
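	/*
	 * Preference order for the panel's fixed mode: a mode flagged as
	 * preferred in the EDID wins, and only if the EDID provides none do
	 * we fall back to the VBT's LFP mode, which is then promoted to
	 * preferred so the rest of the mode handling treats it the same way.
	 */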
3282
3283 ironlake_edp_panel_vdd_off(intel_dp, false);
3284
3285 intel_panel_init(&intel_connector->panel, fixed_mode);
3286 intel_panel_setup_backlight(connector);
3287
3288 return true;
3289}
3290
16c25533 3291bool
3292intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
3293 struct intel_connector *intel_connector)
a4fc5ed6 3294{
3295 struct drm_connector *connector = &intel_connector->base;
3296 struct intel_dp *intel_dp = &intel_dig_port->dp;
3297 struct intel_encoder *intel_encoder = &intel_dig_port->base;
3298 struct drm_device *dev = intel_encoder->base.dev;
a4fc5ed6 3299 struct drm_i915_private *dev_priv = dev->dev_private;
174edf1f 3300 enum port port = intel_dig_port->port;
5eb08b69 3301 const char *name = NULL;
b2a14755 3302 int type, error;
a4fc5ed6 3303
3304 /* Preserve the current hw state. */
3305 intel_dp->DP = I915_READ(intel_dp->output_reg);
dd06f90e 3306 intel_dp->attached_connector = intel_connector;
3d3dc149 3307
f7d24902 3308 type = DRM_MODE_CONNECTOR_DisplayPort;
3309 /*
3310 * FIXME: We need to initialize built-in panels before external panels.
3311 * For X0, DP_C is fixed as eDP. Revisit this as part of the VLV eDP cleanup.
3312 */
3313 switch (port) {
3314 case PORT_A:
b329530c 3315 type = DRM_MODE_CONNECTOR_eDP;
3316 break;
3317 case PORT_C:
3318 if (IS_VALLEYVIEW(dev))
3319 type = DRM_MODE_CONNECTOR_eDP;
3320 break;
3321 case PORT_D:
3322 if (HAS_PCH_SPLIT(dev) && intel_dpd_is_edp(dev))
3323 type = DRM_MODE_CONNECTOR_eDP;
3324 break;
3325 default: /* silence GCC warning */
3326 break;
3327 }
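	/*
	 * In other words: port A is always the internal eDP panel, port C is
	 * currently treated as eDP on Valleyview (see the FIXME above), and
	 * port D is eDP on PCH-split platforms only when the VBT marks it as
	 * such; every other case stays a regular DisplayPort connector.
	 */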
3328
3329 /*
3330 * For eDP we always set the encoder type to INTEL_OUTPUT_EDP, but
3331 * for DP the encoder type can be set by the caller to
3332 * INTEL_OUTPUT_UNKNOWN for DDI, so don't rewrite it.
3333 */
3334 if (type == DRM_MODE_CONNECTOR_eDP)
3335 intel_encoder->type = INTEL_OUTPUT_EDP;
3336
3337 DRM_DEBUG_KMS("Adding %s connector on port %c\n",
3338 type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
3339 port_name(port));
3340
b329530c 3341 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
3342 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
3343
3344 connector->interlace_allowed = true;
3345 connector->doublescan_allowed = 0;
3346
3347 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work,
3348 ironlake_panel_vdd_work);
a4fc5ed6 3349
df0e9248 3350 intel_connector_attach_encoder(intel_connector, intel_encoder);
3351 drm_sysfs_connector_add(connector);
3352
affa9354 3353 if (HAS_DDI(dev))
3354 intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
3355 else
3356 intel_connector->get_hw_state = intel_connector_get_hw_state;
3357
3358 intel_dp->aux_ch_ctl_reg = intel_dp->output_reg + 0x10;
3359 if (HAS_DDI(dev)) {
3360 switch (intel_dig_port->port) {
3361 case PORT_A:
3362 intel_dp->aux_ch_ctl_reg = DPA_AUX_CH_CTL;
3363 break;
3364 case PORT_B:
3365 intel_dp->aux_ch_ctl_reg = PCH_DPB_AUX_CH_CTL;
3366 break;
3367 case PORT_C:
3368 intel_dp->aux_ch_ctl_reg = PCH_DPC_AUX_CH_CTL;
3369 break;
3370 case PORT_D:
3371 intel_dp->aux_ch_ctl_reg = PCH_DPD_AUX_CH_CTL;
3372 break;
3373 default:
3374 BUG();
3375 }
3376 }
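	/*
	 * On pre-DDI hardware the AUX channel control register sits at a
	 * fixed +0x10 offset from the port's own control register, which is
	 * what the default above encodes; DDI platforms keep the AUX
	 * registers in a separate block, hence the per-port lookup.
	 */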
e8cb4558 3377
a4fc5ed6 3378 /* Set up the DDC bus. */
3379 switch (port) {
3380 case PORT_A:
1d843f9d 3381 intel_encoder->hpd_pin = HPD_PORT_A;
3382 name = "DPDDC-A";
3383 break;
3384 case PORT_B:
1d843f9d 3385 intel_encoder->hpd_pin = HPD_PORT_B;
3386 name = "DPDDC-B";
3387 break;
3388 case PORT_C:
1d843f9d 3389 intel_encoder->hpd_pin = HPD_PORT_C;
3390 name = "DPDDC-C";
3391 break;
3392 case PORT_D:
1d843f9d 3393 intel_encoder->hpd_pin = HPD_PORT_D;
3394 name = "DPDDC-D";
3395 break;
3396 default:
ad1c0b19 3397 BUG();
3398 }
3399
3400 error = intel_dp_i2c_init(intel_dp, intel_connector, name);
3401 WARN(error, "intel_dp_i2c_init failed with error %d for port %c\n",
3402 error, port_name(port));
c1f05264 3403
3404 intel_dp->psr_setup_done = false;
3405
b2f246a8 3406 if (!intel_edp_init_connector(intel_dp, intel_connector)) {
3407 i2c_del_adapter(&intel_dp->adapter);
3408 if (is_edp(intel_dp)) {
3409 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
3410 mutex_lock(&dev->mode_config.mutex);
3411 ironlake_panel_vdd_off_sync(intel_dp);
3412 mutex_unlock(&dev->mode_config.mutex);
3413 }
3414 drm_sysfs_connector_remove(connector);
3415 drm_connector_cleanup(connector);
16c25533 3416 return false;
b2f246a8 3417 }
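	/*
	 * Note the unwind order on failure above: the i2c adapter is removed
	 * first, then (for eDP) any pending vdd work is cancelled and VDD is
	 * forced off synchronously under the mode_config mutex, and only then
	 * are the sysfs node and the connector torn down before failure is
	 * reported to the caller.
	 */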
32f9d658 3418
3419 intel_dp_add_properties(intel_dp, connector);
3420
3421 /* For the G4X desktop chip, PEG_BAND_GAP_DATA bits 3:0 must first be
3422 * written with 0xd. Failure to do so will result in spurious interrupts
3423 * being generated on the port when a cable is not attached.
3424 */
3425 if (IS_G4X(dev) && !IS_GM45(dev)) {
3426 u32 temp = I915_READ(PEG_BAND_GAP_DATA);
3427 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
3428 }
3429
3430 return true;
a4fc5ed6 3431}
3432
3433void
3434intel_dp_init(struct drm_device *dev, int output_reg, enum port port)
3435{
3436 struct intel_digital_port *intel_dig_port;
3437 struct intel_encoder *intel_encoder;
3438 struct drm_encoder *encoder;
3439 struct intel_connector *intel_connector;
3440
3441 intel_dig_port = kzalloc(sizeof(struct intel_digital_port), GFP_KERNEL);
3442 if (!intel_dig_port)
3443 return;
3444
3445 intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
3446 if (!intel_connector) {
3447 kfree(intel_dig_port);
3448 return;
3449 }
3450
3451 intel_encoder = &intel_dig_port->base;
3452 encoder = &intel_encoder->base;
3453
3454 drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs,
3455 DRM_MODE_ENCODER_TMDS);
00c09d70 3456 drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs);
f0fec3f2 3457
5bfe2ac0 3458 intel_encoder->compute_config = intel_dp_compute_config;
3459 intel_encoder->enable = intel_enable_dp;
3460 intel_encoder->pre_enable = intel_pre_enable_dp;
3461 intel_encoder->disable = intel_disable_dp;
3462 intel_encoder->post_disable = intel_post_disable_dp;
3463 intel_encoder->get_hw_state = intel_dp_get_hw_state;
045ac3b5 3464 intel_encoder->get_config = intel_dp_get_config;
3465 if (IS_VALLEYVIEW(dev))
3466 intel_encoder->pre_pll_enable = intel_dp_pre_pll_enable;
f0fec3f2 3467
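	/*
	 * These hooks are invoked by the modeset code in a fixed order:
	 * roughly pre_pll_enable (Valleyview only) -> pre_enable -> enable on
	 * the way up and disable -> post_disable on the way down, with
	 * get_hw_state/get_config used for hardware state readout and
	 * cross-checking.
	 */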
174edf1f 3468 intel_dig_port->port = port;
3469 intel_dig_port->dp.output_reg = output_reg;
3470
00c09d70 3471 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT;
3472 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
3473 intel_encoder->cloneable = false;
3474 intel_encoder->hot_plug = intel_dp_hot_plug;
3475
3476 if (!intel_dp_init_connector(intel_dig_port, intel_connector)) {
3477 drm_encoder_cleanup(encoder);
3478 kfree(intel_dig_port);
b2f246a8 3479 kfree(intel_connector);
15b1d171 3480 }
f0fec3f2 3481}
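/*
 * Typical usage (illustrative sketch, not a verbatim quote of the callers):
 * the output setup code probes a port's detect bit and registers the port if
 * it is present, along the lines of
 *
 *	if (I915_READ(PCH_DP_B) & DP_DETECTED)
 *		intel_dp_init(dev, PCH_DP_B, PORT_B);
 *
 * intel_dp_init() then allocates the digital port and connector, wires up the
 * encoder hooks above, and tears everything down again if
 * intel_dp_init_connector() fails.
 */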