mirror_ubuntu-bionic-kernel.git: drivers/gpu/drm/i915/intel_display.c
drm/i915: add ValleyView specific CRT detect function
1/*
2 * Copyright © 2006-2007 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 */
26
618563e3 27#include <linux/dmi.h>
c1c7af60
JB
28#include <linux/module.h>
29#include <linux/input.h>
79e53945 30#include <linux/i2c.h>
7662c8bd 31#include <linux/kernel.h>
5a0e3ad6 32#include <linux/slab.h>
9cce37f4 33#include <linux/vgaarb.h>
e0dac65e 34#include <drm/drm_edid.h>
79e53945
JB
35#include "drmP.h"
36#include "intel_drv.h"
37#include "i915_drm.h"
38#include "i915_drv.h"
e5510fac 39#include "i915_trace.h"
ab2c0672 40#include "drm_dp_helper.h"
79e53945 41#include "drm_crtc_helper.h"
c0f372b3 42#include <linux/dma_remapping.h>
79e53945 43
32f9d658
ZW
44#define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
45
0206e353 46bool intel_pipe_has_type(struct drm_crtc *crtc, int type);
3dec0095 47static void intel_increase_pllclock(struct drm_crtc *crtc);
6b383a7f 48static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
79e53945
JB
49
50typedef struct {
0206e353
AJ
51 /* given values */
52 int n;
53 int m1, m2;
54 int p1, p2;
55 /* derived values */
56 int dot;
57 int vco;
58 int m;
59 int p;
79e53945
JB
60} intel_clock_t;
61
62typedef struct {
0206e353 63 int min, max;
79e53945
JB
64} intel_range_t;
65
66typedef struct {
0206e353
AJ
67 int dot_limit;
68 int p2_slow, p2_fast;
79e53945
JB
69} intel_p2_t;
70
71#define INTEL_P2_NUM 2
d4906093
ML
72typedef struct intel_limit intel_limit_t;
73struct intel_limit {
0206e353
AJ
74 intel_range_t dot, vco, n, m, m1, m2, p, p1;
75 intel_p2_t p2;
76 bool (* find_pll)(const intel_limit_t *, struct drm_crtc *,
cec2f356 77 int, int, intel_clock_t *, intel_clock_t *);
d4906093 78};
79e53945 79
2377b741
JB
80/* FDI */
81#define IRONLAKE_FDI_FREQ 2700000 /* in kHz for mode->clock */
82
d4906093
ML
83static bool
84intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
cec2f356
SP
85 int target, int refclk, intel_clock_t *match_clock,
86 intel_clock_t *best_clock);
d4906093
ML
87static bool
88intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
cec2f356
SP
89 int target, int refclk, intel_clock_t *match_clock,
90 intel_clock_t *best_clock);
79e53945 91
a4fc5ed6
KP
92static bool
93intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc,
cec2f356
SP
94 int target, int refclk, intel_clock_t *match_clock,
95 intel_clock_t *best_clock);
5eb08b69 96static bool
f2b115e6 97intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc,
cec2f356
SP
98 int target, int refclk, intel_clock_t *match_clock,
99 intel_clock_t *best_clock);
a4fc5ed6 100
a0c4da24
JB
101static bool
102intel_vlv_find_best_pll(const intel_limit_t *limit, struct drm_crtc *crtc,
103 int target, int refclk, intel_clock_t *match_clock,
104 intel_clock_t *best_clock);
105
021357ac
CW
106static inline u32 /* units of 100MHz */
107intel_fdi_link_freq(struct drm_device *dev)
108{
8b99e68c
CW
109 if (IS_GEN5(dev)) {
110 struct drm_i915_private *dev_priv = dev->dev_private;
111 return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2;
112 } else
113 return 27;
021357ac
CW
114}
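/*
 * Illustrative note (not from the hardware spec): the return value above is
 * in units of 100 MHz, so the non-GEN5 fallback of 27 corresponds to a
 * 2.7 GHz FDI link clock, while GEN5 reads the value back from
 * FDI_PLL_BIOS_0.
 */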
115
e4b36699 116static const intel_limit_t intel_limits_i8xx_dvo = {
0206e353
AJ
117 .dot = { .min = 25000, .max = 350000 },
118 .vco = { .min = 930000, .max = 1400000 },
119 .n = { .min = 3, .max = 16 },
120 .m = { .min = 96, .max = 140 },
121 .m1 = { .min = 18, .max = 26 },
122 .m2 = { .min = 6, .max = 16 },
123 .p = { .min = 4, .max = 128 },
124 .p1 = { .min = 2, .max = 33 },
273e27ca
EA
125 .p2 = { .dot_limit = 165000,
126 .p2_slow = 4, .p2_fast = 2 },
d4906093 127 .find_pll = intel_find_best_PLL,
e4b36699
KP
128};
129
130static const intel_limit_t intel_limits_i8xx_lvds = {
0206e353
AJ
131 .dot = { .min = 25000, .max = 350000 },
132 .vco = { .min = 930000, .max = 1400000 },
133 .n = { .min = 3, .max = 16 },
134 .m = { .min = 96, .max = 140 },
135 .m1 = { .min = 18, .max = 26 },
136 .m2 = { .min = 6, .max = 16 },
137 .p = { .min = 4, .max = 128 },
138 .p1 = { .min = 1, .max = 6 },
273e27ca
EA
139 .p2 = { .dot_limit = 165000,
140 .p2_slow = 14, .p2_fast = 7 },
d4906093 141 .find_pll = intel_find_best_PLL,
e4b36699 142};
273e27ca 143
e4b36699 144static const intel_limit_t intel_limits_i9xx_sdvo = {
0206e353
AJ
145 .dot = { .min = 20000, .max = 400000 },
146 .vco = { .min = 1400000, .max = 2800000 },
147 .n = { .min = 1, .max = 6 },
148 .m = { .min = 70, .max = 120 },
149 .m1 = { .min = 10, .max = 22 },
150 .m2 = { .min = 5, .max = 9 },
151 .p = { .min = 5, .max = 80 },
152 .p1 = { .min = 1, .max = 8 },
273e27ca
EA
153 .p2 = { .dot_limit = 200000,
154 .p2_slow = 10, .p2_fast = 5 },
d4906093 155 .find_pll = intel_find_best_PLL,
e4b36699
KP
156};
157
158static const intel_limit_t intel_limits_i9xx_lvds = {
0206e353
AJ
159 .dot = { .min = 20000, .max = 400000 },
160 .vco = { .min = 1400000, .max = 2800000 },
161 .n = { .min = 1, .max = 6 },
162 .m = { .min = 70, .max = 120 },
163 .m1 = { .min = 10, .max = 22 },
164 .m2 = { .min = 5, .max = 9 },
165 .p = { .min = 7, .max = 98 },
166 .p1 = { .min = 1, .max = 8 },
273e27ca
EA
167 .p2 = { .dot_limit = 112000,
168 .p2_slow = 14, .p2_fast = 7 },
d4906093 169 .find_pll = intel_find_best_PLL,
e4b36699
KP
170};
171
273e27ca 172
e4b36699 173static const intel_limit_t intel_limits_g4x_sdvo = {
273e27ca
EA
174 .dot = { .min = 25000, .max = 270000 },
175 .vco = { .min = 1750000, .max = 3500000},
176 .n = { .min = 1, .max = 4 },
177 .m = { .min = 104, .max = 138 },
178 .m1 = { .min = 17, .max = 23 },
179 .m2 = { .min = 5, .max = 11 },
180 .p = { .min = 10, .max = 30 },
181 .p1 = { .min = 1, .max = 3},
182 .p2 = { .dot_limit = 270000,
183 .p2_slow = 10,
184 .p2_fast = 10
044c7c41 185 },
d4906093 186 .find_pll = intel_g4x_find_best_PLL,
e4b36699
KP
187};
188
189static const intel_limit_t intel_limits_g4x_hdmi = {
273e27ca
EA
190 .dot = { .min = 22000, .max = 400000 },
191 .vco = { .min = 1750000, .max = 3500000},
192 .n = { .min = 1, .max = 4 },
193 .m = { .min = 104, .max = 138 },
194 .m1 = { .min = 16, .max = 23 },
195 .m2 = { .min = 5, .max = 11 },
196 .p = { .min = 5, .max = 80 },
197 .p1 = { .min = 1, .max = 8},
198 .p2 = { .dot_limit = 165000,
199 .p2_slow = 10, .p2_fast = 5 },
d4906093 200 .find_pll = intel_g4x_find_best_PLL,
e4b36699
KP
201};
202
203static const intel_limit_t intel_limits_g4x_single_channel_lvds = {
273e27ca
EA
204 .dot = { .min = 20000, .max = 115000 },
205 .vco = { .min = 1750000, .max = 3500000 },
206 .n = { .min = 1, .max = 3 },
207 .m = { .min = 104, .max = 138 },
208 .m1 = { .min = 17, .max = 23 },
209 .m2 = { .min = 5, .max = 11 },
210 .p = { .min = 28, .max = 112 },
211 .p1 = { .min = 2, .max = 8 },
212 .p2 = { .dot_limit = 0,
213 .p2_slow = 14, .p2_fast = 14
044c7c41 214 },
d4906093 215 .find_pll = intel_g4x_find_best_PLL,
e4b36699
KP
216};
217
218static const intel_limit_t intel_limits_g4x_dual_channel_lvds = {
273e27ca
EA
219 .dot = { .min = 80000, .max = 224000 },
220 .vco = { .min = 1750000, .max = 3500000 },
221 .n = { .min = 1, .max = 3 },
222 .m = { .min = 104, .max = 138 },
223 .m1 = { .min = 17, .max = 23 },
224 .m2 = { .min = 5, .max = 11 },
225 .p = { .min = 14, .max = 42 },
226 .p1 = { .min = 2, .max = 6 },
227 .p2 = { .dot_limit = 0,
228 .p2_slow = 7, .p2_fast = 7
044c7c41 229 },
d4906093 230 .find_pll = intel_g4x_find_best_PLL,
e4b36699
KP
231};
232
233static const intel_limit_t intel_limits_g4x_display_port = {
0206e353
AJ
234 .dot = { .min = 161670, .max = 227000 },
235 .vco = { .min = 1750000, .max = 3500000},
236 .n = { .min = 1, .max = 2 },
237 .m = { .min = 97, .max = 108 },
238 .m1 = { .min = 0x10, .max = 0x12 },
239 .m2 = { .min = 0x05, .max = 0x06 },
240 .p = { .min = 10, .max = 20 },
241 .p1 = { .min = 1, .max = 2},
242 .p2 = { .dot_limit = 0,
273e27ca 243 .p2_slow = 10, .p2_fast = 10 },
0206e353 244 .find_pll = intel_find_pll_g4x_dp,
e4b36699
KP
245};
246
f2b115e6 247static const intel_limit_t intel_limits_pineview_sdvo = {
0206e353
AJ
248 .dot = { .min = 20000, .max = 400000},
249 .vco = { .min = 1700000, .max = 3500000 },
273e27ca 250 /* Pineview's Ncounter is a ring counter */
0206e353
AJ
251 .n = { .min = 3, .max = 6 },
252 .m = { .min = 2, .max = 256 },
273e27ca 253 /* Pineview only has one combined m divider, which we treat as m2. */
0206e353
AJ
254 .m1 = { .min = 0, .max = 0 },
255 .m2 = { .min = 0, .max = 254 },
256 .p = { .min = 5, .max = 80 },
257 .p1 = { .min = 1, .max = 8 },
273e27ca
EA
258 .p2 = { .dot_limit = 200000,
259 .p2_slow = 10, .p2_fast = 5 },
6115707b 260 .find_pll = intel_find_best_PLL,
e4b36699
KP
261};
262
f2b115e6 263static const intel_limit_t intel_limits_pineview_lvds = {
0206e353
AJ
264 .dot = { .min = 20000, .max = 400000 },
265 .vco = { .min = 1700000, .max = 3500000 },
266 .n = { .min = 3, .max = 6 },
267 .m = { .min = 2, .max = 256 },
268 .m1 = { .min = 0, .max = 0 },
269 .m2 = { .min = 0, .max = 254 },
270 .p = { .min = 7, .max = 112 },
271 .p1 = { .min = 1, .max = 8 },
273e27ca
EA
272 .p2 = { .dot_limit = 112000,
273 .p2_slow = 14, .p2_fast = 14 },
6115707b 274 .find_pll = intel_find_best_PLL,
e4b36699
KP
275};
276
273e27ca
EA
277/* Ironlake / Sandybridge
278 *
279 * We calculate clock using (register_value + 2) for N/M1/M2, so here
280 * the range value for them is (actual_value - 2).
281 */
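/*
 * For example (illustrative): .n = { .min = 1, .max = 5 } in the table below
 * holds register values, i.e. actual N dividers of 3..7.
 */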
b91ad0ec 282static const intel_limit_t intel_limits_ironlake_dac = {
273e27ca
EA
283 .dot = { .min = 25000, .max = 350000 },
284 .vco = { .min = 1760000, .max = 3510000 },
285 .n = { .min = 1, .max = 5 },
286 .m = { .min = 79, .max = 127 },
287 .m1 = { .min = 12, .max = 22 },
288 .m2 = { .min = 5, .max = 9 },
289 .p = { .min = 5, .max = 80 },
290 .p1 = { .min = 1, .max = 8 },
291 .p2 = { .dot_limit = 225000,
292 .p2_slow = 10, .p2_fast = 5 },
4547668a 293 .find_pll = intel_g4x_find_best_PLL,
e4b36699
KP
294};
295
b91ad0ec 296static const intel_limit_t intel_limits_ironlake_single_lvds = {
273e27ca
EA
297 .dot = { .min = 25000, .max = 350000 },
298 .vco = { .min = 1760000, .max = 3510000 },
299 .n = { .min = 1, .max = 3 },
300 .m = { .min = 79, .max = 118 },
301 .m1 = { .min = 12, .max = 22 },
302 .m2 = { .min = 5, .max = 9 },
303 .p = { .min = 28, .max = 112 },
304 .p1 = { .min = 2, .max = 8 },
305 .p2 = { .dot_limit = 225000,
306 .p2_slow = 14, .p2_fast = 14 },
b91ad0ec
ZW
307 .find_pll = intel_g4x_find_best_PLL,
308};
309
310static const intel_limit_t intel_limits_ironlake_dual_lvds = {
273e27ca
EA
311 .dot = { .min = 25000, .max = 350000 },
312 .vco = { .min = 1760000, .max = 3510000 },
313 .n = { .min = 1, .max = 3 },
314 .m = { .min = 79, .max = 127 },
315 .m1 = { .min = 12, .max = 22 },
316 .m2 = { .min = 5, .max = 9 },
317 .p = { .min = 14, .max = 56 },
318 .p1 = { .min = 2, .max = 8 },
319 .p2 = { .dot_limit = 225000,
320 .p2_slow = 7, .p2_fast = 7 },
b91ad0ec
ZW
321 .find_pll = intel_g4x_find_best_PLL,
322};
323
273e27ca 324/* LVDS 100MHz refclk limits. */
b91ad0ec 325static const intel_limit_t intel_limits_ironlake_single_lvds_100m = {
273e27ca
EA
326 .dot = { .min = 25000, .max = 350000 },
327 .vco = { .min = 1760000, .max = 3510000 },
328 .n = { .min = 1, .max = 2 },
329 .m = { .min = 79, .max = 126 },
330 .m1 = { .min = 12, .max = 22 },
331 .m2 = { .min = 5, .max = 9 },
332 .p = { .min = 28, .max = 112 },
0206e353 333 .p1 = { .min = 2, .max = 8 },
273e27ca
EA
334 .p2 = { .dot_limit = 225000,
335 .p2_slow = 14, .p2_fast = 14 },
b91ad0ec
ZW
336 .find_pll = intel_g4x_find_best_PLL,
337};
338
339static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = {
273e27ca
EA
340 .dot = { .min = 25000, .max = 350000 },
341 .vco = { .min = 1760000, .max = 3510000 },
342 .n = { .min = 1, .max = 3 },
343 .m = { .min = 79, .max = 126 },
344 .m1 = { .min = 12, .max = 22 },
345 .m2 = { .min = 5, .max = 9 },
346 .p = { .min = 14, .max = 42 },
0206e353 347 .p1 = { .min = 2, .max = 6 },
273e27ca
EA
348 .p2 = { .dot_limit = 225000,
349 .p2_slow = 7, .p2_fast = 7 },
4547668a
ZY
350 .find_pll = intel_g4x_find_best_PLL,
351};
352
353static const intel_limit_t intel_limits_ironlake_display_port = {
0206e353
AJ
354 .dot = { .min = 25000, .max = 350000 },
355 .vco = { .min = 1760000, .max = 3510000},
356 .n = { .min = 1, .max = 2 },
357 .m = { .min = 81, .max = 90 },
358 .m1 = { .min = 12, .max = 22 },
359 .m2 = { .min = 5, .max = 9 },
360 .p = { .min = 10, .max = 20 },
361 .p1 = { .min = 1, .max = 2},
362 .p2 = { .dot_limit = 0,
273e27ca 363 .p2_slow = 10, .p2_fast = 10 },
0206e353 364 .find_pll = intel_find_pll_ironlake_dp,
79e53945
JB
365};
366
a0c4da24
JB
367static const intel_limit_t intel_limits_vlv_dac = {
368 .dot = { .min = 25000, .max = 270000 },
369 .vco = { .min = 4000000, .max = 6000000 },
370 .n = { .min = 1, .max = 7 },
371 .m = { .min = 22, .max = 450 }, /* guess */
372 .m1 = { .min = 2, .max = 3 },
373 .m2 = { .min = 11, .max = 156 },
374 .p = { .min = 10, .max = 30 },
375 .p1 = { .min = 2, .max = 3 },
376 .p2 = { .dot_limit = 270000,
377 .p2_slow = 2, .p2_fast = 20 },
378 .find_pll = intel_vlv_find_best_pll,
379};
380
381static const intel_limit_t intel_limits_vlv_hdmi = {
382 .dot = { .min = 20000, .max = 165000 },
 383	.vco = { .min = 4000000, .max = 5994000 },
384 .n = { .min = 1, .max = 7 },
385 .m = { .min = 60, .max = 300 }, /* guess */
386 .m1 = { .min = 2, .max = 3 },
387 .m2 = { .min = 11, .max = 156 },
388 .p = { .min = 10, .max = 30 },
389 .p1 = { .min = 2, .max = 3 },
390 .p2 = { .dot_limit = 270000,
391 .p2_slow = 2, .p2_fast = 20 },
392 .find_pll = intel_vlv_find_best_pll,
393};
394
395static const intel_limit_t intel_limits_vlv_dp = {
396 .dot = { .min = 162000, .max = 270000 },
 397	.vco = { .min = 4000000, .max = 5994000 },
398 .n = { .min = 1, .max = 7 },
399 .m = { .min = 60, .max = 300 }, /* guess */
400 .m1 = { .min = 2, .max = 3 },
401 .m2 = { .min = 11, .max = 156 },
402 .p = { .min = 10, .max = 30 },
403 .p1 = { .min = 2, .max = 3 },
404 .p2 = { .dot_limit = 270000,
405 .p2_slow = 2, .p2_fast = 20 },
406 .find_pll = intel_vlv_find_best_pll,
407};
408
57f350b6
JB
409u32 intel_dpio_read(struct drm_i915_private *dev_priv, int reg)
410{
411 unsigned long flags;
412 u32 val = 0;
413
414 spin_lock_irqsave(&dev_priv->dpio_lock, flags);
415 if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
416 DRM_ERROR("DPIO idle wait timed out\n");
417 goto out_unlock;
418 }
419
420 I915_WRITE(DPIO_REG, reg);
421 I915_WRITE(DPIO_PKT, DPIO_RID | DPIO_OP_READ | DPIO_PORTID |
422 DPIO_BYTE);
423 if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
424 DRM_ERROR("DPIO read wait timed out\n");
425 goto out_unlock;
426 }
427 val = I915_READ(DPIO_DATA);
428
429out_unlock:
430 spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
431 return val;
432}
433
a0c4da24
JB
434static void intel_dpio_write(struct drm_i915_private *dev_priv, int reg,
435 u32 val)
436{
437 unsigned long flags;
438
439 spin_lock_irqsave(&dev_priv->dpio_lock, flags);
440 if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100)) {
441 DRM_ERROR("DPIO idle wait timed out\n");
442 goto out_unlock;
443 }
444
445 I915_WRITE(DPIO_DATA, val);
446 I915_WRITE(DPIO_REG, reg);
447 I915_WRITE(DPIO_PKT, DPIO_RID | DPIO_OP_WRITE | DPIO_PORTID |
448 DPIO_BYTE);
449 if (wait_for_atomic_us((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100))
450 DRM_ERROR("DPIO write wait timed out\n");
451
452out_unlock:
453 spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
454}
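/*
 * Usage sketch for the two DPIO helpers above (illustrative only; the
 * register offset and bit below are hypothetical placeholders, not real
 * DPIO register names):
 *
 *	u32 tmp = intel_dpio_read(dev_priv, some_dpio_reg);
 *	intel_dpio_write(dev_priv, some_dpio_reg, tmp | some_bit);
 *
 * Both helpers serialize on dpio_lock and follow the same handshake: wait
 * for DPIO_PKT to go idle, program DPIO_REG (plus DPIO_DATA for a write),
 * kick off the transaction through DPIO_PKT, wait for idle again, and for
 * a read fetch the result from DPIO_DATA.
 */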
455
57f350b6
JB
456static void vlv_init_dpio(struct drm_device *dev)
457{
458 struct drm_i915_private *dev_priv = dev->dev_private;
459
460 /* Reset the DPIO config */
461 I915_WRITE(DPIO_CTL, 0);
462 POSTING_READ(DPIO_CTL);
463 I915_WRITE(DPIO_CTL, 1);
464 POSTING_READ(DPIO_CTL);
465}
466
618563e3
DV
467static int intel_dual_link_lvds_callback(const struct dmi_system_id *id)
468{
469 DRM_INFO("Forcing lvds to dual link mode on %s\n", id->ident);
470 return 1;
471}
472
473static const struct dmi_system_id intel_dual_link_lvds[] = {
474 {
475 .callback = intel_dual_link_lvds_callback,
476 .ident = "Apple MacBook Pro (Core i5/i7 Series)",
477 .matches = {
478 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
479 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro8,2"),
480 },
481 },
482 { } /* terminating entry */
483};
484
b0354385
TI
485static bool is_dual_link_lvds(struct drm_i915_private *dev_priv,
486 unsigned int reg)
487{
488 unsigned int val;
489
121d527a
TI
490 /* use the module option value if specified */
491 if (i915_lvds_channel_mode > 0)
492 return i915_lvds_channel_mode == 2;
493
618563e3
DV
494 if (dmi_check_system(intel_dual_link_lvds))
495 return true;
496
b0354385
TI
497 if (dev_priv->lvds_val)
498 val = dev_priv->lvds_val;
499 else {
500 /* BIOS should set the proper LVDS register value at boot, but
501 * in reality, it doesn't set the value when the lid is closed;
502 * we need to check "the value to be set" in VBT when LVDS
503 * register is uninitialized.
504 */
505 val = I915_READ(reg);
14d94a3d 506 if (!(val & ~(LVDS_PIPE_MASK | LVDS_DETECTED)))
b0354385
TI
507 val = dev_priv->bios_lvds_val;
508 dev_priv->lvds_val = val;
509 }
510 return (val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP;
511}
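/*
 * Summary of the decision order above: the i915_lvds_channel_mode module
 * option wins, then the intel_dual_link_lvds DMI quirk table, and only then
 * the cached LVDS register value (falling back to the VBT-provided
 * bios_lvds_val when the BIOS left the register uninitialized).
 */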
512
1b894b59
CW
513static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
514 int refclk)
2c07245f 515{
b91ad0ec
ZW
516 struct drm_device *dev = crtc->dev;
517 struct drm_i915_private *dev_priv = dev->dev_private;
2c07245f 518 const intel_limit_t *limit;
b91ad0ec
ZW
519
520 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
b0354385 521 if (is_dual_link_lvds(dev_priv, PCH_LVDS)) {
b91ad0ec 522 /* LVDS dual channel */
1b894b59 523 if (refclk == 100000)
b91ad0ec
ZW
524 limit = &intel_limits_ironlake_dual_lvds_100m;
525 else
526 limit = &intel_limits_ironlake_dual_lvds;
527 } else {
1b894b59 528 if (refclk == 100000)
b91ad0ec
ZW
529 limit = &intel_limits_ironlake_single_lvds_100m;
530 else
531 limit = &intel_limits_ironlake_single_lvds;
532 }
533 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
4547668a
ZY
534 HAS_eDP)
535 limit = &intel_limits_ironlake_display_port;
2c07245f 536 else
b91ad0ec 537 limit = &intel_limits_ironlake_dac;
2c07245f
ZW
538
539 return limit;
540}
541
044c7c41
ML
542static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
543{
544 struct drm_device *dev = crtc->dev;
545 struct drm_i915_private *dev_priv = dev->dev_private;
546 const intel_limit_t *limit;
547
548 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
b0354385 549 if (is_dual_link_lvds(dev_priv, LVDS))
044c7c41 550 /* LVDS with dual channel */
e4b36699 551 limit = &intel_limits_g4x_dual_channel_lvds;
044c7c41
ML
552 else
 553			/* LVDS with single channel */
e4b36699 554 limit = &intel_limits_g4x_single_channel_lvds;
044c7c41
ML
555 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) ||
556 intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
e4b36699 557 limit = &intel_limits_g4x_hdmi;
044c7c41 558 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) {
e4b36699 559 limit = &intel_limits_g4x_sdvo;
0206e353 560 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
e4b36699 561 limit = &intel_limits_g4x_display_port;
044c7c41 562 } else /* The option is for other outputs */
e4b36699 563 limit = &intel_limits_i9xx_sdvo;
044c7c41
ML
564
565 return limit;
566}
567
1b894b59 568static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
79e53945
JB
569{
570 struct drm_device *dev = crtc->dev;
571 const intel_limit_t *limit;
572
bad720ff 573 if (HAS_PCH_SPLIT(dev))
1b894b59 574 limit = intel_ironlake_limit(crtc, refclk);
2c07245f 575 else if (IS_G4X(dev)) {
044c7c41 576 limit = intel_g4x_limit(crtc);
f2b115e6 577 } else if (IS_PINEVIEW(dev)) {
2177832f 578 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
f2b115e6 579 limit = &intel_limits_pineview_lvds;
2177832f 580 else
f2b115e6 581 limit = &intel_limits_pineview_sdvo;
a0c4da24
JB
582 } else if (IS_VALLEYVIEW(dev)) {
583 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG))
584 limit = &intel_limits_vlv_dac;
585 else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
586 limit = &intel_limits_vlv_hdmi;
587 else
588 limit = &intel_limits_vlv_dp;
a6c45cf0
CW
589 } else if (!IS_GEN2(dev)) {
590 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
591 limit = &intel_limits_i9xx_lvds;
592 else
593 limit = &intel_limits_i9xx_sdvo;
79e53945
JB
594 } else {
595 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
e4b36699 596 limit = &intel_limits_i8xx_lvds;
79e53945 597 else
e4b36699 598 limit = &intel_limits_i8xx_dvo;
79e53945
JB
599 }
600 return limit;
601}
602
f2b115e6
AJ
603/* m1 is reserved as 0 in Pineview, n is a ring counter */
604static void pineview_clock(int refclk, intel_clock_t *clock)
79e53945 605{
2177832f
SL
606 clock->m = clock->m2 + 2;
607 clock->p = clock->p1 * clock->p2;
608 clock->vco = refclk * clock->m / clock->n;
609 clock->dot = clock->vco / clock->p;
610}
611
612static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock)
613{
f2b115e6
AJ
614 if (IS_PINEVIEW(dev)) {
615 pineview_clock(refclk, clock);
2177832f
SL
616 return;
617 }
79e53945
JB
618 clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
619 clock->p = clock->p1 * clock->p2;
620 clock->vco = refclk * clock->m / (clock->n + 2);
621 clock->dot = clock->vco / clock->p;
622}
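/*
 * Worked example (illustrative numbers only): with a 96000 kHz refclk and
 * n = 2, m1 = 12, m2 = 7, p1 = 2, p2 = 5 on a non-Pineview part:
 *
 *	m   = 5 * (12 + 2) + (7 + 2) = 79
 *	vco = 96000 * 79 / (2 + 2)   = 1896000 kHz
 *	p   = 2 * 5                  = 10
 *	dot = 1896000 / 10           = 189600 kHz (~189.6 MHz)
 *
 * which falls inside the intel_limits_i9xx_sdvo ranges above.
 */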
623
79e53945
JB
624/**
625 * Returns whether any output on the specified pipe is of the specified type
626 */
4ef69c7a 627bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
79e53945 628{
4ef69c7a
CW
629 struct drm_device *dev = crtc->dev;
630 struct drm_mode_config *mode_config = &dev->mode_config;
631 struct intel_encoder *encoder;
632
633 list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
634 if (encoder->base.crtc == crtc && encoder->type == type)
635 return true;
636
637 return false;
79e53945
JB
638}
639
7c04d1d9 640#define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0)
79e53945
JB
641/**
642 * Returns whether the given set of divisors are valid for a given refclk with
643 * the given connectors.
644 */
645
1b894b59
CW
646static bool intel_PLL_is_valid(struct drm_device *dev,
647 const intel_limit_t *limit,
648 const intel_clock_t *clock)
79e53945 649{
79e53945 650 if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
0206e353 651 INTELPllInvalid("p1 out of range\n");
79e53945 652 if (clock->p < limit->p.min || limit->p.max < clock->p)
0206e353 653 INTELPllInvalid("p out of range\n");
79e53945 654 if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
0206e353 655 INTELPllInvalid("m2 out of range\n");
79e53945 656 if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
0206e353 657 INTELPllInvalid("m1 out of range\n");
f2b115e6 658 if (clock->m1 <= clock->m2 && !IS_PINEVIEW(dev))
0206e353 659 INTELPllInvalid("m1 <= m2\n");
79e53945 660 if (clock->m < limit->m.min || limit->m.max < clock->m)
0206e353 661 INTELPllInvalid("m out of range\n");
79e53945 662 if (clock->n < limit->n.min || limit->n.max < clock->n)
0206e353 663 INTELPllInvalid("n out of range\n");
79e53945 664 if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
0206e353 665 INTELPllInvalid("vco out of range\n");
79e53945
JB
666 /* XXX: We may need to be checking "Dot clock" depending on the multiplier,
667 * connector, etc., rather than just a single range.
668 */
669 if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
0206e353 670 INTELPllInvalid("dot out of range\n");
79e53945
JB
671
672 return true;
673}
674
d4906093
ML
675static bool
676intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
cec2f356
SP
677 int target, int refclk, intel_clock_t *match_clock,
678 intel_clock_t *best_clock)
d4906093 679
79e53945
JB
680{
681 struct drm_device *dev = crtc->dev;
682 struct drm_i915_private *dev_priv = dev->dev_private;
683 intel_clock_t clock;
79e53945
JB
684 int err = target;
685
bc5e5718 686 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
832cc28d 687 (I915_READ(LVDS)) != 0) {
79e53945
JB
688 /*
689 * For LVDS, if the panel is on, just rely on its current
690 * settings for dual-channel. We haven't figured out how to
691 * reliably set up different single/dual channel state, if we
692 * even can.
693 */
b0354385 694 if (is_dual_link_lvds(dev_priv, LVDS))
79e53945
JB
695 clock.p2 = limit->p2.p2_fast;
696 else
697 clock.p2 = limit->p2.p2_slow;
698 } else {
699 if (target < limit->p2.dot_limit)
700 clock.p2 = limit->p2.p2_slow;
701 else
702 clock.p2 = limit->p2.p2_fast;
703 }
704
0206e353 705 memset(best_clock, 0, sizeof(*best_clock));
79e53945 706
42158660
ZY
707 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
708 clock.m1++) {
709 for (clock.m2 = limit->m2.min;
710 clock.m2 <= limit->m2.max; clock.m2++) {
f2b115e6
AJ
711 /* m1 is always 0 in Pineview */
712 if (clock.m2 >= clock.m1 && !IS_PINEVIEW(dev))
42158660
ZY
713 break;
714 for (clock.n = limit->n.min;
715 clock.n <= limit->n.max; clock.n++) {
716 for (clock.p1 = limit->p1.min;
717 clock.p1 <= limit->p1.max; clock.p1++) {
79e53945
JB
718 int this_err;
719
2177832f 720 intel_clock(dev, refclk, &clock);
1b894b59
CW
721 if (!intel_PLL_is_valid(dev, limit,
722 &clock))
79e53945 723 continue;
cec2f356
SP
724 if (match_clock &&
725 clock.p != match_clock->p)
726 continue;
79e53945
JB
727
728 this_err = abs(clock.dot - target);
729 if (this_err < err) {
730 *best_clock = clock;
731 err = this_err;
732 }
733 }
734 }
735 }
736 }
737
738 return (err != target);
739}
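/*
 * Note on the return value above: err starts out equal to target, so true is
 * returned only if at least one divisor combination passed
 * intel_PLL_is_valid() and brought |dot - target| below that initial error.
 */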
740
d4906093
ML
741static bool
742intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
cec2f356
SP
743 int target, int refclk, intel_clock_t *match_clock,
744 intel_clock_t *best_clock)
d4906093
ML
745{
746 struct drm_device *dev = crtc->dev;
747 struct drm_i915_private *dev_priv = dev->dev_private;
748 intel_clock_t clock;
749 int max_n;
750 bool found;
6ba770dc
AJ
751 /* approximately equals target * 0.00585 */
752 int err_most = (target >> 8) + (target >> 9);
d4906093
ML
753 found = false;
754
755 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
4547668a
ZY
756 int lvds_reg;
757
c619eed4 758 if (HAS_PCH_SPLIT(dev))
4547668a
ZY
759 lvds_reg = PCH_LVDS;
760 else
761 lvds_reg = LVDS;
762 if ((I915_READ(lvds_reg) & LVDS_CLKB_POWER_MASK) ==
d4906093
ML
763 LVDS_CLKB_POWER_UP)
764 clock.p2 = limit->p2.p2_fast;
765 else
766 clock.p2 = limit->p2.p2_slow;
767 } else {
768 if (target < limit->p2.dot_limit)
769 clock.p2 = limit->p2.p2_slow;
770 else
771 clock.p2 = limit->p2.p2_fast;
772 }
773
774 memset(best_clock, 0, sizeof(*best_clock));
775 max_n = limit->n.max;
f77f13e2 776	/* based on hardware requirement, prefer smaller n for precision */
d4906093 777	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
f77f13e2 778		/* based on hardware requirement, prefer larger m1,m2 */
d4906093
ML
779 for (clock.m1 = limit->m1.max;
780 clock.m1 >= limit->m1.min; clock.m1--) {
781 for (clock.m2 = limit->m2.max;
782 clock.m2 >= limit->m2.min; clock.m2--) {
783 for (clock.p1 = limit->p1.max;
784 clock.p1 >= limit->p1.min; clock.p1--) {
785 int this_err;
786
2177832f 787 intel_clock(dev, refclk, &clock);
1b894b59
CW
788 if (!intel_PLL_is_valid(dev, limit,
789 &clock))
d4906093 790 continue;
cec2f356
SP
791 if (match_clock &&
792 clock.p != match_clock->p)
793 continue;
1b894b59
CW
794
795 this_err = abs(clock.dot - target);
d4906093
ML
796 if (this_err < err_most) {
797 *best_clock = clock;
798 err_most = this_err;
799 max_n = clock.n;
800 found = true;
801 }
802 }
803 }
804 }
805 }
2c07245f
ZW
806 return found;
807}
808
5eb08b69 809static bool
f2b115e6 810intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
cec2f356
SP
811 int target, int refclk, intel_clock_t *match_clock,
812 intel_clock_t *best_clock)
5eb08b69
ZW
813{
814 struct drm_device *dev = crtc->dev;
815 intel_clock_t clock;
4547668a 816
5eb08b69
ZW
817 if (target < 200000) {
818 clock.n = 1;
819 clock.p1 = 2;
820 clock.p2 = 10;
821 clock.m1 = 12;
822 clock.m2 = 9;
823 } else {
824 clock.n = 2;
825 clock.p1 = 1;
826 clock.p2 = 10;
827 clock.m1 = 14;
828 clock.m2 = 8;
829 }
830 intel_clock(dev, refclk, &clock);
831 memcpy(best_clock, &clock, sizeof(intel_clock_t));
832 return true;
833}
834
a4fc5ed6
KP
835/* DisplayPort has only two frequencies, 162MHz and 270MHz */
836static bool
837intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc,
cec2f356
SP
838 int target, int refclk, intel_clock_t *match_clock,
839 intel_clock_t *best_clock)
a4fc5ed6 840{
5eddb70b
CW
841 intel_clock_t clock;
842 if (target < 200000) {
843 clock.p1 = 2;
844 clock.p2 = 10;
845 clock.n = 2;
846 clock.m1 = 23;
847 clock.m2 = 8;
848 } else {
849 clock.p1 = 1;
850 clock.p2 = 10;
851 clock.n = 1;
852 clock.m1 = 14;
853 clock.m2 = 2;
854 }
855 clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2);
856 clock.p = (clock.p1 * clock.p2);
857 clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p;
858 clock.vco = 0;
859 memcpy(best_clock, &clock, sizeof(intel_clock_t));
860 return true;
a4fc5ed6 861}
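/*
 * Sanity check (illustrative arithmetic) for the fixed divisors above, with
 * a 96000 kHz refclk:
 *
 *	low  rate: m = 5*(23+2) + (8+2) = 135, p = 20,
 *	           dot = 96000 * 135 / (2+2) / 20 = 162000 kHz (162 MHz)
 *	high rate: m = 5*(14+2) + (2+2) = 84,  p = 10,
 *	           dot = 96000 * 84 / (1+2) / 10 = 268800 kHz (~270 MHz)
 *
 * matching the two DisplayPort link frequencies noted above.
 */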
a0c4da24
JB
862static bool
863intel_vlv_find_best_pll(const intel_limit_t *limit, struct drm_crtc *crtc,
864 int target, int refclk, intel_clock_t *match_clock,
865 intel_clock_t *best_clock)
866{
867 u32 p1, p2, m1, m2, vco, bestn, bestm1, bestm2, bestp1, bestp2;
868 u32 m, n, fastclk;
869 u32 updrate, minupdate, fracbits, p;
870 unsigned long bestppm, ppm, absppm;
 871	int dotclk, flag = 0;
872
873 dotclk = target * 1000;
874 bestppm = 1000000;
875 ppm = absppm = 0;
876 fastclk = dotclk / (2*100);
877 updrate = 0;
878 minupdate = 19200;
879 fracbits = 1;
880 n = p = p1 = p2 = m = m1 = m2 = vco = bestn = 0;
881 bestm1 = bestm2 = bestp1 = bestp2 = 0;
882
 883	/* based on hardware requirement, prefer smaller n for precision */
884 for (n = limit->n.min; n <= ((refclk) / minupdate); n++) {
885 updrate = refclk / n;
886 for (p1 = limit->p1.max; p1 > limit->p1.min; p1--) {
887 for (p2 = limit->p2.p2_fast+1; p2 > 0; p2--) {
888 if (p2 > 10)
889 p2 = p2 - 1;
890 p = p1 * p2;
891 /* based on hardware requirement, prefer bigger m1,m2 values */
892 for (m1 = limit->m1.min; m1 <= limit->m1.max; m1++) {
893 m2 = (((2*(fastclk * p * n / m1 )) +
894 refclk) / (2*refclk));
895 m = m1 * m2;
896 vco = updrate * m;
897 if (vco >= limit->vco.min && vco < limit->vco.max) {
898 ppm = 1000000 * ((vco / p) - fastclk) / fastclk;
899 absppm = (ppm > 0) ? ppm : (-ppm);
900 if (absppm < 100 && ((p1 * p2) > (bestp1 * bestp2))) {
901 bestppm = 0;
902 flag = 1;
903 }
904 if (absppm < bestppm - 10) {
905 bestppm = absppm;
906 flag = 1;
907 }
908 if (flag) {
909 bestn = n;
910 bestm1 = m1;
911 bestm2 = m2;
912 bestp1 = p1;
913 bestp2 = p2;
914 flag = 0;
915 }
916 }
917 }
918 }
919 }
920 }
921 best_clock->n = bestn;
922 best_clock->m1 = bestm1;
923 best_clock->m2 = bestm2;
924 best_clock->p1 = bestp1;
925 best_clock->p2 = bestp2;
926
927 return true;
928}
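/*
 * Summary of the search above: the deviation of each candidate clock from
 * the requested one is tracked in parts per million (ppm); a candidate is
 * taken either when it deviates by less than 100 ppm and uses a larger
 * p1 * p2, or when it improves on the previous best by more than 10 ppm.
 */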
a4fc5ed6 929
a928d536
PZ
930static void ironlake_wait_for_vblank(struct drm_device *dev, int pipe)
931{
932 struct drm_i915_private *dev_priv = dev->dev_private;
933 u32 frame, frame_reg = PIPEFRAME(pipe);
934
935 frame = I915_READ(frame_reg);
936
937 if (wait_for(I915_READ_NOTRACE(frame_reg) != frame, 50))
938 DRM_DEBUG_KMS("vblank wait timed out\n");
939}
940
9d0498a2
JB
941/**
942 * intel_wait_for_vblank - wait for vblank on a given pipe
943 * @dev: drm device
944 * @pipe: pipe to wait for
945 *
946 * Wait for vblank to occur on a given pipe. Needed for various bits of
947 * mode setting code.
948 */
949void intel_wait_for_vblank(struct drm_device *dev, int pipe)
79e53945 950{
9d0498a2 951 struct drm_i915_private *dev_priv = dev->dev_private;
9db4a9c7 952 int pipestat_reg = PIPESTAT(pipe);
9d0498a2 953
a928d536
PZ
954 if (INTEL_INFO(dev)->gen >= 5) {
955 ironlake_wait_for_vblank(dev, pipe);
956 return;
957 }
958
300387c0
CW
959 /* Clear existing vblank status. Note this will clear any other
960 * sticky status fields as well.
961 *
962 * This races with i915_driver_irq_handler() with the result
963 * that either function could miss a vblank event. Here it is not
964 * fatal, as we will either wait upon the next vblank interrupt or
965 * timeout. Generally speaking intel_wait_for_vblank() is only
966 * called during modeset at which time the GPU should be idle and
967 * should *not* be performing page flips and thus not waiting on
968 * vblanks...
969 * Currently, the result of us stealing a vblank from the irq
970 * handler is that a single frame will be skipped during swapbuffers.
971 */
972 I915_WRITE(pipestat_reg,
973 I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS);
974
9d0498a2 975 /* Wait for vblank interrupt bit to set */
481b6af3
CW
976 if (wait_for(I915_READ(pipestat_reg) &
977 PIPE_VBLANK_INTERRUPT_STATUS,
978 50))
9d0498a2
JB
979 DRM_DEBUG_KMS("vblank wait timed out\n");
980}
981
ab7ad7f6
KP
982/*
983 * intel_wait_for_pipe_off - wait for pipe to turn off
9d0498a2
JB
984 * @dev: drm device
985 * @pipe: pipe to wait for
986 *
987 * After disabling a pipe, we can't wait for vblank in the usual way,
988 * spinning on the vblank interrupt status bit, since we won't actually
989 * see an interrupt when the pipe is disabled.
990 *
ab7ad7f6
KP
991 * On Gen4 and above:
992 * wait for the pipe register state bit to turn off
993 *
994 * Otherwise:
995 * wait for the display line value to settle (it usually
996 * ends up stopping at the start of the next frame).
58e10eb9 997 *
9d0498a2 998 */
58e10eb9 999void intel_wait_for_pipe_off(struct drm_device *dev, int pipe)
9d0498a2
JB
1000{
1001 struct drm_i915_private *dev_priv = dev->dev_private;
ab7ad7f6
KP
1002
1003 if (INTEL_INFO(dev)->gen >= 4) {
58e10eb9 1004 int reg = PIPECONF(pipe);
ab7ad7f6
KP
1005
1006 /* Wait for the Pipe State to go off */
58e10eb9
CW
1007 if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
1008 100))
ab7ad7f6
KP
1009 DRM_DEBUG_KMS("pipe_off wait timed out\n");
1010 } else {
837ba00f 1011 u32 last_line, line_mask;
58e10eb9 1012 int reg = PIPEDSL(pipe);
ab7ad7f6
KP
1013 unsigned long timeout = jiffies + msecs_to_jiffies(100);
1014
837ba00f
PZ
1015 if (IS_GEN2(dev))
1016 line_mask = DSL_LINEMASK_GEN2;
1017 else
1018 line_mask = DSL_LINEMASK_GEN3;
1019
ab7ad7f6
KP
1020 /* Wait for the display line to settle */
1021 do {
837ba00f 1022 last_line = I915_READ(reg) & line_mask;
ab7ad7f6 1023 mdelay(5);
837ba00f 1024 } while (((I915_READ(reg) & line_mask) != last_line) &&
ab7ad7f6
KP
1025 time_after(timeout, jiffies));
1026 if (time_after(jiffies, timeout))
1027 DRM_DEBUG_KMS("pipe_off wait timed out\n");
1028 }
79e53945
JB
1029}
1030
b24e7179
JB
1031static const char *state_string(bool enabled)
1032{
1033 return enabled ? "on" : "off";
1034}
1035
1036/* Only for pre-ILK configs */
1037static void assert_pll(struct drm_i915_private *dev_priv,
1038 enum pipe pipe, bool state)
1039{
1040 int reg;
1041 u32 val;
1042 bool cur_state;
1043
1044 reg = DPLL(pipe);
1045 val = I915_READ(reg);
1046 cur_state = !!(val & DPLL_VCO_ENABLE);
1047 WARN(cur_state != state,
1048 "PLL state assertion failure (expected %s, current %s)\n",
1049 state_string(state), state_string(cur_state));
1050}
1051#define assert_pll_enabled(d, p) assert_pll(d, p, true)
1052#define assert_pll_disabled(d, p) assert_pll(d, p, false)
1053
040484af
JB
1054/* For ILK+ */
1055static void assert_pch_pll(struct drm_i915_private *dev_priv,
92b27b08
CW
1056 struct intel_pch_pll *pll,
1057 struct intel_crtc *crtc,
1058 bool state)
040484af 1059{
040484af
JB
1060 u32 val;
1061 bool cur_state;
1062
9d82aa17
ED
1063 if (HAS_PCH_LPT(dev_priv->dev)) {
1064 DRM_DEBUG_DRIVER("LPT detected: skipping PCH PLL test\n");
1065 return;
1066 }
1067
92b27b08
CW
1068 if (WARN (!pll,
1069 "asserting PCH PLL %s with no PLL\n", state_string(state)))
ee7b9f93 1070 return;
ee7b9f93 1071
92b27b08
CW
1072 val = I915_READ(pll->pll_reg);
1073 cur_state = !!(val & DPLL_VCO_ENABLE);
1074 WARN(cur_state != state,
1075 "PCH PLL state for reg %x assertion failure (expected %s, current %s), val=%08x\n",
1076 pll->pll_reg, state_string(state), state_string(cur_state), val);
1077
1078 /* Make sure the selected PLL is correctly attached to the transcoder */
1079 if (crtc && HAS_PCH_CPT(dev_priv->dev)) {
d3ccbe86
JB
1080 u32 pch_dpll;
1081
1082 pch_dpll = I915_READ(PCH_DPLL_SEL);
92b27b08
CW
1083 cur_state = pll->pll_reg == _PCH_DPLL_B;
1084 if (!WARN(((pch_dpll >> (4 * crtc->pipe)) & 1) != cur_state,
1085 "PLL[%d] not attached to this transcoder %d: %08x\n",
1086 cur_state, crtc->pipe, pch_dpll)) {
1087 cur_state = !!(val >> (4*crtc->pipe + 3));
1088 WARN(cur_state != state,
1089 "PLL[%d] not %s on this transcoder %d: %08x\n",
1090 pll->pll_reg == _PCH_DPLL_B,
1091 state_string(state),
1092 crtc->pipe,
1093 val);
1094 }
d3ccbe86 1095 }
040484af 1096}
92b27b08
CW
1097#define assert_pch_pll_enabled(d, p, c) assert_pch_pll(d, p, c, true)
1098#define assert_pch_pll_disabled(d, p, c) assert_pch_pll(d, p, c, false)
040484af
JB
1099
1100static void assert_fdi_tx(struct drm_i915_private *dev_priv,
1101 enum pipe pipe, bool state)
1102{
1103 int reg;
1104 u32 val;
1105 bool cur_state;
1106
bf507ef7
ED
1107 if (IS_HASWELL(dev_priv->dev)) {
1108 /* On Haswell, DDI is used instead of FDI_TX_CTL */
1109 reg = DDI_FUNC_CTL(pipe);
1110 val = I915_READ(reg);
1111 cur_state = !!(val & PIPE_DDI_FUNC_ENABLE);
1112 } else {
1113 reg = FDI_TX_CTL(pipe);
1114 val = I915_READ(reg);
1115 cur_state = !!(val & FDI_TX_ENABLE);
1116 }
040484af
JB
1117 WARN(cur_state != state,
1118 "FDI TX state assertion failure (expected %s, current %s)\n",
1119 state_string(state), state_string(cur_state));
1120}
1121#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
1122#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
1123
1124static void assert_fdi_rx(struct drm_i915_private *dev_priv,
1125 enum pipe pipe, bool state)
1126{
1127 int reg;
1128 u32 val;
1129 bool cur_state;
1130
59c859d6
ED
1131 if (IS_HASWELL(dev_priv->dev) && pipe > 0) {
1132 DRM_ERROR("Attempting to enable FDI_RX on Haswell pipe > 0\n");
1133 return;
1134 } else {
1135 reg = FDI_RX_CTL(pipe);
1136 val = I915_READ(reg);
1137 cur_state = !!(val & FDI_RX_ENABLE);
1138 }
040484af
JB
1139 WARN(cur_state != state,
1140 "FDI RX state assertion failure (expected %s, current %s)\n",
1141 state_string(state), state_string(cur_state));
1142}
1143#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
1144#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
1145
1146static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
1147 enum pipe pipe)
1148{
1149 int reg;
1150 u32 val;
1151
1152 /* ILK FDI PLL is always enabled */
1153 if (dev_priv->info->gen == 5)
1154 return;
1155
bf507ef7
ED
1156 /* On Haswell, DDI ports are responsible for the FDI PLL setup */
1157 if (IS_HASWELL(dev_priv->dev))
1158 return;
1159
040484af
JB
1160 reg = FDI_TX_CTL(pipe);
1161 val = I915_READ(reg);
1162 WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
1163}
1164
1165static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv,
1166 enum pipe pipe)
1167{
1168 int reg;
1169 u32 val;
1170
59c859d6
ED
1171 if (IS_HASWELL(dev_priv->dev) && pipe > 0) {
1172 DRM_ERROR("Attempting to enable FDI on Haswell with pipe > 0\n");
1173 return;
1174 }
040484af
JB
1175 reg = FDI_RX_CTL(pipe);
1176 val = I915_READ(reg);
1177 WARN(!(val & FDI_RX_PLL_ENABLE), "FDI RX PLL assertion failure, should be active but is disabled\n");
1178}
1179
ea0760cf
JB
1180static void assert_panel_unlocked(struct drm_i915_private *dev_priv,
1181 enum pipe pipe)
1182{
1183 int pp_reg, lvds_reg;
1184 u32 val;
1185 enum pipe panel_pipe = PIPE_A;
0de3b485 1186 bool locked = true;
ea0760cf
JB
1187
1188 if (HAS_PCH_SPLIT(dev_priv->dev)) {
1189 pp_reg = PCH_PP_CONTROL;
1190 lvds_reg = PCH_LVDS;
1191 } else {
1192 pp_reg = PP_CONTROL;
1193 lvds_reg = LVDS;
1194 }
1195
1196 val = I915_READ(pp_reg);
1197 if (!(val & PANEL_POWER_ON) ||
1198 ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS))
1199 locked = false;
1200
1201 if (I915_READ(lvds_reg) & LVDS_PIPEB_SELECT)
1202 panel_pipe = PIPE_B;
1203
1204 WARN(panel_pipe == pipe && locked,
1205 "panel assertion failure, pipe %c regs locked\n",
9db4a9c7 1206 pipe_name(pipe));
ea0760cf
JB
1207}
1208
b840d907
JB
1209void assert_pipe(struct drm_i915_private *dev_priv,
1210 enum pipe pipe, bool state)
b24e7179
JB
1211{
1212 int reg;
1213 u32 val;
63d7bbe9 1214 bool cur_state;
b24e7179 1215
8e636784
DV
1216 /* if we need the pipe A quirk it must be always on */
1217 if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
1218 state = true;
1219
b24e7179
JB
1220 reg = PIPECONF(pipe);
1221 val = I915_READ(reg);
63d7bbe9
JB
1222 cur_state = !!(val & PIPECONF_ENABLE);
1223 WARN(cur_state != state,
1224 "pipe %c assertion failure (expected %s, current %s)\n",
9db4a9c7 1225 pipe_name(pipe), state_string(state), state_string(cur_state));
b24e7179
JB
1226}
1227
931872fc
CW
1228static void assert_plane(struct drm_i915_private *dev_priv,
1229 enum plane plane, bool state)
b24e7179
JB
1230{
1231 int reg;
1232 u32 val;
931872fc 1233 bool cur_state;
b24e7179
JB
1234
1235 reg = DSPCNTR(plane);
1236 val = I915_READ(reg);
931872fc
CW
1237 cur_state = !!(val & DISPLAY_PLANE_ENABLE);
1238 WARN(cur_state != state,
1239 "plane %c assertion failure (expected %s, current %s)\n",
1240 plane_name(plane), state_string(state), state_string(cur_state));
b24e7179
JB
1241}
1242
931872fc
CW
1243#define assert_plane_enabled(d, p) assert_plane(d, p, true)
1244#define assert_plane_disabled(d, p) assert_plane(d, p, false)
1245
b24e7179
JB
1246static void assert_planes_disabled(struct drm_i915_private *dev_priv,
1247 enum pipe pipe)
1248{
1249 int reg, i;
1250 u32 val;
1251 int cur_pipe;
1252
19ec1358 1253 /* Planes are fixed to pipes on ILK+ */
28c05794
AJ
1254 if (HAS_PCH_SPLIT(dev_priv->dev)) {
1255 reg = DSPCNTR(pipe);
1256 val = I915_READ(reg);
1257 WARN((val & DISPLAY_PLANE_ENABLE),
1258 "plane %c assertion failure, should be disabled but not\n",
1259 plane_name(pipe));
19ec1358 1260 return;
28c05794 1261 }
19ec1358 1262
b24e7179
JB
1263 /* Need to check both planes against the pipe */
1264 for (i = 0; i < 2; i++) {
1265 reg = DSPCNTR(i);
1266 val = I915_READ(reg);
1267 cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
1268 DISPPLANE_SEL_PIPE_SHIFT;
1269 WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
9db4a9c7
JB
1270 "plane %c assertion failure, should be off on pipe %c but is still active\n",
1271 plane_name(i), pipe_name(pipe));
b24e7179
JB
1272 }
1273}
1274
92f2584a
JB
1275static void assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
1276{
1277 u32 val;
1278 bool enabled;
1279
9d82aa17
ED
1280 if (HAS_PCH_LPT(dev_priv->dev)) {
1281 DRM_DEBUG_DRIVER("LPT does not has PCH refclk, skipping check\n");
1282 return;
1283 }
1284
92f2584a
JB
1285 val = I915_READ(PCH_DREF_CONTROL);
1286 enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
1287 DREF_SUPERSPREAD_SOURCE_MASK));
1288 WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
1289}
1290
1291static void assert_transcoder_disabled(struct drm_i915_private *dev_priv,
1292 enum pipe pipe)
1293{
1294 int reg;
1295 u32 val;
1296 bool enabled;
1297
1298 reg = TRANSCONF(pipe);
1299 val = I915_READ(reg);
1300 enabled = !!(val & TRANS_ENABLE);
9db4a9c7
JB
1301 WARN(enabled,
1302 "transcoder assertion failed, should be off on pipe %c but is still active\n",
1303 pipe_name(pipe));
92f2584a
JB
1304}
1305
4e634389
KP
1306static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
1307 enum pipe pipe, u32 port_sel, u32 val)
f0575e92
KP
1308{
1309 if ((val & DP_PORT_EN) == 0)
1310 return false;
1311
1312 if (HAS_PCH_CPT(dev_priv->dev)) {
1313 u32 trans_dp_ctl_reg = TRANS_DP_CTL(pipe);
1314 u32 trans_dp_ctl = I915_READ(trans_dp_ctl_reg);
1315 if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
1316 return false;
1317 } else {
1318 if ((val & DP_PIPE_MASK) != (pipe << 30))
1319 return false;
1320 }
1321 return true;
1322}
1323
1519b995
KP
1324static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
1325 enum pipe pipe, u32 val)
1326{
1327 if ((val & PORT_ENABLE) == 0)
1328 return false;
1329
1330 if (HAS_PCH_CPT(dev_priv->dev)) {
1331 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1332 return false;
1333 } else {
1334 if ((val & TRANSCODER_MASK) != TRANSCODER(pipe))
1335 return false;
1336 }
1337 return true;
1338}
1339
1340static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
1341 enum pipe pipe, u32 val)
1342{
1343 if ((val & LVDS_PORT_EN) == 0)
1344 return false;
1345
1346 if (HAS_PCH_CPT(dev_priv->dev)) {
1347 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1348 return false;
1349 } else {
1350 if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
1351 return false;
1352 }
1353 return true;
1354}
1355
1356static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
1357 enum pipe pipe, u32 val)
1358{
1359 if ((val & ADPA_DAC_ENABLE) == 0)
1360 return false;
1361 if (HAS_PCH_CPT(dev_priv->dev)) {
1362 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1363 return false;
1364 } else {
1365 if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
1366 return false;
1367 }
1368 return true;
1369}
1370
291906f1 1371static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
f0575e92 1372 enum pipe pipe, int reg, u32 port_sel)
291906f1 1373{
47a05eca 1374 u32 val = I915_READ(reg);
4e634389 1375 WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
291906f1 1376 "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
9db4a9c7 1377 reg, pipe_name(pipe));
de9a35ab
DV
1378
1379 WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_PIPE_B_SELECT),
1380 "IBX PCH dp port still using transcoder B\n");
291906f1
JB
1381}
1382
1383static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
1384 enum pipe pipe, int reg)
1385{
47a05eca 1386 u32 val = I915_READ(reg);
1519b995 1387	WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
23c99e77 1388 "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
9db4a9c7 1389 reg, pipe_name(pipe));
de9a35ab
DV
1390
1391 WARN(HAS_PCH_IBX(dev_priv->dev) && (val & SDVO_PIPE_B_SELECT),
1392 "IBX PCH hdmi port still using transcoder B\n");
291906f1
JB
1393}
1394
1395static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
1396 enum pipe pipe)
1397{
1398 int reg;
1399 u32 val;
291906f1 1400
f0575e92
KP
1401 assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
1402 assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
1403 assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
291906f1
JB
1404
1405 reg = PCH_ADPA;
1406 val = I915_READ(reg);
1519b995 1407	WARN(adpa_pipe_enabled(dev_priv, pipe, val),
291906f1 1408 "PCH VGA enabled on transcoder %c, should be disabled\n",
9db4a9c7 1409 pipe_name(pipe));
291906f1
JB
1410
1411 reg = PCH_LVDS;
1412 val = I915_READ(reg);
1519b995 1413	WARN(lvds_pipe_enabled(dev_priv, pipe, val),
291906f1 1414 "PCH LVDS enabled on transcoder %c, should be disabled\n",
9db4a9c7 1415 pipe_name(pipe));
291906f1
JB
1416
1417 assert_pch_hdmi_disabled(dev_priv, pipe, HDMIB);
1418 assert_pch_hdmi_disabled(dev_priv, pipe, HDMIC);
1419 assert_pch_hdmi_disabled(dev_priv, pipe, HDMID);
1420}
1421
63d7bbe9
JB
1422/**
1423 * intel_enable_pll - enable a PLL
1424 * @dev_priv: i915 private structure
1425 * @pipe: pipe PLL to enable
1426 *
1427 * Enable @pipe's PLL so we can start pumping pixels from a plane. Check to
1428 * make sure the PLL reg is writable first though, since the panel write
1429 * protect mechanism may be enabled.
1430 *
1431 * Note! This is for pre-ILK only.
1432 */
1433static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1434{
1435 int reg;
1436 u32 val;
1437
1438 /* No really, not for ILK+ */
a0c4da24 1439 BUG_ON(!IS_VALLEYVIEW(dev_priv->dev) && dev_priv->info->gen >= 5);
63d7bbe9
JB
1440
1441 /* PLL is protected by panel, make sure we can write it */
1442 if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev))
1443 assert_panel_unlocked(dev_priv, pipe);
1444
1445 reg = DPLL(pipe);
1446 val = I915_READ(reg);
1447 val |= DPLL_VCO_ENABLE;
1448
1449 /* We do this three times for luck */
1450 I915_WRITE(reg, val);
1451 POSTING_READ(reg);
1452 udelay(150); /* wait for warmup */
1453 I915_WRITE(reg, val);
1454 POSTING_READ(reg);
1455 udelay(150); /* wait for warmup */
1456 I915_WRITE(reg, val);
1457 POSTING_READ(reg);
1458 udelay(150); /* wait for warmup */
1459}
1460
1461/**
1462 * intel_disable_pll - disable a PLL
1463 * @dev_priv: i915 private structure
1464 * @pipe: pipe PLL to disable
1465 *
1466 * Disable the PLL for @pipe, making sure the pipe is off first.
1467 *
1468 * Note! This is for pre-ILK only.
1469 */
1470static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1471{
1472 int reg;
1473 u32 val;
1474
1475 /* Don't disable pipe A or pipe A PLLs if needed */
1476 if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
1477 return;
1478
1479 /* Make sure the pipe isn't still relying on us */
1480 assert_pipe_disabled(dev_priv, pipe);
1481
1482 reg = DPLL(pipe);
1483 val = I915_READ(reg);
1484 val &= ~DPLL_VCO_ENABLE;
1485 I915_WRITE(reg, val);
1486 POSTING_READ(reg);
1487}
1488
a416edef
ED
1489/* SBI access */
1490static void
1491intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value)
1492{
1493 unsigned long flags;
1494
1495 spin_lock_irqsave(&dev_priv->dpio_lock, flags);
39fb50f6 1496 if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
a416edef
ED
1497 100)) {
1498 DRM_ERROR("timeout waiting for SBI to become ready\n");
1499 goto out_unlock;
1500 }
1501
1502 I915_WRITE(SBI_ADDR,
1503 (reg << 16));
1504 I915_WRITE(SBI_DATA,
1505 value);
1506 I915_WRITE(SBI_CTL_STAT,
1507 SBI_BUSY |
1508 SBI_CTL_OP_CRWR);
1509
39fb50f6 1510 if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
a416edef
ED
1511 100)) {
1512 DRM_ERROR("timeout waiting for SBI to complete write transaction\n");
1513 goto out_unlock;
1514 }
1515
1516out_unlock:
1517 spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
1518}
1519
1520static u32
1521intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg)
1522{
1523 unsigned long flags;
39fb50f6 1524 u32 value = 0;
a416edef
ED
1525
1526 spin_lock_irqsave(&dev_priv->dpio_lock, flags);
39fb50f6 1527 if (wait_for((I915_READ(SBI_CTL_STAT) & SBI_BUSY) == 0,
a416edef
ED
1528 100)) {
1529 DRM_ERROR("timeout waiting for SBI to become ready\n");
1530 goto out_unlock;
1531 }
1532
1533 I915_WRITE(SBI_ADDR,
1534 (reg << 16));
1535 I915_WRITE(SBI_CTL_STAT,
1536 SBI_BUSY |
1537 SBI_CTL_OP_CRRD);
1538
39fb50f6 1539 if (wait_for((I915_READ(SBI_CTL_STAT) & (SBI_BUSY | SBI_RESPONSE_FAIL)) == 0,
a416edef
ED
1540 100)) {
1541 DRM_ERROR("timeout waiting for SBI to complete read transaction\n");
1542 goto out_unlock;
1543 }
1544
1545 value = I915_READ(SBI_DATA);
1546
1547out_unlock:
1548 spin_unlock_irqrestore(&dev_priv->dpio_lock, flags);
1549 return value;
1550}
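/*
 * The SBI helpers above follow the same handshake as the DPIO helpers
 * (illustrative summary): take dpio_lock, wait for SBI_CTL_STAT to go
 * non-busy, program SBI_ADDR (plus SBI_DATA for a write), issue
 * SBI_CTL_OP_CRWR or SBI_CTL_OP_CRRD, then wait for completion before
 * reading SBI_DATA back.
 */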
1551
92f2584a
JB
1552/**
1553 * intel_enable_pch_pll - enable PCH PLL
1554 * @dev_priv: i915 private structure
1555 * @pipe: pipe PLL to enable
1556 *
1557 * The PCH PLL needs to be enabled before the PCH transcoder, since it
1558 * drives the transcoder clock.
1559 */
ee7b9f93 1560static void intel_enable_pch_pll(struct intel_crtc *intel_crtc)
92f2584a 1561{
ee7b9f93 1562 struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
48da64a8 1563 struct intel_pch_pll *pll;
92f2584a
JB
1564 int reg;
1565 u32 val;
1566
48da64a8 1567 /* PCH PLLs only available on ILK, SNB and IVB */
92f2584a 1568 BUG_ON(dev_priv->info->gen < 5);
48da64a8
CW
1569 pll = intel_crtc->pch_pll;
1570 if (pll == NULL)
1571 return;
1572
1573 if (WARN_ON(pll->refcount == 0))
1574 return;
ee7b9f93
JB
1575
1576 DRM_DEBUG_KMS("enable PCH PLL %x (active %d, on? %d)for crtc %d\n",
1577 pll->pll_reg, pll->active, pll->on,
1578 intel_crtc->base.base.id);
92f2584a
JB
1579
1580 /* PCH refclock must be enabled first */
1581 assert_pch_refclk_enabled(dev_priv);
1582
ee7b9f93 1583 if (pll->active++ && pll->on) {
92b27b08 1584 assert_pch_pll_enabled(dev_priv, pll, NULL);
ee7b9f93
JB
1585 return;
1586 }
1587
1588 DRM_DEBUG_KMS("enabling PCH PLL %x\n", pll->pll_reg);
1589
1590 reg = pll->pll_reg;
92f2584a
JB
1591 val = I915_READ(reg);
1592 val |= DPLL_VCO_ENABLE;
1593 I915_WRITE(reg, val);
1594 POSTING_READ(reg);
1595 udelay(200);
ee7b9f93
JB
1596
1597 pll->on = true;
92f2584a
JB
1598}
1599
ee7b9f93 1600static void intel_disable_pch_pll(struct intel_crtc *intel_crtc)
92f2584a 1601{
ee7b9f93
JB
1602 struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
1603 struct intel_pch_pll *pll = intel_crtc->pch_pll;
92f2584a 1604 int reg;
ee7b9f93 1605 u32 val;
4c609cb8 1606
92f2584a
JB
1607 /* PCH only available on ILK+ */
1608 BUG_ON(dev_priv->info->gen < 5);
ee7b9f93
JB
1609 if (pll == NULL)
1610 return;
92f2584a 1611
48da64a8
CW
1612 if (WARN_ON(pll->refcount == 0))
1613 return;
7a419866 1614
ee7b9f93
JB
1615 DRM_DEBUG_KMS("disable PCH PLL %x (active %d, on? %d) for crtc %d\n",
1616 pll->pll_reg, pll->active, pll->on,
1617 intel_crtc->base.base.id);
7a419866 1618
48da64a8 1619 if (WARN_ON(pll->active == 0)) {
92b27b08 1620 assert_pch_pll_disabled(dev_priv, pll, NULL);
48da64a8
CW
1621 return;
1622 }
1623
ee7b9f93 1624 if (--pll->active) {
92b27b08 1625 assert_pch_pll_enabled(dev_priv, pll, NULL);
7a419866 1626 return;
ee7b9f93
JB
1627 }
1628
1629 DRM_DEBUG_KMS("disabling PCH PLL %x\n", pll->pll_reg);
1630
1631 /* Make sure transcoder isn't still depending on us */
1632 assert_transcoder_disabled(dev_priv, intel_crtc->pipe);
7a419866 1633
ee7b9f93 1634 reg = pll->pll_reg;
92f2584a
JB
1635 val = I915_READ(reg);
1636 val &= ~DPLL_VCO_ENABLE;
1637 I915_WRITE(reg, val);
1638 POSTING_READ(reg);
1639 udelay(200);
ee7b9f93
JB
1640
1641 pll->on = false;
92f2584a
JB
1642}
1643
040484af
JB
1644static void intel_enable_transcoder(struct drm_i915_private *dev_priv,
1645 enum pipe pipe)
1646{
1647 int reg;
5f7f726d 1648 u32 val, pipeconf_val;
7c26e5c6 1649 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
040484af
JB
1650
1651 /* PCH only available on ILK+ */
1652 BUG_ON(dev_priv->info->gen < 5);
1653
1654 /* Make sure PCH DPLL is enabled */
92b27b08
CW
1655 assert_pch_pll_enabled(dev_priv,
1656 to_intel_crtc(crtc)->pch_pll,
1657 to_intel_crtc(crtc));
040484af
JB
1658
1659 /* FDI must be feeding us bits for PCH ports */
1660 assert_fdi_tx_enabled(dev_priv, pipe);
1661 assert_fdi_rx_enabled(dev_priv, pipe);
1662
59c859d6
ED
1663 if (IS_HASWELL(dev_priv->dev) && pipe > 0) {
1664 DRM_ERROR("Attempting to enable transcoder on Haswell with pipe > 0\n");
1665 return;
1666 }
040484af
JB
1667 reg = TRANSCONF(pipe);
1668 val = I915_READ(reg);
5f7f726d 1669 pipeconf_val = I915_READ(PIPECONF(pipe));
e9bcff5c
JB
1670
1671 if (HAS_PCH_IBX(dev_priv->dev)) {
1672 /*
1673 * make the BPC in transcoder be consistent with
1674 * that in pipeconf reg.
1675 */
1676 val &= ~PIPE_BPC_MASK;
5f7f726d 1677 val |= pipeconf_val & PIPE_BPC_MASK;
e9bcff5c 1678 }
5f7f726d
PZ
1679
1680 val &= ~TRANS_INTERLACE_MASK;
1681 if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
7c26e5c6
PZ
1682 if (HAS_PCH_IBX(dev_priv->dev) &&
1683 intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO))
1684 val |= TRANS_LEGACY_INTERLACED_ILK;
1685 else
1686 val |= TRANS_INTERLACED;
5f7f726d
PZ
1687 else
1688 val |= TRANS_PROGRESSIVE;
1689
040484af
JB
1690 I915_WRITE(reg, val | TRANS_ENABLE);
1691 if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
1692 DRM_ERROR("failed to enable transcoder %d\n", pipe);
1693}
1694
1695static void intel_disable_transcoder(struct drm_i915_private *dev_priv,
1696 enum pipe pipe)
1697{
1698 int reg;
1699 u32 val;
1700
1701 /* FDI relies on the transcoder */
1702 assert_fdi_tx_disabled(dev_priv, pipe);
1703 assert_fdi_rx_disabled(dev_priv, pipe);
1704
291906f1
JB
1705 /* Ports must be off as well */
1706 assert_pch_ports_disabled(dev_priv, pipe);
1707
040484af
JB
1708 reg = TRANSCONF(pipe);
1709 val = I915_READ(reg);
1710 val &= ~TRANS_ENABLE;
1711 I915_WRITE(reg, val);
1712 /* wait for PCH transcoder off, transcoder state */
1713 if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
4c9c18c2 1714 DRM_ERROR("failed to disable transcoder %d\n", pipe);
040484af
JB
1715}
1716
b24e7179 1717/**
309cfea8 1718 * intel_enable_pipe - enable a pipe, asserting requirements
b24e7179
JB
1719 * @dev_priv: i915 private structure
1720 * @pipe: pipe to enable
040484af 1721 * @pch_port: on ILK+, is this pipe driving a PCH port or not
b24e7179
JB
1722 *
1723 * Enable @pipe, making sure that various hardware specific requirements
1724 * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
1725 *
1726 * @pipe should be %PIPE_A or %PIPE_B.
1727 *
1728 * Will wait until the pipe is actually running (i.e. first vblank) before
1729 * returning.
1730 */
040484af
JB
1731static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe,
1732 bool pch_port)
b24e7179
JB
1733{
1734 int reg;
1735 u32 val;
1736
1737 /*
1738 * A pipe without a PLL won't actually be able to drive bits from
1739 * a plane. On ILK+ the pipe PLLs are integrated, so we don't
1740 * need the check.
1741 */
1742 if (!HAS_PCH_SPLIT(dev_priv->dev))
1743 assert_pll_enabled(dev_priv, pipe);
040484af
JB
1744 else {
1745 if (pch_port) {
1746 /* if driving the PCH, we need FDI enabled */
1747 assert_fdi_rx_pll_enabled(dev_priv, pipe);
1748 assert_fdi_tx_pll_enabled(dev_priv, pipe);
1749 }
1750 /* FIXME: assert CPU port conditions for SNB+ */
1751 }
b24e7179
JB
1752
1753 reg = PIPECONF(pipe);
1754 val = I915_READ(reg);
00d70b15
CW
1755 if (val & PIPECONF_ENABLE)
1756 return;
1757
1758 I915_WRITE(reg, val | PIPECONF_ENABLE);
b24e7179
JB
1759 intel_wait_for_vblank(dev_priv->dev, pipe);
1760}
1761
1762/**
309cfea8 1763 * intel_disable_pipe - disable a pipe, asserting requirements
b24e7179
JB
1764 * @dev_priv: i915 private structure
1765 * @pipe: pipe to disable
1766 *
1767 * Disable @pipe, making sure that various hardware specific requirements
1768 * are met, if applicable, e.g. plane disabled, panel fitter off, etc.
1769 *
1770 * @pipe should be %PIPE_A or %PIPE_B.
1771 *
1772 * Will wait until the pipe has shut down before returning.
1773 */
1774static void intel_disable_pipe(struct drm_i915_private *dev_priv,
1775 enum pipe pipe)
1776{
1777 int reg;
1778 u32 val;
1779
1780 /*
1781 * Make sure planes won't keep trying to pump pixels to us,
1782 * or we might hang the display.
1783 */
1784 assert_planes_disabled(dev_priv, pipe);
1785
1786 /* Don't disable pipe A or the pipe A PLLs if they are still needed */
1787 if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
1788 return;
1789
1790 reg = PIPECONF(pipe);
1791 val = I915_READ(reg);
00d70b15
CW
1792 if ((val & PIPECONF_ENABLE) == 0)
1793 return;
1794
1795 I915_WRITE(reg, val & ~PIPECONF_ENABLE);
b24e7179
JB
1796 intel_wait_for_pipe_off(dev_priv->dev, pipe);
1797}
1798
d74362c9
KP
1799/*
1800 * Plane regs are double buffered, going from enabled->disabled needs a
1801 * trigger in order to latch. The display address reg provides this.
1802 */
6f1d69b0 1803void intel_flush_display_plane(struct drm_i915_private *dev_priv,
d74362c9
KP
1804 enum plane plane)
1805{
1806 I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane)));
1807 I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane)));
1808}
1809
b24e7179
JB
1810/**
1811 * intel_enable_plane - enable a display plane on a given pipe
1812 * @dev_priv: i915 private structure
1813 * @plane: plane to enable
1814 * @pipe: pipe being fed
1815 *
1816 * Enable @plane on @pipe, making sure that @pipe is running first.
1817 */
1818static void intel_enable_plane(struct drm_i915_private *dev_priv,
1819 enum plane plane, enum pipe pipe)
1820{
1821 int reg;
1822 u32 val;
1823
1824 /* If the pipe isn't enabled, we can't pump pixels and may hang */
1825 assert_pipe_enabled(dev_priv, pipe);
1826
1827 reg = DSPCNTR(plane);
1828 val = I915_READ(reg);
00d70b15
CW
1829 if (val & DISPLAY_PLANE_ENABLE)
1830 return;
1831
1832 I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE);
d74362c9 1833 intel_flush_display_plane(dev_priv, plane);
b24e7179
JB
1834 intel_wait_for_vblank(dev_priv->dev, pipe);
1835}
1836
b24e7179
JB
1837/**
1838 * intel_disable_plane - disable a display plane
1839 * @dev_priv: i915 private structure
1840 * @plane: plane to disable
1841 * @pipe: pipe consuming the data
1842 *
1843 * Disable @plane; should be an independent operation.
1844 */
1845static void intel_disable_plane(struct drm_i915_private *dev_priv,
1846 enum plane plane, enum pipe pipe)
1847{
1848 int reg;
1849 u32 val;
1850
1851 reg = DSPCNTR(plane);
1852 val = I915_READ(reg);
00d70b15
CW
1853 if ((val & DISPLAY_PLANE_ENABLE) == 0)
1854 return;
1855
1856 I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE);
b24e7179
JB
1857 intel_flush_display_plane(dev_priv, plane);
1858 intel_wait_for_vblank(dev_priv->dev, pipe);
1859}
1860
47a05eca 1861static void disable_pch_dp(struct drm_i915_private *dev_priv,
f0575e92 1862 enum pipe pipe, int reg, u32 port_sel)
47a05eca
JB
1863{
1864 u32 val = I915_READ(reg);
4e634389 1865 if (dp_pipe_enabled(dev_priv, pipe, port_sel, val)) {
f0575e92 1866 DRM_DEBUG_KMS("Disabling pch dp %x on pipe %d\n", reg, pipe);
47a05eca 1867 I915_WRITE(reg, val & ~DP_PORT_EN);
f0575e92 1868 }
47a05eca
JB
1869}
1870
1871static void disable_pch_hdmi(struct drm_i915_private *dev_priv,
1872 enum pipe pipe, int reg)
1873{
1874 u32 val = I915_READ(reg);
1519b995 1875 if (hdmi_pipe_enabled(dev_priv, val, pipe)) {
f0575e92
KP
1876 DRM_DEBUG_KMS("Disabling pch HDMI %x on pipe %d\n",
1877 reg, pipe);
47a05eca 1878 I915_WRITE(reg, val & ~PORT_ENABLE);
f0575e92 1879 }
47a05eca
JB
1880}
1881
1882/* Disable any ports connected to this transcoder */
1883static void intel_disable_pch_ports(struct drm_i915_private *dev_priv,
1884 enum pipe pipe)
1885{
1886 u32 reg, val;
1887
1888 val = I915_READ(PCH_PP_CONTROL);
1889 I915_WRITE(PCH_PP_CONTROL, val | PANEL_UNLOCK_REGS);
1890
f0575e92
KP
1891 disable_pch_dp(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
1892 disable_pch_dp(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
1893 disable_pch_dp(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
47a05eca
JB
1894
1895 reg = PCH_ADPA;
1896 val = I915_READ(reg);
1519b995 1897 if (adpa_pipe_enabled(dev_priv, val, pipe))
47a05eca
JB
1898 I915_WRITE(reg, val & ~ADPA_DAC_ENABLE);
1899
1900 reg = PCH_LVDS;
1901 val = I915_READ(reg);
1519b995
KP
1902 if (lvds_pipe_enabled(dev_priv, val, pipe)) {
1903 DRM_DEBUG_KMS("disable lvds on pipe %d val 0x%08x\n", pipe, val);
47a05eca
JB
1904 I915_WRITE(reg, val & ~LVDS_PORT_EN);
1905 POSTING_READ(reg);
1906 udelay(100);
1907 }
1908
1909 disable_pch_hdmi(dev_priv, pipe, HDMIB);
1910 disable_pch_hdmi(dev_priv, pipe, HDMIC);
1911 disable_pch_hdmi(dev_priv, pipe, HDMID);
1912}
1913
127bd2ac 1914int
48b956c5 1915intel_pin_and_fence_fb_obj(struct drm_device *dev,
05394f39 1916 struct drm_i915_gem_object *obj,
919926ae 1917 struct intel_ring_buffer *pipelined)
6b95a207 1918{
ce453d81 1919 struct drm_i915_private *dev_priv = dev->dev_private;
6b95a207
KH
1920 u32 alignment;
1921 int ret;
1922
05394f39 1923 switch (obj->tiling_mode) {
6b95a207 1924 case I915_TILING_NONE:
534843da
CW
1925 if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
1926 alignment = 128 * 1024;
a6c45cf0 1927 else if (INTEL_INFO(dev)->gen >= 4)
534843da
CW
1928 alignment = 4 * 1024;
1929 else
1930 alignment = 64 * 1024;
6b95a207
KH
1931 break;
1932 case I915_TILING_X:
1933 /* pin() will align the object as required by fence */
1934 alignment = 0;
1935 break;
1936 case I915_TILING_Y:
1937 /* FIXME: Is this true? */
1938 DRM_ERROR("Y tiled not allowed for scan out buffers\n");
1939 return -EINVAL;
1940 default:
1941 BUG();
1942 }
1943
ce453d81 1944 dev_priv->mm.interruptible = false;
2da3b9b9 1945 ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined);
48b956c5 1946 if (ret)
ce453d81 1947 goto err_interruptible;
6b95a207
KH
1948
1949 /* Install a fence for tiled scan-out. Pre-i965 always needs a
1950 * fence, whereas 965+ only requires a fence if using
1951 * framebuffer compression. For simplicity, we always install
1952 * a fence as the cost is not that onerous.
1953 */
06d98131 1954 ret = i915_gem_object_get_fence(obj);
9a5a53b3
CW
1955 if (ret)
1956 goto err_unpin;
1690e1eb 1957
9a5a53b3 1958 i915_gem_object_pin_fence(obj);
6b95a207 1959
ce453d81 1960 dev_priv->mm.interruptible = true;
6b95a207 1961 return 0;
48b956c5
CW
1962
1963err_unpin:
1964 i915_gem_object_unpin(obj);
ce453d81
CW
1965err_interruptible:
1966 dev_priv->mm.interruptible = true;
48b956c5 1967 return ret;
6b95a207
KH
1968}
1969
1690e1eb
CW
1970void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
1971{
1972 i915_gem_object_unpin_fence(obj);
1973 i915_gem_object_unpin(obj);
1974}
1975
17638cd6
JB
1976static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb,
1977 int x, int y)
81255565
JB
1978{
1979 struct drm_device *dev = crtc->dev;
1980 struct drm_i915_private *dev_priv = dev->dev_private;
1981 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1982 struct intel_framebuffer *intel_fb;
05394f39 1983 struct drm_i915_gem_object *obj;
81255565
JB
1984 int plane = intel_crtc->plane;
1985 unsigned long Start, Offset;
81255565 1986 u32 dspcntr;
5eddb70b 1987 u32 reg;
81255565
JB
1988
1989 switch (plane) {
1990 case 0:
1991 case 1:
1992 break;
1993 default:
1994 DRM_ERROR("Can't update plane %d in SAREA\n", plane);
1995 return -EINVAL;
1996 }
1997
1998 intel_fb = to_intel_framebuffer(fb);
1999 obj = intel_fb->obj;
81255565 2000
5eddb70b
CW
2001 reg = DSPCNTR(plane);
2002 dspcntr = I915_READ(reg);
81255565
JB
2003 /* Mask out pixel format bits in case we change it */
2004 dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
2005 switch (fb->bits_per_pixel) {
2006 case 8:
2007 dspcntr |= DISPPLANE_8BPP;
2008 break;
2009 case 16:
2010 if (fb->depth == 15)
2011 dspcntr |= DISPPLANE_15_16BPP;
2012 else
2013 dspcntr |= DISPPLANE_16BPP;
2014 break;
2015 case 24:
2016 case 32:
2017 dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
2018 break;
2019 default:
17638cd6 2020 DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
81255565
JB
2021 return -EINVAL;
2022 }
a6c45cf0 2023 if (INTEL_INFO(dev)->gen >= 4) {
05394f39 2024 if (obj->tiling_mode != I915_TILING_NONE)
81255565
JB
2025 dspcntr |= DISPPLANE_TILED;
2026 else
2027 dspcntr &= ~DISPPLANE_TILED;
2028 }
2029
5eddb70b 2030 I915_WRITE(reg, dspcntr);
81255565 2031
05394f39 2032 Start = obj->gtt_offset;
01f2c773 2033 Offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
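/* Worked example of the linear offset above (illustrative values only,
 * not taken from the driver): an assumed 1920x1080 XRGB8888 framebuffer
 * has pitches[0] = 1920 * 4 = 7680 bytes, so for (x, y) = (100, 50):
 * Offset = 50 * 7680 + 100 * 4 = 384400 bytes into the object. */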
81255565 2034
4e6cfefc 2035 DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
01f2c773
VS
2036 Start, Offset, x, y, fb->pitches[0]);
2037 I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
a6c45cf0 2038 if (INTEL_INFO(dev)->gen >= 4) {
446f2545 2039 I915_MODIFY_DISPBASE(DSPSURF(plane), Start);
5eddb70b
CW
2040 I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
2041 I915_WRITE(DSPADDR(plane), Offset);
2042 } else
2043 I915_WRITE(DSPADDR(plane), Start + Offset);
2044 POSTING_READ(reg);
81255565 2045
17638cd6
JB
2046 return 0;
2047}
2048
2049static int ironlake_update_plane(struct drm_crtc *crtc,
2050 struct drm_framebuffer *fb, int x, int y)
2051{
2052 struct drm_device *dev = crtc->dev;
2053 struct drm_i915_private *dev_priv = dev->dev_private;
2054 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2055 struct intel_framebuffer *intel_fb;
2056 struct drm_i915_gem_object *obj;
2057 int plane = intel_crtc->plane;
2058 unsigned long Start, Offset;
2059 u32 dspcntr;
2060 u32 reg;
2061
2062 switch (plane) {
2063 case 0:
2064 case 1:
27f8227b 2065 case 2:
17638cd6
JB
2066 break;
2067 default:
2068 DRM_ERROR("Can't update plane %d in SAREA\n", plane);
2069 return -EINVAL;
2070 }
2071
2072 intel_fb = to_intel_framebuffer(fb);
2073 obj = intel_fb->obj;
2074
2075 reg = DSPCNTR(plane);
2076 dspcntr = I915_READ(reg);
2077 /* Mask out pixel format bits in case we change it */
2078 dspcntr &= ~DISPPLANE_PIXFORMAT_MASK;
2079 switch (fb->bits_per_pixel) {
2080 case 8:
2081 dspcntr |= DISPPLANE_8BPP;
2082 break;
2083 case 16:
2084 if (fb->depth != 16)
2085 return -EINVAL;
2086
2087 dspcntr |= DISPPLANE_16BPP;
2088 break;
2089 case 24:
2090 case 32:
2091 if (fb->depth == 24)
2092 dspcntr |= DISPPLANE_32BPP_NO_ALPHA;
2093 else if (fb->depth == 30)
2094 dspcntr |= DISPPLANE_32BPP_30BIT_NO_ALPHA;
2095 else
2096 return -EINVAL;
2097 break;
2098 default:
2099 DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel);
2100 return -EINVAL;
2101 }
2102
2103 if (obj->tiling_mode != I915_TILING_NONE)
2104 dspcntr |= DISPPLANE_TILED;
2105 else
2106 dspcntr &= ~DISPPLANE_TILED;
2107
2108 /* trickle feed must be disabled */
2109 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
2110
2111 I915_WRITE(reg, dspcntr);
2112
2113 Start = obj->gtt_offset;
01f2c773 2114 Offset = y * fb->pitches[0] + x * (fb->bits_per_pixel / 8);
17638cd6
JB
2115
2116 DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
01f2c773
VS
2117 Start, Offset, x, y, fb->pitches[0]);
2118 I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
446f2545 2119 I915_MODIFY_DISPBASE(DSPSURF(plane), Start);
17638cd6
JB
2120 I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
2121 I915_WRITE(DSPADDR(plane), Offset);
2122 POSTING_READ(reg);
2123
2124 return 0;
2125}
2126
2127/* Assume fb object is pinned & idle & fenced and just update base pointers */
2128static int
2129intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
2130 int x, int y, enum mode_set_atomic state)
2131{
2132 struct drm_device *dev = crtc->dev;
2133 struct drm_i915_private *dev_priv = dev->dev_private;
17638cd6 2134
6b8e6ed0
CW
2135 if (dev_priv->display.disable_fbc)
2136 dev_priv->display.disable_fbc(dev);
3dec0095 2137 intel_increase_pllclock(crtc);
81255565 2138
6b8e6ed0 2139 return dev_priv->display.update_plane(crtc, fb, x, y);
81255565
JB
2140}
2141
14667a4b
CW
2142static int
2143intel_finish_fb(struct drm_framebuffer *old_fb)
2144{
2145 struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
2146 struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
2147 bool was_interruptible = dev_priv->mm.interruptible;
2148 int ret;
2149
2150 wait_event(dev_priv->pending_flip_queue,
2151 atomic_read(&dev_priv->mm.wedged) ||
2152 atomic_read(&obj->pending_flip) == 0);
2153
2154 /* Big Hammer, we also need to ensure that any pending
2155 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
2156 * current scanout is retired before unpinning the old
2157 * framebuffer.
2158 *
2159 * This should only fail upon a hung GPU, in which case we
2160 * can safely continue.
2161 */
2162 dev_priv->mm.interruptible = false;
2163 ret = i915_gem_object_finish_gpu(obj);
2164 dev_priv->mm.interruptible = was_interruptible;
2165
2166 return ret;
2167}
2168
5c3b82e2 2169static int
3c4fdcfb
KH
2170intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
2171 struct drm_framebuffer *old_fb)
79e53945
JB
2172{
2173 struct drm_device *dev = crtc->dev;
6b8e6ed0 2174 struct drm_i915_private *dev_priv = dev->dev_private;
79e53945
JB
2175 struct drm_i915_master_private *master_priv;
2176 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5c3b82e2 2177 int ret;
79e53945
JB
2178
2179 /* no fb bound */
2180 if (!crtc->fb) {
a5071c2f 2181 DRM_ERROR("No FB bound\n");
5c3b82e2
CW
2182 return 0;
2183 }
2184
5826eca5
ED
2185 if (intel_crtc->plane > dev_priv->num_pipe) {
2186 DRM_ERROR("no plane for crtc: plane %d, num_pipes %d\n",
2187 intel_crtc->plane,
2188 dev_priv->num_pipe);
5c3b82e2 2189 return -EINVAL;
79e53945
JB
2190 }
2191
5c3b82e2 2192 mutex_lock(&dev->struct_mutex);
265db958
CW
2193 ret = intel_pin_and_fence_fb_obj(dev,
2194 to_intel_framebuffer(crtc->fb)->obj,
919926ae 2195 NULL);
5c3b82e2
CW
2196 if (ret != 0) {
2197 mutex_unlock(&dev->struct_mutex);
a5071c2f 2198 DRM_ERROR("pin & fence failed\n");
5c3b82e2
CW
2199 return ret;
2200 }
79e53945 2201
14667a4b
CW
2202 if (old_fb)
2203 intel_finish_fb(old_fb);
265db958 2204
6b8e6ed0 2205 ret = dev_priv->display.update_plane(crtc, crtc->fb, x, y);
4e6cfefc 2206 if (ret) {
1690e1eb 2207 intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
5c3b82e2 2208 mutex_unlock(&dev->struct_mutex);
a5071c2f 2209 DRM_ERROR("failed to update base address\n");
4e6cfefc 2210 return ret;
79e53945 2211 }
3c4fdcfb 2212
b7f1de28
CW
2213 if (old_fb) {
2214 intel_wait_for_vblank(dev, intel_crtc->pipe);
1690e1eb 2215 intel_unpin_fb_obj(to_intel_framebuffer(old_fb)->obj);
b7f1de28 2216 }
652c393a 2217
6b8e6ed0 2218 intel_update_fbc(dev);
5c3b82e2 2219 mutex_unlock(&dev->struct_mutex);
79e53945
JB
2220
2221 if (!dev->primary->master)
5c3b82e2 2222 return 0;
79e53945
JB
2223
2224 master_priv = dev->primary->master->driver_priv;
2225 if (!master_priv->sarea_priv)
5c3b82e2 2226 return 0;
79e53945 2227
265db958 2228 if (intel_crtc->pipe) {
79e53945
JB
2229 master_priv->sarea_priv->pipeB_x = x;
2230 master_priv->sarea_priv->pipeB_y = y;
5c3b82e2
CW
2231 } else {
2232 master_priv->sarea_priv->pipeA_x = x;
2233 master_priv->sarea_priv->pipeA_y = y;
79e53945 2234 }
5c3b82e2
CW
2235
2236 return 0;
79e53945
JB
2237}
2238
5eddb70b 2239static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock)
32f9d658
ZW
2240{
2241 struct drm_device *dev = crtc->dev;
2242 struct drm_i915_private *dev_priv = dev->dev_private;
2243 u32 dpa_ctl;
2244
28c97730 2245 DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", clock);
32f9d658
ZW
2246 dpa_ctl = I915_READ(DP_A);
2247 dpa_ctl &= ~DP_PLL_FREQ_MASK;
2248
2249 if (clock < 200000) {
2250 u32 temp;
2251 dpa_ctl |= DP_PLL_FREQ_160MHZ;
2252 /* workaround for 160MHz:
2253 1) program 0x4600c bits 15:0 = 0x8124
2254 2) program 0x46010 bit 0 = 1
2255 3) program 0x46034 bit 24 = 1
2256 4) program 0x64000 bit 14 = 1
2257 */
2258 temp = I915_READ(0x4600c);
2259 temp &= 0xffff0000;
2260 I915_WRITE(0x4600c, temp | 0x8124);
2261
2262 temp = I915_READ(0x46010);
2263 I915_WRITE(0x46010, temp | 1);
2264
2265 temp = I915_READ(0x46034);
2266 I915_WRITE(0x46034, temp | (1 << 24));
2267 } else {
2268 dpa_ctl |= DP_PLL_FREQ_270MHZ;
2269 }
2270 I915_WRITE(DP_A, dpa_ctl);
2271
5eddb70b 2272 POSTING_READ(DP_A);
32f9d658
ZW
2273 udelay(500);
2274}
2275
5e84e1a4
ZW
2276static void intel_fdi_normal_train(struct drm_crtc *crtc)
2277{
2278 struct drm_device *dev = crtc->dev;
2279 struct drm_i915_private *dev_priv = dev->dev_private;
2280 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2281 int pipe = intel_crtc->pipe;
2282 u32 reg, temp;
2283
2284 /* enable normal train */
2285 reg = FDI_TX_CTL(pipe);
2286 temp = I915_READ(reg);
61e499bf 2287 if (IS_IVYBRIDGE(dev)) {
357555c0
JB
2288 temp &= ~FDI_LINK_TRAIN_NONE_IVB;
2289 temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
61e499bf
KP
2290 } else {
2291 temp &= ~FDI_LINK_TRAIN_NONE;
2292 temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
357555c0 2293 }
5e84e1a4
ZW
2294 I915_WRITE(reg, temp);
2295
2296 reg = FDI_RX_CTL(pipe);
2297 temp = I915_READ(reg);
2298 if (HAS_PCH_CPT(dev)) {
2299 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2300 temp |= FDI_LINK_TRAIN_NORMAL_CPT;
2301 } else {
2302 temp &= ~FDI_LINK_TRAIN_NONE;
2303 temp |= FDI_LINK_TRAIN_NONE;
2304 }
2305 I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
2306
2307 /* wait one idle pattern time */
2308 POSTING_READ(reg);
2309 udelay(1000);
357555c0
JB
2310
2311 /* IVB wants error correction enabled */
2312 if (IS_IVYBRIDGE(dev))
2313 I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
2314 FDI_FE_ERRC_ENABLE);
5e84e1a4
ZW
2315}
2316
291427f5
JB
2317static void cpt_phase_pointer_enable(struct drm_device *dev, int pipe)
2318{
2319 struct drm_i915_private *dev_priv = dev->dev_private;
2320 u32 flags = I915_READ(SOUTH_CHICKEN1);
2321
2322 flags |= FDI_PHASE_SYNC_OVR(pipe);
2323 I915_WRITE(SOUTH_CHICKEN1, flags); /* once to unlock... */
2324 flags |= FDI_PHASE_SYNC_EN(pipe);
2325 I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to enable */
2326 POSTING_READ(SOUTH_CHICKEN1);
2327}
2328
8db9d77b
ZW
2329/* The FDI link training functions for ILK/Ibexpeak. */
2330static void ironlake_fdi_link_train(struct drm_crtc *crtc)
2331{
2332 struct drm_device *dev = crtc->dev;
2333 struct drm_i915_private *dev_priv = dev->dev_private;
2334 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2335 int pipe = intel_crtc->pipe;
0fc932b8 2336 int plane = intel_crtc->plane;
5eddb70b 2337 u32 reg, temp, tries;
8db9d77b 2338
0fc932b8
JB
2339 /* FDI needs bits from pipe & plane first */
2340 assert_pipe_enabled(dev_priv, pipe);
2341 assert_plane_enabled(dev_priv, plane);
2342
e1a44743
AJ
2343 /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
2344 for train result */
5eddb70b
CW
2345 reg = FDI_RX_IMR(pipe);
2346 temp = I915_READ(reg);
e1a44743
AJ
2347 temp &= ~FDI_RX_SYMBOL_LOCK;
2348 temp &= ~FDI_RX_BIT_LOCK;
5eddb70b
CW
2349 I915_WRITE(reg, temp);
2350 I915_READ(reg);
e1a44743
AJ
2351 udelay(150);
2352
8db9d77b 2353 /* enable CPU FDI TX and PCH FDI RX */
5eddb70b
CW
2354 reg = FDI_TX_CTL(pipe);
2355 temp = I915_READ(reg);
77ffb597
AJ
2356 temp &= ~(7 << 19);
2357 temp |= (intel_crtc->fdi_lanes - 1) << 19;
8db9d77b
ZW
2358 temp &= ~FDI_LINK_TRAIN_NONE;
2359 temp |= FDI_LINK_TRAIN_PATTERN_1;
5eddb70b 2360 I915_WRITE(reg, temp | FDI_TX_ENABLE);
8db9d77b 2361
5eddb70b
CW
2362 reg = FDI_RX_CTL(pipe);
2363 temp = I915_READ(reg);
8db9d77b
ZW
2364 temp &= ~FDI_LINK_TRAIN_NONE;
2365 temp |= FDI_LINK_TRAIN_PATTERN_1;
5eddb70b
CW
2366 I915_WRITE(reg, temp | FDI_RX_ENABLE);
2367
2368 POSTING_READ(reg);
8db9d77b
ZW
2369 udelay(150);
2370
5b2adf89 2371 /* Ironlake workaround, enable clock pointer after FDI enable */
6f06ce18
JB
2372 if (HAS_PCH_IBX(dev)) {
2373 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
2374 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
2375 FDI_RX_PHASE_SYNC_POINTER_EN);
2376 }
5b2adf89 2377
5eddb70b 2378 reg = FDI_RX_IIR(pipe);
e1a44743 2379 for (tries = 0; tries < 5; tries++) {
5eddb70b 2380 temp = I915_READ(reg);
8db9d77b
ZW
2381 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2382
2383 if ((temp & FDI_RX_BIT_LOCK)) {
2384 DRM_DEBUG_KMS("FDI train 1 done.\n");
5eddb70b 2385 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
8db9d77b
ZW
2386 break;
2387 }
8db9d77b 2388 }
e1a44743 2389 if (tries == 5)
5eddb70b 2390 DRM_ERROR("FDI train 1 fail!\n");
8db9d77b
ZW
2391
2392 /* Train 2 */
5eddb70b
CW
2393 reg = FDI_TX_CTL(pipe);
2394 temp = I915_READ(reg);
8db9d77b
ZW
2395 temp &= ~FDI_LINK_TRAIN_NONE;
2396 temp |= FDI_LINK_TRAIN_PATTERN_2;
5eddb70b 2397 I915_WRITE(reg, temp);
8db9d77b 2398
5eddb70b
CW
2399 reg = FDI_RX_CTL(pipe);
2400 temp = I915_READ(reg);
8db9d77b
ZW
2401 temp &= ~FDI_LINK_TRAIN_NONE;
2402 temp |= FDI_LINK_TRAIN_PATTERN_2;
5eddb70b 2403 I915_WRITE(reg, temp);
8db9d77b 2404
5eddb70b
CW
2405 POSTING_READ(reg);
2406 udelay(150);
8db9d77b 2407
5eddb70b 2408 reg = FDI_RX_IIR(pipe);
e1a44743 2409 for (tries = 0; tries < 5; tries++) {
5eddb70b 2410 temp = I915_READ(reg);
8db9d77b
ZW
2411 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2412
2413 if (temp & FDI_RX_SYMBOL_LOCK) {
5eddb70b 2414 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
8db9d77b
ZW
2415 DRM_DEBUG_KMS("FDI train 2 done.\n");
2416 break;
2417 }
8db9d77b 2418 }
e1a44743 2419 if (tries == 5)
5eddb70b 2420 DRM_ERROR("FDI train 2 fail!\n");
8db9d77b
ZW
2421
2422 DRM_DEBUG_KMS("FDI train done\n");
5c5313c8 2423
8db9d77b
ZW
2424}
2425
0206e353 2426static const int snb_b_fdi_train_param[] = {
8db9d77b
ZW
2427 FDI_LINK_TRAIN_400MV_0DB_SNB_B,
2428 FDI_LINK_TRAIN_400MV_6DB_SNB_B,
2429 FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
2430 FDI_LINK_TRAIN_800MV_0DB_SNB_B,
2431};
2432
2433/* The FDI link training functions for SNB/Cougarpoint. */
2434static void gen6_fdi_link_train(struct drm_crtc *crtc)
2435{
2436 struct drm_device *dev = crtc->dev;
2437 struct drm_i915_private *dev_priv = dev->dev_private;
2438 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2439 int pipe = intel_crtc->pipe;
fa37d39e 2440 u32 reg, temp, i, retry;
8db9d77b 2441
e1a44743
AJ
2442 /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
2443 for train result */
5eddb70b
CW
2444 reg = FDI_RX_IMR(pipe);
2445 temp = I915_READ(reg);
e1a44743
AJ
2446 temp &= ~FDI_RX_SYMBOL_LOCK;
2447 temp &= ~FDI_RX_BIT_LOCK;
5eddb70b
CW
2448 I915_WRITE(reg, temp);
2449
2450 POSTING_READ(reg);
e1a44743
AJ
2451 udelay(150);
2452
8db9d77b 2453 /* enable CPU FDI TX and PCH FDI RX */
5eddb70b
CW
2454 reg = FDI_TX_CTL(pipe);
2455 temp = I915_READ(reg);
77ffb597
AJ
2456 temp &= ~(7 << 19);
2457 temp |= (intel_crtc->fdi_lanes - 1) << 19;
8db9d77b
ZW
2458 temp &= ~FDI_LINK_TRAIN_NONE;
2459 temp |= FDI_LINK_TRAIN_PATTERN_1;
2460 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2461 /* SNB-B */
2462 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
5eddb70b 2463 I915_WRITE(reg, temp | FDI_TX_ENABLE);
8db9d77b 2464
5eddb70b
CW
2465 reg = FDI_RX_CTL(pipe);
2466 temp = I915_READ(reg);
8db9d77b
ZW
2467 if (HAS_PCH_CPT(dev)) {
2468 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2469 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2470 } else {
2471 temp &= ~FDI_LINK_TRAIN_NONE;
2472 temp |= FDI_LINK_TRAIN_PATTERN_1;
2473 }
5eddb70b
CW
2474 I915_WRITE(reg, temp | FDI_RX_ENABLE);
2475
2476 POSTING_READ(reg);
8db9d77b
ZW
2477 udelay(150);
2478
291427f5
JB
2479 if (HAS_PCH_CPT(dev))
2480 cpt_phase_pointer_enable(dev, pipe);
2481
0206e353 2482 for (i = 0; i < 4; i++) {
5eddb70b
CW
2483 reg = FDI_TX_CTL(pipe);
2484 temp = I915_READ(reg);
8db9d77b
ZW
2485 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2486 temp |= snb_b_fdi_train_param[i];
5eddb70b
CW
2487 I915_WRITE(reg, temp);
2488
2489 POSTING_READ(reg);
8db9d77b
ZW
2490 udelay(500);
2491
fa37d39e
SP
2492 for (retry = 0; retry < 5; retry++) {
2493 reg = FDI_RX_IIR(pipe);
2494 temp = I915_READ(reg);
2495 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2496 if (temp & FDI_RX_BIT_LOCK) {
2497 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
2498 DRM_DEBUG_KMS("FDI train 1 done.\n");
2499 break;
2500 }
2501 udelay(50);
8db9d77b 2502 }
fa37d39e
SP
2503 if (retry < 5)
2504 break;
8db9d77b
ZW
2505 }
2506 if (i == 4)
5eddb70b 2507 DRM_ERROR("FDI train 1 fail!\n");
8db9d77b
ZW
2508
2509 /* Train 2 */
5eddb70b
CW
2510 reg = FDI_TX_CTL(pipe);
2511 temp = I915_READ(reg);
8db9d77b
ZW
2512 temp &= ~FDI_LINK_TRAIN_NONE;
2513 temp |= FDI_LINK_TRAIN_PATTERN_2;
2514 if (IS_GEN6(dev)) {
2515 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2516 /* SNB-B */
2517 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
2518 }
5eddb70b 2519 I915_WRITE(reg, temp);
8db9d77b 2520
5eddb70b
CW
2521 reg = FDI_RX_CTL(pipe);
2522 temp = I915_READ(reg);
8db9d77b
ZW
2523 if (HAS_PCH_CPT(dev)) {
2524 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2525 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
2526 } else {
2527 temp &= ~FDI_LINK_TRAIN_NONE;
2528 temp |= FDI_LINK_TRAIN_PATTERN_2;
2529 }
5eddb70b
CW
2530 I915_WRITE(reg, temp);
2531
2532 POSTING_READ(reg);
8db9d77b
ZW
2533 udelay(150);
2534
0206e353 2535 for (i = 0; i < 4; i++) {
5eddb70b
CW
2536 reg = FDI_TX_CTL(pipe);
2537 temp = I915_READ(reg);
8db9d77b
ZW
2538 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2539 temp |= snb_b_fdi_train_param[i];
5eddb70b
CW
2540 I915_WRITE(reg, temp);
2541
2542 POSTING_READ(reg);
8db9d77b
ZW
2543 udelay(500);
2544
fa37d39e
SP
2545 for (retry = 0; retry < 5; retry++) {
2546 reg = FDI_RX_IIR(pipe);
2547 temp = I915_READ(reg);
2548 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2549 if (temp & FDI_RX_SYMBOL_LOCK) {
2550 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
2551 DRM_DEBUG_KMS("FDI train 2 done.\n");
2552 break;
2553 }
2554 udelay(50);
8db9d77b 2555 }
fa37d39e
SP
2556 if (retry < 5)
2557 break;
8db9d77b
ZW
2558 }
2559 if (i == 4)
5eddb70b 2560 DRM_ERROR("FDI train 2 fail!\n");
8db9d77b
ZW
2561
2562 DRM_DEBUG_KMS("FDI train done.\n");
2563}
2564
357555c0
JB
2565/* Manual link training for Ivy Bridge A0 parts */
2566static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
2567{
2568 struct drm_device *dev = crtc->dev;
2569 struct drm_i915_private *dev_priv = dev->dev_private;
2570 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2571 int pipe = intel_crtc->pipe;
2572 u32 reg, temp, i;
2573
2574 /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
2575 for train result */
2576 reg = FDI_RX_IMR(pipe);
2577 temp = I915_READ(reg);
2578 temp &= ~FDI_RX_SYMBOL_LOCK;
2579 temp &= ~FDI_RX_BIT_LOCK;
2580 I915_WRITE(reg, temp);
2581
2582 POSTING_READ(reg);
2583 udelay(150);
2584
2585 /* enable CPU FDI TX and PCH FDI RX */
2586 reg = FDI_TX_CTL(pipe);
2587 temp = I915_READ(reg);
2588 temp &= ~(7 << 19);
2589 temp |= (intel_crtc->fdi_lanes - 1) << 19;
2590 temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
2591 temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
2592 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2593 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
c4f9c4c2 2594 temp |= FDI_COMPOSITE_SYNC;
357555c0
JB
2595 I915_WRITE(reg, temp | FDI_TX_ENABLE);
2596
2597 reg = FDI_RX_CTL(pipe);
2598 temp = I915_READ(reg);
2599 temp &= ~FDI_LINK_TRAIN_AUTO;
2600 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2601 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
c4f9c4c2 2602 temp |= FDI_COMPOSITE_SYNC;
357555c0
JB
2603 I915_WRITE(reg, temp | FDI_RX_ENABLE);
2604
2605 POSTING_READ(reg);
2606 udelay(150);
2607
291427f5
JB
2608 if (HAS_PCH_CPT(dev))
2609 cpt_phase_pointer_enable(dev, pipe);
2610
0206e353 2611 for (i = 0; i < 4; i++) {
357555c0
JB
2612 reg = FDI_TX_CTL(pipe);
2613 temp = I915_READ(reg);
2614 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2615 temp |= snb_b_fdi_train_param[i];
2616 I915_WRITE(reg, temp);
2617
2618 POSTING_READ(reg);
2619 udelay(500);
2620
2621 reg = FDI_RX_IIR(pipe);
2622 temp = I915_READ(reg);
2623 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2624
2625 if (temp & FDI_RX_BIT_LOCK ||
2626 (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
2627 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
2628 DRM_DEBUG_KMS("FDI train 1 done.\n");
2629 break;
2630 }
2631 }
2632 if (i == 4)
2633 DRM_ERROR("FDI train 1 fail!\n");
2634
2635 /* Train 2 */
2636 reg = FDI_TX_CTL(pipe);
2637 temp = I915_READ(reg);
2638 temp &= ~FDI_LINK_TRAIN_NONE_IVB;
2639 temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
2640 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2641 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
2642 I915_WRITE(reg, temp);
2643
2644 reg = FDI_RX_CTL(pipe);
2645 temp = I915_READ(reg);
2646 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2647 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
2648 I915_WRITE(reg, temp);
2649
2650 POSTING_READ(reg);
2651 udelay(150);
2652
0206e353 2653 for (i = 0; i < 4; i++) {
357555c0
JB
2654 reg = FDI_TX_CTL(pipe);
2655 temp = I915_READ(reg);
2656 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
2657 temp |= snb_b_fdi_train_param[i];
2658 I915_WRITE(reg, temp);
2659
2660 POSTING_READ(reg);
2661 udelay(500);
2662
2663 reg = FDI_RX_IIR(pipe);
2664 temp = I915_READ(reg);
2665 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
2666
2667 if (temp & FDI_RX_SYMBOL_LOCK) {
2668 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
2669 DRM_DEBUG_KMS("FDI train 2 done.\n");
2670 break;
2671 }
2672 }
2673 if (i == 4)
2674 DRM_ERROR("FDI train 2 fail!\n");
2675
2676 DRM_DEBUG_KMS("FDI train done.\n");
2677}
2678
2679static void ironlake_fdi_pll_enable(struct drm_crtc *crtc)
2c07245f
ZW
2680{
2681 struct drm_device *dev = crtc->dev;
2682 struct drm_i915_private *dev_priv = dev->dev_private;
2683 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2684 int pipe = intel_crtc->pipe;
5eddb70b 2685 u32 reg, temp;
79e53945 2686
c64e311e 2687 /* Write the TU size bits so error detection works */
5eddb70b
CW
2688 I915_WRITE(FDI_RX_TUSIZE1(pipe),
2689 I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
c64e311e 2690
c98e9dcf 2691 /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
5eddb70b
CW
2692 reg = FDI_RX_CTL(pipe);
2693 temp = I915_READ(reg);
2694 temp &= ~((0x7 << 19) | (0x7 << 16));
c98e9dcf 2695 temp |= (intel_crtc->fdi_lanes - 1) << 19;
5eddb70b
CW
2696 temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
2697 I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
2698
2699 POSTING_READ(reg);
c98e9dcf
JB
2700 udelay(200);
2701
2702 /* Switch from Rawclk to PCDclk */
5eddb70b
CW
2703 temp = I915_READ(reg);
2704 I915_WRITE(reg, temp | FDI_PCDCLK);
2705
2706 POSTING_READ(reg);
c98e9dcf
JB
2707 udelay(200);
2708
bf507ef7
ED
2709 /* On Haswell, the PLL configuration for ports and pipes is handled
2710 * separately, as part of DDI setup */
2711 if (!IS_HASWELL(dev)) {
2712 /* Enable CPU FDI TX PLL, always on for Ironlake */
2713 reg = FDI_TX_CTL(pipe);
2714 temp = I915_READ(reg);
2715 if ((temp & FDI_TX_PLL_ENABLE) == 0) {
2716 I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
5eddb70b 2717
bf507ef7
ED
2718 POSTING_READ(reg);
2719 udelay(100);
2720 }
6be4a607 2721 }
0e23b99d
JB
2722}
2723
291427f5
JB
2724static void cpt_phase_pointer_disable(struct drm_device *dev, int pipe)
2725{
2726 struct drm_i915_private *dev_priv = dev->dev_private;
2727 u32 flags = I915_READ(SOUTH_CHICKEN1);
2728
2729 flags &= ~(FDI_PHASE_SYNC_EN(pipe));
2730 I915_WRITE(SOUTH_CHICKEN1, flags); /* once to disable... */
2731 flags &= ~(FDI_PHASE_SYNC_OVR(pipe));
2732 I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to lock */
2733 POSTING_READ(SOUTH_CHICKEN1);
2734}
0fc932b8
JB
2735static void ironlake_fdi_disable(struct drm_crtc *crtc)
2736{
2737 struct drm_device *dev = crtc->dev;
2738 struct drm_i915_private *dev_priv = dev->dev_private;
2739 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2740 int pipe = intel_crtc->pipe;
2741 u32 reg, temp;
2742
2743 /* disable CPU FDI tx and PCH FDI rx */
2744 reg = FDI_TX_CTL(pipe);
2745 temp = I915_READ(reg);
2746 I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
2747 POSTING_READ(reg);
2748
2749 reg = FDI_RX_CTL(pipe);
2750 temp = I915_READ(reg);
2751 temp &= ~(0x7 << 16);
2752 temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
2753 I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
2754
2755 POSTING_READ(reg);
2756 udelay(100);
2757
2758 /* Ironlake workaround, disable clock pointer after downing FDI */
6f06ce18
JB
2759 if (HAS_PCH_IBX(dev)) {
2760 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
0fc932b8
JB
2761 I915_WRITE(FDI_RX_CHICKEN(pipe),
2762 I915_READ(FDI_RX_CHICKEN(pipe)) &
6f06ce18 2763 ~FDI_RX_PHASE_SYNC_POINTER_EN);
291427f5
JB
2764 } else if (HAS_PCH_CPT(dev)) {
2765 cpt_phase_pointer_disable(dev, pipe);
6f06ce18 2766 }
0fc932b8
JB
2767
2768 /* still set train pattern 1 */
2769 reg = FDI_TX_CTL(pipe);
2770 temp = I915_READ(reg);
2771 temp &= ~FDI_LINK_TRAIN_NONE;
2772 temp |= FDI_LINK_TRAIN_PATTERN_1;
2773 I915_WRITE(reg, temp);
2774
2775 reg = FDI_RX_CTL(pipe);
2776 temp = I915_READ(reg);
2777 if (HAS_PCH_CPT(dev)) {
2778 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
2779 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
2780 } else {
2781 temp &= ~FDI_LINK_TRAIN_NONE;
2782 temp |= FDI_LINK_TRAIN_PATTERN_1;
2783 }
2784 /* BPC in FDI rx is consistent with that in PIPECONF */
2785 temp &= ~(0x07 << 16);
2786 temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11;
2787 I915_WRITE(reg, temp);
2788
2789 POSTING_READ(reg);
2790 udelay(100);
2791}
2792
e6c3a2a6
CW
2793static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
2794{
0f91128d 2795 struct drm_device *dev = crtc->dev;
e6c3a2a6
CW
2796
2797 if (crtc->fb == NULL)
2798 return;
2799
0f91128d
CW
2800 mutex_lock(&dev->struct_mutex);
2801 intel_finish_fb(crtc->fb);
2802 mutex_unlock(&dev->struct_mutex);
e6c3a2a6
CW
2803}
2804
040484af
JB
2805static bool intel_crtc_driving_pch(struct drm_crtc *crtc)
2806{
2807 struct drm_device *dev = crtc->dev;
2808 struct drm_mode_config *mode_config = &dev->mode_config;
2809 struct intel_encoder *encoder;
2810
2811 /*
2812 * If there's a non-PCH eDP on this crtc, it must be DP_A, and that
2813 * must be driven by its own crtc; no sharing is possible.
2814 */
2815 list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
2816 if (encoder->base.crtc != crtc)
2817 continue;
2818
6ee8bab0
ED
2819 /* On Haswell, LPT PCH handles the VGA connection via FDI, and Haswell
2820 * CPU handles all others */
2821 if (IS_HASWELL(dev)) {
2822 /* It is still unclear how this will work on PPT, so throw up a warning */
2823 WARN_ON(!HAS_PCH_LPT(dev));
2824
2825 if (encoder->type == DRM_MODE_ENCODER_DAC) {
2826 DRM_DEBUG_KMS("Haswell detected DAC encoder, assuming is PCH\n");
2827 return true;
2828 } else {
2829 DRM_DEBUG_KMS("Haswell detected encoder %d, assuming is CPU\n",
2830 encoder->type);
2831 return false;
2832 }
2833 }
2834
040484af
JB
2835 switch (encoder->type) {
2836 case INTEL_OUTPUT_EDP:
2837 if (!intel_encoder_is_pch_edp(&encoder->base))
2838 return false;
2839 continue;
2840 }
2841 }
2842
2843 return true;
2844}
2845
e615efe4
ED
2846/* Program iCLKIP clock to the desired frequency */
2847static void lpt_program_iclkip(struct drm_crtc *crtc)
2848{
2849 struct drm_device *dev = crtc->dev;
2850 struct drm_i915_private *dev_priv = dev->dev_private;
2851 u32 divsel, phaseinc, auxdiv, phasedir = 0;
2852 u32 temp;
2853
2854 /* It is necessary to ungate the pixclk gate prior to programming
2855 * the divisors, and gate it back when it is done.
2856 */
2857 I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);
2858
2859 /* Disable SSCCTL */
2860 intel_sbi_write(dev_priv, SBI_SSCCTL6,
2861 intel_sbi_read(dev_priv, SBI_SSCCTL6) |
2862 SBI_SSCCTL_DISABLE);
2863
2864 /* 20MHz is a corner case which is out of range for the 7-bit divisor */
2865 if (crtc->mode.clock == 20000) {
2866 auxdiv = 1;
2867 divsel = 0x41;
2868 phaseinc = 0x20;
2869 } else {
2870 /* The iCLK virtual clock root frequency is in MHz,
2871 * but the crtc->mode.clock is in KHz. To get the divisors,
2872 * it is necessary to divide one by another, so we
2873 * convert the virtual clock precision to KHz here for higher
2874 * precision.
2875 */
2876 u32 iclk_virtual_root_freq = 172800 * 1000;
2877 u32 iclk_pi_range = 64;
2878 u32 desired_divisor, msb_divisor_value, pi_value;
2879
2880 desired_divisor = (iclk_virtual_root_freq / crtc->mode.clock);
2881 msb_divisor_value = desired_divisor / iclk_pi_range;
2882 pi_value = desired_divisor % iclk_pi_range;
2883
2884 auxdiv = 0;
2885 divsel = msb_divisor_value - 2;
2886 phaseinc = pi_value;
2887 }
2888
2889 /* This should not happen with any sane values */
2890 WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
2891 ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
2892 WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
2893 ~SBI_SSCDIVINTPHASE_INCVAL_MASK);
2894
2895 DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
2896 crtc->mode.clock,
2897 auxdiv,
2898 divsel,
2899 phasedir,
2900 phaseinc);
2901
2902 /* Program SSCDIVINTPHASE6 */
2903 temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6);
2904 temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
2905 temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
2906 temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
2907 temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
2908 temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
2909 temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
2910
2911 intel_sbi_write(dev_priv,
2912 SBI_SSCDIVINTPHASE6,
2913 temp);
2914
2915 /* Program SSCAUXDIV */
2916 temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6);
2917 temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
2918 temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
2919 intel_sbi_write(dev_priv,
2920 SBI_SSCAUXDIV6,
2921 temp);
2922
2923
2924 /* Enable modulator and associated divider */
2925 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6);
2926 temp &= ~SBI_SSCCTL_DISABLE;
2927 intel_sbi_write(dev_priv,
2928 SBI_SSCCTL6,
2929 temp);
2930
2931 /* Wait for initialization time */
2932 udelay(24);
2933
2934 I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
2935}
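/*
 * Worked example of the iCLKIP divisor math above: a sketch with an assumed
 * 108 MHz pixel clock (the values are illustrative, not taken from the
 * driver), including why 20 MHz needs the hard-coded corner case.
 */
#if 0	/* example only, never compiled */
	u32 iclk_virtual_root_freq = 172800 * 1000;	/* 172,800,000 */
	u32 iclk_pi_range = 64;
	u32 clock = 108000;	/* assumed mode clock, in kHz */
	u32 desired_divisor = iclk_virtual_root_freq / clock;	/* 1600 */
	u32 msb_divisor_value = desired_divisor / iclk_pi_range;	/* 25 */
	u32 pi_value = desired_divisor % iclk_pi_range;	/* 0 */
	/* auxdiv = 0, divsel = msb_divisor_value - 2 = 23, phaseinc = 0.
	 * At 20 MHz (20000 kHz): desired_divisor = 8640, msb = 135, so
	 * divsel = 133 would overflow the 7-bit field, hence the special
	 * case at the top of the function. */
#endif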
2936
f67a559d
JB
2937/*
2938 * Enable PCH resources required for PCH ports:
2939 * - PCH PLLs
2940 * - FDI training & RX/TX
2941 * - update transcoder timings
2942 * - DP transcoding bits
2943 * - transcoder
2944 */
2945static void ironlake_pch_enable(struct drm_crtc *crtc)
0e23b99d
JB
2946{
2947 struct drm_device *dev = crtc->dev;
2948 struct drm_i915_private *dev_priv = dev->dev_private;
2949 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2950 int pipe = intel_crtc->pipe;
ee7b9f93 2951 u32 reg, temp;
2c07245f 2952
e7e164db
CW
2953 assert_transcoder_disabled(dev_priv, pipe);
2954
c98e9dcf 2955 /* For PCH output, training FDI link */
674cf967 2956 dev_priv->display.fdi_link_train(crtc);
2c07245f 2957
6f13b7b5
CW
2958 intel_enable_pch_pll(intel_crtc);
2959
e615efe4
ED
2960 if (HAS_PCH_LPT(dev)) {
2961 DRM_DEBUG_KMS("LPT detected: programming iCLKIP\n");
2962 lpt_program_iclkip(crtc);
2963 } else if (HAS_PCH_CPT(dev)) {
ee7b9f93 2964 u32 sel;
4b645f14 2965
c98e9dcf 2966 temp = I915_READ(PCH_DPLL_SEL);
ee7b9f93
JB
2967 switch (pipe) {
2968 default:
2969 case 0:
2970 temp |= TRANSA_DPLL_ENABLE;
2971 sel = TRANSA_DPLLB_SEL;
2972 break;
2973 case 1:
2974 temp |= TRANSB_DPLL_ENABLE;
2975 sel = TRANSB_DPLLB_SEL;
2976 break;
2977 case 2:
2978 temp |= TRANSC_DPLL_ENABLE;
2979 sel = TRANSC_DPLLB_SEL;
2980 break;
d64311ab 2981 }
ee7b9f93
JB
2982 if (intel_crtc->pch_pll->pll_reg == _PCH_DPLL_B)
2983 temp |= sel;
2984 else
2985 temp &= ~sel;
c98e9dcf 2986 I915_WRITE(PCH_DPLL_SEL, temp);
c98e9dcf 2987 }
5eddb70b 2988
d9b6cb56
JB
2989 /* set transcoder timing, panel must allow it */
2990 assert_panel_unlocked(dev_priv, pipe);
5eddb70b
CW
2991 I915_WRITE(TRANS_HTOTAL(pipe), I915_READ(HTOTAL(pipe)));
2992 I915_WRITE(TRANS_HBLANK(pipe), I915_READ(HBLANK(pipe)));
2993 I915_WRITE(TRANS_HSYNC(pipe), I915_READ(HSYNC(pipe)));
8db9d77b 2994
5eddb70b
CW
2995 I915_WRITE(TRANS_VTOTAL(pipe), I915_READ(VTOTAL(pipe)));
2996 I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe)));
2997 I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe)));
0529a0d9 2998 I915_WRITE(TRANS_VSYNCSHIFT(pipe), I915_READ(VSYNCSHIFT(pipe)));
8db9d77b 2999
f57e1e3a
ED
3000 if (!IS_HASWELL(dev))
3001 intel_fdi_normal_train(crtc);
5e84e1a4 3002
c98e9dcf
JB
3003 /* For PCH DP, enable TRANS_DP_CTL */
3004 if (HAS_PCH_CPT(dev) &&
417e822d
KP
3005 (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
3006 intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
9325c9f0 3007 u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) >> 5;
5eddb70b
CW
3008 reg = TRANS_DP_CTL(pipe);
3009 temp = I915_READ(reg);
3010 temp &= ~(TRANS_DP_PORT_SEL_MASK |
220cad3c
EA
3011 TRANS_DP_SYNC_MASK |
3012 TRANS_DP_BPC_MASK);
5eddb70b
CW
3013 temp |= (TRANS_DP_OUTPUT_ENABLE |
3014 TRANS_DP_ENH_FRAMING);
9325c9f0 3015 temp |= bpc << 9; /* same format but at 11:9 */
c98e9dcf
JB
3016
3017 if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
5eddb70b 3018 temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
c98e9dcf 3019 if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC)
5eddb70b 3020 temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
c98e9dcf
JB
3021
3022 switch (intel_trans_dp_port_sel(crtc)) {
3023 case PCH_DP_B:
5eddb70b 3024 temp |= TRANS_DP_PORT_SEL_B;
c98e9dcf
JB
3025 break;
3026 case PCH_DP_C:
5eddb70b 3027 temp |= TRANS_DP_PORT_SEL_C;
c98e9dcf
JB
3028 break;
3029 case PCH_DP_D:
5eddb70b 3030 temp |= TRANS_DP_PORT_SEL_D;
c98e9dcf
JB
3031 break;
3032 default:
3033 DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n");
5eddb70b 3034 temp |= TRANS_DP_PORT_SEL_B;
c98e9dcf 3035 break;
32f9d658 3036 }
2c07245f 3037
5eddb70b 3038 I915_WRITE(reg, temp);
6be4a607 3039 }
b52eb4dc 3040
040484af 3041 intel_enable_transcoder(dev_priv, pipe);
f67a559d
JB
3042}
3043
ee7b9f93
JB
3044static void intel_put_pch_pll(struct intel_crtc *intel_crtc)
3045{
3046 struct intel_pch_pll *pll = intel_crtc->pch_pll;
3047
3048 if (pll == NULL)
3049 return;
3050
3051 if (pll->refcount == 0) {
3052 WARN(1, "bad PCH PLL refcount\n");
3053 return;
3054 }
3055
3056 --pll->refcount;
3057 intel_crtc->pch_pll = NULL;
3058}
3059
3060static struct intel_pch_pll *intel_get_pch_pll(struct intel_crtc *intel_crtc, u32 dpll, u32 fp)
3061{
3062 struct drm_i915_private *dev_priv = intel_crtc->base.dev->dev_private;
3063 struct intel_pch_pll *pll;
3064 int i;
3065
3066 pll = intel_crtc->pch_pll;
3067 if (pll) {
3068 DRM_DEBUG_KMS("CRTC:%d reusing existing PCH PLL %x\n",
3069 intel_crtc->base.base.id, pll->pll_reg);
3070 goto prepare;
3071 }
3072
98b6bd99
DV
3073 if (HAS_PCH_IBX(dev_priv->dev)) {
3074 /* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
3075 i = intel_crtc->pipe;
3076 pll = &dev_priv->pch_plls[i];
3077
3078 DRM_DEBUG_KMS("CRTC:%d using pre-allocated PCH PLL %x\n",
3079 intel_crtc->base.base.id, pll->pll_reg);
3080
3081 goto found;
3082 }
3083
ee7b9f93
JB
3084 for (i = 0; i < dev_priv->num_pch_pll; i++) {
3085 pll = &dev_priv->pch_plls[i];
3086
3087 /* Only want to check enabled timings first */
3088 if (pll->refcount == 0)
3089 continue;
3090
3091 if (dpll == (I915_READ(pll->pll_reg) & 0x7fffffff) &&
3092 fp == I915_READ(pll->fp0_reg)) {
3093 DRM_DEBUG_KMS("CRTC:%d sharing existing PCH PLL %x (refcount %d, ative %d)\n",
3094 intel_crtc->base.base.id,
3095 pll->pll_reg, pll->refcount, pll->active);
3096
3097 goto found;
3098 }
3099 }
3100
3101 /* Ok no matching timings, maybe there's a free one? */
3102 for (i = 0; i < dev_priv->num_pch_pll; i++) {
3103 pll = &dev_priv->pch_plls[i];
3104 if (pll->refcount == 0) {
3105 DRM_DEBUG_KMS("CRTC:%d allocated PCH PLL %x\n",
3106 intel_crtc->base.base.id, pll->pll_reg);
3107 goto found;
3108 }
3109 }
3110
3111 return NULL;
3112
3113found:
3114 intel_crtc->pch_pll = pll;
3115 pll->refcount++;
3116 DRM_DEBUG_DRIVER("using pll %d for pipe %d\n", i, intel_crtc->pipe);
3117prepare: /* separate function? */
3118 DRM_DEBUG_DRIVER("switching PLL %x off\n", pll->pll_reg);
ee7b9f93 3119
e04c7350
CW
3120 /* Wait for the clocks to stabilize before rewriting the regs */
3121 I915_WRITE(pll->pll_reg, dpll & ~DPLL_VCO_ENABLE);
ee7b9f93
JB
3122 POSTING_READ(pll->pll_reg);
3123 udelay(150);
e04c7350
CW
3124
3125 I915_WRITE(pll->fp0_reg, fp);
3126 I915_WRITE(pll->pll_reg, dpll & ~DPLL_VCO_ENABLE);
ee7b9f93
JB
3127 pll->on = false;
3128 return pll;
3129}
3130
d4270e57
JB
3131void intel_cpt_verify_modeset(struct drm_device *dev, int pipe)
3132{
3133 struct drm_i915_private *dev_priv = dev->dev_private;
3134 int dslreg = PIPEDSL(pipe), tc2reg = TRANS_CHICKEN2(pipe);
3135 u32 temp;
3136
3137 temp = I915_READ(dslreg);
3138 udelay(500);
3139 if (wait_for(I915_READ(dslreg) != temp, 5)) {
3140 /* Without this, mode sets may fail silently on FDI */
3141 I915_WRITE(tc2reg, TRANS_AUTOTRAIN_GEN_STALL_DIS);
3142 udelay(250);
3143 I915_WRITE(tc2reg, 0);
3144 if (wait_for(I915_READ(dslreg) != temp, 5))
3145 DRM_ERROR("mode set failed: pipe %d stuck\n", pipe);
3146 }
3147}
3148
f67a559d
JB
3149static void ironlake_crtc_enable(struct drm_crtc *crtc)
3150{
3151 struct drm_device *dev = crtc->dev;
3152 struct drm_i915_private *dev_priv = dev->dev_private;
3153 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3154 int pipe = intel_crtc->pipe;
3155 int plane = intel_crtc->plane;
3156 u32 temp;
3157 bool is_pch_port;
3158
3159 if (intel_crtc->active)
3160 return;
3161
3162 intel_crtc->active = true;
3163 intel_update_watermarks(dev);
3164
3165 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
3166 temp = I915_READ(PCH_LVDS);
3167 if ((temp & LVDS_PORT_EN) == 0)
3168 I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN);
3169 }
3170
3171 is_pch_port = intel_crtc_driving_pch(crtc);
3172
3173 if (is_pch_port)
357555c0 3174 ironlake_fdi_pll_enable(crtc);
f67a559d
JB
3175 else
3176 ironlake_fdi_disable(crtc);
3177
3178 /* Enable panel fitting for LVDS */
3179 if (dev_priv->pch_pf_size &&
3180 (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || HAS_eDP)) {
3181 /* Force use of hard-coded filter coefficients
3182 * as some pre-programmed values are broken,
3183 * e.g. x201.
3184 */
9db4a9c7
JB
3185 I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
3186 I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos);
3187 I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size);
f67a559d
JB
3188 }
3189
9c54c0dd
JB
3190 /*
3191 * On ILK+ LUT must be loaded before the pipe is running but with
3192 * clocks enabled
3193 */
3194 intel_crtc_load_lut(crtc);
3195
f67a559d
JB
3196 intel_enable_pipe(dev_priv, pipe, is_pch_port);
3197 intel_enable_plane(dev_priv, plane, pipe);
3198
3199 if (is_pch_port)
3200 ironlake_pch_enable(crtc);
c98e9dcf 3201
d1ebd816 3202 mutex_lock(&dev->struct_mutex);
bed4a673 3203 intel_update_fbc(dev);
d1ebd816
BW
3204 mutex_unlock(&dev->struct_mutex);
3205
6b383a7f 3206 intel_crtc_update_cursor(crtc, true);
6be4a607
JB
3207}
3208
3209static void ironlake_crtc_disable(struct drm_crtc *crtc)
3210{
3211 struct drm_device *dev = crtc->dev;
3212 struct drm_i915_private *dev_priv = dev->dev_private;
3213 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3214 int pipe = intel_crtc->pipe;
3215 int plane = intel_crtc->plane;
5eddb70b 3216 u32 reg, temp;
b52eb4dc 3217
f7abfe8b
CW
3218 if (!intel_crtc->active)
3219 return;
3220
e6c3a2a6 3221 intel_crtc_wait_for_pending_flips(crtc);
6be4a607 3222 drm_vblank_off(dev, pipe);
6b383a7f 3223 intel_crtc_update_cursor(crtc, false);
5eddb70b 3224
b24e7179 3225 intel_disable_plane(dev_priv, plane, pipe);
913d8d11 3226
973d04f9
CW
3227 if (dev_priv->cfb_plane == plane)
3228 intel_disable_fbc(dev);
2c07245f 3229
b24e7179 3230 intel_disable_pipe(dev_priv, pipe);
32f9d658 3231
6be4a607 3232 /* Disable PF */
9db4a9c7
JB
3233 I915_WRITE(PF_CTL(pipe), 0);
3234 I915_WRITE(PF_WIN_SZ(pipe), 0);
2c07245f 3235
0fc932b8 3236 ironlake_fdi_disable(crtc);
2c07245f 3237
47a05eca
JB
3238 /* This is a horrible layering violation; we should be doing this in
3239 * the connector/encoder ->prepare instead, but we don't always have
3240 * enough information there about the config to know whether it will
3241 * actually be necessary or just cause undesired flicker.
3242 */
3243 intel_disable_pch_ports(dev_priv, pipe);
249c0e64 3244
040484af 3245 intel_disable_transcoder(dev_priv, pipe);
913d8d11 3246
6be4a607
JB
3247 if (HAS_PCH_CPT(dev)) {
3248 /* disable TRANS_DP_CTL */
5eddb70b
CW
3249 reg = TRANS_DP_CTL(pipe);
3250 temp = I915_READ(reg);
3251 temp &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK);
cb3543c6 3252 temp |= TRANS_DP_PORT_SEL_NONE;
5eddb70b 3253 I915_WRITE(reg, temp);
6be4a607
JB
3254
3255 /* disable DPLL_SEL */
3256 temp = I915_READ(PCH_DPLL_SEL);
9db4a9c7
JB
3257 switch (pipe) {
3258 case 0:
d64311ab 3259 temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL);
9db4a9c7
JB
3260 break;
3261 case 1:
6be4a607 3262 temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL);
9db4a9c7
JB
3263 break;
3264 case 2:
4b645f14 3265 /* C shares PLL A or B */
d64311ab 3266 temp &= ~(TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL);
9db4a9c7
JB
3267 break;
3268 default:
3269 BUG(); /* wtf */
3270 }
6be4a607 3271 I915_WRITE(PCH_DPLL_SEL, temp);
6be4a607 3272 }
e3421a18 3273
6be4a607 3274 /* disable PCH DPLL */
ee7b9f93 3275 intel_disable_pch_pll(intel_crtc);
8db9d77b 3276
6be4a607 3277 /* Switch from PCDclk to Rawclk */
5eddb70b
CW
3278 reg = FDI_RX_CTL(pipe);
3279 temp = I915_READ(reg);
3280 I915_WRITE(reg, temp & ~FDI_PCDCLK);
8db9d77b 3281
6be4a607 3282 /* Disable CPU FDI TX PLL */
5eddb70b
CW
3283 reg = FDI_TX_CTL(pipe);
3284 temp = I915_READ(reg);
3285 I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
3286
3287 POSTING_READ(reg);
6be4a607 3288 udelay(100);
8db9d77b 3289
5eddb70b
CW
3290 reg = FDI_RX_CTL(pipe);
3291 temp = I915_READ(reg);
3292 I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
2c07245f 3293
6be4a607 3294 /* Wait for the clocks to turn off. */
5eddb70b 3295 POSTING_READ(reg);
6be4a607 3296 udelay(100);
6b383a7f 3297
f7abfe8b 3298 intel_crtc->active = false;
6b383a7f 3299 intel_update_watermarks(dev);
d1ebd816
BW
3300
3301 mutex_lock(&dev->struct_mutex);
6b383a7f 3302 intel_update_fbc(dev);
d1ebd816 3303 mutex_unlock(&dev->struct_mutex);
6be4a607 3304}
1b3c7a47 3305
6be4a607
JB
3306static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode)
3307{
3308 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3309 int pipe = intel_crtc->pipe;
3310 int plane = intel_crtc->plane;
8db9d77b 3311
6be4a607
JB
3312 /* XXX: When our outputs are all unaware of DPMS modes other than off
3313 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
3314 */
3315 switch (mode) {
3316 case DRM_MODE_DPMS_ON:
3317 case DRM_MODE_DPMS_STANDBY:
3318 case DRM_MODE_DPMS_SUSPEND:
3319 DRM_DEBUG_KMS("crtc %d/%d dpms on\n", pipe, plane);
3320 ironlake_crtc_enable(crtc);
3321 break;
1b3c7a47 3322
6be4a607
JB
3323 case DRM_MODE_DPMS_OFF:
3324 DRM_DEBUG_KMS("crtc %d/%d dpms off\n", pipe, plane);
3325 ironlake_crtc_disable(crtc);
2c07245f
ZW
3326 break;
3327 }
3328}
3329
ee7b9f93
JB
3330static void ironlake_crtc_off(struct drm_crtc *crtc)
3331{
3332 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3333 intel_put_pch_pll(intel_crtc);
3334}
3335
02e792fb
DV
3336static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable)
3337{
02e792fb 3338 if (!enable && intel_crtc->overlay) {
23f09ce3 3339 struct drm_device *dev = intel_crtc->base.dev;
ce453d81 3340 struct drm_i915_private *dev_priv = dev->dev_private;
03f77ea5 3341
23f09ce3 3342 mutex_lock(&dev->struct_mutex);
ce453d81
CW
3343 dev_priv->mm.interruptible = false;
3344 (void) intel_overlay_switch_off(intel_crtc->overlay);
3345 dev_priv->mm.interruptible = true;
23f09ce3 3346 mutex_unlock(&dev->struct_mutex);
02e792fb 3347 }
02e792fb 3348
5dcdbcb0
CW
3349 /* Let userspace switch the overlay on again. In most cases userspace
3350 * has to recompute where to put it anyway.
3351 */
02e792fb
DV
3352}
3353
0b8765c6 3354static void i9xx_crtc_enable(struct drm_crtc *crtc)
79e53945
JB
3355{
3356 struct drm_device *dev = crtc->dev;
79e53945
JB
3357 struct drm_i915_private *dev_priv = dev->dev_private;
3358 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3359 int pipe = intel_crtc->pipe;
80824003 3360 int plane = intel_crtc->plane;
79e53945 3361
f7abfe8b
CW
3362 if (intel_crtc->active)
3363 return;
3364
3365 intel_crtc->active = true;
6b383a7f
CW
3366 intel_update_watermarks(dev);
3367
63d7bbe9 3368 intel_enable_pll(dev_priv, pipe);
040484af 3369 intel_enable_pipe(dev_priv, pipe, false);
b24e7179 3370 intel_enable_plane(dev_priv, plane, pipe);
79e53945 3371
0b8765c6 3372 intel_crtc_load_lut(crtc);
bed4a673 3373 intel_update_fbc(dev);
79e53945 3374
0b8765c6
JB
3375 /* Give the overlay scaler a chance to enable if it's on this pipe */
3376 intel_crtc_dpms_overlay(intel_crtc, true);
6b383a7f 3377 intel_crtc_update_cursor(crtc, true);
0b8765c6 3378}
79e53945 3379
0b8765c6
JB
3380static void i9xx_crtc_disable(struct drm_crtc *crtc)
3381{
3382 struct drm_device *dev = crtc->dev;
3383 struct drm_i915_private *dev_priv = dev->dev_private;
3384 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3385 int pipe = intel_crtc->pipe;
3386 int plane = intel_crtc->plane;
b690e96c 3387
f7abfe8b
CW
3388 if (!intel_crtc->active)
3389 return;
3390
0b8765c6 3391 /* Give the overlay scaler a chance to disable if it's on this pipe */
e6c3a2a6
CW
3392 intel_crtc_wait_for_pending_flips(crtc);
3393 drm_vblank_off(dev, pipe);
0b8765c6 3394 intel_crtc_dpms_overlay(intel_crtc, false);
6b383a7f 3395 intel_crtc_update_cursor(crtc, false);
0b8765c6 3396
973d04f9
CW
3397 if (dev_priv->cfb_plane == plane)
3398 intel_disable_fbc(dev);
79e53945 3399
b24e7179 3400 intel_disable_plane(dev_priv, plane, pipe);
b24e7179 3401 intel_disable_pipe(dev_priv, pipe);
63d7bbe9 3402 intel_disable_pll(dev_priv, pipe);
0b8765c6 3403
f7abfe8b 3404 intel_crtc->active = false;
6b383a7f
CW
3405 intel_update_fbc(dev);
3406 intel_update_watermarks(dev);
0b8765c6
JB
3407}
3408
3409static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
3410{
3411 /* XXX: When our outputs are all unaware of DPMS modes other than off
3412 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
3413 */
3414 switch (mode) {
3415 case DRM_MODE_DPMS_ON:
3416 case DRM_MODE_DPMS_STANDBY:
3417 case DRM_MODE_DPMS_SUSPEND:
3418 i9xx_crtc_enable(crtc);
3419 break;
3420 case DRM_MODE_DPMS_OFF:
3421 i9xx_crtc_disable(crtc);
79e53945
JB
3422 break;
3423 }
2c07245f
ZW
3424}
3425
ee7b9f93
JB
3426static void i9xx_crtc_off(struct drm_crtc *crtc)
3427{
3428}
3429
2c07245f
ZW
3430/**
3431 * Sets the power management mode of the pipe and plane.
2c07245f
ZW
3432 */
3433static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
3434{
3435 struct drm_device *dev = crtc->dev;
e70236a8 3436 struct drm_i915_private *dev_priv = dev->dev_private;
2c07245f
ZW
3437 struct drm_i915_master_private *master_priv;
3438 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3439 int pipe = intel_crtc->pipe;
3440 bool enabled;
3441
032d2a0d
CW
3442 if (intel_crtc->dpms_mode == mode)
3443 return;
3444
65655d4a 3445 intel_crtc->dpms_mode = mode;
debcaddc 3446
e70236a8 3447 dev_priv->display.dpms(crtc, mode);
79e53945
JB
3448
3449 if (!dev->primary->master)
3450 return;
3451
3452 master_priv = dev->primary->master->driver_priv;
3453 if (!master_priv->sarea_priv)
3454 return;
3455
3456 enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF;
3457
3458 switch (pipe) {
3459 case 0:
3460 master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0;
3461 master_priv->sarea_priv->pipeA_h = enabled ? crtc->mode.vdisplay : 0;
3462 break;
3463 case 1:
3464 master_priv->sarea_priv->pipeB_w = enabled ? crtc->mode.hdisplay : 0;
3465 master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0;
3466 break;
3467 default:
9db4a9c7 3468 DRM_ERROR("Can't update pipe %c in SAREA\n", pipe_name(pipe));
79e53945
JB
3469 break;
3470 }
79e53945
JB
3471}
3472
cdd59983
CW
3473static void intel_crtc_disable(struct drm_crtc *crtc)
3474{
3475 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
3476 struct drm_device *dev = crtc->dev;
ee7b9f93 3477 struct drm_i915_private *dev_priv = dev->dev_private;
cdd59983
CW
3478
3479 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
ee7b9f93
JB
3480 dev_priv->display.off(crtc);
3481
931872fc
CW
3482 assert_plane_disabled(dev->dev_private, to_intel_crtc(crtc)->plane);
3483 assert_pipe_disabled(dev->dev_private, to_intel_crtc(crtc)->pipe);
cdd59983
CW
3484
3485 if (crtc->fb) {
3486 mutex_lock(&dev->struct_mutex);
1690e1eb 3487 intel_unpin_fb_obj(to_intel_framebuffer(crtc->fb)->obj);
cdd59983
CW
3488 mutex_unlock(&dev->struct_mutex);
3489 }
3490}
3491
7e7d76c3
JB
3492/* Prepare for a mode set.
3493 *
3494 * Note we could be a lot smarter here. We need to figure out which outputs
3495 * will be enabled, which disabled (in short, how the config will change)
3496 * and perform the minimum necessary steps to accomplish that, e.g. updating
3497 * watermarks, FBC configuration, making sure PLLs are programmed correctly,
3498 * panel fitting is in the proper state, etc.
3499 */
3500static void i9xx_crtc_prepare(struct drm_crtc *crtc)
79e53945 3501{
7e7d76c3 3502 i9xx_crtc_disable(crtc);
79e53945
JB
3503}
3504
7e7d76c3 3505static void i9xx_crtc_commit(struct drm_crtc *crtc)
79e53945 3506{
7e7d76c3 3507 i9xx_crtc_enable(crtc);
7e7d76c3
JB
3508}
3509
3510static void ironlake_crtc_prepare(struct drm_crtc *crtc)
3511{
7e7d76c3 3512 ironlake_crtc_disable(crtc);
7e7d76c3
JB
3513}
3514
3515static void ironlake_crtc_commit(struct drm_crtc *crtc)
3516{
7e7d76c3 3517 ironlake_crtc_enable(crtc);
79e53945
JB
3518}
3519
0206e353 3520void intel_encoder_prepare(struct drm_encoder *encoder)
79e53945
JB
3521{
3522 struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
3523 /* lvds has its own version of prepare see intel_lvds_prepare */
3524 encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
3525}
3526
0206e353 3527void intel_encoder_commit(struct drm_encoder *encoder)
79e53945
JB
3528{
3529 struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
d4270e57 3530 struct drm_device *dev = encoder->dev;
d47d7cb8 3531 struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
d4270e57 3532
79e53945
JB
3533 /* lvds has its own version of commit see intel_lvds_commit */
3534 encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
d4270e57
JB
3535
3536 if (HAS_PCH_CPT(dev))
3537 intel_cpt_verify_modeset(dev, intel_crtc->pipe);
79e53945
JB
3538}
3539
ea5b213a
CW
3540void intel_encoder_destroy(struct drm_encoder *encoder)
3541{
4ef69c7a 3542 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
ea5b213a 3543
ea5b213a
CW
3544 drm_encoder_cleanup(encoder);
3545 kfree(intel_encoder);
3546}
3547
79e53945
JB
3548static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
3549 struct drm_display_mode *mode,
3550 struct drm_display_mode *adjusted_mode)
3551{
2c07245f 3552 struct drm_device *dev = crtc->dev;
89749350 3553
bad720ff 3554 if (HAS_PCH_SPLIT(dev)) {
2c07245f 3555 /* FDI link clock is fixed at 2.7G */
2377b741
JB
3556 if (mode->clock * 3 > IRONLAKE_FDI_FREQ * 4)
3557 return false;
2c07245f 3558 }
89749350 3559
f9bef081
DV
3560 /* All interlaced capable intel hw wants timings in frames. Note though
3561 * that intel_lvds_mode_fixup does some funny tricks with the crtc
3562 * timings, so we need to be careful not to clobber these. */
3563 if (!(adjusted_mode->private_flags & INTEL_MODE_CRTC_TIMINGS_SET))
3564 drm_mode_set_crtcinfo(adjusted_mode, 0);
89749350 3565
79e53945
JB
3566 return true;
3567}
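/*
 * Editorial note (illustrative, not part of the driver): spelled out, the
 * PCH-split check above rejects any mode whose pixel clock exceeds
 * IRONLAKE_FDI_FREQ * 4 / 3, i.e. with the 2,700,000 kHz value defined
 * near the top of this file:
 *
 *	mode->clock * 3 > 2700000 * 4
 *	mode->clock     > 3,600,000 kHz
 *
 * Only this coarse inequality is applied here; the finer-grained FDI
 * bandwidth and lane accounting happens later in ironlake_crtc_mode_set().
 */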
3568
25eb05fc
JB
3569static int valleyview_get_display_clock_speed(struct drm_device *dev)
3570{
3571 return 400000; /* FIXME */
3572}
3573
e70236a8
JB
3574static int i945_get_display_clock_speed(struct drm_device *dev)
3575{
3576 return 400000;
3577}
79e53945 3578
e70236a8 3579static int i915_get_display_clock_speed(struct drm_device *dev)
79e53945 3580{
e70236a8
JB
3581 return 333000;
3582}
79e53945 3583
e70236a8
JB
3584static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
3585{
3586 return 200000;
3587}
79e53945 3588
e70236a8
JB
3589static int i915gm_get_display_clock_speed(struct drm_device *dev)
3590{
3591 u16 gcfgc = 0;
79e53945 3592
e70236a8
JB
3593 pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
3594
3595 if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
3596 return 133000;
3597 else {
3598 switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
3599 case GC_DISPLAY_CLOCK_333_MHZ:
3600 return 333000;
3601 default:
3602 case GC_DISPLAY_CLOCK_190_200_MHZ:
3603 return 190000;
79e53945 3604 }
e70236a8
JB
3605 }
3606}
3607
3608static int i865_get_display_clock_speed(struct drm_device *dev)
3609{
3610 return 266000;
3611}
3612
3613static int i855_get_display_clock_speed(struct drm_device *dev)
3614{
3615 u16 hpllcc = 0;
3616 /* Assume that the hardware is in the high speed state. This
3617 * should be the default.
3618 */
3619 switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
3620 case GC_CLOCK_133_200:
3621 case GC_CLOCK_100_200:
3622 return 200000;
3623 case GC_CLOCK_166_250:
3624 return 250000;
3625 case GC_CLOCK_100_133:
79e53945 3626 return 133000;
e70236a8 3627 }
79e53945 3628
e70236a8
JB
3629 /* Shouldn't happen */
3630 return 0;
3631}
79e53945 3632
e70236a8
JB
3633static int i830_get_display_clock_speed(struct drm_device *dev)
3634{
3635 return 133000;
79e53945
JB
3636}
3637
2c07245f
ZW
3638struct fdi_m_n {
3639 u32 tu;
3640 u32 gmch_m;
3641 u32 gmch_n;
3642 u32 link_m;
3643 u32 link_n;
3644};
3645
3646static void
3647fdi_reduce_ratio(u32 *num, u32 *den)
3648{
3649 while (*num > 0xffffff || *den > 0xffffff) {
3650 *num >>= 1;
3651 *den >>= 1;
3652 }
3653}
3654
2c07245f 3655static void
f2b115e6
AJ
3656ironlake_compute_m_n(int bits_per_pixel, int nlanes, int pixel_clock,
3657 int link_clock, struct fdi_m_n *m_n)
2c07245f 3658{
2c07245f
ZW
3659 m_n->tu = 64; /* default size */
3660
22ed1113
CW
3661 /* BUG_ON(pixel_clock > INT_MAX / 36); */
3662 m_n->gmch_m = bits_per_pixel * pixel_clock;
3663 m_n->gmch_n = link_clock * nlanes * 8;
2c07245f
ZW
3664 fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
3665
22ed1113
CW
3666 m_n->link_m = pixel_clock;
3667 m_n->link_n = link_clock;
2c07245f
ZW
3668 fdi_reduce_ratio(&m_n->link_m, &m_n->link_n);
3669}
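/*
 * Editorial note (illustrative, not part of the driver): the M/N
 * computation above, worked through with assumed example numbers --
 * 1080p at a 148,500 kHz pixel clock, 24 bpp, and a 4-lane FDI link whose
 * clock is expressed as 270,000 in the same kHz-style units:
 *
 *	gmch_m = 24 * 148500    = 3,564,000
 *	gmch_n = 270000 * 4 * 8 = 8,640,000
 *	link_m = 148,500
 *	link_n = 270,000
 *
 * All four values already fit in 24 bits (<= 0xffffff), so
 * fdi_reduce_ratio() leaves them untouched; it only halves numerator and
 * denominator together when one of them would overflow the register field.
 */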
3670
a7615030
CW
3671static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
3672{
72bbe58c
KP
3673 if (i915_panel_use_ssc >= 0)
3674 return i915_panel_use_ssc != 0;
3675 return dev_priv->lvds_use_ssc
435793df 3676 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
a7615030
CW
3677}
3678
5a354204
JB
3679/**
3680 * intel_choose_pipe_bpp_dither - figure out what color depth the pipe should send
3681 * @crtc: CRTC structure
3b5c78a3 3682 * @mode: requested mode
5a354204
JB
3683 *
3684 * A pipe may be connected to one or more outputs. Based on the depth of the
3685 * attached framebuffer, choose a good color depth to use on the pipe.
3686 *
3687 * If possible, match the pipe depth to the fb depth. In some cases, this
3688 * isn't ideal, because the connected output supports a lesser or restricted
3689 * set of depths. Resolve that here:
3690 * LVDS typically supports only 6bpc, so clamp down in that case
3691 * HDMI supports only 8bpc or 12bpc, so clamp to 8bpc with dither for 10bpc
3692 * Displays may support a restricted set as well, check EDID and clamp as
3693 * appropriate.
3b5c78a3 3694 * DP may want to dither down to 6bpc to fit larger modes
5a354204
JB
3695 *
3696 * RETURNS:
3697 * Dithering requirement (i.e. false if display bpc and pipe bpc match,
3698 * true if they don't match).
3699 */
3700static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
3b5c78a3
AJ
3701 unsigned int *pipe_bpp,
3702 struct drm_display_mode *mode)
5a354204
JB
3703{
3704 struct drm_device *dev = crtc->dev;
3705 struct drm_i915_private *dev_priv = dev->dev_private;
3706 struct drm_encoder *encoder;
3707 struct drm_connector *connector;
3708 unsigned int display_bpc = UINT_MAX, bpc;
3709
3710 /* Walk the encoders & connectors on this crtc, get min bpc */
3711 list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
3712 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
3713
3714 if (encoder->crtc != crtc)
3715 continue;
3716
3717 if (intel_encoder->type == INTEL_OUTPUT_LVDS) {
3718 unsigned int lvds_bpc;
3719
3720 if ((I915_READ(PCH_LVDS) & LVDS_A3_POWER_MASK) ==
3721 LVDS_A3_POWER_UP)
3722 lvds_bpc = 8;
3723 else
3724 lvds_bpc = 6;
3725
3726 if (lvds_bpc < display_bpc) {
82820490 3727 DRM_DEBUG_KMS("clamping display bpc (was %d) to LVDS (%d)\n", display_bpc, lvds_bpc);
5a354204
JB
3728 display_bpc = lvds_bpc;
3729 }
3730 continue;
3731 }
3732
3733 if (intel_encoder->type == INTEL_OUTPUT_EDP) {
3734 /* Use VBT settings if we have an eDP panel */
3735 unsigned int edp_bpc = dev_priv->edp.bpp / 3;
3736
3737 if (edp_bpc < display_bpc) {
82820490 3738 DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
5a354204
JB
3739 display_bpc = edp_bpc;
3740 }
3741 continue;
3742 }
3743
3744 /* Not one of the known troublemakers, check the EDID */
3745 list_for_each_entry(connector, &dev->mode_config.connector_list,
3746 head) {
3747 if (connector->encoder != encoder)
3748 continue;
3749
62ac41a6
JB
3750 /* Don't use an invalid EDID bpc value */
3751 if (connector->display_info.bpc &&
3752 connector->display_info.bpc < display_bpc) {
82820490 3753 DRM_DEBUG_KMS("clamping display bpc (was %d) to EDID reported max of %d\n", display_bpc, connector->display_info.bpc);
5a354204
JB
3754 display_bpc = connector->display_info.bpc;
3755 }
3756 }
3757
3758 /*
3759 * HDMI is either 12 or 8, so if the display lets 10bpc sneak
3760 * through, clamp it down. (Note: >12bpc will be caught below.)
3761 */
3762 if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
3763 if (display_bpc > 8 && display_bpc < 12) {
82820490 3764 DRM_DEBUG_KMS("forcing bpc to 12 for HDMI\n");
5a354204
JB
3765 display_bpc = 12;
3766 } else {
82820490 3767 DRM_DEBUG_KMS("forcing bpc to 8 for HDMI\n");
5a354204
JB
3768 display_bpc = 8;
3769 }
3770 }
3771 }
3772
3b5c78a3
AJ
3773 if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
3774 DRM_DEBUG_KMS("Dithering DP to 6bpc\n");
3775 display_bpc = 6;
3776 }
3777
5a354204
JB
3778 /*
3779 * We could just drive the pipe at the highest bpc all the time and
3780 * enable dithering as needed, but that costs bandwidth. So choose
3781 * the minimum value that expresses the full color range of the fb but
3782 * also stays within the max display bpc discovered above.
3783 */
3784
3785 switch (crtc->fb->depth) {
3786 case 8:
3787 bpc = 8; /* since we go through a colormap */
3788 break;
3789 case 15:
3790 case 16:
3791 bpc = 6; /* min is 18bpp */
3792 break;
3793 case 24:
578393cd 3794 bpc = 8;
5a354204
JB
3795 break;
3796 case 30:
578393cd 3797 bpc = 10;
5a354204
JB
3798 break;
3799 case 48:
578393cd 3800 bpc = 12;
5a354204
JB
3801 break;
3802 default:
3803 DRM_DEBUG("unsupported depth, assuming 24 bits\n");
3804 bpc = min((unsigned int)8, display_bpc);
3805 break;
3806 }
3807
578393cd
KP
3808 display_bpc = min(display_bpc, bpc);
3809
82820490
AJ
3810 DRM_DEBUG_KMS("setting pipe bpc to %d (max display bpc %d)\n",
3811 bpc, display_bpc);
5a354204 3812
578393cd 3813 *pipe_bpp = display_bpc * 3;
5a354204
JB
3814
3815 return display_bpc != bpc;
3816}
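/*
 * Editorial sketch (illustrative only, not part of the driver): the core
 * of the clamping policy above, reduced to plain arithmetic. The function
 * name and the single-sink simplification are hypothetical; the real code
 * walks every encoder and connector on the crtc. Example: a 24-bit
 * framebuffer asks for 8 bpc, an 18-bit LVDS panel only accepts 6 bpc, so
 * the pipe runs at 6 bpc (pipe_bpp = 18) and dithering is requested.
 */
static bool example_pick_pipe_bpp(unsigned int fb_bpc, unsigned int sink_bpc,
				  unsigned int *pipe_bpp)
{
	unsigned int display_bpc = min(fb_bpc, sink_bpc);

	*pipe_bpp = display_bpc * 3;
	return display_bpc != fb_bpc;	/* true => dithering needed */
}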
3817
a0c4da24
JB
3818static int vlv_get_refclk(struct drm_crtc *crtc)
3819{
3820 struct drm_device *dev = crtc->dev;
3821 struct drm_i915_private *dev_priv = dev->dev_private;
3822 int refclk = 27000; /* for DP & HDMI */
3823
3824 return 100000; /* only one validated so far */
3825
3826 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) {
3827 refclk = 96000;
3828 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
3829 if (intel_panel_use_ssc(dev_priv))
3830 refclk = 100000;
3831 else
3832 refclk = 96000;
3833 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) {
3834 refclk = 100000;
3835 }
3836
3837 return refclk;
3838}
3839
c65d77d8
JB
3840static int i9xx_get_refclk(struct drm_crtc *crtc, int num_connectors)
3841{
3842 struct drm_device *dev = crtc->dev;
3843 struct drm_i915_private *dev_priv = dev->dev_private;
3844 int refclk;
3845
a0c4da24
JB
3846 if (IS_VALLEYVIEW(dev)) {
3847 refclk = vlv_get_refclk(crtc);
3848 } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
c65d77d8
JB
3849 intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
3850 refclk = dev_priv->lvds_ssc_freq * 1000;
3851 DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
3852 refclk / 1000);
3853 } else if (!IS_GEN2(dev)) {
3854 refclk = 96000;
3855 } else {
3856 refclk = 48000;
3857 }
3858
3859 return refclk;
3860}
3861
3862static void i9xx_adjust_sdvo_tv_clock(struct drm_display_mode *adjusted_mode,
3863 intel_clock_t *clock)
3864{
3865 /* SDVO TV has fixed PLL values that depend on its clock range;
3866 this mirrors the VBIOS setting. */
3867 if (adjusted_mode->clock >= 100000
3868 && adjusted_mode->clock < 140500) {
3869 clock->p1 = 2;
3870 clock->p2 = 10;
3871 clock->n = 3;
3872 clock->m1 = 16;
3873 clock->m2 = 8;
3874 } else if (adjusted_mode->clock >= 140500
3875 && adjusted_mode->clock <= 200000) {
3876 clock->p1 = 1;
3877 clock->p2 = 10;
3878 clock->n = 6;
3879 clock->m1 = 12;
3880 clock->m2 = 8;
3881 }
3882}
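/*
 * Editorial note (illustrative, not part of the driver): plugging either
 * fixed divisor set above into the clock equation quoted in
 * i9xx_crtc_mode_set() below,
 *
 *	dot = refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2
 *
 * with an assumed 96,000 kHz reference clock gives the same result:
 *
 *	set 1: 96000 * (5*18 + 10) / 5 / 2 / 10 = 96,000 kHz
 *	set 2: 96000 * (5*14 + 10) / 8 / 1 / 10 = 96,000 kHz
 *
 * i.e. both divisor sets evaluate to the same ~96 MHz output under that
 * equation, independent of which SDVO TV clock range selected them.
 */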
3883
a7516a05
JB
3884static void i9xx_update_pll_dividers(struct drm_crtc *crtc,
3885 intel_clock_t *clock,
3886 intel_clock_t *reduced_clock)
3887{
3888 struct drm_device *dev = crtc->dev;
3889 struct drm_i915_private *dev_priv = dev->dev_private;
3890 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3891 int pipe = intel_crtc->pipe;
3892 u32 fp, fp2 = 0;
3893
3894 if (IS_PINEVIEW(dev)) {
3895 fp = (1 << clock->n) << 16 | clock->m1 << 8 | clock->m2;
3896 if (reduced_clock)
3897 fp2 = (1 << reduced_clock->n) << 16 |
3898 reduced_clock->m1 << 8 | reduced_clock->m2;
3899 } else {
3900 fp = clock->n << 16 | clock->m1 << 8 | clock->m2;
3901 if (reduced_clock)
3902 fp2 = reduced_clock->n << 16 | reduced_clock->m1 << 8 |
3903 reduced_clock->m2;
3904 }
3905
3906 I915_WRITE(FP0(pipe), fp);
3907
3908 intel_crtc->lowfreq_avail = false;
3909 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
3910 reduced_clock && i915_powersave) {
3911 I915_WRITE(FP1(pipe), fp2);
3912 intel_crtc->lowfreq_avail = true;
3913 } else {
3914 I915_WRITE(FP1(pipe), fp);
3915 }
3916}
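/*
 * Editorial sketch (illustrative only, not part of the driver): the
 * non-Pineview FP0/FP1 packing used above, shown with assumed divisors
 * n = 3, m1 = 16, m2 = 8, which pack to 0x00031008.
 */
static u32 example_pack_fp(u32 n, u32 m1, u32 m2)
{
	return n << 16 | m1 << 8 | m2;	/* Pineview uses (1 << n) << 16 instead */
}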
3917
93e537a1
DV
3918static void intel_update_lvds(struct drm_crtc *crtc, intel_clock_t *clock,
3919 struct drm_display_mode *adjusted_mode)
3920{
3921 struct drm_device *dev = crtc->dev;
3922 struct drm_i915_private *dev_priv = dev->dev_private;
3923 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3924 int pipe = intel_crtc->pipe;
284d5df5 3925 u32 temp;
93e537a1
DV
3926
3927 temp = I915_READ(LVDS);
3928 temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
3929 if (pipe == 1) {
3930 temp |= LVDS_PIPEB_SELECT;
3931 } else {
3932 temp &= ~LVDS_PIPEB_SELECT;
3933 }
3934 /* set the corresponding LVDS_BORDER bit */
3935 temp |= dev_priv->lvds_border_bits;
3936 /* Set the B0-B3 data pairs corresponding to whether we're going to
3937 * set the DPLLs for dual-channel mode or not.
3938 */
3939 if (clock->p2 == 7)
3940 temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
3941 else
3942 temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
3943
3944 /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
3945 * appropriately here, but we need to look more thoroughly into how
3946 * panels behave in the two modes.
3947 */
3948 /* set the dithering flag on LVDS as needed */
3949 if (INTEL_INFO(dev)->gen >= 4) {
3950 if (dev_priv->lvds_dither)
3951 temp |= LVDS_ENABLE_DITHER;
3952 else
3953 temp &= ~LVDS_ENABLE_DITHER;
3954 }
284d5df5 3955 temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
93e537a1 3956 if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
284d5df5 3957 temp |= LVDS_HSYNC_POLARITY;
93e537a1 3958 if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
284d5df5 3959 temp |= LVDS_VSYNC_POLARITY;
93e537a1
DV
3960 I915_WRITE(LVDS, temp);
3961}
3962
a0c4da24
JB
3963static void vlv_update_pll(struct drm_crtc *crtc,
3964 struct drm_display_mode *mode,
3965 struct drm_display_mode *adjusted_mode,
3966 intel_clock_t *clock, intel_clock_t *reduced_clock,
3967 int refclk, int num_connectors)
3968{
3969 struct drm_device *dev = crtc->dev;
3970 struct drm_i915_private *dev_priv = dev->dev_private;
3971 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3972 int pipe = intel_crtc->pipe;
3973 u32 dpll, mdiv, pdiv;
3974 u32 bestn, bestm1, bestm2, bestp1, bestp2;
3975 bool is_hdmi;
3976
3977 is_hdmi = intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI);
3978
3979 bestn = clock->n;
3980 bestm1 = clock->m1;
3981 bestm2 = clock->m2;
3982 bestp1 = clock->p1;
3983 bestp2 = clock->p2;
3984
3985 /* Enable DPIO clock input */
3986 dpll = DPLL_EXT_BUFFER_ENABLE_VLV | DPLL_REFA_CLK_ENABLE_VLV |
3987 DPLL_VGA_MODE_DIS | DPLL_INTEGRATED_CLOCK_VLV;
3988 I915_WRITE(DPLL(pipe), dpll);
3989 POSTING_READ(DPLL(pipe));
3990
3991 mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
3992 mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
3993 mdiv |= ((bestn << DPIO_N_SHIFT));
3994 mdiv |= (1 << DPIO_POST_DIV_SHIFT);
3995 mdiv |= (1 << DPIO_K_SHIFT);
3996 mdiv |= DPIO_ENABLE_CALIBRATION;
3997 intel_dpio_write(dev_priv, DPIO_DIV(pipe), mdiv);
3998
3999 intel_dpio_write(dev_priv, DPIO_CORE_CLK(pipe), 0x01000000);
4000
4001 pdiv = DPIO_REFSEL_OVERRIDE | (5 << DPIO_PLL_MODESEL_SHIFT) |
4002 (3 << DPIO_BIAS_CURRENT_CTL_SHIFT) | (1<<20) |
4003 (8 << DPIO_DRIVER_CTL_SHIFT) | (5 << DPIO_CLK_BIAS_CTL_SHIFT);
4004 intel_dpio_write(dev_priv, DPIO_REFSFR(pipe), pdiv);
4005
4006 intel_dpio_write(dev_priv, DPIO_LFP_COEFF(pipe), 0x009f0051);
4007
4008 dpll |= DPLL_VCO_ENABLE;
4009 I915_WRITE(DPLL(pipe), dpll);
4010 POSTING_READ(DPLL(pipe));
4011 if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
4012 DRM_ERROR("DPLL %d failed to lock\n", pipe);
4013
4014 if (is_hdmi) {
4015 u32 temp = intel_mode_get_pixel_multiplier(adjusted_mode);
4016
4017 if (temp > 1)
4018 temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
4019 else
4020 temp = 0;
4021
4022 I915_WRITE(DPLL_MD(pipe), temp);
4023 POSTING_READ(DPLL_MD(pipe));
4024 }
4025
4026 intel_dpio_write(dev_priv, DPIO_FASTCLK_DISABLE, 0x641); /* ??? */
4027}
4028
eb1cbe48
DV
4029static void i9xx_update_pll(struct drm_crtc *crtc,
4030 struct drm_display_mode *mode,
4031 struct drm_display_mode *adjusted_mode,
4032 intel_clock_t *clock, intel_clock_t *reduced_clock,
4033 int num_connectors)
4034{
4035 struct drm_device *dev = crtc->dev;
4036 struct drm_i915_private *dev_priv = dev->dev_private;
4037 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4038 int pipe = intel_crtc->pipe;
4039 u32 dpll;
4040 bool is_sdvo;
4041
4042 is_sdvo = intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO) ||
4043 intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI);
4044
4045 dpll = DPLL_VGA_MODE_DIS;
4046
4047 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
4048 dpll |= DPLLB_MODE_LVDS;
4049 else
4050 dpll |= DPLLB_MODE_DAC_SERIAL;
4051 if (is_sdvo) {
4052 int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
4053 if (pixel_multiplier > 1) {
4054 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
4055 dpll |= (pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
4056 }
4057 dpll |= DPLL_DVO_HIGH_SPEED;
4058 }
4059 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
4060 dpll |= DPLL_DVO_HIGH_SPEED;
4061
4062 /* compute bitmask from p1 value */
4063 if (IS_PINEVIEW(dev))
4064 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
4065 else {
4066 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
4067 if (IS_G4X(dev) && reduced_clock)
4068 dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
4069 }
4070 switch (clock->p2) {
4071 case 5:
4072 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
4073 break;
4074 case 7:
4075 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
4076 break;
4077 case 10:
4078 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
4079 break;
4080 case 14:
4081 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
4082 break;
4083 }
4084 if (INTEL_INFO(dev)->gen >= 4)
4085 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
4086
4087 if (is_sdvo && intel_pipe_has_type(crtc, INTEL_OUTPUT_TVOUT))
4088 dpll |= PLL_REF_INPUT_TVCLKINBC;
4089 else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_TVOUT))
4090 /* XXX: just matching BIOS for now */
4091 /* dpll |= PLL_REF_INPUT_TVCLKINBC; */
4092 dpll |= 3;
4093 else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
4094 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
4095 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
4096 else
4097 dpll |= PLL_REF_INPUT_DREFCLK;
4098
4099 dpll |= DPLL_VCO_ENABLE;
4100 I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
4101 POSTING_READ(DPLL(pipe));
4102 udelay(150);
4103
4104 /* The LVDS pin pair needs to be on before the DPLLs are enabled.
4105 * This is an exception to the general rule that mode_set doesn't turn
4106 * things on.
4107 */
4108 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
4109 intel_update_lvds(crtc, clock, adjusted_mode);
4110
4111 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT))
4112 intel_dp_set_m_n(crtc, mode, adjusted_mode);
4113
4114 I915_WRITE(DPLL(pipe), dpll);
4115
4116 /* Wait for the clocks to stabilize. */
4117 POSTING_READ(DPLL(pipe));
4118 udelay(150);
4119
4120 if (INTEL_INFO(dev)->gen >= 4) {
4121 u32 temp = 0;
4122 if (is_sdvo) {
4123 temp = intel_mode_get_pixel_multiplier(adjusted_mode);
4124 if (temp > 1)
4125 temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
4126 else
4127 temp = 0;
4128 }
4129 I915_WRITE(DPLL_MD(pipe), temp);
4130 } else {
4131 /* The pixel multiplier can only be updated once the
4132 * DPLL is enabled and the clocks are stable.
4133 *
4134 * So write it again.
4135 */
4136 I915_WRITE(DPLL(pipe), dpll);
4137 }
4138}
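/*
 * Editorial sketch (illustrative only, not part of the driver): the
 * "compute bitmask from p1 value" step above encodes P1 as a one-hot
 * field, e.g. p1 = 1 -> 0x1, p1 = 2 -> 0x2, p1 = 3 -> 0x4, before it is
 * shifted into the DPLL register by DPLL_FPA01_P1_POST_DIV_SHIFT.
 */
static u32 example_p1_field(int p1)
{
	return 1u << (p1 - 1);
}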
4139
4140static void i8xx_update_pll(struct drm_crtc *crtc,
4141 struct drm_display_mode *adjusted_mode,
4142 intel_clock_t *clock,
4143 int num_connectors)
4144{
4145 struct drm_device *dev = crtc->dev;
4146 struct drm_i915_private *dev_priv = dev->dev_private;
4147 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4148 int pipe = intel_crtc->pipe;
4149 u32 dpll;
4150
4151 dpll = DPLL_VGA_MODE_DIS;
4152
4153 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
4154 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
4155 } else {
4156 if (clock->p1 == 2)
4157 dpll |= PLL_P1_DIVIDE_BY_TWO;
4158 else
4159 dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
4160 if (clock->p2 == 4)
4161 dpll |= PLL_P2_DIVIDE_BY_4;
4162 }
4163
4164 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_TVOUT))
4165 /* XXX: just matching BIOS for now */
4166 /* dpll |= PLL_REF_INPUT_TVCLKINBC; */
4167 dpll |= 3;
4168 else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) &&
4169 intel_panel_use_ssc(dev_priv) && num_connectors < 2)
4170 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
4171 else
4172 dpll |= PLL_REF_INPUT_DREFCLK;
4173
4174 dpll |= DPLL_VCO_ENABLE;
4175 I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE);
4176 POSTING_READ(DPLL(pipe));
4177 udelay(150);
4178
4179 I915_WRITE(DPLL(pipe), dpll);
4180
4181 /* Wait for the clocks to stabilize. */
4182 POSTING_READ(DPLL(pipe));
4183 udelay(150);
4184
4185 /* The LVDS pin pair needs to be on before the DPLLs are enabled.
4186 * This is an exception to the general rule that mode_set doesn't turn
4187 * things on.
4188 */
4189 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
4190 intel_update_lvds(crtc, clock, adjusted_mode);
4191
4192 /* The pixel multiplier can only be updated once the
4193 * DPLL is enabled and the clocks are stable.
4194 *
4195 * So write it again.
4196 */
4197 I915_WRITE(DPLL(pipe), dpll);
4198}
4199
f564048e
EA
4200static int i9xx_crtc_mode_set(struct drm_crtc *crtc,
4201 struct drm_display_mode *mode,
4202 struct drm_display_mode *adjusted_mode,
4203 int x, int y,
4204 struct drm_framebuffer *old_fb)
79e53945
JB
4205{
4206 struct drm_device *dev = crtc->dev;
4207 struct drm_i915_private *dev_priv = dev->dev_private;
4208 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4209 int pipe = intel_crtc->pipe;
80824003 4210 int plane = intel_crtc->plane;
c751ce4f 4211 int refclk, num_connectors = 0;
652c393a 4212 intel_clock_t clock, reduced_clock;
eb1cbe48
DV
4213 u32 dspcntr, pipeconf, vsyncshift;
4214 bool ok, has_reduced_clock = false, is_sdvo = false;
4215 bool is_lvds = false, is_tv = false, is_dp = false;
79e53945 4216 struct drm_mode_config *mode_config = &dev->mode_config;
5eddb70b 4217 struct intel_encoder *encoder;
d4906093 4218 const intel_limit_t *limit;
5c3b82e2 4219 int ret;
79e53945 4220
5eddb70b
CW
4221 list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
4222 if (encoder->base.crtc != crtc)
79e53945
JB
4223 continue;
4224
5eddb70b 4225 switch (encoder->type) {
79e53945
JB
4226 case INTEL_OUTPUT_LVDS:
4227 is_lvds = true;
4228 break;
4229 case INTEL_OUTPUT_SDVO:
7d57382e 4230 case INTEL_OUTPUT_HDMI:
79e53945 4231 is_sdvo = true;
5eddb70b 4232 if (encoder->needs_tv_clock)
e2f0ba97 4233 is_tv = true;
79e53945 4234 break;
79e53945
JB
4235 case INTEL_OUTPUT_TVOUT:
4236 is_tv = true;
4237 break;
a4fc5ed6
KP
4238 case INTEL_OUTPUT_DISPLAYPORT:
4239 is_dp = true;
4240 break;
79e53945 4241 }
43565a06 4242
c751ce4f 4243 num_connectors++;
79e53945
JB
4244 }
4245
c65d77d8 4246 refclk = i9xx_get_refclk(crtc, num_connectors);
79e53945 4247
d4906093
ML
4248 /*
4249 * Returns a set of divisors for the desired target clock with the given
4250 * refclk, or FALSE. The returned values represent the clock equation:
4251 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
4252 */
1b894b59 4253 limit = intel_limit(crtc, refclk);
cec2f356
SP
4254 ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
4255 &clock);
79e53945
JB
4256 if (!ok) {
4257 DRM_ERROR("Couldn't find PLL settings for mode!\n");
5c3b82e2 4258 return -EINVAL;
79e53945
JB
4259 }
4260
cda4b7d3 4261 /* Ensure that the cursor is valid for the new mode before changing... */
6b383a7f 4262 intel_crtc_update_cursor(crtc, true);
cda4b7d3 4263
ddc9003c 4264 if (is_lvds && dev_priv->lvds_downclock_avail) {
cec2f356
SP
4265 /*
4266 * Ensure we match the reduced clock's P to the target clock.
4267 * If the clocks don't match, we can't switch the display clock
4268 * by using the FP0/FP1. In such case we will disable the LVDS
4269 * downclock feature.
4270 */
ddc9003c 4271 has_reduced_clock = limit->find_pll(limit, crtc,
5eddb70b
CW
4272 dev_priv->lvds_downclock,
4273 refclk,
cec2f356 4274 &clock,
5eddb70b 4275 &reduced_clock);
7026d4ac
ZW
4276 }
4277
c65d77d8
JB
4278 if (is_sdvo && is_tv)
4279 i9xx_adjust_sdvo_tv_clock(adjusted_mode, &clock);
7026d4ac 4280
a7516a05
JB
4281 i9xx_update_pll_dividers(crtc, &clock, has_reduced_clock ?
4282 &reduced_clock : NULL);
79e53945 4283
eb1cbe48
DV
4284 if (IS_GEN2(dev))
4285 i8xx_update_pll(crtc, adjusted_mode, &clock, num_connectors);
a0c4da24
JB
4286 else if (IS_VALLEYVIEW(dev))
4287 vlv_update_pll(crtc, mode, adjusted_mode, &clock, NULL,
4288 refclk, num_connectors);
79e53945 4289 else
eb1cbe48
DV
4290 i9xx_update_pll(crtc, mode, adjusted_mode, &clock,
4291 has_reduced_clock ? &reduced_clock : NULL,
4292 num_connectors);
79e53945
JB
4293
4294 /* setup pipeconf */
5eddb70b 4295 pipeconf = I915_READ(PIPECONF(pipe));
79e53945
JB
4296
4297 /* Set up the display plane register */
4298 dspcntr = DISPPLANE_GAMMA_ENABLE;
4299
929c77fb
EA
4300 if (pipe == 0)
4301 dspcntr &= ~DISPPLANE_SEL_PIPE_MASK;
4302 else
4303 dspcntr |= DISPPLANE_SEL_PIPE_B;
79e53945 4304
a6c45cf0 4305 if (pipe == 0 && INTEL_INFO(dev)->gen < 4) {
79e53945
JB
4306 /* Enable pixel doubling when the dot clock is > 90% of the (display)
4307 * core speed.
4308 *
4309 * XXX: No double-wide on 915GM pipe B. Is that the only reason for the
4310 * pipe == 0 check?
4311 */
e70236a8
JB
4312 if (mode->clock >
4313 dev_priv->display.get_display_clock_speed(dev) * 9 / 10)
5eddb70b 4314 pipeconf |= PIPECONF_DOUBLE_WIDE;
79e53945 4315 else
5eddb70b 4316 pipeconf &= ~PIPECONF_DOUBLE_WIDE;
79e53945
JB
4317 }
4318
3b5c78a3
AJ
4319 /* default to 8bpc */
4320 pipeconf &= ~(PIPECONF_BPP_MASK | PIPECONF_DITHER_EN);
4321 if (is_dp) {
4322 if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) {
4323 pipeconf |= PIPECONF_BPP_6 |
4324 PIPECONF_DITHER_EN |
4325 PIPECONF_DITHER_TYPE_SP;
4326 }
4327 }
4328
28c97730 4329 DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
79e53945
JB
4330 drm_mode_debug_printmodeline(mode);
4331
a7516a05
JB
4332 if (HAS_PIPE_CXSR(dev)) {
4333 if (intel_crtc->lowfreq_avail) {
28c97730 4334 DRM_DEBUG_KMS("enabling CxSR downclocking\n");
652c393a 4335 pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
a7516a05 4336 } else {
28c97730 4337 DRM_DEBUG_KMS("disabling CxSR downclocking\n");
652c393a
JB
4338 pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK;
4339 }
4340 }
4341
617cf884 4342 pipeconf &= ~PIPECONF_INTERLACE_MASK;
dbb02575
DV
4343 if (!IS_GEN2(dev) &&
4344 adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
734b4157
KH
4345 pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
4346 /* the chip adds 2 halflines automatically */
734b4157 4347 adjusted_mode->crtc_vtotal -= 1;
734b4157 4348 adjusted_mode->crtc_vblank_end -= 1;
0529a0d9
DV
4349 vsyncshift = adjusted_mode->crtc_hsync_start
4350 - adjusted_mode->crtc_htotal/2;
4351 } else {
617cf884 4352 pipeconf |= PIPECONF_PROGRESSIVE;
0529a0d9
DV
4353 vsyncshift = 0;
4354 }
4355
4356 if (!IS_GEN3(dev))
4357 I915_WRITE(VSYNCSHIFT(pipe), vsyncshift);
734b4157 4358
5eddb70b
CW
4359 I915_WRITE(HTOTAL(pipe),
4360 (adjusted_mode->crtc_hdisplay - 1) |
79e53945 4361 ((adjusted_mode->crtc_htotal - 1) << 16));
5eddb70b
CW
4362 I915_WRITE(HBLANK(pipe),
4363 (adjusted_mode->crtc_hblank_start - 1) |
79e53945 4364 ((adjusted_mode->crtc_hblank_end - 1) << 16));
5eddb70b
CW
4365 I915_WRITE(HSYNC(pipe),
4366 (adjusted_mode->crtc_hsync_start - 1) |
79e53945 4367 ((adjusted_mode->crtc_hsync_end - 1) << 16));
5eddb70b
CW
4368
4369 I915_WRITE(VTOTAL(pipe),
4370 (adjusted_mode->crtc_vdisplay - 1) |
79e53945 4371 ((adjusted_mode->crtc_vtotal - 1) << 16));
5eddb70b
CW
4372 I915_WRITE(VBLANK(pipe),
4373 (adjusted_mode->crtc_vblank_start - 1) |
79e53945 4374 ((adjusted_mode->crtc_vblank_end - 1) << 16));
5eddb70b
CW
4375 I915_WRITE(VSYNC(pipe),
4376 (adjusted_mode->crtc_vsync_start - 1) |
79e53945 4377 ((adjusted_mode->crtc_vsync_end - 1) << 16));
5eddb70b
CW
4378
4379 /* pipesrc and dspsize control the size that is scaled from,
4380 * which should always be the user's requested size.
79e53945 4381 */
929c77fb
EA
4382 I915_WRITE(DSPSIZE(plane),
4383 ((mode->vdisplay - 1) << 16) |
4384 (mode->hdisplay - 1));
4385 I915_WRITE(DSPPOS(plane), 0);
5eddb70b
CW
4386 I915_WRITE(PIPESRC(pipe),
4387 ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
2c07245f 4388
f564048e
EA
4389 I915_WRITE(PIPECONF(pipe), pipeconf);
4390 POSTING_READ(PIPECONF(pipe));
929c77fb 4391 intel_enable_pipe(dev_priv, pipe, false);
f564048e
EA
4392
4393 intel_wait_for_vblank(dev, pipe);
4394
f564048e
EA
4395 I915_WRITE(DSPCNTR(plane), dspcntr);
4396 POSTING_READ(DSPCNTR(plane));
4397
4398 ret = intel_pipe_set_base(crtc, x, y, old_fb);
4399
4400 intel_update_watermarks(dev);
4401
f564048e
EA
4402 return ret;
4403}
4404
9fb526db
KP
4405/*
4406 * Initialize reference clocks when the driver loads
4407 */
4408void ironlake_init_pch_refclk(struct drm_device *dev)
13d83a67
JB
4409{
4410 struct drm_i915_private *dev_priv = dev->dev_private;
4411 struct drm_mode_config *mode_config = &dev->mode_config;
13d83a67 4412 struct intel_encoder *encoder;
13d83a67
JB
4413 u32 temp;
4414 bool has_lvds = false;
199e5d79
KP
4415 bool has_cpu_edp = false;
4416 bool has_pch_edp = false;
4417 bool has_panel = false;
99eb6a01
KP
4418 bool has_ck505 = false;
4419 bool can_ssc = false;
13d83a67
JB
4420
4421 /* We need to take the global config into account */
199e5d79
KP
4422 list_for_each_entry(encoder, &mode_config->encoder_list,
4423 base.head) {
4424 switch (encoder->type) {
4425 case INTEL_OUTPUT_LVDS:
4426 has_panel = true;
4427 has_lvds = true;
4428 break;
4429 case INTEL_OUTPUT_EDP:
4430 has_panel = true;
4431 if (intel_encoder_is_pch_edp(&encoder->base))
4432 has_pch_edp = true;
4433 else
4434 has_cpu_edp = true;
4435 break;
13d83a67
JB
4436 }
4437 }
4438
99eb6a01
KP
4439 if (HAS_PCH_IBX(dev)) {
4440 has_ck505 = dev_priv->display_clock_mode;
4441 can_ssc = has_ck505;
4442 } else {
4443 has_ck505 = false;
4444 can_ssc = true;
4445 }
4446
4447 DRM_DEBUG_KMS("has_panel %d has_lvds %d has_pch_edp %d has_cpu_edp %d has_ck505 %d\n",
4448 has_panel, has_lvds, has_pch_edp, has_cpu_edp,
4449 has_ck505);
13d83a67
JB
4450
4451 /* Ironlake: try to set up the display ref clock before DPLL
4452 * enabling. This is only under the driver's control after
4453 * PCH B stepping; previous chipset steppings should
4454 * ignore this setting.
4455 */
4456 temp = I915_READ(PCH_DREF_CONTROL);
4457 /* Always enable nonspread source */
4458 temp &= ~DREF_NONSPREAD_SOURCE_MASK;
13d83a67 4459
99eb6a01
KP
4460 if (has_ck505)
4461 temp |= DREF_NONSPREAD_CK505_ENABLE;
4462 else
4463 temp |= DREF_NONSPREAD_SOURCE_ENABLE;
13d83a67 4464
199e5d79
KP
4465 if (has_panel) {
4466 temp &= ~DREF_SSC_SOURCE_MASK;
4467 temp |= DREF_SSC_SOURCE_ENABLE;
13d83a67 4468
199e5d79 4469 /* SSC must be turned on before enabling the CPU output */
99eb6a01 4470 if (intel_panel_use_ssc(dev_priv) && can_ssc) {
199e5d79 4471 DRM_DEBUG_KMS("Using SSC on panel\n");
13d83a67 4472 temp |= DREF_SSC1_ENABLE;
e77166b5
DV
4473 } else
4474 temp &= ~DREF_SSC1_ENABLE;
199e5d79
KP
4475
4476 /* Get SSC going before enabling the outputs */
4477 I915_WRITE(PCH_DREF_CONTROL, temp);
4478 POSTING_READ(PCH_DREF_CONTROL);
4479 udelay(200);
4480
13d83a67
JB
4481 temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
4482
4483 /* Enable CPU source on CPU attached eDP */
199e5d79 4484 if (has_cpu_edp) {
99eb6a01 4485 if (intel_panel_use_ssc(dev_priv) && can_ssc) {
199e5d79 4486 DRM_DEBUG_KMS("Using SSC on eDP\n");
13d83a67 4487 temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
199e5d79 4488 }
13d83a67
JB
4489 else
4490 temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
199e5d79
KP
4491 } else
4492 temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
4493
4494 I915_WRITE(PCH_DREF_CONTROL, temp);
4495 POSTING_READ(PCH_DREF_CONTROL);
4496 udelay(200);
4497 } else {
4498 DRM_DEBUG_KMS("Disabling SSC entirely\n");
4499
4500 temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
4501
4502 /* Turn off CPU output */
4503 temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
4504
4505 I915_WRITE(PCH_DREF_CONTROL, temp);
4506 POSTING_READ(PCH_DREF_CONTROL);
4507 udelay(200);
4508
4509 /* Turn off the SSC source */
4510 temp &= ~DREF_SSC_SOURCE_MASK;
4511 temp |= DREF_SSC_SOURCE_DISABLE;
4512
4513 /* Turn off SSC1 */
4514 temp &= ~ DREF_SSC1_ENABLE;
4515
13d83a67
JB
4516 I915_WRITE(PCH_DREF_CONTROL, temp);
4517 POSTING_READ(PCH_DREF_CONTROL);
4518 udelay(200);
4519 }
4520}
4521
d9d444cb
JB
4522static int ironlake_get_refclk(struct drm_crtc *crtc)
4523{
4524 struct drm_device *dev = crtc->dev;
4525 struct drm_i915_private *dev_priv = dev->dev_private;
4526 struct intel_encoder *encoder;
4527 struct drm_mode_config *mode_config = &dev->mode_config;
4528 struct intel_encoder *edp_encoder = NULL;
4529 int num_connectors = 0;
4530 bool is_lvds = false;
4531
4532 list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
4533 if (encoder->base.crtc != crtc)
4534 continue;
4535
4536 switch (encoder->type) {
4537 case INTEL_OUTPUT_LVDS:
4538 is_lvds = true;
4539 break;
4540 case INTEL_OUTPUT_EDP:
4541 edp_encoder = encoder;
4542 break;
4543 }
4544 num_connectors++;
4545 }
4546
4547 if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) {
4548 DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n",
4549 dev_priv->lvds_ssc_freq);
4550 return dev_priv->lvds_ssc_freq * 1000;
4551 }
4552
4553 return 120000;
4554}
4555
f564048e
EA
4556static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
4557 struct drm_display_mode *mode,
4558 struct drm_display_mode *adjusted_mode,
4559 int x, int y,
4560 struct drm_framebuffer *old_fb)
79e53945
JB
4561{
4562 struct drm_device *dev = crtc->dev;
4563 struct drm_i915_private *dev_priv = dev->dev_private;
4564 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4565 int pipe = intel_crtc->pipe;
80824003 4566 int plane = intel_crtc->plane;
c751ce4f 4567 int refclk, num_connectors = 0;
652c393a 4568 intel_clock_t clock, reduced_clock;
5eddb70b 4569 u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf;
a07d6787 4570 bool ok, has_reduced_clock = false, is_sdvo = false;
a4fc5ed6 4571 bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false;
79e53945 4572 struct drm_mode_config *mode_config = &dev->mode_config;
e3aef172 4573 struct intel_encoder *encoder, *edp_encoder = NULL;
d4906093 4574 const intel_limit_t *limit;
5c3b82e2 4575 int ret;
2c07245f 4576 struct fdi_m_n m_n = {0};
fae14981 4577 u32 temp;
5a354204
JB
4578 int target_clock, pixel_multiplier, lane, link_bw, factor;
4579 unsigned int pipe_bpp;
4580 bool dither;
e3aef172 4581 bool is_cpu_edp = false, is_pch_edp = false;
79e53945 4582
5eddb70b
CW
4583 list_for_each_entry(encoder, &mode_config->encoder_list, base.head) {
4584 if (encoder->base.crtc != crtc)
79e53945
JB
4585 continue;
4586
5eddb70b 4587 switch (encoder->type) {
79e53945
JB
4588 case INTEL_OUTPUT_LVDS:
4589 is_lvds = true;
4590 break;
4591 case INTEL_OUTPUT_SDVO:
7d57382e 4592 case INTEL_OUTPUT_HDMI:
79e53945 4593 is_sdvo = true;
5eddb70b 4594 if (encoder->needs_tv_clock)
e2f0ba97 4595 is_tv = true;
79e53945 4596 break;
79e53945
JB
4597 case INTEL_OUTPUT_TVOUT:
4598 is_tv = true;
4599 break;
4600 case INTEL_OUTPUT_ANALOG:
4601 is_crt = true;
4602 break;
a4fc5ed6
KP
4603 case INTEL_OUTPUT_DISPLAYPORT:
4604 is_dp = true;
4605 break;
32f9d658 4606 case INTEL_OUTPUT_EDP:
e3aef172
JB
4607 is_dp = true;
4608 if (intel_encoder_is_pch_edp(&encoder->base))
4609 is_pch_edp = true;
4610 else
4611 is_cpu_edp = true;
4612 edp_encoder = encoder;
32f9d658 4613 break;
79e53945 4614 }
43565a06 4615
c751ce4f 4616 num_connectors++;
79e53945
JB
4617 }
4618
d9d444cb 4619 refclk = ironlake_get_refclk(crtc);
79e53945 4620
d4906093
ML
4621 /*
4622 * Returns a set of divisors for the desired target clock with the given
4623 * refclk, or FALSE. The returned values represent the clock equation:
4624 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
4625 */
1b894b59 4626 limit = intel_limit(crtc, refclk);
cec2f356
SP
4627 ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, NULL,
4628 &clock);
79e53945
JB
4629 if (!ok) {
4630 DRM_ERROR("Couldn't find PLL settings for mode!\n");
5c3b82e2 4631 return -EINVAL;
79e53945
JB
4632 }
4633
cda4b7d3 4634 /* Ensure that the cursor is valid for the new mode before changing... */
6b383a7f 4635 intel_crtc_update_cursor(crtc, true);
cda4b7d3 4636
ddc9003c 4637 if (is_lvds && dev_priv->lvds_downclock_avail) {
cec2f356
SP
4638 /*
4639 * Ensure we match the reduced clock's P to the target clock.
4640 * If the clocks don't match, we can't switch the display clock
4641 * by using the FP0/FP1. In such case we will disable the LVDS
4642 * downclock feature.
4643 */
ddc9003c 4644 has_reduced_clock = limit->find_pll(limit, crtc,
5eddb70b
CW
4645 dev_priv->lvds_downclock,
4646 refclk,
cec2f356 4647 &clock,
5eddb70b 4648 &reduced_clock);
652c393a 4649 }
61e9653f
DV
4650
4651 if (is_sdvo && is_tv)
4652 i9xx_adjust_sdvo_tv_clock(adjusted_mode, &clock);
4653
7026d4ac 4654
2c07245f 4655 /* FDI link */
8febb297
EA
4656 pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
4657 lane = 0;
4658 /* CPU eDP doesn't require FDI link, so just set DP M/N
4659 according to current link config */
e3aef172 4660 if (is_cpu_edp) {
e3aef172 4661 intel_edp_link_config(edp_encoder, &lane, &link_bw);
8febb297 4662 } else {
8febb297
EA
4663 /* FDI is a binary signal running at ~2.7GHz, encoding
4664 * each output octet as 10 bits. The actual frequency
4665 * is stored as a divider into a 100MHz clock, and the
4666 * mode pixel clock is stored in units of 1KHz.
4667 * Hence the bw of each lane in terms of the mode signal
4668 * is:
4669 */
4670 link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10;
4671 }
58a27471 4672
94bf2ced
DV
4673 /* [e]DP over FDI requires target mode clock instead of link clock. */
4674 if (edp_encoder)
4675 target_clock = intel_edp_target_clock(edp_encoder, mode);
4676 else if (is_dp)
4677 target_clock = mode->clock;
4678 else
4679 target_clock = adjusted_mode->clock;
4680
8febb297
EA
4681 /* determine panel color depth */
4682 temp = I915_READ(PIPECONF(pipe));
4683 temp &= ~PIPE_BPC_MASK;
3b5c78a3 4684 dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp, mode);
5a354204
JB
4685 switch (pipe_bpp) {
4686 case 18:
4687 temp |= PIPE_6BPC;
8febb297 4688 break;
5a354204
JB
4689 case 24:
4690 temp |= PIPE_8BPC;
8febb297 4691 break;
5a354204
JB
4692 case 30:
4693 temp |= PIPE_10BPC;
8febb297 4694 break;
5a354204
JB
4695 case 36:
4696 temp |= PIPE_12BPC;
8febb297
EA
4697 break;
4698 default:
62ac41a6
JB
4699 WARN(1, "intel_choose_pipe_bpp returned invalid value %d\n",
4700 pipe_bpp);
5a354204
JB
4701 temp |= PIPE_8BPC;
4702 pipe_bpp = 24;
4703 break;
8febb297 4704 }
77ffb597 4705
5a354204
JB
4706 intel_crtc->bpp = pipe_bpp;
4707 I915_WRITE(PIPECONF(pipe), temp);
4708
8febb297
EA
4709 if (!lane) {
4710 /*
4711 * Account for spread spectrum to avoid
4712 * oversubscribing the link. Max center spread
4713 * is 2.5%; use 5% for safety's sake.
4714 */
5a354204 4715 u32 bps = target_clock * intel_crtc->bpp * 21 / 20;
8febb297 4716 lane = bps / (link_bw * 8) + 1;
5eb08b69 4717 }
2c07245f 4718
8febb297
EA
4719 intel_crtc->fdi_lanes = lane;
4720
4721 if (pixel_multiplier > 1)
4722 link_bw *= pixel_multiplier;
5a354204
JB
4723 ironlake_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw,
4724 &m_n);
8febb297 4725
a07d6787
EA
4726 fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
4727 if (has_reduced_clock)
4728 fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 |
4729 reduced_clock.m2;
79e53945 4730
c1858123 4731 /* Enable autotuning of the PLL clock (if permissible) */
8febb297
EA
4732 factor = 21;
4733 if (is_lvds) {
4734 if ((intel_panel_use_ssc(dev_priv) &&
4735 dev_priv->lvds_ssc_freq == 100) ||
4736 (I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP)
4737 factor = 25;
4738 } else if (is_sdvo && is_tv)
4739 factor = 20;
c1858123 4740
cb0e0931 4741 if (clock.m < factor * clock.n)
8febb297 4742 fp |= FP_CB_TUNE;
2c07245f 4743
5eddb70b 4744 dpll = 0;
2c07245f 4745
a07d6787
EA
4746 if (is_lvds)
4747 dpll |= DPLLB_MODE_LVDS;
4748 else
4749 dpll |= DPLLB_MODE_DAC_SERIAL;
4750 if (is_sdvo) {
4751 int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
4752 if (pixel_multiplier > 1) {
4753 dpll |= (pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
79e53945 4754 }
a07d6787
EA
4755 dpll |= DPLL_DVO_HIGH_SPEED;
4756 }
e3aef172 4757 if (is_dp && !is_cpu_edp)
a07d6787 4758 dpll |= DPLL_DVO_HIGH_SPEED;
79e53945 4759
a07d6787
EA
4760 /* compute bitmask from p1 value */
4761 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
4762 /* also FPA1 */
4763 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
4764
4765 switch (clock.p2) {
4766 case 5:
4767 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
4768 break;
4769 case 7:
4770 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
4771 break;
4772 case 10:
4773 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
4774 break;
4775 case 14:
4776 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
4777 break;
79e53945
JB
4778 }
4779
43565a06
KH
4780 if (is_sdvo && is_tv)
4781 dpll |= PLL_REF_INPUT_TVCLKINBC;
4782 else if (is_tv)
79e53945 4783 /* XXX: just matching BIOS for now */
43565a06 4784 /* dpll |= PLL_REF_INPUT_TVCLKINBC; */
79e53945 4785 dpll |= 3;
a7615030 4786 else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2)
43565a06 4787 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
79e53945
JB
4788 else
4789 dpll |= PLL_REF_INPUT_DREFCLK;
4790
4791 /* setup pipeconf */
5eddb70b 4792 pipeconf = I915_READ(PIPECONF(pipe));
79e53945
JB
4793
4794 /* Set up the display plane register */
4795 dspcntr = DISPPLANE_GAMMA_ENABLE;
4796
f7cb34d4 4797 DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe);
79e53945
JB
4798 drm_mode_debug_printmodeline(mode);
4799
9d82aa17
ED
4800 /* CPU eDP is the only output that doesn't need a PCH PLL of its own on
4801 * pre-Haswell/LPT generation */
4802 if (HAS_PCH_LPT(dev)) {
4803 DRM_DEBUG_KMS("LPT detected: no PLL for pipe %d necessary\n",
4804 pipe);
4805 } else if (!is_cpu_edp) {
ee7b9f93 4806 struct intel_pch_pll *pll;
4b645f14 4807
ee7b9f93
JB
4808 pll = intel_get_pch_pll(intel_crtc, dpll, fp);
4809 if (pll == NULL) {
4810 DRM_DEBUG_DRIVER("failed to find PLL for pipe %d\n",
4811 pipe);
4b645f14
JB
4812 return -EINVAL;
4813 }
ee7b9f93
JB
4814 } else
4815 intel_put_pch_pll(intel_crtc);
79e53945
JB
4816
4817 /* The LVDS pin pair needs to be on before the DPLLs are enabled.
4818 * This is an exception to the general rule that mode_set doesn't turn
4819 * things on.
4820 */
4821 if (is_lvds) {
fae14981 4822 temp = I915_READ(PCH_LVDS);
5eddb70b 4823 temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
7885d205
JB
4824 if (HAS_PCH_CPT(dev)) {
4825 temp &= ~PORT_TRANS_SEL_MASK;
4b645f14 4826 temp |= PORT_TRANS_SEL_CPT(pipe);
7885d205
JB
4827 } else {
4828 if (pipe == 1)
4829 temp |= LVDS_PIPEB_SELECT;
4830 else
4831 temp &= ~LVDS_PIPEB_SELECT;
4832 }
4b645f14 4833
a3e17eb8 4834 /* set the corresponding LVDS_BORDER bit */
5eddb70b 4835 temp |= dev_priv->lvds_border_bits;
79e53945
JB
4836 /* Set the B0-B3 data pairs corresponding to whether we're going to
4837 * set the DPLLs for dual-channel mode or not.
4838 */
4839 if (clock.p2 == 7)
5eddb70b 4840 temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP;
79e53945 4841 else
5eddb70b 4842 temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP);
79e53945
JB
4843
4844 /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP)
4845 * appropriately here, but we need to look more thoroughly into how
4846 * panels behave in the two modes.
4847 */
284d5df5 4848 temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY);
aa9b500d 4849 if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC)
284d5df5 4850 temp |= LVDS_HSYNC_POLARITY;
aa9b500d 4851 if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
284d5df5 4852 temp |= LVDS_VSYNC_POLARITY;
fae14981 4853 I915_WRITE(PCH_LVDS, temp);
79e53945 4854 }
434ed097 4855
8febb297
EA
4856 pipeconf &= ~PIPECONF_DITHER_EN;
4857 pipeconf &= ~PIPECONF_DITHER_TYPE_MASK;
5a354204 4858 if ((is_lvds && dev_priv->lvds_dither) || dither) {
8febb297 4859 pipeconf |= PIPECONF_DITHER_EN;
f74974c7 4860 pipeconf |= PIPECONF_DITHER_TYPE_SP;
434ed097 4861 }
e3aef172 4862 if (is_dp && !is_cpu_edp) {
a4fc5ed6 4863 intel_dp_set_m_n(crtc, mode, adjusted_mode);
8febb297 4864 } else {
8db9d77b 4865 /* For non-DP output, clear any trans DP clock recovery setting.*/
9db4a9c7
JB
4866 I915_WRITE(TRANSDATA_M1(pipe), 0);
4867 I915_WRITE(TRANSDATA_N1(pipe), 0);
4868 I915_WRITE(TRANSDPLINK_M1(pipe), 0);
4869 I915_WRITE(TRANSDPLINK_N1(pipe), 0);
8db9d77b 4870 }
79e53945 4871
ee7b9f93
JB
4872 if (intel_crtc->pch_pll) {
4873 I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
5eddb70b 4874
32f9d658 4875 /* Wait for the clocks to stabilize. */
ee7b9f93 4876 POSTING_READ(intel_crtc->pch_pll->pll_reg);
32f9d658
ZW
4877 udelay(150);
4878
8febb297
EA
4879 /* The pixel multiplier can only be updated once the
4880 * DPLL is enabled and the clocks are stable.
4881 *
4882 * So write it again.
4883 */
ee7b9f93 4884 I915_WRITE(intel_crtc->pch_pll->pll_reg, dpll);
79e53945 4885 }
79e53945 4886
5eddb70b 4887 intel_crtc->lowfreq_avail = false;
ee7b9f93 4888 if (intel_crtc->pch_pll) {
4b645f14 4889 if (is_lvds && has_reduced_clock && i915_powersave) {
ee7b9f93 4890 I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp2);
4b645f14 4891 intel_crtc->lowfreq_avail = true;
4b645f14 4892 } else {
ee7b9f93 4893 I915_WRITE(intel_crtc->pch_pll->fp1_reg, fp);
652c393a
JB
4894 }
4895 }
4896
617cf884 4897 pipeconf &= ~PIPECONF_INTERLACE_MASK;
734b4157 4898 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
5def474e 4899 pipeconf |= PIPECONF_INTERLACED_ILK;
734b4157 4900 /* the chip adds 2 halflines automatically */
734b4157 4901 adjusted_mode->crtc_vtotal -= 1;
734b4157 4902 adjusted_mode->crtc_vblank_end -= 1;
0529a0d9
DV
4903 I915_WRITE(VSYNCSHIFT(pipe),
4904 adjusted_mode->crtc_hsync_start
4905 - adjusted_mode->crtc_htotal/2);
4906 } else {
617cf884 4907 pipeconf |= PIPECONF_PROGRESSIVE;
0529a0d9
DV
4908 I915_WRITE(VSYNCSHIFT(pipe), 0);
4909 }
734b4157 4910
5eddb70b
CW
4911 I915_WRITE(HTOTAL(pipe),
4912 (adjusted_mode->crtc_hdisplay - 1) |
79e53945 4913 ((adjusted_mode->crtc_htotal - 1) << 16));
5eddb70b
CW
4914 I915_WRITE(HBLANK(pipe),
4915 (adjusted_mode->crtc_hblank_start - 1) |
79e53945 4916 ((adjusted_mode->crtc_hblank_end - 1) << 16));
5eddb70b
CW
4917 I915_WRITE(HSYNC(pipe),
4918 (adjusted_mode->crtc_hsync_start - 1) |
79e53945 4919 ((adjusted_mode->crtc_hsync_end - 1) << 16));
5eddb70b
CW
4920
4921 I915_WRITE(VTOTAL(pipe),
4922 (adjusted_mode->crtc_vdisplay - 1) |
79e53945 4923 ((adjusted_mode->crtc_vtotal - 1) << 16));
5eddb70b
CW
4924 I915_WRITE(VBLANK(pipe),
4925 (adjusted_mode->crtc_vblank_start - 1) |
79e53945 4926 ((adjusted_mode->crtc_vblank_end - 1) << 16));
5eddb70b
CW
4927 I915_WRITE(VSYNC(pipe),
4928 (adjusted_mode->crtc_vsync_start - 1) |
79e53945 4929 ((adjusted_mode->crtc_vsync_end - 1) << 16));
5eddb70b 4930
8febb297
EA
4931 /* pipesrc controls the size that is scaled from, which should
4932 * always be the user's requested size.
79e53945 4933 */
5eddb70b
CW
4934 I915_WRITE(PIPESRC(pipe),
4935 ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
2c07245f 4936
8febb297
EA
4937 I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m);
4938 I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n);
4939 I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m);
4940 I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n);
2c07245f 4941
e3aef172 4942 if (is_cpu_edp)
8febb297 4943 ironlake_set_pll_edp(crtc, adjusted_mode->clock);
2c07245f 4944
5eddb70b
CW
4945 I915_WRITE(PIPECONF(pipe), pipeconf);
4946 POSTING_READ(PIPECONF(pipe));
79e53945 4947
9d0498a2 4948 intel_wait_for_vblank(dev, pipe);
79e53945 4949
5eddb70b 4950 I915_WRITE(DSPCNTR(plane), dspcntr);
b24e7179 4951 POSTING_READ(DSPCNTR(plane));
79e53945 4952
5c3b82e2 4953 ret = intel_pipe_set_base(crtc, x, y, old_fb);
7662c8bd
SL
4954
4955 intel_update_watermarks(dev);
4956
1f8eeabf
ED
4957 intel_update_linetime_watermarks(dev, pipe, adjusted_mode);
4958
1f803ee5 4959 return ret;
79e53945
JB
4960}
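/*
 * Editorial note (illustrative, not part of the driver): the FDI lane
 * count chosen inside ironlake_crtc_mode_set() above, worked through with
 * assumed numbers -- 1080p at a 148,500 kHz target clock, 24 bpp, and
 * link_bw = 270,000 (the usual 2.7 GHz FDI link expressed in the same
 * kHz-style units):
 *
 *	bps  = 148500 * 24 * 21 / 20      = 3,742,200
 *	lane = 3742200 / (270000 * 8) + 1 = 1 + 1 = 2 lanes
 *
 * The 21/20 factor is the 5% spread-spectrum headroom mentioned in the
 * comment above that computation.
 */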
4961
f564048e
EA
4962static int intel_crtc_mode_set(struct drm_crtc *crtc,
4963 struct drm_display_mode *mode,
4964 struct drm_display_mode *adjusted_mode,
4965 int x, int y,
4966 struct drm_framebuffer *old_fb)
4967{
4968 struct drm_device *dev = crtc->dev;
4969 struct drm_i915_private *dev_priv = dev->dev_private;
0b701d27
EA
4970 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4971 int pipe = intel_crtc->pipe;
f564048e
EA
4972 int ret;
4973
0b701d27 4974 drm_vblank_pre_modeset(dev, pipe);
7662c8bd 4975
f564048e
EA
4976 ret = dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode,
4977 x, y, old_fb);
79e53945 4978 drm_vblank_post_modeset(dev, pipe);
5c3b82e2 4979
d8e70a25
JB
4980 if (ret)
4981 intel_crtc->dpms_mode = DRM_MODE_DPMS_OFF;
4982 else
4983 intel_crtc->dpms_mode = DRM_MODE_DPMS_ON;
120eced9 4984
1f803ee5 4985 return ret;
79e53945
JB
4986}
4987
3a9627f4
WF
4988static bool intel_eld_uptodate(struct drm_connector *connector,
4989 int reg_eldv, uint32_t bits_eldv,
4990 int reg_elda, uint32_t bits_elda,
4991 int reg_edid)
4992{
4993 struct drm_i915_private *dev_priv = connector->dev->dev_private;
4994 uint8_t *eld = connector->eld;
4995 uint32_t i;
4996
4997 i = I915_READ(reg_eldv);
4998 i &= bits_eldv;
4999
5000 if (!eld[0])
5001 return !i;
5002
5003 if (!i)
5004 return false;
5005
5006 i = I915_READ(reg_elda);
5007 i &= ~bits_elda;
5008 I915_WRITE(reg_elda, i);
5009
5010 for (i = 0; i < eld[2]; i++)
5011 if (I915_READ(reg_edid) != *((uint32_t *)eld + i))
5012 return false;
5013
5014 return true;
5015}
5016
e0dac65e
WF
5017static void g4x_write_eld(struct drm_connector *connector,
5018 struct drm_crtc *crtc)
5019{
5020 struct drm_i915_private *dev_priv = connector->dev->dev_private;
5021 uint8_t *eld = connector->eld;
5022 uint32_t eldv;
5023 uint32_t len;
5024 uint32_t i;
5025
5026 i = I915_READ(G4X_AUD_VID_DID);
5027
5028 if (i == INTEL_AUDIO_DEVBLC || i == INTEL_AUDIO_DEVCL)
5029 eldv = G4X_ELDV_DEVCL_DEVBLC;
5030 else
5031 eldv = G4X_ELDV_DEVCTG;
5032
3a9627f4
WF
5033 if (intel_eld_uptodate(connector,
5034 G4X_AUD_CNTL_ST, eldv,
5035 G4X_AUD_CNTL_ST, G4X_ELD_ADDR,
5036 G4X_HDMIW_HDMIEDID))
5037 return;
5038
e0dac65e
WF
5039 i = I915_READ(G4X_AUD_CNTL_ST);
5040 i &= ~(eldv | G4X_ELD_ADDR);
5041 len = (i >> 9) & 0x1f; /* ELD buffer size */
5042 I915_WRITE(G4X_AUD_CNTL_ST, i);
5043
5044 if (!eld[0])
5045 return;
5046
5047 len = min_t(uint8_t, eld[2], len);
5048 DRM_DEBUG_DRIVER("ELD size %d\n", len);
5049 for (i = 0; i < len; i++)
5050 I915_WRITE(G4X_HDMIW_HDMIEDID, *((uint32_t *)eld + i));
5051
5052 i = I915_READ(G4X_AUD_CNTL_ST);
5053 i |= eldv;
5054 I915_WRITE(G4X_AUD_CNTL_ST, i);
5055}
5056
5057static void ironlake_write_eld(struct drm_connector *connector,
5058 struct drm_crtc *crtc)
5059{
5060 struct drm_i915_private *dev_priv = connector->dev->dev_private;
5061 uint8_t *eld = connector->eld;
5062 uint32_t eldv;
5063 uint32_t i;
5064 int len;
5065 int hdmiw_hdmiedid;
b6daa025 5066 int aud_config;
e0dac65e
WF
5067 int aud_cntl_st;
5068 int aud_cntrl_st2;
5069
b3f33cbf 5070 if (HAS_PCH_IBX(connector->dev)) {
1202b4c6 5071 hdmiw_hdmiedid = IBX_HDMIW_HDMIEDID_A;
b6daa025 5072 aud_config = IBX_AUD_CONFIG_A;
1202b4c6
WF
5073 aud_cntl_st = IBX_AUD_CNTL_ST_A;
5074 aud_cntrl_st2 = IBX_AUD_CNTL_ST2;
e0dac65e 5075 } else {
1202b4c6 5076 hdmiw_hdmiedid = CPT_HDMIW_HDMIEDID_A;
b6daa025 5077 aud_config = CPT_AUD_CONFIG_A;
1202b4c6
WF
5078 aud_cntl_st = CPT_AUD_CNTL_ST_A;
5079 aud_cntrl_st2 = CPT_AUD_CNTRL_ST2;
e0dac65e
WF
5080 }
5081
5082 i = to_intel_crtc(crtc)->pipe;
5083 hdmiw_hdmiedid += i * 0x100;
5084 aud_cntl_st += i * 0x100;
b6daa025 5085 aud_config += i * 0x100;
e0dac65e
WF
5086
5087 DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(i));
5088
5089 i = I915_READ(aud_cntl_st);
5090 i = (i >> 29) & 0x3; /* DIP_Port_Select, 0x1 = PortB */
5091 if (!i) {
5092 DRM_DEBUG_DRIVER("Audio directed to unknown port\n");
5093 /* operate blindly on all ports */
1202b4c6
WF
5094 eldv = IBX_ELD_VALIDB;
5095 eldv |= IBX_ELD_VALIDB << 4;
5096 eldv |= IBX_ELD_VALIDB << 8;
e0dac65e
WF
5097 } else {
5098 DRM_DEBUG_DRIVER("ELD on port %c\n", 'A' + i);
1202b4c6 5099 eldv = IBX_ELD_VALIDB << ((i - 1) * 4);
e0dac65e
WF
5100 }
5101
3a9627f4
WF
5102 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
5103 DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
5104 eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */
b6daa025
WF
5105 I915_WRITE(aud_config, AUD_CONFIG_N_VALUE_INDEX); /* 0x1 = DP */
5106 } else
5107 I915_WRITE(aud_config, 0);
e0dac65e 5108
3a9627f4
WF
5109 if (intel_eld_uptodate(connector,
5110 aud_cntrl_st2, eldv,
5111 aud_cntl_st, IBX_ELD_ADDRESS,
5112 hdmiw_hdmiedid))
5113 return;
5114
e0dac65e
WF
5115 i = I915_READ(aud_cntrl_st2);
5116 i &= ~eldv;
5117 I915_WRITE(aud_cntrl_st2, i);
5118
5119 if (!eld[0])
5120 return;
5121
e0dac65e 5122 i = I915_READ(aud_cntl_st);
1202b4c6 5123 i &= ~IBX_ELD_ADDRESS;
e0dac65e
WF
5124 I915_WRITE(aud_cntl_st, i);
5125
5126 len = min_t(uint8_t, eld[2], 21); /* 84 bytes of hw ELD buffer */
5127 DRM_DEBUG_DRIVER("ELD size %d\n", len);
5128 for (i = 0; i < len; i++)
5129 I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i));
5130
5131 i = I915_READ(aud_cntrl_st2);
5132 i |= eldv;
5133 I915_WRITE(aud_cntrl_st2, i);
5134}
5135
5136void intel_write_eld(struct drm_encoder *encoder,
5137 struct drm_display_mode *mode)
5138{
5139 struct drm_crtc *crtc = encoder->crtc;
5140 struct drm_connector *connector;
5141 struct drm_device *dev = encoder->dev;
5142 struct drm_i915_private *dev_priv = dev->dev_private;
5143
5144 connector = drm_select_eld(encoder, mode);
5145 if (!connector)
5146 return;
5147
5148 DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
5149 connector->base.id,
5150 drm_get_connector_name(connector),
5151 connector->encoder->base.id,
5152 drm_get_encoder_name(connector->encoder));
5153
5154 connector->eld[6] = drm_av_sync_delay(connector, mode) / 2;
5155
5156 if (dev_priv->display.write_eld)
5157 dev_priv->display.write_eld(connector, crtc);
5158}
5159
79e53945
JB
5160/** Loads the palette/gamma unit for the CRTC with the prepared values */
5161void intel_crtc_load_lut(struct drm_crtc *crtc)
5162{
5163 struct drm_device *dev = crtc->dev;
5164 struct drm_i915_private *dev_priv = dev->dev_private;
5165 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
9db4a9c7 5166 int palreg = PALETTE(intel_crtc->pipe);
79e53945
JB
5167 int i;
5168
5169 /* The clocks have to be on to load the palette. */
aed3f09d 5170 if (!crtc->enabled || !intel_crtc->active)
79e53945
JB
5171 return;
5172
f2b115e6 5173 /* use legacy palette for Ironlake */
bad720ff 5174 if (HAS_PCH_SPLIT(dev))
9db4a9c7 5175 palreg = LGC_PALETTE(intel_crtc->pipe);
2c07245f 5176
79e53945
JB
5177 for (i = 0; i < 256; i++) {
5178 I915_WRITE(palreg + 4 * i,
5179 (intel_crtc->lut_r[i] << 16) |
5180 (intel_crtc->lut_g[i] << 8) |
5181 intel_crtc->lut_b[i]);
5182 }
5183}
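
/*
 * For illustration: each of the 256 palette entries written above packs the
 * 8-bit gamma values as (R << 16) | (G << 8) | B, so e.g. lut_r = 0xff,
 * lut_g = 0x80, lut_b = 0x10 ends up as 0x00ff8010 in PALETTE(pipe) + 4*i.
 */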
5184
560b85bb
CW
5185static void i845_update_cursor(struct drm_crtc *crtc, u32 base)
5186{
5187 struct drm_device *dev = crtc->dev;
5188 struct drm_i915_private *dev_priv = dev->dev_private;
5189 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5190 bool visible = base != 0;
5191 u32 cntl;
5192
5193 if (intel_crtc->cursor_visible == visible)
5194 return;
5195
9db4a9c7 5196 cntl = I915_READ(_CURACNTR);
560b85bb
CW
5197 if (visible) {
5198 /* On these chipsets we can only modify the base whilst
5199 * the cursor is disabled.
5200 */
9db4a9c7 5201 I915_WRITE(_CURABASE, base);
560b85bb
CW
5202
5203 cntl &= ~(CURSOR_FORMAT_MASK);
5204 /* XXX width must be 64, stride 256 => 0x00 << 28 */
5205 cntl |= CURSOR_ENABLE |
5206 CURSOR_GAMMA_ENABLE |
5207 CURSOR_FORMAT_ARGB;
5208 } else
5209 cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE);
9db4a9c7 5210 I915_WRITE(_CURACNTR, cntl);
560b85bb
CW
5211
5212 intel_crtc->cursor_visible = visible;
5213}
5214
5215static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base)
5216{
5217 struct drm_device *dev = crtc->dev;
5218 struct drm_i915_private *dev_priv = dev->dev_private;
5219 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5220 int pipe = intel_crtc->pipe;
5221 bool visible = base != 0;
5222
5223 if (intel_crtc->cursor_visible != visible) {
548f245b 5224 uint32_t cntl = I915_READ(CURCNTR(pipe));
560b85bb
CW
5225 if (base) {
5226 cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT);
5227 cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
5228 cntl |= pipe << 28; /* Connect to correct pipe */
5229 } else {
5230 cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
5231 cntl |= CURSOR_MODE_DISABLE;
5232 }
9db4a9c7 5233 I915_WRITE(CURCNTR(pipe), cntl);
560b85bb
CW
5234
5235 intel_crtc->cursor_visible = visible;
5236 }
5237 /* and commit changes on next vblank */
9db4a9c7 5238 I915_WRITE(CURBASE(pipe), base);
560b85bb
CW
5239}
5240
65a21cd6
JB
5241static void ivb_update_cursor(struct drm_crtc *crtc, u32 base)
5242{
5243 struct drm_device *dev = crtc->dev;
5244 struct drm_i915_private *dev_priv = dev->dev_private;
5245 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5246 int pipe = intel_crtc->pipe;
5247 bool visible = base != 0;
5248
5249 if (intel_crtc->cursor_visible != visible) {
5250 uint32_t cntl = I915_READ(CURCNTR_IVB(pipe));
5251 if (base) {
5252 cntl &= ~CURSOR_MODE;
5253 cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
5254 } else {
5255 cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
5256 cntl |= CURSOR_MODE_DISABLE;
5257 }
5258 I915_WRITE(CURCNTR_IVB(pipe), cntl);
5259
5260 intel_crtc->cursor_visible = visible;
5261 }
5262 /* and commit changes on next vblank */
5263 I915_WRITE(CURBASE_IVB(pipe), base);
5264}
5265
cda4b7d3 5266 /* If no part of the cursor is visible on the framebuffer, then the GPU may hang... */
6b383a7f
CW
5267static void intel_crtc_update_cursor(struct drm_crtc *crtc,
5268 bool on)
cda4b7d3
CW
5269{
5270 struct drm_device *dev = crtc->dev;
5271 struct drm_i915_private *dev_priv = dev->dev_private;
5272 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5273 int pipe = intel_crtc->pipe;
5274 int x = intel_crtc->cursor_x;
5275 int y = intel_crtc->cursor_y;
560b85bb 5276 u32 base, pos;
cda4b7d3
CW
5277 bool visible;
5278
5279 pos = 0;
5280
6b383a7f 5281 if (on && crtc->enabled && crtc->fb) {
cda4b7d3
CW
5282 base = intel_crtc->cursor_addr;
5283 if (x > (int) crtc->fb->width)
5284 base = 0;
5285
5286 if (y > (int) crtc->fb->height)
5287 base = 0;
5288 } else
5289 base = 0;
5290
5291 if (x < 0) {
5292 if (x + intel_crtc->cursor_width < 0)
5293 base = 0;
5294
5295 pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
5296 x = -x;
5297 }
5298 pos |= x << CURSOR_X_SHIFT;
5299
5300 if (y < 0) {
5301 if (y + intel_crtc->cursor_height < 0)
5302 base = 0;
5303
5304 pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
5305 y = -y;
5306 }
5307 pos |= y << CURSOR_Y_SHIFT;
5308
5309 visible = base != 0;
560b85bb 5310 if (!visible && !intel_crtc->cursor_visible)
cda4b7d3
CW
5311 return;
5312
0cd83aa9 5313 if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
65a21cd6
JB
5314 I915_WRITE(CURPOS_IVB(pipe), pos);
5315 ivb_update_cursor(crtc, base);
5316 } else {
5317 I915_WRITE(CURPOS(pipe), pos);
5318 if (IS_845G(dev) || IS_I865G(dev))
5319 i845_update_cursor(crtc, base);
5320 else
5321 i9xx_update_cursor(crtc, base);
5322 }
cda4b7d3
CW
5323}
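
/*
 * For illustration: the CURPOS value built above is sign/magnitude per axis.
 * A cursor at x = -16, y = 8 (partially off the left edge) is encoded as
 *
 *   pos = ((CURSOR_POS_SIGN | 16) << CURSOR_X_SHIFT) | (8 << CURSOR_Y_SHIFT);
 *
 * and the base address is forced to 0 (cursor off) only once the cursor is
 * entirely outside the framebuffer.
 */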
5324
79e53945 5325static int intel_crtc_cursor_set(struct drm_crtc *crtc,
05394f39 5326 struct drm_file *file,
79e53945
JB
5327 uint32_t handle,
5328 uint32_t width, uint32_t height)
5329{
5330 struct drm_device *dev = crtc->dev;
5331 struct drm_i915_private *dev_priv = dev->dev_private;
5332 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
05394f39 5333 struct drm_i915_gem_object *obj;
cda4b7d3 5334 uint32_t addr;
3f8bc370 5335 int ret;
79e53945 5336
28c97730 5337 DRM_DEBUG_KMS("\n");
79e53945
JB
5338
5339 /* if we want to turn off the cursor ignore width and height */
5340 if (!handle) {
28c97730 5341 DRM_DEBUG_KMS("cursor off\n");
3f8bc370 5342 addr = 0;
05394f39 5343 obj = NULL;
5004417d 5344 mutex_lock(&dev->struct_mutex);
3f8bc370 5345 goto finish;
79e53945
JB
5346 }
5347
5348 /* Currently we only support 64x64 cursors */
5349 if (width != 64 || height != 64) {
5350 DRM_ERROR("we currently only support 64x64 cursors\n");
5351 return -EINVAL;
5352 }
5353
05394f39 5354 obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
c8725226 5355 if (&obj->base == NULL)
79e53945
JB
5356 return -ENOENT;
5357
05394f39 5358 if (obj->base.size < width * height * 4) {
79e53945 5359 DRM_ERROR("buffer is too small\n");
34b8686e
DA
5360 ret = -ENOMEM;
5361 goto fail;
79e53945
JB
5362 }
5363
71acb5eb 5364 /* we only need to pin inside GTT if cursor is non-phy */
7f9872e0 5365 mutex_lock(&dev->struct_mutex);
b295d1b6 5366 if (!dev_priv->info->cursor_needs_physical) {
d9e86c0e
CW
5367 if (obj->tiling_mode) {
5368 DRM_ERROR("cursor cannot be tiled\n");
5369 ret = -EINVAL;
5370 goto fail_locked;
5371 }
5372
2da3b9b9 5373 ret = i915_gem_object_pin_to_display_plane(obj, 0, NULL);
e7b526bb
CW
5374 if (ret) {
5375 DRM_ERROR("failed to move cursor bo into the GTT\n");
2da3b9b9 5376 goto fail_locked;
e7b526bb
CW
5377 }
5378
d9e86c0e
CW
5379 ret = i915_gem_object_put_fence(obj);
5380 if (ret) {
2da3b9b9 5381 DRM_ERROR("failed to release fence for cursor");
d9e86c0e
CW
5382 goto fail_unpin;
5383 }
5384
05394f39 5385 addr = obj->gtt_offset;
71acb5eb 5386 } else {
6eeefaf3 5387 int align = IS_I830(dev) ? 16 * 1024 : 256;
05394f39 5388 ret = i915_gem_attach_phys_object(dev, obj,
6eeefaf3
CW
5389 (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1,
5390 align);
71acb5eb
DA
5391 if (ret) {
5392 DRM_ERROR("failed to attach phys object\n");
7f9872e0 5393 goto fail_locked;
71acb5eb 5394 }
05394f39 5395 addr = obj->phys_obj->handle->busaddr;
3f8bc370
KH
5396 }
5397
a6c45cf0 5398 if (IS_GEN2(dev))
14b60391
JB
5399 I915_WRITE(CURSIZE, (height << 12) | width);
5400
3f8bc370 5401 finish:
3f8bc370 5402 if (intel_crtc->cursor_bo) {
b295d1b6 5403 if (dev_priv->info->cursor_needs_physical) {
05394f39 5404 if (intel_crtc->cursor_bo != obj)
71acb5eb
DA
5405 i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
5406 } else
5407 i915_gem_object_unpin(intel_crtc->cursor_bo);
05394f39 5408 drm_gem_object_unreference(&intel_crtc->cursor_bo->base);
3f8bc370 5409 }
80824003 5410
7f9872e0 5411 mutex_unlock(&dev->struct_mutex);
3f8bc370
KH
5412
5413 intel_crtc->cursor_addr = addr;
05394f39 5414 intel_crtc->cursor_bo = obj;
cda4b7d3
CW
5415 intel_crtc->cursor_width = width;
5416 intel_crtc->cursor_height = height;
5417
6b383a7f 5418 intel_crtc_update_cursor(crtc, true);
3f8bc370 5419
79e53945 5420 return 0;
e7b526bb 5421fail_unpin:
05394f39 5422 i915_gem_object_unpin(obj);
7f9872e0 5423fail_locked:
34b8686e 5424 mutex_unlock(&dev->struct_mutex);
bc9025bd 5425fail:
05394f39 5426 drm_gem_object_unreference_unlocked(&obj->base);
34b8686e 5427 return ret;
79e53945
JB
5428}
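
/*
 * For illustration: with the only supported cursor size being 64x64 ARGB,
 * the size check above (width * height * 4) requires a backing object of at
 * least 64 * 64 * 4 = 16384 bytes, i.e. 16 KiB.
 */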
5429
5430static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
5431{
79e53945 5432 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
79e53945 5433
cda4b7d3
CW
5434 intel_crtc->cursor_x = x;
5435 intel_crtc->cursor_y = y;
652c393a 5436
6b383a7f 5437 intel_crtc_update_cursor(crtc, true);
79e53945
JB
5438
5439 return 0;
5440}
5441
5442/** Sets the color ramps on behalf of RandR */
5443void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
5444 u16 blue, int regno)
5445{
5446 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5447
5448 intel_crtc->lut_r[regno] = red >> 8;
5449 intel_crtc->lut_g[regno] = green >> 8;
5450 intel_crtc->lut_b[regno] = blue >> 8;
5451}
5452
b8c00ac5
DA
5453void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
5454 u16 *blue, int regno)
5455{
5456 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5457
5458 *red = intel_crtc->lut_r[regno] << 8;
5459 *green = intel_crtc->lut_g[regno] << 8;
5460 *blue = intel_crtc->lut_b[regno] << 8;
5461}
5462
79e53945 5463static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
7203425a 5464 u16 *blue, uint32_t start, uint32_t size)
79e53945 5465{
7203425a 5466 int end = (start + size > 256) ? 256 : start + size, i;
79e53945 5467 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
79e53945 5468
7203425a 5469 for (i = start; i < end; i++) {
79e53945
JB
5470 intel_crtc->lut_r[i] = red[i] >> 8;
5471 intel_crtc->lut_g[i] = green[i] >> 8;
5472 intel_crtc->lut_b[i] = blue[i] >> 8;
5473 }
5474
5475 intel_crtc_load_lut(crtc);
5476}
5477
5478/**
5479 * Get a pipe with a simple mode set on it for doing load-based monitor
5480 * detection.
5481 *
5482 * It will be up to the load-detect code to adjust the pipe as appropriate for
c751ce4f 5483 * its requirements. The pipe will be connected to no other encoders.
79e53945 5484 *
c751ce4f 5485 * Currently this code will only succeed if there is a pipe with no encoders
79e53945
JB
5486 * configured for it. In the future, it could choose to temporarily disable
5487 * some outputs to free up a pipe for its use.
5488 *
5489 * \return true if a pipe was found and configured, false otherwise.
5490 */
5491
5492/* VESA 640x480x72Hz mode to set on the pipe */
5493static struct drm_display_mode load_detect_mode = {
5494 DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
5495 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
5496};
5497
d2dff872
CW
5498static struct drm_framebuffer *
5499intel_framebuffer_create(struct drm_device *dev,
308e5bcb 5500 struct drm_mode_fb_cmd2 *mode_cmd,
d2dff872
CW
5501 struct drm_i915_gem_object *obj)
5502{
5503 struct intel_framebuffer *intel_fb;
5504 int ret;
5505
5506 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
5507 if (!intel_fb) {
5508 drm_gem_object_unreference_unlocked(&obj->base);
5509 return ERR_PTR(-ENOMEM);
5510 }
5511
5512 ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
5513 if (ret) {
5514 drm_gem_object_unreference_unlocked(&obj->base);
5515 kfree(intel_fb);
5516 return ERR_PTR(ret);
5517 }
5518
5519 return &intel_fb->base;
5520}
5521
5522static u32
5523intel_framebuffer_pitch_for_width(int width, int bpp)
5524{
5525 u32 pitch = DIV_ROUND_UP(width * bpp, 8);
5526 return ALIGN(pitch, 64);
5527}
5528
5529static u32
5530intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
5531{
5532 u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
5533 return ALIGN(pitch * mode->vdisplay, PAGE_SIZE);
5534}
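
/*
 * Worked example for the two helpers above: a 1024x768 mode at 32 bpp gives
 * pitch = DIV_ROUND_UP(1024 * 32, 8) = 4096 bytes (already 64-byte aligned),
 * and size = ALIGN(4096 * 768, PAGE_SIZE) = 3145728 bytes, exactly 3 MiB.
 */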
5535
5536static struct drm_framebuffer *
5537intel_framebuffer_create_for_mode(struct drm_device *dev,
5538 struct drm_display_mode *mode,
5539 int depth, int bpp)
5540{
5541 struct drm_i915_gem_object *obj;
308e5bcb 5542 struct drm_mode_fb_cmd2 mode_cmd;
d2dff872
CW
5543
5544 obj = i915_gem_alloc_object(dev,
5545 intel_framebuffer_size_for_mode(mode, bpp));
5546 if (obj == NULL)
5547 return ERR_PTR(-ENOMEM);
5548
5549 mode_cmd.width = mode->hdisplay;
5550 mode_cmd.height = mode->vdisplay;
308e5bcb
JB
5551 mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width,
5552 bpp);
5ca0c34a 5553 mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
d2dff872
CW
5554
5555 return intel_framebuffer_create(dev, &mode_cmd, obj);
5556}
5557
5558static struct drm_framebuffer *
5559mode_fits_in_fbdev(struct drm_device *dev,
5560 struct drm_display_mode *mode)
5561{
5562 struct drm_i915_private *dev_priv = dev->dev_private;
5563 struct drm_i915_gem_object *obj;
5564 struct drm_framebuffer *fb;
5565
5566 if (dev_priv->fbdev == NULL)
5567 return NULL;
5568
5569 obj = dev_priv->fbdev->ifb.obj;
5570 if (obj == NULL)
5571 return NULL;
5572
5573 fb = &dev_priv->fbdev->ifb.base;
01f2c773
VS
5574 if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
5575 fb->bits_per_pixel))
d2dff872
CW
5576 return NULL;
5577
01f2c773 5578 if (obj->base.size < mode->vdisplay * fb->pitches[0])
d2dff872
CW
5579 return NULL;
5580
5581 return fb;
5582}
5583
7173188d
CW
5584bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
5585 struct drm_connector *connector,
5586 struct drm_display_mode *mode,
8261b191 5587 struct intel_load_detect_pipe *old)
79e53945
JB
5588{
5589 struct intel_crtc *intel_crtc;
5590 struct drm_crtc *possible_crtc;
4ef69c7a 5591 struct drm_encoder *encoder = &intel_encoder->base;
79e53945
JB
5592 struct drm_crtc *crtc = NULL;
5593 struct drm_device *dev = encoder->dev;
d2dff872 5594 struct drm_framebuffer *old_fb;
79e53945
JB
5595 int i = -1;
5596
d2dff872
CW
5597 DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
5598 connector->base.id, drm_get_connector_name(connector),
5599 encoder->base.id, drm_get_encoder_name(encoder));
5600
79e53945
JB
5601 /*
5602 * Algorithm gets a little messy:
7a5e4805 5603 *
79e53945
JB
5604 * - if the connector already has an assigned crtc, use it (but make
5605 * sure it's on first)
7a5e4805 5606 *
79e53945
JB
5607 * - try to find the first unused crtc that can drive this connector,
5608 * and use that if we find one
79e53945
JB
5609 */
5610
5611 /* See if we already have a CRTC for this connector */
5612 if (encoder->crtc) {
5613 crtc = encoder->crtc;
8261b191 5614
79e53945 5615 intel_crtc = to_intel_crtc(crtc);
8261b191
CW
5616 old->dpms_mode = intel_crtc->dpms_mode;
5617 old->load_detect_temp = false;
5618
5619 /* Make sure the crtc and connector are running */
79e53945 5620 if (intel_crtc->dpms_mode != DRM_MODE_DPMS_ON) {
6492711d
CW
5621 struct drm_encoder_helper_funcs *encoder_funcs;
5622 struct drm_crtc_helper_funcs *crtc_funcs;
5623
79e53945
JB
5624 crtc_funcs = crtc->helper_private;
5625 crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON);
6492711d
CW
5626
5627 encoder_funcs = encoder->helper_private;
79e53945
JB
5628 encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
5629 }
8261b191 5630
7173188d 5631 return true;
79e53945
JB
5632 }
5633
5634 /* Find an unused one (if possible) */
5635 list_for_each_entry(possible_crtc, &dev->mode_config.crtc_list, head) {
5636 i++;
5637 if (!(encoder->possible_crtcs & (1 << i)))
5638 continue;
5639 if (!possible_crtc->enabled) {
5640 crtc = possible_crtc;
5641 break;
5642 }
79e53945
JB
5643 }
5644
5645 /*
5646 * If we didn't find an unused CRTC, don't use any.
5647 */
5648 if (!crtc) {
7173188d
CW
5649 DRM_DEBUG_KMS("no pipe available for load-detect\n");
5650 return false;
79e53945
JB
5651 }
5652
5653 encoder->crtc = crtc;
c1c43977 5654 connector->encoder = encoder;
79e53945
JB
5655
5656 intel_crtc = to_intel_crtc(crtc);
8261b191
CW
5657 old->dpms_mode = intel_crtc->dpms_mode;
5658 old->load_detect_temp = true;
d2dff872 5659 old->release_fb = NULL;
79e53945 5660
6492711d
CW
5661 if (!mode)
5662 mode = &load_detect_mode;
79e53945 5663
d2dff872
CW
5664 old_fb = crtc->fb;
5665
5666 /* We need a framebuffer large enough to accommodate all accesses
5667 * that the plane may generate whilst we perform load detection.
5668 * We cannot rely on the fbcon either being present (we get called
5669 * during its initialisation to detect all boot displays, or it may
5670 * not even exist) or that it is large enough to satisfy the
5671 * requested mode.
5672 */
5673 crtc->fb = mode_fits_in_fbdev(dev, mode);
5674 if (crtc->fb == NULL) {
5675 DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
5676 crtc->fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
5677 old->release_fb = crtc->fb;
5678 } else
5679 DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
5680 if (IS_ERR(crtc->fb)) {
5681 DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
5682 crtc->fb = old_fb;
5683 return false;
79e53945 5684 }
79e53945 5685
d2dff872 5686 if (!drm_crtc_helper_set_mode(crtc, mode, 0, 0, old_fb)) {
6492711d 5687 DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
d2dff872
CW
5688 if (old->release_fb)
5689 old->release_fb->funcs->destroy(old->release_fb);
5690 crtc->fb = old_fb;
6492711d 5691 return false;
79e53945 5692 }
7173188d 5693
79e53945 5694 /* let the connector get through one full cycle before testing */
9d0498a2 5695 intel_wait_for_vblank(dev, intel_crtc->pipe);
79e53945 5696
7173188d 5697 return true;
79e53945
JB
5698}
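
/*
 * A hedged usage sketch (the real callers live in the per-output files such
 * as intel_crt.c; the probe helper named here is hypothetical):
 *
 *   struct intel_load_detect_pipe tmp;
 *
 *   if (intel_get_load_detect_pipe(intel_encoder, connector, NULL, &tmp)) {
 *           status = my_load_detect_probe(connector);  // hypothetical helper
 *           intel_release_load_detect_pipe(intel_encoder, connector, &tmp);
 *   }
 */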
5699
c1c43977 5700void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder,
8261b191
CW
5701 struct drm_connector *connector,
5702 struct intel_load_detect_pipe *old)
79e53945 5703{
4ef69c7a 5704 struct drm_encoder *encoder = &intel_encoder->base;
79e53945
JB
5705 struct drm_device *dev = encoder->dev;
5706 struct drm_crtc *crtc = encoder->crtc;
5707 struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
5708 struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
5709
d2dff872
CW
5710 DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
5711 connector->base.id, drm_get_connector_name(connector),
5712 encoder->base.id, drm_get_encoder_name(encoder));
5713
8261b191 5714 if (old->load_detect_temp) {
c1c43977 5715 connector->encoder = NULL;
79e53945 5716 drm_helper_disable_unused_functions(dev);
d2dff872
CW
5717
5718 if (old->release_fb)
5719 old->release_fb->funcs->destroy(old->release_fb);
5720
0622a53c 5721 return;
79e53945
JB
5722 }
5723
c751ce4f 5724 /* Switch crtc and encoder back off if necessary */
0622a53c
CW
5725 if (old->dpms_mode != DRM_MODE_DPMS_ON) {
5726 encoder_funcs->dpms(encoder, old->dpms_mode);
8261b191 5727 crtc_funcs->dpms(crtc, old->dpms_mode);
79e53945
JB
5728 }
5729}
5730
5731/* Returns the clock of the currently programmed mode of the given pipe. */
5732static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
5733{
5734 struct drm_i915_private *dev_priv = dev->dev_private;
5735 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5736 int pipe = intel_crtc->pipe;
548f245b 5737 u32 dpll = I915_READ(DPLL(pipe));
79e53945
JB
5738 u32 fp;
5739 intel_clock_t clock;
5740
5741 if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
39adb7a5 5742 fp = I915_READ(FP0(pipe));
79e53945 5743 else
39adb7a5 5744 fp = I915_READ(FP1(pipe));
79e53945
JB
5745
5746 clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
f2b115e6
AJ
5747 if (IS_PINEVIEW(dev)) {
5748 clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
5749 clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
2177832f
SL
5750 } else {
5751 clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
5752 clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
5753 }
5754
a6c45cf0 5755 if (!IS_GEN2(dev)) {
f2b115e6
AJ
5756 if (IS_PINEVIEW(dev))
5757 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
5758 DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
2177832f
SL
5759 else
5760 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
79e53945
JB
5761 DPLL_FPA01_P1_POST_DIV_SHIFT);
5762
5763 switch (dpll & DPLL_MODE_MASK) {
5764 case DPLLB_MODE_DAC_SERIAL:
5765 clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
5766 5 : 10;
5767 break;
5768 case DPLLB_MODE_LVDS:
5769 clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
5770 7 : 14;
5771 break;
5772 default:
28c97730 5773 DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
79e53945
JB
5774 "mode\n", (int)(dpll & DPLL_MODE_MASK));
5775 return 0;
5776 }
5777
5778 /* XXX: Handle the 100MHz refclk */
2177832f 5779 intel_clock(dev, 96000, &clock);
79e53945
JB
5780 } else {
5781 bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN);
5782
5783 if (is_lvds) {
5784 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
5785 DPLL_FPA01_P1_POST_DIV_SHIFT);
5786 clock.p2 = 14;
5787
5788 if ((dpll & PLL_REF_INPUT_MASK) ==
5789 PLLB_REF_INPUT_SPREADSPECTRUMIN) {
5790 /* XXX: might not be 66MHz */
2177832f 5791 intel_clock(dev, 66000, &clock);
79e53945 5792 } else
2177832f 5793 intel_clock(dev, 48000, &clock);
79e53945
JB
5794 } else {
5795 if (dpll & PLL_P1_DIVIDE_BY_TWO)
5796 clock.p1 = 2;
5797 else {
5798 clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
5799 DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
5800 }
5801 if (dpll & PLL_P2_DIVIDE_BY_4)
5802 clock.p2 = 4;
5803 else
5804 clock.p2 = 2;
5805
2177832f 5806 intel_clock(dev, 48000, &clock);
79e53945
JB
5807 }
5808 }
5809
5810 /* XXX: It would be nice to validate the clocks, but we can't reuse
5811 * i830PllIsValid() because it relies on the xf86_config connector
5812 * configuration being accurate, which it isn't necessarily.
5813 */
5814
5815 return clock.dot;
5816}
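
/*
 * Editor's summary of the readback above (hedged -- see intel_clock() for
 * the authoritative math): the FP/DPLL fields are decoded into m1/m2/n and
 * p1/p2, and on non-Pineview parts the dot clock follows roughly
 *
 *   m = 5 * (m1 + 2) + (m2 + 2), p = p1 * p2,
 *   vco = refclk * m / (n + 2),  dot = vco / p,
 *
 * with refclk assumed to be 96 MHz here (or 66/48 MHz on gen2, as chosen
 * above).
 */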
5817
5818/** Returns the currently programmed mode of the given pipe. */
5819struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
5820 struct drm_crtc *crtc)
5821{
548f245b 5822 struct drm_i915_private *dev_priv = dev->dev_private;
79e53945
JB
5823 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5824 int pipe = intel_crtc->pipe;
5825 struct drm_display_mode *mode;
548f245b
JB
5826 int htot = I915_READ(HTOTAL(pipe));
5827 int hsync = I915_READ(HSYNC(pipe));
5828 int vtot = I915_READ(VTOTAL(pipe));
5829 int vsync = I915_READ(VSYNC(pipe));
79e53945
JB
5830
5831 mode = kzalloc(sizeof(*mode), GFP_KERNEL);
5832 if (!mode)
5833 return NULL;
5834
5835 mode->clock = intel_crtc_clock_get(dev, crtc);
5836 mode->hdisplay = (htot & 0xffff) + 1;
5837 mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
5838 mode->hsync_start = (hsync & 0xffff) + 1;
5839 mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
5840 mode->vdisplay = (vtot & 0xffff) + 1;
5841 mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
5842 mode->vsync_start = (vsync & 0xffff) + 1;
5843 mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
5844
5845 drm_mode_set_name(mode);
79e53945
JB
5846
5847 return mode;
5848}
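
/*
 * For illustration: the pipe timing registers hold "value minus one", with
 * the active/start count in the low 16 bits and the total/end count in the
 * high 16 bits, which is why every field above is masked, shifted and then
 * incremented by one.
 */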
5849
652c393a
JB
5850#define GPU_IDLE_TIMEOUT 500 /* ms */
5851
5852 /* When this timer fires, we've been idle for a while */
5853static void intel_gpu_idle_timer(unsigned long arg)
5854{
5855 struct drm_device *dev = (struct drm_device *)arg;
5856 drm_i915_private_t *dev_priv = dev->dev_private;
5857
ff7ea4c0
CW
5858 if (!list_empty(&dev_priv->mm.active_list)) {
5859 /* Still processing requests, so just re-arm the timer. */
5860 mod_timer(&dev_priv->idle_timer, jiffies +
5861 msecs_to_jiffies(GPU_IDLE_TIMEOUT));
5862 return;
5863 }
652c393a 5864
ff7ea4c0 5865 dev_priv->busy = false;
01dfba93 5866 queue_work(dev_priv->wq, &dev_priv->idle_work);
652c393a
JB
5867}
5868
652c393a
JB
5869#define CRTC_IDLE_TIMEOUT 1000 /* ms */
5870
5871static void intel_crtc_idle_timer(unsigned long arg)
5872{
5873 struct intel_crtc *intel_crtc = (struct intel_crtc *)arg;
5874 struct drm_crtc *crtc = &intel_crtc->base;
5875 drm_i915_private_t *dev_priv = crtc->dev->dev_private;
ff7ea4c0 5876 struct intel_framebuffer *intel_fb;
652c393a 5877
ff7ea4c0
CW
5878 intel_fb = to_intel_framebuffer(crtc->fb);
5879 if (intel_fb && intel_fb->obj->active) {
5880 /* The framebuffer is still being accessed by the GPU. */
5881 mod_timer(&intel_crtc->idle_timer, jiffies +
5882 msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
5883 return;
5884 }
652c393a 5885
ff7ea4c0 5886 intel_crtc->busy = false;
01dfba93 5887 queue_work(dev_priv->wq, &dev_priv->idle_work);
652c393a
JB
5888}
5889
3dec0095 5890static void intel_increase_pllclock(struct drm_crtc *crtc)
652c393a
JB
5891{
5892 struct drm_device *dev = crtc->dev;
5893 drm_i915_private_t *dev_priv = dev->dev_private;
5894 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5895 int pipe = intel_crtc->pipe;
dbdc6479
JB
5896 int dpll_reg = DPLL(pipe);
5897 int dpll;
652c393a 5898
bad720ff 5899 if (HAS_PCH_SPLIT(dev))
652c393a
JB
5900 return;
5901
5902 if (!dev_priv->lvds_downclock_avail)
5903 return;
5904
dbdc6479 5905 dpll = I915_READ(dpll_reg);
652c393a 5906 if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) {
44d98a61 5907 DRM_DEBUG_DRIVER("upclocking LVDS\n");
652c393a 5908
8ac5a6d5 5909 assert_panel_unlocked(dev_priv, pipe);
652c393a
JB
5910
5911 dpll &= ~DISPLAY_RATE_SELECT_FPA1;
5912 I915_WRITE(dpll_reg, dpll);
9d0498a2 5913 intel_wait_for_vblank(dev, pipe);
dbdc6479 5914
652c393a
JB
5915 dpll = I915_READ(dpll_reg);
5916 if (dpll & DISPLAY_RATE_SELECT_FPA1)
44d98a61 5917 DRM_DEBUG_DRIVER("failed to upclock LVDS!\n");
652c393a
JB
5918 }
5919
5920 /* Schedule downclock */
3dec0095
DV
5921 mod_timer(&intel_crtc->idle_timer, jiffies +
5922 msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
652c393a
JB
5923}
5924
5925static void intel_decrease_pllclock(struct drm_crtc *crtc)
5926{
5927 struct drm_device *dev = crtc->dev;
5928 drm_i915_private_t *dev_priv = dev->dev_private;
5929 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
652c393a 5930
bad720ff 5931 if (HAS_PCH_SPLIT(dev))
652c393a
JB
5932 return;
5933
5934 if (!dev_priv->lvds_downclock_avail)
5935 return;
5936
5937 /*
5938 * Since this is called by a timer, we should never get here in
5939 * the manual case.
5940 */
5941 if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) {
dc257cf1
DV
5942 int pipe = intel_crtc->pipe;
5943 int dpll_reg = DPLL(pipe);
5944 int dpll;
f6e5b160 5945
44d98a61 5946 DRM_DEBUG_DRIVER("downclocking LVDS\n");
652c393a 5947
8ac5a6d5 5948 assert_panel_unlocked(dev_priv, pipe);
652c393a 5949
dc257cf1 5950 dpll = I915_READ(dpll_reg);
652c393a
JB
5951 dpll |= DISPLAY_RATE_SELECT_FPA1;
5952 I915_WRITE(dpll_reg, dpll);
9d0498a2 5953 intel_wait_for_vblank(dev, pipe);
652c393a
JB
5954 dpll = I915_READ(dpll_reg);
5955 if (!(dpll & DISPLAY_RATE_SELECT_FPA1))
44d98a61 5956 DRM_DEBUG_DRIVER("failed to downclock LVDS!\n");
652c393a
JB
5957 }
5958
5959}
5960
5961/**
5962 * intel_idle_update - adjust clocks for idleness
5963 * @work: work struct
5964 *
5965 * Either the GPU or display (or both) went idle. Check the busy status
5966 * here and adjust the CRTC and GPU clocks as necessary.
5967 */
5968static void intel_idle_update(struct work_struct *work)
5969{
5970 drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
5971 idle_work);
5972 struct drm_device *dev = dev_priv->dev;
5973 struct drm_crtc *crtc;
5974 struct intel_crtc *intel_crtc;
5975
5976 if (!i915_powersave)
5977 return;
5978
5979 mutex_lock(&dev->struct_mutex);
5980
7648fa99
JB
5981 i915_update_gfx_val(dev_priv);
5982
652c393a
JB
5983 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
5984 /* Skip inactive CRTCs */
5985 if (!crtc->fb)
5986 continue;
5987
5988 intel_crtc = to_intel_crtc(crtc);
5989 if (!intel_crtc->busy)
5990 intel_decrease_pllclock(crtc);
5991 }
5992
45ac22c8 5993
652c393a
JB
5994 mutex_unlock(&dev->struct_mutex);
5995}
5996
5997/**
5998 * intel_mark_busy - mark the GPU and possibly the display busy
5999 * @dev: drm device
6000 * @obj: object we're operating on
6001 *
6002 * Callers can use this function to indicate that the GPU is busy processing
6003 * commands. If @obj matches one of the CRTC objects (i.e. it's a scanout
6004 * buffer), we'll also mark the display as busy, so we know to increase its
6005 * clock frequency.
6006 */
05394f39 6007void intel_mark_busy(struct drm_device *dev, struct drm_i915_gem_object *obj)
652c393a
JB
6008{
6009 drm_i915_private_t *dev_priv = dev->dev_private;
6010 struct drm_crtc *crtc = NULL;
6011 struct intel_framebuffer *intel_fb;
6012 struct intel_crtc *intel_crtc;
6013
5e17ee74
ZW
6014 if (!drm_core_check_feature(dev, DRIVER_MODESET))
6015 return;
6016
9104183d
CW
6017 if (!dev_priv->busy) {
6018 intel_sanitize_pm(dev);
28cf798f 6019 dev_priv->busy = true;
9104183d 6020 } else
28cf798f
CW
6021 mod_timer(&dev_priv->idle_timer, jiffies +
6022 msecs_to_jiffies(GPU_IDLE_TIMEOUT));
652c393a 6023
acb87dfb
CW
6024 if (obj == NULL)
6025 return;
6026
652c393a
JB
6027 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
6028 if (!crtc->fb)
6029 continue;
6030
6031 intel_crtc = to_intel_crtc(crtc);
6032 intel_fb = to_intel_framebuffer(crtc->fb);
6033 if (intel_fb->obj == obj) {
6034 if (!intel_crtc->busy) {
6035 /* Non-busy -> busy, upclock */
3dec0095 6036 intel_increase_pllclock(crtc);
652c393a
JB
6037 intel_crtc->busy = true;
6038 } else {
6039 /* Busy -> busy, put off timer */
6040 mod_timer(&intel_crtc->idle_timer, jiffies +
6041 msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
6042 }
6043 }
6044 }
6045}
6046
79e53945
JB
6047static void intel_crtc_destroy(struct drm_crtc *crtc)
6048{
6049 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
67e77c5a
DV
6050 struct drm_device *dev = crtc->dev;
6051 struct intel_unpin_work *work;
6052 unsigned long flags;
6053
6054 spin_lock_irqsave(&dev->event_lock, flags);
6055 work = intel_crtc->unpin_work;
6056 intel_crtc->unpin_work = NULL;
6057 spin_unlock_irqrestore(&dev->event_lock, flags);
6058
6059 if (work) {
6060 cancel_work_sync(&work->work);
6061 kfree(work);
6062 }
79e53945
JB
6063
6064 drm_crtc_cleanup(crtc);
67e77c5a 6065
79e53945
JB
6066 kfree(intel_crtc);
6067}
6068
6b95a207
KH
6069static void intel_unpin_work_fn(struct work_struct *__work)
6070{
6071 struct intel_unpin_work *work =
6072 container_of(__work, struct intel_unpin_work, work);
6073
6074 mutex_lock(&work->dev->struct_mutex);
1690e1eb 6075 intel_unpin_fb_obj(work->old_fb_obj);
05394f39
CW
6076 drm_gem_object_unreference(&work->pending_flip_obj->base);
6077 drm_gem_object_unreference(&work->old_fb_obj->base);
d9e86c0e 6078
7782de3b 6079 intel_update_fbc(work->dev);
6b95a207
KH
6080 mutex_unlock(&work->dev->struct_mutex);
6081 kfree(work);
6082}
6083
1afe3e9d 6084static void do_intel_finish_page_flip(struct drm_device *dev,
49b14a5c 6085 struct drm_crtc *crtc)
6b95a207
KH
6086{
6087 drm_i915_private_t *dev_priv = dev->dev_private;
6b95a207
KH
6088 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6089 struct intel_unpin_work *work;
05394f39 6090 struct drm_i915_gem_object *obj;
6b95a207 6091 struct drm_pending_vblank_event *e;
49b14a5c 6092 struct timeval tnow, tvbl;
6b95a207
KH
6093 unsigned long flags;
6094
6095 /* Ignore early vblank irqs */
6096 if (intel_crtc == NULL)
6097 return;
6098
49b14a5c
MK
6099 do_gettimeofday(&tnow);
6100
6b95a207
KH
6101 spin_lock_irqsave(&dev->event_lock, flags);
6102 work = intel_crtc->unpin_work;
6103 if (work == NULL || !work->pending) {
6104 spin_unlock_irqrestore(&dev->event_lock, flags);
6105 return;
6106 }
6107
6108 intel_crtc->unpin_work = NULL;
6b95a207
KH
6109
6110 if (work->event) {
6111 e = work->event;
49b14a5c 6112 e->event.sequence = drm_vblank_count_and_time(dev, intel_crtc->pipe, &tvbl);
0af7e4df
MK
6113
6114 /* Called before vblank count and timestamps have
6115 * been updated for the vblank interval of flip
6116 * completion? Need to increment vblank count and
6117 * add one videorefresh duration to returned timestamp
49b14a5c
MK
6118 * to account for this. We assume this happened if we
6119 * get called over 0.9 frame durations after the last
6120 * timestamped vblank.
6121 *
6122 * This calculation cannot be used with vrefresh rates
6123 * below 5Hz (10Hz to be on the safe side) without
6124 * promoting to 64-bit integers.
0af7e4df 6125 */
49b14a5c
MK
6126 if (10 * (timeval_to_ns(&tnow) - timeval_to_ns(&tvbl)) >
6127 9 * crtc->framedur_ns) {
0af7e4df 6128 e->event.sequence++;
49b14a5c
MK
6129 tvbl = ns_to_timeval(timeval_to_ns(&tvbl) +
6130 crtc->framedur_ns);
0af7e4df
MK
6131 }
6132
49b14a5c
MK
6133 e->event.tv_sec = tvbl.tv_sec;
6134 e->event.tv_usec = tvbl.tv_usec;
0af7e4df 6135
6b95a207
KH
6136 list_add_tail(&e->base.link,
6137 &e->base.file_priv->event_list);
6138 wake_up_interruptible(&e->base.file_priv->event_wait);
6139 }
6140
0af7e4df
MK
6141 drm_vblank_put(dev, intel_crtc->pipe);
6142
6b95a207
KH
6143 spin_unlock_irqrestore(&dev->event_lock, flags);
6144
05394f39 6145 obj = work->old_fb_obj;
d9e86c0e 6146
e59f2bac 6147 atomic_clear_mask(1 << intel_crtc->plane,
05394f39
CW
6148 &obj->pending_flip.counter);
6149 if (atomic_read(&obj->pending_flip) == 0)
f787a5f5 6150 wake_up(&dev_priv->pending_flip_queue);
d9e86c0e 6151
6b95a207 6152 schedule_work(&work->work);
e5510fac
JB
6153
6154 trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
6b95a207
KH
6155}
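
/*
 * For illustration of the 0.9-frame heuristic above: at 60 Hz the frame
 * duration is about 16.7 ms, so "10 * (tnow - tvbl) > 9 * framedur_ns"
 * triggers the correction whenever flip completion is observed more than
 * roughly 15 ms after the last timestamped vblank; one frame duration is
 * then added to the reported timestamp and the sequence number is bumped.
 */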
6156
1afe3e9d
JB
6157void intel_finish_page_flip(struct drm_device *dev, int pipe)
6158{
6159 drm_i915_private_t *dev_priv = dev->dev_private;
6160 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
6161
49b14a5c 6162 do_intel_finish_page_flip(dev, crtc);
1afe3e9d
JB
6163}
6164
6165void intel_finish_page_flip_plane(struct drm_device *dev, int plane)
6166{
6167 drm_i915_private_t *dev_priv = dev->dev_private;
6168 struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane];
6169
49b14a5c 6170 do_intel_finish_page_flip(dev, crtc);
1afe3e9d
JB
6171}
6172
6b95a207
KH
6173void intel_prepare_page_flip(struct drm_device *dev, int plane)
6174{
6175 drm_i915_private_t *dev_priv = dev->dev_private;
6176 struct intel_crtc *intel_crtc =
6177 to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
6178 unsigned long flags;
6179
6180 spin_lock_irqsave(&dev->event_lock, flags);
de3f440f 6181 if (intel_crtc->unpin_work) {
4e5359cd
SF
6182 if ((++intel_crtc->unpin_work->pending) > 1)
6183 DRM_ERROR("Prepared flip multiple times\n");
de3f440f
JB
6184 } else {
6185 DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n");
6186 }
6b95a207
KH
6187 spin_unlock_irqrestore(&dev->event_lock, flags);
6188}
6189
8c9f3aaf
JB
6190static int intel_gen2_queue_flip(struct drm_device *dev,
6191 struct drm_crtc *crtc,
6192 struct drm_framebuffer *fb,
6193 struct drm_i915_gem_object *obj)
6194{
6195 struct drm_i915_private *dev_priv = dev->dev_private;
6196 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6197 unsigned long offset;
6198 u32 flip_mask;
6d90c952 6199 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
8c9f3aaf
JB
6200 int ret;
6201
6d90c952 6202 ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
8c9f3aaf 6203 if (ret)
83d4092b 6204 goto err;
8c9f3aaf
JB
6205
6206 /* Offset into the new buffer for cases of shared fbs between CRTCs */
01f2c773 6207 offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8;
8c9f3aaf 6208
6d90c952 6209 ret = intel_ring_begin(ring, 6);
8c9f3aaf 6210 if (ret)
83d4092b 6211 goto err_unpin;
8c9f3aaf
JB
6212
6213 /* Can't queue multiple flips, so wait for the previous
6214 * one to finish before executing the next.
6215 */
6216 if (intel_crtc->plane)
6217 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
6218 else
6219 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
6d90c952
DV
6220 intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
6221 intel_ring_emit(ring, MI_NOOP);
6222 intel_ring_emit(ring, MI_DISPLAY_FLIP |
6223 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
6224 intel_ring_emit(ring, fb->pitches[0]);
6225 intel_ring_emit(ring, obj->gtt_offset + offset);
6226 intel_ring_emit(ring, 0); /* aux display base address, unused */
6227 intel_ring_advance(ring);
83d4092b
CW
6228 return 0;
6229
6230err_unpin:
6231 intel_unpin_fb_obj(obj);
6232err:
8c9f3aaf
JB
6233 return ret;
6234}
6235
6236static int intel_gen3_queue_flip(struct drm_device *dev,
6237 struct drm_crtc *crtc,
6238 struct drm_framebuffer *fb,
6239 struct drm_i915_gem_object *obj)
6240{
6241 struct drm_i915_private *dev_priv = dev->dev_private;
6242 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6243 unsigned long offset;
6244 u32 flip_mask;
6d90c952 6245 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
8c9f3aaf
JB
6246 int ret;
6247
6d90c952 6248 ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
8c9f3aaf 6249 if (ret)
83d4092b 6250 goto err;
8c9f3aaf
JB
6251
6252 /* Offset into the new buffer for cases of shared fbs between CRTCs */
01f2c773 6253 offset = crtc->y * fb->pitches[0] + crtc->x * fb->bits_per_pixel/8;
8c9f3aaf 6254
6d90c952 6255 ret = intel_ring_begin(ring, 6);
8c9f3aaf 6256 if (ret)
83d4092b 6257 goto err_unpin;
8c9f3aaf
JB
6258
6259 if (intel_crtc->plane)
6260 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
6261 else
6262 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
6d90c952
DV
6263 intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
6264 intel_ring_emit(ring, MI_NOOP);
6265 intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 |
6266 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
6267 intel_ring_emit(ring, fb->pitches[0]);
6268 intel_ring_emit(ring, obj->gtt_offset + offset);
6269 intel_ring_emit(ring, MI_NOOP);
6270
6271 intel_ring_advance(ring);
83d4092b
CW
6272 return 0;
6273
6274err_unpin:
6275 intel_unpin_fb_obj(obj);
6276err:
8c9f3aaf
JB
6277 return ret;
6278}
6279
6280static int intel_gen4_queue_flip(struct drm_device *dev,
6281 struct drm_crtc *crtc,
6282 struct drm_framebuffer *fb,
6283 struct drm_i915_gem_object *obj)
6284{
6285 struct drm_i915_private *dev_priv = dev->dev_private;
6286 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6287 uint32_t pf, pipesrc;
6d90c952 6288 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
8c9f3aaf
JB
6289 int ret;
6290
6d90c952 6291 ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
8c9f3aaf 6292 if (ret)
83d4092b 6293 goto err;
8c9f3aaf 6294
6d90c952 6295 ret = intel_ring_begin(ring, 4);
8c9f3aaf 6296 if (ret)
83d4092b 6297 goto err_unpin;
8c9f3aaf
JB
6298
6299 /* i965+ uses the linear or tiled offsets from the
6300 * Display Registers (which do not change across a page-flip)
6301 * so we need only reprogram the base address.
6302 */
6d90c952
DV
6303 intel_ring_emit(ring, MI_DISPLAY_FLIP |
6304 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
6305 intel_ring_emit(ring, fb->pitches[0]);
6306 intel_ring_emit(ring, obj->gtt_offset | obj->tiling_mode);
8c9f3aaf
JB
6307
6308 /* XXX Enabling the panel-fitter across page-flip is so far
6309 * untested on non-native modes, so ignore it for now.
6310 * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
6311 */
6312 pf = 0;
6313 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
6d90c952
DV
6314 intel_ring_emit(ring, pf | pipesrc);
6315 intel_ring_advance(ring);
83d4092b
CW
6316 return 0;
6317
6318err_unpin:
6319 intel_unpin_fb_obj(obj);
6320err:
8c9f3aaf
JB
6321 return ret;
6322}
6323
6324static int intel_gen6_queue_flip(struct drm_device *dev,
6325 struct drm_crtc *crtc,
6326 struct drm_framebuffer *fb,
6327 struct drm_i915_gem_object *obj)
6328{
6329 struct drm_i915_private *dev_priv = dev->dev_private;
6330 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6d90c952 6331 struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
8c9f3aaf
JB
6332 uint32_t pf, pipesrc;
6333 int ret;
6334
6d90c952 6335 ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
8c9f3aaf 6336 if (ret)
83d4092b 6337 goto err;
8c9f3aaf 6338
6d90c952 6339 ret = intel_ring_begin(ring, 4);
8c9f3aaf 6340 if (ret)
83d4092b 6341 goto err_unpin;
8c9f3aaf 6342
6d90c952
DV
6343 intel_ring_emit(ring, MI_DISPLAY_FLIP |
6344 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
6345 intel_ring_emit(ring, fb->pitches[0] | obj->tiling_mode);
6346 intel_ring_emit(ring, obj->gtt_offset);
8c9f3aaf 6347
dc257cf1
DV
6348 /* Contrary to the suggestions in the documentation,
6349 * "Enable Panel Fitter" does not seem to be required when page
6351 * flipping with a non-native mode, and, worse, causes a normal
6351 * modeset to fail.
6352 * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
6353 */
6354 pf = 0;
8c9f3aaf 6355 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
6d90c952
DV
6356 intel_ring_emit(ring, pf | pipesrc);
6357 intel_ring_advance(ring);
83d4092b
CW
6358 return 0;
6359
6360err_unpin:
6361 intel_unpin_fb_obj(obj);
6362err:
8c9f3aaf
JB
6363 return ret;
6364}
6365
7c9017e5
JB
6366/*
6367 * On gen7 we currently use the blit ring because (in early silicon at least)
6368 * the render ring doesn't give us interrupts for page flip completion, which
6369 * means clients will hang after the first flip is queued. Fortunately the
6370 * blit ring generates interrupts properly, so use it instead.
6371 */
6372static int intel_gen7_queue_flip(struct drm_device *dev,
6373 struct drm_crtc *crtc,
6374 struct drm_framebuffer *fb,
6375 struct drm_i915_gem_object *obj)
6376{
6377 struct drm_i915_private *dev_priv = dev->dev_private;
6378 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6379 struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
6380 int ret;
6381
6382 ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
6383 if (ret)
83d4092b 6384 goto err;
7c9017e5
JB
6385
6386 ret = intel_ring_begin(ring, 4);
6387 if (ret)
83d4092b 6388 goto err_unpin;
7c9017e5
JB
6389
6390 intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | (intel_crtc->plane << 19));
01f2c773 6391 intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
7c9017e5
JB
6392 intel_ring_emit(ring, (obj->gtt_offset));
6393 intel_ring_emit(ring, (MI_NOOP));
6394 intel_ring_advance(ring);
83d4092b
CW
6395 return 0;
6396
6397err_unpin:
6398 intel_unpin_fb_obj(obj);
6399err:
7c9017e5
JB
6400 return ret;
6401}
6402
8c9f3aaf
JB
6403static int intel_default_queue_flip(struct drm_device *dev,
6404 struct drm_crtc *crtc,
6405 struct drm_framebuffer *fb,
6406 struct drm_i915_gem_object *obj)
6407{
6408 return -ENODEV;
6409}
6410
6b95a207
KH
6411static int intel_crtc_page_flip(struct drm_crtc *crtc,
6412 struct drm_framebuffer *fb,
6413 struct drm_pending_vblank_event *event)
6414{
6415 struct drm_device *dev = crtc->dev;
6416 struct drm_i915_private *dev_priv = dev->dev_private;
6417 struct intel_framebuffer *intel_fb;
05394f39 6418 struct drm_i915_gem_object *obj;
6b95a207
KH
6419 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6420 struct intel_unpin_work *work;
8c9f3aaf 6421 unsigned long flags;
52e68630 6422 int ret;
6b95a207
KH
6423
6424 work = kzalloc(sizeof *work, GFP_KERNEL);
6425 if (work == NULL)
6426 return -ENOMEM;
6427
6b95a207
KH
6428 work->event = event;
6429 work->dev = crtc->dev;
6430 intel_fb = to_intel_framebuffer(crtc->fb);
b1b87f6b 6431 work->old_fb_obj = intel_fb->obj;
6b95a207
KH
6432 INIT_WORK(&work->work, intel_unpin_work_fn);
6433
7317c75e
JB
6434 ret = drm_vblank_get(dev, intel_crtc->pipe);
6435 if (ret)
6436 goto free_work;
6437
6b95a207
KH
6438 /* We borrow the event spin lock for protecting unpin_work */
6439 spin_lock_irqsave(&dev->event_lock, flags);
6440 if (intel_crtc->unpin_work) {
6441 spin_unlock_irqrestore(&dev->event_lock, flags);
6442 kfree(work);
7317c75e 6443 drm_vblank_put(dev, intel_crtc->pipe);
468f0b44
CW
6444
6445 DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
6b95a207
KH
6446 return -EBUSY;
6447 }
6448 intel_crtc->unpin_work = work;
6449 spin_unlock_irqrestore(&dev->event_lock, flags);
6450
6451 intel_fb = to_intel_framebuffer(fb);
6452 obj = intel_fb->obj;
6453
468f0b44 6454 mutex_lock(&dev->struct_mutex);
6b95a207 6455
75dfca80 6456 /* Reference the objects for the scheduled work. */
05394f39
CW
6457 drm_gem_object_reference(&work->old_fb_obj->base);
6458 drm_gem_object_reference(&obj->base);
6b95a207
KH
6459
6460 crtc->fb = fb;
96b099fd 6461
e1f99ce6 6462 work->pending_flip_obj = obj;
e1f99ce6 6463
4e5359cd
SF
6464 work->enable_stall_check = true;
6465
e1f99ce6
CW
6466 /* Block clients from rendering to the new back buffer until
6467 * the flip occurs and the object is no longer visible.
6468 */
05394f39 6469 atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
e1f99ce6 6470
8c9f3aaf
JB
6471 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj);
6472 if (ret)
6473 goto cleanup_pending;
6b95a207 6474
7782de3b 6475 intel_disable_fbc(dev);
acb87dfb 6476 intel_mark_busy(dev, obj);
6b95a207
KH
6477 mutex_unlock(&dev->struct_mutex);
6478
e5510fac
JB
6479 trace_i915_flip_request(intel_crtc->plane, obj);
6480
6b95a207 6481 return 0;
96b099fd 6482
8c9f3aaf
JB
6483cleanup_pending:
6484 atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
05394f39
CW
6485 drm_gem_object_unreference(&work->old_fb_obj->base);
6486 drm_gem_object_unreference(&obj->base);
96b099fd
CW
6487 mutex_unlock(&dev->struct_mutex);
6488
6489 spin_lock_irqsave(&dev->event_lock, flags);
6490 intel_crtc->unpin_work = NULL;
6491 spin_unlock_irqrestore(&dev->event_lock, flags);
6492
7317c75e
JB
6493 drm_vblank_put(dev, intel_crtc->pipe);
6494free_work:
96b099fd
CW
6495 kfree(work);
6496
6497 return ret;
6b95a207
KH
6498}
6499
47f1c6c9
CW
6500static void intel_sanitize_modesetting(struct drm_device *dev,
6501 int pipe, int plane)
6502{
6503 struct drm_i915_private *dev_priv = dev->dev_private;
6504 u32 reg, val;
a9dcf84b 6505 int i;
47f1c6c9 6506
f47166d2 6507 /* Clear any frame start delays used for debugging left by the BIOS */
a9dcf84b
DV
6508 for_each_pipe(i) {
6509 reg = PIPECONF(i);
f47166d2
CW
6510 I915_WRITE(reg, I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
6511 }
6512
47f1c6c9
CW
6513 if (HAS_PCH_SPLIT(dev))
6514 return;
6515
6516 /* Who knows what state these registers were left in by the BIOS or
6517 * grub?
6518 *
6519 * If we leave the registers in a conflicting state (e.g. with the
6520 * display plane reading from the other pipe than the one we intend
6521 * to use) then when we attempt to tear down the active mode, we will
6522 * not disable the pipes and planes in the correct order -- leaving
6523 * a plane reading from a disabled pipe and possibly leading to
6524 * undefined behaviour.
6525 */
6526
6527 reg = DSPCNTR(plane);
6528 val = I915_READ(reg);
6529
6530 if ((val & DISPLAY_PLANE_ENABLE) == 0)
6531 return;
6532 if (!!(val & DISPPLANE_SEL_PIPE_MASK) == pipe)
6533 return;
6534
6535 /* This display plane is active and attached to the other CPU pipe. */
6536 pipe = !pipe;
6537
6538 /* Disable the plane and wait for it to stop reading from the pipe. */
b24e7179
JB
6539 intel_disable_plane(dev_priv, plane, pipe);
6540 intel_disable_pipe(dev_priv, pipe);
47f1c6c9 6541}
79e53945 6542
f6e5b160
CW
6543static void intel_crtc_reset(struct drm_crtc *crtc)
6544{
6545 struct drm_device *dev = crtc->dev;
6546 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6547
6548 /* Reset flags back to the 'unknown' status so that they
6549 * will be correctly set on the initial modeset.
6550 */
6551 intel_crtc->dpms_mode = -1;
6552
6553 /* We need to fix up any BIOS configuration that conflicts with
6554 * our expectations.
6555 */
6556 intel_sanitize_modesetting(dev, intel_crtc->pipe, intel_crtc->plane);
6557}
6558
6559static struct drm_crtc_helper_funcs intel_helper_funcs = {
6560 .dpms = intel_crtc_dpms,
6561 .mode_fixup = intel_crtc_mode_fixup,
6562 .mode_set = intel_crtc_mode_set,
6563 .mode_set_base = intel_pipe_set_base,
6564 .mode_set_base_atomic = intel_pipe_set_base_atomic,
6565 .load_lut = intel_crtc_load_lut,
6566 .disable = intel_crtc_disable,
6567};
6568
6569static const struct drm_crtc_funcs intel_crtc_funcs = {
6570 .reset = intel_crtc_reset,
6571 .cursor_set = intel_crtc_cursor_set,
6572 .cursor_move = intel_crtc_cursor_move,
6573 .gamma_set = intel_crtc_gamma_set,
6574 .set_config = drm_crtc_helper_set_config,
6575 .destroy = intel_crtc_destroy,
6576 .page_flip = intel_crtc_page_flip,
6577};
6578
ee7b9f93
JB
6579static void intel_pch_pll_init(struct drm_device *dev)
6580{
6581 drm_i915_private_t *dev_priv = dev->dev_private;
6582 int i;
6583
6584 if (dev_priv->num_pch_pll == 0) {
6585 DRM_DEBUG_KMS("No PCH PLLs on this hardware, skipping initialisation\n");
6586 return;
6587 }
6588
6589 for (i = 0; i < dev_priv->num_pch_pll; i++) {
6590 dev_priv->pch_plls[i].pll_reg = _PCH_DPLL(i);
6591 dev_priv->pch_plls[i].fp0_reg = _PCH_FP0(i);
6592 dev_priv->pch_plls[i].fp1_reg = _PCH_FP1(i);
6593 }
6594}
6595
b358d0a6 6596static void intel_crtc_init(struct drm_device *dev, int pipe)
79e53945 6597{
22fd0fab 6598 drm_i915_private_t *dev_priv = dev->dev_private;
79e53945
JB
6599 struct intel_crtc *intel_crtc;
6600 int i;
6601
6602 intel_crtc = kzalloc(sizeof(struct intel_crtc) + (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL);
6603 if (intel_crtc == NULL)
6604 return;
6605
6606 drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs);
6607
6608 drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256);
79e53945
JB
6609 for (i = 0; i < 256; i++) {
6610 intel_crtc->lut_r[i] = i;
6611 intel_crtc->lut_g[i] = i;
6612 intel_crtc->lut_b[i] = i;
6613 }
6614
80824003
JB
6615 /* Swap pipes & planes for FBC on pre-965 */
6616 intel_crtc->pipe = pipe;
6617 intel_crtc->plane = pipe;
e2e767ab 6618 if (IS_MOBILE(dev) && IS_GEN3(dev)) {
28c97730 6619 DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
e2e767ab 6620 intel_crtc->plane = !pipe;
80824003
JB
6621 }
6622
22fd0fab
JB
6623 BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
6624 dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
6625 dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
6626 dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
6627
5d1d0cc8 6628 intel_crtc_reset(&intel_crtc->base);
04dbff52 6629 intel_crtc->active = true; /* force the pipe off on setup_init_config */
5a354204 6630 intel_crtc->bpp = 24; /* default for pre-Ironlake */
7e7d76c3
JB
6631
6632 if (HAS_PCH_SPLIT(dev)) {
6633 intel_helper_funcs.prepare = ironlake_crtc_prepare;
6634 intel_helper_funcs.commit = ironlake_crtc_commit;
6635 } else {
6636 intel_helper_funcs.prepare = i9xx_crtc_prepare;
6637 intel_helper_funcs.commit = i9xx_crtc_commit;
6638 }
6639
79e53945
JB
6640 drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
6641
652c393a
JB
6642 intel_crtc->busy = false;
6643
6644 setup_timer(&intel_crtc->idle_timer, intel_crtc_idle_timer,
6645 (unsigned long)intel_crtc);
79e53945
JB
6646}
6647
08d7b3d1 6648int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
05394f39 6649 struct drm_file *file)
08d7b3d1 6650{
08d7b3d1 6651 struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
c05422d5
DV
6652 struct drm_mode_object *drmmode_obj;
6653 struct intel_crtc *crtc;
08d7b3d1 6654
1cff8f6b
DV
6655 if (!drm_core_check_feature(dev, DRIVER_MODESET))
6656 return -ENODEV;
08d7b3d1 6657
c05422d5
DV
6658 drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id,
6659 DRM_MODE_OBJECT_CRTC);
08d7b3d1 6660
c05422d5 6661 if (!drmmode_obj) {
08d7b3d1
CW
6662 DRM_ERROR("no such CRTC id\n");
6663 return -EINVAL;
6664 }
6665
c05422d5
DV
6666 crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
6667 pipe_from_crtc_id->pipe = crtc->pipe;
08d7b3d1 6668
c05422d5 6669 return 0;
08d7b3d1
CW
6670}
6671
c5e4df33 6672static int intel_encoder_clones(struct drm_device *dev, int type_mask)
79e53945 6673{
4ef69c7a 6674 struct intel_encoder *encoder;
79e53945 6675 int index_mask = 0;
79e53945
JB
6676 int entry = 0;
6677
4ef69c7a
CW
6678 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
6679 if (type_mask & encoder->clone_mask)
79e53945
JB
6680 index_mask |= (1 << entry);
6681 entry++;
6682 }
4ef69c7a 6683
79e53945
JB
6684 return index_mask;
6685}
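
/*
 * For illustration: the mask built above is positional over the encoder
 * list.  If, say, the first and third encoders registered have a clone_mask
 * that intersects type_mask, the function returns 0x5 (binary 101), which
 * the caller stores in encoder->base.possible_clones.
 */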
6686
4d302442
CW
6687static bool has_edp_a(struct drm_device *dev)
6688{
6689 struct drm_i915_private *dev_priv = dev->dev_private;
6690
6691 if (!IS_MOBILE(dev))
6692 return false;
6693
6694 if ((I915_READ(DP_A) & DP_DETECTED) == 0)
6695 return false;
6696
6697 if (IS_GEN5(dev) &&
6698 (I915_READ(ILK_DISPLAY_CHICKEN_FUSES) & ILK_eDP_A_DISABLE))
6699 return false;
6700
6701 return true;
6702}
6703
79e53945
JB
6704static void intel_setup_outputs(struct drm_device *dev)
6705{
725e30ad 6706 struct drm_i915_private *dev_priv = dev->dev_private;
4ef69c7a 6707 struct intel_encoder *encoder;
cb0953d7 6708 bool dpd_is_edp = false;
f3cfcba6 6709 bool has_lvds;
79e53945 6710
f3cfcba6 6711 has_lvds = intel_lvds_init(dev);
c5d1b51d
CW
6712 if (!has_lvds && !HAS_PCH_SPLIT(dev)) {
6713 /* disable the panel fitter on everything but LVDS */
6714 I915_WRITE(PFIT_CONTROL, 0);
6715 }
79e53945 6716
bad720ff 6717 if (HAS_PCH_SPLIT(dev)) {
cb0953d7 6718 dpd_is_edp = intel_dpd_is_edp(dev);
30ad48b7 6719
4d302442 6720 if (has_edp_a(dev))
32f9d658
ZW
6721 intel_dp_init(dev, DP_A);
6722
cb0953d7
AJ
6723 if (dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
6724 intel_dp_init(dev, PCH_DP_D);
6725 }
6726
6727 intel_crt_init(dev);
6728
0e72a5b5
ED
6729 if (IS_HASWELL(dev)) {
6730 int found;
6731
6732 /* Haswell uses DDI functions to detect digital outputs */
6733 found = I915_READ(DDI_BUF_CTL_A) & DDI_INIT_DISPLAY_DETECTED;
6734 /* DDI A only supports eDP */
6735 if (found)
6736 intel_ddi_init(dev, PORT_A);
6737
6738 /* DDI B, C and D detection is indicated by the SFUSE_STRAP
6739 * register */
6740 found = I915_READ(SFUSE_STRAP);
6741
6742 if (found & SFUSE_STRAP_DDIB_DETECTED)
6743 intel_ddi_init(dev, PORT_B);
6744 if (found & SFUSE_STRAP_DDIC_DETECTED)
6745 intel_ddi_init(dev, PORT_C);
6746 if (found & SFUSE_STRAP_DDID_DETECTED)
6747 intel_ddi_init(dev, PORT_D);
6748 } else if (HAS_PCH_SPLIT(dev)) {
cb0953d7
AJ
6749 int found;
6750
30ad48b7 6751 if (I915_READ(HDMIB) & PORT_DETECTED) {
461ed3ca 6752 /* PCH SDVOB multiplex with HDMIB */
eef4eacb 6753 found = intel_sdvo_init(dev, PCH_SDVOB, true);
30ad48b7
ZW
6754 if (!found)
6755 intel_hdmi_init(dev, HDMIB);
5eb08b69
ZW
6756 if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
6757 intel_dp_init(dev, PCH_DP_B);
30ad48b7
ZW
6758 }
6759
6760 if (I915_READ(HDMIC) & PORT_DETECTED)
6761 intel_hdmi_init(dev, HDMIC);
6762
6763 if (I915_READ(HDMID) & PORT_DETECTED)
6764 intel_hdmi_init(dev, HDMID);
6765
5eb08b69
ZW
6766 if (I915_READ(PCH_DP_C) & DP_DETECTED)
6767 intel_dp_init(dev, PCH_DP_C);
6768
cb0953d7 6769 if (!dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
5eb08b69
ZW
6770 intel_dp_init(dev, PCH_DP_D);
6771
103a196f 6772 } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
27185ae1 6773 bool found = false;
7d57382e 6774
725e30ad 6775 if (I915_READ(SDVOB) & SDVO_DETECTED) {
b01f2c3a 6776 DRM_DEBUG_KMS("probing SDVOB\n");
eef4eacb 6777 found = intel_sdvo_init(dev, SDVOB, true);
b01f2c3a
JB
6778 if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) {
6779 DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
725e30ad 6780 intel_hdmi_init(dev, SDVOB);
b01f2c3a 6781 }
27185ae1 6782
b01f2c3a
JB
6783 if (!found && SUPPORTS_INTEGRATED_DP(dev)) {
6784 DRM_DEBUG_KMS("probing DP_B\n");
a4fc5ed6 6785 intel_dp_init(dev, DP_B);
b01f2c3a 6786 }
725e30ad 6787 }
13520b05
KH
6788
6789 /* Before G4X SDVOC doesn't have its own detect register */
13520b05 6790
b01f2c3a
JB
6791 if (I915_READ(SDVOB) & SDVO_DETECTED) {
6792 DRM_DEBUG_KMS("probing SDVOC\n");
eef4eacb 6793 found = intel_sdvo_init(dev, SDVOC, false);
b01f2c3a 6794 }
27185ae1
ML
6795
6796 if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) {
6797
b01f2c3a
JB
6798 if (SUPPORTS_INTEGRATED_HDMI(dev)) {
6799 DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
725e30ad 6800 intel_hdmi_init(dev, SDVOC);
b01f2c3a
JB
6801 }
6802 if (SUPPORTS_INTEGRATED_DP(dev)) {
6803 DRM_DEBUG_KMS("probing DP_C\n");
a4fc5ed6 6804 intel_dp_init(dev, DP_C);
b01f2c3a 6805 }
725e30ad 6806 }
27185ae1 6807
b01f2c3a
JB
6808 if (SUPPORTS_INTEGRATED_DP(dev) &&
6809 (I915_READ(DP_D) & DP_DETECTED)) {
6810 DRM_DEBUG_KMS("probing DP_D\n");
a4fc5ed6 6811 intel_dp_init(dev, DP_D);
b01f2c3a 6812 }
bad720ff 6813 } else if (IS_GEN2(dev))
79e53945
JB
6814 intel_dvo_init(dev);
6815
103a196f 6816 if (SUPPORTS_TV(dev))
79e53945
JB
6817 intel_tv_init(dev);
6818
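	/*
	 * Added note: propagate each encoder's intel-private crtc_mask and
	 * clone_mask into the generic DRM possible_crtcs/possible_clones
	 * fields, which tell the core which CRTCs an encoder can drive and
	 * which encoders may be cloned together.
	 */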
4ef69c7a
CW
6819 list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) {
6820 encoder->base.possible_crtcs = encoder->crtc_mask;
6821 encoder->base.possible_clones =
6822 intel_encoder_clones(dev, encoder->clone_mask);
79e53945 6823 }
47356eb6 6824
2c7111db
CW
6825 /* disable all the possible outputs/crtcs before entering KMS mode */
6826 drm_helper_disable_unused_functions(dev);
9fb526db
KP
6827
6828 if (HAS_PCH_SPLIT(dev))
6829 ironlake_init_pch_refclk(dev);
79e53945
JB
6830}
6831
6832static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
6833{
6834 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
79e53945
JB
6835
6836 drm_framebuffer_cleanup(fb);
05394f39 6837 drm_gem_object_unreference_unlocked(&intel_fb->obj->base);
79e53945
JB
6838
6839 kfree(intel_fb);
6840}
6841
6842static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
05394f39 6843 struct drm_file *file,
79e53945
JB
6844 unsigned int *handle)
6845{
6846 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
05394f39 6847 struct drm_i915_gem_object *obj = intel_fb->obj;
79e53945 6848
05394f39 6849 return drm_gem_handle_create(file, &obj->base, handle);
79e53945
JB
6850}
6851
6852static const struct drm_framebuffer_funcs intel_fb_funcs = {
6853 .destroy = intel_user_framebuffer_destroy,
6854 .create_handle = intel_user_framebuffer_create_handle,
6855};
6856
38651674
DA
6857int intel_framebuffer_init(struct drm_device *dev,
6858 struct intel_framebuffer *intel_fb,
308e5bcb 6859 struct drm_mode_fb_cmd2 *mode_cmd,
05394f39 6860 struct drm_i915_gem_object *obj)
79e53945 6861{
79e53945
JB
6862 int ret;
6863
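	/*
	 * Added note: reject framebuffers the display hardware cannot scan
	 * out here -- Y-tiled objects are not accepted, and the stride
	 * (pitches[0]) must be a multiple of 64 bytes.
	 */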
05394f39 6864 if (obj->tiling_mode == I915_TILING_Y)
57cd6508
CW
6865 return -EINVAL;
6866
308e5bcb 6867 if (mode_cmd->pitches[0] & 63)
57cd6508
CW
6868 return -EINVAL;
6869
308e5bcb 6870 switch (mode_cmd->pixel_format) {
04b3924d
VS
6871 case DRM_FORMAT_RGB332:
6872 case DRM_FORMAT_RGB565:
6873 case DRM_FORMAT_XRGB8888:
b250da79 6874 case DRM_FORMAT_XBGR8888:
04b3924d
VS
6875 case DRM_FORMAT_ARGB8888:
6876 case DRM_FORMAT_XRGB2101010:
6877 case DRM_FORMAT_ARGB2101010:
308e5bcb 6878 /* RGB formats are common across chipsets */
b5626747 6879 break;
04b3924d
VS
6880 case DRM_FORMAT_YUYV:
6881 case DRM_FORMAT_UYVY:
6882 case DRM_FORMAT_YVYU:
6883 case DRM_FORMAT_VYUY:
57cd6508
CW
6884 break;
6885 default:
aca25848
ED
6886 DRM_DEBUG_KMS("unsupported pixel format %u\n",
6887 mode_cmd->pixel_format);
57cd6508
CW
6888 return -EINVAL;
6889 }
6890
79e53945
JB
6891 ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
6892 if (ret) {
6893 DRM_ERROR("framebuffer init failed %d\n", ret);
6894 return ret;
6895 }
6896
6897 drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
79e53945 6898 intel_fb->obj = obj;
79e53945
JB
6899 return 0;
6900}
6901
79e53945
JB
6902static struct drm_framebuffer *
6903intel_user_framebuffer_create(struct drm_device *dev,
6904 struct drm_file *filp,
308e5bcb 6905 struct drm_mode_fb_cmd2 *mode_cmd)
79e53945 6906{
05394f39 6907 struct drm_i915_gem_object *obj;
79e53945 6908
308e5bcb
JB
6909 obj = to_intel_bo(drm_gem_object_lookup(dev, filp,
6910 mode_cmd->handles[0]));
c8725226 6911 if (&obj->base == NULL)
cce13ff7 6912 return ERR_PTR(-ENOENT);
79e53945 6913
d2dff872 6914 return intel_framebuffer_create(dev, mode_cmd, obj);
79e53945
JB
6915}
6916
79e53945 6917static const struct drm_mode_config_funcs intel_mode_funcs = {
79e53945 6918 .fb_create = intel_user_framebuffer_create,
eb1f8e4f 6919 .output_poll_changed = intel_fb_output_poll_changed,
79e53945
JB
6920};
6921
e70236a8
JB
6922/* Set up chip specific display functions */
6923static void intel_init_display(struct drm_device *dev)
6924{
6925 struct drm_i915_private *dev_priv = dev->dev_private;
6926
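	/*
	 * Added note: per-platform display function pointers are filled in
	 * once at init time; the modeset and flip paths then dispatch
	 * through dev_priv->display instead of repeating platform checks.
	 */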
6927 /* We always want a DPMS function */
f564048e 6928 if (HAS_PCH_SPLIT(dev)) {
f2b115e6 6929 dev_priv->display.dpms = ironlake_crtc_dpms;
f564048e 6930 dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set;
ee7b9f93 6931 dev_priv->display.off = ironlake_crtc_off;
17638cd6 6932 dev_priv->display.update_plane = ironlake_update_plane;
f564048e 6933 } else {
e70236a8 6934 dev_priv->display.dpms = i9xx_crtc_dpms;
f564048e 6935 dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set;
ee7b9f93 6936 dev_priv->display.off = i9xx_crtc_off;
17638cd6 6937 dev_priv->display.update_plane = i9xx_update_plane;
f564048e 6938 }
e70236a8 6939
e70236a8 6940 /* Returns the core display clock speed */
25eb05fc
JB
6941 if (IS_VALLEYVIEW(dev))
6942 dev_priv->display.get_display_clock_speed =
6943 valleyview_get_display_clock_speed;
6944 else if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev)))
e70236a8
JB
6945 dev_priv->display.get_display_clock_speed =
6946 i945_get_display_clock_speed;
6947 else if (IS_I915G(dev))
6948 dev_priv->display.get_display_clock_speed =
6949 i915_get_display_clock_speed;
f2b115e6 6950 else if (IS_I945GM(dev) || IS_845G(dev) || IS_PINEVIEW_M(dev))
e70236a8
JB
6951 dev_priv->display.get_display_clock_speed =
6952 i9xx_misc_get_display_clock_speed;
6953 else if (IS_I915GM(dev))
6954 dev_priv->display.get_display_clock_speed =
6955 i915gm_get_display_clock_speed;
6956 else if (IS_I865G(dev))
6957 dev_priv->display.get_display_clock_speed =
6958 i865_get_display_clock_speed;
f0f8a9ce 6959 else if (IS_I85X(dev))
e70236a8
JB
6960 dev_priv->display.get_display_clock_speed =
6961 i855_get_display_clock_speed;
6962 else /* 852, 830 */
6963 dev_priv->display.get_display_clock_speed =
6964 i830_get_display_clock_speed;
6965
7f8a8569 6966 if (HAS_PCH_SPLIT(dev)) {
f00a3ddf 6967 if (IS_GEN5(dev)) {
674cf967 6968 dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
e0dac65e 6969 dev_priv->display.write_eld = ironlake_write_eld;
1398261a 6970 } else if (IS_GEN6(dev)) {
674cf967 6971 dev_priv->display.fdi_link_train = gen6_fdi_link_train;
e0dac65e 6972 dev_priv->display.write_eld = ironlake_write_eld;
357555c0
JB
6973 } else if (IS_IVYBRIDGE(dev)) {
6974 /* FIXME: detect B0+ stepping and use auto training */
6975 dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
e0dac65e 6976 dev_priv->display.write_eld = ironlake_write_eld;
c82e4d26
ED
6977 } else if (IS_HASWELL(dev)) {
6978 dev_priv->display.fdi_link_train = hsw_fdi_link_train;
4abb3c8c 6979 dev_priv->display.write_eld = ironlake_write_eld;
7f8a8569
ZW
6980 } else
6981 dev_priv->display.update_wm = NULL;
ceb04246 6982 } else if (IS_VALLEYVIEW(dev)) {
575155a9
JB
6983 dev_priv->display.force_wake_get = vlv_force_wake_get;
6984 dev_priv->display.force_wake_put = vlv_force_wake_put;
6067aaea 6985 } else if (IS_G4X(dev)) {
e0dac65e 6986 dev_priv->display.write_eld = g4x_write_eld;
e70236a8 6987 }
8c9f3aaf
JB
6988
6989 /* Default just returns -ENODEV to indicate unsupported */
6990 dev_priv->display.queue_flip = intel_default_queue_flip;
6991
6992 switch (INTEL_INFO(dev)->gen) {
6993 case 2:
6994 dev_priv->display.queue_flip = intel_gen2_queue_flip;
6995 break;
6996
6997 case 3:
6998 dev_priv->display.queue_flip = intel_gen3_queue_flip;
6999 break;
7000
7001 case 4:
7002 case 5:
7003 dev_priv->display.queue_flip = intel_gen4_queue_flip;
7004 break;
7005
7006 case 6:
7007 dev_priv->display.queue_flip = intel_gen6_queue_flip;
7008 break;
7c9017e5
JB
7009 case 7:
7010 dev_priv->display.queue_flip = intel_gen7_queue_flip;
7011 break;
8c9f3aaf 7012 }
e70236a8
JB
7013}
7014
b690e96c
JB
7015/*
7016 * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
7017 * resume, or other times. This quirk makes sure that's the case for
7018 * affected systems.
7019 */
0206e353 7020static void quirk_pipea_force(struct drm_device *dev)
b690e96c
JB
7021{
7022 struct drm_i915_private *dev_priv = dev->dev_private;
7023
7024 dev_priv->quirks |= QUIRK_PIPEA_FORCE;
bc0daf48 7025 DRM_INFO("applying pipe a force quirk\n");
b690e96c
JB
7026}
7027
435793df
KP
7028/*
7029 * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
7030 */
7031static void quirk_ssc_force_disable(struct drm_device *dev)
7032{
7033 struct drm_i915_private *dev_priv = dev->dev_private;
7034 dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
bc0daf48 7035 DRM_INFO("applying lvds SSC disable quirk\n");
435793df
KP
7036}
7037
4dca20ef 7038/*
5a15ab5b
CE
7039 * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
7040 * brightness value
4dca20ef
CE
7041 */
7042static void quirk_invert_brightness(struct drm_device *dev)
7043{
7044 struct drm_i915_private *dev_priv = dev->dev_private;
7045 dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
bc0daf48 7046 DRM_INFO("applying inverted panel brightness quirk\n");
435793df
KP
7047}
7048
b690e96c
JB
7049struct intel_quirk {
7050 int device;
7051 int subsystem_vendor;
7052 int subsystem_device;
7053 void (*hook)(struct drm_device *dev);
7054};
7055
c43b5634 7056static struct intel_quirk intel_quirks[] = {
b690e96c 7057 /* HP Mini needs pipe A force quirk (LP: #322104) */
0206e353 7058 { 0x27ae, 0x103c, 0x361a, quirk_pipea_force },
b690e96c
JB
7059
7060 /* Thinkpad R31 needs pipe A force quirk */
7061 { 0x3577, 0x1014, 0x0505, quirk_pipea_force },
7062 /* Toshiba Protege R-205, S-209 needs pipe A force quirk */
7063 { 0x2592, 0x1179, 0x0001, quirk_pipea_force },
7064
7065 /* ThinkPad X30 needs pipe A force quirk (LP: #304614) */
7066 { 0x3577, 0x1014, 0x0513, quirk_pipea_force },
7067 /* ThinkPad X40 needs pipe A force quirk */
7068
7069 /* ThinkPad T60 needs pipe A force quirk (bug #16494) */
7070 { 0x2782, 0x17aa, 0x201a, quirk_pipea_force },
7071
7072 /* 855 & before need to leave pipe A & dpll A up */
7073 { 0x3582, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
7074 { 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
435793df
KP
7075
7076 /* Lenovo U160 cannot use SSC on LVDS */
7077 { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
070d329a
MAS
7078
7079 /* Sony Vaio Y cannot use SSC on LVDS */
7080 { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
5a15ab5b
CE
7081
7082 /* Acer Aspire 5734Z must invert backlight brightness */
7083 { 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
b690e96c
JB
7084};
7085
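/*
 * Added note: apply every quirk that matches this PCI device -- the device
 * ID must match exactly, while the subsystem vendor and device IDs either
 * match or are wildcarded with PCI_ANY_ID in the table above.
 */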
7086static void intel_init_quirks(struct drm_device *dev)
7087{
7088 struct pci_dev *d = dev->pdev;
7089 int i;
7090
7091 for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
7092 struct intel_quirk *q = &intel_quirks[i];
7093
7094 if (d->device == q->device &&
7095 (d->subsystem_vendor == q->subsystem_vendor ||
7096 q->subsystem_vendor == PCI_ANY_ID) &&
7097 (d->subsystem_device == q->subsystem_device ||
7098 q->subsystem_device == PCI_ANY_ID))
7099 q->hook(dev);
7100 }
7101}
7102
9cce37f4
JB
7103/* Disable the VGA plane that we never use */
7104static void i915_disable_vga(struct drm_device *dev)
7105{
7106 struct drm_i915_private *dev_priv = dev->dev_private;
7107 u8 sr1;
7108 u32 vga_reg;
7109
7110 if (HAS_PCH_SPLIT(dev))
7111 vga_reg = CPU_VGACNTRL;
7112 else
7113 vga_reg = VGACNTRL;
7114
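	/*
	 * Added note: hold the legacy VGA I/O range while setting the
	 * screen-off bit (bit 5) in VGA sequencer register SR01, then turn
	 * off the VGA display plane itself via the VGACNTRL write below.
	 */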
7115 vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
3fdcf431 7116 outb(SR01, VGA_SR_INDEX);
9cce37f4
JB
7117 sr1 = inb(VGA_SR_DATA);
7118 outb(sr1 | 1<<5, VGA_SR_DATA);
7119 vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
7120 udelay(300);
7121
7122 I915_WRITE(vga_reg, VGA_DISP_DISABLE);
7123 POSTING_READ(vga_reg);
7124}
7125
f82cfb6b
JB
7126static void ivb_pch_pwm_override(struct drm_device *dev)
7127{
7128 struct drm_i915_private *dev_priv = dev->dev_private;
7129
7130 /*
7131 * IVB has CPU eDP backlight regs too, set things up to let the
7132 * PCH regs control the backlight
7133 */
7cf41601 7134 I915_WRITE(BLC_PWM_CPU_CTL2, BLM_PWM_ENABLE);
f82cfb6b 7135 I915_WRITE(BLC_PWM_CPU_CTL, 0);
7cf41601 7136 I915_WRITE(BLC_PWM_PCH_CTL1, BLM_PCH_PWM_ENABLE | BLM_PCH_OVERRIDE_ENABLE);
f82cfb6b
JB
7137}
7138
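/*
 * Added note: this programs hardware state that may need to be redone after
 * the initial modeset setup -- clock gating, Ironlake-M DRPS/RC6/emon,
 * gen6/gen7 RPS and ring frequency scaling, and the Ivybridge PCH backlight
 * override -- which is presumably why it is kept separate from
 * intel_modeset_init().
 */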
f817586c
DV
7139void intel_modeset_init_hw(struct drm_device *dev)
7140{
7141 struct drm_i915_private *dev_priv = dev->dev_private;
7142
7143 intel_init_clock_gating(dev);
7144
7145 if (IS_IRONLAKE_M(dev)) {
7146 ironlake_enable_drps(dev);
1833b134 7147 ironlake_enable_rc6(dev);
f817586c
DV
7148 intel_init_emon(dev);
7149 }
7150
b6834bd6 7151 if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev)) {
f817586c
DV
7152 gen6_enable_rps(dev_priv);
7153 gen6_update_ring_freq(dev_priv);
7154 }
f82cfb6b
JB
7155
7156 if (IS_IVYBRIDGE(dev))
7157 ivb_pch_pwm_override(dev);
f817586c
DV
7158}
7159
79e53945
JB
7160void intel_modeset_init(struct drm_device *dev)
7161{
652c393a 7162 struct drm_i915_private *dev_priv = dev->dev_private;
b840d907 7163 int i, ret;
79e53945
JB
7164
7165 drm_mode_config_init(dev);
7166
7167 dev->mode_config.min_width = 0;
7168 dev->mode_config.min_height = 0;
7169
019d96cb
DA
7170 dev->mode_config.preferred_depth = 24;
7171 dev->mode_config.prefer_shadow = 1;
7172
e6ecefaa 7173 dev->mode_config.funcs = &intel_mode_funcs;
79e53945 7174
b690e96c
JB
7175 intel_init_quirks(dev);
7176
1fa61106
ED
7177 intel_init_pm(dev);
7178
45244b87
ED
7179 intel_prepare_ddi(dev);
7180
e70236a8
JB
7181 intel_init_display(dev);
7182
a6c45cf0
CW
7183 if (IS_GEN2(dev)) {
7184 dev->mode_config.max_width = 2048;
7185 dev->mode_config.max_height = 2048;
7186 } else if (IS_GEN3(dev)) {
5e4d6fa7
KP
7187 dev->mode_config.max_width = 4096;
7188 dev->mode_config.max_height = 4096;
79e53945 7189 } else {
a6c45cf0
CW
7190 dev->mode_config.max_width = 8192;
7191 dev->mode_config.max_height = 8192;
79e53945 7192 }
dd2757f8 7193 dev->mode_config.fb_base = dev_priv->mm.gtt_base_addr;
79e53945 7194
28c97730 7195 DRM_DEBUG_KMS("%d display pipe%s available.\n",
a3524f1b 7196 dev_priv->num_pipe, dev_priv->num_pipe > 1 ? "s" : "");
79e53945 7197
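	/*
	 * Added note: create one CRTC per pipe, plus a sprite plane per
	 * pipe; a sprite plane init failure is only logged, not treated as
	 * fatal.
	 */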
a3524f1b 7198 for (i = 0; i < dev_priv->num_pipe; i++) {
79e53945 7199 intel_crtc_init(dev, i);
00c2064b
JB
7200 ret = intel_plane_init(dev, i);
7201 if (ret)
7202 DRM_DEBUG_KMS("plane %d init failed: %d\n", i, ret);
79e53945
JB
7203 }
7204
ee7b9f93
JB
7205 intel_pch_pll_init(dev);
7206
9cce37f4
JB
7207 /* Just disable it once at startup */
7208 i915_disable_vga(dev);
79e53945 7209 intel_setup_outputs(dev);
652c393a 7210
652c393a
JB
7211 INIT_WORK(&dev_priv->idle_work, intel_idle_update);
7212 setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
7213 (unsigned long)dev);
2c7111db
CW
7214}
7215
7216void intel_modeset_gem_init(struct drm_device *dev)
7217{
1833b134 7218 intel_modeset_init_hw(dev);
02e792fb
DV
7219
7220 intel_setup_overlay(dev);
79e53945
JB
7221}
7222
7223void intel_modeset_cleanup(struct drm_device *dev)
7224{
652c393a
JB
7225 struct drm_i915_private *dev_priv = dev->dev_private;
7226 struct drm_crtc *crtc;
7227 struct intel_crtc *intel_crtc;
7228
f87ea761 7229 drm_kms_helper_poll_fini(dev);
652c393a
JB
7230 mutex_lock(&dev->struct_mutex);
7231
723bfd70
JB
7232 intel_unregister_dsm_handler();
7233
7234
652c393a
JB
7235 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7236 /* Skip inactive CRTCs */
7237 if (!crtc->fb)
7238 continue;
7239
7240 intel_crtc = to_intel_crtc(crtc);
3dec0095 7241 intel_increase_pllclock(crtc);
652c393a
JB
7242 }
7243
973d04f9 7244 intel_disable_fbc(dev);
e70236a8 7245
f97108d1
JB
7246 if (IS_IRONLAKE_M(dev))
7247 ironlake_disable_drps(dev);
b6834bd6 7248 if ((IS_GEN6(dev) || IS_GEN7(dev)) && !IS_VALLEYVIEW(dev))
3b8d8d91 7249 gen6_disable_rps(dev);
f97108d1 7250
d5bb081b
JB
7251 if (IS_IRONLAKE_M(dev))
7252 ironlake_disable_rc6(dev);
0cdab21f 7253
57f350b6
JB
7254 if (IS_VALLEYVIEW(dev))
7255 vlv_init_dpio(dev);
7256
69341a5e
KH
7257 mutex_unlock(&dev->struct_mutex);
7258
6c0d9350
DV
 7259 /* Disable the irq before mode object teardown, because the irq might
7260 * enqueue unpin/hotplug work. */
7261 drm_irq_uninstall(dev);
7262 cancel_work_sync(&dev_priv->hotplug_work);
6fdd4d98 7263 cancel_work_sync(&dev_priv->rps_work);
6c0d9350 7264
1630fe75
CW
7265 /* flush any delayed tasks or pending work */
7266 flush_scheduled_work();
7267
3dec0095
DV
7268 /* Shut off idle work before the crtcs get freed. */
7269 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
7270 intel_crtc = to_intel_crtc(crtc);
7271 del_timer_sync(&intel_crtc->idle_timer);
7272 }
7273 del_timer_sync(&dev_priv->idle_timer);
7274 cancel_work_sync(&dev_priv->idle_work);
7275
79e53945
JB
7276 drm_mode_config_cleanup(dev);
7277}
7278
f1c79df3
ZW
7279/*
 7280 * Return which encoder is currently attached to the given connector.
7281 */
df0e9248 7282struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
79e53945 7283{
df0e9248
CW
7284 return &intel_attached_encoder(connector)->base;
7285}
f1c79df3 7286
df0e9248
CW
7287void intel_connector_attach_encoder(struct intel_connector *connector,
7288 struct intel_encoder *encoder)
7289{
7290 connector->encoder = encoder;
7291 drm_mode_connector_attach_encoder(&connector->base,
7292 &encoder->base);
79e53945 7293}
28d52043
DA
7294
7295/*
7296 * set vga decode state - true == enable VGA decode
7297 */
7298int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
7299{
7300 struct drm_i915_private *dev_priv = dev->dev_private;
7301 u16 gmch_ctrl;
7302
7303 pci_read_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, &gmch_ctrl);
7304 if (state)
7305 gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
7306 else
7307 gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
7308 pci_write_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl);
7309 return 0;
7310}
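/*
 * Added note: callers outside this file (typically the VGA arbiter's
 * set-decode hook) are assumed to use this to toggle the
 * INTEL_GMCH_VGA_DISABLE bit in the bridge device's GMCH control register.
 */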
c4a1d9e4
CW
7311
7312#ifdef CONFIG_DEBUG_FS
7313#include <linux/seq_file.h>
7314
7315struct intel_display_error_state {
7316 struct intel_cursor_error_state {
7317 u32 control;
7318 u32 position;
7319 u32 base;
7320 u32 size;
7321 } cursor[2];
7322
7323 struct intel_pipe_error_state {
7324 u32 conf;
7325 u32 source;
7326
7327 u32 htotal;
7328 u32 hblank;
7329 u32 hsync;
7330 u32 vtotal;
7331 u32 vblank;
7332 u32 vsync;
7333 } pipe[2];
7334
7335 struct intel_plane_error_state {
7336 u32 control;
7337 u32 stride;
7338 u32 size;
7339 u32 pos;
7340 u32 addr;
7341 u32 surface;
7342 u32 tile_offset;
7343 } plane[2];
7344};
7345
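/*
 * Added note: snapshot of per-pipe display registers for the GPU error
 * capture path. The allocation is GFP_ATOMIC because this can run from a
 * context that must not sleep; only the first two pipes/planes/cursors are
 * recorded.
 */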
7346struct intel_display_error_state *
7347intel_display_capture_error_state(struct drm_device *dev)
7348{
0206e353 7349 drm_i915_private_t *dev_priv = dev->dev_private;
c4a1d9e4
CW
7350 struct intel_display_error_state *error;
7351 int i;
7352
7353 error = kmalloc(sizeof(*error), GFP_ATOMIC);
7354 if (error == NULL)
7355 return NULL;
7356
7357 for (i = 0; i < 2; i++) {
7358 error->cursor[i].control = I915_READ(CURCNTR(i));
7359 error->cursor[i].position = I915_READ(CURPOS(i));
7360 error->cursor[i].base = I915_READ(CURBASE(i));
7361
7362 error->plane[i].control = I915_READ(DSPCNTR(i));
7363 error->plane[i].stride = I915_READ(DSPSTRIDE(i));
7364 error->plane[i].size = I915_READ(DSPSIZE(i));
0206e353 7365 error->plane[i].pos = I915_READ(DSPPOS(i));
c4a1d9e4
CW
7366 error->plane[i].addr = I915_READ(DSPADDR(i));
7367 if (INTEL_INFO(dev)->gen >= 4) {
7368 error->plane[i].surface = I915_READ(DSPSURF(i));
7369 error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
7370 }
7371
7372 error->pipe[i].conf = I915_READ(PIPECONF(i));
7373 error->pipe[i].source = I915_READ(PIPESRC(i));
7374 error->pipe[i].htotal = I915_READ(HTOTAL(i));
7375 error->pipe[i].hblank = I915_READ(HBLANK(i));
7376 error->pipe[i].hsync = I915_READ(HSYNC(i));
7377 error->pipe[i].vtotal = I915_READ(VTOTAL(i));
7378 error->pipe[i].vblank = I915_READ(VBLANK(i));
7379 error->pipe[i].vsync = I915_READ(VSYNC(i));
7380 }
7381
7382 return error;
7383}
7384
7385void
7386intel_display_print_error_state(struct seq_file *m,
7387 struct drm_device *dev,
7388 struct intel_display_error_state *error)
7389{
7390 int i;
7391
7392 for (i = 0; i < 2; i++) {
7393 seq_printf(m, "Pipe [%d]:\n", i);
7394 seq_printf(m, " CONF: %08x\n", error->pipe[i].conf);
7395 seq_printf(m, " SRC: %08x\n", error->pipe[i].source);
7396 seq_printf(m, " HTOTAL: %08x\n", error->pipe[i].htotal);
7397 seq_printf(m, " HBLANK: %08x\n", error->pipe[i].hblank);
7398 seq_printf(m, " HSYNC: %08x\n", error->pipe[i].hsync);
7399 seq_printf(m, " VTOTAL: %08x\n", error->pipe[i].vtotal);
7400 seq_printf(m, " VBLANK: %08x\n", error->pipe[i].vblank);
7401 seq_printf(m, " VSYNC: %08x\n", error->pipe[i].vsync);
7402
7403 seq_printf(m, "Plane [%d]:\n", i);
7404 seq_printf(m, " CNTR: %08x\n", error->plane[i].control);
7405 seq_printf(m, " STRIDE: %08x\n", error->plane[i].stride);
7406 seq_printf(m, " SIZE: %08x\n", error->plane[i].size);
7407 seq_printf(m, " POS: %08x\n", error->plane[i].pos);
7408 seq_printf(m, " ADDR: %08x\n", error->plane[i].addr);
7409 if (INTEL_INFO(dev)->gen >= 4) {
7410 seq_printf(m, " SURF: %08x\n", error->plane[i].surface);
7411 seq_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset);
7412 }
7413
7414 seq_printf(m, "Cursor [%d]:\n", i);
7415 seq_printf(m, " CNTR: %08x\n", error->cursor[i].control);
7416 seq_printf(m, " POS: %08x\n", error->cursor[i].position);
7417 seq_printf(m, " BASE: %08x\n", error->cursor[i].base);
7418 }
7419}
7420#endif