/*
 * Copyright © 2006-2007 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *	Eric Anholt <eric@anholt.net>
 */

#include <linux/dmi.h>
#include <linux/module.h>
#include <linux/input.h>
#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vgaarb.h>
#include <drm/drm_edid.h>
#include <drm/drmP.h>
#include "intel_drv.h"
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "intel_dsi.h"
#include "i915_trace.h"
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_rect.h>
#include <linux/dma_remapping.h>
#include <linux/reservation.h>
#include <linux/dma-buf.h>

/* Primary plane formats for gen <= 3 */
static const uint32_t i8xx_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB1555,
	DRM_FORMAT_XRGB8888,
};

/* Primary plane formats for gen >= 4 */
static const uint32_t i965_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
};

static const uint32_t skl_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_YUYV,
	DRM_FORMAT_YVYU,
	DRM_FORMAT_UYVY,
	DRM_FORMAT_VYUY,
};

/* Cursor formats */
static const uint32_t intel_cursor_formats[] = {
	DRM_FORMAT_ARGB8888,
};

static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config);
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config);

static int intel_framebuffer_init(struct drm_device *dev,
				  struct intel_framebuffer *ifb,
				  struct drm_mode_fb_cmd2 *mode_cmd,
				  struct drm_i915_gem_object *obj);
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc);
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc);
static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc);
static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2);
static void ironlake_set_pipeconf(struct drm_crtc *crtc);
static void haswell_set_pipeconf(struct drm_crtc *crtc);
static void haswell_set_pipemisc(struct drm_crtc *crtc);
static void vlv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void intel_begin_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc,
			     struct intel_crtc_state *crtc_state);
static void skylake_pfit_enable(struct intel_crtc *crtc);
static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force);
static void ironlake_pfit_enable(struct intel_crtc *crtc);
static void intel_modeset_setup_hw_state(struct drm_device *dev);
static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);
static int ilk_max_pixel_rate(struct drm_atomic_state *state);
static void intel_modeset_verify_crtc(struct drm_crtc *crtc,
				      struct drm_crtc_state *old_state,
				      struct drm_crtc_state *new_state);

struct intel_limit {
	struct {
		int min, max;
	} dot, vco, n, m, m1, m2, p, p1;

	struct {
		int dot_limit;
		int p2_slow, p2_fast;
	} p2;
};

/* returns HPLL frequency in kHz */
static int valleyview_get_vco(struct drm_i915_private *dev_priv)
{
	int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };

	/* Obtain SKU information */
	mutex_lock(&dev_priv->sb_lock);
	hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
		CCK_FUSE_HPLL_FREQ_MASK;
	mutex_unlock(&dev_priv->sb_lock);

	return vco_freq[hpll_freq] * 1000;
}
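
/*
 * Worked example of the fuse lookup above (values taken straight from
 * vco_freq[]): a fuse value of 2 selects vco_freq[2] = 2000 MHz, so the
 * function returns 2000 * 1000 = 2000000 kHz.
 */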

int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
		      const char *name, u32 reg, int ref_freq)
{
	u32 val;
	int divider;

	mutex_lock(&dev_priv->sb_lock);
	val = vlv_cck_read(dev_priv, reg);
	mutex_unlock(&dev_priv->sb_lock);

	divider = val & CCK_FREQUENCY_VALUES;

	WARN((val & CCK_FREQUENCY_STATUS) !=
	     (divider << CCK_FREQUENCY_STATUS_SHIFT),
	     "%s change in progress\n", name);

	return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
}
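
/*
 * The return value above works out to ref_freq * 2 / (divider + 1). As an
 * illustrative example (the numbers are made up, not a real SKU): with
 * ref_freq = 1600000 kHz and a divider field of 11 this yields
 * DIV_ROUND_CLOSEST(3200000, 12) = 266667 kHz.
 */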

static int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
				  const char *name, u32 reg)
{
	if (dev_priv->hpll_freq == 0)
		dev_priv->hpll_freq = valleyview_get_vco(dev_priv);

	return vlv_get_cck_clock(dev_priv, name, reg,
				 dev_priv->hpll_freq);
}

static int
intel_pch_rawclk(struct drm_i915_private *dev_priv)
{
	return (I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK) * 1000;
}

static int
intel_vlv_hrawclk(struct drm_i915_private *dev_priv)
{
	/* RAWCLK_FREQ_VLV register updated from power well code */
	return vlv_get_cck_clock_hpll(dev_priv, "hrawclk",
				      CCK_DISPLAY_REF_CLOCK_CONTROL);
}

static int
intel_g4x_hrawclk(struct drm_i915_private *dev_priv)
{
	uint32_t clkcfg;

	/* hrawclock is 1/4 the FSB frequency */
	clkcfg = I915_READ(CLKCFG);
	switch (clkcfg & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_400:
		return 100000;
	case CLKCFG_FSB_533:
		return 133333;
	case CLKCFG_FSB_667:
		return 166667;
	case CLKCFG_FSB_800:
		return 200000;
	case CLKCFG_FSB_1067:
		return 266667;
	case CLKCFG_FSB_1333:
		return 333333;
	/* these two are just a guess; one of them might be right */
	case CLKCFG_FSB_1600:
	case CLKCFG_FSB_1600_ALT:
		return 400000;
	default:
		return 133333;
	}
}
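
/*
 * Each case above is simply FSB / 4 expressed in kHz, per the comment
 * that hrawclock is 1/4 the FSB frequency: e.g. a 533 MHz FSB gives
 * 533333 / 4 ~= 133333 kHz.
 */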

void intel_update_rawclk(struct drm_i915_private *dev_priv)
{
	if (HAS_PCH_SPLIT(dev_priv))
		dev_priv->rawclk_freq = intel_pch_rawclk(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		dev_priv->rawclk_freq = intel_vlv_hrawclk(dev_priv);
	else if (IS_G4X(dev_priv) || IS_PINEVIEW(dev_priv))
		dev_priv->rawclk_freq = intel_g4x_hrawclk(dev_priv);
	else
		return; /* no rawclk on other platforms, or no need to know it */

	DRM_DEBUG_DRIVER("rawclk rate: %d kHz\n", dev_priv->rawclk_freq);
}

static void intel_update_czclk(struct drm_i915_private *dev_priv)
{
	if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
		return;

	dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
						      CCK_CZ_CLOCK_CONTROL);

	DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
}

static inline u32 /* kHz */
intel_fdi_link_freq(struct drm_i915_private *dev_priv,
		    const struct intel_crtc_state *pipe_config)
{
	if (HAS_DDI(dev_priv))
		return pipe_config->port_clock; /* SPLL */
	else if (IS_GEN5(dev_priv))
		return ((I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2) * 10000;
	else
		return 270000;
}

static const struct intel_limit intel_limits_i8xx_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
};

static const struct intel_limit intel_limits_i8xx_dvo = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 4 },
};

static const struct intel_limit intel_limits_i8xx_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
};

static const struct intel_limit intel_limits_i9xx_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_i9xx_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 7, .max = 98 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
};


static const struct intel_limit intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3},
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10,
		.p2_fast = 10
	},
};

static const struct intel_limit intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8},
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14
	},
};

static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7
	},
};

static const struct intel_limit intel_limits_pineview_sdvo = {
	.dot = { .min = 20000, .max = 400000},
	.vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_pineview_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 7, .max = 112 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
};

/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
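/*
 * For example (illustrative): an N register value of 1 in the tables
 * below corresponds to an actual divider of 1 + 2 = 3, per the
 * (register_value + 2) convention described above.
 */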
static const struct intel_limit intel_limits_ironlake_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 5 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 10, .p2_fast = 5 },
};

static const struct intel_limit intel_limits_ironlake_single_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 118 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_ironlake_dual_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 56 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

/* LVDS 100MHz refclk limits. */
static const struct intel_limit intel_limits_ironlake_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_ironlake_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

static const struct intel_limit intel_limits_vlv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have. The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 270000 * 5 },
	.vco = { .min = 4000000, .max = 6000000 },
	.n = { .min = 1, .max = 7 },
	.m1 = { .min = 2, .max = 3 },
	.m2 = { .min = 11, .max = 156 },
	.p1 = { .min = 2, .max = 3 },
	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
};
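
/*
 * Example of the fast clock convention above (illustrative): the .dot
 * maximum of 270000 * 5 corresponds to a 270000 kHz (270 MHz) pixel
 * clock, because the fast clock these limits are checked against runs
 * at 5x the pixel rate (see the "target *= 5" in vlv_find_best_dpll()
 * and the "dot / 5" in vlv_calc_dpll_params()).
 */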

static const struct intel_limit intel_limits_chv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have. The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 540000 * 5},
	.vco = { .min = 4800000, .max = 6480000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	.m2 = { .min = 24 << 22, .max = 175 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_bxt = {
	/* FIXME: find real dot limits */
	.dot = { .min = 0, .max = INT_MAX },
	.vco = { .min = 4800000, .max = 6700000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* FIXME: find real m2 limits */
	.m2 = { .min = 2 << 22, .max = 255 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 20 },
};

static bool
needs_modeset(struct drm_crtc_state *state)
{
	return drm_atomic_crtc_needs_modeset(state);
}

/**
 * Returns whether any output on the specified pipe is of the specified type
 */
bool intel_pipe_has_type(struct intel_crtc *crtc, enum intel_output_type type)
{
	struct drm_device *dev = crtc->base.dev;
	struct intel_encoder *encoder;

	for_each_encoder_on_crtc(dev, &crtc->base, encoder)
		if (encoder->type == type)
			return true;

	return false;
}

/**
 * Returns whether any output on the specified pipe will have the specified
 * type after the staged modeset is complete, i.e., the same as
 * intel_pipe_has_type() but looking at the staged encoder assignments in
 * the atomic state instead of encoder->crtc.
 */
static bool intel_pipe_will_have_type(const struct intel_crtc_state *crtc_state,
				      int type)
{
	struct drm_atomic_state *state = crtc_state->base.state;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	struct intel_encoder *encoder;
	int i, num_connectors = 0;

	for_each_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != crtc_state->base.crtc)
			continue;

		num_connectors++;

		encoder = to_intel_encoder(connector_state->best_encoder);
		if (encoder->type == type)
			return true;
	}

	WARN_ON(num_connectors == 0);

	return false;
}

/*
 * Platform specific helpers to calculate the port PLL loopback- (clock.m),
 * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
 * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
 * The helpers' return value is the rate of the clock that is fed to the
 * display engine's pipe which can be the above fast dot clock rate or a
 * divided-down version of it.
 */
/* m1 is reserved as 0 in Pineview, n is a ring counter */
static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m2 + 2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot;
}

static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
{
	return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
}

static int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = i9xx_dpll_compute_m(clock);
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot;
}
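
/*
 * Worked example for i9xx_calc_dpll_params() (divisor values are
 * illustrative): with refclk = 96000 kHz, m1 = 10, m2 = 5, n = 2,
 * p1 = 2 and p2 = 5: m = 5 * (10 + 2) + (5 + 2) = 67, p = 10,
 * vco = 96000 * 67 / (2 + 2) = 1608000 kHz and
 * dot = 1608000 / 10 = 160800 kHz.
 */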

static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot / 5;
}

int chv_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = clock->m1 * clock->m2;
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST_ULL((uint64_t)refclk * clock->m,
					   clock->n << 22);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot / 5;
}
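
/*
 * On CHV the m2 divisor carries 22 fractional bits: note the m2 limits
 * such as 24 << 22 in intel_limits_chv and the "clock->n << 22" above,
 * which cancels those bits out. Illustrative example: an integer m2 of
 * 40 is stored as 40 << 22, so vco = refclk * m1 * (40 << 22) /
 * (n << 22) = refclk * m1 * 40 / n.
 */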

#define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0)
/**
 * Returns whether the given set of divisors is valid for a given refclk with
 * the given connectors.
 */

static bool intel_PLL_is_valid(struct drm_device *dev,
			       const struct intel_limit *limit,
			       const struct dpll *clock)
{
	if (clock->n < limit->n.min || limit->n.max < clock->n)
		INTELPllInvalid("n out of range\n");
	if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
		INTELPllInvalid("p1 out of range\n");
	if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
		INTELPllInvalid("m2 out of range\n");
	if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
		INTELPllInvalid("m1 out of range\n");

	if (!IS_PINEVIEW(dev) && !IS_VALLEYVIEW(dev) &&
	    !IS_CHERRYVIEW(dev) && !IS_BROXTON(dev))
		if (clock->m1 <= clock->m2)
			INTELPllInvalid("m1 <= m2\n");

	if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) && !IS_BROXTON(dev)) {
		if (clock->p < limit->p.min || limit->p.max < clock->p)
			INTELPllInvalid("p out of range\n");
		if (clock->m < limit->m.min || limit->m.max < clock->m)
			INTELPllInvalid("m out of range\n");
	}

	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		INTELPllInvalid("vco out of range\n");
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
	 * connector, etc., rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		INTELPllInvalid("dot out of range\n");

	return true;
}

static int
i9xx_select_p2_div(const struct intel_limit *limit,
		   const struct intel_crtc_state *crtc_state,
		   int target)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;

	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		/*
		 * For LVDS just rely on its current settings for dual-channel.
		 * We haven't figured out how to reliably set up different
		 * single/dual channel state, if we even can.
		 */
		if (intel_is_dual_link_lvds(dev))
			return limit->p2.p2_fast;
		else
			return limit->p2.p2_slow;
	} else {
		if (target < limit->p2.dot_limit)
			return limit->p2.p2_slow;
		else
			return limit->p2.p2_fast;
	}
}
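
/*
 * Example of the non-LVDS branch above (illustrative, using the i9xx
 * SDVO limits where .dot_limit = 200000, .p2_slow = 10 and
 * .p2_fast = 5): a 150000 kHz target selects p2 = 10, while a
 * 250000 kHz target selects p2 = 5.
 */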

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE. The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
i9xx_find_best_dpll(const struct intel_limit *limit,
		    struct intel_crtc_state *crtc_state,
		    int target, int refclk, struct dpll *match_clock,
		    struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	struct dpll clock;
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			if (clock.m2 >= clock.m1)
				break;
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
				     clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE. The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
pnv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	struct dpll clock;
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
				     clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					pnv_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	return (err != target);
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE. The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
g4x_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	struct dpll clock;
	int max_n;
	bool found = false;
	/* approximately equals target * 0.00585 */
	int err_most = (target >> 8) + (target >> 9);

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	max_n = limit->n.max;
	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		/* based on hardware requirement, prefer larger m1,m2 */
		for (clock.m1 = limit->m1.max;
		     clock.m1 >= limit->m1.min; clock.m1--) {
			for (clock.m2 = limit->m2.max;
			     clock.m2 >= limit->m2.min; clock.m2--) {
				for (clock.p1 = limit->p1.max;
				     clock.p1 >= limit->p1.min; clock.p1--) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err_most) {
						*best_clock = clock;
						err_most = this_err;
						max_n = clock.n;
						found = true;
					}
				}
			}
		}
	}
	return found;
}

/*
 * Check whether the calculated PLL configuration is more optimal compared to
 * the best configuration and error found so far. Returns true if it is, and
 * stores the newly calculated error in *error_ppm.
 */
static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
			       const struct dpll *calculated_clock,
			       const struct dpll *best_clock,
			       unsigned int best_error_ppm,
			       unsigned int *error_ppm)
{
	/*
	 * For CHV ignore the error and consider only the P value.
	 * Prefer a bigger P value based on HW requirements.
	 */
	if (IS_CHERRYVIEW(dev)) {
		*error_ppm = 0;

		return calculated_clock->p > best_clock->p;
	}

	if (WARN_ON_ONCE(!target_freq))
		return false;

	*error_ppm = div_u64(1000000ULL *
			     abs(target_freq - calculated_clock->dot),
			     target_freq);
	/*
	 * Prefer a better P value over a better (smaller) error if the error
	 * is small. Ensure this preference for future configurations too by
	 * setting the error to 0.
	 */
	if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
		*error_ppm = 0;

		return true;
	}

	return *error_ppm + 10 < best_error_ppm;
}
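
/*
 * Worked example of the ppm computation above (illustrative numbers):
 * with target_freq = 100000 kHz and a calculated dot clock of
 * 100050 kHz, *error_ppm = 1000000 * 50 / 100000 = 500. Since 500 is
 * not below the 100 ppm cutoff, the bigger-P preference does not
 * apply, and the candidate only wins if 500 + 10 < best_error_ppm.
 */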

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE. The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 */
static bool
vlv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct dpll clock;
	unsigned int bestppm = 1000000;
	/* min update 19.2 MHz */
	int max_n = min(limit->n.max, refclk / 19200);
	bool found = false;

	target *= 5; /* fast clock */

	memset(best_clock, 0, sizeof(*best_clock));

	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
			for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
			     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
				clock.p = clock.p1 * clock.p2;
				/* based on hardware requirement, prefer bigger m1,m2 values */
				for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
					unsigned int ppm;

					clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
								     refclk * clock.m1);

					vlv_calc_dpll_params(refclk, &clock);

					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;

					if (!vlv_PLL_is_optimal(dev, target,
								&clock,
								best_clock,
								bestppm, &ppm))
						continue;

					*best_clock = clock;
					bestppm = ppm;
					found = true;
				}
			}
		}
	}

	return found;
}

/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE. The returned values represent the clock equation:
 * refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 */
static bool
chv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	unsigned int best_error_ppm;
	struct dpll clock;
	uint64_t m2;
	bool found = false;

	memset(best_clock, 0, sizeof(*best_clock));
	best_error_ppm = 1000000;

	/*
	 * Based on the hardware docs, n is always set to 1 and m1 is
	 * always set to 2. If we ever need to support a 200 MHz refclk,
	 * this needs to be revisited because n may no longer be 1.
	 */
	clock.n = 1, clock.m1 = 2;
	target *= 5;	/* fast clock */

	for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
		for (clock.p2 = limit->p2.p2_fast;
		     clock.p2 >= limit->p2.p2_slow;
		     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
			unsigned int error_ppm;

			clock.p = clock.p1 * clock.p2;

			m2 = DIV_ROUND_CLOSEST_ULL(((uint64_t)target * clock.p *
						    clock.n) << 22, refclk * clock.m1);

			if (m2 > INT_MAX/clock.m1)
				continue;

			clock.m2 = m2;

			chv_calc_dpll_params(refclk, &clock);

			if (!intel_PLL_is_valid(dev, limit, &clock))
				continue;

			if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
						best_error_ppm, &error_ppm))
				continue;

			*best_clock = clock;
			best_error_ppm = error_ppm;
			found = true;
		}
	}

	return found;
}
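
/*
 * Worked example of the m2 computation above (illustrative): with
 * refclk = 100000 kHz, target (already multiplied by 5) = 1350000,
 * p = 4, n = 1 and m1 = 2:
 * m2 = ((1350000 * 4 * 1) << 22) / (100000 * 2) = 27 << 22,
 * i.e. an effective fractional m2 of exactly 27.0.
 */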

bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
			struct dpll *best_clock)
{
	int refclk = 100000;
	const struct intel_limit *limit = &intel_limits_bxt;

	return chv_find_best_dpll(limit, crtc_state,
				  target_clock, refclk, NULL, best_clock);
}

bool intel_crtc_active(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	/* Be paranoid as we can arrive here with only partial
	 * state retrieved from the hardware during setup.
	 *
	 * We can ditch the adjusted_mode.crtc_clock check as soon
	 * as Haswell has gained clock readout/fastboot support.
	 *
	 * We can ditch the crtc->primary->fb check as soon as we can
	 * properly reconstruct framebuffers.
	 *
	 * FIXME: The intel_crtc->active here should be switched to
	 * crtc->state->active once we have proper CRTC states wired up
	 * for atomic.
	 */
	return intel_crtc->active && crtc->primary->state->fb &&
		intel_crtc->config->base.adjusted_mode.crtc_clock;
}

enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
					     enum pipe pipe)
{
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	return intel_crtc->config->cpu_transcoder;
}

static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	i915_reg_t reg = PIPEDSL(pipe);
	u32 line1, line2;
	u32 line_mask;

	if (IS_GEN2(dev))
		line_mask = DSL_LINEMASK_GEN2;
	else
		line_mask = DSL_LINEMASK_GEN3;

	line1 = I915_READ(reg) & line_mask;
	msleep(5);
	line2 = I915_READ(reg) & line_mask;

	return line1 == line2;
}

/*
 * intel_wait_for_pipe_off - wait for pipe to turn off
 * @crtc: crtc whose pipe to wait for
 *
 * After disabling a pipe, we can't wait for vblank in the usual way,
 * spinning on the vblank interrupt status bit, since we won't actually
 * see an interrupt when the pipe is disabled.
 *
 * On Gen4 and above:
 *   wait for the pipe register state bit to turn off
 *
 * Otherwise:
 *   wait for the display line value to settle (it usually
 *   ends up stopping at the start of the next frame).
 *
 */
static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
	enum pipe pipe = crtc->pipe;

	if (INTEL_INFO(dev)->gen >= 4) {
		i915_reg_t reg = PIPECONF(cpu_transcoder);

		/* Wait for the Pipe State to go off */
		if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
			     100))
			WARN(1, "pipe_off wait timed out\n");
	} else {
		/* Wait for the display line to settle */
		if (wait_for(pipe_dsl_stopped(dev, pipe), 100))
			WARN(1, "pipe_off wait timed out\n");
	}
}

/* Only for pre-ILK configs */
void assert_pll(struct drm_i915_private *dev_priv,
		enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(DPLL(pipe));
	cur_state = !!(val & DPLL_VCO_ENABLE);
	I915_STATE_WARN(cur_state != state,
			"PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}

/* XXX: the dsi pll is shared between MIPI DSI ports */
void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
{
	u32 val;
	bool cur_state;

	mutex_lock(&dev_priv->sb_lock);
	val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
	mutex_unlock(&dev_priv->sb_lock);

	cur_state = val & DSI_PLL_VCO_EN;
	I915_STATE_WARN(cur_state != state,
			"DSI PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}

static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	bool cur_state;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	if (HAS_DDI(dev_priv)) {
		/* DDI does not have a specific FDI_TX register */
		u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
		cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
	} else {
		u32 val = I915_READ(FDI_TX_CTL(pipe));
		cur_state = !!(val & FDI_TX_ENABLE);
	}
	I915_STATE_WARN(cur_state != state,
			"FDI TX state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)

static void assert_fdi_rx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(FDI_RX_CTL(pipe));
	cur_state = !!(val & FDI_RX_ENABLE);
	I915_STATE_WARN(cur_state != state,
			"FDI RX state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)

static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	u32 val;

	/* ILK FDI PLL is always enabled */
	if (IS_GEN5(dev_priv))
		return;

	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
	if (HAS_DDI(dev_priv))
		return;

	val = I915_READ(FDI_TX_CTL(pipe));
	I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
}

void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
		       enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(FDI_RX_CTL(pipe));
	cur_state = !!(val & FDI_RX_PLL_ENABLE);
	I915_STATE_WARN(cur_state != state,
			"FDI RX PLL assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}

void assert_panel_unlocked(struct drm_i915_private *dev_priv,
			   enum pipe pipe)
{
	struct drm_device *dev = dev_priv->dev;
	i915_reg_t pp_reg;
	u32 val;
	enum pipe panel_pipe = PIPE_A;
	bool locked = true;

	if (WARN_ON(HAS_DDI(dev)))
		return;

	if (HAS_PCH_SPLIT(dev)) {
		u32 port_sel;

		pp_reg = PCH_PP_CONTROL;
		port_sel = I915_READ(PCH_PP_ON_DELAYS) & PANEL_PORT_SELECT_MASK;

		if (port_sel == PANEL_PORT_SELECT_LVDS &&
		    I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT)
			panel_pipe = PIPE_B;
		/* XXX: else fix for eDP */
	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		/* presumably write lock depends on pipe, not port select */
		pp_reg = VLV_PIPE_PP_CONTROL(pipe);
		panel_pipe = pipe;
	} else {
		pp_reg = PP_CONTROL;
		if (I915_READ(LVDS) & LVDS_PIPEB_SELECT)
			panel_pipe = PIPE_B;
	}

	val = I915_READ(pp_reg);
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
		locked = false;

	I915_STATE_WARN(panel_pipe == pipe && locked,
			"panel assertion failure, pipe %c regs locked\n",
			pipe_name(pipe));
}

static void assert_cursor(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	struct drm_device *dev = dev_priv->dev;
	bool cur_state;

	if (IS_845G(dev) || IS_I865G(dev))
		cur_state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
	else
		cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;

	I915_STATE_WARN(cur_state != state,
			"cursor on pipe %c assertion failure (expected %s, current %s)\n",
			pipe_name(pipe), onoff(state), onoff(cur_state));
}
#define assert_cursor_enabled(d, p) assert_cursor(d, p, true)
#define assert_cursor_disabled(d, p) assert_cursor(d, p, false)

void assert_pipe(struct drm_i915_private *dev_priv,
		 enum pipe pipe, bool state)
{
	bool cur_state;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);
	enum intel_display_power_domain power_domain;

	/* if we need the pipe quirk it must be always on */
	if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
	    (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
		state = true;

	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
	if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
		u32 val = I915_READ(PIPECONF(cpu_transcoder));
		cur_state = !!(val & PIPECONF_ENABLE);

		intel_display_power_put(dev_priv, power_domain);
	} else {
		cur_state = false;
	}

	I915_STATE_WARN(cur_state != state,
			"pipe %c assertion failure (expected %s, current %s)\n",
			pipe_name(pipe), onoff(state), onoff(cur_state));
}

static void assert_plane(struct drm_i915_private *dev_priv,
			 enum plane plane, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(DSPCNTR(plane));
	cur_state = !!(val & DISPLAY_PLANE_ENABLE);
	I915_STATE_WARN(cur_state != state,
			"plane %c assertion failure (expected %s, current %s)\n",
			plane_name(plane), onoff(state), onoff(cur_state));
}

#define assert_plane_enabled(d, p) assert_plane(d, p, true)
#define assert_plane_disabled(d, p) assert_plane(d, p, false)

static void assert_planes_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe)
{
	struct drm_device *dev = dev_priv->dev;
	int i;

	/* Primary planes are fixed to pipes on gen4+ */
	if (INTEL_INFO(dev)->gen >= 4) {
		u32 val = I915_READ(DSPCNTR(pipe));
		I915_STATE_WARN(val & DISPLAY_PLANE_ENABLE,
				"plane %c assertion failure, should be disabled but not\n",
				plane_name(pipe));
		return;
	}

	/* Need to check both planes against the pipe */
	for_each_pipe(dev_priv, i) {
		u32 val = I915_READ(DSPCNTR(i));
		enum pipe cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
			DISPPLANE_SEL_PIPE_SHIFT;
		I915_STATE_WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
				"plane %c assertion failure, should be off on pipe %c but is still active\n",
				plane_name(i), pipe_name(pipe));
	}
}

static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	struct drm_device *dev = dev_priv->dev;
	int sprite;

	if (INTEL_INFO(dev)->gen >= 9) {
		for_each_sprite(dev_priv, pipe, sprite) {
			u32 val = I915_READ(PLANE_CTL(pipe, sprite));
			I915_STATE_WARN(val & PLANE_CTL_ENABLE,
					"plane %d assertion failure, should be off on pipe %c but is still active\n",
					sprite, pipe_name(pipe));
		}
	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		for_each_sprite(dev_priv, pipe, sprite) {
			u32 val = I915_READ(SPCNTR(pipe, sprite));
			I915_STATE_WARN(val & SP_ENABLE,
					"sprite %c assertion failure, should be off on pipe %c but is still active\n",
					sprite_name(pipe, sprite), pipe_name(pipe));
		}
	} else if (INTEL_INFO(dev)->gen >= 7) {
		u32 val = I915_READ(SPRCTL(pipe));
		I915_STATE_WARN(val & SPRITE_ENABLE,
				"sprite %c assertion failure, should be off on pipe %c but is still active\n",
				plane_name(pipe), pipe_name(pipe));
	} else if (INTEL_INFO(dev)->gen >= 5) {
		u32 val = I915_READ(DVSCNTR(pipe));
		I915_STATE_WARN(val & DVS_ENABLE,
				"sprite %c assertion failure, should be off on pipe %c but is still active\n",
				plane_name(pipe), pipe_name(pipe));
	}
}

static void assert_vblank_disabled(struct drm_crtc *crtc)
{
	if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
		drm_crtc_vblank_put(crtc);
}

void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	u32 val;
	bool enabled;

	val = I915_READ(PCH_TRANSCONF(pipe));
	enabled = !!(val & TRANS_ENABLE);
	I915_STATE_WARN(enabled,
			"transcoder assertion failed, should be off on pipe %c but is still active\n",
			pipe_name(pipe));
}

static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
			    enum pipe pipe, u32 port_sel, u32 val)
{
	if ((val & DP_PORT_EN) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv)) {
		u32 trans_dp_ctl = I915_READ(TRANS_DP_CTL(pipe));
		if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
			return false;
	} else if (IS_CHERRYVIEW(dev_priv)) {
		if ((val & DP_PIPE_MASK_CHV) != DP_PIPE_SELECT_CHV(pipe))
			return false;
	} else {
		if ((val & DP_PIPE_MASK) != (pipe << 30))
			return false;
	}
	return true;
}

static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum pipe pipe, u32 val)
{
	if ((val & SDVO_ENABLE) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv)) {
		if ((val & SDVO_PIPE_SEL_MASK_CPT) != SDVO_PIPE_SEL_CPT(pipe))
			return false;
	} else if (IS_CHERRYVIEW(dev_priv)) {
		if ((val & SDVO_PIPE_SEL_MASK_CHV) != SDVO_PIPE_SEL_CHV(pipe))
			return false;
	} else {
		if ((val & SDVO_PIPE_SEL_MASK) != SDVO_PIPE_SEL(pipe))
			return false;
	}
	return true;
}

static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum pipe pipe, u32 val)
{
	if ((val & LVDS_PORT_EN) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv)) {
		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
			return false;
	} else {
		if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
			return false;
	}
	return true;
}

static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum pipe pipe, u32 val)
{
	if ((val & ADPA_DAC_ENABLE) == 0)
		return false;
	if (HAS_PCH_CPT(dev_priv)) {
		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
			return false;
	} else {
		if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
			return false;
	}
	return true;
}

static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe, i915_reg_t reg,
				   u32 port_sel)
{
	u32 val = I915_READ(reg);
	I915_STATE_WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
			"PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
			i915_mmio_reg_offset(reg), pipe_name(pipe));

	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && (val & DP_PORT_EN) == 0
			&& (val & DP_PIPEB_SELECT),
			"IBX PCH dp port still using transcoder B\n");
}

static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
				     enum pipe pipe, i915_reg_t reg)
{
	u32 val = I915_READ(reg);
	I915_STATE_WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
			"PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
			i915_mmio_reg_offset(reg), pipe_name(pipe));

	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && (val & SDVO_ENABLE) == 0
			&& (val & SDVO_PIPE_B_SELECT),
			"IBX PCH hdmi port still using transcoder B\n");
}

static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	u32 val;

	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
	assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);

	val = I915_READ(PCH_ADPA);
	I915_STATE_WARN(adpa_pipe_enabled(dev_priv, pipe, val),
			"PCH VGA enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	val = I915_READ(PCH_LVDS);
	I915_STATE_WARN(lvds_pipe_enabled(dev_priv, pipe, val),
			"PCH LVDS enabled on transcoder %c, should be disabled\n",
			pipe_name(pipe));

	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIB);
	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIC);
	assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
}

static void _vlv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
	POSTING_READ(DPLL(pipe));
	udelay(150);

	if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
		DRM_ERROR("DPLL %d failed to lock\n", pipe);
}

static void vlv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_vlv_enable_pll(crtc, pipe_config);

	I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
	POSTING_READ(DPLL_MD(pipe));
}


static void _chv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 tmp;

	mutex_lock(&dev_priv->sb_lock);

	/* Re-enable the 10-bit clock to the display controller */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	tmp |= DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);

	mutex_unlock(&dev_priv->sb_lock);

	/*
	 * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
	 */
	udelay(1);

	/* Enable PLL */
	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);

	/* Check PLL is locked */
	if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
		DRM_ERROR("PLL %d failed to lock\n", pipe);
}

static void chv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_chv_enable_pll(crtc, pipe_config);

	if (pipe != PIPE_A) {
		/*
		 * WaPixelRepeatModeFixForC0:chv
		 *
		 * DPLLCMD is AWOL. Use chicken bits to propagate
		 * the value from DPLLBMD to either pipe B or C.
		 */
		I915_WRITE(CBR4_VLV, pipe == PIPE_B ? CBR_DPLLBMD_PIPE_B : CBR_DPLLBMD_PIPE_C);
		I915_WRITE(DPLL_MD(PIPE_B), pipe_config->dpll_hw_state.dpll_md);
		I915_WRITE(CBR4_VLV, 0);
		dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;

		/*
		 * DPLLB VGA mode also seems to cause problems.
		 * We should always have it disabled.
		 */
		WARN_ON((I915_READ(DPLL(PIPE_B)) & DPLL_VGA_MODE_DIS) == 0);
	} else {
		I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
		POSTING_READ(DPLL_MD(pipe));
	}
}

static int intel_num_dvo_pipes(struct drm_device *dev)
{
	struct intel_crtc *crtc;
	int count = 0;

	for_each_intel_crtc(dev, crtc)
		count += crtc->base.state->active &&
			intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO);

	return count;
}

static void i9xx_enable_pll(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	i915_reg_t reg = DPLL(crtc->pipe);
	u32 dpll = crtc->config->dpll_hw_state.dpll;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	/* PLL is protected by panel, make sure we can write it */
	if (IS_MOBILE(dev) && !IS_I830(dev))
		assert_panel_unlocked(dev_priv, crtc->pipe);

	/* Enable DVO 2x clock on both PLLs if necessary */
	if (IS_I830(dev) && intel_num_dvo_pipes(dev) > 0) {
		/*
		 * It appears to be important that we don't enable this
		 * for the current pipe before otherwise configuring the
		 * PLL. No idea how this should be handled if multiple
		 * DVO outputs are enabled simultaneously.
		 */
		dpll |= DPLL_DVO_2X_MODE;
		I915_WRITE(DPLL(!crtc->pipe),
			   I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE);
	}

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	I915_WRITE(reg, 0);

	I915_WRITE(reg, dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(reg);
	udelay(150);

	if (INTEL_INFO(dev)->gen >= 4) {
		I915_WRITE(DPLL_MD(crtc->pipe),
			   crtc->config->dpll_hw_state.dpll_md);
	} else {
		/* The pixel multiplier can only be updated once the
		 * DPLL is enabled and the clocks are stable.
		 *
		 * So write it again.
		 */
		I915_WRITE(reg, dpll);
	}

	/* We do this three times for luck */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
}

/**
 * i9xx_disable_pll - disable a PLL
 * @crtc: crtc whose PLL to disable
 *
 * Disable the PLL for @crtc, making sure the pipe is off first.
 *
 * Note! This is for pre-ILK only.
 */
static void i9xx_disable_pll(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe = crtc->pipe;

	/* Disable DVO 2x clock on both PLLs if necessary */
	if (IS_I830(dev) &&
	    intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO) &&
	    !intel_num_dvo_pipes(dev)) {
		I915_WRITE(DPLL(PIPE_B),
			   I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
		I915_WRITE(DPLL(PIPE_A),
			   I915_READ(DPLL(PIPE_A)) & ~DPLL_DVO_2X_MODE);
	}

	/*
	 * Don't disable the pipe or its PLL if the pipe quirk
	 * requires them to stay always on.
	 */
	if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
	    (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
		return;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
	POSTING_READ(DPLL(pipe));
}

static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	val = DPLL_INTEGRATED_REF_CLK_VLV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));
}

static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	val = DPLL_SSC_REF_CLK_CHV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));

	mutex_lock(&dev_priv->sb_lock);

1780 /* Disable 10bit clock to display controller */
1781 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
1782 val &= ~DPIO_DCLKP_EN;
1783 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);
1784
1785 mutex_unlock(&dev_priv->sb_lock);
1786 }
1787
1788 void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
1789 struct intel_digital_port *dport,
1790 unsigned int expected_mask)
1791 {
1792 u32 port_mask;
1793 i915_reg_t dpll_reg;
1794
1795 switch (dport->port) {
1796 case PORT_B:
1797 port_mask = DPLL_PORTB_READY_MASK;
1798 dpll_reg = DPLL(0);
1799 break;
1800 case PORT_C:
1801 port_mask = DPLL_PORTC_READY_MASK;
1802 dpll_reg = DPLL(0);
1803 expected_mask <<= 4;
1804 break;
1805 case PORT_D:
1806 port_mask = DPLL_PORTD_READY_MASK;
1807 dpll_reg = DPIO_PHY_STATUS;
1808 break;
1809 default:
1810 BUG();
1811 }
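/*
 * Ports B and C report lane readiness in the same DPLL(0) register,
 * with port C's bits sitting 4 above port B's; that is why
 * expected_mask is shifted by 4 for PORT_C above. PORT_D is read from
 * the separate DPIO_PHY_STATUS register instead.
 */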
1812
1813 if (wait_for((I915_READ(dpll_reg) & port_mask) == expected_mask, 1000))
1814 WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n",
1815 port_name(dport->port), I915_READ(dpll_reg) & port_mask, expected_mask);
1816 }
1817
1818 static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1819 enum pipe pipe)
1820 {
1821 struct drm_device *dev = dev_priv->dev;
1822 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
1823 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1824 i915_reg_t reg;
1825 uint32_t val, pipeconf_val;
1826
1827 /* Make sure PCH DPLL is enabled */
1828 assert_shared_dpll_enabled(dev_priv, intel_crtc->config->shared_dpll);
1829
1830 /* FDI must be feeding us bits for PCH ports */
1831 assert_fdi_tx_enabled(dev_priv, pipe);
1832 assert_fdi_rx_enabled(dev_priv, pipe);
1833
1834 if (HAS_PCH_CPT(dev)) {
1835 /* Workaround: Set the timing override bit before enabling the
1836 * pch transcoder. */
1837 reg = TRANS_CHICKEN2(pipe);
1838 val = I915_READ(reg);
1839 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1840 I915_WRITE(reg, val);
1841 }
1842
1843 reg = PCH_TRANSCONF(pipe);
1844 val = I915_READ(reg);
1845 pipeconf_val = I915_READ(PIPECONF(pipe));
1846
1847 if (HAS_PCH_IBX(dev_priv)) {
1848 /*
1849 * Make the BPC in the transcoder consistent with
1850 * that in the pipeconf reg. For HDMI we must use 8bpc
1851 * here for both 8bpc and 12bpc.
1852 */
1853 val &= ~PIPECONF_BPC_MASK;
1854 if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_HDMI))
1855 val |= PIPECONF_8BPC;
1856 else
1857 val |= pipeconf_val & PIPECONF_BPC_MASK;
1858 }
1859
1860 val &= ~TRANS_INTERLACE_MASK;
1861 if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
1862 if (HAS_PCH_IBX(dev_priv) &&
1863 intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
1864 val |= TRANS_LEGACY_INTERLACED_ILK;
1865 else
1866 val |= TRANS_INTERLACED;
1867 } else
1868 val |= TRANS_PROGRESSIVE;
1869
1870 I915_WRITE(reg, val | TRANS_ENABLE);
1871 if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
1872 DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
1873 }
1874
1875 static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1876 enum transcoder cpu_transcoder)
1877 {
1878 u32 val, pipeconf_val;
1879
1880 /* FDI must be feeding us bits for PCH ports */
1881 assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
1882 assert_fdi_rx_enabled(dev_priv, TRANSCODER_A);
1883
1884 /* Workaround: set timing override bit. */
1885 val = I915_READ(TRANS_CHICKEN2(PIPE_A));
1886 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1887 I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
1888
1889 val = TRANS_ENABLE;
1890 pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));
1891
1892 if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
1893 PIPECONF_INTERLACED_ILK)
1894 val |= TRANS_INTERLACED;
1895 else
1896 val |= TRANS_PROGRESSIVE;
1897
1898 I915_WRITE(LPT_TRANSCONF, val);
1899 if (wait_for(I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE, 100))
1900 DRM_ERROR("Failed to enable PCH transcoder\n");
1901 }
1902
1903 static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
1904 enum pipe pipe)
1905 {
1906 struct drm_device *dev = dev_priv->dev;
1907 i915_reg_t reg;
1908 uint32_t val;
1909
1910 /* FDI relies on the transcoder */
1911 assert_fdi_tx_disabled(dev_priv, pipe);
1912 assert_fdi_rx_disabled(dev_priv, pipe);
1913
1914 /* Ports must be off as well */
1915 assert_pch_ports_disabled(dev_priv, pipe);
1916
1917 reg = PCH_TRANSCONF(pipe);
1918 val = I915_READ(reg);
1919 val &= ~TRANS_ENABLE;
1920 I915_WRITE(reg, val);
1921 /* wait for the PCH transcoder state bit to indicate off */
1922 if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
1923 DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));
1924
1925 if (HAS_PCH_CPT(dev)) {
1926 /* Workaround: Clear the timing override chicken bit again. */
1927 reg = TRANS_CHICKEN2(pipe);
1928 val = I915_READ(reg);
1929 val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
1930 I915_WRITE(reg, val);
1931 }
1932 }
1933
1934 static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
1935 {
1936 u32 val;
1937
1938 val = I915_READ(LPT_TRANSCONF);
1939 val &= ~TRANS_ENABLE;
1940 I915_WRITE(LPT_TRANSCONF, val);
1941 /* wait for the PCH transcoder state bit to indicate off */
1942 if (wait_for((I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE) == 0, 50))
1943 DRM_ERROR("Failed to disable PCH transcoder\n");
1944
1945 /* Workaround: clear timing override bit. */
1946 val = I915_READ(TRANS_CHICKEN2(PIPE_A));
1947 val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
1948 I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
1949 }
1950
1951 /**
1952 * intel_enable_pipe - enable a pipe, asserting requirements
1953 * @crtc: crtc responsible for the pipe
1954 *
1955 * Enable @crtc's pipe, making sure that various hardware specific requirements
1956 * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
1957 */
1958 static void intel_enable_pipe(struct intel_crtc *crtc)
1959 {
1960 struct drm_device *dev = crtc->base.dev;
1961 struct drm_i915_private *dev_priv = dev->dev_private;
1962 enum pipe pipe = crtc->pipe;
1963 enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
1964 enum pipe pch_transcoder;
1965 i915_reg_t reg;
1966 u32 val;
1967
1968 DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));
1969
1970 assert_planes_disabled(dev_priv, pipe);
1971 assert_cursor_disabled(dev_priv, pipe);
1972 assert_sprites_disabled(dev_priv, pipe);
1973
1974 if (HAS_PCH_LPT(dev_priv))
1975 pch_transcoder = TRANSCODER_A;
1976 else
1977 pch_transcoder = pipe;
1978
1979 /*
1980 * A pipe without a PLL won't actually be able to drive bits from
1981 * a plane. On ILK+ the pipe PLLs are integrated, so we don't
1982 * need the check.
1983 */
1984 if (HAS_GMCH_DISPLAY(dev_priv)) {
1985 if (crtc->config->has_dsi_encoder)
1986 assert_dsi_pll_enabled(dev_priv);
1987 else
1988 assert_pll_enabled(dev_priv, pipe);
1989 } else {
1990 if (crtc->config->has_pch_encoder) {
1991 /* if driving the PCH, we need FDI enabled */
1992 assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
1993 assert_fdi_tx_pll_enabled(dev_priv,
1994 (enum pipe) cpu_transcoder);
1995 }
1996 /* FIXME: assert CPU port conditions for SNB+ */
1997 }
1998
1999 reg = PIPECONF(cpu_transcoder);
2000 val = I915_READ(reg);
2001 if (val & PIPECONF_ENABLE) {
2002 WARN_ON(!((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
2003 (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE)));
2004 return;
2005 }
2006
2007 I915_WRITE(reg, val | PIPECONF_ENABLE);
2008 POSTING_READ(reg);
2009
2010 /*
2011 * Until the pipe starts DSL will read as 0, which would cause
2012 * an apparent vblank timestamp jump, which also messes up the
2013 * frame count when it's derived from the timestamps. So let's
2014 * wait for the pipe to start properly before we call
2015 * drm_crtc_vblank_on()
2016 */
2017 if (dev->max_vblank_count == 0 &&
2018 wait_for(intel_get_crtc_scanline(crtc) != crtc->scanline_offset, 50))
2019 DRM_ERROR("pipe %c didn't start\n", pipe_name(pipe));
2020 }
2021
2022 /**
2023 * intel_disable_pipe - disable a pipe, asserting requirements
2024 * @crtc: crtc whose pipe is to be disabled
2025 *
2026 * Disable the pipe of @crtc, making sure that various hardware
2027 * specific requirements are met, if applicable, e.g. plane
2028 * disabled, panel fitter off, etc.
2029 *
2030 * Will wait until the pipe has shut down before returning.
2031 */
2032 static void intel_disable_pipe(struct intel_crtc *crtc)
2033 {
2034 struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
2035 enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
2036 enum pipe pipe = crtc->pipe;
2037 i915_reg_t reg;
2038 u32 val;
2039
2040 DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe));
2041
2042 /*
2043 * Make sure planes won't keep trying to pump pixels to us,
2044 * or we might hang the display.
2045 */
2046 assert_planes_disabled(dev_priv, pipe);
2047 assert_cursor_disabled(dev_priv, pipe);
2048 assert_sprites_disabled(dev_priv, pipe);
2049
2050 reg = PIPECONF(cpu_transcoder);
2051 val = I915_READ(reg);
2052 if ((val & PIPECONF_ENABLE) == 0)
2053 return;
2054
2055 /*
2056 * Double wide has implications for planes
2057 * so best keep it disabled when not needed.
2058 */
2059 if (crtc->config->double_wide)
2060 val &= ~PIPECONF_DOUBLE_WIDE;
2061
2062 /* Don't disable the pipe or its PLL if the pipe A/B force quirk needs them */
2063 if (!(pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) &&
2064 !(pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
2065 val &= ~PIPECONF_ENABLE;
2066
2067 I915_WRITE(reg, val);
2068 if ((val & PIPECONF_ENABLE) == 0)
2069 intel_wait_for_pipe_off(crtc);
2070 }
2071
2072 static bool need_vtd_wa(struct drm_device *dev)
2073 {
2074 #ifdef CONFIG_INTEL_IOMMU
2075 if (INTEL_INFO(dev)->gen >= 6 && intel_iommu_gfx_mapped)
2076 return true;
2077 #endif
2078 return false;
2079 }
2080
2081 static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
2082 {
2083 return IS_GEN2(dev_priv) ? 2048 : 4096;
2084 }
2085
2086 static unsigned int intel_tile_width_bytes(const struct drm_i915_private *dev_priv,
2087 uint64_t fb_modifier, unsigned int cpp)
2088 {
2089 switch (fb_modifier) {
2090 case DRM_FORMAT_MOD_NONE:
2091 return cpp;
2092 case I915_FORMAT_MOD_X_TILED:
2093 if (IS_GEN2(dev_priv))
2094 return 128;
2095 else
2096 return 512;
2097 case I915_FORMAT_MOD_Y_TILED:
2098 if (IS_GEN2(dev_priv) || HAS_128_BYTE_Y_TILING(dev_priv))
2099 return 128;
2100 else
2101 return 512;
2102 case I915_FORMAT_MOD_Yf_TILED:
2103 switch (cpp) {
2104 case 1:
2105 return 64;
2106 case 2:
2107 case 4:
2108 return 128;
2109 case 8:
2110 case 16:
2111 return 256;
2112 default:
2113 MISSING_CASE(cpp);
2114 return cpp;
2115 }
2116 break;
2117 default:
2118 MISSING_CASE(fb_modifier);
2119 return cpp;
2120 }
2121 }
2122
2123 unsigned int intel_tile_height(const struct drm_i915_private *dev_priv,
2124 uint64_t fb_modifier, unsigned int cpp)
2125 {
2126 if (fb_modifier == DRM_FORMAT_MOD_NONE)
2127 return 1;
2128 else
2129 return intel_tile_size(dev_priv) /
2130 intel_tile_width_bytes(dev_priv, fb_modifier, cpp);
2131 }
2132
2133 /* Return the tile dimensions in pixel units */
2134 static void intel_tile_dims(const struct drm_i915_private *dev_priv,
2135 unsigned int *tile_width,
2136 unsigned int *tile_height,
2137 uint64_t fb_modifier,
2138 unsigned int cpp)
2139 {
2140 unsigned int tile_width_bytes =
2141 intel_tile_width_bytes(dev_priv, fb_modifier, cpp);
2142
2143 *tile_width = tile_width_bytes / cpp;
2144 *tile_height = intel_tile_size(dev_priv) / tile_width_bytes;
2145 }
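/*
 * Worked example based on the tables above: an X-tiled fb on non-gen2
 * hardware with cpp = 4 has tile_width_bytes = 512, so
 * *tile_width = 512 / 4 = 128 pixels and
 * *tile_height = 4096 / 512 = 8 rows, i.e. each 4KiB tile covers a
 * 128x8 pixel block.
 */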
2146
2147 unsigned int
2148 intel_fb_align_height(struct drm_device *dev, unsigned int height,
2149 uint32_t pixel_format, uint64_t fb_modifier)
2150 {
2151 unsigned int cpp = drm_format_plane_cpp(pixel_format, 0);
2152 unsigned int tile_height = intel_tile_height(to_i915(dev), fb_modifier, cpp);
2153
2154 return ALIGN(height, tile_height);
2155 }
2156
2157 unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
2158 {
2159 unsigned int size = 0;
2160 int i;
2161
2162 for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
2163 size += rot_info->plane[i].width * rot_info->plane[i].height;
2164
2165 return size;
2166 }
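/*
 * plane[i].width/height are measured in tiles, and on the platforms
 * that use rotated views a tile is 4096 bytes (one page), so the sum
 * returned here is effectively the size of the rotated view in pages.
 */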
2167
2168 static void
2169 intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
2170 const struct drm_framebuffer *fb,
2171 unsigned int rotation)
2172 {
2173 if (intel_rotation_90_or_270(rotation)) {
2174 *view = i915_ggtt_view_rotated;
2175 view->params.rotated = to_intel_framebuffer(fb)->rot_info;
2176 } else {
2177 *view = i915_ggtt_view_normal;
2178 }
2179 }
2180
2181 static void
2182 intel_fill_fb_info(struct drm_i915_private *dev_priv,
2183 struct drm_framebuffer *fb)
2184 {
2185 struct intel_rotation_info *info = &to_intel_framebuffer(fb)->rot_info;
2186 unsigned int tile_size, tile_width, tile_height, cpp;
2187
2188 tile_size = intel_tile_size(dev_priv);
2189
2190 cpp = drm_format_plane_cpp(fb->pixel_format, 0);
2191 intel_tile_dims(dev_priv, &tile_width, &tile_height,
2192 fb->modifier[0], cpp);
2193
2194 info->plane[0].width = DIV_ROUND_UP(fb->pitches[0], tile_width * cpp);
2195 info->plane[0].height = DIV_ROUND_UP(fb->height, tile_height);
2196
2197 if (fb->pixel_format == DRM_FORMAT_NV12) {
2198 cpp = drm_format_plane_cpp(fb->pixel_format, 1);
2199 intel_tile_dims(dev_priv, &tile_width, &tile_height,
2200 fb->modifier[1], cpp);
2201
2202 info->uv_offset = fb->offsets[1];
2203 info->plane[1].width = DIV_ROUND_UP(fb->pitches[1], tile_width * cpp);
2204 info->plane[1].height = DIV_ROUND_UP(fb->height / 2, tile_height);
2205 }
2206 }
2207
2208 static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
2209 {
2210 if (INTEL_INFO(dev_priv)->gen >= 9)
2211 return 256 * 1024;
2212 else if (IS_BROADWATER(dev_priv) || IS_CRESTLINE(dev_priv) ||
2213 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
2214 return 128 * 1024;
2215 else if (INTEL_INFO(dev_priv)->gen >= 4)
2216 return 4 * 1024;
2217 else
2218 return 0;
2219 }
2220
2221 static unsigned int intel_surf_alignment(const struct drm_i915_private *dev_priv,
2222 uint64_t fb_modifier)
2223 {
2224 switch (fb_modifier) {
2225 case DRM_FORMAT_MOD_NONE:
2226 return intel_linear_alignment(dev_priv);
2227 case I915_FORMAT_MOD_X_TILED:
2228 if (INTEL_INFO(dev_priv)->gen >= 9)
2229 return 256 * 1024;
2230 return 0;
2231 case I915_FORMAT_MOD_Y_TILED:
2232 case I915_FORMAT_MOD_Yf_TILED:
2233 return 1 * 1024 * 1024;
2234 default:
2235 MISSING_CASE(fb_modifier);
2236 return 0;
2237 }
2238 }
2239
2240 int
2241 intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
2242 unsigned int rotation)
2243 {
2244 struct drm_device *dev = fb->dev;
2245 struct drm_i915_private *dev_priv = dev->dev_private;
2246 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
2247 struct i915_ggtt_view view;
2248 u32 alignment;
2249 int ret;
2250
2251 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
2252
2253 alignment = intel_surf_alignment(dev_priv, fb->modifier[0]);
2254
2255 intel_fill_fb_ggtt_view(&view, fb, rotation);
2256
2257 /* Note that the w/a also requires 64 PTE of padding following the
2258 * bo. We currently fill all unused PTE with the shadow page and so
2259 * we should always have valid PTE following the scanout preventing
2260 * the VT-d warning.
2261 */
2262 if (need_vtd_wa(dev) && alignment < 256 * 1024)
2263 alignment = 256 * 1024;
2264
2265 /*
2266 * Global gtt pte registers are special registers which actually forward
2267 * writes to a chunk of system memory, which means that there is no risk
2268 * that the register values disappear as soon as we call
2269 * intel_runtime_pm_put(), so it is correct to wrap only the
2270 * pin/unpin/fence and not more.
2271 */
2272 intel_runtime_pm_get(dev_priv);
2273
2274 ret = i915_gem_object_pin_to_display_plane(obj, alignment,
2275 &view);
2276 if (ret)
2277 goto err_pm;
2278
2279 /* Install a fence for tiled scan-out. Pre-i965 always needs a
2280 * fence, whereas 965+ only requires a fence if using
2281 * framebuffer compression. For simplicity, we always install
2282 * a fence as the cost is not that onerous.
2283 */
2284 if (view.type == I915_GGTT_VIEW_NORMAL) {
2285 ret = i915_gem_object_get_fence(obj);
2286 if (ret == -EDEADLK) {
2287 /*
2288 * -EDEADLK means there are no free fences
2289 * and no pending flips.
2290 *
2291 * This is propagated to atomic, but it uses
2292 * -EDEADLK to force a locking recovery, so
2293 * change the returned error to -EBUSY.
2294 */
2295 ret = -EBUSY;
2296 goto err_unpin;
2297 } else if (ret)
2298 goto err_unpin;
2299
2300 i915_gem_object_pin_fence(obj);
2301 }
2302
2303 intel_runtime_pm_put(dev_priv);
2304 return 0;
2305
2306 err_unpin:
2307 i915_gem_object_unpin_from_display_plane(obj, &view);
2308 err_pm:
2309 intel_runtime_pm_put(dev_priv);
2310 return ret;
2311 }
2312
2313 void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
2314 {
2315 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
2316 struct i915_ggtt_view view;
2317
2318 WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));
2319
2320 intel_fill_fb_ggtt_view(&view, fb, rotation);
2321
2322 if (view.type == I915_GGTT_VIEW_NORMAL)
2323 i915_gem_object_unpin_fence(obj);
2324
2325 i915_gem_object_unpin_from_display_plane(obj, &view);
2326 }
2327
2328 /*
2329 * Adjust the tile offset by moving the difference into
2330 * the x/y offsets.
2331 *
2332 * Input tile dimensions and pitch must already be
2333 * rotated to match x and y, and in pixel units.
2334 */
2335 static u32 intel_adjust_tile_offset(int *x, int *y,
2336 unsigned int tile_width,
2337 unsigned int tile_height,
2338 unsigned int tile_size,
2339 unsigned int pitch_tiles,
2340 u32 old_offset,
2341 u32 new_offset)
2342 {
2343 unsigned int tiles;
2344
2345 WARN_ON(old_offset & (tile_size - 1));
2346 WARN_ON(new_offset & (tile_size - 1));
2347 WARN_ON(new_offset > old_offset);
2348
2349 tiles = (old_offset - new_offset) / tile_size;
2350
2351 *y += tiles / pitch_tiles * tile_height;
2352 *x += tiles % pitch_tiles * tile_width;
2353
2354 return new_offset;
2355 }
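/*
 * Example: with 128x8 pixel tiles and pitch_tiles = 8, an offset
 * difference of two tiles gives tiles = 2, so
 * *y += (2 / 8) * 8 = 0 and *x += (2 % 8) * 128 = 256 pixels.
 */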
2356
2357 /*
2358 * Computes the linear offset to the base tile and adjusts
2359 * x, y. bytes per pixel is assumed to be a power-of-two.
2360 *
2361 * In the 90/270 rotated case, x and y are assumed
2362 * to be already rotated to match the rotated GTT view, and
2363 * pitch is the tile_height aligned framebuffer height.
2364 */
2365 u32 intel_compute_tile_offset(int *x, int *y,
2366 const struct drm_framebuffer *fb, int plane,
2367 unsigned int pitch,
2368 unsigned int rotation)
2369 {
2370 const struct drm_i915_private *dev_priv = to_i915(fb->dev);
2371 uint64_t fb_modifier = fb->modifier[plane];
2372 unsigned int cpp = drm_format_plane_cpp(fb->pixel_format, plane);
2373 u32 offset, offset_aligned, alignment;
2374
2375 alignment = intel_surf_alignment(dev_priv, fb_modifier);
2376 if (alignment)
2377 alignment--;
2378
2379 if (fb_modifier != DRM_FORMAT_MOD_NONE) {
2380 unsigned int tile_size, tile_width, tile_height;
2381 unsigned int tile_rows, tiles, pitch_tiles;
2382
2383 tile_size = intel_tile_size(dev_priv);
2384 intel_tile_dims(dev_priv, &tile_width, &tile_height,
2385 fb_modifier, cpp);
2386
2387 if (intel_rotation_90_or_270(rotation)) {
2388 pitch_tiles = pitch / tile_height;
2389 swap(tile_width, tile_height);
2390 } else {
2391 pitch_tiles = pitch / (tile_width * cpp);
2392 }
2393
2394 tile_rows = *y / tile_height;
2395 *y %= tile_height;
2396
2397 tiles = *x / tile_width;
2398 *x %= tile_width;
2399
2400 offset = (tile_rows * pitch_tiles + tiles) * tile_size;
2401 offset_aligned = offset & ~alignment;
2402
2403 intel_adjust_tile_offset(x, y, tile_width, tile_height,
2404 tile_size, pitch_tiles,
2405 offset, offset_aligned);
2406 } else {
2407 offset = *y * pitch + *x * cpp;
2408 offset_aligned = offset & ~alignment;
2409
2410 *y = (offset & alignment) / pitch;
2411 *x = ((offset & alignment) - *y * pitch) / cpp;
2412 }
2413
2414 return offset_aligned;
2415 }
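/*
 * Worked example for the tiled branch: X-tiled, cpp = 4, pitch = 4096
 * bytes (pitch_tiles = 8), 128x8 pixel tiles, *x = 200, *y = 30.
 * Then tile_rows = 3 (*y becomes 6), tiles = 1 (*x becomes 72) and
 * offset = (3 * 8 + 1) * 4096 = 102400. On pre-gen9 X-tiled hardware
 * intel_surf_alignment() is 0, so offset_aligned == offset and x/y
 * are left unchanged by intel_adjust_tile_offset().
 */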
2416
2417 static int i9xx_format_to_fourcc(int format)
2418 {
2419 switch (format) {
2420 case DISPPLANE_8BPP:
2421 return DRM_FORMAT_C8;
2422 case DISPPLANE_BGRX555:
2423 return DRM_FORMAT_XRGB1555;
2424 case DISPPLANE_BGRX565:
2425 return DRM_FORMAT_RGB565;
2426 default:
2427 case DISPPLANE_BGRX888:
2428 return DRM_FORMAT_XRGB8888;
2429 case DISPPLANE_RGBX888:
2430 return DRM_FORMAT_XBGR8888;
2431 case DISPPLANE_BGRX101010:
2432 return DRM_FORMAT_XRGB2101010;
2433 case DISPPLANE_RGBX101010:
2434 return DRM_FORMAT_XBGR2101010;
2435 }
2436 }
2437
2438 static int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
2439 {
2440 switch (format) {
2441 case PLANE_CTL_FORMAT_RGB_565:
2442 return DRM_FORMAT_RGB565;
2443 default:
2444 case PLANE_CTL_FORMAT_XRGB_8888:
2445 if (rgb_order) {
2446 if (alpha)
2447 return DRM_FORMAT_ABGR8888;
2448 else
2449 return DRM_FORMAT_XBGR8888;
2450 } else {
2451 if (alpha)
2452 return DRM_FORMAT_ARGB8888;
2453 else
2454 return DRM_FORMAT_XRGB8888;
2455 }
2456 case PLANE_CTL_FORMAT_XRGB_2101010:
2457 if (rgb_order)
2458 return DRM_FORMAT_XBGR2101010;
2459 else
2460 return DRM_FORMAT_XRGB2101010;
2461 }
2462 }
2463
2464 static bool
2465 intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
2466 struct intel_initial_plane_config *plane_config)
2467 {
2468 struct drm_device *dev = crtc->base.dev;
2469 struct drm_i915_private *dev_priv = to_i915(dev);
2470 struct i915_ggtt *ggtt = &dev_priv->ggtt;
2471 struct drm_i915_gem_object *obj = NULL;
2472 struct drm_mode_fb_cmd2 mode_cmd = { 0 };
2473 struct drm_framebuffer *fb = &plane_config->fb->base;
2474 u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
2475 u32 size_aligned = round_up(plane_config->base + plane_config->size,
2476 PAGE_SIZE);
2477
2478 size_aligned -= base_aligned;
2479
2480 if (plane_config->size == 0)
2481 return false;
2482
2483 /* If the FB is too big, just don't use it since fbdev is not very
2484 * important and we should probably use that space with FBC or other
2485 * features. */
2486 if (size_aligned * 2 > ggtt->stolen_usable_size)
2487 return false;
2488
2489 mutex_lock(&dev->struct_mutex);
2490
2491 obj = i915_gem_object_create_stolen_for_preallocated(dev,
2492 base_aligned,
2493 base_aligned,
2494 size_aligned);
2495 if (!obj) {
2496 mutex_unlock(&dev->struct_mutex);
2497 return false;
2498 }
2499
2500 obj->tiling_mode = plane_config->tiling;
2501 if (obj->tiling_mode == I915_TILING_X)
2502 obj->stride = fb->pitches[0];
2503
2504 mode_cmd.pixel_format = fb->pixel_format;
2505 mode_cmd.width = fb->width;
2506 mode_cmd.height = fb->height;
2507 mode_cmd.pitches[0] = fb->pitches[0];
2508 mode_cmd.modifier[0] = fb->modifier[0];
2509 mode_cmd.flags = DRM_MODE_FB_MODIFIERS;
2510
2511 if (intel_framebuffer_init(dev, to_intel_framebuffer(fb),
2512 &mode_cmd, obj)) {
2513 DRM_DEBUG_KMS("intel fb init failed\n");
2514 goto out_unref_obj;
2515 }
2516
2517 mutex_unlock(&dev->struct_mutex);
2518
2519 DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
2520 return true;
2521
2522 out_unref_obj:
2523 drm_gem_object_unreference(&obj->base);
2524 mutex_unlock(&dev->struct_mutex);
2525 return false;
2526 }
2527
2528 static void
2529 intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
2530 struct intel_initial_plane_config *plane_config)
2531 {
2532 struct drm_device *dev = intel_crtc->base.dev;
2533 struct drm_i915_private *dev_priv = dev->dev_private;
2534 struct drm_crtc *c;
2535 struct intel_crtc *i;
2536 struct drm_i915_gem_object *obj;
2537 struct drm_plane *primary = intel_crtc->base.primary;
2538 struct drm_plane_state *plane_state = primary->state;
2539 struct drm_crtc_state *crtc_state = intel_crtc->base.state;
2540 struct intel_plane *intel_plane = to_intel_plane(primary);
2541 struct intel_plane_state *intel_state =
2542 to_intel_plane_state(plane_state);
2543 struct drm_framebuffer *fb;
2544
2545 if (!plane_config->fb)
2546 return;
2547
2548 if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
2549 fb = &plane_config->fb->base;
2550 goto valid_fb;
2551 }
2552
2553 kfree(plane_config->fb);
2554
2555 /*
2556 * Failed to alloc the obj, check to see if we should share
2557 * an fb with another CRTC instead
2558 */
2559 for_each_crtc(dev, c) {
2560 i = to_intel_crtc(c);
2561
2562 if (c == &intel_crtc->base)
2563 continue;
2564
2565 if (!i->active)
2566 continue;
2567
2568 fb = c->primary->fb;
2569 if (!fb)
2570 continue;
2571
2572 obj = intel_fb_obj(fb);
2573 if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) {
2574 drm_framebuffer_reference(fb);
2575 goto valid_fb;
2576 }
2577 }
2578
2579 /*
2580 * We've failed to reconstruct the BIOS FB. Current display state
2581 * indicates that the primary plane is visible, but has a NULL FB,
2582 * which will lead to problems later if we don't fix it up. The
2583 * simplest solution is to just disable the primary plane now and
2584 * pretend the BIOS never had it enabled.
2585 */
2586 to_intel_plane_state(plane_state)->visible = false;
2587 crtc_state->plane_mask &= ~(1 << drm_plane_index(primary));
2588 intel_pre_disable_primary_noatomic(&intel_crtc->base);
2589 intel_plane->disable_plane(primary, &intel_crtc->base);
2590
2591 return;
2592
2593 valid_fb:
2594 plane_state->src_x = 0;
2595 plane_state->src_y = 0;
2596 plane_state->src_w = fb->width << 16;
2597 plane_state->src_h = fb->height << 16;
2598
2599 plane_state->crtc_x = 0;
2600 plane_state->crtc_y = 0;
2601 plane_state->crtc_w = fb->width;
2602 plane_state->crtc_h = fb->height;
2603
2604 intel_state->src.x1 = plane_state->src_x;
2605 intel_state->src.y1 = plane_state->src_y;
2606 intel_state->src.x2 = plane_state->src_x + plane_state->src_w;
2607 intel_state->src.y2 = plane_state->src_y + plane_state->src_h;
2608 intel_state->dst.x1 = plane_state->crtc_x;
2609 intel_state->dst.y1 = plane_state->crtc_y;
2610 intel_state->dst.x2 = plane_state->crtc_x + plane_state->crtc_w;
2611 intel_state->dst.y2 = plane_state->crtc_y + plane_state->crtc_h;
2612
2613 obj = intel_fb_obj(fb);
2614 if (obj->tiling_mode != I915_TILING_NONE)
2615 dev_priv->preserve_bios_swizzle = true;
2616
2617 drm_framebuffer_reference(fb);
2618 primary->fb = primary->state->fb = fb;
2619 primary->crtc = primary->state->crtc = &intel_crtc->base;
2620 intel_crtc->base.state->plane_mask |= (1 << drm_plane_index(primary));
2621 obj->frontbuffer_bits |= to_intel_plane(primary)->frontbuffer_bit;
2622 }
2623
2624 static void i9xx_update_primary_plane(struct drm_plane *primary,
2625 const struct intel_crtc_state *crtc_state,
2626 const struct intel_plane_state *plane_state)
2627 {
2628 struct drm_device *dev = primary->dev;
2629 struct drm_i915_private *dev_priv = dev->dev_private;
2630 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
2631 struct drm_framebuffer *fb = plane_state->base.fb;
2632 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
2633 int plane = intel_crtc->plane;
2634 u32 linear_offset;
2635 u32 dspcntr;
2636 i915_reg_t reg = DSPCNTR(plane);
2637 unsigned int rotation = plane_state->base.rotation;
2638 int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
2639 int x = plane_state->src.x1 >> 16;
2640 int y = plane_state->src.y1 >> 16;
2641
2642 dspcntr = DISPPLANE_GAMMA_ENABLE;
2643
2644 dspcntr |= DISPLAY_PLANE_ENABLE;
2645
2646 if (INTEL_INFO(dev)->gen < 4) {
2647 if (intel_crtc->pipe == PIPE_B)
2648 dspcntr |= DISPPLANE_SEL_PIPE_B;
2649
2650 /* pipesrc and dspsize control the size that is scaled from,
2651 * which should always be the user's requested size.
2652 */
2653 I915_WRITE(DSPSIZE(plane),
2654 ((crtc_state->pipe_src_h - 1) << 16) |
2655 (crtc_state->pipe_src_w - 1));
2656 I915_WRITE(DSPPOS(plane), 0);
2657 } else if (IS_CHERRYVIEW(dev) && plane == PLANE_B) {
2658 I915_WRITE(PRIMSIZE(plane),
2659 ((crtc_state->pipe_src_h - 1) << 16) |
2660 (crtc_state->pipe_src_w - 1));
2661 I915_WRITE(PRIMPOS(plane), 0);
2662 I915_WRITE(PRIMCNSTALPHA(plane), 0);
2663 }
2664
2665 switch (fb->pixel_format) {
2666 case DRM_FORMAT_C8:
2667 dspcntr |= DISPPLANE_8BPP;
2668 break;
2669 case DRM_FORMAT_XRGB1555:
2670 dspcntr |= DISPPLANE_BGRX555;
2671 break;
2672 case DRM_FORMAT_RGB565:
2673 dspcntr |= DISPPLANE_BGRX565;
2674 break;
2675 case DRM_FORMAT_XRGB8888:
2676 dspcntr |= DISPPLANE_BGRX888;
2677 break;
2678 case DRM_FORMAT_XBGR8888:
2679 dspcntr |= DISPPLANE_RGBX888;
2680 break;
2681 case DRM_FORMAT_XRGB2101010:
2682 dspcntr |= DISPPLANE_BGRX101010;
2683 break;
2684 case DRM_FORMAT_XBGR2101010:
2685 dspcntr |= DISPPLANE_RGBX101010;
2686 break;
2687 default:
2688 BUG();
2689 }
2690
2691 if (INTEL_INFO(dev)->gen >= 4 &&
2692 obj->tiling_mode != I915_TILING_NONE)
2693 dspcntr |= DISPPLANE_TILED;
2694
2695 if (IS_G4X(dev))
2696 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
2697
2698 linear_offset = y * fb->pitches[0] + x * cpp;
2699
2700 if (INTEL_INFO(dev)->gen >= 4) {
2701 intel_crtc->dspaddr_offset =
2702 intel_compute_tile_offset(&x, &y, fb, 0,
2703 fb->pitches[0], rotation);
2704 linear_offset -= intel_crtc->dspaddr_offset;
2705 } else {
2706 intel_crtc->dspaddr_offset = linear_offset;
2707 }
2708
2709 if (rotation == BIT(DRM_ROTATE_180)) {
2710 dspcntr |= DISPPLANE_ROTATE_180;
2711
2712 x += (crtc_state->pipe_src_w - 1);
2713 y += (crtc_state->pipe_src_h - 1);
2714
2715 /* Find the last pixel of the last line of the display
2716 data and add it to linear_offset */
2717 linear_offset +=
2718 (crtc_state->pipe_src_h - 1) * fb->pitches[0] +
2719 (crtc_state->pipe_src_w - 1) * cpp;
2720 }
2721
2722 intel_crtc->adjusted_x = x;
2723 intel_crtc->adjusted_y = y;
2724
2725 I915_WRITE(reg, dspcntr);
2726
2727 I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
2728 if (INTEL_INFO(dev)->gen >= 4) {
2729 I915_WRITE(DSPSURF(plane),
2730 i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
2731 I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
2732 I915_WRITE(DSPLINOFF(plane), linear_offset);
2733 } else
2734 I915_WRITE(DSPADDR(plane), i915_gem_obj_ggtt_offset(obj) + linear_offset);
2735 POSTING_READ(reg);
2736 }
2737
2738 static void i9xx_disable_primary_plane(struct drm_plane *primary,
2739 struct drm_crtc *crtc)
2740 {
2741 struct drm_device *dev = crtc->dev;
2742 struct drm_i915_private *dev_priv = dev->dev_private;
2743 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
2744 int plane = intel_crtc->plane;
2745
2746 I915_WRITE(DSPCNTR(plane), 0);
2747 if (INTEL_INFO(dev_priv)->gen >= 4)
2748 I915_WRITE(DSPSURF(plane), 0);
2749 else
2750 I915_WRITE(DSPADDR(plane), 0);
2751 POSTING_READ(DSPCNTR(plane));
2752 }
2753
2754 static void ironlake_update_primary_plane(struct drm_plane *primary,
2755 const struct intel_crtc_state *crtc_state,
2756 const struct intel_plane_state *plane_state)
2757 {
2758 struct drm_device *dev = primary->dev;
2759 struct drm_i915_private *dev_priv = dev->dev_private;
2760 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
2761 struct drm_framebuffer *fb = plane_state->base.fb;
2762 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
2763 int plane = intel_crtc->plane;
2764 u32 linear_offset;
2765 u32 dspcntr;
2766 i915_reg_t reg = DSPCNTR(plane);
2767 unsigned int rotation = plane_state->base.rotation;
2768 int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
2769 int x = plane_state->src.x1 >> 16;
2770 int y = plane_state->src.y1 >> 16;
2771
2772 dspcntr = DISPPLANE_GAMMA_ENABLE;
2773 dspcntr |= DISPLAY_PLANE_ENABLE;
2774
2775 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
2776 dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
2777
2778 switch (fb->pixel_format) {
2779 case DRM_FORMAT_C8:
2780 dspcntr |= DISPPLANE_8BPP;
2781 break;
2782 case DRM_FORMAT_RGB565:
2783 dspcntr |= DISPPLANE_BGRX565;
2784 break;
2785 case DRM_FORMAT_XRGB8888:
2786 dspcntr |= DISPPLANE_BGRX888;
2787 break;
2788 case DRM_FORMAT_XBGR8888:
2789 dspcntr |= DISPPLANE_RGBX888;
2790 break;
2791 case DRM_FORMAT_XRGB2101010:
2792 dspcntr |= DISPPLANE_BGRX101010;
2793 break;
2794 case DRM_FORMAT_XBGR2101010:
2795 dspcntr |= DISPPLANE_RGBX101010;
2796 break;
2797 default:
2798 BUG();
2799 }
2800
2801 if (obj->tiling_mode != I915_TILING_NONE)
2802 dspcntr |= DISPPLANE_TILED;
2803
2804 if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
2805 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
2806
2807 linear_offset = y * fb->pitches[0] + x * cpp;
2808 intel_crtc->dspaddr_offset =
2809 intel_compute_tile_offset(&x, &y, fb, 0,
2810 fb->pitches[0], rotation);
2811 linear_offset -= intel_crtc->dspaddr_offset;
2812 if (rotation == BIT(DRM_ROTATE_180)) {
2813 dspcntr |= DISPPLANE_ROTATE_180;
2814
2815 if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
2816 x += (crtc_state->pipe_src_w - 1);
2817 y += (crtc_state->pipe_src_h - 1);
2818
2819 /* Find the last pixel of the last line of the display
2820 data and add it to linear_offset */
2821 linear_offset +=
2822 (crtc_state->pipe_src_h - 1) * fb->pitches[0] +
2823 (crtc_state->pipe_src_w - 1) * cpp;
2824 }
2825 }
2826
2827 intel_crtc->adjusted_x = x;
2828 intel_crtc->adjusted_y = y;
2829
2830 I915_WRITE(reg, dspcntr);
2831
2832 I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
2833 I915_WRITE(DSPSURF(plane),
2834 i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
2835 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
2836 I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
2837 } else {
2838 I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
2839 I915_WRITE(DSPLINOFF(plane), linear_offset);
2840 }
2841 POSTING_READ(reg);
2842 }
2843
2844 u32 intel_fb_stride_alignment(const struct drm_i915_private *dev_priv,
2845 uint64_t fb_modifier, uint32_t pixel_format)
2846 {
2847 if (fb_modifier == DRM_FORMAT_MOD_NONE) {
2848 return 64;
2849 } else {
2850 int cpp = drm_format_plane_cpp(pixel_format, 0);
2851
2852 return intel_tile_width_bytes(dev_priv, fb_modifier, cpp);
2853 }
2854 }
2855
2856 u32 intel_plane_obj_offset(struct intel_plane *intel_plane,
2857 struct drm_i915_gem_object *obj,
2858 unsigned int plane)
2859 {
2860 struct i915_ggtt_view view;
2861 struct i915_vma *vma;
2862 u64 offset;
2863
2864 intel_fill_fb_ggtt_view(&view, intel_plane->base.state->fb,
2865 intel_plane->base.state->rotation);
2866
2867 vma = i915_gem_obj_to_ggtt_view(obj, &view);
2868 if (WARN(!vma, "ggtt vma for display object not found! (view=%u)\n",
2869 view.type))
2870 return -1;
2871
2872 offset = vma->node.start;
2873
2874 if (plane == 1) {
2875 offset += vma->ggtt_view.params.rotated.uv_start_page *
2876 PAGE_SIZE;
2877 }
2878
2879 WARN_ON(upper_32_bits(offset));
2880
2881 return lower_32_bits(offset);
2882 }
2883
2884 static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
2885 {
2886 struct drm_device *dev = intel_crtc->base.dev;
2887 struct drm_i915_private *dev_priv = dev->dev_private;
2888
2889 I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
2890 I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
2891 I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
2892 }
2893
2894 /*
2895 * This function detaches (i.e. unbinds) unused scalers in hardware
2896 */
2897 static void skl_detach_scalers(struct intel_crtc *intel_crtc)
2898 {
2899 struct intel_crtc_scaler_state *scaler_state;
2900 int i;
2901
2902 scaler_state = &intel_crtc->config->scaler_state;
2903
2904 /* loop through and disable scalers that aren't in use */
2905 for (i = 0; i < intel_crtc->num_scalers; i++) {
2906 if (!scaler_state->scalers[i].in_use)
2907 skl_detach_scaler(intel_crtc, i);
2908 }
2909 }
2910
2911 u32 skl_plane_ctl_format(uint32_t pixel_format)
2912 {
2913 switch (pixel_format) {
2914 case DRM_FORMAT_C8:
2915 return PLANE_CTL_FORMAT_INDEXED;
2916 case DRM_FORMAT_RGB565:
2917 return PLANE_CTL_FORMAT_RGB_565;
2918 case DRM_FORMAT_XBGR8888:
2919 return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
2920 case DRM_FORMAT_XRGB8888:
2921 return PLANE_CTL_FORMAT_XRGB_8888;
2922 /*
2923 * XXX: For ARGB/ABGR formats we default to expecting scanout buffers
2924 * to be already pre-multiplied. We need to add a knob (or a different
2925 * DRM_FORMAT) for user-space to configure that.
2926 */
2927 case DRM_FORMAT_ABGR8888:
2928 return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX |
2929 PLANE_CTL_ALPHA_SW_PREMULTIPLY;
2930 case DRM_FORMAT_ARGB8888:
2931 return PLANE_CTL_FORMAT_XRGB_8888 |
2932 PLANE_CTL_ALPHA_SW_PREMULTIPLY;
2933 case DRM_FORMAT_XRGB2101010:
2934 return PLANE_CTL_FORMAT_XRGB_2101010;
2935 case DRM_FORMAT_XBGR2101010:
2936 return PLANE_CTL_ORDER_RGBX | PLANE_CTL_FORMAT_XRGB_2101010;
2937 case DRM_FORMAT_YUYV:
2938 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
2939 case DRM_FORMAT_YVYU:
2940 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
2941 case DRM_FORMAT_UYVY:
2942 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
2943 case DRM_FORMAT_VYUY:
2944 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
2945 default:
2946 MISSING_CASE(pixel_format);
2947 }
2948
2949 return 0;
2950 }
2951
2952 u32 skl_plane_ctl_tiling(uint64_t fb_modifier)
2953 {
2954 switch (fb_modifier) {
2955 case DRM_FORMAT_MOD_NONE:
2956 break;
2957 case I915_FORMAT_MOD_X_TILED:
2958 return PLANE_CTL_TILED_X;
2959 case I915_FORMAT_MOD_Y_TILED:
2960 return PLANE_CTL_TILED_Y;
2961 case I915_FORMAT_MOD_Yf_TILED:
2962 return PLANE_CTL_TILED_YF;
2963 default:
2964 MISSING_CASE(fb_modifier);
2965 }
2966
2967 return 0;
2968 }
2969
2970 u32 skl_plane_ctl_rotation(unsigned int rotation)
2971 {
2972 switch (rotation) {
2973 case BIT(DRM_ROTATE_0):
2974 break;
2975 /*
2976 * DRM_ROTATE_ is counter-clockwise to stay compatible with Xrandr,
2977 * while i915 HW rotation is clockwise; that's why we swap the values here.
2978 */
2979 case BIT(DRM_ROTATE_90):
2980 return PLANE_CTL_ROTATE_270;
2981 case BIT(DRM_ROTATE_180):
2982 return PLANE_CTL_ROTATE_180;
2983 case BIT(DRM_ROTATE_270):
2984 return PLANE_CTL_ROTATE_90;
2985 default:
2986 MISSING_CASE(rotation);
2987 }
2988
2989 return 0;
2990 }
2991
2992 static void skylake_update_primary_plane(struct drm_plane *plane,
2993 const struct intel_crtc_state *crtc_state,
2994 const struct intel_plane_state *plane_state)
2995 {
2996 struct drm_device *dev = plane->dev;
2997 struct drm_i915_private *dev_priv = dev->dev_private;
2998 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
2999 struct drm_framebuffer *fb = plane_state->base.fb;
3000 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
3001 int pipe = intel_crtc->pipe;
3002 u32 plane_ctl, stride_div, stride;
3003 u32 tile_height, plane_offset, plane_size;
3004 unsigned int rotation = plane_state->base.rotation;
3005 int x_offset, y_offset;
3006 u32 surf_addr;
3007 int scaler_id = plane_state->scaler_id;
3008 int src_x = plane_state->src.x1 >> 16;
3009 int src_y = plane_state->src.y1 >> 16;
3010 int src_w = drm_rect_width(&plane_state->src) >> 16;
3011 int src_h = drm_rect_height(&plane_state->src) >> 16;
3012 int dst_x = plane_state->dst.x1;
3013 int dst_y = plane_state->dst.y1;
3014 int dst_w = drm_rect_width(&plane_state->dst);
3015 int dst_h = drm_rect_height(&plane_state->dst);
3016
3017 plane_ctl = PLANE_CTL_ENABLE |
3018 PLANE_CTL_PIPE_GAMMA_ENABLE |
3019 PLANE_CTL_PIPE_CSC_ENABLE;
3020
3021 plane_ctl |= skl_plane_ctl_format(fb->pixel_format);
3022 plane_ctl |= skl_plane_ctl_tiling(fb->modifier[0]);
3023 plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
3024 plane_ctl |= skl_plane_ctl_rotation(rotation);
3025
3026 stride_div = intel_fb_stride_alignment(dev_priv, fb->modifier[0],
3027 fb->pixel_format);
3028 surf_addr = intel_plane_obj_offset(to_intel_plane(plane), obj, 0);
3029
3030 WARN_ON(drm_rect_width(&plane_state->src) == 0);
3031
3032 if (intel_rotation_90_or_270(rotation)) {
3033 int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
3034
3035 /* stride = Surface height in tiles */
3036 tile_height = intel_tile_height(dev_priv, fb->modifier[0], cpp);
3037 stride = DIV_ROUND_UP(fb->height, tile_height);
3038 x_offset = stride * tile_height - src_y - src_h;
3039 y_offset = src_x;
3040 plane_size = (src_w - 1) << 16 | (src_h - 1);
3041 } else {
3042 stride = fb->pitches[0] / stride_div;
3043 x_offset = src_x;
3044 y_offset = src_y;
3045 plane_size = (src_h - 1) << 16 | (src_w - 1);
3046 }
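/*
 * In the 90/270 case the plane scans out of the rotated GGTT view, so
 * the stride register is loaded with the fb height in tiles and the
 * x/y offsets and plane size are transposed into the rotated
 * coordinate space.
 */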
3047 plane_offset = y_offset << 16 | x_offset;
3048
3049 intel_crtc->adjusted_x = x_offset;
3050 intel_crtc->adjusted_y = y_offset;
3051
3052 I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl);
3053 I915_WRITE(PLANE_OFFSET(pipe, 0), plane_offset);
3054 I915_WRITE(PLANE_SIZE(pipe, 0), plane_size);
3055 I915_WRITE(PLANE_STRIDE(pipe, 0), stride);
3056
3057 if (scaler_id >= 0) {
3058 uint32_t ps_ctrl = 0;
3059
3060 WARN_ON(!dst_w || !dst_h);
3061 ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(0) |
3062 crtc_state->scaler_state.scalers[scaler_id].mode;
3063 I915_WRITE(SKL_PS_CTRL(pipe, scaler_id), ps_ctrl);
3064 I915_WRITE(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
3065 I915_WRITE(SKL_PS_WIN_POS(pipe, scaler_id), (dst_x << 16) | dst_y);
3066 I915_WRITE(SKL_PS_WIN_SZ(pipe, scaler_id), (dst_w << 16) | dst_h);
3067 I915_WRITE(PLANE_POS(pipe, 0), 0);
3068 } else {
3069 I915_WRITE(PLANE_POS(pipe, 0), (dst_y << 16) | dst_x);
3070 }
3071
3072 I915_WRITE(PLANE_SURF(pipe, 0), surf_addr);
3073
3074 POSTING_READ(PLANE_SURF(pipe, 0));
3075 }
3076
3077 static void skylake_disable_primary_plane(struct drm_plane *primary,
3078 struct drm_crtc *crtc)
3079 {
3080 struct drm_device *dev = crtc->dev;
3081 struct drm_i915_private *dev_priv = dev->dev_private;
3082 int pipe = to_intel_crtc(crtc)->pipe;
3083
3084 I915_WRITE(PLANE_CTL(pipe, 0), 0);
3085 I915_WRITE(PLANE_SURF(pipe, 0), 0);
3086 POSTING_READ(PLANE_SURF(pipe, 0));
3087 }
3088
3089 /* Assume fb object is pinned & idle & fenced and just update base pointers */
3090 static int
3091 intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
3092 int x, int y, enum mode_set_atomic state)
3093 {
3094 /* Support for kgdboc is disabled; this needs a major rework. */
3095 DRM_ERROR("legacy panic handler not supported any more.\n");
3096
3097 return -ENODEV;
3098 }
3099
3100 static void intel_update_primary_planes(struct drm_device *dev)
3101 {
3102 struct drm_crtc *crtc;
3103
3104 for_each_crtc(dev, crtc) {
3105 struct intel_plane *plane = to_intel_plane(crtc->primary);
3106 struct intel_plane_state *plane_state;
3107
3108 drm_modeset_lock_crtc(crtc, &plane->base);
3109 plane_state = to_intel_plane_state(plane->base.state);
3110
3111 if (plane_state->visible)
3112 plane->update_plane(&plane->base,
3113 to_intel_crtc_state(crtc->state),
3114 plane_state);
3115
3116 drm_modeset_unlock_crtc(crtc);
3117 }
3118 }
3119
3120 void intel_prepare_reset(struct drm_i915_private *dev_priv)
3121 {
3122 /* no reset support for gen2 */
3123 if (IS_GEN2(dev_priv))
3124 return;
3125
3126 /* reset doesn't touch the display */
3127 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
3128 return;
3129
3130 drm_modeset_lock_all(dev_priv->dev);
3131 /*
3132 * Disabling the crtcs gracefully seems nicer. Also the
3133 * g33 docs say we should at least disable all the planes.
3134 */
3135 intel_display_suspend(dev_priv->dev);
3136 }
3137
3138 void intel_finish_reset(struct drm_i915_private *dev_priv)
3139 {
3140 /* no reset support for gen2 */
3141 if (IS_GEN2(dev_priv))
3142 return;
3143
3144 /* reset doesn't touch the display */
3145 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) {
3146 /*
3147 * Flips in the rings have been nuked by the reset,
3148 * so update the base address of all primary
3149 * planes to the last fb to make sure we're
3150 * showing the correct fb after a reset.
3151 *
3152 * FIXME: Atomic will make this obsolete since we won't schedule
3153 * CS-based flips (which might get lost in gpu resets) any more.
3154 */
3155 intel_update_primary_planes(dev_priv->dev);
3156 return;
3157 }
3158
3159 /*
3160 * The display has been reset as well,
3161 * so need a full re-initialization.
3162 */
3163 intel_runtime_pm_disable_interrupts(dev_priv);
3164 intel_runtime_pm_enable_interrupts(dev_priv);
3165
3166 intel_modeset_init_hw(dev_priv->dev);
3167
3168 spin_lock_irq(&dev_priv->irq_lock);
3169 if (dev_priv->display.hpd_irq_setup)
3170 dev_priv->display.hpd_irq_setup(dev_priv);
3171 spin_unlock_irq(&dev_priv->irq_lock);
3172
3173 intel_display_resume(dev_priv->dev);
3174
3175 intel_hpd_init(dev_priv);
3176
3177 drm_modeset_unlock_all(dev_priv->dev);
3178 }
3179
3180 static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
3181 {
3182 return !list_empty_careful(&to_intel_crtc(crtc)->flip_work);
3183 }
3184
3185 static void intel_update_pipe_config(struct intel_crtc *crtc,
3186 struct intel_crtc_state *old_crtc_state)
3187 {
3188 struct drm_device *dev = crtc->base.dev;
3189 struct drm_i915_private *dev_priv = dev->dev_private;
3190 struct intel_crtc_state *pipe_config =
3191 to_intel_crtc_state(crtc->base.state);
3192
3193 /* drm_atomic_helper_update_legacy_modeset_state might not be called. */
3194 crtc->base.mode = crtc->base.state->mode;
3195
3196 DRM_DEBUG_KMS("Updating pipe size %ix%i -> %ix%i\n",
3197 old_crtc_state->pipe_src_w, old_crtc_state->pipe_src_h,
3198 pipe_config->pipe_src_w, pipe_config->pipe_src_h);
3199
3200 /*
3201 * Update pipe size and adjust fitter if needed: the reason for this is
3202 * that in compute_mode_changes we check the native mode (not the pfit
3203 * mode) to see if we can flip rather than do a full mode set. In the
3204 * fastboot case, we'll flip, but if we don't update the pipesrc and
3205 * pfit state, we'll end up with a big fb scanned out into the wrong
3206 * sized surface.
3207 */
3208
3209 I915_WRITE(PIPESRC(crtc->pipe),
3210 ((pipe_config->pipe_src_w - 1) << 16) |
3211 (pipe_config->pipe_src_h - 1));
3212
3213 /* on skylake this is done by detaching scalers */
3214 if (INTEL_INFO(dev)->gen >= 9) {
3215 skl_detach_scalers(crtc);
3216
3217 if (pipe_config->pch_pfit.enabled)
3218 skylake_pfit_enable(crtc);
3219 } else if (HAS_PCH_SPLIT(dev)) {
3220 if (pipe_config->pch_pfit.enabled)
3221 ironlake_pfit_enable(crtc);
3222 else if (old_crtc_state->pch_pfit.enabled)
3223 ironlake_pfit_disable(crtc, true);
3224 }
3225 }
3226
3227 static void intel_fdi_normal_train(struct drm_crtc *crtc)
3228 {
3229 struct drm_device *dev = crtc->dev;
3230 struct drm_i915_private *dev_priv = dev->dev_private;
3231 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3232 int pipe = intel_crtc->pipe;
3233 i915_reg_t reg;
3234 u32 temp;
3235
3236 /* enable normal train */
3237 reg = FDI_TX_CTL(pipe);
3238 temp = I915_READ(reg);
3239 if (IS_IVYBRIDGE(dev)) {
3240 temp &= ~FDI_LINK_TRAIN_NONE_IVB;
3241 temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
3242 } else {
3243 temp &= ~FDI_LINK_TRAIN_NONE;
3244 temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
3245 }
3246 I915_WRITE(reg, temp);
3247
3248 reg = FDI_RX_CTL(pipe);
3249 temp = I915_READ(reg);
3250 if (HAS_PCH_CPT(dev)) {
3251 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3252 temp |= FDI_LINK_TRAIN_NORMAL_CPT;
3253 } else {
3254 temp &= ~FDI_LINK_TRAIN_NONE;
3255 temp |= FDI_LINK_TRAIN_NONE;
3256 }
3257 I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
3258
3259 /* wait one idle pattern time */
3260 POSTING_READ(reg);
3261 udelay(1000);
3262
3263 /* IVB wants error correction enabled */
3264 if (IS_IVYBRIDGE(dev))
3265 I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
3266 FDI_FE_ERRC_ENABLE);
3267 }
3268
3269 /* The FDI link training functions for ILK/Ibexpeak. */
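/*
 * The sequence below: put both CPU FDI TX and PCH FDI RX into training
 * pattern 1 and poll FDI_RX_IIR for bit lock, then switch both to
 * pattern 2 and poll for symbol lock. Normal link operation is enabled
 * separately by intel_fdi_normal_train() above.
 */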
3270 static void ironlake_fdi_link_train(struct drm_crtc *crtc)
3271 {
3272 struct drm_device *dev = crtc->dev;
3273 struct drm_i915_private *dev_priv = dev->dev_private;
3274 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3275 int pipe = intel_crtc->pipe;
3276 i915_reg_t reg;
3277 u32 temp, tries;
3278
3279 /* FDI needs bits from pipe first */
3280 assert_pipe_enabled(dev_priv, pipe);
3281
3282 /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
3283 for the train result */
3284 reg = FDI_RX_IMR(pipe);
3285 temp = I915_READ(reg);
3286 temp &= ~FDI_RX_SYMBOL_LOCK;
3287 temp &= ~FDI_RX_BIT_LOCK;
3288 I915_WRITE(reg, temp);
3289 I915_READ(reg);
3290 udelay(150);
3291
3292 /* enable CPU FDI TX and PCH FDI RX */
3293 reg = FDI_TX_CTL(pipe);
3294 temp = I915_READ(reg);
3295 temp &= ~FDI_DP_PORT_WIDTH_MASK;
3296 temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
3297 temp &= ~FDI_LINK_TRAIN_NONE;
3298 temp |= FDI_LINK_TRAIN_PATTERN_1;
3299 I915_WRITE(reg, temp | FDI_TX_ENABLE);
3300
3301 reg = FDI_RX_CTL(pipe);
3302 temp = I915_READ(reg);
3303 temp &= ~FDI_LINK_TRAIN_NONE;
3304 temp |= FDI_LINK_TRAIN_PATTERN_1;
3305 I915_WRITE(reg, temp | FDI_RX_ENABLE);
3306
3307 POSTING_READ(reg);
3308 udelay(150);
3309
3310 /* Ironlake workaround, enable clock pointer after FDI enable */
3311 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
3312 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
3313 FDI_RX_PHASE_SYNC_POINTER_EN);
3314
3315 reg = FDI_RX_IIR(pipe);
3316 for (tries = 0; tries < 5; tries++) {
3317 temp = I915_READ(reg);
3318 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3319
3320 if ((temp & FDI_RX_BIT_LOCK)) {
3321 DRM_DEBUG_KMS("FDI train 1 done.\n");
3322 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
3323 break;
3324 }
3325 }
3326 if (tries == 5)
3327 DRM_ERROR("FDI train 1 fail!\n");
3328
3329 /* Train 2 */
3330 reg = FDI_TX_CTL(pipe);
3331 temp = I915_READ(reg);
3332 temp &= ~FDI_LINK_TRAIN_NONE;
3333 temp |= FDI_LINK_TRAIN_PATTERN_2;
3334 I915_WRITE(reg, temp);
3335
3336 reg = FDI_RX_CTL(pipe);
3337 temp = I915_READ(reg);
3338 temp &= ~FDI_LINK_TRAIN_NONE;
3339 temp |= FDI_LINK_TRAIN_PATTERN_2;
3340 I915_WRITE(reg, temp);
3341
3342 POSTING_READ(reg);
3343 udelay(150);
3344
3345 reg = FDI_RX_IIR(pipe);
3346 for (tries = 0; tries < 5; tries++) {
3347 temp = I915_READ(reg);
3348 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3349
3350 if (temp & FDI_RX_SYMBOL_LOCK) {
3351 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
3352 DRM_DEBUG_KMS("FDI train 2 done.\n");
3353 break;
3354 }
3355 }
3356 if (tries == 5)
3357 DRM_ERROR("FDI train 2 fail!\n");
3358
3359 DRM_DEBUG_KMS("FDI train done\n");
3360
3361 }
3362
3363 static const int snb_b_fdi_train_param[] = {
3364 FDI_LINK_TRAIN_400MV_0DB_SNB_B,
3365 FDI_LINK_TRAIN_400MV_6DB_SNB_B,
3366 FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
3367 FDI_LINK_TRAIN_800MV_0DB_SNB_B,
3368 };
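/*
 * Voltage-swing/pre-emphasis combinations (400mV/0dB, 400mV/6dB,
 * 600mV/3.5dB, 800mV/0dB) that the SNB/IVB training loops below step
 * through until the link reports bit/symbol lock.
 */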
3369
3370 /* The FDI link training functions for SNB/Cougarpoint. */
3371 static void gen6_fdi_link_train(struct drm_crtc *crtc)
3372 {
3373 struct drm_device *dev = crtc->dev;
3374 struct drm_i915_private *dev_priv = dev->dev_private;
3375 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3376 int pipe = intel_crtc->pipe;
3377 i915_reg_t reg;
3378 u32 temp, i, retry;
3379
3380 /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
3381 for the train result */
3382 reg = FDI_RX_IMR(pipe);
3383 temp = I915_READ(reg);
3384 temp &= ~FDI_RX_SYMBOL_LOCK;
3385 temp &= ~FDI_RX_BIT_LOCK;
3386 I915_WRITE(reg, temp);
3387
3388 POSTING_READ(reg);
3389 udelay(150);
3390
3391 /* enable CPU FDI TX and PCH FDI RX */
3392 reg = FDI_TX_CTL(pipe);
3393 temp = I915_READ(reg);
3394 temp &= ~FDI_DP_PORT_WIDTH_MASK;
3395 temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
3396 temp &= ~FDI_LINK_TRAIN_NONE;
3397 temp |= FDI_LINK_TRAIN_PATTERN_1;
3398 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3399 /* SNB-B */
3400 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
3401 I915_WRITE(reg, temp | FDI_TX_ENABLE);
3402
3403 I915_WRITE(FDI_RX_MISC(pipe),
3404 FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
3405
3406 reg = FDI_RX_CTL(pipe);
3407 temp = I915_READ(reg);
3408 if (HAS_PCH_CPT(dev)) {
3409 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3410 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
3411 } else {
3412 temp &= ~FDI_LINK_TRAIN_NONE;
3413 temp |= FDI_LINK_TRAIN_PATTERN_1;
3414 }
3415 I915_WRITE(reg, temp | FDI_RX_ENABLE);
3416
3417 POSTING_READ(reg);
3418 udelay(150);
3419
3420 for (i = 0; i < 4; i++) {
3421 reg = FDI_TX_CTL(pipe);
3422 temp = I915_READ(reg);
3423 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3424 temp |= snb_b_fdi_train_param[i];
3425 I915_WRITE(reg, temp);
3426
3427 POSTING_READ(reg);
3428 udelay(500);
3429
3430 for (retry = 0; retry < 5; retry++) {
3431 reg = FDI_RX_IIR(pipe);
3432 temp = I915_READ(reg);
3433 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3434 if (temp & FDI_RX_BIT_LOCK) {
3435 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
3436 DRM_DEBUG_KMS("FDI train 1 done.\n");
3437 break;
3438 }
3439 udelay(50);
3440 }
3441 if (retry < 5)
3442 break;
3443 }
3444 if (i == 4)
3445 DRM_ERROR("FDI train 1 fail!\n");
3446
3447 /* Train 2 */
3448 reg = FDI_TX_CTL(pipe);
3449 temp = I915_READ(reg);
3450 temp &= ~FDI_LINK_TRAIN_NONE;
3451 temp |= FDI_LINK_TRAIN_PATTERN_2;
3452 if (IS_GEN6(dev)) {
3453 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3454 /* SNB-B */
3455 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
3456 }
3457 I915_WRITE(reg, temp);
3458
3459 reg = FDI_RX_CTL(pipe);
3460 temp = I915_READ(reg);
3461 if (HAS_PCH_CPT(dev)) {
3462 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3463 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
3464 } else {
3465 temp &= ~FDI_LINK_TRAIN_NONE;
3466 temp |= FDI_LINK_TRAIN_PATTERN_2;
3467 }
3468 I915_WRITE(reg, temp);
3469
3470 POSTING_READ(reg);
3471 udelay(150);
3472
3473 for (i = 0; i < 4; i++) {
3474 reg = FDI_TX_CTL(pipe);
3475 temp = I915_READ(reg);
3476 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3477 temp |= snb_b_fdi_train_param[i];
3478 I915_WRITE(reg, temp);
3479
3480 POSTING_READ(reg);
3481 udelay(500);
3482
3483 for (retry = 0; retry < 5; retry++) {
3484 reg = FDI_RX_IIR(pipe);
3485 temp = I915_READ(reg);
3486 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3487 if (temp & FDI_RX_SYMBOL_LOCK) {
3488 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
3489 DRM_DEBUG_KMS("FDI train 2 done.\n");
3490 break;
3491 }
3492 udelay(50);
3493 }
3494 if (retry < 5)
3495 break;
3496 }
3497 if (i == 4)
3498 DRM_ERROR("FDI train 2 fail!\n");
3499
3500 DRM_DEBUG_KMS("FDI train done.\n");
3501 }
3502
3503 /* Manual link training for Ivy Bridge A0 parts */
3504 static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
3505 {
3506 struct drm_device *dev = crtc->dev;
3507 struct drm_i915_private *dev_priv = dev->dev_private;
3508 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3509 int pipe = intel_crtc->pipe;
3510 i915_reg_t reg;
3511 u32 temp, i, j;
3512
3513 /* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bits
3514 for the train result */
3515 reg = FDI_RX_IMR(pipe);
3516 temp = I915_READ(reg);
3517 temp &= ~FDI_RX_SYMBOL_LOCK;
3518 temp &= ~FDI_RX_BIT_LOCK;
3519 I915_WRITE(reg, temp);
3520
3521 POSTING_READ(reg);
3522 udelay(150);
3523
3524 DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
3525 I915_READ(FDI_RX_IIR(pipe)));
3526
3527 /* Try each vswing and preemphasis setting twice before moving on */
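/* j counts attempts; indexing with j / 2 gives each snb_b_fdi_train_param entry two tries. */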
3528 for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
3529 /* disable first in case we need to retry */
3530 reg = FDI_TX_CTL(pipe);
3531 temp = I915_READ(reg);
3532 temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
3533 temp &= ~FDI_TX_ENABLE;
3534 I915_WRITE(reg, temp);
3535
3536 reg = FDI_RX_CTL(pipe);
3537 temp = I915_READ(reg);
3538 temp &= ~FDI_LINK_TRAIN_AUTO;
3539 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3540 temp &= ~FDI_RX_ENABLE;
3541 I915_WRITE(reg, temp);
3542
3543 /* enable CPU FDI TX and PCH FDI RX */
3544 reg = FDI_TX_CTL(pipe);
3545 temp = I915_READ(reg);
3546 temp &= ~FDI_DP_PORT_WIDTH_MASK;
3547 temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
3548 temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
3549 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3550 temp |= snb_b_fdi_train_param[j/2];
3551 temp |= FDI_COMPOSITE_SYNC;
3552 I915_WRITE(reg, temp | FDI_TX_ENABLE);
3553
3554 I915_WRITE(FDI_RX_MISC(pipe),
3555 FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
3556
3557 reg = FDI_RX_CTL(pipe);
3558 temp = I915_READ(reg);
3559 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
3560 temp |= FDI_COMPOSITE_SYNC;
3561 I915_WRITE(reg, temp | FDI_RX_ENABLE);
3562
3563 POSTING_READ(reg);
3564 udelay(1); /* should be 0.5us */
3565
3566 for (i = 0; i < 4; i++) {
3567 reg = FDI_RX_IIR(pipe);
3568 temp = I915_READ(reg);
3569 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3570
3571 if (temp & FDI_RX_BIT_LOCK ||
3572 (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
3573 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
3574 DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
3575 i);
3576 break;
3577 }
3578 udelay(1); /* should be 0.5us */
3579 }
3580 if (i == 4) {
3581 DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
3582 continue;
3583 }
3584
3585 /* Train 2 */
3586 reg = FDI_TX_CTL(pipe);
3587 temp = I915_READ(reg);
3588 temp &= ~FDI_LINK_TRAIN_NONE_IVB;
3589 temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
3590 I915_WRITE(reg, temp);
3591
3592 reg = FDI_RX_CTL(pipe);
3593 temp = I915_READ(reg);
3594 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3595 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
3596 I915_WRITE(reg, temp);
3597
3598 POSTING_READ(reg);
3599 udelay(2); /* should be 1.5us */
3600
3601 for (i = 0; i < 4; i++) {
3602 reg = FDI_RX_IIR(pipe);
3603 temp = I915_READ(reg);
3604 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3605
3606 if (temp & FDI_RX_SYMBOL_LOCK ||
3607 (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
3608 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
3609 DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
3610 i);
3611 goto train_done;
3612 }
3613 udelay(2); /* should be 1.5us */
3614 }
3615 if (i == 4)
3616 DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
3617 }
3618
3619 train_done:
3620 DRM_DEBUG_KMS("FDI train done.\n");
3621 }
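/*
 * Editor's note: a worked illustration (not part of the driver) of the
 * retry indexing in ivb_manual_fdi_link_train() above. Assuming the
 * usual four-entry snb_b_fdi_train_param table (as the gen6 path's
 * "for (i = 0; i < 4; i++)" loop suggests), j runs 0..7 and
 * snb_b_fdi_train_param[j / 2] tries each vswing/pre-emphasis entry
 * twice before giving up:
 *
 *   j:     0 1 2 3 4 5 6 7
 *   j / 2: 0 0 1 1 2 2 3 3
 */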
3622
3623 static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
3624 {
3625 struct drm_device *dev = intel_crtc->base.dev;
3626 struct drm_i915_private *dev_priv = dev->dev_private;
3627 int pipe = intel_crtc->pipe;
3628 i915_reg_t reg;
3629 u32 temp;
3630
3631 /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
3632 reg = FDI_RX_CTL(pipe);
3633 temp = I915_READ(reg);
3634 temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
3635 temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
3636 temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
3637 I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
3638
3639 POSTING_READ(reg);
3640 udelay(200);
3641
3642 /* Switch from Rawclk to PCDclk */
3643 temp = I915_READ(reg);
3644 I915_WRITE(reg, temp | FDI_PCDCLK);
3645
3646 POSTING_READ(reg);
3647 udelay(200);
3648
3649 /* Enable CPU FDI TX PLL, always on for Ironlake */
3650 reg = FDI_TX_CTL(pipe);
3651 temp = I915_READ(reg);
3652 if ((temp & FDI_TX_PLL_ENABLE) == 0) {
3653 I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
3654
3655 POSTING_READ(reg);
3656 udelay(100);
3657 }
3658 }
3659
3660 static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
3661 {
3662 struct drm_device *dev = intel_crtc->base.dev;
3663 struct drm_i915_private *dev_priv = dev->dev_private;
3664 int pipe = intel_crtc->pipe;
3665 i915_reg_t reg;
3666 u32 temp;
3667
3668 /* Switch from PCDclk to Rawclk */
3669 reg = FDI_RX_CTL(pipe);
3670 temp = I915_READ(reg);
3671 I915_WRITE(reg, temp & ~FDI_PCDCLK);
3672
3673 /* Disable CPU FDI TX PLL */
3674 reg = FDI_TX_CTL(pipe);
3675 temp = I915_READ(reg);
3676 I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
3677
3678 POSTING_READ(reg);
3679 udelay(100);
3680
3681 reg = FDI_RX_CTL(pipe);
3682 temp = I915_READ(reg);
3683 I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
3684
3685 /* Wait for the clocks to turn off. */
3686 POSTING_READ(reg);
3687 udelay(100);
3688 }
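/*
 * Editor's note: a minimal sketch (not part of the driver) of the
 * read-modify-write-and-settle idiom that ironlake_fdi_pll_enable() and
 * ironlake_fdi_pll_disable() above repeat: read the control register,
 * adjust bits, write it back, flush the write with a posting read, then
 * busy-wait for the documented settle time. The helper name and the
 * clear/set split are illustrative only.
 */
static inline void example_fdi_rmw_and_settle(struct drm_i915_private *dev_priv,
					      i915_reg_t reg, u32 clear, u32 set,
					      unsigned int settle_us)
{
	u32 temp = I915_READ(reg);

	temp &= ~clear;
	temp |= set;
	I915_WRITE(reg, temp);

	POSTING_READ(reg); /* make sure the write has landed before timing the delay */
	udelay(settle_us);
}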
3689
3690 static void ironlake_fdi_disable(struct drm_crtc *crtc)
3691 {
3692 struct drm_device *dev = crtc->dev;
3693 struct drm_i915_private *dev_priv = dev->dev_private;
3694 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3695 int pipe = intel_crtc->pipe;
3696 i915_reg_t reg;
3697 u32 temp;
3698
3699 /* disable CPU FDI tx and PCH FDI rx */
3700 reg = FDI_TX_CTL(pipe);
3701 temp = I915_READ(reg);
3702 I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
3703 POSTING_READ(reg);
3704
3705 reg = FDI_RX_CTL(pipe);
3706 temp = I915_READ(reg);
3707 temp &= ~(0x7 << 16);
3708 temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
3709 I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
3710
3711 POSTING_READ(reg);
3712 udelay(100);
3713
3714 /* Ironlake workaround, disable clock pointer after downing FDI */
3715 if (HAS_PCH_IBX(dev))
3716 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
3717
3718 /* still set train pattern 1 */
3719 reg = FDI_TX_CTL(pipe);
3720 temp = I915_READ(reg);
3721 temp &= ~FDI_LINK_TRAIN_NONE;
3722 temp |= FDI_LINK_TRAIN_PATTERN_1;
3723 I915_WRITE(reg, temp);
3724
3725 reg = FDI_RX_CTL(pipe);
3726 temp = I915_READ(reg);
3727 if (HAS_PCH_CPT(dev)) {
3728 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3729 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
3730 } else {
3731 temp &= ~FDI_LINK_TRAIN_NONE;
3732 temp |= FDI_LINK_TRAIN_PATTERN_1;
3733 }
3734 /* BPC in FDI rx is consistent with that in PIPECONF */
3735 temp &= ~(0x07 << 16);
3736 temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
3737 I915_WRITE(reg, temp);
3738
3739 POSTING_READ(reg);
3740 udelay(100);
3741 }
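/*
 * Editor's note on the BPC copy above: PIPECONF_BPC_MASK covers the
 * 3-bit BPC field at PIPECONF bits 7:5 (hence the ">> 5" used elsewhere
 * in this file), so shifting it left by 11 lands it at bits 18:16 of
 * FDI_RX_CTL -- exactly the field cleared by ~(0x07 << 16) two lines
 * earlier.
 */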
3742
3743 bool intel_has_pending_fb_unpin(struct drm_device *dev)
3744 {
3745 struct intel_crtc *crtc;
3746
3747 /* Note that we don't need to be called with mode_config.lock here
3748 * as our list of CRTC objects is static for the lifetime of the
3749 * device and so cannot disappear as we iterate. Similarly, we can
3750 * happily treat the predicates as racy, atomic checks as userspace
3751 * cannot claim and pin a new fb without at least acquiring the
3752 * struct_mutex and so serialising with us.
3753 */
3754 for_each_intel_crtc(dev, crtc) {
3755 if (atomic_read(&crtc->unpin_work_count) == 0)
3756 continue;
3757
3758 if (!list_empty_careful(&crtc->flip_work))
3759 intel_wait_for_vblank(dev, crtc->pipe);
3760
3761 return true;
3762 }
3763
3764 return false;
3765 }
3766
3767 static void page_flip_completed(struct intel_crtc *intel_crtc, struct intel_flip_work *work)
3768 {
3769 struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
3770 struct drm_plane_state *new_plane_state;
3771 struct drm_plane *primary = intel_crtc->base.primary;
3772
3773 if (work->event)
3774 drm_crtc_send_vblank_event(&intel_crtc->base, work->event);
3775
3776 drm_crtc_vblank_put(&intel_crtc->base);
3777
3778 new_plane_state = &work->old_plane_state[0]->base;
3779 if (work->num_planes >= 1 &&
3780 new_plane_state->plane == primary &&
3781 new_plane_state->fb)
3782 trace_i915_flip_complete(intel_crtc->plane,
3783 intel_fb_obj(new_plane_state->fb));
3784
3785 if (work->can_async_unpin) {
3786 list_del_init(&work->head);
3787 wake_up_all(&dev_priv->pending_flip_queue);
3788 }
3789
3790 queue_work(dev_priv->wq, &work->unpin_work);
3791 }
3792
3793 static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
3794 {
3795 struct drm_device *dev = crtc->dev;
3796 struct drm_i915_private *dev_priv = dev->dev_private;
3797 long ret;
3798
3799 WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));
3800
3801 ret = wait_event_interruptible_timeout(
3802 dev_priv->pending_flip_queue,
3803 !intel_crtc_has_pending_flip(crtc),
3804 60*HZ);
3805
3806 if (ret < 0)
3807 return ret;
3808
3809 WARN(ret == 0, "Stuck page flip\n");
3810
3811 return 0;
3812 }
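/*
 * Editor's note: the return-value handling above follows the
 * wait_event_interruptible_timeout() contract: it returns a negative
 * errno if interrupted by a signal, 0 if the 60 second timeout elapsed
 * with the condition still false, or the remaining jiffies (>= 1)
 * otherwise -- hence the ret < 0 pass-through and the WARN on ret == 0.
 */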
3813
3814 static void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
3815 {
3816 u32 temp;
3817
3818 I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);
3819
3820 mutex_lock(&dev_priv->sb_lock);
3821
3822 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
3823 temp |= SBI_SSCCTL_DISABLE;
3824 intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
3825
3826 mutex_unlock(&dev_priv->sb_lock);
3827 }
3828
3829 /* Program iCLKIP clock to the desired frequency */
3830 static void lpt_program_iclkip(struct drm_crtc *crtc)
3831 {
3832 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
3833 int clock = to_intel_crtc(crtc)->config->base.adjusted_mode.crtc_clock;
3834 u32 divsel, phaseinc, auxdiv, phasedir = 0;
3835 u32 temp;
3836
3837 lpt_disable_iclkip(dev_priv);
3838
3839 /* The iCLK virtual clock root frequency is in MHz,
3840 * but the adjusted_mode->crtc_clock is in kHz. To get the
3841 * divisors, it is necessary to divide one by the other, so we
3842 * convert the virtual clock root frequency to kHz here as
3843 * well, for higher precision.
3844 */
3845 for (auxdiv = 0; auxdiv < 2; auxdiv++) {
3846 u32 iclk_virtual_root_freq = 172800 * 1000;
3847 u32 iclk_pi_range = 64;
3848 u32 desired_divisor;
3849
3850 desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
3851 clock << auxdiv);
3852 divsel = (desired_divisor / iclk_pi_range) - 2;
3853 phaseinc = desired_divisor % iclk_pi_range;
3854
3855 /*
3856 * Near 20MHz is a corner case which is
3857 * out of range for the 7-bit divisor
3858 */
3859 if (divsel <= 0x7f)
3860 break;
3861 }
3862
3863 /* This should not happen with any sane values */
3864 WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
3865 ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
3866 WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
3867 ~SBI_SSCDIVINTPHASE_INCVAL_MASK);
3868
3869 DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
3870 clock,
3871 auxdiv,
3872 divsel,
3873 phasedir,
3874 phaseinc);
3875
3876 mutex_lock(&dev_priv->sb_lock);
3877
3878 /* Program SSCDIVINTPHASE6 */
3879 temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
3880 temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
3881 temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
3882 temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
3883 temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
3884 temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
3885 temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
3886 intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
3887
3888 /* Program SSCAUXDIV */
3889 temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
3890 temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
3891 temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
3892 intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
3893
3894 /* Enable modulator and associated divider */
3895 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
3896 temp &= ~SBI_SSCCTL_DISABLE;
3897 intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
3898
3899 mutex_unlock(&dev_priv->sb_lock);
3900
3901 /* Wait for initialization time */
3902 udelay(24);
3903
3904 I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
3905 }
3906
3907 int lpt_get_iclkip(struct drm_i915_private *dev_priv)
3908 {
3909 u32 divsel, phaseinc, auxdiv;
3910 u32 iclk_virtual_root_freq = 172800 * 1000;
3911 u32 iclk_pi_range = 64;
3912 u32 desired_divisor;
3913 u32 temp;
3914
3915 if ((I915_READ(PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
3916 return 0;
3917
3918 mutex_lock(&dev_priv->sb_lock);
3919
3920 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
3921 if (temp & SBI_SSCCTL_DISABLE) {
3922 mutex_unlock(&dev_priv->sb_lock);
3923 return 0;
3924 }
3925
3926 temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
3927 divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
3928 SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
3929 phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
3930 SBI_SSCDIVINTPHASE_INCVAL_SHIFT;
3931
3932 temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
3933 auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
3934 SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;
3935
3936 mutex_unlock(&dev_priv->sb_lock);
3937
3938 desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;
3939
3940 return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
3941 desired_divisor << auxdiv);
3942 }
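/*
 * Editor's note: a worked example of the divisor math shared by
 * lpt_program_iclkip() and lpt_get_iclkip() above, using an assumed
 * pixel clock of 108000 kHz:
 *
 *   desired_divisor = DIV_ROUND_CLOSEST(172800000, 108000 << 0) = 1600
 *   divsel   = 1600 / 64 - 2 = 23   (fits in 7 bits, so auxdiv = 0)
 *   phaseinc = 1600 % 64     = 0
 *
 * and reading the configuration back:
 *
 *   desired_divisor = (23 + 2) * 64 + 0 = 1600
 *   clock = DIV_ROUND_CLOSEST(172800000, 1600 << 0) = 108000 kHz
 */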
3943
3944 static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
3945 enum pipe pch_transcoder)
3946 {
3947 struct drm_device *dev = crtc->base.dev;
3948 struct drm_i915_private *dev_priv = dev->dev_private;
3949 enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
3950
3951 I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
3952 I915_READ(HTOTAL(cpu_transcoder)));
3953 I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
3954 I915_READ(HBLANK(cpu_transcoder)));
3955 I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
3956 I915_READ(HSYNC(cpu_transcoder)));
3957
3958 I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
3959 I915_READ(VTOTAL(cpu_transcoder)));
3960 I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
3961 I915_READ(VBLANK(cpu_transcoder)));
3962 I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
3963 I915_READ(VSYNC(cpu_transcoder)));
3964 I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
3965 I915_READ(VSYNCSHIFT(cpu_transcoder)));
3966 }
3967
3968 static void cpt_set_fdi_bc_bifurcation(struct drm_device *dev, bool enable)
3969 {
3970 struct drm_i915_private *dev_priv = dev->dev_private;
3971 uint32_t temp;
3972
3973 temp = I915_READ(SOUTH_CHICKEN1);
3974 if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
3975 return;
3976
3977 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
3978 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
3979
3980 temp &= ~FDI_BC_BIFURCATION_SELECT;
3981 if (enable)
3982 temp |= FDI_BC_BIFURCATION_SELECT;
3983
3984 DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis");
3985 I915_WRITE(SOUTH_CHICKEN1, temp);
3986 POSTING_READ(SOUTH_CHICKEN1);
3987 }
3988
3989 static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
3990 {
3991 struct drm_device *dev = intel_crtc->base.dev;
3992
3993 switch (intel_crtc->pipe) {
3994 case PIPE_A:
3995 break;
3996 case PIPE_B:
3997 if (intel_crtc->config->fdi_lanes > 2)
3998 cpt_set_fdi_bc_bifurcation(dev, false);
3999 else
4000 cpt_set_fdi_bc_bifurcation(dev, true);
4001
4002 break;
4003 case PIPE_C:
4004 cpt_set_fdi_bc_bifurcation(dev, true);
4005
4006 break;
4007 default:
4008 BUG();
4009 }
4010 }
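/*
 * Editor's note summarizing the bifurcation policy above: pipe A leaves
 * the bit alone, pipe B clears it only when it needs more than two FDI
 * lanes for itself, and pipe C always sets it -- consistent with FDI C
 * being fed, as the code implies, by the bifurcated half of FDI B's
 * lanes.
 */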
4011
4012 /* Return which DP Port should be selected for Transcoder DP control */
4013 static enum port
4014 intel_trans_dp_port_sel(struct drm_crtc *crtc)
4015 {
4016 struct drm_device *dev = crtc->dev;
4017 struct intel_encoder *encoder;
4018
4019 for_each_encoder_on_crtc(dev, crtc, encoder) {
4020 if (encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
4021 encoder->type == INTEL_OUTPUT_EDP)
4022 return enc_to_dig_port(&encoder->base)->port;
4023 }
4024
4025 return -1;
4026 }
4027
4028 /*
4029 * Enable PCH resources required for PCH ports:
4030 * - PCH PLLs
4031 * - FDI training & RX/TX
4032 * - update transcoder timings
4033 * - DP transcoding bits
4034 * - transcoder
4035 */
4036 static void ironlake_pch_enable(struct drm_crtc *crtc)
4037 {
4038 struct drm_device *dev = crtc->dev;
4039 struct drm_i915_private *dev_priv = dev->dev_private;
4040 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4041 int pipe = intel_crtc->pipe;
4042 u32 temp;
4043
4044 assert_pch_transcoder_disabled(dev_priv, pipe);
4045
4046 if (IS_IVYBRIDGE(dev))
4047 ivybridge_update_fdi_bc_bifurcation(intel_crtc);
4048
4049 /* Write the TU size bits before fdi link training, so that error
4050 * detection works. */
4051 I915_WRITE(FDI_RX_TUSIZE1(pipe),
4052 I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
4053
4054 /* For PCH output, training FDI link */
4055 dev_priv->display.fdi_link_train(crtc);
4056
4057 /* We need to program the right clock selection before writing the pixel
4058 * multiplier into the DPLL. */
4059 if (HAS_PCH_CPT(dev)) {
4060 u32 sel;
4061
4062 temp = I915_READ(PCH_DPLL_SEL);
4063 temp |= TRANS_DPLL_ENABLE(pipe);
4064 sel = TRANS_DPLLB_SEL(pipe);
4065 if (intel_crtc->config->shared_dpll ==
4066 intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
4067 temp |= sel;
4068 else
4069 temp &= ~sel;
4070 I915_WRITE(PCH_DPLL_SEL, temp);
4071 }
4072
4073 /* XXX: PCH PLLs can be enabled any time before we enable the PCH
4074 * transcoder, and we actually should do this to not upset any PCH
4075 * transcoder that already uses the clock when we share it.
4076 *
4077 * Note that enable_shared_dpll tries to do the right thing, but
4078 * get_shared_dpll unconditionally resets the pll - we need that to have
4079 * the right LVDS enable sequence. */
4080 intel_enable_shared_dpll(intel_crtc);
4081
4082 /* set transcoder timing, panel must allow it */
4083 assert_panel_unlocked(dev_priv, pipe);
4084 ironlake_pch_transcoder_set_timings(intel_crtc, pipe);
4085
4086 intel_fdi_normal_train(crtc);
4087
4088 /* For PCH DP, enable TRANS_DP_CTL */
4089 if (HAS_PCH_CPT(dev) && intel_crtc->config->has_dp_encoder) {
4090 const struct drm_display_mode *adjusted_mode =
4091 &intel_crtc->config->base.adjusted_mode;
4092 u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
4093 i915_reg_t reg = TRANS_DP_CTL(pipe);
4094 temp = I915_READ(reg);
4095 temp &= ~(TRANS_DP_PORT_SEL_MASK |
4096 TRANS_DP_SYNC_MASK |
4097 TRANS_DP_BPC_MASK);
4098 temp |= TRANS_DP_OUTPUT_ENABLE;
4099 temp |= bpc << 9; /* same format but at 11:9 */
4100
4101 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
4102 temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
4103 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
4104 temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
4105
4106 switch (intel_trans_dp_port_sel(crtc)) {
4107 case PORT_B:
4108 temp |= TRANS_DP_PORT_SEL_B;
4109 break;
4110 case PORT_C:
4111 temp |= TRANS_DP_PORT_SEL_C;
4112 break;
4113 case PORT_D:
4114 temp |= TRANS_DP_PORT_SEL_D;
4115 break;
4116 default:
4117 BUG();
4118 }
4119
4120 I915_WRITE(reg, temp);
4121 }
4122
4123 ironlake_enable_pch_transcoder(dev_priv, pipe);
4124 }
4125
4126 static void lpt_pch_enable(struct drm_crtc *crtc)
4127 {
4128 struct drm_device *dev = crtc->dev;
4129 struct drm_i915_private *dev_priv = dev->dev_private;
4130 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4131 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
4132
4133 assert_pch_transcoder_disabled(dev_priv, TRANSCODER_A);
4134
4135 lpt_program_iclkip(crtc);
4136
4137 /* Set transcoder timing. */
4138 ironlake_pch_transcoder_set_timings(intel_crtc, PIPE_A);
4139
4140 lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
4141 }
4142
4143 static void cpt_verify_modeset(struct drm_device *dev, int pipe)
4144 {
4145 struct drm_i915_private *dev_priv = dev->dev_private;
4146 i915_reg_t dslreg = PIPEDSL(pipe);
4147 u32 temp;
4148
4149 temp = I915_READ(dslreg);
4150 udelay(500);
4151 if (wait_for(I915_READ(dslreg) != temp, 5)) {
4152 if (wait_for(I915_READ(dslreg) != temp, 5))
4153 DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
4154 }
4155 }
4156
4157 static int
4158 skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
4159 unsigned scaler_user, int *scaler_id, unsigned int rotation,
4160 int src_w, int src_h, int dst_w, int dst_h)
4161 {
4162 struct intel_crtc_scaler_state *scaler_state =
4163 &crtc_state->scaler_state;
4164 struct intel_crtc *intel_crtc =
4165 to_intel_crtc(crtc_state->base.crtc);
4166 int need_scaling;
4167
4168 need_scaling = intel_rotation_90_or_270(rotation) ?
4169 (src_h != dst_w || src_w != dst_h):
4170 (src_w != dst_w || src_h != dst_h);
4171
4172 /*
4173 * If the plane is being disabled, the scaler is no longer required,
4174 * or a force detach is requested, free the scaler bound to this
4175 * plane/crtc by clearing its bit in scaler_state->scaler_users.
4176 *
4177 * The scaler state in crtc_state is marked free here so that the
4178 * scaler can be assigned to another user. The actual register
4179 * update to free the scaler is done in plane/panel-fit programming.
4180 * For this reason crtc/plane_state->scaler_id isn't reset here.
4181 */
4182 if (force_detach || !need_scaling) {
4183 if (*scaler_id >= 0) {
4184 scaler_state->scaler_users &= ~(1 << scaler_user);
4185 scaler_state->scalers[*scaler_id].in_use = 0;
4186
4187 DRM_DEBUG_KMS("scaler_user index %u.%u: "
4188 "Staged freeing scaler id %d scaler_users = 0x%x\n",
4189 intel_crtc->pipe, scaler_user, *scaler_id,
4190 scaler_state->scaler_users);
4191 *scaler_id = -1;
4192 }
4193 return 0;
4194 }
4195
4196 /* range checks */
4197 if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
4198 dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
4199
4200 src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
4201 dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H) {
4202 DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
4203 "size is out of scaler range\n",
4204 intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h);
4205 return -EINVAL;
4206 }
4207
4208 /* mark this plane as a scaler user in crtc_state */
4209 scaler_state->scaler_users |= (1 << scaler_user);
4210 DRM_DEBUG_KMS("scaler_user index %u.%u: "
4211 "staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
4212 intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
4213 scaler_state->scaler_users);
4214
4215 return 0;
4216 }
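/*
 * Editor's note: a minimal sketch (not part of the driver) of how the
 * scaler_state->scaler_users bitmask staged above is consumed. Each
 * potential user owns one bit: SKL_CRTC_INDEX for the crtc's own panel
 * fitting, and drm_plane_index() for each plane, as the two callers
 * below show. The helper name is illustrative only.
 */
static inline bool example_scaler_user_active(const struct intel_crtc_scaler_state *scaler_state,
					      unsigned int scaler_user)
{
	return (scaler_state->scaler_users & (1 << scaler_user)) != 0;
}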
4217
4218 /**
4219 * skl_update_scaler_crtc - Stages update to scaler state for a given crtc.
4220 *
4221 * @state: crtc state
4222 *
4223 * Return:
4224 * 0 - scaler_users updated successfully
4225 * error - requested scaling cannot be supported or other error condition
4226 */
4227 int skl_update_scaler_crtc(struct intel_crtc_state *state)
4228 {
4229 struct intel_crtc *intel_crtc = to_intel_crtc(state->base.crtc);
4230 const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;
4231
4232 DRM_DEBUG_KMS("Updating scaler for [CRTC:%i] scaler_user index %u.%u\n",
4233 intel_crtc->base.base.id, intel_crtc->pipe, SKL_CRTC_INDEX);
4234
4235 return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
4236 &state->scaler_state.scaler_id, BIT(DRM_ROTATE_0),
4237 state->pipe_src_w, state->pipe_src_h,
4238 adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_vdisplay);
4239 }
4240
4241 /**
4242 * skl_update_scaler_plane - Stages update to scaler state for a given plane.
4243 *
4244 * @crtc_state: crtc state
4245 * @plane_state: atomic plane state to update
4246 *
4247 * Return:
4248 * 0 - scaler_users updated successfully
4249 * error - requested scaling cannot be supported or other error condition
4250 */
4251 static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
4252 struct intel_plane_state *plane_state)
4253 {
4254
4255 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
4256 struct intel_plane *intel_plane =
4257 to_intel_plane(plane_state->base.plane);
4258 struct drm_framebuffer *fb = plane_state->base.fb;
4259 int ret;
4260
4261 bool force_detach = !fb || !plane_state->visible;
4262
4263 DRM_DEBUG_KMS("Updating scaler for [PLANE:%d] scaler_user index %u.%u\n",
4264 intel_plane->base.base.id, intel_crtc->pipe,
4265 drm_plane_index(&intel_plane->base));
4266
4267 ret = skl_update_scaler(crtc_state, force_detach,
4268 drm_plane_index(&intel_plane->base),
4269 &plane_state->scaler_id,
4270 plane_state->base.rotation,
4271 drm_rect_width(&plane_state->src) >> 16,
4272 drm_rect_height(&plane_state->src) >> 16,
4273 drm_rect_width(&plane_state->dst),
4274 drm_rect_height(&plane_state->dst));
4275
4276 if (ret || plane_state->scaler_id < 0)
4277 return ret;
4278
4279 /* check colorkey */
4280 if (plane_state->ckey.flags != I915_SET_COLORKEY_NONE) {
4281 DRM_DEBUG_KMS("[PLANE:%d] scaling with color key not allowed\n",
4282 intel_plane->base.base.id);
4283 return -EINVAL;
4284 }
4285
4286 /* Check src format */
4287 switch (fb->pixel_format) {
4288 case DRM_FORMAT_RGB565:
4289 case DRM_FORMAT_XBGR8888:
4290 case DRM_FORMAT_XRGB8888:
4291 case DRM_FORMAT_ABGR8888:
4292 case DRM_FORMAT_ARGB8888:
4293 case DRM_FORMAT_XRGB2101010:
4294 case DRM_FORMAT_XBGR2101010:
4295 case DRM_FORMAT_YUYV:
4296 case DRM_FORMAT_YVYU:
4297 case DRM_FORMAT_UYVY:
4298 case DRM_FORMAT_VYUY:
4299 break;
4300 default:
4301 DRM_DEBUG_KMS("[PLANE:%d] FB:%d unsupported scaling format 0x%x\n",
4302 intel_plane->base.base.id, fb->base.id, fb->pixel_format);
4303 return -EINVAL;
4304 }
4305
4306 return 0;
4307 }
4308
4309 static void skylake_scaler_disable(struct intel_crtc *crtc)
4310 {
4311 int i;
4312
4313 for (i = 0; i < crtc->num_scalers; i++)
4314 skl_detach_scaler(crtc, i);
4315 }
4316
4317 static void skylake_pfit_enable(struct intel_crtc *crtc)
4318 {
4319 struct drm_device *dev = crtc->base.dev;
4320 struct drm_i915_private *dev_priv = dev->dev_private;
4321 int pipe = crtc->pipe;
4322 struct intel_crtc_scaler_state *scaler_state =
4323 &crtc->config->scaler_state;
4324
4325 DRM_DEBUG_KMS("for crtc_state = %p\n", crtc->config);
4326
4327 if (crtc->config->pch_pfit.enabled) {
4328 int id;
4329
4330 if (WARN_ON(crtc->config->scaler_state.scaler_id < 0)) {
4331 DRM_ERROR("Requesting pfit without getting a scaler first\n");
4332 return;
4333 }
4334
4335 id = scaler_state->scaler_id;
4336 I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
4337 PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
4338 I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc->config->pch_pfit.pos);
4339 I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc->config->pch_pfit.size);
4340
4341 DRM_DEBUG_KMS("for crtc_state = %p scaler_id = %d\n", crtc->config, id);
4342 }
4343 }
4344
4345 static void ironlake_pfit_enable(struct intel_crtc *crtc)
4346 {
4347 struct drm_device *dev = crtc->base.dev;
4348 struct drm_i915_private *dev_priv = dev->dev_private;
4349 int pipe = crtc->pipe;
4350
4351 if (crtc->config->pch_pfit.enabled) {
4352 /* Force use of hard-coded filter coefficients
4353 * as some pre-programmed values are broken,
4354 * e.g. x201.
4355 */
4356 if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
4357 I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
4358 PF_PIPE_SEL_IVB(pipe));
4359 else
4360 I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
4361 I915_WRITE(PF_WIN_POS(pipe), crtc->config->pch_pfit.pos);
4362 I915_WRITE(PF_WIN_SZ(pipe), crtc->config->pch_pfit.size);
4363 }
4364 }
4365
4366 void hsw_enable_ips(struct intel_crtc *crtc)
4367 {
4368 struct drm_device *dev = crtc->base.dev;
4369 struct drm_i915_private *dev_priv = dev->dev_private;
4370
4371 if (!crtc->config->ips_enabled)
4372 return;
4373
4374 /*
4375 * We can only enable IPS after we enable a plane and wait for a vblank
4376 * This function is called from post_plane_update, which is run after
4377 * a vblank wait.
4378 */
4379
4380 assert_plane_enabled(dev_priv, crtc->plane);
4381 if (IS_BROADWELL(dev)) {
4382 mutex_lock(&dev_priv->rps.hw_lock);
4383 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0xc0000000));
4384 mutex_unlock(&dev_priv->rps.hw_lock);
4385 /* Quoting Art Runyan: "it's not safe to expect any particular
4386 * value in IPS_CTL bit 31 after enabling IPS through the
4387 * mailbox." Moreover, the mailbox may return a bogus state,
4388 * so we need to just enable it and continue on.
4389 */
4390 } else {
4391 I915_WRITE(IPS_CTL, IPS_ENABLE);
4392 /* The bit only becomes 1 in the next vblank, so this wait here
4393 * is essentially intel_wait_for_vblank. If we don't have this
4394 * and don't wait for vblanks until the end of crtc_enable, then
4395 * the HW state readout code will complain that the expected
4396 * IPS_CTL value is not the one we read. */
4397 if (wait_for(I915_READ_NOTRACE(IPS_CTL) & IPS_ENABLE, 50))
4398 DRM_ERROR("Timed out waiting for IPS enable\n");
4399 }
4400 }
4401
4402 void hsw_disable_ips(struct intel_crtc *crtc)
4403 {
4404 struct drm_device *dev = crtc->base.dev;
4405 struct drm_i915_private *dev_priv = dev->dev_private;
4406
4407 if (!crtc->config->ips_enabled)
4408 return;
4409
4410 assert_plane_enabled(dev_priv, crtc->plane);
4411 if (IS_BROADWELL(dev)) {
4412 mutex_lock(&dev_priv->rps.hw_lock);
4413 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
4414 mutex_unlock(&dev_priv->rps.hw_lock);
4415 /* wait for pcode to finish disabling IPS, which may take up to 42ms */
4416 if (wait_for((I915_READ(IPS_CTL) & IPS_ENABLE) == 0, 42))
4417 DRM_ERROR("Timed out waiting for IPS disable\n");
4418 } else {
4419 I915_WRITE(IPS_CTL, 0);
4420 POSTING_READ(IPS_CTL);
4421 }
4422
4423 /* We need to wait for a vblank before we can disable the plane. */
4424 intel_wait_for_vblank(dev, crtc->pipe);
4425 }
4426
4427 static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
4428 {
4429 if (intel_crtc->overlay) {
4430 struct drm_device *dev = intel_crtc->base.dev;
4431 struct drm_i915_private *dev_priv = dev->dev_private;
4432
4433 mutex_lock(&dev->struct_mutex);
4434 dev_priv->mm.interruptible = false;
4435 (void) intel_overlay_switch_off(intel_crtc->overlay);
4436 dev_priv->mm.interruptible = true;
4437 mutex_unlock(&dev->struct_mutex);
4438 }
4439
4440 /* Let userspace switch the overlay on again. In most cases userspace
4441 * has to recompute where to put it anyway.
4442 */
4443 }
4444
4445 /**
4446 * intel_post_enable_primary - Perform operations after enabling primary plane
4447 * @crtc: the CRTC whose primary plane was just enabled
4448 *
4449 * Performs potentially sleeping operations that must be done after the primary
4450 * plane is enabled, such as updating FBC and IPS. Note that this may be
4451 * called due to an explicit primary plane update, or due to an implicit
4452 * re-enable that is caused when a sprite plane is updated to no longer
4453 * completely hide the primary plane.
4454 */
4455 static void
4456 intel_post_enable_primary(struct drm_crtc *crtc)
4457 {
4458 struct drm_device *dev = crtc->dev;
4459 struct drm_i915_private *dev_priv = dev->dev_private;
4460 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4461 int pipe = intel_crtc->pipe;
4462
4463 /*
4464 * FIXME IPS should be fine as long as one plane is
4465 * enabled, but in practice it seems to have problems
4466 * when going from primary only to sprite only and vice
4467 * versa.
4468 */
4469 hsw_enable_ips(intel_crtc);
4470
4471 /*
4472 * Gen2 reports pipe underruns whenever all planes are disabled.
4473 * So don't enable underrun reporting before at least some planes
4474 * are enabled.
4475 * FIXME: Need to fix the logic to work when we turn off all planes
4476 * but leave the pipe running.
4477 */
4478 if (IS_GEN2(dev))
4479 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
4480
4481 /* Underruns don't always raise interrupts, so check manually. */
4482 intel_check_cpu_fifo_underruns(dev_priv);
4483 intel_check_pch_fifo_underruns(dev_priv);
4484 }
4485
4486 /* FIXME move all this to pre_plane_update() with proper state tracking */
4487 static void
4488 intel_pre_disable_primary(struct drm_crtc *crtc)
4489 {
4490 struct drm_device *dev = crtc->dev;
4491 struct drm_i915_private *dev_priv = dev->dev_private;
4492 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4493 int pipe = intel_crtc->pipe;
4494
4495 /*
4496 * Gen2 reports pipe underruns whenever all planes are disabled.
4497 * So disable underrun reporting before all the planes get disabled.
4498 * FIXME: Need to fix the logic to work when we turn off all planes
4499 * but leave the pipe running.
4500 */
4501 if (IS_GEN2(dev))
4502 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
4503
4504 /*
4505 * FIXME IPS should be fine as long as one plane is
4506 * enabled, but in practice it seems to have problems
4507 * when going from primary only to sprite only and vice
4508 * versa.
4509 */
4510 hsw_disable_ips(intel_crtc);
4511 }
4512
4513 /* FIXME get rid of this and use pre_plane_update */
4514 static void
4515 intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
4516 {
4517 struct drm_device *dev = crtc->dev;
4518 struct drm_i915_private *dev_priv = dev->dev_private;
4519 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4520 int pipe = intel_crtc->pipe;
4521
4522 intel_pre_disable_primary(crtc);
4523
4524 /*
4525 * Vblank time updates from the shadow to live plane control register
4526 * are blocked if the memory self-refresh mode is active at that
4527 * moment. So to make sure the plane gets truly disabled, disable
4528 * first the self-refresh mode. The self-refresh enable bit in turn
4529 * will be checked/applied by the HW only at the next frame start
4530 * event which is after the vblank start event, so we need to have a
4531 * wait-for-vblank between disabling the plane and the pipe.
4532 */
4533 if (HAS_GMCH_DISPLAY(dev)) {
4534 intel_set_memory_cxsr(dev_priv, false);
4535 dev_priv->wm.vlv.cxsr = false;
4536 intel_wait_for_vblank(dev, pipe);
4537 }
4538 }
4539
4540 static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
4541 {
4542 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
4543 struct drm_atomic_state *old_state = old_crtc_state->base.state;
4544 struct intel_crtc_state *pipe_config =
4545 to_intel_crtc_state(crtc->base.state);
4546 struct drm_device *dev = crtc->base.dev;
4547 struct drm_plane *primary = crtc->base.primary;
4548 struct drm_plane_state *old_pri_state =
4549 drm_atomic_get_existing_plane_state(old_state, primary);
4550
4551 intel_frontbuffer_flip(dev, pipe_config->fb_bits);
4552
4553 crtc->wm.cxsr_allowed = true;
4554
4555 if (pipe_config->update_wm_post && pipe_config->base.active)
4556 intel_update_watermarks(&crtc->base);
4557
4558 if (old_pri_state) {
4559 struct intel_plane_state *primary_state =
4560 to_intel_plane_state(primary->state);
4561 struct intel_plane_state *old_primary_state =
4562 to_intel_plane_state(old_pri_state);
4563
4564 intel_fbc_post_update(crtc);
4565
4566 if (primary_state->visible &&
4567 (needs_modeset(&pipe_config->base) ||
4568 !old_primary_state->visible))
4569 intel_post_enable_primary(&crtc->base);
4570 }
4571 }
4572
4573 static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state)
4574 {
4575 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
4576 struct drm_device *dev = crtc->base.dev;
4577 struct drm_i915_private *dev_priv = dev->dev_private;
4578 struct intel_crtc_state *pipe_config =
4579 to_intel_crtc_state(crtc->base.state);
4580 struct drm_atomic_state *old_state = old_crtc_state->base.state;
4581 struct drm_plane *primary = crtc->base.primary;
4582 struct drm_plane_state *old_pri_state =
4583 drm_atomic_get_existing_plane_state(old_state, primary);
4584 bool modeset = needs_modeset(&pipe_config->base);
4585
4586 if (old_pri_state) {
4587 struct intel_plane_state *primary_state =
4588 to_intel_plane_state(primary->state);
4589 struct intel_plane_state *old_primary_state =
4590 to_intel_plane_state(old_pri_state);
4591
4592 intel_fbc_pre_update(crtc);
4593
4594 if (old_primary_state->visible &&
4595 (modeset || !primary_state->visible))
4596 intel_pre_disable_primary(&crtc->base);
4597 }
4598
4599 if (pipe_config->disable_cxsr) {
4600 crtc->wm.cxsr_allowed = false;
4601
4602 /*
4603 * Vblank time updates from the shadow to live plane control register
4604 * are blocked if the memory self-refresh mode is active at that
4605 * moment. So to make sure the plane gets truly disabled, disable
4606 * first the self-refresh mode. The self-refresh enable bit in turn
4607 * will be checked/applied by the HW only at the next frame start
4608 * event which is after the vblank start event, so we need to have a
4609 * wait-for-vblank between disabling the plane and the pipe.
4610 */
4611 if (old_crtc_state->base.active) {
4612 intel_set_memory_cxsr(dev_priv, false);
4613 dev_priv->wm.vlv.cxsr = false;
4614 intel_wait_for_vblank(dev, crtc->pipe);
4615 }
4616 }
4617
4618 /*
4619 * IVB workaround: must disable low power watermarks for at least
4620 * one frame before enabling scaling. LP watermarks can be re-enabled
4621 * when scaling is disabled.
4622 *
4623 * WaCxSRDisabledForSpriteScaling:ivb
4624 */
4625 if (pipe_config->disable_lp_wm) {
4626 ilk_disable_lp_wm(dev);
4627 intel_wait_for_vblank(dev, crtc->pipe);
4628 }
4629
4630 /*
4631 * If we're doing a modeset, we're done. No need to do any pre-vblank
4632 * watermark programming here.
4633 */
4634 if (needs_modeset(&pipe_config->base))
4635 return;
4636
4637 /*
4638 * For platforms that support atomic watermarks, program the
4639 * 'intermediate' watermarks immediately. On pre-gen9 platforms, these
4640 * will be the intermediate values that are safe for both pre- and
4641 * post- vblank; when vblank happens, the 'active' values will be set
4642 * to the final 'target' values and we'll do this again to get the
4643 * optimal watermarks. For gen9+ platforms, the values we program here
4644 * will be the final target values which will get automatically latched
4645 * at vblank time; no further programming will be necessary.
4646 *
4647 * If a platform hasn't been transitioned to atomic watermarks yet,
4648 * we'll continue to update watermarks the old way, if flags tell
4649 * us to.
4650 */
4651 if (dev_priv->display.initial_watermarks != NULL)
4652 dev_priv->display.initial_watermarks(pipe_config);
4653 else if (pipe_config->update_wm_pre)
4654 intel_update_watermarks(&crtc->base);
4655 }
4656
4657 static void intel_crtc_disable_planes(struct drm_crtc *crtc, unsigned plane_mask)
4658 {
4659 struct drm_device *dev = crtc->dev;
4660 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4661 struct drm_plane *p;
4662 int pipe = intel_crtc->pipe;
4663
4664 intel_crtc_dpms_overlay_disable(intel_crtc);
4665
4666 drm_for_each_plane_mask(p, dev, plane_mask)
4667 to_intel_plane(p)->disable_plane(p, crtc);
4668
4669 /*
4670 * FIXME: Once we grow proper nuclear flip support out of this we need
4671 * to compute the mask of flip planes precisely. For the time being
4672 * consider this a flip to a NULL plane.
4673 */
4674 intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe));
4675 }
4676
4677 static void ironlake_crtc_enable(struct drm_crtc *crtc)
4678 {
4679 struct drm_device *dev = crtc->dev;
4680 struct drm_i915_private *dev_priv = dev->dev_private;
4681 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4682 struct intel_encoder *encoder;
4683 int pipe = intel_crtc->pipe;
4684 struct intel_crtc_state *pipe_config =
4685 to_intel_crtc_state(crtc->state);
4686
4687 if (WARN_ON(intel_crtc->active))
4688 return;
4689
4690 /*
4691 * Sometimes spurious CPU pipe underruns happen during FDI
4692 * training, at least with VGA+HDMI cloning. Suppress them.
4693 *
4694 * On ILK we get occasional spurious CPU pipe underruns
4695 * between eDP port A enable and vdd enable. Also PCH port
4696 * enable seems to result in the occasional CPU pipe underrun.
4697 *
4698 * Spurious PCH underruns also occur during PCH enabling.
4699 */
4700 if (intel_crtc->config->has_pch_encoder || IS_GEN5(dev_priv))
4701 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
4702 if (intel_crtc->config->has_pch_encoder)
4703 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
4704
4705 if (intel_crtc->config->has_pch_encoder)
4706 intel_prepare_shared_dpll(intel_crtc);
4707
4708 if (intel_crtc->config->has_dp_encoder)
4709 intel_dp_set_m_n(intel_crtc, M1_N1);
4710
4711 intel_set_pipe_timings(intel_crtc);
4712 intel_set_pipe_src_size(intel_crtc);
4713
4714 if (intel_crtc->config->has_pch_encoder) {
4715 intel_cpu_transcoder_set_m_n(intel_crtc,
4716 &intel_crtc->config->fdi_m_n, NULL);
4717 }
4718
4719 ironlake_set_pipeconf(crtc);
4720
4721 intel_crtc->active = true;
4722
4723 for_each_encoder_on_crtc(dev, crtc, encoder)
4724 if (encoder->pre_enable)
4725 encoder->pre_enable(encoder);
4726
4727 if (intel_crtc->config->has_pch_encoder) {
4728 /* Note: FDI PLL enabling _must_ be done before we enable the
4729 * cpu pipes, hence this is separate from all the other fdi/pch
4730 * enabling. */
4731 ironlake_fdi_pll_enable(intel_crtc);
4732 } else {
4733 assert_fdi_tx_disabled(dev_priv, pipe);
4734 assert_fdi_rx_disabled(dev_priv, pipe);
4735 }
4736
4737 ironlake_pfit_enable(intel_crtc);
4738
4739 /*
4740 * On ILK+ LUT must be loaded before the pipe is running but with
4741 * clocks enabled
4742 */
4743 intel_color_load_luts(&pipe_config->base);
4744
4745 if (dev_priv->display.initial_watermarks != NULL)
4746 dev_priv->display.initial_watermarks(intel_crtc->config);
4747 intel_enable_pipe(intel_crtc);
4748
4749 if (intel_crtc->config->has_pch_encoder)
4750 ironlake_pch_enable(crtc);
4751
4752 assert_vblank_disabled(crtc);
4753 drm_crtc_vblank_on(crtc);
4754
4755 for_each_encoder_on_crtc(dev, crtc, encoder)
4756 encoder->enable(encoder);
4757
4758 if (HAS_PCH_CPT(dev))
4759 cpt_verify_modeset(dev, intel_crtc->pipe);
4760
4761 /* Must wait for vblank to avoid spurious PCH FIFO underruns */
4762 if (intel_crtc->config->has_pch_encoder)
4763 intel_wait_for_vblank(dev, pipe);
4764 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
4765 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
4766 }
4767
4768 /* IPS only exists on ULT machines and is tied to pipe A. */
4769 static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
4770 {
4771 return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A;
4772 }
4773
4774 static void haswell_crtc_enable(struct drm_crtc *crtc)
4775 {
4776 struct drm_device *dev = crtc->dev;
4777 struct drm_i915_private *dev_priv = dev->dev_private;
4778 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4779 struct intel_encoder *encoder;
4780 int pipe = intel_crtc->pipe, hsw_workaround_pipe;
4781 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
4782 struct intel_crtc_state *pipe_config =
4783 to_intel_crtc_state(crtc->state);
4784
4785 if (WARN_ON(intel_crtc->active))
4786 return;
4787
4788 if (intel_crtc->config->has_pch_encoder)
4789 intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
4790 false);
4791
4792 if (intel_crtc->config->shared_dpll)
4793 intel_enable_shared_dpll(intel_crtc);
4794
4795 if (intel_crtc->config->has_dp_encoder)
4796 intel_dp_set_m_n(intel_crtc, M1_N1);
4797
4798 if (!intel_crtc->config->has_dsi_encoder)
4799 intel_set_pipe_timings(intel_crtc);
4800
4801 intel_set_pipe_src_size(intel_crtc);
4802
4803 if (cpu_transcoder != TRANSCODER_EDP &&
4804 !transcoder_is_dsi(cpu_transcoder)) {
4805 I915_WRITE(PIPE_MULT(cpu_transcoder),
4806 intel_crtc->config->pixel_multiplier - 1);
4807 }
4808
4809 if (intel_crtc->config->has_pch_encoder) {
4810 intel_cpu_transcoder_set_m_n(intel_crtc,
4811 &intel_crtc->config->fdi_m_n, NULL);
4812 }
4813
4814 if (!intel_crtc->config->has_dsi_encoder)
4815 haswell_set_pipeconf(crtc);
4816
4817 haswell_set_pipemisc(crtc);
4818
4819 intel_color_set_csc(&pipe_config->base);
4820
4821 intel_crtc->active = true;
4822
4823 if (intel_crtc->config->has_pch_encoder)
4824 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
4825 else
4826 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
4827
4828 for_each_encoder_on_crtc(dev, crtc, encoder) {
4829 if (encoder->pre_enable)
4830 encoder->pre_enable(encoder);
4831 }
4832
4833 if (intel_crtc->config->has_pch_encoder)
4834 dev_priv->display.fdi_link_train(crtc);
4835
4836 if (!intel_crtc->config->has_dsi_encoder)
4837 intel_ddi_enable_pipe_clock(intel_crtc);
4838
4839 if (INTEL_INFO(dev)->gen >= 9)
4840 skylake_pfit_enable(intel_crtc);
4841 else
4842 ironlake_pfit_enable(intel_crtc);
4843
4844 /*
4845 * On ILK+ LUT must be loaded before the pipe is running but with
4846 * clocks enabled
4847 */
4848 intel_color_load_luts(&pipe_config->base);
4849
4850 intel_ddi_set_pipe_settings(crtc);
4851 if (!intel_crtc->config->has_dsi_encoder)
4852 intel_ddi_enable_transcoder_func(crtc);
4853
4854 if (dev_priv->display.initial_watermarks != NULL)
4855 dev_priv->display.initial_watermarks(pipe_config);
4856 else
4857 intel_update_watermarks(crtc);
4858
4859 /* XXX: Do the pipe assertions at the right place for BXT DSI. */
4860 if (!intel_crtc->config->has_dsi_encoder)
4861 intel_enable_pipe(intel_crtc);
4862
4863 if (intel_crtc->config->has_pch_encoder)
4864 lpt_pch_enable(crtc);
4865
4866 if (intel_crtc->config->dp_encoder_is_mst)
4867 intel_ddi_set_vc_payload_alloc(crtc, true);
4868
4869 assert_vblank_disabled(crtc);
4870 drm_crtc_vblank_on(crtc);
4871
4872 for_each_encoder_on_crtc(dev, crtc, encoder) {
4873 encoder->enable(encoder);
4874 intel_opregion_notify_encoder(encoder, true);
4875 }
4876
4877 if (intel_crtc->config->has_pch_encoder) {
4878 intel_wait_for_vblank(dev, pipe);
4879 intel_wait_for_vblank(dev, pipe);
4880 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
4881 intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
4882 true);
4883 }
4884
4885 /* If we change the relative order between pipe/planes enabling, we need
4886 * to change the workaround. */
4887 hsw_workaround_pipe = pipe_config->hsw_workaround_pipe;
4888 if (IS_HASWELL(dev) && hsw_workaround_pipe != INVALID_PIPE) {
4889 intel_wait_for_vblank(dev, hsw_workaround_pipe);
4890 intel_wait_for_vblank(dev, hsw_workaround_pipe);
4891 }
4892 }
4893
4894 static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force)
4895 {
4896 struct drm_device *dev = crtc->base.dev;
4897 struct drm_i915_private *dev_priv = dev->dev_private;
4898 int pipe = crtc->pipe;
4899
4900 /* To avoid upsetting the power well on Haswell, only disable the pfit if
4901 * it's in use. The hw state code will make sure we get this right. */
4902 if (force || crtc->config->pch_pfit.enabled) {
4903 I915_WRITE(PF_CTL(pipe), 0);
4904 I915_WRITE(PF_WIN_POS(pipe), 0);
4905 I915_WRITE(PF_WIN_SZ(pipe), 0);
4906 }
4907 }
4908
4909 static void ironlake_crtc_disable(struct drm_crtc *crtc)
4910 {
4911 struct drm_device *dev = crtc->dev;
4912 struct drm_i915_private *dev_priv = dev->dev_private;
4913 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4914 struct intel_encoder *encoder;
4915 int pipe = intel_crtc->pipe;
4916
4917 /*
4918 * Sometimes spurious CPU pipe underruns happen when the
4919 * pipe is already disabled, but FDI RX/TX is still enabled.
4920 * Happens at least with VGA+HDMI cloning. Suppress them.
4921 */
4922 if (intel_crtc->config->has_pch_encoder) {
4923 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
4924 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
4925 }
4926
4927 for_each_encoder_on_crtc(dev, crtc, encoder)
4928 encoder->disable(encoder);
4929
4930 drm_crtc_vblank_off(crtc);
4931 assert_vblank_disabled(crtc);
4932
4933 intel_disable_pipe(intel_crtc);
4934
4935 ironlake_pfit_disable(intel_crtc, false);
4936
4937 if (intel_crtc->config->has_pch_encoder)
4938 ironlake_fdi_disable(crtc);
4939
4940 for_each_encoder_on_crtc(dev, crtc, encoder)
4941 if (encoder->post_disable)
4942 encoder->post_disable(encoder);
4943
4944 if (intel_crtc->config->has_pch_encoder) {
4945 ironlake_disable_pch_transcoder(dev_priv, pipe);
4946
4947 if (HAS_PCH_CPT(dev)) {
4948 i915_reg_t reg;
4949 u32 temp;
4950
4951 /* disable TRANS_DP_CTL */
4952 reg = TRANS_DP_CTL(pipe);
4953 temp = I915_READ(reg);
4954 temp &= ~(TRANS_DP_OUTPUT_ENABLE |
4955 TRANS_DP_PORT_SEL_MASK);
4956 temp |= TRANS_DP_PORT_SEL_NONE;
4957 I915_WRITE(reg, temp);
4958
4959 /* disable DPLL_SEL */
4960 temp = I915_READ(PCH_DPLL_SEL);
4961 temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
4962 I915_WRITE(PCH_DPLL_SEL, temp);
4963 }
4964
4965 ironlake_fdi_pll_disable(intel_crtc);
4966 }
4967
4968 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
4969 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
4970 }
4971
4972 static void haswell_crtc_disable(struct drm_crtc *crtc)
4973 {
4974 struct drm_device *dev = crtc->dev;
4975 struct drm_i915_private *dev_priv = dev->dev_private;
4976 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4977 struct intel_encoder *encoder;
4978 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
4979
4980 if (intel_crtc->config->has_pch_encoder)
4981 intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
4982 false);
4983
4984 for_each_encoder_on_crtc(dev, crtc, encoder) {
4985 intel_opregion_notify_encoder(encoder, false);
4986 encoder->disable(encoder);
4987 }
4988
4989 drm_crtc_vblank_off(crtc);
4990 assert_vblank_disabled(crtc);
4991
4992 /* XXX: Do the pipe assertions at the right place for BXT DSI. */
4993 if (!intel_crtc->config->has_dsi_encoder)
4994 intel_disable_pipe(intel_crtc);
4995
4996 if (intel_crtc->config->dp_encoder_is_mst)
4997 intel_ddi_set_vc_payload_alloc(crtc, false);
4998
4999 if (!intel_crtc->config->has_dsi_encoder)
5000 intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
5001
5002 if (INTEL_INFO(dev)->gen >= 9)
5003 skylake_scaler_disable(intel_crtc);
5004 else
5005 ironlake_pfit_disable(intel_crtc, false);
5006
5007 if (!intel_crtc->config->has_dsi_encoder)
5008 intel_ddi_disable_pipe_clock(intel_crtc);
5009
5010 for_each_encoder_on_crtc(dev, crtc, encoder)
5011 if (encoder->post_disable)
5012 encoder->post_disable(encoder);
5013
5014 if (intel_crtc->config->has_pch_encoder) {
5015 lpt_disable_pch_transcoder(dev_priv);
5016 lpt_disable_iclkip(dev_priv);
5017 intel_ddi_fdi_disable(crtc);
5018
5019 intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
5020 true);
5021 }
5022 }
5023
5024 static void i9xx_pfit_enable(struct intel_crtc *crtc)
5025 {
5026 struct drm_device *dev = crtc->base.dev;
5027 struct drm_i915_private *dev_priv = dev->dev_private;
5028 struct intel_crtc_state *pipe_config = crtc->config;
5029
5030 if (!pipe_config->gmch_pfit.control)
5031 return;
5032
5033 /*
5034 * The panel fitter should only be adjusted whilst the pipe is disabled,
5035 * according to register description and PRM.
5036 */
5037 WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
5038 assert_pipe_disabled(dev_priv, crtc->pipe);
5039
5040 I915_WRITE(PFIT_PGM_RATIOS, pipe_config->gmch_pfit.pgm_ratios);
5041 I915_WRITE(PFIT_CONTROL, pipe_config->gmch_pfit.control);
5042
5043 /* Border color in case we don't scale up to the full screen. Black by
5044 * default, change to something else for debugging. */
5045 I915_WRITE(BCLRPAT(crtc->pipe), 0);
5046 }
5047
5048 static enum intel_display_power_domain port_to_power_domain(enum port port)
5049 {
5050 switch (port) {
5051 case PORT_A:
5052 return POWER_DOMAIN_PORT_DDI_A_LANES;
5053 case PORT_B:
5054 return POWER_DOMAIN_PORT_DDI_B_LANES;
5055 case PORT_C:
5056 return POWER_DOMAIN_PORT_DDI_C_LANES;
5057 case PORT_D:
5058 return POWER_DOMAIN_PORT_DDI_D_LANES;
5059 case PORT_E:
5060 return POWER_DOMAIN_PORT_DDI_E_LANES;
5061 default:
5062 MISSING_CASE(port);
5063 return POWER_DOMAIN_PORT_OTHER;
5064 }
5065 }
5066
5067 static enum intel_display_power_domain port_to_aux_power_domain(enum port port)
5068 {
5069 switch (port) {
5070 case PORT_A:
5071 return POWER_DOMAIN_AUX_A;
5072 case PORT_B:
5073 return POWER_DOMAIN_AUX_B;
5074 case PORT_C:
5075 return POWER_DOMAIN_AUX_C;
5076 case PORT_D:
5077 return POWER_DOMAIN_AUX_D;
5078 case PORT_E:
5079 /* FIXME: Check VBT for actual wiring of PORT E */
5080 return POWER_DOMAIN_AUX_D;
5081 default:
5082 MISSING_CASE(port);
5083 return POWER_DOMAIN_AUX_A;
5084 }
5085 }
5086
5087 enum intel_display_power_domain
5088 intel_display_port_power_domain(struct intel_encoder *intel_encoder)
5089 {
5090 struct drm_device *dev = intel_encoder->base.dev;
5091 struct intel_digital_port *intel_dig_port;
5092
5093 switch (intel_encoder->type) {
5094 case INTEL_OUTPUT_UNKNOWN:
5095 /* Only DDI platforms should ever use this output type */
5096 WARN_ON_ONCE(!HAS_DDI(dev));
5097 case INTEL_OUTPUT_DISPLAYPORT:
5098 case INTEL_OUTPUT_HDMI:
5099 case INTEL_OUTPUT_EDP:
5100 intel_dig_port = enc_to_dig_port(&intel_encoder->base);
5101 return port_to_power_domain(intel_dig_port->port);
5102 case INTEL_OUTPUT_DP_MST:
5103 intel_dig_port = enc_to_mst(&intel_encoder->base)->primary;
5104 return port_to_power_domain(intel_dig_port->port);
5105 case INTEL_OUTPUT_ANALOG:
5106 return POWER_DOMAIN_PORT_CRT;
5107 case INTEL_OUTPUT_DSI:
5108 return POWER_DOMAIN_PORT_DSI;
5109 default:
5110 return POWER_DOMAIN_PORT_OTHER;
5111 }
5112 }
5113
5114 enum intel_display_power_domain
5115 intel_display_port_aux_power_domain(struct intel_encoder *intel_encoder)
5116 {
5117 struct drm_device *dev = intel_encoder->base.dev;
5118 struct intel_digital_port *intel_dig_port;
5119
5120 switch (intel_encoder->type) {
5121 case INTEL_OUTPUT_UNKNOWN:
5122 case INTEL_OUTPUT_HDMI:
5123 /*
5124 * Only DDI platforms should ever use these output types.
5125 * We can get here after the HDMI detect code has already set
5126 * the type of the shared encoder. Since we can't be sure
5127 * what's the status of the given connectors, play safe and
5128 * run the DP detection too.
5129 */
5130 WARN_ON_ONCE(!HAS_DDI(dev));
5131 case INTEL_OUTPUT_DISPLAYPORT:
5132 case INTEL_OUTPUT_EDP:
5133 intel_dig_port = enc_to_dig_port(&intel_encoder->base);
5134 return port_to_aux_power_domain(intel_dig_port->port);
5135 case INTEL_OUTPUT_DP_MST:
5136 intel_dig_port = enc_to_mst(&intel_encoder->base)->primary;
5137 return port_to_aux_power_domain(intel_dig_port->port);
5138 default:
5139 MISSING_CASE(intel_encoder->type);
5140 return POWER_DOMAIN_AUX_A;
5141 }
5142 }
5143
5144 static unsigned long get_crtc_power_domains(struct drm_crtc *crtc,
5145 struct intel_crtc_state *crtc_state)
5146 {
5147 struct drm_device *dev = crtc->dev;
5148 struct drm_encoder *encoder;
5149 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5150 enum pipe pipe = intel_crtc->pipe;
5151 unsigned long mask;
5152 enum transcoder transcoder = crtc_state->cpu_transcoder;
5153
5154 if (!crtc_state->base.active)
5155 return 0;
5156
5157 mask = BIT(POWER_DOMAIN_PIPE(pipe));
5158 mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
5159 if (crtc_state->pch_pfit.enabled ||
5160 crtc_state->pch_pfit.force_thru)
5161 mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
5162
5163 drm_for_each_encoder_mask(encoder, dev, crtc_state->base.encoder_mask) {
5164 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
5165
5166 mask |= BIT(intel_display_port_power_domain(intel_encoder));
5167 }
5168
5169 if (crtc_state->shared_dpll)
5170 mask |= BIT(POWER_DOMAIN_PLLS);
5171
5172 return mask;
5173 }
5174
5175 static unsigned long
5176 modeset_get_crtc_power_domains(struct drm_crtc *crtc,
5177 struct intel_crtc_state *crtc_state)
5178 {
5179 struct drm_i915_private *dev_priv = crtc->dev->dev_private;
5180 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5181 enum intel_display_power_domain domain;
5182 unsigned long domains, new_domains, old_domains;
5183
5184 old_domains = intel_crtc->enabled_power_domains;
5185 intel_crtc->enabled_power_domains = new_domains =
5186 get_crtc_power_domains(crtc, crtc_state);
5187
5188 domains = new_domains & ~old_domains;
5189
5190 for_each_power_domain(domain, domains)
5191 intel_display_power_get(dev_priv, domain);
5192
5193 return old_domains & ~new_domains;
5194 }
5195
5196 static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
5197 unsigned long domains)
5198 {
5199 enum intel_display_power_domain domain;
5200
5201 for_each_power_domain(domain, domains)
5202 intel_display_power_put(dev_priv, domain);
5203 }
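/*
 * Editor's note: a worked example (assumed values) of the power domain
 * refcount dance above. With old_domains = 0b0110 and new_domains =
 * 0b0011, modeset_get_crtc_power_domains() grabs new & ~old = 0b0001 up
 * front and returns old & ~new = 0b0100, which the caller hands to
 * modeset_put_power_domains() once the modeset is done. Domains that
 * stay in use across the modeset (0b0010 here) are never dropped.
 */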
5204
5205 static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
5206 {
5207 int max_cdclk_freq = dev_priv->max_cdclk_freq;
5208
5209 if (INTEL_INFO(dev_priv)->gen >= 9 ||
5210 IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
5211 return max_cdclk_freq;
5212 else if (IS_CHERRYVIEW(dev_priv))
5213 return max_cdclk_freq*95/100;
5214 else if (INTEL_INFO(dev_priv)->gen < 4)
5215 return 2*max_cdclk_freq*90/100;
5216 else
5217 return max_cdclk_freq*90/100;
5218 }
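/*
 * Editor's note: worked examples of the derating above, with cdclk
 * values assumed from intel_update_max_cdclk() below. CHV with
 * max_cdclk = 320000 kHz allows a dotclock of up to
 * 320000 * 95 / 100 = 304000 kHz, while a gen < 4 part with the same
 * max_cdclk would allow 2 * 320000 * 90 / 100 = 576000 kHz -- the
 * factor of two presumably reflecting double-wide pipe mode on those
 * platforms.
 */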
5219
5220 static void intel_update_max_cdclk(struct drm_device *dev)
5221 {
5222 struct drm_i915_private *dev_priv = dev->dev_private;
5223
5224 if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
5225 u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;
5226
5227 if (limit == SKL_DFSM_CDCLK_LIMIT_675)
5228 dev_priv->max_cdclk_freq = 675000;
5229 else if (limit == SKL_DFSM_CDCLK_LIMIT_540)
5230 dev_priv->max_cdclk_freq = 540000;
5231 else if (limit == SKL_DFSM_CDCLK_LIMIT_450)
5232 dev_priv->max_cdclk_freq = 450000;
5233 else
5234 dev_priv->max_cdclk_freq = 337500;
5235 } else if (IS_BROXTON(dev)) {
5236 dev_priv->max_cdclk_freq = 624000;
5237 } else if (IS_BROADWELL(dev)) {
5238 /*
5239 * FIXME with extra cooling we can allow
5240 * 540 MHz for ULX and 675 MHz for ULT.
5241 * How can we know if extra cooling is
5242 * available? PCI ID, VTB, something else?
5243 */
5244 if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
5245 dev_priv->max_cdclk_freq = 450000;
5246 else if (IS_BDW_ULX(dev))
5247 dev_priv->max_cdclk_freq = 450000;
5248 else if (IS_BDW_ULT(dev))
5249 dev_priv->max_cdclk_freq = 540000;
5250 else
5251 dev_priv->max_cdclk_freq = 675000;
5252 } else if (IS_CHERRYVIEW(dev)) {
5253 dev_priv->max_cdclk_freq = 320000;
5254 } else if (IS_VALLEYVIEW(dev)) {
5255 dev_priv->max_cdclk_freq = 400000;
5256 } else {
5257 /* otherwise assume cdclk is fixed */
5258 dev_priv->max_cdclk_freq = dev_priv->cdclk_freq;
5259 }
5260
5261 dev_priv->max_dotclk_freq = intel_compute_max_dotclk(dev_priv);
5262
5263 DRM_DEBUG_DRIVER("Max CD clock rate: %d kHz\n",
5264 dev_priv->max_cdclk_freq);
5265
5266 DRM_DEBUG_DRIVER("Max dotclock rate: %d kHz\n",
5267 dev_priv->max_dotclk_freq);
5268 }
5269
5270 static void intel_update_cdclk(struct drm_device *dev)
5271 {
5272 struct drm_i915_private *dev_priv = dev->dev_private;
5273
5274 dev_priv->cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
5275 DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz\n",
5276 dev_priv->cdclk_freq);
5277
5278 /*
5279 * 9:0 CMBUS [sic] CDCLK frequency (cdfreq):
5280 * Programmng [sic] note: bit[9:2] should be programmed to the number
5281 * of cdclk that generates 4MHz reference clock freq which is used to
5282 * generate GMBus clock. This will vary with the cdclk freq.
5283 */
5284 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
5285 I915_WRITE(GMBUSFREQ_VLV, DIV_ROUND_UP(dev_priv->cdclk_freq, 1000));
5286
5287 if (dev_priv->max_cdclk_freq == 0)
5288 intel_update_max_cdclk(dev);
5289 }
5290
5291 /* convert from kHz to .1 fixpoint MHz with -1MHz offset */
5292 static int skl_cdclk_decimal(int cdclk)
5293 {
5294 return DIV_ROUND_CLOSEST(cdclk - 1000, 500);
5295 }
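/*
 * Worked example (editor's note): cdclk = 337500 kHz encodes as
 * DIV_ROUND_CLOSEST(337500 - 1000, 500) = 673, i.e. 336.5 MHz in .1
 * binary fixpoint (units of 0.5 MHz), which is the requested 337.5 MHz
 * minus the 1 MHz offset. A hypothetical inverse helper would be:
 *
 *	static int skl_cdclk_from_decimal(u32 decimal)
 *	{
 *		return decimal * 500 + 1000;
 *	}
 *
 * broxton_set_cdclk() below open-codes exactly this decode when it reads
 * the CDCLK_FREQ_DECIMAL_MASK field back out of CDCLK_CTL.
 */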
5296
5297 static void broxton_set_cdclk(struct drm_i915_private *dev_priv, int cdclk)
5298 {
5299 uint32_t divider;
5300 uint32_t ratio;
5301 uint32_t current_cdclk;
5302 int ret;
5303
5304 /* frequency = 19.2MHz * ratio / 2 / div{1,1.5,2,4} */
5305 switch (cdclk) {
5306 case 144000:
5307 divider = BXT_CDCLK_CD2X_DIV_SEL_4;
5308 ratio = BXT_DE_PLL_RATIO(60);
5309 break;
5310 case 288000:
5311 divider = BXT_CDCLK_CD2X_DIV_SEL_2;
5312 ratio = BXT_DE_PLL_RATIO(60);
5313 break;
5314 case 384000:
5315 divider = BXT_CDCLK_CD2X_DIV_SEL_1_5;
5316 ratio = BXT_DE_PLL_RATIO(60);
5317 break;
5318 case 576000:
5319 divider = BXT_CDCLK_CD2X_DIV_SEL_1;
5320 ratio = BXT_DE_PLL_RATIO(60);
5321 break;
5322 case 624000:
5323 divider = BXT_CDCLK_CD2X_DIV_SEL_1;
5324 ratio = BXT_DE_PLL_RATIO(65);
5325 break;
5326 case 19200:
5327 /*
5328 * Bypass frequency with DE PLL disabled. Init ratio, divider
5329 * to suppress GCC warning.
5330 */
5331 ratio = 0;
5332 divider = 0;
5333 break;
5334 default:
5335 DRM_ERROR("unsupported CDCLK freq %d", cdclk);
5336
5337 return;
5338 }
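/*
 * Editor's note: each table entry above satisfies the formula in the
 * comment, e.g. 19200 * 60 / 2 = 576000 kHz out of the PLL, which the
 * CD2X divider turns into 576000 / 4 = 144000, / 2 = 288000,
 * / 1.5 = 384000 or / 1 = 576000; the 624000 entry needs the higher
 * ratio, 19200 * 65 / 2 / 1 = 624000.
 */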
5339
5340 mutex_lock(&dev_priv->rps.hw_lock);
5341 /* Inform power controller of upcoming frequency change */
5342 ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
5343 0x80000000);
5344 mutex_unlock(&dev_priv->rps.hw_lock);
5345
5346 if (ret) {
5347 DRM_ERROR("PCode CDCLK freq change notify failed (err %d, freq %d)\n",
5348 ret, cdclk);
5349 return;
5350 }
5351
5352 current_cdclk = I915_READ(CDCLK_CTL) & CDCLK_FREQ_DECIMAL_MASK;
5353 /* convert from .1 fixpoint MHz with -1MHz offset to kHz */
5354 current_cdclk = current_cdclk * 500 + 1000;
5355
5356 /*
5357 * DE PLL has to be disabled when
5358 * - setting to 19.2MHz (bypass, PLL isn't used)
5359 * - before setting to 624MHz (PLL needs toggling)
5360 * - before switching away from 624MHz (PLL needs toggling)
5361 */
5362 if (cdclk == 19200 || cdclk == 624000 ||
5363 current_cdclk == 624000) {
5364 I915_WRITE(BXT_DE_PLL_ENABLE, ~BXT_DE_PLL_PLL_ENABLE);
5365 /* Timeout 200us */
5366 if (wait_for(!(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK),
5367 1))
5368 DRM_ERROR("timout waiting for DE PLL unlock\n");
5369 }
5370
5371 if (cdclk != 19200) {
5372 uint32_t val;
5373
5374 val = I915_READ(BXT_DE_PLL_CTL);
5375 val &= ~BXT_DE_PLL_RATIO_MASK;
5376 val |= ratio;
5377 I915_WRITE(BXT_DE_PLL_CTL, val);
5378
5379 I915_WRITE(BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE);
5380 /* Timeout 200us */
5381 if (wait_for(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK, 1))
5382 DRM_ERROR("timeout waiting for DE PLL lock\n");
5383
5384 val = divider | skl_cdclk_decimal(cdclk);
5385 /*
5386 * FIXME if only the cd2x divider needs changing, it could be done
5387 * without shutting off the pipe (if only one pipe is active).
5388 */
5389 val |= BXT_CDCLK_CD2X_PIPE_NONE;
5390 /*
5391 * Disable SSA Precharge when CD clock frequency < 500 MHz,
5392 * enable otherwise.
5393 */
5394 if (cdclk >= 500000)
5395 val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
5396 I915_WRITE(CDCLK_CTL, val);
5397 }
5398
5399 mutex_lock(&dev_priv->rps.hw_lock);
5400 ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
5401 DIV_ROUND_UP(cdclk, 25000));
5402 mutex_unlock(&dev_priv->rps.hw_lock);
5403
5404 if (ret) {
5405 DRM_ERROR("PCode CDCLK freq set failed, (err %d, freq %d)\n",
5406 ret, cdclk);
5407 return;
5408 }
5409
5410 intel_update_cdclk(dev_priv->dev);
5411 }
5412
5413 static bool broxton_cdclk_is_enabled(struct drm_i915_private *dev_priv)
5414 {
5415 if (!(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_PLL_ENABLE))
5416 return false;
5417
5418 /* TODO: Check for a valid CDCLK rate */
5419
5420 if (!(I915_READ(DBUF_CTL) & DBUF_POWER_REQUEST)) {
5421 DRM_DEBUG_DRIVER("CDCLK enabled, but DBUF power not requested\n");
5422
5423 return false;
5424 }
5425
5426 if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE)) {
5427 DRM_DEBUG_DRIVER("CDCLK enabled, but DBUF power hasn't settled\n");
5428
5429 return false;
5430 }
5431
5432 return true;
5433 }
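/*
 * Editor's note: DBUF power is a request/acknowledge handshake: the driver
 * sets DBUF_POWER_REQUEST and the hardware reflects completion in
 * DBUF_POWER_STATE, which is why the check above (and the init/uninit
 * paths below) look at both bits.
 */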
5434
5435 bool broxton_cdclk_verify_state(struct drm_i915_private *dev_priv)
5436 {
5437 return broxton_cdclk_is_enabled(dev_priv);
5438 }
5439
5440 void broxton_init_cdclk(struct drm_i915_private *dev_priv)
5441 {
5442 /* check if cd clock is enabled */
5443 if (broxton_cdclk_is_enabled(dev_priv)) {
5444 DRM_DEBUG_KMS("CDCLK already enabled, won't reprogram it\n");
5445 return;
5446 }
5447
5448 DRM_DEBUG_KMS("CDCLK not enabled, enabling it\n");
5449
5450 /*
5451 * FIXME:
5452 * - The initial CDCLK needs to be read from VBT.
5453 * Need to make this change once the VBT gains the BXT changes.
5454 * - check if setting the max (or any) cdclk freq is really necessary
5455 * here, it belongs to modeset time
5456 */
5457 broxton_set_cdclk(dev_priv, 624000);
5458
5459 I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
5460 POSTING_READ(DBUF_CTL);
5461
5462 udelay(10);
5463
5464 if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE))
5465 DRM_ERROR("DBuf power enable timeout!\n");
5466 }
5467
5468 void broxton_uninit_cdclk(struct drm_i915_private *dev_priv)
5469 {
5470 I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
5471 POSTING_READ(DBUF_CTL);
5472
5473 udelay(10);
5474
5475 if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
5476 DRM_ERROR("DBuf power disable timeout!\n");
5477
5478 /* Set minimum (bypass) frequency, in effect turning off the DE PLL */
5479 broxton_set_cdclk(dev_priv, 19200);
5480 }
5481
5482 static const struct skl_cdclk_entry {
5483 unsigned int freq;
5484 unsigned int vco;
5485 } skl_cdclk_frequencies[] = {
5486 { .freq = 308570, .vco = 8640 },
5487 { .freq = 337500, .vco = 8100 },
5488 { .freq = 432000, .vco = 8640 },
5489 { .freq = 450000, .vco = 8100 },
5490 { .freq = 540000, .vco = 8100 },
5491 { .freq = 617140, .vco = 8640 },
5492 { .freq = 675000, .vco = 8100 },
5493 };
5494
5495 static unsigned int skl_cdclk_get_vco(unsigned int freq)
5496 {
5497 unsigned int i;
5498
5499 for (i = 0; i < ARRAY_SIZE(skl_cdclk_frequencies); i++) {
5500 const struct skl_cdclk_entry *e = &skl_cdclk_frequencies[i];
5501
5502 if (e->freq == freq)
5503 return e->vco;
5504 }
5505
5506 return 8100;
5507 }
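/*
 * Editor's note: every entry in skl_cdclk_frequencies is the VCO divided
 * by a small integer: 8640 MHz / {28, 20, 14} = 308.57 / 432 / 617.14 MHz
 * and 8100 MHz / {24, 18, 15, 12} = 337.5 / 450 / 540 / 675 MHz, so each
 * supported cdclk uniquely determines the DPLL0 VCO looked up here.
 */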
5508
5509 static void
5510 skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco)
5511 {
5512 int min_cdclk;
5513 u32 val;
5514
5515 /* select the minimum CDCLK before enabling DPLL 0 */
5516 if (vco == 8640)
5517 min_cdclk = 308570;
5518 else
5519 min_cdclk = 337500;
5520
5521 val = CDCLK_FREQ_337_308 | skl_cdclk_decimal(min_cdclk);
5522 I915_WRITE(CDCLK_CTL, val);
5523 POSTING_READ(CDCLK_CTL);
5524
5525 /*
5526 * We always enable DPLL0 with the lowest link rate possible, but still
5527 * taking into account the VCO required to operate the eDP panel at the
5528 * desired frequency. The usual DP link rates operate with a VCO of
5529 * 8100 while the eDP 1.4 alternate link rates need a VCO of 8640.
5530 * The modeset code is responsible for the selection of the exact link
5531 * rate later on, with the constraint of choosing a frequency that
5532 * works with required_vco.
5533 */
5534 val = I915_READ(DPLL_CTRL1);
5535
5536 val &= ~(DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) | DPLL_CTRL1_SSC(SKL_DPLL0) |
5537 DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0));
5538 val |= DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
5539 if (vco == 8640)
5540 val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
5541 SKL_DPLL0);
5542 else
5543 val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
5544 SKL_DPLL0);
5545
5546 I915_WRITE(DPLL_CTRL1, val);
5547 POSTING_READ(DPLL_CTRL1);
5548
5549 I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) | LCPLL_PLL_ENABLE);
5550
5551 if (wait_for(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK, 5))
5552 DRM_ERROR("DPLL0 not locked\n");
5553 }
5554
5555 static void
5556 skl_dpll0_disable(struct drm_i915_private *dev_priv)
5557 {
5558 I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & ~LCPLL_PLL_ENABLE);
5559 if (wait_for(!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK), 1))
5560 DRM_ERROR("Couldn't disable DPLL0\n");
5561 }
5562
5563 static bool skl_cdclk_pcu_ready(struct drm_i915_private *dev_priv)
5564 {
5565 int ret;
5566 u32 val;
5567
5568 /* inform PCU we want to change CDCLK */
5569 val = SKL_CDCLK_PREPARE_FOR_CHANGE;
5570 mutex_lock(&dev_priv->rps.hw_lock);
5571 ret = sandybridge_pcode_read(dev_priv, SKL_PCODE_CDCLK_CONTROL, &val);
5572 mutex_unlock(&dev_priv->rps.hw_lock);
5573
5574 return ret == 0 && (val & SKL_CDCLK_READY_FOR_CHANGE);
5575 }
5576
5577 static bool skl_cdclk_wait_for_pcu_ready(struct drm_i915_private *dev_priv)
5578 {
5579 unsigned int i;
5580
5581 for (i = 0; i < 15; i++) {
5582 if (skl_cdclk_pcu_ready(dev_priv))
5583 return true;
5584 udelay(10);
5585 }
5586
5587 return false;
5588 }
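/*
 * Editor's note: the poll above gives the PCU roughly 150 us (15 attempts
 * spaced 10 us apart, plus the pcode round trips) to report
 * SKL_CDCLK_READY_FOR_CHANGE before the cdclk change is abandoned.
 */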
5589
5590 static void skl_set_cdclk(struct drm_i915_private *dev_priv, int cdclk)
5591 {
5592 struct drm_device *dev = dev_priv->dev;
5593 u32 freq_select, pcu_ack;
5594
5595 DRM_DEBUG_DRIVER("Changing CDCLK to %dKHz\n", cdclk);
5596
5597 if (!skl_cdclk_wait_for_pcu_ready(dev_priv)) {
5598 DRM_ERROR("failed to inform PCU about cdclk change\n");
5599 return;
5600 }
5601
5602 /* set CDCLK_CTL */
5603 switch (cdclk) {
5604 case 450000:
5605 case 432000:
5606 freq_select = CDCLK_FREQ_450_432;
5607 pcu_ack = 1;
5608 break;
5609 case 540000:
5610 freq_select = CDCLK_FREQ_540;
5611 pcu_ack = 2;
5612 break;
5613 case 308570:
5614 case 337500:
5615 default:
5616 freq_select = CDCLK_FREQ_337_308;
5617 pcu_ack = 0;
5618 break;
5619 case 617140:
5620 case 675000:
5621 freq_select = CDCLK_FREQ_675_617;
5622 pcu_ack = 3;
5623 break;
5624 }
5625
5626 I915_WRITE(CDCLK_CTL, freq_select | skl_cdclk_decimal(cdclk));
5627 POSTING_READ(CDCLK_CTL);
5628
5629 /* inform PCU of the change */
5630 mutex_lock(&dev_priv->rps.hw_lock);
5631 sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, pcu_ack);
5632 mutex_unlock(&dev_priv->rps.hw_lock);
5633
5634 intel_update_cdclk(dev);
5635 }
5636
5637 void skl_uninit_cdclk(struct drm_i915_private *dev_priv)
5638 {
5639 /* disable DBUF power */
5640 I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
5641 POSTING_READ(DBUF_CTL);
5642
5643 udelay(10);
5644
5645 if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
5646 DRM_ERROR("DBuf power disable timeout\n");
5647
5648 skl_dpll0_disable(dev_priv);
5649 }
5650
5651 void skl_init_cdclk(struct drm_i915_private *dev_priv)
5652 {
5653 unsigned int vco;
5654
5655 /* DPLL0 not enabled (happens on early BIOS versions) */
5656 if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE)) {
5657 /* enable DPLL0 */
5658 vco = skl_cdclk_get_vco(dev_priv->skl_boot_cdclk);
5659 skl_dpll0_enable(dev_priv, vco);
5660 }
5661
5662 /* set CDCLK to the frequency the BIOS chose */
5663 skl_set_cdclk(dev_priv, dev_priv->skl_boot_cdclk);
5664
5665 /* enable DBUF power */
5666 I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
5667 POSTING_READ(DBUF_CTL);
5668
5669 udelay(10);
5670
5671 if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE))
5672 DRM_ERROR("DBuf power enable timeout\n");
5673 }
5674
5675 int skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
5676 {
5677 uint32_t lcpll1 = I915_READ(LCPLL1_CTL);
5678 uint32_t cdctl = I915_READ(CDCLK_CTL);
5679 int freq = dev_priv->skl_boot_cdclk;
5680
5681 /*
5682 * Check if the pre-os initialized the display.
5683 * There is a SWF18 scratchpad register defined which is set by the
5684 * pre-os and which the OS drivers can use to check the status.
5685 */
5686 if ((I915_READ(SWF_ILK(0x18)) & 0x00FFFFFF) == 0)
5687 goto sanitize;
5688
5689 /* Is the PLL enabled and locked? */
5690 if (!((lcpll1 & LCPLL_PLL_ENABLE) && (lcpll1 & LCPLL_PLL_LOCK)))
5691 goto sanitize;
5692
5693 /* DPLL okay; verify the cdclock
5694 *
5695 * Noticed in some instances that the freq selection is correct but
5696 * the decimal part is programmed wrong by the BIOS when the pre-os
5697 * does not enable the display. Verify that as well.
5698 */
5699 if (cdctl == ((cdctl & CDCLK_FREQ_SEL_MASK) | skl_cdclk_decimal(freq)))
5700 /* All well; nothing to sanitize */
5701 return false;
5702 sanitize:
5703 /*
5704 * As of now, initialize with the max cdclk until
5705 * we get dynamic cdclk support.
5706 */
5707 dev_priv->skl_boot_cdclk = dev_priv->max_cdclk_freq;
5708 skl_init_cdclk(dev_priv);
5709
5710 /* we did have to sanitize */
5711 return true;
5712 }
5713
5714 /* Adjust CDclk dividers to allow high res or save power if possible */
5715 static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
5716 {
5717 struct drm_i915_private *dev_priv = dev->dev_private;
5718 u32 val, cmd;
5719
5720 WARN_ON(dev_priv->display.get_display_clock_speed(dev)
5721 != dev_priv->cdclk_freq);
5722
5723 if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */
5724 cmd = 2;
5725 else if (cdclk == 266667)
5726 cmd = 1;
5727 else
5728 cmd = 0;
5729
5730 mutex_lock(&dev_priv->rps.hw_lock);
5731 val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
5732 val &= ~DSPFREQGUAR_MASK;
5733 val |= (cmd << DSPFREQGUAR_SHIFT);
5734 vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
5735 if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
5736 DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT),
5737 50)) {
5738 DRM_ERROR("timed out waiting for CDclk change\n");
5739 }
5740 mutex_unlock(&dev_priv->rps.hw_lock);
5741
5742 mutex_lock(&dev_priv->sb_lock);
5743
5744 if (cdclk == 400000) {
5745 u32 divider;
5746
5747 divider = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;
5748
5749 /* adjust cdclk divider */
5750 val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
5751 val &= ~CCK_FREQUENCY_VALUES;
5752 val |= divider;
5753 vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val);
5754
5755 if (wait_for((vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL) &
5756 CCK_FREQUENCY_STATUS) == (divider << CCK_FREQUENCY_STATUS_SHIFT),
5757 50))
5758 DRM_ERROR("timed out waiting for CDclk change\n");
5759 }
5760
5761 /* adjust self-refresh exit latency value */
5762 val = vlv_bunit_read(dev_priv, BUNIT_REG_BISOC);
5763 val &= ~0x7f;
5764
5765 /*
5766 * For high bandwidth configs, we set a higher latency in the bunit
5767 * so that the core display fetch happens in time to avoid underruns.
5768 */
5769 if (cdclk == 400000)
5770 val |= 4500 / 250; /* 4.5 usec */
5771 else
5772 val |= 3000 / 250; /* 3.0 usec */
5773 vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val);
5774
5775 mutex_unlock(&dev_priv->sb_lock);
5776
5777 intel_update_cdclk(dev);
5778 }
5779
5780 static void cherryview_set_cdclk(struct drm_device *dev, int cdclk)
5781 {
5782 struct drm_i915_private *dev_priv = dev->dev_private;
5783 u32 val, cmd;
5784
5785 WARN_ON(dev_priv->display.get_display_clock_speed(dev)
5786 != dev_priv->cdclk_freq);
5787
5788 switch (cdclk) {
5789 case 333333:
5790 case 320000:
5791 case 266667:
5792 case 200000:
5793 break;
5794 default:
5795 MISSING_CASE(cdclk);
5796 return;
5797 }
5798
5799 /*
5800 * Specs are full of misinformation, but testing on actual
5801 * hardware has shown that we just need to write the desired
5802 * CCK divider into the Punit register.
5803 */
5804 cmd = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;
5805
5806 mutex_lock(&dev_priv->rps.hw_lock);
5807 val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
5808 val &= ~DSPFREQGUAR_MASK_CHV;
5809 val |= (cmd << DSPFREQGUAR_SHIFT_CHV);
5810 vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
5811 if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
5812 DSPFREQSTAT_MASK_CHV) == (cmd << DSPFREQSTAT_SHIFT_CHV),
5813 50)) {
5814 DRM_ERROR("timed out waiting for CDclk change\n");
5815 }
5816 mutex_unlock(&dev_priv->rps.hw_lock);
5817
5818 intel_update_cdclk(dev);
5819 }
5820
5821 static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
5822 int max_pixclk)
5823 {
5824 int freq_320 = (dev_priv->hpll_freq << 1) % 320000 != 0 ? 333333 : 320000;
5825 int limit = IS_CHERRYVIEW(dev_priv) ? 95 : 90;
5826
5827 /*
5828 * Really only a few cases to deal with, as only 4 CDclks are supported:
5829 * 200MHz
5830 * 267MHz
5831 * 320/333MHz (depends on HPLL freq)
5832 * 400MHz (VLV only)
5833 * So we check to see whether we're above 90% (VLV) or 95% (CHV)
5834 * of the lower bin and adjust if needed.
5835 *
5836 * We seem to get an unstable or solid color picture at 200MHz.
5837 * Not sure what's wrong. For now use 200MHz only when all pipes
5838 * are off.
5839 */
5840 if (!IS_CHERRYVIEW(dev_priv) &&
5841 max_pixclk > freq_320*limit/100)
5842 return 400000;
5843 else if (max_pixclk > 266667*limit/100)
5844 return freq_320;
5845 else if (max_pixclk > 0)
5846 return 266667;
5847 else
5848 return 200000;
5849 }
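/*
 * Worked example (editor's note), assuming freq_320 = 320000: on VLV the
 * guardband is 90%, so max_pixclk = 250000 kHz exceeds 266667 * 90 / 100 =
 * 240000 and lands in the 320 MHz bin, while max_pixclk = 300000 kHz also
 * exceeds 320000 * 90 / 100 = 288000 and escalates to 400 MHz.
 */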
5850
5851 static int broxton_calc_cdclk(int max_pixclk)
5852 {
5853 /*
5854 * FIXME:
5855 * - set 19.2MHz bypass frequency if there are no active pipes
5856 */
5857 if (max_pixclk > 576000)
5858 return 624000;
5859 else if (max_pixclk > 384000)
5860 return 576000;
5861 else if (max_pixclk > 288000)
5862 return 384000;
5863 else if (max_pixclk > 144000)
5864 return 288000;
5865 else
5866 return 144000;
5867 }
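/*
 * Editor's note: the ladder above simply returns the smallest supported
 * cdclk that is >= max_pixclk, i.e. BXT is assumed to sustain a pixel
 * rate up to the CD clock frequency.
 */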
5868
5869 /* Compute the max pixel clock for new configuration. */
5870 static int intel_mode_max_pixclk(struct drm_device *dev,
5871 struct drm_atomic_state *state)
5872 {
5873 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
5874 struct drm_i915_private *dev_priv = dev->dev_private;
5875 struct drm_crtc *crtc;
5876 struct drm_crtc_state *crtc_state;
5877 unsigned max_pixclk = 0, i;
5878 enum pipe pipe;
5879
5880 memcpy(intel_state->min_pixclk, dev_priv->min_pixclk,
5881 sizeof(intel_state->min_pixclk));
5882
5883 for_each_crtc_in_state(state, crtc, crtc_state, i) {
5884 int pixclk = 0;
5885
5886 if (crtc_state->enable)
5887 pixclk = crtc_state->adjusted_mode.crtc_clock;
5888
5889 intel_state->min_pixclk[i] = pixclk;
5890 }
5891
5892 for_each_pipe(dev_priv, pipe)
5893 max_pixclk = max(intel_state->min_pixclk[pipe], max_pixclk);
5894
5895 return max_pixclk;
5896 }
5897
5898 static int valleyview_modeset_calc_cdclk(struct drm_atomic_state *state)
5899 {
5900 struct drm_device *dev = state->dev;
5901 struct drm_i915_private *dev_priv = dev->dev_private;
5902 int max_pixclk = intel_mode_max_pixclk(dev, state);
5903 struct intel_atomic_state *intel_state =
5904 to_intel_atomic_state(state);
5905
5906 intel_state->cdclk = intel_state->dev_cdclk =
5907 valleyview_calc_cdclk(dev_priv, max_pixclk);
5908
5909 if (!intel_state->active_crtcs)
5910 intel_state->dev_cdclk = valleyview_calc_cdclk(dev_priv, 0);
5911
5912 return 0;
5913 }
5914
5915 static int broxton_modeset_calc_cdclk(struct drm_atomic_state *state)
5916 {
5917 int max_pixclk = ilk_max_pixel_rate(state);
5918 struct intel_atomic_state *intel_state =
5919 to_intel_atomic_state(state);
5920
5921 intel_state->cdclk = intel_state->dev_cdclk =
5922 broxton_calc_cdclk(max_pixclk);
5923
5924 if (!intel_state->active_crtcs)
5925 intel_state->dev_cdclk = broxton_calc_cdclk(0);
5926
5927 return 0;
5928 }
5929
5930 static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv)
5931 {
5932 unsigned int credits, default_credits;
5933
5934 if (IS_CHERRYVIEW(dev_priv))
5935 default_credits = PFI_CREDIT(12);
5936 else
5937 default_credits = PFI_CREDIT(8);
5938
5939 if (dev_priv->cdclk_freq >= dev_priv->czclk_freq) {
5940 /* CHV suggested value is 31 or 63 */
5941 if (IS_CHERRYVIEW(dev_priv))
5942 credits = PFI_CREDIT_63;
5943 else
5944 credits = PFI_CREDIT(15);
5945 } else {
5946 credits = default_credits;
5947 }
5948
5949 /*
5950 * WA - write default credits before re-programming
5951 * FIXME: should we also set the resend bit here?
5952 */
5953 I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE |
5954 default_credits);
5955
5956 I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE |
5957 credits | PFI_CREDIT_RESEND);
5958
5959 /*
5960 * FIXME is this guaranteed to clear
5961 * immediately or should we poll for it?
5962 */
5963 WARN_ON(I915_READ(GCI_CONTROL) & PFI_CREDIT_RESEND);
5964 }
5965
5966 static void valleyview_modeset_commit_cdclk(struct drm_atomic_state *old_state)
5967 {
5968 struct drm_device *dev = old_state->dev;
5969 struct drm_i915_private *dev_priv = dev->dev_private;
5970 struct intel_atomic_state *old_intel_state =
5971 to_intel_atomic_state(old_state);
5972 unsigned req_cdclk = old_intel_state->dev_cdclk;
5973
5974 /*
5975 * FIXME: We can end up here with all power domains off, yet
5976 * with a CDCLK frequency other than the minimum. To account
5977 * for this take the PIPE-A power domain, which covers the HW
5978 * blocks needed for the following programming. This can be
5979 * removed once it's guaranteed that we get here either with
5980 * the minimum CDCLK set, or the required power domains
5981 * enabled.
5982 */
5983 intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
5984
5985 if (IS_CHERRYVIEW(dev))
5986 cherryview_set_cdclk(dev, req_cdclk);
5987 else
5988 valleyview_set_cdclk(dev, req_cdclk);
5989
5990 vlv_program_pfi_credits(dev_priv);
5991
5992 intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
5993 }
5994
5995 static void valleyview_crtc_enable(struct drm_crtc *crtc)
5996 {
5997 struct drm_device *dev = crtc->dev;
5998 struct drm_i915_private *dev_priv = to_i915(dev);
5999 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6000 struct intel_encoder *encoder;
6001 struct intel_crtc_state *pipe_config =
6002 to_intel_crtc_state(crtc->state);
6003 int pipe = intel_crtc->pipe;
6004
6005 if (WARN_ON(intel_crtc->active))
6006 return;
6007
6008 if (intel_crtc->config->has_dp_encoder)
6009 intel_dp_set_m_n(intel_crtc, M1_N1);
6010
6011 intel_set_pipe_timings(intel_crtc);
6012 intel_set_pipe_src_size(intel_crtc);
6013
6014 if (IS_CHERRYVIEW(dev) && pipe == PIPE_B) {
6015 struct drm_i915_private *dev_priv = dev->dev_private;
6016
6017 I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
6018 I915_WRITE(CHV_CANVAS(pipe), 0);
6019 }
6020
6021 i9xx_set_pipeconf(intel_crtc);
6022
6023 intel_crtc->active = true;
6024
6025 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
6026
6027 for_each_encoder_on_crtc(dev, crtc, encoder)
6028 if (encoder->pre_pll_enable)
6029 encoder->pre_pll_enable(encoder);
6030
6031 if (IS_CHERRYVIEW(dev)) {
6032 chv_prepare_pll(intel_crtc, intel_crtc->config);
6033 chv_enable_pll(intel_crtc, intel_crtc->config);
6034 } else {
6035 vlv_prepare_pll(intel_crtc, intel_crtc->config);
6036 vlv_enable_pll(intel_crtc, intel_crtc->config);
6037 }
6038
6039 for_each_encoder_on_crtc(dev, crtc, encoder)
6040 if (encoder->pre_enable)
6041 encoder->pre_enable(encoder);
6042
6043 i9xx_pfit_enable(intel_crtc);
6044
6045 intel_color_load_luts(&pipe_config->base);
6046
6047 intel_update_watermarks(crtc);
6048 intel_enable_pipe(intel_crtc);
6049
6050 assert_vblank_disabled(crtc);
6051 drm_crtc_vblank_on(crtc);
6052
6053 for_each_encoder_on_crtc(dev, crtc, encoder)
6054 encoder->enable(encoder);
6055 }
6056
6057 static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
6058 {
6059 struct drm_device *dev = crtc->base.dev;
6060 struct drm_i915_private *dev_priv = dev->dev_private;
6061
6062 I915_WRITE(FP0(crtc->pipe), crtc->config->dpll_hw_state.fp0);
6063 I915_WRITE(FP1(crtc->pipe), crtc->config->dpll_hw_state.fp1);
6064 }
6065
6066 static void i9xx_crtc_enable(struct drm_crtc *crtc)
6067 {
6068 struct drm_device *dev = crtc->dev;
6069 struct drm_i915_private *dev_priv = to_i915(dev);
6070 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6071 struct intel_encoder *encoder;
6072 struct intel_crtc_state *pipe_config =
6073 to_intel_crtc_state(crtc->state);
6074 enum pipe pipe = intel_crtc->pipe;
6075
6076 if (WARN_ON(intel_crtc->active))
6077 return;
6078
6079 i9xx_set_pll_dividers(intel_crtc);
6080
6081 if (intel_crtc->config->has_dp_encoder)
6082 intel_dp_set_m_n(intel_crtc, M1_N1);
6083
6084 intel_set_pipe_timings(intel_crtc);
6085 intel_set_pipe_src_size(intel_crtc);
6086
6087 i9xx_set_pipeconf(intel_crtc);
6088
6089 intel_crtc->active = true;
6090
6091 if (!IS_GEN2(dev))
6092 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
6093
6094 for_each_encoder_on_crtc(dev, crtc, encoder)
6095 if (encoder->pre_enable)
6096 encoder->pre_enable(encoder);
6097
6098 i9xx_enable_pll(intel_crtc);
6099
6100 i9xx_pfit_enable(intel_crtc);
6101
6102 intel_color_load_luts(&pipe_config->base);
6103
6104 intel_update_watermarks(crtc);
6105 intel_enable_pipe(intel_crtc);
6106
6107 assert_vblank_disabled(crtc);
6108 drm_crtc_vblank_on(crtc);
6109
6110 for_each_encoder_on_crtc(dev, crtc, encoder)
6111 encoder->enable(encoder);
6112 }
6113
6114 static void i9xx_pfit_disable(struct intel_crtc *crtc)
6115 {
6116 struct drm_device *dev = crtc->base.dev;
6117 struct drm_i915_private *dev_priv = dev->dev_private;
6118
6119 if (!crtc->config->gmch_pfit.control)
6120 return;
6121
6122 assert_pipe_disabled(dev_priv, crtc->pipe);
6123
6124 DRM_DEBUG_DRIVER("disabling pfit, current: 0x%08x\n",
6125 I915_READ(PFIT_CONTROL));
6126 I915_WRITE(PFIT_CONTROL, 0);
6127 }
6128
6129 static void i9xx_crtc_disable(struct drm_crtc *crtc)
6130 {
6131 struct drm_device *dev = crtc->dev;
6132 struct drm_i915_private *dev_priv = dev->dev_private;
6133 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6134 struct intel_encoder *encoder;
6135 int pipe = intel_crtc->pipe;
6136
6137 /*
6138 * On gen2 planes are double buffered but the pipe isn't, so we must
6139 * wait for planes to fully turn off before disabling the pipe.
6140 */
6141 if (IS_GEN2(dev))
6142 intel_wait_for_vblank(dev, pipe);
6143
6144 for_each_encoder_on_crtc(dev, crtc, encoder)
6145 encoder->disable(encoder);
6146
6147 drm_crtc_vblank_off(crtc);
6148 assert_vblank_disabled(crtc);
6149
6150 intel_disable_pipe(intel_crtc);
6151
6152 i9xx_pfit_disable(intel_crtc);
6153
6154 for_each_encoder_on_crtc(dev, crtc, encoder)
6155 if (encoder->post_disable)
6156 encoder->post_disable(encoder);
6157
6158 if (!intel_crtc->config->has_dsi_encoder) {
6159 if (IS_CHERRYVIEW(dev))
6160 chv_disable_pll(dev_priv, pipe);
6161 else if (IS_VALLEYVIEW(dev))
6162 vlv_disable_pll(dev_priv, pipe);
6163 else
6164 i9xx_disable_pll(intel_crtc);
6165 }
6166
6167 for_each_encoder_on_crtc(dev, crtc, encoder)
6168 if (encoder->post_pll_disable)
6169 encoder->post_pll_disable(encoder);
6170
6171 if (!IS_GEN2(dev))
6172 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
6173 }
6174
6175 static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
6176 {
6177 struct intel_encoder *encoder;
6178 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6179 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
6180 enum intel_display_power_domain domain;
6181 unsigned long domains;
6182
6183 if (!intel_crtc->active)
6184 return;
6185
6186 if (to_intel_plane_state(crtc->primary->state)->visible) {
6187 WARN_ON(!list_empty(&intel_crtc->flip_work));
6188
6189 intel_pre_disable_primary_noatomic(crtc);
6190
6191 intel_crtc_disable_planes(crtc, 1 << drm_plane_index(crtc->primary));
6192 to_intel_plane_state(crtc->primary->state)->visible = false;
6193 }
6194
6195 dev_priv->display.crtc_disable(crtc);
6196
6197 DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was enabled, now disabled\n",
6198 crtc->base.id);
6199
6200 WARN_ON(drm_atomic_set_mode_for_crtc(crtc->state, NULL) < 0);
6201 crtc->state->active = false;
6202 intel_crtc->active = false;
6203 crtc->enabled = false;
6204 crtc->state->connector_mask = 0;
6205 crtc->state->encoder_mask = 0;
6206
6207 for_each_encoder_on_crtc(crtc->dev, crtc, encoder)
6208 encoder->base.crtc = NULL;
6209
6210 intel_fbc_disable(intel_crtc);
6211 intel_update_watermarks(crtc);
6212 intel_disable_shared_dpll(intel_crtc);
6213
6214 domains = intel_crtc->enabled_power_domains;
6215 for_each_power_domain(domain, domains)
6216 intel_display_power_put(dev_priv, domain);
6217 intel_crtc->enabled_power_domains = 0;
6218
6219 dev_priv->active_crtcs &= ~(1 << intel_crtc->pipe);
6220 dev_priv->min_pixclk[intel_crtc->pipe] = 0;
6221 }
6222
6223 /*
6224 * Turn all CRTCs off, but do not adjust state.
6225 * This has to be paired with a call to intel_modeset_setup_hw_state.
6226 */
6227 int intel_display_suspend(struct drm_device *dev)
6228 {
6229 struct drm_i915_private *dev_priv = to_i915(dev);
6230 struct drm_atomic_state *state;
6231 int ret;
6232
6233 state = drm_atomic_helper_suspend(dev);
6234 ret = PTR_ERR_OR_ZERO(state);
6235 if (ret)
6236 DRM_ERROR("Suspending crtc's failed with %i\n", ret);
6237 else
6238 dev_priv->modeset_restore_state = state;
6239 return ret;
6240 }
6241
6242 void intel_encoder_destroy(struct drm_encoder *encoder)
6243 {
6244 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
6245
6246 drm_encoder_cleanup(encoder);
6247 kfree(intel_encoder);
6248 }
6249
6250 /* Cross check the actual hw state with our own modeset state tracking (and its
6251 * internal consistency). */
6252 static void intel_connector_verify_state(struct intel_connector *connector)
6253 {
6254 struct drm_crtc *crtc = connector->base.state->crtc;
6255
6256 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
6257 connector->base.base.id,
6258 connector->base.name);
6259
6260 if (connector->get_hw_state(connector)) {
6261 struct intel_encoder *encoder = connector->encoder;
6262 struct drm_connector_state *conn_state = connector->base.state;
6263
6264 I915_STATE_WARN(!crtc,
6265 "connector enabled without attached crtc\n");
6266
6267 if (!crtc)
6268 return;
6269
6270 I915_STATE_WARN(!crtc->state->active,
6271 "connector is active, but attached crtc isn't\n");
6272
6273 if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
6274 return;
6275
6276 I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
6277 "atomic encoder doesn't match attached encoder\n");
6278
6279 I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
6280 "attached encoder crtc differs from connector crtc\n");
6281 } else {
6282 I915_STATE_WARN(crtc && crtc->state->active,
6283 "attached crtc is active, but connector isn't\n");
6284 I915_STATE_WARN(!crtc && connector->base.state->best_encoder,
6285 "best encoder set without crtc!\n");
6286 }
6287 }
6288
6289 int intel_connector_init(struct intel_connector *connector)
6290 {
6291 drm_atomic_helper_connector_reset(&connector->base);
6292
6293 if (!connector->base.state)
6294 return -ENOMEM;
6295
6296 return 0;
6297 }
6298
6299 struct intel_connector *intel_connector_alloc(void)
6300 {
6301 struct intel_connector *connector;
6302
6303 connector = kzalloc(sizeof *connector, GFP_KERNEL);
6304 if (!connector)
6305 return NULL;
6306
6307 if (intel_connector_init(connector) < 0) {
6308 kfree(connector);
6309 return NULL;
6310 }
6311
6312 return connector;
6313 }
6314
6315 /* Simple connector->get_hw_state implementation for encoders that support only
6316 * one connector and no cloning; hence the encoder state determines the state
6317 * of the connector. */
6318 bool intel_connector_get_hw_state(struct intel_connector *connector)
6319 {
6320 enum pipe pipe = 0;
6321 struct intel_encoder *encoder = connector->encoder;
6322
6323 return encoder->get_hw_state(encoder, &pipe);
6324 }
6325
6326 static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
6327 {
6328 if (crtc_state->base.enable && crtc_state->has_pch_encoder)
6329 return crtc_state->fdi_lanes;
6330
6331 return 0;
6332 }
6333
6334 static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
6335 struct intel_crtc_state *pipe_config)
6336 {
6337 struct drm_atomic_state *state = pipe_config->base.state;
6338 struct intel_crtc *other_crtc;
6339 struct intel_crtc_state *other_crtc_state;
6340
6341 DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
6342 pipe_name(pipe), pipe_config->fdi_lanes);
6343 if (pipe_config->fdi_lanes > 4) {
6344 DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
6345 pipe_name(pipe), pipe_config->fdi_lanes);
6346 return -EINVAL;
6347 }
6348
6349 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
6350 if (pipe_config->fdi_lanes > 2) {
6351 DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
6352 pipe_config->fdi_lanes);
6353 return -EINVAL;
6354 } else {
6355 return 0;
6356 }
6357 }
6358
6359 if (INTEL_INFO(dev)->num_pipes == 2)
6360 return 0;
6361
6362 /* Ivybridge 3 pipe is really complicated */
6363 switch (pipe) {
6364 case PIPE_A:
6365 return 0;
6366 case PIPE_B:
6367 if (pipe_config->fdi_lanes <= 2)
6368 return 0;
6369
6370 other_crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, PIPE_C));
6371 other_crtc_state =
6372 intel_atomic_get_crtc_state(state, other_crtc);
6373 if (IS_ERR(other_crtc_state))
6374 return PTR_ERR(other_crtc_state);
6375
6376 if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
6377 DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
6378 pipe_name(pipe), pipe_config->fdi_lanes);
6379 return -EINVAL;
6380 }
6381 return 0;
6382 case PIPE_C:
6383 if (pipe_config->fdi_lanes > 2) {
6384 DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n",
6385 pipe_name(pipe), pipe_config->fdi_lanes);
6386 return -EINVAL;
6387 }
6388
6389 other_crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, PIPE_B));
6390 other_crtc_state =
6391 intel_atomic_get_crtc_state(state, other_crtc);
6392 if (IS_ERR(other_crtc_state))
6393 return PTR_ERR(other_crtc_state);
6394
6395 if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
6396 DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
6397 return -EINVAL;
6398 }
6399 return 0;
6400 default:
6401 BUG();
6402 }
6403 }
6404
6405 #define RETRY 1
6406 static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
6407 struct intel_crtc_state *pipe_config)
6408 {
6409 struct drm_device *dev = intel_crtc->base.dev;
6410 const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
6411 int lane, link_bw, fdi_dotclock, ret;
6412 bool needs_recompute = false;
6413
6414 retry:
6415 /* FDI is a binary signal running at ~2.7GHz, encoding
6416 * each output octet as 10 bits. The actual frequency
6417 * is stored as a divider into a 100MHz clock, and the
6418 * mode pixel clock is stored in units of 1KHz.
6419 * Hence the bw of each lane in terms of the mode signal
6420 * is:
6421 */
6422 link_bw = intel_fdi_link_freq(to_i915(dev), pipe_config);
6423
6424 fdi_dotclock = adjusted_mode->crtc_clock;
6425
6426 lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
6427 pipe_config->pipe_bpp);
6428
6429 pipe_config->fdi_lanes = lane;
6430
6431 intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
6432 link_bw, &pipe_config->fdi_m_n);
6433
6434 ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
6435 if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
6436 pipe_config->pipe_bpp -= 2*3;
6437 DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
6438 pipe_config->pipe_bpp);
6439 needs_recompute = true;
6440 pipe_config->bw_constrained = true;
6441
6442 goto retry;
6443 }
6444
6445 if (needs_recompute)
6446 return RETRY;
6447
6448 return ret;
6449 }
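/*
 * Worked example (editor's note), assuming a 2.7 GHz FDI link (link_bw =
 * 270000) and the ~5% spread-spectrum margin applied by
 * ironlake_get_lanes_required(): a 154 MHz, 24 bpp mode needs about
 * 154000 * 24 * 21 / 20 = 3880800 kbps, so DIV_ROUND_UP(3880800,
 * 270000 * 8) = 2 lanes; even after the bpp fallback to 18 it still
 * needs 2 lanes, since 154000 * 18 * 21 / 20 = 2910600 > 2160000.
 */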
6450
6451 static bool pipe_config_supports_ips(struct drm_i915_private *dev_priv,
6452 struct intel_crtc_state *pipe_config)
6453 {
6454 if (pipe_config->pipe_bpp > 24)
6455 return false;
6456
6457 /* HSW can handle pixel rate up to cdclk? */
6458 if (IS_HASWELL(dev_priv))
6459 return true;
6460
6461 /*
6462 * We compare against max which means we must take
6463 * the increased cdclk requirement into account when
6464 * calculating the new cdclk.
6465 *
6466 * Should measure whether using a lower cdclk w/o IPS would be a net win.
6467 */
6468 return ilk_pipe_pixel_rate(pipe_config) <=
6469 dev_priv->max_cdclk_freq * 95 / 100;
6470 }
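/*
 * Editor's note: with max_cdclk_freq = 540000 the check above allows IPS
 * for pipe pixel rates up to 540000 * 95 / 100 = 513000 kHz.
 */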
6471
6472 static void hsw_compute_ips_config(struct intel_crtc *crtc,
6473 struct intel_crtc_state *pipe_config)
6474 {
6475 struct drm_device *dev = crtc->base.dev;
6476 struct drm_i915_private *dev_priv = dev->dev_private;
6477
6478 pipe_config->ips_enabled = i915.enable_ips &&
6479 hsw_crtc_supports_ips(crtc) &&
6480 pipe_config_supports_ips(dev_priv, pipe_config);
6481 }
6482
6483 static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
6484 {
6485 const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6486
6487 /* GDG double wide on either pipe, otherwise pipe A only */
6488 return INTEL_INFO(dev_priv)->gen < 4 &&
6489 (crtc->pipe == PIPE_A || IS_I915G(dev_priv));
6490 }
6491
6492 static int intel_crtc_compute_config(struct intel_crtc *crtc,
6493 struct intel_crtc_state *pipe_config)
6494 {
6495 struct drm_device *dev = crtc->base.dev;
6496 struct drm_i915_private *dev_priv = dev->dev_private;
6497 const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
6498
6499 /* FIXME should check pixel clock limits on all platforms */
6500 if (INTEL_INFO(dev)->gen < 4) {
6501 int clock_limit = dev_priv->max_cdclk_freq * 9 / 10;
6502
6503 /*
6504 * Enable double wide mode when the dot clock
6505 * is > 90% of the (display) core speed.
6506 */
6507 if (intel_crtc_supports_double_wide(crtc) &&
6508 adjusted_mode->crtc_clock > clock_limit) {
6509 clock_limit *= 2;
6510 pipe_config->double_wide = true;
6511 }
6512
6513 if (adjusted_mode->crtc_clock > clock_limit) {
6514 DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
6515 adjusted_mode->crtc_clock, clock_limit,
6516 yesno(pipe_config->double_wide));
6517 return -EINVAL;
6518 }
6519 }
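/*
 * Editor's note: e.g. with a 400 MHz core display clock the single-wide
 * limit is 360000 kHz, so a 380 MHz dotclock on a pipe that supports
 * double wide mode doubles the limit to 720000 kHz instead of being
 * rejected.
 */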
6520
6521 /*
6522 * Pipe horizontal size must be even in:
6523 * - DVO ganged mode
6524 * - LVDS dual channel mode
6525 * - Double wide pipe
6526 */
6527 if ((intel_pipe_will_have_type(pipe_config, INTEL_OUTPUT_LVDS) &&
6528 intel_is_dual_link_lvds(dev)) || pipe_config->double_wide)
6529 pipe_config->pipe_src_w &= ~1;
6530
6531 /* Cantiga+ cannot handle modes with a hsync front porch of 0.
6532 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
6533 */
6534 if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) &&
6535 adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay)
6536 return -EINVAL;
6537
6538 if (HAS_IPS(dev))
6539 hsw_compute_ips_config(crtc, pipe_config);
6540
6541 if (pipe_config->has_pch_encoder)
6542 return ironlake_fdi_compute_config(crtc, pipe_config);
6543
6544 return 0;
6545 }
6546
6547 static int skylake_get_display_clock_speed(struct drm_device *dev)
6548 {
6549 struct drm_i915_private *dev_priv = to_i915(dev);
6550 uint32_t lcpll1 = I915_READ(LCPLL1_CTL);
6551 uint32_t cdctl = I915_READ(CDCLK_CTL);
6552 uint32_t linkrate;
6553
6554 if (!(lcpll1 & LCPLL_PLL_ENABLE))
6555 return 24000; /* 24MHz is the cd freq with NSSC ref */
6556
6557 if ((cdctl & CDCLK_FREQ_SEL_MASK) == CDCLK_FREQ_540)
6558 return 540000;
6559
6560 linkrate = (I915_READ(DPLL_CTRL1) &
6561 DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)) >> 1;
6562
6563 if (linkrate == DPLL_CTRL1_LINK_RATE_2160 ||
6564 linkrate == DPLL_CTRL1_LINK_RATE_1080) {
6565 /* vco 8640 */
6566 switch (cdctl & CDCLK_FREQ_SEL_MASK) {
6567 case CDCLK_FREQ_450_432:
6568 return 432000;
6569 case CDCLK_FREQ_337_308:
6570 return 308570;
6571 case CDCLK_FREQ_675_617:
6572 return 617140;
6573 default:
6574 WARN(1, "Unknown cd freq selection\n");
6575 }
6576 } else {
6577 /* vco 8100 */
6578 switch (cdctl & CDCLK_FREQ_SEL_MASK) {
6579 case CDCLK_FREQ_450_432:
6580 return 450000;
6581 case CDCLK_FREQ_337_308:
6582 return 337500;
6583 case CDCLK_FREQ_675_617:
6584 return 675000;
6585 default:
6586 WARN(1, "Unknown cd freq selection\n");
6587 }
6588 }
6589
6590 /* error case, do as if DPLL0 isn't enabled */
6591 return 24000;
6592 }
6593
6594 static int broxton_get_display_clock_speed(struct drm_device *dev)
6595 {
6596 struct drm_i915_private *dev_priv = to_i915(dev);
6597 uint32_t cdctl = I915_READ(CDCLK_CTL);
6598 uint32_t pll_ratio = I915_READ(BXT_DE_PLL_CTL) & BXT_DE_PLL_RATIO_MASK;
6599 uint32_t pll_enab = I915_READ(BXT_DE_PLL_ENABLE);
6600 int cdclk;
6601
6602 if (!(pll_enab & BXT_DE_PLL_PLL_ENABLE))
6603 return 19200;
6604
6605 cdclk = 19200 * pll_ratio / 2;
6606
6607 switch (cdctl & BXT_CDCLK_CD2X_DIV_SEL_MASK) {
6608 case BXT_CDCLK_CD2X_DIV_SEL_1:
6609 return cdclk; /* 576MHz or 624MHz */
6610 case BXT_CDCLK_CD2X_DIV_SEL_1_5:
6611 return cdclk * 2 / 3; /* 384MHz */
6612 case BXT_CDCLK_CD2X_DIV_SEL_2:
6613 return cdclk / 2; /* 288MHz */
6614 case BXT_CDCLK_CD2X_DIV_SEL_4:
6615 return cdclk / 4; /* 144MHz */
6616 }
6617
6618 /* error case, do as if DE PLL isn't enabled */
6619 return 19200;
6620 }
6621
6622 static int broadwell_get_display_clock_speed(struct drm_device *dev)
6623 {
6624 struct drm_i915_private *dev_priv = dev->dev_private;
6625 uint32_t lcpll = I915_READ(LCPLL_CTL);
6626 uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
6627
6628 if (lcpll & LCPLL_CD_SOURCE_FCLK)
6629 return 800000;
6630 else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
6631 return 450000;
6632 else if (freq == LCPLL_CLK_FREQ_450)
6633 return 450000;
6634 else if (freq == LCPLL_CLK_FREQ_54O_BDW)
6635 return 540000;
6636 else if (freq == LCPLL_CLK_FREQ_337_5_BDW)
6637 return 337500;
6638 else
6639 return 675000;
6640 }
6641
6642 static int haswell_get_display_clock_speed(struct drm_device *dev)
6643 {
6644 struct drm_i915_private *dev_priv = dev->dev_private;
6645 uint32_t lcpll = I915_READ(LCPLL_CTL);
6646 uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
6647
6648 if (lcpll & LCPLL_CD_SOURCE_FCLK)
6649 return 800000;
6650 else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
6651 return 450000;
6652 else if (freq == LCPLL_CLK_FREQ_450)
6653 return 450000;
6654 else if (IS_HSW_ULT(dev))
6655 return 337500;
6656 else
6657 return 540000;
6658 }
6659
6660 static int valleyview_get_display_clock_speed(struct drm_device *dev)
6661 {
6662 return vlv_get_cck_clock_hpll(to_i915(dev), "cdclk",
6663 CCK_DISPLAY_CLOCK_CONTROL);
6664 }
6665
6666 static int ilk_get_display_clock_speed(struct drm_device *dev)
6667 {
6668 return 450000;
6669 }
6670
6671 static int i945_get_display_clock_speed(struct drm_device *dev)
6672 {
6673 return 400000;
6674 }
6675
6676 static int i915_get_display_clock_speed(struct drm_device *dev)
6677 {
6678 return 333333;
6679 }
6680
6681 static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
6682 {
6683 return 200000;
6684 }
6685
6686 static int pnv_get_display_clock_speed(struct drm_device *dev)
6687 {
6688 u16 gcfgc = 0;
6689
6690 pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
6691
6692 switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
6693 case GC_DISPLAY_CLOCK_267_MHZ_PNV:
6694 return 266667;
6695 case GC_DISPLAY_CLOCK_333_MHZ_PNV:
6696 return 333333;
6697 case GC_DISPLAY_CLOCK_444_MHZ_PNV:
6698 return 444444;
6699 case GC_DISPLAY_CLOCK_200_MHZ_PNV:
6700 return 200000;
6701 default:
6702 DRM_ERROR("Unknown pnv display core clock 0x%04x\n", gcfgc);
6703 case GC_DISPLAY_CLOCK_133_MHZ_PNV:
6704 return 133333;
6705 case GC_DISPLAY_CLOCK_167_MHZ_PNV:
6706 return 166667;
6707 }
6708 }
6709
6710 static int i915gm_get_display_clock_speed(struct drm_device *dev)
6711 {
6712 u16 gcfgc = 0;
6713
6714 pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
6715
6716 if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
6717 return 133333;
6718 else {
6719 switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
6720 case GC_DISPLAY_CLOCK_333_MHZ:
6721 return 333333;
6722 default:
6723 case GC_DISPLAY_CLOCK_190_200_MHZ:
6724 return 190000;
6725 }
6726 }
6727 }
6728
6729 static int i865_get_display_clock_speed(struct drm_device *dev)
6730 {
6731 return 266667;
6732 }
6733
6734 static int i85x_get_display_clock_speed(struct drm_device *dev)
6735 {
6736 u16 hpllcc = 0;
6737
6738 /*
6739 * 852GM/852GMV only supports 133 MHz and the HPLLCC
6740 * encoding is different :(
6741 * FIXME is this the right way to detect 852GM/852GMV?
6742 */
6743 if (dev->pdev->revision == 0x1)
6744 return 133333;
6745
6746 pci_bus_read_config_word(dev->pdev->bus,
6747 PCI_DEVFN(0, 3), HPLLCC, &hpllcc);
6748
6749 /* Assume that the hardware is in the high speed state. This
6750 * should be the default.
6751 */
6752 switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
6753 case GC_CLOCK_133_200:
6754 case GC_CLOCK_133_200_2:
6755 case GC_CLOCK_100_200:
6756 return 200000;
6757 case GC_CLOCK_166_250:
6758 return 250000;
6759 case GC_CLOCK_100_133:
6760 return 133333;
6761 case GC_CLOCK_133_266:
6762 case GC_CLOCK_133_266_2:
6763 case GC_CLOCK_166_266:
6764 return 266667;
6765 }
6766
6767 /* Shouldn't happen */
6768 return 0;
6769 }
6770
6771 static int i830_get_display_clock_speed(struct drm_device *dev)
6772 {
6773 return 133333;
6774 }
6775
6776 static unsigned int intel_hpll_vco(struct drm_device *dev)
6777 {
6778 struct drm_i915_private *dev_priv = dev->dev_private;
6779 static const unsigned int blb_vco[8] = {
6780 [0] = 3200000,
6781 [1] = 4000000,
6782 [2] = 5333333,
6783 [3] = 4800000,
6784 [4] = 6400000,
6785 };
6786 static const unsigned int pnv_vco[8] = {
6787 [0] = 3200000,
6788 [1] = 4000000,
6789 [2] = 5333333,
6790 [3] = 4800000,
6791 [4] = 2666667,
6792 };
6793 static const unsigned int cl_vco[8] = {
6794 [0] = 3200000,
6795 [1] = 4000000,
6796 [2] = 5333333,
6797 [3] = 6400000,
6798 [4] = 3333333,
6799 [5] = 3566667,
6800 [6] = 4266667,
6801 };
6802 static const unsigned int elk_vco[8] = {
6803 [0] = 3200000,
6804 [1] = 4000000,
6805 [2] = 5333333,
6806 [3] = 4800000,
6807 };
6808 static const unsigned int ctg_vco[8] = {
6809 [0] = 3200000,
6810 [1] = 4000000,
6811 [2] = 5333333,
6812 [3] = 6400000,
6813 [4] = 2666667,
6814 [5] = 4266667,
6815 };
6816 const unsigned int *vco_table;
6817 unsigned int vco;
6818 uint8_t tmp = 0;
6819
6820 /* FIXME other chipsets? */
6821 if (IS_GM45(dev))
6822 vco_table = ctg_vco;
6823 else if (IS_G4X(dev))
6824 vco_table = elk_vco;
6825 else if (IS_CRESTLINE(dev))
6826 vco_table = cl_vco;
6827 else if (IS_PINEVIEW(dev))
6828 vco_table = pnv_vco;
6829 else if (IS_G33(dev))
6830 vco_table = blb_vco;
6831 else
6832 return 0;
6833
6834 tmp = I915_READ(IS_MOBILE(dev) ? HPLLVCO_MOBILE : HPLLVCO);
6835
6836 vco = vco_table[tmp & 0x7];
6837 if (vco == 0)
6838 DRM_ERROR("Bad HPLL VCO (HPLLVCO=0x%02x)\n", tmp);
6839 else
6840 DRM_DEBUG_KMS("HPLL VCO %u kHz\n", vco);
6841
6842 return vco;
6843 }
6844
6845 static int gm45_get_display_clock_speed(struct drm_device *dev)
6846 {
6847 unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
6848 uint16_t tmp = 0;
6849
6850 pci_read_config_word(dev->pdev, GCFGC, &tmp);
6851
6852 cdclk_sel = (tmp >> 12) & 0x1;
6853
6854 switch (vco) {
6855 case 2666667:
6856 case 4000000:
6857 case 5333333:
6858 return cdclk_sel ? 333333 : 222222;
6859 case 3200000:
6860 return cdclk_sel ? 320000 : 228571;
6861 default:
6862 DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u, CFGC=0x%04x\n", vco, tmp);
6863 return 222222;
6864 }
6865 }
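/*
 * Editor's note: the two GCFGC selections correspond to fixed VCO
 * dividers, e.g. 3200000 / 10 = 320000 vs 3200000 / 14 = 228571, and
 * 4000000 / 12 = 333333 vs 4000000 / 18 = 222222.
 */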
6866
6867 static int i965gm_get_display_clock_speed(struct drm_device *dev)
6868 {
6869 static const uint8_t div_3200[] = { 16, 10, 8 };
6870 static const uint8_t div_4000[] = { 20, 12, 10 };
6871 static const uint8_t div_5333[] = { 24, 16, 14 };
6872 const uint8_t *div_table;
6873 unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
6874 uint16_t tmp = 0;
6875
6876 pci_read_config_word(dev->pdev, GCFGC, &tmp);
6877
6878 cdclk_sel = ((tmp >> 8) & 0x1f) - 1;
6879
6880 if (cdclk_sel >= ARRAY_SIZE(div_3200))
6881 goto fail;
6882
6883 switch (vco) {
6884 case 3200000:
6885 div_table = div_3200;
6886 break;
6887 case 4000000:
6888 div_table = div_4000;
6889 break;
6890 case 5333333:
6891 div_table = div_5333;
6892 break;
6893 default:
6894 goto fail;
6895 }
6896
6897 return DIV_ROUND_CLOSEST(vco, div_table[cdclk_sel]);
6898
6899 fail:
6900 DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%04x\n", vco, tmp);
6901 return 200000;
6902 }
6903
6904 static int g33_get_display_clock_speed(struct drm_device *dev)
6905 {
6906 static const uint8_t div_3200[] = { 12, 10, 8, 7, 5, 16 };
6907 static const uint8_t div_4000[] = { 14, 12, 10, 8, 6, 20 };
6908 static const uint8_t div_4800[] = { 20, 14, 12, 10, 8, 24 };
6909 static const uint8_t div_5333[] = { 20, 16, 12, 12, 8, 28 };
6910 const uint8_t *div_table;
6911 unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
6912 uint16_t tmp = 0;
6913
6914 pci_read_config_word(dev->pdev, GCFGC, &tmp);
6915
6916 cdclk_sel = (tmp >> 4) & 0x7;
6917
6918 if (cdclk_sel >= ARRAY_SIZE(div_3200))
6919 goto fail;
6920
6921 switch (vco) {
6922 case 3200000:
6923 div_table = div_3200;
6924 break;
6925 case 4000000:
6926 div_table = div_4000;
6927 break;
6928 case 4800000:
6929 div_table = div_4800;
6930 break;
6931 case 5333333:
6932 div_table = div_5333;
6933 break;
6934 default:
6935 goto fail;
6936 }
6937
6938 return DIV_ROUND_CLOSEST(vco, div_table[cdclk_sel]);
6939
6940 fail:
6941 DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%08x\n", vco, tmp);
6942 return 190476;
6943 }
6944
6945 static void
6946 intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
6947 {
6948 while (*num > DATA_LINK_M_N_MASK ||
6949 *den > DATA_LINK_M_N_MASK) {
6950 *num >>= 1;
6951 *den >>= 1;
6952 }
6953 }
6954
6955 static void compute_m_n(unsigned int m, unsigned int n,
6956 uint32_t *ret_m, uint32_t *ret_n)
6957 {
6958 *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
6959 *ret_m = div_u64((uint64_t) m * *ret_n, n);
6960 intel_reduce_m_n_ratio(ret_m, ret_n);
6961 }
6962
6963 void
6964 intel_link_compute_m_n(int bits_per_pixel, int nlanes,
6965 int pixel_clock, int link_clock,
6966 struct intel_link_m_n *m_n)
6967 {
6968 m_n->tu = 64;
6969
6970 compute_m_n(bits_per_pixel * pixel_clock,
6971 link_clock * nlanes * 8,
6972 &m_n->gmch_m, &m_n->gmch_n);
6973
6974 compute_m_n(pixel_clock, link_clock,
6975 &m_n->link_m, &m_n->link_n);
6976 }
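/*
 * Worked example (editor's note), assuming DATA_LINK_N_MAX = 0x800000 and
 * DATA_LINK_M_N_MASK = 0xffffff: a 148.5 MHz mode at 24 bpp over 4 lanes
 * of a 2.7 GHz link starts from m/n = 3564000/8640000; compute_m_n()
 * clamps n to 0x800000 = 8388608 and scales m to 3460300, while the link
 * ratio 148500/270000 = 0.55 becomes 288358/524288. Neither pair exceeds
 * the mask, so intel_reduce_m_n_ratio() leaves them untouched.
 */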
6977
6978 static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
6979 {
6980 if (i915.panel_use_ssc >= 0)
6981 return i915.panel_use_ssc != 0;
6982 return dev_priv->vbt.lvds_use_ssc
6983 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
6984 }
6985
6986 static uint32_t pnv_dpll_compute_fp(struct dpll *dpll)
6987 {
6988 return (1 << dpll->n) << 16 | dpll->m2;
6989 }
6990
6991 static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll)
6992 {
6993 return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
6994 }
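/*
 * Editor's note: the FP value packs N into bits 23:16, M1 into bits 15:8
 * and M2 into bits 7:0; Pineview has no M1 field and stores (1 << n) in
 * the upper half instead.
 */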
6995
6996 static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
6997 struct intel_crtc_state *crtc_state,
6998 struct dpll *reduced_clock)
6999 {
7000 struct drm_device *dev = crtc->base.dev;
7001 u32 fp, fp2 = 0;
7002
7003 if (IS_PINEVIEW(dev)) {
7004 fp = pnv_dpll_compute_fp(&crtc_state->dpll);
7005 if (reduced_clock)
7006 fp2 = pnv_dpll_compute_fp(reduced_clock);
7007 } else {
7008 fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
7009 if (reduced_clock)
7010 fp2 = i9xx_dpll_compute_fp(reduced_clock);
7011 }
7012
7013 crtc_state->dpll_hw_state.fp0 = fp;
7014
7015 crtc->lowfreq_avail = false;
7016 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
7017 reduced_clock) {
7018 crtc_state->dpll_hw_state.fp1 = fp2;
7019 crtc->lowfreq_avail = true;
7020 } else {
7021 crtc_state->dpll_hw_state.fp1 = fp;
7022 }
7023 }
7024
7025 static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
7026 pipe)
7027 {
7028 u32 reg_val;
7029
7030 /*
7031 * PLLB opamp always calibrates to max value of 0x3f, force enable it
7032 * and set it to a reasonable value instead.
7033 */
7034 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
7035 reg_val &= 0xffffff00;
7036 reg_val |= 0x00000030;
7037 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
7038
7039 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
7040 reg_val &= 0x8cffffff;
7041 reg_val = 0x8c000000;
7042 vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
7043
7044 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
7045 reg_val &= 0xffffff00;
7046 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
7047
7048 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
7049 reg_val &= 0x00ffffff;
7050 reg_val |= 0xb0000000;
7051 vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
7052 }
7053
7054 static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
7055 struct intel_link_m_n *m_n)
7056 {
7057 struct drm_device *dev = crtc->base.dev;
7058 struct drm_i915_private *dev_priv = dev->dev_private;
7059 int pipe = crtc->pipe;
7060
7061 I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
7062 I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
7063 I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
7064 I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
7065 }
7066
7067 static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
7068 struct intel_link_m_n *m_n,
7069 struct intel_link_m_n *m2_n2)
7070 {
7071 struct drm_device *dev = crtc->base.dev;
7072 struct drm_i915_private *dev_priv = dev->dev_private;
7073 int pipe = crtc->pipe;
7074 enum transcoder transcoder = crtc->config->cpu_transcoder;
7075
7076 if (INTEL_INFO(dev)->gen >= 5) {
7077 I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
7078 I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
7079 I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
7080 I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
7081 /* M2_N2 registers to be set only for gen < 8 (M2_N2 available
7082 * for gen < 8) and if DRRS is supported (to make sure the
7083 * registers are not unnecessarily accessed).
7084 */
7085 if (m2_n2 && (IS_CHERRYVIEW(dev) || INTEL_INFO(dev)->gen < 8) &&
7086 crtc->config->has_drrs) {
7087 I915_WRITE(PIPE_DATA_M2(transcoder),
7088 TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
7089 I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
7090 I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m);
7091 I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n);
7092 }
7093 } else {
7094 I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
7095 I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
7096 I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
7097 I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
7098 }
7099 }
7100
7101 void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n)
7102 {
7103 struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
7104
7105 if (m_n == M1_N1) {
7106 dp_m_n = &crtc->config->dp_m_n;
7107 dp_m2_n2 = &crtc->config->dp_m2_n2;
7108 } else if (m_n == M2_N2) {
7109
7110 /*
7111 * M2_N2 registers are not supported. Hence m2_n2 divider value
7112 * needs to be programmed into M1_N1.
7113 */
7114 dp_m_n = &crtc->config->dp_m2_n2;
7115 } else {
7116 DRM_ERROR("Unsupported divider value\n");
7117 return;
7118 }
7119
7120 if (crtc->config->has_pch_encoder)
7121 intel_pch_transcoder_set_m_n(crtc, &crtc->config->dp_m_n);
7122 else
7123 intel_cpu_transcoder_set_m_n(crtc, dp_m_n, dp_m2_n2);
7124 }
7125
7126 static void vlv_compute_dpll(struct intel_crtc *crtc,
7127 struct intel_crtc_state *pipe_config)
7128 {
7129 pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
7130 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
7131 if (crtc->pipe != PIPE_A)
7132 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
7133
7134 /* DPLL not used with DSI, but still need the rest set up */
7135 if (!pipe_config->has_dsi_encoder)
7136 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
7137 DPLL_EXT_BUFFER_ENABLE_VLV;
7138
7139 pipe_config->dpll_hw_state.dpll_md =
7140 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
7141 }
7142
7143 static void chv_compute_dpll(struct intel_crtc *crtc,
7144 struct intel_crtc_state *pipe_config)
7145 {
7146 pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
7147 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
7148 if (crtc->pipe != PIPE_A)
7149 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
7150
7151 /* DPLL not used with DSI, but still need the rest set up */
7152 if (!pipe_config->has_dsi_encoder)
7153 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;
7154
7155 pipe_config->dpll_hw_state.dpll_md =
7156 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
7157 }
7158
7159 static void vlv_prepare_pll(struct intel_crtc *crtc,
7160 const struct intel_crtc_state *pipe_config)
7161 {
7162 struct drm_device *dev = crtc->base.dev;
7163 struct drm_i915_private *dev_priv = dev->dev_private;
7164 enum pipe pipe = crtc->pipe;
7165 u32 mdiv;
7166 u32 bestn, bestm1, bestm2, bestp1, bestp2;
7167 u32 coreclk, reg_val;
7168
7169 /* Enable Refclk */
7170 I915_WRITE(DPLL(pipe),
7171 pipe_config->dpll_hw_state.dpll &
7172 ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));
7173
7174 /* No need to actually set up the DPLL with DSI */
7175 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
7176 return;
7177
7178 mutex_lock(&dev_priv->sb_lock);
7179
7180 bestn = pipe_config->dpll.n;
7181 bestm1 = pipe_config->dpll.m1;
7182 bestm2 = pipe_config->dpll.m2;
7183 bestp1 = pipe_config->dpll.p1;
7184 bestp2 = pipe_config->dpll.p2;
7185
7186 /* See eDP HDMI DPIO driver vbios notes doc */
7187
7188 /* PLL B needs special handling */
7189 if (pipe == PIPE_B)
7190 vlv_pllb_recal_opamp(dev_priv, pipe);
7191
7192 /* Set up Tx target for periodic Rcomp update */
7193 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);
7194
7195 /* Disable target IRef on PLL */
7196 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
7197 reg_val &= 0x00ffffff;
7198 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);
7199
7200 /* Disable fast lock */
7201 vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);
7202
7203 /* Set idtafcrecal before PLL is enabled */
7204 mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
7205 mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
7206 mdiv |= ((bestn << DPIO_N_SHIFT));
7207 mdiv |= (1 << DPIO_K_SHIFT);
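	/*
	 * Resulting mdiv layout, inferred from the readback in
	 * vlv_crtc_clock_get() below: m2 in the low byte, m1 (3 bits)
	 * at M1DIV_SHIFT, n (4 bits), p2 (5 bits), p1 (3 bits), plus
	 * the single k divider bit.
	 */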
7208
7209 /*
7210 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
7211 * but we don't support that).
7212 * Note: don't use the DAC post divider as it seems unstable.
7213 */
7214 mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
7215 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
7216
7217 mdiv |= DPIO_ENABLE_CALIBRATION;
7218 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
7219
7220 /* Set HBR and RBR LPF coefficients */
7221 if (pipe_config->port_clock == 162000 ||
7222 intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG) ||
7223 intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
7224 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
7225 0x009f0003);
7226 else
7227 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
7228 0x00d0000f);
7229
7230 if (pipe_config->has_dp_encoder) {
7231 /* Use SSC source */
7232 if (pipe == PIPE_A)
7233 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
7234 0x0df40000);
7235 else
7236 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
7237 0x0df70000);
7238 } else { /* HDMI or VGA */
7239 /* Use bend source */
7240 if (pipe == PIPE_A)
7241 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
7242 0x0df70000);
7243 else
7244 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
7245 0x0df40000);
7246 }
7247
7248 coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
7249 coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
7250 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
7251 intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
7252 coreclk |= 0x01000000;
7253 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
7254
7255 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
7256 mutex_unlock(&dev_priv->sb_lock);
7257 }
7258
7259 static void chv_prepare_pll(struct intel_crtc *crtc,
7260 const struct intel_crtc_state *pipe_config)
7261 {
7262 struct drm_device *dev = crtc->base.dev;
7263 struct drm_i915_private *dev_priv = dev->dev_private;
7264 enum pipe pipe = crtc->pipe;
7265 enum dpio_channel port = vlv_pipe_to_channel(pipe);
7266 u32 loopfilter, tribuf_calcntr;
7267 u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
7268 u32 dpio_val;
7269 int vco;
7270
7271 /* Enable Refclk and SSC */
7272 I915_WRITE(DPLL(pipe),
7273 pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
7274
7275 /* No need to actually set up the DPLL with DSI */
7276 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
7277 return;
7278
7279 bestn = pipe_config->dpll.n;
7280 bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
7281 bestm1 = pipe_config->dpll.m1;
7282 bestm2 = pipe_config->dpll.m2 >> 22;
7283 bestp1 = pipe_config->dpll.p1;
7284 bestp2 = pipe_config->dpll.p2;
7285 vco = pipe_config->dpll.vco;
7286 dpio_val = 0;
7287 loopfilter = 0;
7288
7289 mutex_lock(&dev_priv->sb_lock);
7290
7291 /* p1 and p2 divider */
7292 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
7293 5 << DPIO_CHV_S1_DIV_SHIFT |
7294 bestp1 << DPIO_CHV_P1_DIV_SHIFT |
7295 bestp2 << DPIO_CHV_P2_DIV_SHIFT |
7296 1 << DPIO_CHV_K_DIV_SHIFT);
7297
7298 /* Feedback post-divider - m2 */
7299 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);
7300
7301 /* Feedback refclk divider - n and m1 */
7302 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
7303 DPIO_CHV_M1_DIV_BY_2 |
7304 1 << DPIO_CHV_N_DIV_SHIFT);
7305
7306 /* M2 fraction division */
7307 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);
7308
7309 /* M2 fraction division enable */
7310 dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
7311 dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
7312 dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
7313 if (bestm2_frac)
7314 dpio_val |= DPIO_CHV_FRAC_DIV_EN;
7315 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);
7316
7317 /* Program digital lock detect threshold */
7318 dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
7319 dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
7320 DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
7321 dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
7322 if (!bestm2_frac)
7323 dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
7324 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);
7325
7326 /* Loop filter */
7327 if (vco == 5400000) {
7328 loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
7329 loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
7330 loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
7331 tribuf_calcntr = 0x9;
7332 } else if (vco <= 6200000) {
7333 loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
7334 loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
7335 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
7336 tribuf_calcntr = 0x9;
7337 } else if (vco <= 6480000) {
7338 loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
7339 loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
7340 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
7341 tribuf_calcntr = 0x8;
7342 } else {
7343 /* Not supported. Apply the same limits as in the max case */
7344 loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
7345 loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
7346 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
7347 tribuf_calcntr = 0;
7348 }
7349 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);
7350
7351 dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
7352 dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
7353 dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
7354 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);
7355
7356 /* AFC Recal */
7357 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
7358 vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
7359 DPIO_AFC_RECAL);
7360
7361 mutex_unlock(&dev_priv->sb_lock);
7362 }
7363
7364 /**
7365 * vlv_force_pll_on - forcibly enable just the PLL
7366 * @dev: drm device
7367 * @pipe: pipe PLL to enable
7368 * @dpll: PLL configuration
7369 *
7370 * Enable the PLL for @pipe using the supplied @dpll config. To be used
7371 * in cases where we need the PLL enabled even when @pipe is not going to
7372 * be enabled.
7373 */
7374 int vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
7375 const struct dpll *dpll)
7376 {
7377 struct intel_crtc *crtc =
7378 to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
7379 struct intel_crtc_state *pipe_config;
7380
7381 pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
7382 if (!pipe_config)
7383 return -ENOMEM;
7384
7385 pipe_config->base.crtc = &crtc->base;
7386 pipe_config->pixel_multiplier = 1;
7387 pipe_config->dpll = *dpll;
7388
7389 if (IS_CHERRYVIEW(dev)) {
7390 chv_compute_dpll(crtc, pipe_config);
7391 chv_prepare_pll(crtc, pipe_config);
7392 chv_enable_pll(crtc, pipe_config);
7393 } else {
7394 vlv_compute_dpll(crtc, pipe_config);
7395 vlv_prepare_pll(crtc, pipe_config);
7396 vlv_enable_pll(crtc, pipe_config);
7397 }
7398
7399 kfree(pipe_config);
7400
7401 return 0;
7402 }
7403
7404 /**
7405 * vlv_force_pll_off - forcibly disable just the PLL
7406 * @dev: drm device
7407 * @pipe: pipe PLL to disable
7408 *
7409 * Disable the PLL for @pipe. To be used in cases where the PLL was
7410 * previously force-enabled with vlv_force_pll_on().
7411 */
7412 void vlv_force_pll_off(struct drm_device *dev, enum pipe pipe)
7413 {
7414 if (IS_CHERRYVIEW(dev))
7415 chv_disable_pll(to_i915(dev), pipe);
7416 else
7417 vlv_disable_pll(to_i915(dev), pipe);
7418 }
7419
7420 static void i9xx_compute_dpll(struct intel_crtc *crtc,
7421 struct intel_crtc_state *crtc_state,
7422 struct dpll *reduced_clock)
7423 {
7424 struct drm_device *dev = crtc->base.dev;
7425 struct drm_i915_private *dev_priv = dev->dev_private;
7426 u32 dpll;
7427 bool is_sdvo;
7428 struct dpll *clock = &crtc_state->dpll;
7429
7430 i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
7431
7432 is_sdvo = intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO) ||
7433 intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI);
7434
7435 dpll = DPLL_VGA_MODE_DIS;
7436
7437 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
7438 dpll |= DPLLB_MODE_LVDS;
7439 else
7440 dpll |= DPLLB_MODE_DAC_SERIAL;
7441
7442 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
7443 dpll |= (crtc_state->pixel_multiplier - 1)
7444 << SDVO_MULTIPLIER_SHIFT_HIRES;
7445 }
7446
7447 if (is_sdvo)
7448 dpll |= DPLL_SDVO_HIGH_SPEED;
7449
7450 if (crtc_state->has_dp_encoder)
7451 dpll |= DPLL_SDVO_HIGH_SPEED;
7452
7453 /* compute bitmask from p1 value */
7454 if (IS_PINEVIEW(dev))
7455 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
7456 else {
7457 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7458 if (IS_G4X(dev) && reduced_clock)
7459 dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
7460 }
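	/*
	 * Example: the P1 field is a one-hot mask rather than a binary
	 * value -- p1 == 3 programs (1 << (3 - 1)) = 0b100 into the
	 * field, so exactly one bit is set for each valid divider.
	 */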
7461 switch (clock->p2) {
7462 case 5:
7463 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
7464 break;
7465 case 7:
7466 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
7467 break;
7468 case 10:
7469 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
7470 break;
7471 case 14:
7472 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
7473 break;
7474 }
7475 if (INTEL_INFO(dev)->gen >= 4)
7476 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
7477
7478 if (crtc_state->sdvo_tv_clock)
7479 dpll |= PLL_REF_INPUT_TVCLKINBC;
7480 else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
7481 intel_panel_use_ssc(dev_priv))
7482 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
7483 else
7484 dpll |= PLL_REF_INPUT_DREFCLK;
7485
7486 dpll |= DPLL_VCO_ENABLE;
7487 crtc_state->dpll_hw_state.dpll = dpll;
7488
7489 if (INTEL_INFO(dev)->gen >= 4) {
7490 u32 dpll_md = (crtc_state->pixel_multiplier - 1)
7491 << DPLL_MD_UDI_MULTIPLIER_SHIFT;
7492 crtc_state->dpll_hw_state.dpll_md = dpll_md;
7493 }
7494 }
7495
7496 static void i8xx_compute_dpll(struct intel_crtc *crtc,
7497 struct intel_crtc_state *crtc_state,
7498 struct dpll *reduced_clock)
7499 {
7500 struct drm_device *dev = crtc->base.dev;
7501 struct drm_i915_private *dev_priv = dev->dev_private;
7502 u32 dpll;
7503 struct dpll *clock = &crtc_state->dpll;
7504
7505 i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
7506
7507 dpll = DPLL_VGA_MODE_DIS;
7508
7509 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7510 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7511 } else {
7512 if (clock->p1 == 2)
7513 dpll |= PLL_P1_DIVIDE_BY_TWO;
7514 else
7515 dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7516 if (clock->p2 == 4)
7517 dpll |= PLL_P2_DIVIDE_BY_4;
7518 }
7519
7520 if (!IS_I830(dev) && intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO))
7521 dpll |= DPLL_DVO_2X_MODE;
7522
7523 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
7524 intel_panel_use_ssc(dev_priv))
7525 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
7526 else
7527 dpll |= PLL_REF_INPUT_DREFCLK;
7528
7529 dpll |= DPLL_VCO_ENABLE;
7530 crtc_state->dpll_hw_state.dpll = dpll;
7531 }
7532
7533 static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
7534 {
7535 struct drm_device *dev = intel_crtc->base.dev;
7536 struct drm_i915_private *dev_priv = dev->dev_private;
7537 enum pipe pipe = intel_crtc->pipe;
7538 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
7539 const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
7540 uint32_t crtc_vtotal, crtc_vblank_end;
7541 int vsyncshift = 0;
7542
7543 /* We need to be careful not to change the adjusted mode, as otherwise
7544 * the hw state checker will get angry at the mismatch. */
7545 crtc_vtotal = adjusted_mode->crtc_vtotal;
7546 crtc_vblank_end = adjusted_mode->crtc_vblank_end;
7547
7548 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
7549 /* the chip adds 2 halflines automatically */
7550 crtc_vtotal -= 1;
7551 crtc_vblank_end -= 1;
7552
7553 if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
7554 vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
7555 else
7556 vsyncshift = adjusted_mode->crtc_hsync_start -
7557 adjusted_mode->crtc_htotal / 2;
7558 if (vsyncshift < 0)
7559 vsyncshift += adjusted_mode->crtc_htotal;
7560 }
7561
7562 if (INTEL_INFO(dev)->gen > 3)
7563 I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);
7564
7565 I915_WRITE(HTOTAL(cpu_transcoder),
7566 (adjusted_mode->crtc_hdisplay - 1) |
7567 ((adjusted_mode->crtc_htotal - 1) << 16));
7568 I915_WRITE(HBLANK(cpu_transcoder),
7569 (adjusted_mode->crtc_hblank_start - 1) |
7570 ((adjusted_mode->crtc_hblank_end - 1) << 16));
7571 I915_WRITE(HSYNC(cpu_transcoder),
7572 (adjusted_mode->crtc_hsync_start - 1) |
7573 ((adjusted_mode->crtc_hsync_end - 1) << 16));
7574
7575 I915_WRITE(VTOTAL(cpu_transcoder),
7576 (adjusted_mode->crtc_vdisplay - 1) |
7577 ((crtc_vtotal - 1) << 16));
7578 I915_WRITE(VBLANK(cpu_transcoder),
7579 (adjusted_mode->crtc_vblank_start - 1) |
7580 ((crtc_vblank_end - 1) << 16));
7581 I915_WRITE(VSYNC(cpu_transcoder),
7582 (adjusted_mode->crtc_vsync_start - 1) |
7583 ((adjusted_mode->crtc_vsync_end - 1) << 16));
7584
7585 /* Workaround: when the EDP input selection is B, the VTOTAL_B must be
7586 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
7587 * documented on the DDI_FUNC_CTL register description, EDP Input Select
7588 * bits. */
7589 if (IS_HASWELL(dev) && cpu_transcoder == TRANSCODER_EDP &&
7590 (pipe == PIPE_B || pipe == PIPE_C))
7591 I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));
7592
7593 }
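/*
 * Worked example: HTOTAL packs "hdisplay - 1" in the low 16 bits and
 * "htotal - 1" in the high 16 bits, so a 1920x1080 CEA mode with
 * htotal 2200 yields ((2200 - 1) << 16) | (1920 - 1) = 0x0897077f.
 * HBLANK/HSYNC and the vertical registers follow the same pattern.
 */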
7594
7595 static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc)
7596 {
7597 struct drm_device *dev = intel_crtc->base.dev;
7598 struct drm_i915_private *dev_priv = dev->dev_private;
7599 enum pipe pipe = intel_crtc->pipe;
7600
7601 /* pipesrc controls the size that is scaled from, which should
7602 * always be the user's requested size.
7603 */
7604 I915_WRITE(PIPESRC(pipe),
7605 ((intel_crtc->config->pipe_src_w - 1) << 16) |
7606 (intel_crtc->config->pipe_src_h - 1));
7607 }
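/*
 * Note that PIPESRC packs the dimensions the other way around from the
 * timing registers: width - 1 goes in the high half. A 1920x1080
 * source gives ((1920 - 1) << 16) | (1080 - 1) = 0x077f0437.
 */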
7608
7609 static void intel_get_pipe_timings(struct intel_crtc *crtc,
7610 struct intel_crtc_state *pipe_config)
7611 {
7612 struct drm_device *dev = crtc->base.dev;
7613 struct drm_i915_private *dev_priv = dev->dev_private;
7614 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
7615 uint32_t tmp;
7616
7617 tmp = I915_READ(HTOTAL(cpu_transcoder));
7618 pipe_config->base.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
7619 pipe_config->base.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
7620 tmp = I915_READ(HBLANK(cpu_transcoder));
7621 pipe_config->base.adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1;
7622 pipe_config->base.adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1;
7623 tmp = I915_READ(HSYNC(cpu_transcoder));
7624 pipe_config->base.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
7625 pipe_config->base.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
7626
7627 tmp = I915_READ(VTOTAL(cpu_transcoder));
7628 pipe_config->base.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
7629 pipe_config->base.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
7630 tmp = I915_READ(VBLANK(cpu_transcoder));
7631 pipe_config->base.adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1;
7632 pipe_config->base.adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 1;
7633 tmp = I915_READ(VSYNC(cpu_transcoder));
7634 pipe_config->base.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
7635 pipe_config->base.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
7636
7637 if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) {
7638 pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
7639 pipe_config->base.adjusted_mode.crtc_vtotal += 1;
7640 pipe_config->base.adjusted_mode.crtc_vblank_end += 1;
7641 }
7642 }
7643
7644 static void intel_get_pipe_src_size(struct intel_crtc *crtc,
7645 struct intel_crtc_state *pipe_config)
7646 {
7647 struct drm_device *dev = crtc->base.dev;
7648 struct drm_i915_private *dev_priv = dev->dev_private;
7649 u32 tmp;
7650
7651 tmp = I915_READ(PIPESRC(crtc->pipe));
7652 pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
7653 pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
7654
7655 pipe_config->base.mode.vdisplay = pipe_config->pipe_src_h;
7656 pipe_config->base.mode.hdisplay = pipe_config->pipe_src_w;
7657 }
7658
7659 void intel_mode_from_pipe_config(struct drm_display_mode *mode,
7660 struct intel_crtc_state *pipe_config)
7661 {
7662 mode->hdisplay = pipe_config->base.adjusted_mode.crtc_hdisplay;
7663 mode->htotal = pipe_config->base.adjusted_mode.crtc_htotal;
7664 mode->hsync_start = pipe_config->base.adjusted_mode.crtc_hsync_start;
7665 mode->hsync_end = pipe_config->base.adjusted_mode.crtc_hsync_end;
7666
7667 mode->vdisplay = pipe_config->base.adjusted_mode.crtc_vdisplay;
7668 mode->vtotal = pipe_config->base.adjusted_mode.crtc_vtotal;
7669 mode->vsync_start = pipe_config->base.adjusted_mode.crtc_vsync_start;
7670 mode->vsync_end = pipe_config->base.adjusted_mode.crtc_vsync_end;
7671
7672 mode->flags = pipe_config->base.adjusted_mode.flags;
7673 mode->type = DRM_MODE_TYPE_DRIVER;
7674
7675 mode->clock = pipe_config->base.adjusted_mode.crtc_clock;
7676 mode->flags |= pipe_config->base.adjusted_mode.flags;
7677
7678 mode->hsync = drm_mode_hsync(mode);
7679 mode->vrefresh = drm_mode_vrefresh(mode);
7680 drm_mode_set_name(mode);
7681 }
7682
7683 static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
7684 {
7685 struct drm_device *dev = intel_crtc->base.dev;
7686 struct drm_i915_private *dev_priv = dev->dev_private;
7687 uint32_t pipeconf;
7688
7689 pipeconf = 0;
7690
7691 if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
7692 (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
7693 pipeconf |= I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE;
7694
7695 if (intel_crtc->config->double_wide)
7696 pipeconf |= PIPECONF_DOUBLE_WIDE;
7697
7698 /* only g4x and later have fancy bpc/dither controls */
7699 if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
7700 /* Bspec claims that we can't use dithering for 30bpp pipes. */
7701 if (intel_crtc->config->dither && intel_crtc->config->pipe_bpp != 30)
7702 pipeconf |= PIPECONF_DITHER_EN |
7703 PIPECONF_DITHER_TYPE_SP;
7704
7705 switch (intel_crtc->config->pipe_bpp) {
7706 case 18:
7707 pipeconf |= PIPECONF_6BPC;
7708 break;
7709 case 24:
7710 pipeconf |= PIPECONF_8BPC;
7711 break;
7712 case 30:
7713 pipeconf |= PIPECONF_10BPC;
7714 break;
7715 default:
7716 /* Case prevented by intel_choose_pipe_bpp_dither. */
7717 BUG();
7718 }
7719 }
7720
7721 if (HAS_PIPE_CXSR(dev)) {
7722 if (intel_crtc->lowfreq_avail) {
7723 DRM_DEBUG_KMS("enabling CxSR downclocking\n");
7724 pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
7725 } else {
7726 DRM_DEBUG_KMS("disabling CxSR downclocking\n");
7727 }
7728 }
7729
7730 if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
7731 if (INTEL_INFO(dev)->gen < 4 ||
7732 intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
7733 pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
7734 else
7735 pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
7736 } else
7737 pipeconf |= PIPECONF_PROGRESSIVE;
7738
7739 if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
7740 intel_crtc->config->limited_color_range)
7741 pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
7742
7743 I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf);
7744 POSTING_READ(PIPECONF(intel_crtc->pipe));
7745 }
7746
7747 static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
7748 struct intel_crtc_state *crtc_state)
7749 {
7750 struct drm_device *dev = crtc->base.dev;
7751 struct drm_i915_private *dev_priv = dev->dev_private;
7752 const struct intel_limit *limit;
7753 int refclk = 48000;
7754
7755 memset(&crtc_state->dpll_hw_state, 0,
7756 sizeof(crtc_state->dpll_hw_state));
7757
7758 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7759 if (intel_panel_use_ssc(dev_priv)) {
7760 refclk = dev_priv->vbt.lvds_ssc_freq;
7761 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7762 }
7763
7764 limit = &intel_limits_i8xx_lvds;
7765 } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO)) {
7766 limit = &intel_limits_i8xx_dvo;
7767 } else {
7768 limit = &intel_limits_i8xx_dac;
7769 }
7770
7771 if (!crtc_state->clock_set &&
7772 !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7773 refclk, NULL, &crtc_state->dpll)) {
7774 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7775 return -EINVAL;
7776 }
7777
7778 i8xx_compute_dpll(crtc, crtc_state, NULL);
7779
7780 return 0;
7781 }
7782
7783 static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
7784 struct intel_crtc_state *crtc_state)
7785 {
7786 struct drm_device *dev = crtc->base.dev;
7787 struct drm_i915_private *dev_priv = dev->dev_private;
7788 const struct intel_limit *limit;
7789 int refclk = 96000;
7790
7791 memset(&crtc_state->dpll_hw_state, 0,
7792 sizeof(crtc_state->dpll_hw_state));
7793
7794 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7795 if (intel_panel_use_ssc(dev_priv)) {
7796 refclk = dev_priv->vbt.lvds_ssc_freq;
7797 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7798 }
7799
7800 if (intel_is_dual_link_lvds(dev))
7801 limit = &intel_limits_g4x_dual_channel_lvds;
7802 else
7803 limit = &intel_limits_g4x_single_channel_lvds;
7804 } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI) ||
7805 intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
7806 limit = &intel_limits_g4x_hdmi;
7807 } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO)) {
7808 limit = &intel_limits_g4x_sdvo;
7809 } else {
7810 /* Fallback limits for any other output type */
7811 limit = &intel_limits_i9xx_sdvo;
7812 }
7813
7814 if (!crtc_state->clock_set &&
7815 !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7816 refclk, NULL, &crtc_state->dpll)) {
7817 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7818 return -EINVAL;
7819 }
7820
7821 i9xx_compute_dpll(crtc, crtc_state, NULL);
7822
7823 return 0;
7824 }
7825
7826 static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
7827 struct intel_crtc_state *crtc_state)
7828 {
7829 struct drm_device *dev = crtc->base.dev;
7830 struct drm_i915_private *dev_priv = dev->dev_private;
7831 const struct intel_limit *limit;
7832 int refclk = 96000;
7833
7834 memset(&crtc_state->dpll_hw_state, 0,
7835 sizeof(crtc_state->dpll_hw_state));
7836
7837 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7838 if (intel_panel_use_ssc(dev_priv)) {
7839 refclk = dev_priv->vbt.lvds_ssc_freq;
7840 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7841 }
7842
7843 limit = &intel_limits_pineview_lvds;
7844 } else {
7845 limit = &intel_limits_pineview_sdvo;
7846 }
7847
7848 if (!crtc_state->clock_set &&
7849 !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7850 refclk, NULL, &crtc_state->dpll)) {
7851 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7852 return -EINVAL;
7853 }
7854
7855 i9xx_compute_dpll(crtc, crtc_state, NULL);
7856
7857 return 0;
7858 }
7859
7860 static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
7861 struct intel_crtc_state *crtc_state)
7862 {
7863 struct drm_device *dev = crtc->base.dev;
7864 struct drm_i915_private *dev_priv = dev->dev_private;
7865 const struct intel_limit *limit;
7866 int refclk = 96000;
7867
7868 memset(&crtc_state->dpll_hw_state, 0,
7869 sizeof(crtc_state->dpll_hw_state));
7870
7871 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7872 if (intel_panel_use_ssc(dev_priv)) {
7873 refclk = dev_priv->vbt.lvds_ssc_freq;
7874 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7875 }
7876
7877 limit = &intel_limits_i9xx_lvds;
7878 } else {
7879 limit = &intel_limits_i9xx_sdvo;
7880 }
7881
7882 if (!crtc_state->clock_set &&
7883 !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7884 refclk, NULL, &crtc_state->dpll)) {
7885 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7886 return -EINVAL;
7887 }
7888
7889 i9xx_compute_dpll(crtc, crtc_state, NULL);
7890
7891 return 0;
7892 }
7893
7894 static int chv_crtc_compute_clock(struct intel_crtc *crtc,
7895 struct intel_crtc_state *crtc_state)
7896 {
7897 int refclk = 100000;
7898 const struct intel_limit *limit = &intel_limits_chv;
7899
7900 memset(&crtc_state->dpll_hw_state, 0,
7901 sizeof(crtc_state->dpll_hw_state));
7902
7903 if (!crtc_state->clock_set &&
7904 !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7905 refclk, NULL, &crtc_state->dpll)) {
7906 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7907 return -EINVAL;
7908 }
7909
7910 chv_compute_dpll(crtc, crtc_state);
7911
7912 return 0;
7913 }
7914
7915 static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
7916 struct intel_crtc_state *crtc_state)
7917 {
7918 int refclk = 100000;
7919 const struct intel_limit *limit = &intel_limits_vlv;
7920
7921 memset(&crtc_state->dpll_hw_state, 0,
7922 sizeof(crtc_state->dpll_hw_state));
7923
7924 if (!crtc_state->clock_set &&
7925 !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7926 refclk, NULL, &crtc_state->dpll)) {
7927 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7928 return -EINVAL;
7929 }
7930
7931 vlv_compute_dpll(crtc, crtc_state);
7932
7933 return 0;
7934 }
7935
7936 static void i9xx_get_pfit_config(struct intel_crtc *crtc,
7937 struct intel_crtc_state *pipe_config)
7938 {
7939 struct drm_device *dev = crtc->base.dev;
7940 struct drm_i915_private *dev_priv = dev->dev_private;
7941 uint32_t tmp;
7942
7943 if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev)))
7944 return;
7945
7946 tmp = I915_READ(PFIT_CONTROL);
7947 if (!(tmp & PFIT_ENABLE))
7948 return;
7949
7950 /* Check whether the pfit is attached to our pipe. */
7951 if (INTEL_INFO(dev)->gen < 4) {
7952 if (crtc->pipe != PIPE_B)
7953 return;
7954 } else {
7955 if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
7956 return;
7957 }
7958
7959 pipe_config->gmch_pfit.control = tmp;
7960 pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
7961 }
7962
7963 static void vlv_crtc_clock_get(struct intel_crtc *crtc,
7964 struct intel_crtc_state *pipe_config)
7965 {
7966 struct drm_device *dev = crtc->base.dev;
7967 struct drm_i915_private *dev_priv = dev->dev_private;
7968 int pipe = pipe_config->cpu_transcoder;
7969 struct dpll clock;
7970 u32 mdiv;
7971 int refclk = 100000;
7972
7973 /* In case of DSI, DPLL will not be used */
7974 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
7975 return;
7976
7977 mutex_lock(&dev_priv->sb_lock);
7978 mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
7979 mutex_unlock(&dev_priv->sb_lock);
7980
7981 clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
7982 clock.m2 = mdiv & DPIO_M2DIV_MASK;
7983 clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
7984 clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
7985 clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
7986
7987 pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
7988 }
7989
7990 static void
7991 i9xx_get_initial_plane_config(struct intel_crtc *crtc,
7992 struct intel_initial_plane_config *plane_config)
7993 {
7994 struct drm_device *dev = crtc->base.dev;
7995 struct drm_i915_private *dev_priv = dev->dev_private;
7996 u32 val, base, offset;
7997 int pipe = crtc->pipe, plane = crtc->plane;
7998 int fourcc, pixel_format;
7999 unsigned int aligned_height;
8000 struct drm_framebuffer *fb;
8001 struct intel_framebuffer *intel_fb;
8002
8003 val = I915_READ(DSPCNTR(plane));
8004 if (!(val & DISPLAY_PLANE_ENABLE))
8005 return;
8006
8007 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
8008 if (!intel_fb) {
8009 DRM_DEBUG_KMS("failed to alloc fb\n");
8010 return;
8011 }
8012
8013 fb = &intel_fb->base;
8014
8015 if (INTEL_INFO(dev)->gen >= 4) {
8016 if (val & DISPPLANE_TILED) {
8017 plane_config->tiling = I915_TILING_X;
8018 fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
8019 }
8020 }
8021
8022 pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
8023 fourcc = i9xx_format_to_fourcc(pixel_format);
8024 fb->pixel_format = fourcc;
8025 fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;
8026
8027 if (INTEL_INFO(dev)->gen >= 4) {
8028 if (plane_config->tiling)
8029 offset = I915_READ(DSPTILEOFF(plane));
8030 else
8031 offset = I915_READ(DSPLINOFF(plane));
8032 base = I915_READ(DSPSURF(plane)) & 0xfffff000;
8033 } else {
8034 base = I915_READ(DSPADDR(plane));
8035 }
8036 plane_config->base = base;
8037
8038 val = I915_READ(PIPESRC(pipe));
8039 fb->width = ((val >> 16) & 0xfff) + 1;
8040 fb->height = ((val >> 0) & 0xfff) + 1;
8041
8042 val = I915_READ(DSPSTRIDE(pipe));
8043 fb->pitches[0] = val & 0xffffffc0;
8044
8045 aligned_height = intel_fb_align_height(dev, fb->height,
8046 fb->pixel_format,
8047 fb->modifier[0]);
8048
8049 plane_config->size = fb->pitches[0] * aligned_height;
8050
8051 DRM_DEBUG_KMS("pipe/plane %c/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
8052 pipe_name(pipe), plane, fb->width, fb->height,
8053 fb->bits_per_pixel, base, fb->pitches[0],
8054 plane_config->size);
8055
8056 plane_config->fb = intel_fb;
8057 }
8058
8059 static void chv_crtc_clock_get(struct intel_crtc *crtc,
8060 struct intel_crtc_state *pipe_config)
8061 {
8062 struct drm_device *dev = crtc->base.dev;
8063 struct drm_i915_private *dev_priv = dev->dev_private;
8064 int pipe = pipe_config->cpu_transcoder;
8065 enum dpio_channel port = vlv_pipe_to_channel(pipe);
8066 struct dpll clock;
8067 u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
8068 int refclk = 100000;
8069
8070 /* In case of DSI, DPLL will not be used */
8071 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
8072 return;
8073
8074 mutex_lock(&dev_priv->sb_lock);
8075 cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
8076 pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
8077 pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
8078 pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
8079 pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
8080 mutex_unlock(&dev_priv->sb_lock);
8081
8082 clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
8083 clock.m2 = (pll_dw0 & 0xff) << 22;
8084 if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
8085 clock.m2 |= pll_dw2 & 0x3fffff;
8086 clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
8087 clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
8088 clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;
8089
8090 pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
8091 }
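/*
 * Example: m2 is reassembled as fixed point with a 22-bit fraction --
 * the integer part from pll_dw0 shifted up by 22 and, when
 * DPIO_CHV_FRAC_DIV_EN is set, the fraction from pll_dw2. E.g.
 * pll_dw0 = 0x1a with pll_dw2 = 0x200000 decodes to m2 = 26.5.
 */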
8092
8093 static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
8094 struct intel_crtc_state *pipe_config)
8095 {
8096 struct drm_device *dev = crtc->base.dev;
8097 struct drm_i915_private *dev_priv = dev->dev_private;
8098 enum intel_display_power_domain power_domain;
8099 uint32_t tmp;
8100 bool ret;
8101
8102 power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
8103 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
8104 return false;
8105
8106 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
8107 pipe_config->shared_dpll = NULL;
8108
8109 ret = false;
8110
8111 tmp = I915_READ(PIPECONF(crtc->pipe));
8112 if (!(tmp & PIPECONF_ENABLE))
8113 goto out;
8114
8115 if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
8116 switch (tmp & PIPECONF_BPC_MASK) {
8117 case PIPECONF_6BPC:
8118 pipe_config->pipe_bpp = 18;
8119 break;
8120 case PIPECONF_8BPC:
8121 pipe_config->pipe_bpp = 24;
8122 break;
8123 case PIPECONF_10BPC:
8124 pipe_config->pipe_bpp = 30;
8125 break;
8126 default:
8127 break;
8128 }
8129 }
8130
8131 if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
8132 (tmp & PIPECONF_COLOR_RANGE_SELECT))
8133 pipe_config->limited_color_range = true;
8134
8135 if (INTEL_INFO(dev)->gen < 4)
8136 pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
8137
8138 intel_get_pipe_timings(crtc, pipe_config);
8139 intel_get_pipe_src_size(crtc, pipe_config);
8140
8141 i9xx_get_pfit_config(crtc, pipe_config);
8142
8143 if (INTEL_INFO(dev)->gen >= 4) {
8144 /* No way to read it out on pipes B and C */
8145 if (IS_CHERRYVIEW(dev) && crtc->pipe != PIPE_A)
8146 tmp = dev_priv->chv_dpll_md[crtc->pipe];
8147 else
8148 tmp = I915_READ(DPLL_MD(crtc->pipe));
8149 pipe_config->pixel_multiplier =
8150 ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
8151 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
8152 pipe_config->dpll_hw_state.dpll_md = tmp;
8153 } else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
8154 tmp = I915_READ(DPLL(crtc->pipe));
8155 pipe_config->pixel_multiplier =
8156 ((tmp & SDVO_MULTIPLIER_MASK)
8157 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
8158 } else {
8159 /* Note that on i915G/GM the pixel multiplier is in the sdvo
8160 * port and will be fixed up in the encoder->get_config
8161 * function. */
8162 pipe_config->pixel_multiplier = 1;
8163 }
8164 pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
8165 if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
8166 /*
8167 * DPLL_DVO_2X_MODE must be enabled for both DPLLs
8168 * on 830. Filter it out here so that we don't
8169 * report errors due to that.
8170 */
8171 if (IS_I830(dev))
8172 pipe_config->dpll_hw_state.dpll &= ~DPLL_DVO_2X_MODE;
8173
8174 pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
8175 pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
8176 } else {
8177 /* Mask out read-only status bits. */
8178 pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
8179 DPLL_PORTC_READY_MASK |
8180 DPLL_PORTB_READY_MASK);
8181 }
8182
8183 if (IS_CHERRYVIEW(dev))
8184 chv_crtc_clock_get(crtc, pipe_config);
8185 else if (IS_VALLEYVIEW(dev))
8186 vlv_crtc_clock_get(crtc, pipe_config);
8187 else
8188 i9xx_crtc_clock_get(crtc, pipe_config);
8189
8190 /*
8191 * Normally the dotclock is filled in by the encoder .get_config()
8192 * but in case the pipe is enabled w/o any ports we need a sane
8193 * default.
8194 */
8195 pipe_config->base.adjusted_mode.crtc_clock =
8196 pipe_config->port_clock / pipe_config->pixel_multiplier;
8197
8198 ret = true;
8199
8200 out:
8201 intel_display_power_put(dev_priv, power_domain);
8202
8203 return ret;
8204 }
8205
8206 static void ironlake_init_pch_refclk(struct drm_device *dev)
8207 {
8208 struct drm_i915_private *dev_priv = dev->dev_private;
8209 struct intel_encoder *encoder;
8210 u32 val, final;
8211 bool has_lvds = false;
8212 bool has_cpu_edp = false;
8213 bool has_panel = false;
8214 bool has_ck505 = false;
8215 bool can_ssc = false;
8216
8217 /* We need to take the global config into account */
8218 for_each_intel_encoder(dev, encoder) {
8219 switch (encoder->type) {
8220 case INTEL_OUTPUT_LVDS:
8221 has_panel = true;
8222 has_lvds = true;
8223 break;
8224 case INTEL_OUTPUT_EDP:
8225 has_panel = true;
8226 if (enc_to_dig_port(&encoder->base)->port == PORT_A)
8227 has_cpu_edp = true;
8228 break;
8229 default:
8230 break;
8231 }
8232 }
8233
8234 if (HAS_PCH_IBX(dev)) {
8235 has_ck505 = dev_priv->vbt.display_clock_mode;
8236 can_ssc = has_ck505;
8237 } else {
8238 has_ck505 = false;
8239 can_ssc = true;
8240 }
8241
8242 DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d\n",
8243 has_panel, has_lvds, has_ck505);
8244
8245 /* Ironlake: try to set up the display reference clock before
8246 * enabling the DPLL. This is only under the driver's control
8247 * after PCH stepping B; earlier chipset steppings ignore
8248 * this setting.
8249 */
8250 val = I915_READ(PCH_DREF_CONTROL);
8251
8252 /* As we must carefully and slowly disable/enable each source in turn,
8253 * compute the final state we want first and check if we need to
8254 * make any changes at all.
8255 */
8256 final = val;
8257 final &= ~DREF_NONSPREAD_SOURCE_MASK;
8258 if (has_ck505)
8259 final |= DREF_NONSPREAD_CK505_ENABLE;
8260 else
8261 final |= DREF_NONSPREAD_SOURCE_ENABLE;
8262
8263 final &= ~DREF_SSC_SOURCE_MASK;
8264 final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
8265 final &= ~DREF_SSC1_ENABLE;
8266
8267 if (has_panel) {
8268 final |= DREF_SSC_SOURCE_ENABLE;
8269
8270 if (intel_panel_use_ssc(dev_priv) && can_ssc)
8271 final |= DREF_SSC1_ENABLE;
8272
8273 if (has_cpu_edp) {
8274 if (intel_panel_use_ssc(dev_priv) && can_ssc)
8275 final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
8276 else
8277 final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
8278 } else
8279 final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
8280 } else {
8281 final |= DREF_SSC_SOURCE_DISABLE;
8282 final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
8283 }
8284
8285 if (final == val)
8286 return;
8287
8288 /* Always enable nonspread source */
8289 val &= ~DREF_NONSPREAD_SOURCE_MASK;
8290
8291 if (has_ck505)
8292 val |= DREF_NONSPREAD_CK505_ENABLE;
8293 else
8294 val |= DREF_NONSPREAD_SOURCE_ENABLE;
8295
8296 if (has_panel) {
8297 val &= ~DREF_SSC_SOURCE_MASK;
8298 val |= DREF_SSC_SOURCE_ENABLE;
8299
8300 /* SSC must be turned on before enabling the CPU output */
8301 if (intel_panel_use_ssc(dev_priv) && can_ssc) {
8302 DRM_DEBUG_KMS("Using SSC on panel\n");
8303 val |= DREF_SSC1_ENABLE;
8304 } else
8305 val &= ~DREF_SSC1_ENABLE;
8306
8307 /* Get SSC going before enabling the outputs */
8308 I915_WRITE(PCH_DREF_CONTROL, val);
8309 POSTING_READ(PCH_DREF_CONTROL);
8310 udelay(200);
8311
8312 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
8313
8314 /* Enable CPU source on CPU attached eDP */
8315 if (has_cpu_edp) {
8316 if (intel_panel_use_ssc(dev_priv) && can_ssc) {
8317 DRM_DEBUG_KMS("Using SSC on eDP\n");
8318 val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
8319 } else
8320 val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
8321 } else
8322 val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
8323
8324 I915_WRITE(PCH_DREF_CONTROL, val);
8325 POSTING_READ(PCH_DREF_CONTROL);
8326 udelay(200);
8327 } else {
8328 DRM_DEBUG_KMS("Disabling SSC entirely\n");
8329
8330 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
8331
8332 /* Turn off CPU output */
8333 val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
8334
8335 I915_WRITE(PCH_DREF_CONTROL, val);
8336 POSTING_READ(PCH_DREF_CONTROL);
8337 udelay(200);
8338
8339 /* Turn off the SSC source */
8340 val &= ~DREF_SSC_SOURCE_MASK;
8341 val |= DREF_SSC_SOURCE_DISABLE;
8342
8343 /* Turn off SSC1 */
8344 val &= ~DREF_SSC1_ENABLE;
8345
8346 I915_WRITE(PCH_DREF_CONTROL, val);
8347 POSTING_READ(PCH_DREF_CONTROL);
8348 udelay(200);
8349 }
8350
8351 BUG_ON(val != final);
8352 }
8353
8354 static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
8355 {
8356 uint32_t tmp;
8357
8358 tmp = I915_READ(SOUTH_CHICKEN2);
8359 tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
8360 I915_WRITE(SOUTH_CHICKEN2, tmp);
8361
8362 if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) &
8363 FDI_MPHY_IOSFSB_RESET_STATUS, 100))
8364 DRM_ERROR("FDI mPHY reset assert timeout\n");
8365
8366 tmp = I915_READ(SOUTH_CHICKEN2);
8367 tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
8368 I915_WRITE(SOUTH_CHICKEN2, tmp);
8369
8370 if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) &
8371 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
8372 DRM_ERROR("FDI mPHY reset de-assert timeout\n");
8373 }
8374
8375 /* WaMPhyProgramming:hsw */
8376 static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
8377 {
8378 uint32_t tmp;
8379
8380 tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
8381 tmp &= ~(0xFF << 24);
8382 tmp |= (0x12 << 24);
8383 intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
8384
8385 tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
8386 tmp |= (1 << 11);
8387 intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
8388
8389 tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
8390 tmp |= (1 << 11);
8391 intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
8392
8393 tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
8394 tmp |= (1 << 24) | (1 << 21) | (1 << 18);
8395 intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
8396
8397 tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
8398 tmp |= (1 << 24) | (1 << 21) | (1 << 18);
8399 intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
8400
8401 tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
8402 tmp &= ~(7 << 13);
8403 tmp |= (5 << 13);
8404 intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
8405
8406 tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
8407 tmp &= ~(7 << 13);
8408 tmp |= (5 << 13);
8409 intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
8410
8411 tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
8412 tmp &= ~0xFF;
8413 tmp |= 0x1C;
8414 intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
8415
8416 tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
8417 tmp &= ~0xFF;
8418 tmp |= 0x1C;
8419 intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
8420
8421 tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
8422 tmp &= ~(0xFF << 16);
8423 tmp |= (0x1C << 16);
8424 intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
8425
8426 tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
8427 tmp &= ~(0xFF << 16);
8428 tmp |= (0x1C << 16);
8429 intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
8430
8431 tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
8432 tmp |= (1 << 27);
8433 intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
8434
8435 tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
8436 tmp |= (1 << 27);
8437 intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
8438
8439 tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
8440 tmp &= ~(0xF << 28);
8441 tmp |= (4 << 28);
8442 intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
8443
8444 tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
8445 tmp &= ~(0xF << 28);
8446 tmp |= (4 << 28);
8447 intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
8448 }
8449
8450 /* Implements 3 different sequences from BSpec chapter "Display iCLK
8451 * Programming" based on the parameters passed:
8452 * - Sequence to enable CLKOUT_DP
8453 * - Sequence to enable CLKOUT_DP without spread
8454 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
8455 */
8456 static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread,
8457 bool with_fdi)
8458 {
8459 struct drm_i915_private *dev_priv = dev->dev_private;
8460 uint32_t reg, tmp;
8461
8462 if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
8463 with_spread = true;
8464 if (WARN(HAS_PCH_LPT_LP(dev) && with_fdi, "LP PCH doesn't have FDI\n"))
8465 with_fdi = false;
8466
8467 mutex_lock(&dev_priv->sb_lock);
8468
8469 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
8470 tmp &= ~SBI_SSCCTL_DISABLE;
8471 tmp |= SBI_SSCCTL_PATHALT;
8472 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
8473
8474 udelay(24);
8475
8476 if (with_spread) {
8477 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
8478 tmp &= ~SBI_SSCCTL_PATHALT;
8479 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
8480
8481 if (with_fdi) {
8482 lpt_reset_fdi_mphy(dev_priv);
8483 lpt_program_fdi_mphy(dev_priv);
8484 }
8485 }
8486
8487 reg = HAS_PCH_LPT_LP(dev) ? SBI_GEN0 : SBI_DBUFF0;
8488 tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
8489 tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
8490 intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
8491
8492 mutex_unlock(&dev_priv->sb_lock);
8493 }
8494
8495 /* Sequence to disable CLKOUT_DP */
8496 static void lpt_disable_clkout_dp(struct drm_device *dev)
8497 {
8498 struct drm_i915_private *dev_priv = dev->dev_private;
8499 uint32_t reg, tmp;
8500
8501 mutex_lock(&dev_priv->sb_lock);
8502
8503 reg = HAS_PCH_LPT_LP(dev) ? SBI_GEN0 : SBI_DBUFF0;
8504 tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
8505 tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
8506 intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
8507
8508 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
8509 if (!(tmp & SBI_SSCCTL_DISABLE)) {
8510 if (!(tmp & SBI_SSCCTL_PATHALT)) {
8511 tmp |= SBI_SSCCTL_PATHALT;
8512 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
8513 udelay(32);
8514 }
8515 tmp |= SBI_SSCCTL_DISABLE;
8516 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
8517 }
8518
8519 mutex_unlock(&dev_priv->sb_lock);
8520 }
8521
8522 #define BEND_IDX(steps) ((50 + (steps)) / 5)
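/*
 * BEND_IDX() maps the +/-50 step range onto table indices 0..20:
 * BEND_IDX(-50) == 0, BEND_IDX(0) == 10, BEND_IDX(50) == 20,
 * matching the 21 entries below.
 */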
8523
8524 static const uint16_t sscdivintphase[] = {
8525 [BEND_IDX( 50)] = 0x3B23,
8526 [BEND_IDX( 45)] = 0x3B23,
8527 [BEND_IDX( 40)] = 0x3C23,
8528 [BEND_IDX( 35)] = 0x3C23,
8529 [BEND_IDX( 30)] = 0x3D23,
8530 [BEND_IDX( 25)] = 0x3D23,
8531 [BEND_IDX( 20)] = 0x3E23,
8532 [BEND_IDX( 15)] = 0x3E23,
8533 [BEND_IDX( 10)] = 0x3F23,
8534 [BEND_IDX( 5)] = 0x3F23,
8535 [BEND_IDX( 0)] = 0x0025,
8536 [BEND_IDX( -5)] = 0x0025,
8537 [BEND_IDX(-10)] = 0x0125,
8538 [BEND_IDX(-15)] = 0x0125,
8539 [BEND_IDX(-20)] = 0x0225,
8540 [BEND_IDX(-25)] = 0x0225,
8541 [BEND_IDX(-30)] = 0x0325,
8542 [BEND_IDX(-35)] = 0x0325,
8543 [BEND_IDX(-40)] = 0x0425,
8544 [BEND_IDX(-45)] = 0x0425,
8545 [BEND_IDX(-50)] = 0x0525,
8546 };
8547
8548 /*
8549 * Bend CLKOUT_DP
8550 * steps -50 to 50 inclusive, in steps of 5
8551 * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
8552 * change in clock period = -(steps / 10) * 5.787 ps
8553 */
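/*
 * Example: steps = +50 gives a period change of
 * -(50 / 10) * 5.787 ps = -28.935 ps, i.e. roughly a 0.39% speed-up
 * of the nominal 135 MHz (~7407 ps period) clock.
 */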
8554 static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
8555 {
8556 uint32_t tmp;
8557 int idx = BEND_IDX(steps);
8558
8559 if (WARN_ON(steps % 5 != 0))
8560 return;
8561
8562 if (WARN_ON(idx >= ARRAY_SIZE(sscdivintphase)))
8563 return;
8564
8565 mutex_lock(&dev_priv->sb_lock);
8566
8567 if (steps % 10 != 0)
8568 tmp = 0xAAAAAAAB;
8569 else
8570 tmp = 0x00000000;
8571 intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);
8572
8573 tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
8574 tmp &= 0xffff0000;
8575 tmp |= sscdivintphase[idx];
8576 intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);
8577
8578 mutex_unlock(&dev_priv->sb_lock);
8579 }
8580
8581 #undef BEND_IDX
8582
8583 static void lpt_init_pch_refclk(struct drm_device *dev)
8584 {
8585 struct intel_encoder *encoder;
8586 bool has_vga = false;
8587
8588 for_each_intel_encoder(dev, encoder) {
8589 switch (encoder->type) {
8590 case INTEL_OUTPUT_ANALOG:
8591 has_vga = true;
8592 break;
8593 default:
8594 break;
8595 }
8596 }
8597
8598 if (has_vga) {
8599 lpt_bend_clkout_dp(to_i915(dev), 0);
8600 lpt_enable_clkout_dp(dev, true, true);
8601 } else {
8602 lpt_disable_clkout_dp(dev);
8603 }
8604 }
8605
8606 /*
8607 * Initialize reference clocks when the driver loads
8608 */
8609 void intel_init_pch_refclk(struct drm_device *dev)
8610 {
8611 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
8612 ironlake_init_pch_refclk(dev);
8613 else if (HAS_PCH_LPT(dev))
8614 lpt_init_pch_refclk(dev);
8615 }
8616
8617 static void ironlake_set_pipeconf(struct drm_crtc *crtc)
8618 {
8619 struct drm_i915_private *dev_priv = crtc->dev->dev_private;
8620 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8621 int pipe = intel_crtc->pipe;
8622 uint32_t val;
8623
8624 val = 0;
8625
8626 switch (intel_crtc->config->pipe_bpp) {
8627 case 18:
8628 val |= PIPECONF_6BPC;
8629 break;
8630 case 24:
8631 val |= PIPECONF_8BPC;
8632 break;
8633 case 30:
8634 val |= PIPECONF_10BPC;
8635 break;
8636 case 36:
8637 val |= PIPECONF_12BPC;
8638 break;
8639 default:
8640 /* Case prevented by intel_choose_pipe_bpp_dither. */
8641 BUG();
8642 }
8643
8644 if (intel_crtc->config->dither)
8645 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
8646
8647 if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
8648 val |= PIPECONF_INTERLACED_ILK;
8649 else
8650 val |= PIPECONF_PROGRESSIVE;
8651
8652 if (intel_crtc->config->limited_color_range)
8653 val |= PIPECONF_COLOR_RANGE_SELECT;
8654
8655 I915_WRITE(PIPECONF(pipe), val);
8656 POSTING_READ(PIPECONF(pipe));
8657 }
8658
8659 static void haswell_set_pipeconf(struct drm_crtc *crtc)
8660 {
8661 struct drm_i915_private *dev_priv = crtc->dev->dev_private;
8662 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8663 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
8664 u32 val = 0;
8665
8666 if (IS_HASWELL(dev_priv) && intel_crtc->config->dither)
8667 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
8668
8669 if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
8670 val |= PIPECONF_INTERLACED_ILK;
8671 else
8672 val |= PIPECONF_PROGRESSIVE;
8673
8674 I915_WRITE(PIPECONF(cpu_transcoder), val);
8675 POSTING_READ(PIPECONF(cpu_transcoder));
8676 }
8677
8678 static void haswell_set_pipemisc(struct drm_crtc *crtc)
8679 {
8680 struct drm_i915_private *dev_priv = crtc->dev->dev_private;
8681 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8682
8683 if (IS_BROADWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 9) {
8684 u32 val = 0;
8685
8686 switch (intel_crtc->config->pipe_bpp) {
8687 case 18:
8688 val |= PIPEMISC_DITHER_6_BPC;
8689 break;
8690 case 24:
8691 val |= PIPEMISC_DITHER_8_BPC;
8692 break;
8693 case 30:
8694 val |= PIPEMISC_DITHER_10_BPC;
8695 break;
8696 case 36:
8697 val |= PIPEMISC_DITHER_12_BPC;
8698 break;
8699 default:
8700 /* Case prevented by pipe_config_set_bpp. */
8701 BUG();
8702 }
8703
8704 if (intel_crtc->config->dither)
8705 val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
8706
8707 I915_WRITE(PIPEMISC(intel_crtc->pipe), val);
8708 }
8709 }
8710
8711 int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
8712 {
8713 /*
8714 * Account for spread spectrum to avoid
8715 * oversubscribing the link. Max center spread
8716 * is 2.5%; use 5% for safety's sake.
8717 */
8718 u32 bps = target_clock * bpp * 21 / 20;
8719 return DIV_ROUND_UP(bps, link_bw * 8);
8720 }
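/*
 * Worked example (clocks in kHz, as elsewhere in this file): a
 * 148,500 kHz pixel clock at 24 bpp over a 270,000 kHz FDI link needs
 * bps = 148500 * 24 * 21 / 20 = 3,742,200, and
 * DIV_ROUND_UP(3742200, 270000 * 8) = 2 lanes.
 */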
8721
8722 static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
8723 {
8724 return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
8725 }
8726
8727 static void ironlake_compute_dpll(struct intel_crtc *intel_crtc,
8728 struct intel_crtc_state *crtc_state,
8729 struct dpll *reduced_clock)
8730 {
8731 struct drm_crtc *crtc = &intel_crtc->base;
8732 struct drm_device *dev = crtc->dev;
8733 struct drm_i915_private *dev_priv = dev->dev_private;
8734 struct drm_atomic_state *state = crtc_state->base.state;
8735 struct drm_connector *connector;
8736 struct drm_connector_state *connector_state;
8737 struct intel_encoder *encoder;
8738 u32 dpll, fp, fp2;
8739 int factor, i;
8740 bool is_lvds = false, is_sdvo = false;
8741
8742 for_each_connector_in_state(state, connector, connector_state, i) {
8743 if (connector_state->crtc != crtc_state->base.crtc)
8744 continue;
8745
8746 encoder = to_intel_encoder(connector_state->best_encoder);
8747
8748 switch (encoder->type) {
8749 case INTEL_OUTPUT_LVDS:
8750 is_lvds = true;
8751 break;
8752 case INTEL_OUTPUT_SDVO:
8753 case INTEL_OUTPUT_HDMI:
8754 is_sdvo = true;
8755 break;
8756 default:
8757 break;
8758 }
8759 }
8760
8761 /* Enable autotuning of the PLL clock (if permissible) */
8762 factor = 21;
8763 if (is_lvds) {
8764 if ((intel_panel_use_ssc(dev_priv) &&
8765 dev_priv->vbt.lvds_ssc_freq == 100000) ||
8766 (HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev)))
8767 factor = 25;
8768 } else if (crtc_state->sdvo_tv_clock)
8769 factor = 20;
8770
8771 fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
8772
8773 if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor))
8774 fp |= FP_CB_TUNE;
8775
8776 if (reduced_clock) {
8777 fp2 = i9xx_dpll_compute_fp(reduced_clock);
8778
8779 if (reduced_clock->m < factor * reduced_clock->n)
8780 fp2 |= FP_CB_TUNE;
8781 } else {
8782 fp2 = fp;
8783 }
8784
8785 dpll = 0;
8786
8787 if (is_lvds)
8788 dpll |= DPLLB_MODE_LVDS;
8789 else
8790 dpll |= DPLLB_MODE_DAC_SERIAL;
8791
8792 dpll |= (crtc_state->pixel_multiplier - 1)
8793 << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
8794
8795 if (is_sdvo)
8796 dpll |= DPLL_SDVO_HIGH_SPEED;
8797 if (crtc_state->has_dp_encoder)
8798 dpll |= DPLL_SDVO_HIGH_SPEED;
8799
8800 /* compute bitmask from p1 value */
8801 dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
8802 /* also FPA1 */
8803 dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
8804
8805 switch (crtc_state->dpll.p2) {
8806 case 5:
8807 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
8808 break;
8809 case 7:
8810 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
8811 break;
8812 case 10:
8813 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
8814 break;
8815 case 14:
8816 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
8817 break;
8818 }
8819
8820 if (is_lvds && intel_panel_use_ssc(dev_priv))
8821 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
8822 else
8823 dpll |= PLL_REF_INPUT_DREFCLK;
8824
8825 dpll |= DPLL_VCO_ENABLE;
8826
8827 crtc_state->dpll_hw_state.dpll = dpll;
8828 crtc_state->dpll_hw_state.fp0 = fp;
8829 crtc_state->dpll_hw_state.fp1 = fp2;
8830 }
8831
8832 static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
8833 struct intel_crtc_state *crtc_state)
8834 {
8835 struct drm_device *dev = crtc->base.dev;
8836 struct drm_i915_private *dev_priv = dev->dev_private;
8837 struct dpll reduced_clock;
8838 bool has_reduced_clock = false;
8839 struct intel_shared_dpll *pll;
8840 const struct intel_limit *limit;
8841 int refclk = 120000;
8842
8843 memset(&crtc_state->dpll_hw_state, 0,
8844 sizeof(crtc_state->dpll_hw_state));
8845
8846 crtc->lowfreq_avail = false;
8847
8848 /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
8849 if (!crtc_state->has_pch_encoder)
8850 return 0;
8851
8852 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8853 if (intel_panel_use_ssc(dev_priv)) {
8854 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
8855 dev_priv->vbt.lvds_ssc_freq);
8856 refclk = dev_priv->vbt.lvds_ssc_freq;
8857 }
8858
8859 if (intel_is_dual_link_lvds(dev)) {
8860 if (refclk == 100000)
8861 limit = &intel_limits_ironlake_dual_lvds_100m;
8862 else
8863 limit = &intel_limits_ironlake_dual_lvds;
8864 } else {
8865 if (refclk == 100000)
8866 limit = &intel_limits_ironlake_single_lvds_100m;
8867 else
8868 limit = &intel_limits_ironlake_single_lvds;
8869 }
8870 } else {
8871 limit = &intel_limits_ironlake_dac;
8872 }
8873
8874 if (!crtc_state->clock_set &&
8875 !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8876 refclk, NULL, &crtc_state->dpll)) {
8877 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8878 return -EINVAL;
8879 }
8880
8881 ironlake_compute_dpll(crtc, crtc_state,
8882 has_reduced_clock ? &reduced_clock : NULL);
8883
8884 pll = intel_get_shared_dpll(crtc, crtc_state, NULL);
8885 if (pll == NULL) {
8886 DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
8887 pipe_name(crtc->pipe));
8888 return -EINVAL;
8889 }
8890
8891 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
8892 has_reduced_clock)
8893 crtc->lowfreq_avail = true;
8894
8895 return 0;
8896 }
8897
8898 static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
8899 struct intel_link_m_n *m_n)
8900 {
8901 struct drm_device *dev = crtc->base.dev;
8902 struct drm_i915_private *dev_priv = dev->dev_private;
8903 enum pipe pipe = crtc->pipe;
8904
8905 m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
8906 m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
8907 m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
8908 & ~TU_SIZE_MASK;
8909 m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
8910 m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
8911 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
8912 }
8913
8914 static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
8915 enum transcoder transcoder,
8916 struct intel_link_m_n *m_n,
8917 struct intel_link_m_n *m2_n2)
8918 {
8919 struct drm_device *dev = crtc->base.dev;
8920 struct drm_i915_private *dev_priv = dev->dev_private;
8921 enum pipe pipe = crtc->pipe;
8922
8923 if (INTEL_INFO(dev)->gen >= 5) {
8924 m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
8925 m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
8926 m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
8927 & ~TU_SIZE_MASK;
8928 m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
8929 m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
8930 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
8931 /* M2_N2 registers exist only on gen < 8; read them only there,
8932 * and only when DRRS is supported, so that the registers are
8933 * not read unnecessarily.
8934 */
8935 if (m2_n2 && INTEL_INFO(dev)->gen < 8 &&
8936 crtc->config->has_drrs) {
8937 m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
8938 m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder));
8939 m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder))
8940 & ~TU_SIZE_MASK;
8941 m2_n2->gmch_n = I915_READ(PIPE_DATA_N2(transcoder));
8942 m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder))
8943 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
8944 }
8945 } else {
8946 m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
8947 m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
8948 m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
8949 & ~TU_SIZE_MASK;
8950 m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
8951 m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
8952 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
8953 }
8954 }
8955
8956 void intel_dp_get_m_n(struct intel_crtc *crtc,
8957 struct intel_crtc_state *pipe_config)
8958 {
8959 if (pipe_config->has_pch_encoder)
8960 intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
8961 else
8962 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
8963 &pipe_config->dp_m_n,
8964 &pipe_config->dp_m2_n2);
8965 }
8966
8967 static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
8968 struct intel_crtc_state *pipe_config)
8969 {
8970 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
8971 &pipe_config->fdi_m_n, NULL);
8972 }
8973
8974 static void skylake_get_pfit_config(struct intel_crtc *crtc,
8975 struct intel_crtc_state *pipe_config)
8976 {
8977 struct drm_device *dev = crtc->base.dev;
8978 struct drm_i915_private *dev_priv = dev->dev_private;
8979 struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state;
8980 uint32_t ps_ctrl = 0;
8981 int id = -1;
8982 int i;
8983
8984 /* find scaler attached to this pipe */
8985 for (i = 0; i < crtc->num_scalers; i++) {
8986 ps_ctrl = I915_READ(SKL_PS_CTRL(crtc->pipe, i));
8987 if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) {
8988 id = i;
8989 pipe_config->pch_pfit.enabled = true;
8990 pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i));
8991 pipe_config->pch_pfit.size = I915_READ(SKL_PS_WIN_SZ(crtc->pipe, i));
8992 break;
8993 }
8994 }
8995
8996 scaler_state->scaler_id = id;
8997 if (id >= 0) {
8998 scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
8999 } else {
9000 scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
9001 }
9002 }
9003
9004 static void
9005 skylake_get_initial_plane_config(struct intel_crtc *crtc,
9006 struct intel_initial_plane_config *plane_config)
9007 {
9008 struct drm_device *dev = crtc->base.dev;
9009 struct drm_i915_private *dev_priv = dev->dev_private;
9010 u32 val, base, offset, stride_mult, tiling;
9011 int pipe = crtc->pipe;
9012 int fourcc, pixel_format;
9013 unsigned int aligned_height;
9014 struct drm_framebuffer *fb;
9015 struct intel_framebuffer *intel_fb;
9016
9017 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
9018 if (!intel_fb) {
9019 DRM_DEBUG_KMS("failed to alloc fb\n");
9020 return;
9021 }
9022
9023 fb = &intel_fb->base;
9024
9025 val = I915_READ(PLANE_CTL(pipe, 0));
9026 if (!(val & PLANE_CTL_ENABLE))
9027 goto error;
9028
9029 pixel_format = val & PLANE_CTL_FORMAT_MASK;
9030 fourcc = skl_format_to_fourcc(pixel_format,
9031 val & PLANE_CTL_ORDER_RGBX,
9032 val & PLANE_CTL_ALPHA_MASK);
9033 fb->pixel_format = fourcc;
9034 fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;
9035
9036 tiling = val & PLANE_CTL_TILED_MASK;
9037 switch (tiling) {
9038 case PLANE_CTL_TILED_LINEAR:
9039 fb->modifier[0] = DRM_FORMAT_MOD_NONE;
9040 break;
9041 case PLANE_CTL_TILED_X:
9042 plane_config->tiling = I915_TILING_X;
9043 fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
9044 break;
9045 case PLANE_CTL_TILED_Y:
9046 fb->modifier[0] = I915_FORMAT_MOD_Y_TILED;
9047 break;
9048 case PLANE_CTL_TILED_YF:
9049 fb->modifier[0] = I915_FORMAT_MOD_Yf_TILED;
9050 break;
9051 default:
9052 MISSING_CASE(tiling);
9053 goto error;
9054 }
9055
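/* Descriptive note (editor's addition): the surface register holds only
 * bits 31:12 of the address; scanout surfaces are 4K-page aligned, hence
 * the 0xfffff000 mask below. */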
9056 base = I915_READ(PLANE_SURF(pipe, 0)) & 0xfffff000;
9057 plane_config->base = base;
9058
9059 offset = I915_READ(PLANE_OFFSET(pipe, 0));
9060
9061 val = I915_READ(PLANE_SIZE(pipe, 0));
9062 fb->height = ((val >> 16) & 0xfff) + 1;
9063 fb->width = ((val >> 0) & 0x1fff) + 1;
9064
9065 val = I915_READ(PLANE_STRIDE(pipe, 0));
9066 stride_mult = intel_fb_stride_alignment(dev_priv, fb->modifier[0],
9067 fb->pixel_format);
9068 fb->pitches[0] = (val & 0x3ff) * stride_mult;
9069
9070 aligned_height = intel_fb_align_height(dev, fb->height,
9071 fb->pixel_format,
9072 fb->modifier[0]);
9073
9074 plane_config->size = fb->pitches[0] * aligned_height;
9075
9076 DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
9077 pipe_name(pipe), fb->width, fb->height,
9078 fb->bits_per_pixel, base, fb->pitches[0],
9079 plane_config->size);
9080
9081 plane_config->fb = intel_fb;
9082 return;
9083
9084 error:
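/* Descriptive note (editor's addition): fb points at intel_fb->base, the
 * first member of intel_fb, so this kfree() releases the whole
 * kzalloc()'d intel_framebuffer. */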
9085 kfree(fb);
9086 }
9087
9088 static void ironlake_get_pfit_config(struct intel_crtc *crtc,
9089 struct intel_crtc_state *pipe_config)
9090 {
9091 struct drm_device *dev = crtc->base.dev;
9092 struct drm_i915_private *dev_priv = dev->dev_private;
9093 uint32_t tmp;
9094
9095 tmp = I915_READ(PF_CTL(crtc->pipe));
9096
9097 if (tmp & PF_ENABLE) {
9098 pipe_config->pch_pfit.enabled = true;
9099 pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
9100 pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));
9101
9102 /* We currently do not free assignments of panel fitters on
9103 * ivb/hsw (since we don't use the higher upscaling modes which
9104 * differentiate them) so just WARN about this case for now. */
9105 if (IS_GEN7(dev)) {
9106 WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
9107 PF_PIPE_SEL_IVB(crtc->pipe));
9108 }
9109 }
9110 }
9111
9112 static void
9113 ironlake_get_initial_plane_config(struct intel_crtc *crtc,
9114 struct intel_initial_plane_config *plane_config)
9115 {
9116 struct drm_device *dev = crtc->base.dev;
9117 struct drm_i915_private *dev_priv = dev->dev_private;
9118 u32 val, base, offset;
9119 int pipe = crtc->pipe;
9120 int fourcc, pixel_format;
9121 unsigned int aligned_height;
9122 struct drm_framebuffer *fb;
9123 struct intel_framebuffer *intel_fb;
9124
9125 val = I915_READ(DSPCNTR(pipe));
9126 if (!(val & DISPLAY_PLANE_ENABLE))
9127 return;
9128
9129 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
9130 if (!intel_fb) {
9131 DRM_DEBUG_KMS("failed to alloc fb\n");
9132 return;
9133 }
9134
9135 fb = &intel_fb->base;
9136
9137 if (INTEL_INFO(dev)->gen >= 4) {
9138 if (val & DISPPLANE_TILED) {
9139 plane_config->tiling = I915_TILING_X;
9140 fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
9141 }
9142 }
9143
9144 pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
9145 fourcc = i9xx_format_to_fourcc(pixel_format);
9146 fb->pixel_format = fourcc;
9147 fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;
9148
9149 base = I915_READ(DSPSURF(pipe)) & 0xfffff000;
9150 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
9151 offset = I915_READ(DSPOFFSET(pipe));
9152 } else {
9153 if (plane_config->tiling)
9154 offset = I915_READ(DSPTILEOFF(pipe));
9155 else
9156 offset = I915_READ(DSPLINOFF(pipe));
9157 }
9158 plane_config->base = base;
9159
9160 val = I915_READ(PIPESRC(pipe));
9161 fb->width = ((val >> 16) & 0xfff) + 1;
9162 fb->height = ((val >> 0) & 0xfff) + 1;
9163
9164 val = I915_READ(DSPSTRIDE(pipe));
9165 fb->pitches[0] = val & 0xffffffc0;
9166
9167 aligned_height = intel_fb_align_height(dev, fb->height,
9168 fb->pixel_format,
9169 fb->modifier[0]);
9170
9171 plane_config->size = fb->pitches[0] * aligned_height;
9172
9173 DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
9174 pipe_name(pipe), fb->width, fb->height,
9175 fb->bits_per_pixel, base, fb->pitches[0],
9176 plane_config->size);
9177
9178 plane_config->fb = intel_fb;
9179 }
9180
9181 static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
9182 struct intel_crtc_state *pipe_config)
9183 {
9184 struct drm_device *dev = crtc->base.dev;
9185 struct drm_i915_private *dev_priv = dev->dev_private;
9186 enum intel_display_power_domain power_domain;
9187 uint32_t tmp;
9188 bool ret;
9189
9190 power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
9191 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
9192 return false;
9193
9194 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
9195 pipe_config->shared_dpll = NULL;
9196
9197 ret = false;
9198 tmp = I915_READ(PIPECONF(crtc->pipe));
9199 if (!(tmp & PIPECONF_ENABLE))
9200 goto out;
9201
9202 switch (tmp & PIPECONF_BPC_MASK) {
9203 case PIPECONF_6BPC:
9204 pipe_config->pipe_bpp = 18;
9205 break;
9206 case PIPECONF_8BPC:
9207 pipe_config->pipe_bpp = 24;
9208 break;
9209 case PIPECONF_10BPC:
9210 pipe_config->pipe_bpp = 30;
9211 break;
9212 case PIPECONF_12BPC:
9213 pipe_config->pipe_bpp = 36;
9214 break;
9215 default:
9216 break;
9217 }
9218
9219 if (tmp & PIPECONF_COLOR_RANGE_SELECT)
9220 pipe_config->limited_color_range = true;
9221
9222 if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
9223 struct intel_shared_dpll *pll;
9224 enum intel_dpll_id pll_id;
9225
9226 pipe_config->has_pch_encoder = true;
9227
9228 tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
9229 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
9230 FDI_DP_PORT_WIDTH_SHIFT) + 1;
9231
9232 ironlake_get_fdi_m_n_config(crtc, pipe_config);
9233
9234 if (HAS_PCH_IBX(dev_priv)) {
9235 /*
9236 * The pipe->pch transcoder and pch transcoder->pll
9237 * mapping is fixed.
9238 */
9239 pll_id = (enum intel_dpll_id) crtc->pipe;
9240 } else {
9241 tmp = I915_READ(PCH_DPLL_SEL);
9242 if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
9243 pll_id = DPLL_ID_PCH_PLL_B;
9244 else
9245 pll_id = DPLL_ID_PCH_PLL_A;
9246 }
9247
9248 pipe_config->shared_dpll =
9249 intel_get_shared_dpll_by_id(dev_priv, pll_id);
9250 pll = pipe_config->shared_dpll;
9251
9252 WARN_ON(!pll->funcs.get_hw_state(dev_priv, pll,
9253 &pipe_config->dpll_hw_state));
9254
9255 tmp = pipe_config->dpll_hw_state.dpll;
9256 pipe_config->pixel_multiplier =
9257 ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
9258 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
9259
9260 ironlake_pch_clock_get(crtc, pipe_config);
9261 } else {
9262 pipe_config->pixel_multiplier = 1;
9263 }
9264
9265 intel_get_pipe_timings(crtc, pipe_config);
9266 intel_get_pipe_src_size(crtc, pipe_config);
9267
9268 ironlake_get_pfit_config(crtc, pipe_config);
9269
9270 ret = true;
9271
9272 out:
9273 intel_display_power_put(dev_priv, power_domain);
9274
9275 return ret;
9276 }
9277
9278 static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
9279 {
9280 struct drm_device *dev = dev_priv->dev;
9281 struct intel_crtc *crtc;
9282
9283 for_each_intel_crtc(dev, crtc)
9284 I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
9285 pipe_name(crtc->pipe));
9286
9287 I915_STATE_WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
9288 I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n");
9289 I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
9290 I915_STATE_WARN(I915_READ(WRPLL_CTL(1)) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n");
9291 I915_STATE_WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n");
9292 I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
9293 "CPU PWM1 enabled\n");
9294 if (IS_HASWELL(dev))
9295 I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
9296 "CPU PWM2 enabled\n");
9297 I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
9298 "PCH PWM1 enabled\n");
9299 I915_STATE_WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
9300 "Utility pin enabled\n");
9301 I915_STATE_WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");
9302
9303 /*
9304 * In theory we can still leave IRQs enabled, as long as only the HPD
9305 * interrupts remain enabled. We used to check for that, but since it's
9306 * gen-specific and since we only disable LCPLL after we fully disable
9307 * the interrupts, the check below should be enough.
9308 */
9309 I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
9310 }
9311
9312 static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
9313 {
9314 struct drm_device *dev = dev_priv->dev;
9315
9316 if (IS_HASWELL(dev))
9317 return I915_READ(D_COMP_HSW);
9318 else
9319 return I915_READ(D_COMP_BDW);
9320 }
9321
9322 static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
9323 {
9324 struct drm_device *dev = dev_priv->dev;
9325
9326 if (IS_HASWELL(dev)) {
9327 mutex_lock(&dev_priv->rps.hw_lock);
9328 if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP,
9329 val))
9330 DRM_ERROR("Failed to write to D_COMP\n");
9331 mutex_unlock(&dev_priv->rps.hw_lock);
9332 } else {
9333 I915_WRITE(D_COMP_BDW, val);
9334 POSTING_READ(D_COMP_BDW);
9335 }
9336 }
9337
9338 /*
9339 * This function implements pieces of two sequences from BSpec:
9340 * - Sequence for display software to disable LCPLL
9341 * - Sequence for display software to allow package C8+
9342 * The steps implemented here are just the steps that actually touch the LCPLL
9343 * register. Callers should take care of disabling all the display engine
9344 * functions, doing the mode unset, fixing interrupts, etc.
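*
* Roughly, the sequence below is: optionally switch the CD clock source
* to Fclk, disable the LCPLL, disable D_COMP, and finally allow power
* down if requested.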
9345 */
9346 static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
9347 bool switch_to_fclk, bool allow_power_down)
9348 {
9349 uint32_t val;
9350
9351 assert_can_disable_lcpll(dev_priv);
9352
9353 val = I915_READ(LCPLL_CTL);
9354
9355 if (switch_to_fclk) {
9356 val |= LCPLL_CD_SOURCE_FCLK;
9357 I915_WRITE(LCPLL_CTL, val);
9358
9359 if (wait_for_atomic_us(I915_READ(LCPLL_CTL) &
9360 LCPLL_CD_SOURCE_FCLK_DONE, 1))
9361 DRM_ERROR("Switching to FCLK failed\n");
9362
9363 val = I915_READ(LCPLL_CTL);
9364 }
9365
9366 val |= LCPLL_PLL_DISABLE;
9367 I915_WRITE(LCPLL_CTL, val);
9368 POSTING_READ(LCPLL_CTL);
9369
9370 if (wait_for((I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK) == 0, 1))
9371 DRM_ERROR("LCPLL still locked\n");
9372
9373 val = hsw_read_dcomp(dev_priv);
9374 val |= D_COMP_COMP_DISABLE;
9375 hsw_write_dcomp(dev_priv, val);
9376 ndelay(100);
9377
9378 if (wait_for((hsw_read_dcomp(dev_priv) & D_COMP_RCOMP_IN_PROGRESS) == 0,
9379 1))
9380 DRM_ERROR("D_COMP RCOMP still in progress\n");
9381
9382 if (allow_power_down) {
9383 val = I915_READ(LCPLL_CTL);
9384 val |= LCPLL_POWER_DOWN_ALLOW;
9385 I915_WRITE(LCPLL_CTL, val);
9386 POSTING_READ(LCPLL_CTL);
9387 }
9388 }
9389
9390 /*
9391 * Fully restores LCPLL, disallowing power down and switching back to LCPLL
9392 * source.
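*
* This is essentially hsw_disable_lcpll() in reverse: clear
* LCPLL_POWER_DOWN_ALLOW, re-enable D_COMP, relock the PLL and, if
* needed, switch the CD clock source back from Fclk.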
9393 */
9394 static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
9395 {
9396 uint32_t val;
9397
9398 val = I915_READ(LCPLL_CTL);
9399
9400 if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
9401 LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
9402 return;
9403
9404 /*
9405 * Make sure we're not in PC8 state before disabling PC8, otherwise
9406 * we'll hang the machine. To prevent PC8 state, just enable force_wake.
9407 */
9408 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
9409
9410 if (val & LCPLL_POWER_DOWN_ALLOW) {
9411 val &= ~LCPLL_POWER_DOWN_ALLOW;
9412 I915_WRITE(LCPLL_CTL, val);
9413 POSTING_READ(LCPLL_CTL);
9414 }
9415
9416 val = hsw_read_dcomp(dev_priv);
9417 val |= D_COMP_COMP_FORCE;
9418 val &= ~D_COMP_COMP_DISABLE;
9419 hsw_write_dcomp(dev_priv, val);
9420
9421 val = I915_READ(LCPLL_CTL);
9422 val &= ~LCPLL_PLL_DISABLE;
9423 I915_WRITE(LCPLL_CTL, val);
9424
9425 if (wait_for(I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK, 5))
9426 DRM_ERROR("LCPLL not locked yet\n");
9427
9428 if (val & LCPLL_CD_SOURCE_FCLK) {
9429 val = I915_READ(LCPLL_CTL);
9430 val &= ~LCPLL_CD_SOURCE_FCLK;
9431 I915_WRITE(LCPLL_CTL, val);
9432
9433 if (wait_for_atomic_us((I915_READ(LCPLL_CTL) &
9434 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
9435 DRM_ERROR("Switching back to LCPLL failed\n");
9436 }
9437
9438 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
9439 intel_update_cdclk(dev_priv->dev);
9440 }
9441
9442 /*
9443 * Package states C8 and deeper are really deep PC states that can only be
9444 * reached when all the devices on the system allow it, so even if the graphics
9445 * device allows PC8+, it doesn't mean the system will actually get to these
9446 * states. Our driver only allows PC8+ when going into runtime PM.
9447 *
9448 * The requirements for PC8+ are that all the outputs are disabled, the power
9449 * well is disabled and most interrupts are disabled, and these are also
9450 * requirements for runtime PM. When these conditions are met, we manually take
9451 * care of the rest: we disable the interrupts and clocks and switch the LCPLL
9452 * refclk to Fclk. If we're in PC8+ and we get a non-hotplug interrupt, we can hard
9453 * hang the machine.
9454 *
9455 * When we really reach PC8 or deeper states (not just when we allow it) we lose
9456 * the state of some registers, so when we come back from PC8+ we need to
9457 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
9458 * need to take care of the registers kept by RC6. Notice that this happens even
9459 * if we don't put the device in PCI D3 state (which is what currently happens
9460 * because of the runtime PM support).
9461 *
9462 * For more, read "Display Sequences for Package C8" on the hardware
9463 * documentation.
9464 */
9465 void hsw_enable_pc8(struct drm_i915_private *dev_priv)
9466 {
9467 struct drm_device *dev = dev_priv->dev;
9468 uint32_t val;
9469
9470 DRM_DEBUG_KMS("Enabling package C8+\n");
9471
9472 if (HAS_PCH_LPT_LP(dev)) {
9473 val = I915_READ(SOUTH_DSPCLK_GATE_D);
9474 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
9475 I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
9476 }
9477
9478 lpt_disable_clkout_dp(dev);
9479 hsw_disable_lcpll(dev_priv, true, true);
9480 }
9481
9482 void hsw_disable_pc8(struct drm_i915_private *dev_priv)
9483 {
9484 struct drm_device *dev = dev_priv->dev;
9485 uint32_t val;
9486
9487 DRM_DEBUG_KMS("Disabling package C8+\n");
9488
9489 hsw_restore_lcpll(dev_priv);
9490 lpt_init_pch_refclk(dev);
9491
9492 if (HAS_PCH_LPT_LP(dev)) {
9493 val = I915_READ(SOUTH_DSPCLK_GATE_D);
9494 val |= PCH_LP_PARTITION_LEVEL_DISABLE;
9495 I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
9496 }
9497 }
9498
9499 static void broxton_modeset_commit_cdclk(struct drm_atomic_state *old_state)
9500 {
9501 struct drm_device *dev = old_state->dev;
9502 struct intel_atomic_state *old_intel_state =
9503 to_intel_atomic_state(old_state);
9504 unsigned int req_cdclk = old_intel_state->dev_cdclk;
9505
9506 broxton_set_cdclk(to_i915(dev), req_cdclk);
9507 }
9508
9509 /* compute the max pixel rate for the new configuration */
9510 static int ilk_max_pixel_rate(struct drm_atomic_state *state)
9511 {
9512 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
9513 struct drm_i915_private *dev_priv = state->dev->dev_private;
9514 struct drm_crtc *crtc;
9515 struct drm_crtc_state *cstate;
9516 struct intel_crtc_state *crtc_state;
9517 unsigned max_pixel_rate = 0, i;
9518 enum pipe pipe;
9519
9520 memcpy(intel_state->min_pixclk, dev_priv->min_pixclk,
9521 sizeof(intel_state->min_pixclk));
9522
9523 for_each_crtc_in_state(state, crtc, cstate, i) {
9524 int pixel_rate;
9525
9526 crtc_state = to_intel_crtc_state(cstate);
9527 if (!crtc_state->base.enable) {
9528 intel_state->min_pixclk[i] = 0;
9529 continue;
9530 }
9531
9532 pixel_rate = ilk_pipe_pixel_rate(crtc_state);
9533
9534 /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
9535 if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
9536 pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95);
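/* Worked example (editor's addition): a 400000 kHz pipe with IPS
 * enabled needs DIV_ROUND_UP(400000 * 100, 95) = 421053 kHz of cdclk. */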
9537
9538 intel_state->min_pixclk[i] = pixel_rate;
9539 }
9540
9541 for_each_pipe(dev_priv, pipe)
9542 max_pixel_rate = max(intel_state->min_pixclk[pipe], max_pixel_rate);
9543
9544 return max_pixel_rate;
9545 }
9546
9547 static void broadwell_set_cdclk(struct drm_device *dev, int cdclk)
9548 {
9549 struct drm_i915_private *dev_priv = dev->dev_private;
9550 uint32_t val, data;
9551 int ret;
9552
9553 if (WARN((I915_READ(LCPLL_CTL) &
9554 (LCPLL_PLL_DISABLE | LCPLL_PLL_LOCK |
9555 LCPLL_CD_CLOCK_DISABLE | LCPLL_ROOT_CD_CLOCK_DISABLE |
9556 LCPLL_CD2X_CLOCK_DISABLE | LCPLL_POWER_DOWN_ALLOW |
9557 LCPLL_CD_SOURCE_FCLK)) != LCPLL_PLL_LOCK,
9558 "trying to change cdclk frequency with cdclk not enabled\n"))
9559 return;
9560
9561 mutex_lock(&dev_priv->rps.hw_lock);
9562 ret = sandybridge_pcode_write(dev_priv,
9563 BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0);
9564 mutex_unlock(&dev_priv->rps.hw_lock);
9565 if (ret) {
9566 DRM_ERROR("failed to inform pcode about cdclk change\n");
9567 return;
9568 }
9569
9570 val = I915_READ(LCPLL_CTL);
9571 val |= LCPLL_CD_SOURCE_FCLK;
9572 I915_WRITE(LCPLL_CTL, val);
9573
9574 if (wait_for_us(I915_READ(LCPLL_CTL) &
9575 LCPLL_CD_SOURCE_FCLK_DONE, 1))
9576 DRM_ERROR("Switching to FCLK failed\n");
9577
9578 val = I915_READ(LCPLL_CTL);
9579 val &= ~LCPLL_CLK_FREQ_MASK;
9580
9581 switch (cdclk) {
9582 case 450000:
9583 val |= LCPLL_CLK_FREQ_450;
9584 data = 0;
9585 break;
9586 case 540000:
9587 val |= LCPLL_CLK_FREQ_54O_BDW;
9588 data = 1;
9589 break;
9590 case 337500:
9591 val |= LCPLL_CLK_FREQ_337_5_BDW;
9592 data = 2;
9593 break;
9594 case 675000:
9595 val |= LCPLL_CLK_FREQ_675_BDW;
9596 data = 3;
9597 break;
9598 default:
9599 WARN(1, "invalid cdclk frequency\n");
9600 return;
9601 }
9602
9603 I915_WRITE(LCPLL_CTL, val);
9604
9605 val = I915_READ(LCPLL_CTL);
9606 val &= ~LCPLL_CD_SOURCE_FCLK;
9607 I915_WRITE(LCPLL_CTL, val);
9608
9609 if (wait_for_us((I915_READ(LCPLL_CTL) &
9610 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
9611 DRM_ERROR("Switching back to LCPLL failed\n");
9612
9613 mutex_lock(&dev_priv->rps.hw_lock);
9614 sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, data);
9615 mutex_unlock(&dev_priv->rps.hw_lock);
9616
9617 I915_WRITE(CDCLK_FREQ, DIV_ROUND_CLOSEST(cdclk, 1000) - 1);
9618
9619 intel_update_cdclk(dev);
9620
9621 WARN(cdclk != dev_priv->cdclk_freq,
9622 "cdclk requested %d kHz but got %d kHz\n",
9623 cdclk, dev_priv->cdclk_freq);
9624 }
9625
9626 static int broadwell_calc_cdclk(int max_pixclk)
9627 {
9628 if (max_pixclk > 540000)
9629 return 675000;
9630 else if (max_pixclk > 450000)
9631 return 540000;
9632 else if (max_pixclk > 337500)
9633 return 450000;
9634 else
9635 return 337500;
9636 }
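/* Worked example (editor's addition): a single 3840x2160@60 pipe
 * (roughly 533250 kHz with CVT-RB timings) exceeds the 450000 kHz step
 * and therefore selects the 540000 kHz cdclk. */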
9637
9638 static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state)
9639 {
9640 struct drm_i915_private *dev_priv = to_i915(state->dev);
9641 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
9642 int max_pixclk = ilk_max_pixel_rate(state);
9643 int cdclk;
9644
9645 /*
9646 * FIXME should also account for plane ratio
9647 * once 64bpp pixel formats are supported.
9648 */
9649 cdclk = broadwell_calc_cdclk(max_pixclk);
9650
9651 if (cdclk > dev_priv->max_cdclk_freq) {
9652 DRM_DEBUG_KMS("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
9653 cdclk, dev_priv->max_cdclk_freq);
9654 return -EINVAL;
9655 }
9656
9657 intel_state->cdclk = intel_state->dev_cdclk = cdclk;
9658 if (!intel_state->active_crtcs)
9659 intel_state->dev_cdclk = broadwell_calc_cdclk(0);
9660
9661 return 0;
9662 }
9663
9664 static void broadwell_modeset_commit_cdclk(struct drm_atomic_state *old_state)
9665 {
9666 struct drm_device *dev = old_state->dev;
9667 struct intel_atomic_state *old_intel_state =
9668 to_intel_atomic_state(old_state);
9669 unsigned req_cdclk = old_intel_state->dev_cdclk;
9670
9671 broadwell_set_cdclk(dev, req_cdclk);
9672 }
9673
9674 static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
9675 struct intel_crtc_state *crtc_state)
9676 {
9677 struct intel_encoder *intel_encoder =
9678 intel_ddi_get_crtc_new_encoder(crtc_state);
9679
9680 if (intel_encoder->type != INTEL_OUTPUT_DSI) {
9681 if (!intel_ddi_pll_select(crtc, crtc_state))
9682 return -EINVAL;
9683 }
9684
9685 crtc->lowfreq_avail = false;
9686
9687 return 0;
9688 }
9689
9690 static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
9691 enum port port,
9692 struct intel_crtc_state *pipe_config)
9693 {
9694 enum intel_dpll_id id;
9695
9696 switch (port) {
9697 case PORT_A:
9698 pipe_config->ddi_pll_sel = SKL_DPLL0;
9699 id = DPLL_ID_SKL_DPLL0;
9700 break;
9701 case PORT_B:
9702 pipe_config->ddi_pll_sel = SKL_DPLL1;
9703 id = DPLL_ID_SKL_DPLL1;
9704 break;
9705 case PORT_C:
9706 pipe_config->ddi_pll_sel = SKL_DPLL2;
9707 id = DPLL_ID_SKL_DPLL2;
9708 break;
9709 default:
9710 DRM_ERROR("Incorrect port type\n");
9711 return;
9712 }
9713
9714 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
9715 }
9716
9717 static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
9718 enum port port,
9719 struct intel_crtc_state *pipe_config)
9720 {
9721 enum intel_dpll_id id;
9722 u32 temp;
9723
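/* Descriptive note (editor's addition): each port occupies a 3-bit group
 * in DPLL_CTRL2, with the clock select bits starting at bit
 * (port * 3 + 1), which is what the shift below extracts. */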
9724 temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
9725 pipe_config->ddi_pll_sel = temp >> (port * 3 + 1);
9726
9727 switch (pipe_config->ddi_pll_sel) {
9728 case SKL_DPLL0:
9729 id = DPLL_ID_SKL_DPLL0;
9730 break;
9731 case SKL_DPLL1:
9732 id = DPLL_ID_SKL_DPLL1;
9733 break;
9734 case SKL_DPLL2:
9735 id = DPLL_ID_SKL_DPLL2;
9736 break;
9737 case SKL_DPLL3:
9738 id = DPLL_ID_SKL_DPLL3;
9739 break;
9740 default:
9741 MISSING_CASE(pipe_config->ddi_pll_sel);
9742 return;
9743 }
9744
9745 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
9746 }
9747
9748 static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
9749 enum port port,
9750 struct intel_crtc_state *pipe_config)
9751 {
9752 enum intel_dpll_id id;
9753
9754 pipe_config->ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
9755
9756 switch (pipe_config->ddi_pll_sel) {
9757 case PORT_CLK_SEL_WRPLL1:
9758 id = DPLL_ID_WRPLL1;
9759 break;
9760 case PORT_CLK_SEL_WRPLL2:
9761 id = DPLL_ID_WRPLL2;
9762 break;
9763 case PORT_CLK_SEL_SPLL:
9764 id = DPLL_ID_SPLL;
9765 break;
9766 case PORT_CLK_SEL_LCPLL_810:
9767 id = DPLL_ID_LCPLL_810;
9768 break;
9769 case PORT_CLK_SEL_LCPLL_1350:
9770 id = DPLL_ID_LCPLL_1350;
9771 break;
9772 case PORT_CLK_SEL_LCPLL_2700:
9773 id = DPLL_ID_LCPLL_2700;
9774 break;
9775 default:
9776 MISSING_CASE(pipe_config->ddi_pll_sel);
9777 /* fall through */
9778 case PORT_CLK_SEL_NONE:
9779 return;
9780 }
9781
9782 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
9783 }
9784
9785 static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
9786 struct intel_crtc_state *pipe_config,
9787 unsigned long *power_domain_mask)
9788 {
9789 struct drm_device *dev = crtc->base.dev;
9790 struct drm_i915_private *dev_priv = dev->dev_private;
9791 enum intel_display_power_domain power_domain;
9792 u32 tmp;
9793
9794 /*
9795 * The pipe->transcoder mapping is fixed with the exception of the eDP
9796 * transcoder handled below.
9797 */
9798 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
9799
9800 /*
9801 * XXX: Do intel_display_power_get_if_enabled before reading this (for
9802 * consistency and less surprising code; it's in the always-on power well).
9803 */
9804 tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
9805 if (tmp & TRANS_DDI_FUNC_ENABLE) {
9806 enum pipe trans_edp_pipe;
9807 switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
9808 default:
9809 WARN(1, "unknown pipe linked to edp transcoder\n");
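/* fall through */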
9810 case TRANS_DDI_EDP_INPUT_A_ONOFF:
9811 case TRANS_DDI_EDP_INPUT_A_ON:
9812 trans_edp_pipe = PIPE_A;
9813 break;
9814 case TRANS_DDI_EDP_INPUT_B_ONOFF:
9815 trans_edp_pipe = PIPE_B;
9816 break;
9817 case TRANS_DDI_EDP_INPUT_C_ONOFF:
9818 trans_edp_pipe = PIPE_C;
9819 break;
9820 }
9821
9822 if (trans_edp_pipe == crtc->pipe)
9823 pipe_config->cpu_transcoder = TRANSCODER_EDP;
9824 }
9825
9826 power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
9827 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
9828 return false;
9829 *power_domain_mask |= BIT(power_domain);
9830
9831 tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
9832
9833 return tmp & PIPECONF_ENABLE;
9834 }
9835
9836 static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
9837 struct intel_crtc_state *pipe_config,
9838 unsigned long *power_domain_mask)
9839 {
9840 struct drm_device *dev = crtc->base.dev;
9841 struct drm_i915_private *dev_priv = dev->dev_private;
9842 enum intel_display_power_domain power_domain;
9843 enum port port;
9844 enum transcoder cpu_transcoder;
9845 u32 tmp;
9846
9847 pipe_config->has_dsi_encoder = false;
9848
9849 for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
9850 if (port == PORT_A)
9851 cpu_transcoder = TRANSCODER_DSI_A;
9852 else
9853 cpu_transcoder = TRANSCODER_DSI_C;
9854
9855 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
9856 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
9857 continue;
9858 *power_domain_mask |= BIT(power_domain);
9859
9860 /*
9861 * The PLL needs to be enabled with a valid divider
9862 * configuration, otherwise accessing DSI registers will hang
9863 * the machine. See BSpec North Display Engine
9864 * registers/MIPI[BXT]. We can break out here early, since we
9865 * need the same DSI PLL to be enabled for both DSI ports.
9866 */
9867 if (!intel_dsi_pll_is_enabled(dev_priv))
9868 break;
9869
9870 /* XXX: this works for video mode only */
9871 tmp = I915_READ(BXT_MIPI_PORT_CTRL(port));
9872 if (!(tmp & DPI_ENABLE))
9873 continue;
9874
9875 tmp = I915_READ(MIPI_CTRL(port));
9876 if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
9877 continue;
9878
9879 pipe_config->cpu_transcoder = cpu_transcoder;
9880 pipe_config->has_dsi_encoder = true;
9881 break;
9882 }
9883
9884 return pipe_config->has_dsi_encoder;
9885 }
9886
9887 static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
9888 struct intel_crtc_state *pipe_config)
9889 {
9890 struct drm_device *dev = crtc->base.dev;
9891 struct drm_i915_private *dev_priv = dev->dev_private;
9892 struct intel_shared_dpll *pll;
9893 enum port port;
9894 uint32_t tmp;
9895
9896 tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
9897
9898 port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
9899
9900 if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
9901 skylake_get_ddi_pll(dev_priv, port, pipe_config);
9902 else if (IS_BROXTON(dev))
9903 bxt_get_ddi_pll(dev_priv, port, pipe_config);
9904 else
9905 haswell_get_ddi_pll(dev_priv, port, pipe_config);
9906
9907 pll = pipe_config->shared_dpll;
9908 if (pll) {
9909 WARN_ON(!pll->funcs.get_hw_state(dev_priv, pll,
9910 &pipe_config->dpll_hw_state));
9911 }
9912
9913 /*
9914 * Haswell has only one FDI/PCH transcoder, A, which is connected to
9915 * DDI E. So just check whether this pipe is wired to DDI E and whether
9916 * the PCH transcoder is on.
9917 */
9918 if (INTEL_INFO(dev)->gen < 9 &&
9919 (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
9920 pipe_config->has_pch_encoder = true;
9921
9922 tmp = I915_READ(FDI_RX_CTL(PIPE_A));
9923 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
9924 FDI_DP_PORT_WIDTH_SHIFT) + 1;
9925
9926 ironlake_get_fdi_m_n_config(crtc, pipe_config);
9927 }
9928 }
9929
9930 static bool haswell_get_pipe_config(struct intel_crtc *crtc,
9931 struct intel_crtc_state *pipe_config)
9932 {
9933 struct drm_device *dev = crtc->base.dev;
9934 struct drm_i915_private *dev_priv = dev->dev_private;
9935 enum intel_display_power_domain power_domain;
9936 unsigned long power_domain_mask;
9937 bool active;
9938
9939 power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
9940 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
9941 return false;
9942 power_domain_mask = BIT(power_domain);
9943
9944 pipe_config->shared_dpll = NULL;
9945
9946 active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_mask);
9947
9948 if (IS_BROXTON(dev_priv)) {
9949 bxt_get_dsi_transcoder_state(crtc, pipe_config,
9950 &power_domain_mask);
9951 WARN_ON(active && pipe_config->has_dsi_encoder);
9952 if (pipe_config->has_dsi_encoder)
9953 active = true;
9954 }
9955
9956 if (!active)
9957 goto out;
9958
9959 if (!pipe_config->has_dsi_encoder) {
9960 haswell_get_ddi_port_state(crtc, pipe_config);
9961 intel_get_pipe_timings(crtc, pipe_config);
9962 }
9963
9964 intel_get_pipe_src_size(crtc, pipe_config);
9965
9966 pipe_config->gamma_mode =
9967 I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK;
9968
9969 if (INTEL_INFO(dev)->gen >= 9) {
9970 skl_init_scalers(dev, crtc, pipe_config);
9971 
9972 pipe_config->scaler_state.scaler_id = -1;
9973 pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX);
9974 }
9977
9978 power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
9979 if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
9980 power_domain_mask |= BIT(power_domain);
9981 if (INTEL_INFO(dev)->gen >= 9)
9982 skylake_get_pfit_config(crtc, pipe_config);
9983 else
9984 ironlake_get_pfit_config(crtc, pipe_config);
9985 }
9986
9987 if (IS_HASWELL(dev))
9988 pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
9989 (I915_READ(IPS_CTL) & IPS_ENABLE);
9990
9991 if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
9992 !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
9993 pipe_config->pixel_multiplier =
9994 I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
9995 } else {
9996 pipe_config->pixel_multiplier = 1;
9997 }
9998
9999 out:
10000 for_each_power_domain(power_domain, power_domain_mask)
10001 intel_display_power_put(dev_priv, power_domain);
10002
10003 return active;
10004 }
10005
10006 static void i845_update_cursor(struct drm_crtc *crtc, u32 base,
10007 const struct intel_plane_state *plane_state)
10008 {
10009 struct drm_device *dev = crtc->dev;
10010 struct drm_i915_private *dev_priv = dev->dev_private;
10011 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10012 uint32_t cntl = 0, size = 0;
10013
10014 if (plane_state && plane_state->visible) {
10015 unsigned int width = plane_state->base.crtc_w;
10016 unsigned int height = plane_state->base.crtc_h;
10017 unsigned int stride = roundup_pow_of_two(width) * 4;
10018
10019 switch (stride) {
10020 default:
10021 WARN_ONCE(1, "Invalid cursor width/stride, width=%u, stride=%u\n",
10022 width, stride);
10023 stride = 256;
10024 /* fallthrough */
10025 case 256:
10026 case 512:
10027 case 1024:
10028 case 2048:
10029 break;
10030 }
10031
10032 cntl |= CURSOR_ENABLE |
10033 CURSOR_GAMMA_ENABLE |
10034 CURSOR_FORMAT_ARGB |
10035 CURSOR_STRIDE(stride);
10036
10037 size = (height << 12) | width;
10038 }
10039
10040 if (intel_crtc->cursor_cntl != 0 &&
10041 (intel_crtc->cursor_base != base ||
10042 intel_crtc->cursor_size != size ||
10043 intel_crtc->cursor_cntl != cntl)) {
10044 /* On these chipsets we can only modify the base/size/stride
10045 * whilst the cursor is disabled.
10046 */
10047 I915_WRITE(CURCNTR(PIPE_A), 0);
10048 POSTING_READ(CURCNTR(PIPE_A));
10049 intel_crtc->cursor_cntl = 0;
10050 }
10051
10052 if (intel_crtc->cursor_base != base) {
10053 I915_WRITE(CURBASE(PIPE_A), base);
10054 intel_crtc->cursor_base = base;
10055 }
10056
10057 if (intel_crtc->cursor_size != size) {
10058 I915_WRITE(CURSIZE, size);
10059 intel_crtc->cursor_size = size;
10060 }
10061
10062 if (intel_crtc->cursor_cntl != cntl) {
10063 I915_WRITE(CURCNTR(PIPE_A), cntl);
10064 POSTING_READ(CURCNTR(PIPE_A));
10065 intel_crtc->cursor_cntl = cntl;
10066 }
10067 }
10068
10069 static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base,
10070 const struct intel_plane_state *plane_state)
10071 {
10072 struct drm_device *dev = crtc->dev;
10073 struct drm_i915_private *dev_priv = dev->dev_private;
10074 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10075 int pipe = intel_crtc->pipe;
10076 uint32_t cntl = 0;
10077
10078 if (plane_state && plane_state->visible) {
10079 cntl = MCURSOR_GAMMA_ENABLE;
10080 switch (plane_state->base.crtc_w) {
10081 case 64:
10082 cntl |= CURSOR_MODE_64_ARGB_AX;
10083 break;
10084 case 128:
10085 cntl |= CURSOR_MODE_128_ARGB_AX;
10086 break;
10087 case 256:
10088 cntl |= CURSOR_MODE_256_ARGB_AX;
10089 break;
10090 default:
10091 MISSING_CASE(plane_state->base.crtc_w);
10092 return;
10093 }
10094 cntl |= pipe << 28; /* Connect to correct pipe */
10095
10096 if (HAS_DDI(dev))
10097 cntl |= CURSOR_PIPE_CSC_ENABLE;
10098
10099 if (plane_state->base.rotation == BIT(DRM_ROTATE_180))
10100 cntl |= CURSOR_ROTATE_180;
10101 }
10102
10103 if (intel_crtc->cursor_cntl != cntl) {
10104 I915_WRITE(CURCNTR(pipe), cntl);
10105 POSTING_READ(CURCNTR(pipe));
10106 intel_crtc->cursor_cntl = cntl;
10107 }
10108
10109 /* and commit changes on next vblank */
10110 I915_WRITE(CURBASE(pipe), base);
10111 POSTING_READ(CURBASE(pipe));
10112
10113 intel_crtc->cursor_base = base;
10114 }
10115
10116 /* If no part of the cursor is visible on the framebuffer, then the GPU may hang... */
10117 static void intel_crtc_update_cursor(struct drm_crtc *crtc,
10118 const struct intel_plane_state *plane_state)
10119 {
10120 struct drm_device *dev = crtc->dev;
10121 struct drm_i915_private *dev_priv = dev->dev_private;
10122 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10123 int pipe = intel_crtc->pipe;
10124 u32 base = intel_crtc->cursor_addr;
10125 u32 pos = 0;
10126
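/* Descriptive note (editor's addition): CURPOS takes sign-magnitude
 * coordinates; a negative position sets the sign bit and stores the
 * absolute value in the same field. */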
10127 if (plane_state) {
10128 int x = plane_state->base.crtc_x;
10129 int y = plane_state->base.crtc_y;
10130
10131 if (x < 0) {
10132 pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
10133 x = -x;
10134 }
10135 pos |= x << CURSOR_X_SHIFT;
10136
10137 if (y < 0) {
10138 pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
10139 y = -y;
10140 }
10141 pos |= y << CURSOR_Y_SHIFT;
10142
10143 /* ILK+ handle 180° cursor rotation automagically; on GMCH we instead point the base at the last ARGB pixel */
10144 if (HAS_GMCH_DISPLAY(dev) &&
10145 plane_state->base.rotation == BIT(DRM_ROTATE_180)) {
10146 base += (plane_state->base.crtc_h *
10147 plane_state->base.crtc_w - 1) * 4;
10148 }
10149 }
10150
10151 I915_WRITE(CURPOS(pipe), pos);
10152
10153 if (IS_845G(dev) || IS_I865G(dev))
10154 i845_update_cursor(crtc, base, plane_state);
10155 else
10156 i9xx_update_cursor(crtc, base, plane_state);
10157 }
10158
10159 static bool cursor_size_ok(struct drm_device *dev,
10160 uint32_t width, uint32_t height)
10161 {
10162 if (width == 0 || height == 0)
10163 return false;
10164
10165 /*
10166 * 845g/865g are special in that they are only limited by
10167 * the width of their cursors; the height is arbitrary up to
10168 * the precision of the register. Everything else requires
10169 * square cursors, limited to a few power-of-two sizes.
10170 */
10171 if (IS_845G(dev) || IS_I865G(dev)) {
10172 if ((width & 63) != 0)
10173 return false;
10174
10175 if (width > (IS_845G(dev) ? 64 : 512))
10176 return false;
10177
10178 if (height > 1023)
10179 return false;
10180 } else {
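/* Descriptive note (editor's addition): square cursors have
 * width == height, so (width | height) equals both; any non-square or
 * unsupported size falls through to the default case. */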
10181 switch (width | height) {
10182 case 256:
10183 case 128:
10184 if (IS_GEN2(dev))
10185 return false;
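/* fall through */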
10186 case 64:
10187 break;
10188 default:
10189 return false;
10190 }
10191 }
10192
10193 return true;
10194 }
10195
10196 /* VESA 640x480x72Hz mode to set on the pipe */
10197 static struct drm_display_mode load_detect_mode = {
10198 DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
10199 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
10200 };
10201
10202 struct drm_framebuffer *
10203 __intel_framebuffer_create(struct drm_device *dev,
10204 struct drm_mode_fb_cmd2 *mode_cmd,
10205 struct drm_i915_gem_object *obj)
10206 {
10207 struct intel_framebuffer *intel_fb;
10208 int ret;
10209
10210 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
10211 if (!intel_fb)
10212 return ERR_PTR(-ENOMEM);
10213
10214 ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
10215 if (ret)
10216 goto err;
10217
10218 return &intel_fb->base;
10219
10220 err:
10221 kfree(intel_fb);
10222 return ERR_PTR(ret);
10223 }
10224
10225 static struct drm_framebuffer *
10226 intel_framebuffer_create(struct drm_device *dev,
10227 struct drm_mode_fb_cmd2 *mode_cmd,
10228 struct drm_i915_gem_object *obj)
10229 {
10230 struct drm_framebuffer *fb;
10231 int ret;
10232
10233 ret = i915_mutex_lock_interruptible(dev);
10234 if (ret)
10235 return ERR_PTR(ret);
10236 fb = __intel_framebuffer_create(dev, mode_cmd, obj);
10237 mutex_unlock(&dev->struct_mutex);
10238
10239 return fb;
10240 }
10241
10242 static u32
10243 intel_framebuffer_pitch_for_width(int width, int bpp)
10244 {
10245 u32 pitch = DIV_ROUND_UP(width * bpp, 8);
10246 return ALIGN(pitch, 64);
10247 }
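/* Worked example (editor's addition): a 1366-pixel-wide, 32bpp mode
 * needs DIV_ROUND_UP(1366 * 32, 8) = 5464 bytes per row, which ALIGN()
 * rounds up to a 5504-byte pitch. */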
10248
10249 static u32
10250 intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
10251 {
10252 u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
10253 return PAGE_ALIGN(pitch * mode->vdisplay);
10254 }
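/* Worked example (editor's addition): for the 640x480 load-detect mode
 * at 32bpp the pitch is 2560 bytes, so the object size is
 * PAGE_ALIGN(2560 * 480) = 1228800 bytes (exactly 300 pages). */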
10255
10256 static struct drm_framebuffer *
10257 intel_framebuffer_create_for_mode(struct drm_device *dev,
10258 struct drm_display_mode *mode,
10259 int depth, int bpp)
10260 {
10261 struct drm_framebuffer *fb;
10262 struct drm_i915_gem_object *obj;
10263 struct drm_mode_fb_cmd2 mode_cmd = { 0 };
10264
10265 obj = i915_gem_object_create(dev,
10266 intel_framebuffer_size_for_mode(mode, bpp));
10267 if (IS_ERR(obj))
10268 return ERR_CAST(obj);
10269
10270 mode_cmd.width = mode->hdisplay;
10271 mode_cmd.height = mode->vdisplay;
10272 mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width,
10273 bpp);
10274 mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
10275
10276 fb = intel_framebuffer_create(dev, &mode_cmd, obj);
10277 if (IS_ERR(fb))
10278 drm_gem_object_unreference_unlocked(&obj->base);
10279
10280 return fb;
10281 }
10282
10283 static struct drm_framebuffer *
10284 mode_fits_in_fbdev(struct drm_device *dev,
10285 struct drm_display_mode *mode)
10286 {
10287 #ifdef CONFIG_DRM_FBDEV_EMULATION
10288 struct drm_i915_private *dev_priv = dev->dev_private;
10289 struct drm_i915_gem_object *obj;
10290 struct drm_framebuffer *fb;
10291
10292 if (!dev_priv->fbdev)
10293 return NULL;
10294
10295 if (!dev_priv->fbdev->fb)
10296 return NULL;
10297
10298 obj = dev_priv->fbdev->fb->obj;
10299 BUG_ON(!obj);
10300
10301 fb = &dev_priv->fbdev->fb->base;
10302 if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
10303 fb->bits_per_pixel))
10304 return NULL;
10305
10306 if (obj->base.size < mode->vdisplay * fb->pitches[0])
10307 return NULL;
10308
10309 drm_framebuffer_reference(fb);
10310 return fb;
10311 #else
10312 return NULL;
10313 #endif
10314 }
10315
10316 static int intel_modeset_setup_plane_state(struct drm_atomic_state *state,
10317 struct drm_crtc *crtc,
10318 struct drm_display_mode *mode,
10319 struct drm_framebuffer *fb,
10320 int x, int y)
10321 {
10322 struct drm_plane_state *plane_state;
10323 int hdisplay, vdisplay;
10324 int ret;
10325
10326 plane_state = drm_atomic_get_plane_state(state, crtc->primary);
10327 if (IS_ERR(plane_state))
10328 return PTR_ERR(plane_state);
10329
10330 if (mode)
10331 drm_crtc_get_hv_timing(mode, &hdisplay, &vdisplay);
10332 else
10333 hdisplay = vdisplay = 0;
10334
10335 ret = drm_atomic_set_crtc_for_plane(plane_state, fb ? crtc : NULL);
10336 if (ret)
10337 return ret;
10338 drm_atomic_set_fb_for_plane(plane_state, fb);
10339 plane_state->crtc_x = 0;
10340 plane_state->crtc_y = 0;
10341 plane_state->crtc_w = hdisplay;
10342 plane_state->crtc_h = vdisplay;
10343 plane_state->src_x = x << 16;
10344 plane_state->src_y = y << 16;
10345 plane_state->src_w = hdisplay << 16;
10346 plane_state->src_h = vdisplay << 16;
10347
10348 return 0;
10349 }
10350
10351 bool intel_get_load_detect_pipe(struct drm_connector *connector,
10352 struct drm_display_mode *mode,
10353 struct intel_load_detect_pipe *old,
10354 struct drm_modeset_acquire_ctx *ctx)
10355 {
10356 struct intel_crtc *intel_crtc;
10357 struct intel_encoder *intel_encoder =
10358 intel_attached_encoder(connector);
10359 struct drm_crtc *possible_crtc;
10360 struct drm_encoder *encoder = &intel_encoder->base;
10361 struct drm_crtc *crtc = NULL;
10362 struct drm_device *dev = encoder->dev;
10363 struct drm_framebuffer *fb;
10364 struct drm_mode_config *config = &dev->mode_config;
10365 struct drm_atomic_state *state = NULL, *restore_state = NULL;
10366 struct drm_connector_state *connector_state;
10367 struct intel_crtc_state *crtc_state;
10368 int ret, i = -1;
10369
10370 DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
10371 connector->base.id, connector->name,
10372 encoder->base.id, encoder->name);
10373
10374 old->restore_state = NULL;
10375
10376 retry:
10377 ret = drm_modeset_lock(&config->connection_mutex, ctx);
10378 if (ret)
10379 goto fail;
10380
10381 /*
10382 * Algorithm gets a little messy:
10383 *
10384 * - if the connector already has an assigned crtc, use it (but make
10385 * sure it's on first)
10386 *
10387 * - try to find the first unused crtc that can drive this connector,
10388 * and use that if we find one
10389 */
10390
10391 /* See if we already have a CRTC for this connector */
10392 if (connector->state->crtc) {
10393 crtc = connector->state->crtc;
10394
10395 ret = drm_modeset_lock(&crtc->mutex, ctx);
10396 if (ret)
10397 goto fail;
10398
10399 /* Make sure the crtc and connector are running */
10400 goto found;
10401 }
10402
10403 /* Find an unused one (if possible) */
10404 for_each_crtc(dev, possible_crtc) {
10405 i++;
10406 if (!(encoder->possible_crtcs & (1 << i)))
10407 continue;
10408
10409 ret = drm_modeset_lock(&possible_crtc->mutex, ctx);
10410 if (ret)
10411 goto fail;
10412
10413 if (possible_crtc->state->enable) {
10414 drm_modeset_unlock(&possible_crtc->mutex);
10415 continue;
10416 }
10417
10418 crtc = possible_crtc;
10419 break;
10420 }
10421
10422 /*
10423 * If we didn't find an unused CRTC, don't use any.
10424 */
10425 if (!crtc) {
10426 DRM_DEBUG_KMS("no pipe available for load-detect\n");
10427 goto fail;
10428 }
10429
10430 found:
10431 intel_crtc = to_intel_crtc(crtc);
10432
10433 ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
10434 if (ret)
10435 goto fail;
10436
10437 state = drm_atomic_state_alloc(dev);
10438 restore_state = drm_atomic_state_alloc(dev);
10439 if (!state || !restore_state) {
10440 ret = -ENOMEM;
10441 goto fail;
10442 }
10443
10444 state->acquire_ctx = ctx;
10445 restore_state->acquire_ctx = ctx;
10446
10447 connector_state = drm_atomic_get_connector_state(state, connector);
10448 if (IS_ERR(connector_state)) {
10449 ret = PTR_ERR(connector_state);
10450 goto fail;
10451 }
10452
10453 ret = drm_atomic_set_crtc_for_connector(connector_state, crtc);
10454 if (ret)
10455 goto fail;
10456
10457 crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
10458 if (IS_ERR(crtc_state)) {
10459 ret = PTR_ERR(crtc_state);
10460 goto fail;
10461 }
10462
10463 crtc_state->base.active = crtc_state->base.enable = true;
10464
10465 if (!mode)
10466 mode = &load_detect_mode;
10467
10468 /* We need a framebuffer large enough to accommodate all accesses
10469 * that the plane may generate whilst we perform load detection.
10470 * We cannot rely on the fbcon being present (we get called
10471 * during its initialisation to detect all boot displays, or it may
10472 * not even exist), nor on it being large enough to satisfy the
10473 * requested mode.
10474 */
10475 fb = mode_fits_in_fbdev(dev, mode);
10476 if (fb == NULL) {
10477 DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
10478 fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
10479 } else
10480 DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
10481 if (IS_ERR(fb)) {
10482 DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
10483 goto fail;
10484 }
10485
10486 ret = intel_modeset_setup_plane_state(state, crtc, mode, fb, 0, 0);
10487 if (ret)
10488 goto fail;
10489
10490 drm_framebuffer_unreference(fb);
10491
10492 ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode);
10493 if (ret)
10494 goto fail;
10495
10496 ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
10497 if (!ret)
10498 ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
10499 if (!ret)
10500 ret = PTR_ERR_OR_ZERO(drm_atomic_get_plane_state(restore_state, crtc->primary));
10501 if (ret) {
10502 DRM_DEBUG_KMS("Failed to create a copy of old state to restore: %i\n", ret);
10503 goto fail;
10504 }
10505
10506 ret = drm_atomic_commit(state);
10507 if (ret) {
10508 DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
10509 goto fail;
10510 }
10511
10512 old->restore_state = restore_state;
10513
10514 /* let the connector get through one full cycle before testing */
10515 intel_wait_for_vblank(dev, intel_crtc->pipe);
10516 return true;
10517
10518 fail:
10519 drm_atomic_state_free(state);
10520 drm_atomic_state_free(restore_state);
10521 restore_state = state = NULL;
10522
10523 if (ret == -EDEADLK) {
10524 drm_modeset_backoff(ctx);
10525 goto retry;
10526 }
10527
10528 return false;
10529 }
10530
10531 void intel_release_load_detect_pipe(struct drm_connector *connector,
10532 struct intel_load_detect_pipe *old,
10533 struct drm_modeset_acquire_ctx *ctx)
10534 {
10535 struct intel_encoder *intel_encoder =
10536 intel_attached_encoder(connector);
10537 struct drm_encoder *encoder = &intel_encoder->base;
10538 struct drm_atomic_state *state = old->restore_state;
10539 int ret;
10540
10541 DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
10542 connector->base.id, connector->name,
10543 encoder->base.id, encoder->name);
10544
10545 if (!state)
10546 return;
10547
10548 ret = drm_atomic_commit(state);
10549 if (ret) {
10550 DRM_DEBUG_KMS("Couldn't release load detect pipe: %i\n", ret);
10551 drm_atomic_state_free(state);
10552 }
10553 }
10554
10555 static int i9xx_pll_refclk(struct drm_device *dev,
10556 const struct intel_crtc_state *pipe_config)
10557 {
10558 struct drm_i915_private *dev_priv = dev->dev_private;
10559 u32 dpll = pipe_config->dpll_hw_state.dpll;
10560
10561 if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
10562 return dev_priv->vbt.lvds_ssc_freq;
10563 else if (HAS_PCH_SPLIT(dev))
10564 return 120000;
10565 else if (!IS_GEN2(dev))
10566 return 96000;
10567 else
10568 return 48000;
10569 }
10570
10571 /* Returns the clock of the currently programmed mode of the given pipe. */
10572 static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
10573 struct intel_crtc_state *pipe_config)
10574 {
10575 struct drm_device *dev = crtc->base.dev;
10576 struct drm_i915_private *dev_priv = dev->dev_private;
10577 int pipe = pipe_config->cpu_transcoder;
10578 u32 dpll = pipe_config->dpll_hw_state.dpll;
10579 u32 fp;
10580 struct dpll clock;
10581 int port_clock;
10582 int refclk = i9xx_pll_refclk(dev, pipe_config);
10583
10584 if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
10585 fp = pipe_config->dpll_hw_state.fp0;
10586 else
10587 fp = pipe_config->dpll_hw_state.fp1;
10588
10589 clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
10590 if (IS_PINEVIEW(dev)) {
10591 clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
10592 clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
10593 } else {
10594 clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
10595 clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
10596 }
10597
10598 if (!IS_GEN2(dev)) {
10599 if (IS_PINEVIEW(dev))
10600 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
10601 DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
10602 else
10603 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
10604 DPLL_FPA01_P1_POST_DIV_SHIFT);
10605
10606 switch (dpll & DPLL_MODE_MASK) {
10607 case DPLLB_MODE_DAC_SERIAL:
10608 clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
10609 5 : 10;
10610 break;
10611 case DPLLB_MODE_LVDS:
10612 clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
10613 7 : 14;
10614 break;
10615 default:
10616 DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
10617 "mode\n", (int)(dpll & DPLL_MODE_MASK));
10618 return;
10619 }
10620
10621 if (IS_PINEVIEW(dev))
10622 port_clock = pnv_calc_dpll_params(refclk, &clock);
10623 else
10624 port_clock = i9xx_calc_dpll_params(refclk, &clock);
10625 } else {
10626 u32 lvds = IS_I830(dev) ? 0 : I915_READ(LVDS);
10627 bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);
10628
10629 if (is_lvds) {
10630 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
10631 DPLL_FPA01_P1_POST_DIV_SHIFT);
10632
10633 if (lvds & LVDS_CLKB_POWER_UP)
10634 clock.p2 = 7;
10635 else
10636 clock.p2 = 14;
10637 } else {
10638 if (dpll & PLL_P1_DIVIDE_BY_TWO)
10639 clock.p1 = 2;
10640 else {
10641 clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
10642 DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
10643 }
10644 if (dpll & PLL_P2_DIVIDE_BY_4)
10645 clock.p2 = 4;
10646 else
10647 clock.p2 = 2;
10648 }
10649
10650 port_clock = i9xx_calc_dpll_params(refclk, &clock);
10651 }
10652
10653 /*
10654 * This value includes pixel_multiplier. We will use
10655 * port_clock to compute adjusted_mode.crtc_clock in the
10656 * encoder's get_config() function.
10657 */
10658 pipe_config->port_clock = port_clock;
10659 }
10660
10661 int intel_dotclock_calculate(int link_freq,
10662 const struct intel_link_m_n *m_n)
10663 {
10664 /*
10665 * The calculation for the data clock is:
10666 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
10667 * But we want to avoid losing precision if possible, so:
10668 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
10669 *
10670 * and for the link M/N values the relation is simpler:
10671 * pixel_clock = (m * link_clock) / n
10672 */
10673
10674 if (!m_n->link_n)
10675 return 0;
10676
10677 return div_u64((u64)m_n->link_m * link_freq, m_n->link_n);
10678 }
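/* Worked example (editor's addition, illustrative numbers): a
 * 148500 kHz dotclock on a 270000 kHz DP link could use
 * link_m/link_n = 22/40, and 22 * 270000 / 40 = 148500 kHz recovers
 * the pixel clock. */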
10679
10680 static void ironlake_pch_clock_get(struct intel_crtc *crtc,
10681 struct intel_crtc_state *pipe_config)
10682 {
10683 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
10684
10685 /* read out port_clock from the DPLL */
10686 i9xx_crtc_clock_get(crtc, pipe_config);
10687
10688 /*
10689 * In case there is an active pipe without active ports,
10690 * we may need some idea for the dotclock anyway.
10691 * Calculate one based on the FDI configuration.
10692 */
10693 pipe_config->base.adjusted_mode.crtc_clock =
10694 intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
10695 &pipe_config->fdi_m_n);
10696 }
10697
10698 /** Returns the currently programmed mode of the given pipe. */
10699 struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
10700 struct drm_crtc *crtc)
10701 {
10702 struct drm_i915_private *dev_priv = dev->dev_private;
10703 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10704 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
10705 struct drm_display_mode *mode;
10706 struct intel_crtc_state *pipe_config;
10707 int htot = I915_READ(HTOTAL(cpu_transcoder));
10708 int hsync = I915_READ(HSYNC(cpu_transcoder));
10709 int vtot = I915_READ(VTOTAL(cpu_transcoder));
10710 int vsync = I915_READ(VSYNC(cpu_transcoder));
10711 enum pipe pipe = intel_crtc->pipe;
10712
10713 mode = kzalloc(sizeof(*mode), GFP_KERNEL);
10714 if (!mode)
10715 return NULL;
10716
10717 pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
10718 if (!pipe_config) {
10719 kfree(mode);
10720 return NULL;
10721 }
10722
10723 /*
10724 * Construct a pipe_config sufficient for getting the clock info
10725 * back out of crtc_clock_get.
10726 *
10727 * Note, if LVDS ever uses a non-1 pixel multiplier, we'll need
10728 * to use a real value here instead.
10729 */
10730 pipe_config->cpu_transcoder = (enum transcoder) pipe;
10731 pipe_config->pixel_multiplier = 1;
10732 pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(pipe));
10733 pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(pipe));
10734 pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(pipe));
10735 i9xx_crtc_clock_get(intel_crtc, pipe_config);
10736
10737 mode->clock = pipe_config->port_clock / pipe_config->pixel_multiplier;
10738 mode->hdisplay = (htot & 0xffff) + 1;
10739 mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
10740 mode->hsync_start = (hsync & 0xffff) + 1;
10741 mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
10742 mode->vdisplay = (vtot & 0xffff) + 1;
10743 mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
10744 mode->vsync_start = (vsync & 0xffff) + 1;
10745 mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
10746
10747 drm_mode_set_name(mode);
10748
10749 kfree(pipe_config);
10750
10751 return mode;
10752 }
10753
10754 void intel_mark_busy(struct drm_i915_private *dev_priv)
10755 {
10756 if (dev_priv->mm.busy)
10757 return;
10758
10759 intel_runtime_pm_get(dev_priv);
10760 i915_update_gfx_val(dev_priv);
10761 if (INTEL_GEN(dev_priv) >= 6)
10762 gen6_rps_busy(dev_priv);
10763 dev_priv->mm.busy = true;
10764 }
10765
10766 void intel_mark_idle(struct drm_i915_private *dev_priv)
10767 {
10768 if (!dev_priv->mm.busy)
10769 return;
10770
10771 dev_priv->mm.busy = false;
10772
10773 if (INTEL_GEN(dev_priv) >= 6)
10774 gen6_rps_idle(dev_priv);
10775
10776 intel_runtime_pm_put(dev_priv);
10777 }
10778
10779 static void intel_crtc_destroy(struct drm_crtc *crtc)
10780 {
10781 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10782 struct drm_device *dev = crtc->dev;
10783 struct intel_flip_work *work;
10784
10785 spin_lock_irq(&dev->event_lock);
10786 while (!list_empty(&intel_crtc->flip_work)) {
10787 work = list_first_entry(&intel_crtc->flip_work,
10788 struct intel_flip_work, head);
10789 list_del_init(&work->head);
10790 spin_unlock_irq(&dev->event_lock);
10791
10792 cancel_work_sync(&work->mmio_work);
10793 cancel_work_sync(&work->unpin_work);
10794 kfree(work);
10795
10796 spin_lock_irq(&dev->event_lock);
10797 }
10798 spin_unlock_irq(&dev->event_lock);
10799
10800 drm_crtc_cleanup(crtc);
10801
10802 kfree(intel_crtc);
10803 }
10804
10805 static void intel_crtc_post_flip_update(struct intel_flip_work *work,
10806 struct drm_crtc *crtc)
10807 {
10808 struct intel_crtc_state *crtc_state = work->new_crtc_state;
10809 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10810
10811 if (crtc_state->disable_cxsr)
10812 intel_crtc->wm.cxsr_allowed = true;
10813
10814 if (crtc_state->update_wm_post && crtc_state->base.active)
10815 intel_update_watermarks(crtc);
10816
10817 if (work->num_planes > 0 &&
10818 work->old_plane_state[0]->base.plane == crtc->primary) {
10819 struct intel_plane_state *plane_state =
10820 work->new_plane_state[0];
10821
10822 if (plane_state->visible &&
10823 (needs_modeset(&crtc_state->base) ||
10824 !work->old_plane_state[0]->visible))
10825 intel_post_enable_primary(crtc);
10826 }
10827 }
10828
10829 static void intel_unpin_work_fn(struct work_struct *__work)
10830 {
10831 struct intel_flip_work *work =
10832 container_of(__work, struct intel_flip_work, unpin_work);
10833 struct drm_crtc *crtc = work->old_crtc_state->base.crtc;
10834 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10835 struct drm_device *dev = crtc->dev;
10836 struct drm_i915_private *dev_priv = dev->dev_private;
10837 int i;
10838
10839 if (work->fb_bits)
10840 intel_frontbuffer_flip_complete(dev, work->fb_bits);
10841
10842 /*
10843 * When work->can_async_unpin is true, there's no way to ensure
10844 * that work->new_crtc_state contains valid memory during unpin
10845 * because intel_atomic_commit may free it before this runs.
10846 */
10847 if (!work->can_async_unpin)
10848 intel_crtc_post_flip_update(work, crtc);
10849
10850 if (work->fb_bits & to_intel_plane(crtc->primary)->frontbuffer_bit)
10851 intel_fbc_post_update(intel_crtc);
10852
10853 if (work->put_power_domains)
10854 modeset_put_power_domains(dev_priv, work->put_power_domains);
10855
10856 /* Make sure mmio work is completely finished before freeing all state here. */
10857 flush_work(&work->mmio_work);
10858
10859 if (!work->can_async_unpin)
10860 /* This must be called before work is unpinned for serialization. */
10861 intel_modeset_verify_crtc(crtc, &work->old_crtc_state->base,
10862 &work->new_crtc_state->base);
10863
10864 if (!work->can_async_unpin || !list_empty(&work->head)) {
10865 spin_lock_irq(&dev->event_lock);
10866 WARN(list_empty(&work->head) != work->can_async_unpin,
10867 "[CRTC:%i] Pin work %p async %i with %i planes, active %i -> %i ms %i\n",
10868 crtc->base.id, work, work->can_async_unpin, work->num_planes,
10869 work->old_crtc_state->base.active, work->new_crtc_state->base.active,
10870 needs_modeset(&work->new_crtc_state->base));
10871
10872 if (!list_empty(&work->head))
10873 list_del(&work->head);
10874
10875 wake_up_all(&dev_priv->pending_flip_queue);
10876 spin_unlock_irq(&dev->event_lock);
10877 }
10878
10879 intel_crtc_destroy_state(crtc, &work->old_crtc_state->base);
10880
10881 for (i = 0; i < work->num_planes; i++) {
10882 struct intel_plane_state *old_plane_state =
10883 work->old_plane_state[i];
10884 struct drm_framebuffer *old_fb = old_plane_state->base.fb;
10885 struct drm_plane *plane = old_plane_state->base.plane;
10886 struct drm_i915_gem_request *req;
10887
10888 req = old_plane_state->wait_req;
10889 old_plane_state->wait_req = NULL;
10890 i915_gem_request_unreference(req);
10891
10892 fence_put(old_plane_state->base.fence);
10893 old_plane_state->base.fence = NULL;
10894
10895 if (old_fb &&
10896 (plane->type != DRM_PLANE_TYPE_CURSOR ||
10897 !INTEL_INFO(dev_priv)->cursor_needs_physical)) {
10898 mutex_lock(&dev->struct_mutex);
10899 intel_unpin_fb_obj(old_fb, old_plane_state->base.rotation);
10900 mutex_unlock(&dev->struct_mutex);
10901 }
10902
10903 intel_plane_destroy_state(plane, &old_plane_state->base);
10904 }
10905
10906 if (!WARN_ON(atomic_read(&intel_crtc->unpin_work_count) == 0))
10907 atomic_dec(&intel_crtc->unpin_work_count);
10908
10909 kfree(work);
10910 }
10911
10913 static bool pageflip_finished(struct intel_crtc *crtc,
10914 struct intel_flip_work *work)
10915 {
10916 if (!atomic_read(&work->pending))
10917 return false;
10918
10919 smp_rmb();
10920
10921 /*
10922 * MMIO work completes when vblank is different from
10923 * flip_queued_vblank.
10924 */
10925 return intel_crtc_get_vblank_counter(crtc) != work->flip_queued_vblank;
10926 }
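/*
 * The atomic_read() of work->pending plus smp_rmb() above pair with the
 * write-side barrier where the flip is marked pending, so that once
 * pending is observed, flip_queued_vblank is known to be valid. The
 * vblank counter moving past that value then means the hardware has
 * latched the flip.
 */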
10927
10928 void intel_finish_page_flip_mmio(struct drm_i915_private *dev_priv, int pipe)
10929 {
10930 struct drm_device *dev = dev_priv->dev;
10931 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
10932 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10933 struct intel_flip_work *work;
10934 unsigned long flags;
10935
10936 /* Ignore early vblank irqs */
10937 if (!crtc)
10938 return;
10939
10940 /*
10941 * This is called both by irq handlers and the reset code (to complete
10942 * lost pageflips) so needs the full irqsave spinlocks.
10943 */
10944 spin_lock_irqsave(&dev->event_lock, flags);
10945 while (!list_empty(&intel_crtc->flip_work)) {
10946 work = list_first_entry(&intel_crtc->flip_work,
10947 struct intel_flip_work,
10948 head);
10949
10950 if (!pageflip_finished(intel_crtc, work) ||
10951 work_busy(&work->unpin_work))
10952 break;
10953
10954 page_flip_completed(intel_crtc, work);
10955 }
10956 spin_unlock_irqrestore(&dev->event_lock, flags);
10957 }
10958
10959 static void intel_mmio_flip_work_func(struct work_struct *w)
10960 {
10961 struct intel_flip_work *work =
10962 container_of(w, struct intel_flip_work, mmio_work);
10963 struct drm_crtc *crtc = work->old_crtc_state->base.crtc;
10964 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10965 struct intel_crtc_state *crtc_state = work->new_crtc_state;
10966 struct drm_device *dev = crtc->dev;
10967 struct drm_i915_private *dev_priv = dev->dev_private;
10968 struct drm_i915_gem_request *req;
10969 int i;
10970
10971 for (i = 0; i < work->num_planes; i++) {
10972 struct intel_plane_state *old_plane_state = work->old_plane_state[i];
10973
10974 /* For framebuffer backed by dmabuf, wait for fence */
10975 if (old_plane_state->base.fence)
10976 WARN_ON(fence_wait(old_plane_state->base.fence, false) < 0);
10977
10978 req = old_plane_state->wait_req;
10979 if (!req)
10980 continue;
10981
10982 WARN_ON(__i915_wait_request(req, false, NULL,
10983 &dev_priv->rps.mmioflips));
10984 }
10985
10986 intel_frontbuffer_flip_prepare(dev, crtc_state->fb_bits);
10987
10988 intel_pipe_update_start(intel_crtc);
10989 if (!needs_modeset(&crtc_state->base)) {
10990 if (crtc_state->base.color_mgmt_changed || crtc_state->update_pipe) {
10991 intel_color_set_csc(&crtc_state->base);
10992 intel_color_load_luts(&crtc_state->base);
10993 }
10994
10995 if (crtc_state->update_pipe)
10996 intel_update_pipe_config(intel_crtc, work->old_crtc_state);
10997 else if (INTEL_INFO(dev)->gen >= 9)
10998 skl_detach_scalers(intel_crtc);
10999 }
11000
11001 for (i = 0; i < work->num_planes; i++) {
11002 struct intel_plane_state *new_plane_state = work->new_plane_state[i];
11003 struct intel_plane *plane = to_intel_plane(new_plane_state->base.plane);
11004
11005 plane->update_plane(&plane->base, crtc_state, new_plane_state);
11006 }
11007
11008 intel_pipe_update_end(intel_crtc, work);
11009 }
11010
11011 static struct fence *intel_get_excl_fence(struct drm_i915_gem_object *obj)
11012 {
11013 struct reservation_object *resv;
11014
11016 if (!obj->base.dma_buf)
11017 return NULL;
11018
11019 resv = obj->base.dma_buf->resv;
11020
11021 /* For framebuffer backed by dmabuf, wait for fence */
11022 while (1) {
11023 struct fence *fence_excl, *ret = NULL;
11024
11025 rcu_read_lock();
11026
11027 fence_excl = rcu_dereference(resv->fence_excl);
11028 if (fence_excl)
11029 ret = fence_get_rcu(fence_excl);
11030
11031 rcu_read_unlock();
11032
11033 if (ret == fence_excl)
11034 return ret;
11035 }
11036 }
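/*
 * The loop above is the usual RCU pattern for taking a reference on an
 * exclusive fence: fence_get_rcu() may fail if the fence's refcount
 * already hit zero, and resv->fence_excl may be swapped between the
 * dereference and the get. The result is returned only when the
 * reference was obtained on the exact pointer sampled (ret == fence_excl,
 * including the NULL == NULL case); otherwise the sequence is retried.
 */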
11037
11038 static int intel_crtc_page_flip(struct drm_crtc *crtc,
11039 struct drm_framebuffer *fb,
11040 struct drm_pending_vblank_event *event,
11041 uint32_t page_flip_flags)
11042 {
11043 struct drm_device *dev = crtc->dev;
11044 struct drm_i915_private *dev_priv = dev->dev_private;
11045 struct drm_plane_state *old_state, *new_state = NULL;
11046 struct drm_crtc_state *new_crtc_state = NULL;
11047 struct drm_framebuffer *old_fb = crtc->primary->state->fb;
11048 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
11049 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11050 struct drm_plane *primary = crtc->primary;
11051 struct intel_flip_work *work;
11052 int ret;
11053
11054 old_state = crtc->primary->state;
11055
11056 if (!crtc->state->active)
11057 return -EINVAL;
11058
11059 /*
11060 * drm_mode_page_flip_ioctl() should already catch this, but double
11061 * check to be safe. In the future we may enable pageflipping from
11062 * a disabled primary plane.
11063 */
11064 if (WARN_ON(intel_fb_obj(old_fb) == NULL))
11065 return -EBUSY;
11066
11067 /* Can't change pixel format via MI display flips. */
11068 if (fb->pixel_format != old_fb->pixel_format)
11069 return -EINVAL;
11070
11071 /*
11072 * TILEOFF/LINOFF registers can't be changed via MI display flips.
11073 * Note that pitch changes could also affect these registers.
11074 */
11075 if (INTEL_INFO(dev)->gen > 3 &&
11076 (fb->offsets[0] != old_fb->offsets[0] ||
11077 fb->pitches[0] != old_fb->pitches[0]))
11078 return -EINVAL;
11079
11080 work = kzalloc(sizeof(*work), GFP_KERNEL);
11081 new_crtc_state = intel_crtc_duplicate_state(crtc);
11082 new_state = intel_plane_duplicate_state(primary);
11083
11084 if (!work || !new_crtc_state || !new_state) {
11085 ret = -ENOMEM;
11086 goto cleanup;
11087 }
11088
11089 drm_framebuffer_unreference(new_state->fb);
11090 drm_framebuffer_reference(fb);
11091 new_state->fb = fb;
11092
11093 work->event = event;
11094 INIT_WORK(&work->unpin_work, intel_unpin_work_fn);
11095 INIT_WORK(&work->mmio_work, intel_mmio_flip_work_func);
11096
11097 work->new_crtc_state = to_intel_crtc_state(new_crtc_state);
11098 work->old_crtc_state = intel_crtc->config;
11099
11100 work->fb_bits = to_intel_plane(primary)->frontbuffer_bit;
11101 work->new_crtc_state->fb_bits = work->fb_bits;
11102
11103 work->can_async_unpin = true;
11104 work->num_planes = 1;
11105 work->old_plane_state[0] = to_intel_plane_state(old_state);
11106 work->new_plane_state[0] = to_intel_plane_state(new_state);
11107
11108 /* Step 1: vblank waiting and workqueue throttling,
11109 * similar to intel_atomic_prepare_commit
11110 */
11111 ret = drm_crtc_vblank_get(crtc);
11112 if (ret)
11113 goto cleanup;
11114
11115 /* We borrow the event spin lock for protecting flip_work */
11116 spin_lock_irq(&dev->event_lock);
11117 if (!list_empty(&intel_crtc->flip_work)) {
11118 struct intel_flip_work *old_work;
11119
11120 old_work = list_last_entry(&intel_crtc->flip_work,
11121 struct intel_flip_work, head);
11122
11123 /* Before declaring the flip queue wedged, check if
11124 * the hardware completed the operation behind our backs.
11125 */
11126 if (pageflip_finished(intel_crtc, old_work)) {
11127 DRM_DEBUG_DRIVER("flip queue: previous flip completed, continuing\n");
11128 page_flip_completed(intel_crtc, old_work);
11129 } else {
11130 DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
11131 spin_unlock_irq(&dev->event_lock);
11132
11133 ret = -EBUSY;
11134 goto cleanup_vblank;
11135 }
11136 }
11137 list_add_tail(&work->head, &intel_crtc->flip_work);
11138 spin_unlock_irq(&dev->event_lock);
11139
11140 if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
11141 flush_workqueue(dev_priv->wq);
11142
11143 /* Step 2, similar to intel_prepare_plane_fb */
11144 ret = mutex_lock_interruptible(&dev->struct_mutex);
11145 if (ret)
11146 goto cleanup_work;
11147
11148 ret = intel_pin_and_fence_fb_obj(fb, new_state->rotation);
11149 if (ret)
11150 goto cleanup_unlock;
11151
11152 i915_gem_track_fb(intel_fb_obj(old_fb), obj,
11153 to_intel_plane(primary)->frontbuffer_bit);
11154
11155 /* point of no return, swap state */
11156 primary->state = new_state;
11157 crtc->state = new_crtc_state;
11158 intel_crtc->config = to_intel_crtc_state(new_crtc_state);
11159 primary->fb = fb;
11160
11161 /* scheduling flip work */
11162 atomic_inc(&intel_crtc->unpin_work_count);
11163
11164 if (obj->last_write_req &&
11165 !i915_gem_request_completed(obj->last_write_req, true))
11166 i915_gem_request_assign(&work->old_plane_state[0]->wait_req,
11167 obj->last_write_req);
11168
11169 if (obj->base.dma_buf)
11170 work->old_plane_state[0]->base.fence = intel_get_excl_fence(obj);
11171
11172 intel_fbc_pre_update(intel_crtc);
11173
11174 schedule_work(&work->mmio_work);
11175
11176 mutex_unlock(&dev->struct_mutex);
11177
11178 trace_i915_flip_request(intel_crtc->plane, obj);
11179
11180 return 0;
11181
11182 cleanup_unlock:
11183 mutex_unlock(&dev->struct_mutex);
11184 cleanup_work:
11185 spin_lock_irq(&dev->event_lock);
11186 list_del(&work->head);
11187 spin_unlock_irq(&dev->event_lock);
11188
11189 cleanup_vblank:
11190 drm_crtc_vblank_put(crtc);
11191 cleanup:
11192 if (new_state)
11193 intel_plane_destroy_state(primary, new_state);
11194
11195 if (new_crtc_state)
11196 intel_crtc_destroy_state(crtc, new_crtc_state);
11197
11198 kfree(work);
11199 return ret;
11200 }
11201
11203 /**
11204 * intel_wm_need_update - Check whether watermarks need updating
11205 * @plane: drm plane
11206 * @state: new plane state
11207 *
11208 * Check current plane state versus the new one to determine whether
11209 * watermarks need to be recalculated.
11210 *
11211 * Returns true if the watermarks need to be recalculated, false otherwise.
11212 */
11213 static bool intel_wm_need_update(struct drm_plane *plane,
11214 struct drm_plane_state *state)
11215 {
11216 struct intel_plane_state *new = to_intel_plane_state(state);
11217 struct intel_plane_state *cur = to_intel_plane_state(plane->state);
11218
11219 /* Update watermarks on visibility, tiling, rotation or size changes. */
11220 if (new->visible != cur->visible)
11221 return true;
11222
11223 if (!cur->base.fb || !new->base.fb)
11224 return false;
11225
11226 if (cur->base.fb->modifier[0] != new->base.fb->modifier[0] ||
11227 cur->base.rotation != new->base.rotation ||
11228 drm_rect_width(&new->src) != drm_rect_width(&cur->src) ||
11229 drm_rect_height(&new->src) != drm_rect_height(&cur->src) ||
11230 drm_rect_width(&new->dst) != drm_rect_width(&cur->dst) ||
11231 drm_rect_height(&new->dst) != drm_rect_height(&cur->dst))
11232 return true;
11233
11234 return false;
11235 }
11236
11237 static bool needs_scaling(struct intel_plane_state *state)
11238 {
11239 int src_w = drm_rect_width(&state->src) >> 16;
11240 int src_h = drm_rect_height(&state->src) >> 16;
11241 int dst_w = drm_rect_width(&state->dst);
11242 int dst_h = drm_rect_height(&state->dst);
11243
11244 return (src_w != dst_w || src_h != dst_h);
11245 }
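/*
 * The >> 16 above converts the source rectangle from 16.16 fixed point
 * (how plane src coordinates are stored) to whole pixels before the
 * comparison with the integer destination size. Example: a 1920x1080
 * source displayed in a 960x540 destination needs scaling and returns
 * true; identical source and destination sizes return false.
 */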
11246
11247 int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
11248 struct drm_plane_state *plane_state)
11249 {
11250 struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc_state);
11251 struct drm_crtc *crtc = crtc_state->crtc;
11252 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11253 struct drm_plane *plane = plane_state->plane;
11254 struct drm_device *dev = crtc->dev;
11255 struct drm_i915_private *dev_priv = to_i915(dev);
11256 struct intel_plane_state *old_plane_state =
11257 to_intel_plane_state(plane->state);
11258 int idx = intel_crtc->base.base.id, ret;
11259 bool mode_changed = needs_modeset(crtc_state);
11260 bool was_crtc_enabled = crtc->state->active;
11261 bool is_crtc_enabled = crtc_state->active;
11262 bool turn_off, turn_on, visible, was_visible;
11263 struct drm_framebuffer *fb = plane_state->fb;
11264
11265 if (INTEL_INFO(dev)->gen >= 9 &&
11266 plane->type != DRM_PLANE_TYPE_CURSOR) {
11267 ret = skl_update_scaler_plane(
11268 to_intel_crtc_state(crtc_state),
11269 to_intel_plane_state(plane_state));
11270 if (ret)
11271 return ret;
11272 }
11273
11274 was_visible = old_plane_state->visible;
11275 visible = to_intel_plane_state(plane_state)->visible;
11276
11277 if (!was_crtc_enabled && WARN_ON(was_visible))
11278 was_visible = false;
11279
11280 /*
11281 * Visibility is calculated as if the crtc was on, but
11282 * after scaler setup everything depends on it being off
11283 * when the crtc isn't active.
11284 *
11285 * FIXME this is wrong for watermarks. Watermarks should also
11286 * be computed as if the pipe would be active. Perhaps move
11287 * per-plane wm computation to the .check_plane() hook, and
11288 * only combine the results from all planes in the current place?
11289 */
11290 if (!is_crtc_enabled)
11291 to_intel_plane_state(plane_state)->visible = visible = false;
11292
11293 if (!was_visible && !visible)
11294 return 0;
11295
11296 if (fb != old_plane_state->base.fb)
11297 pipe_config->fb_changed = true;
11298
11299 turn_off = was_visible && (!visible || mode_changed);
11300 turn_on = visible && (!was_visible || mode_changed);
11301
11302 DRM_DEBUG_ATOMIC("[CRTC:%i] has [PLANE:%i] with fb %i\n", idx,
11303 plane->base.id, fb ? fb->base.id : -1);
11304
11305 DRM_DEBUG_ATOMIC("[PLANE:%i] visible %i -> %i, off %i, on %i, ms %i\n",
11306 plane->base.id, was_visible, visible,
11307 turn_off, turn_on, mode_changed);
11308
11309 if (turn_on) {
11310 pipe_config->update_wm_pre = true;
11311
11312 /* must disable cxsr around plane enable/disable */
11313 if (plane->type != DRM_PLANE_TYPE_CURSOR)
11314 pipe_config->disable_cxsr = true;
11315 } else if (turn_off) {
11316 pipe_config->update_wm_post = true;
11317
11318 /* must disable cxsr around plane enable/disable */
11319 if (plane->type != DRM_PLANE_TYPE_CURSOR)
11320 pipe_config->disable_cxsr = true;
11321 } else if (intel_wm_need_update(plane, plane_state)) {
11322 /* FIXME bollocks */
11323 pipe_config->update_wm_pre = true;
11324 pipe_config->update_wm_post = true;
11325 }
11326
11327 /* Pre-gen9 platforms need two-step watermark updates */
11328 if ((pipe_config->update_wm_pre || pipe_config->update_wm_post) &&
11329 INTEL_INFO(dev)->gen < 9 && dev_priv->display.optimize_watermarks)
11330 to_intel_crtc_state(crtc_state)->wm.need_postvbl_update = true;
11331
11332 if (visible || was_visible)
11333 pipe_config->fb_bits |= to_intel_plane(plane)->frontbuffer_bit;
11334
11335 /*
11336 * WaCxSRDisabledForSpriteScaling:ivb
11337 *
11338 * pipe_config->update_wm_pre/post were already set above, so this
11339 * flag will take effect when we commit and program watermarks.
11340 */
11341 if (plane->type == DRM_PLANE_TYPE_OVERLAY && IS_IVYBRIDGE(dev) &&
11342 needs_scaling(to_intel_plane_state(plane_state)) &&
11343 !needs_scaling(old_plane_state))
11344 pipe_config->disable_lp_wm = true;
11345
11346 return 0;
11347 }
11348
11349 static bool encoders_cloneable(const struct intel_encoder *a,
11350 const struct intel_encoder *b)
11351 {
11352 /* masks could be asymmetric, so check both ways */
11353 return a == b || (a->cloneable & (1 << b->type) &&
11354 b->cloneable & (1 << a->type));
11355 }
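/*
 * The two-way test matters because cloneable is a per-encoder bitmask
 * of the *other* output types that encoder tolerates sharing a crtc
 * with: encoder a may list b's type while b does not list a's. Cloning
 * is only allowed when both masks agree, or trivially when a and b are
 * the same encoder.
 */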
11356
11357 static bool check_single_encoder_cloning(struct drm_atomic_state *state,
11358 struct intel_crtc *crtc,
11359 struct intel_encoder *encoder)
11360 {
11361 struct intel_encoder *source_encoder;
11362 struct drm_connector *connector;
11363 struct drm_connector_state *connector_state;
11364 int i;
11365
11366 for_each_connector_in_state(state, connector, connector_state, i) {
11367 if (connector_state->crtc != &crtc->base)
11368 continue;
11369
11370 source_encoder =
11371 to_intel_encoder(connector_state->best_encoder);
11372 if (!encoders_cloneable(encoder, source_encoder))
11373 return false;
11374 }
11375
11376 return true;
11377 }
11378
11379 static bool check_encoder_cloning(struct drm_atomic_state *state,
11380 struct intel_crtc *crtc)
11381 {
11382 struct intel_encoder *encoder;
11383 struct drm_connector *connector;
11384 struct drm_connector_state *connector_state;
11385 int i;
11386
11387 for_each_connector_in_state(state, connector, connector_state, i) {
11388 if (connector_state->crtc != &crtc->base)
11389 continue;
11390
11391 encoder = to_intel_encoder(connector_state->best_encoder);
11392 if (!check_single_encoder_cloning(state, crtc, encoder))
11393 return false;
11394 }
11395
11396 return true;
11397 }
11398
11399 static int intel_crtc_atomic_check(struct drm_crtc *crtc,
11400 struct drm_crtc_state *crtc_state)
11401 {
11402 struct drm_device *dev = crtc->dev;
11403 struct drm_i915_private *dev_priv = dev->dev_private;
11404 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11405 struct intel_crtc_state *pipe_config =
11406 to_intel_crtc_state(crtc_state);
11407 struct drm_atomic_state *state = crtc_state->state;
11408 int ret;
11409 bool mode_changed = needs_modeset(crtc_state);
11410
11411 if (mode_changed && !check_encoder_cloning(state, intel_crtc)) {
11412 DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
11413 return -EINVAL;
11414 }
11415
11416 if (mode_changed && !crtc_state->active)
11417 pipe_config->update_wm_post = true;
11418
11419 if (mode_changed && crtc_state->enable &&
11420 dev_priv->display.crtc_compute_clock &&
11421 !WARN_ON(pipe_config->shared_dpll)) {
11422 ret = dev_priv->display.crtc_compute_clock(intel_crtc,
11423 pipe_config);
11424 if (ret)
11425 return ret;
11426 }
11427
11428 if (crtc_state->color_mgmt_changed) {
11429 ret = intel_color_check(crtc, crtc_state);
11430 if (ret)
11431 return ret;
11432 }
11433
11434 ret = 0;
11435 if (dev_priv->display.compute_pipe_wm) {
11436 ret = dev_priv->display.compute_pipe_wm(pipe_config);
11437 if (ret) {
11438 DRM_DEBUG_KMS("Target pipe watermarks are invalid\n");
11439 return ret;
11440 }
11441 }
11442
11443 if (dev_priv->display.compute_intermediate_wm &&
11444 !to_intel_atomic_state(state)->skip_intermediate_wm) {
11445 if (WARN_ON(!dev_priv->display.compute_pipe_wm))
11446 return 0;
11447
11448 /*
11449 * Calculate 'intermediate' watermarks that satisfy both the
11450 * old state and the new state. We can program these
11451 * immediately.
11452 */
11453 ret = dev_priv->display.compute_intermediate_wm(crtc->dev,
11454 intel_crtc,
11455 pipe_config);
11456 if (ret) {
11457 DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n");
11458 return ret;
11459 }
11460 } else if (dev_priv->display.compute_intermediate_wm) {
11461 if (HAS_PCH_SPLIT(dev_priv) && INTEL_GEN(dev_priv) < 9)
11462 pipe_config->wm.ilk.intermediate = pipe_config->wm.ilk.optimal;
11463 }
11464
11465 if (INTEL_INFO(dev)->gen >= 9) {
11466 if (mode_changed)
11467 ret = skl_update_scaler_crtc(pipe_config);
11468
11469 if (!ret)
11470 ret = intel_atomic_setup_scalers(dev, intel_crtc,
11471 pipe_config);
11472 }
11473
11474 return ret;
11475 }
11476
11477 static const struct drm_crtc_helper_funcs intel_helper_funcs = {
11478 .mode_set_base_atomic = intel_pipe_set_base_atomic,
11479 .atomic_begin = intel_begin_crtc_commit,
11480 .atomic_flush = intel_finish_crtc_commit,
11481 .atomic_check = intel_crtc_atomic_check,
11482 };
11483
11484 static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
11485 {
11486 struct intel_connector *connector;
11487
11488 for_each_intel_connector(dev, connector) {
11489 if (connector->base.state->crtc)
11490 drm_connector_unreference(&connector->base);
11491
11492 if (connector->base.encoder) {
11493 connector->base.state->best_encoder =
11494 connector->base.encoder;
11495 connector->base.state->crtc =
11496 connector->base.encoder->crtc;
11497
11498 drm_connector_reference(&connector->base);
11499 } else {
11500 connector->base.state->best_encoder = NULL;
11501 connector->base.state->crtc = NULL;
11502 }
11503 }
11504 }
11505
11506 static void
11507 connected_sink_compute_bpp(struct intel_connector *connector,
11508 struct intel_crtc_state *pipe_config)
11509 {
11510 int bpp = pipe_config->pipe_bpp;
11511
11512 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] checking for sink bpp constrains\n",
11513 connector->base.base.id,
11514 connector->base.name);
11515
11516 /* Don't use an invalid EDID bpc value */
11517 if (connector->base.display_info.bpc &&
11518 connector->base.display_info.bpc * 3 < bpp) {
11519 DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n",
11520 bpp, connector->base.display_info.bpc*3);
11521 pipe_config->pipe_bpp = connector->base.display_info.bpc*3;
11522 }
11523
11524 /* Clamp bpp to default limit on screens without EDID 1.4 */
11525 if (connector->base.display_info.bpc == 0) {
11526 int type = connector->base.connector_type;
11527 int clamp_bpp = 24;
11528
11529 /* Fall back to 18 bpp when DP sink capability is unknown. */
11530 if (type == DRM_MODE_CONNECTOR_DisplayPort ||
11531 type == DRM_MODE_CONNECTOR_eDP)
11532 clamp_bpp = 18;
11533
11534 if (bpp > clamp_bpp) {
11535 DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of %d\n",
11536 bpp, clamp_bpp);
11537 pipe_config->pipe_bpp = clamp_bpp;
11538 }
11539 }
11540 }
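/*
 * Worked example: a panel reporting 6 bpc in its EDID clamps a 24 bpp
 * pipe to 6 * 3 = 18 bpp, while a sink reporting no bpc at all is
 * clamped to the 24 bpp default, or to 18 bpp on DP/eDP where an
 * unknown sink has to be assumed to only handle the 6 bpc minimum.
 */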
11541
11542 static int
11543 compute_baseline_pipe_bpp(struct intel_crtc *crtc,
11544 struct intel_crtc_state *pipe_config)
11545 {
11546 struct drm_device *dev = crtc->base.dev;
11547 struct drm_atomic_state *state;
11548 struct drm_connector *connector;
11549 struct drm_connector_state *connector_state;
11550 int bpp, i;
11551
11552 if ((IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)))
11553 bpp = 10*3;
11554 else if (INTEL_INFO(dev)->gen >= 5)
11555 bpp = 12*3;
11556 else
11557 bpp = 8*3;
11558
11560 pipe_config->pipe_bpp = bpp;
11561
11562 state = pipe_config->base.state;
11563
11564 /* Clamp display bpp to EDID value */
11565 for_each_connector_in_state(state, connector, connector_state, i) {
11566 if (connector_state->crtc != &crtc->base)
11567 continue;
11568
11569 connected_sink_compute_bpp(to_intel_connector(connector),
11570 pipe_config);
11571 }
11572
11573 return bpp;
11574 }
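/*
 * Note the asymmetry: the returned value is the platform baseline bpp
 * from before the EDID clamping, while the possibly clamped result is
 * left in pipe_config->pipe_bpp. The caller can compare the two to see
 * how far the sinks constrained the pipe (see the "hw max bpp" debug
 * message in intel_modeset_pipe_config()).
 */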
11575
11576 static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
11577 {
11578 DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
11579 "type: 0x%x flags: 0x%x\n",
11580 mode->crtc_clock,
11581 mode->crtc_hdisplay, mode->crtc_hsync_start,
11582 mode->crtc_hsync_end, mode->crtc_htotal,
11583 mode->crtc_vdisplay, mode->crtc_vsync_start,
11584 mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags);
11585 }
11586
11587 static void intel_dump_pipe_config(struct intel_crtc *crtc,
11588 struct intel_crtc_state *pipe_config,
11589 const char *context)
11590 {
11591 struct drm_device *dev = crtc->base.dev;
11592 struct drm_plane *plane;
11593 struct intel_plane *intel_plane;
11594 struct intel_plane_state *state;
11595 struct drm_framebuffer *fb;
11596
11597 DRM_DEBUG_KMS("[CRTC:%d]%s config %p for pipe %c\n", crtc->base.base.id,
11598 context, pipe_config, pipe_name(crtc->pipe));
11599
11600 DRM_DEBUG_KMS("cpu_transcoder: %s\n", transcoder_name(pipe_config->cpu_transcoder));
11601 DRM_DEBUG_KMS("pipe bpp: %i, dithering: %i\n",
11602 pipe_config->pipe_bpp, pipe_config->dither);
11603 DRM_DEBUG_KMS("fdi/pch: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
11604 pipe_config->has_pch_encoder,
11605 pipe_config->fdi_lanes,
11606 pipe_config->fdi_m_n.gmch_m, pipe_config->fdi_m_n.gmch_n,
11607 pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n,
11608 pipe_config->fdi_m_n.tu);
11609 DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
11610 pipe_config->has_dp_encoder,
11611 pipe_config->lane_count,
11612 pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n,
11613 pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n,
11614 pipe_config->dp_m_n.tu);
11615
11616 DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m2: %u, gmch_n2: %u, link_m2: %u, link_n2: %u, tu2: %u\n",
11617 pipe_config->has_dp_encoder,
11618 pipe_config->lane_count,
11619 pipe_config->dp_m2_n2.gmch_m,
11620 pipe_config->dp_m2_n2.gmch_n,
11621 pipe_config->dp_m2_n2.link_m,
11622 pipe_config->dp_m2_n2.link_n,
11623 pipe_config->dp_m2_n2.tu);
11624
11625 DRM_DEBUG_KMS("audio: %i, infoframes: %i\n",
11626 pipe_config->has_audio,
11627 pipe_config->has_infoframe);
11628
11629 DRM_DEBUG_KMS("requested mode:\n");
11630 drm_mode_debug_printmodeline(&pipe_config->base.mode);
11631 DRM_DEBUG_KMS("adjusted mode:\n");
11632 drm_mode_debug_printmodeline(&pipe_config->base.adjusted_mode);
11633 intel_dump_crtc_timings(&pipe_config->base.adjusted_mode);
11634 DRM_DEBUG_KMS("port clock: %d\n", pipe_config->port_clock);
11635 DRM_DEBUG_KMS("pipe src size: %dx%d\n",
11636 pipe_config->pipe_src_w, pipe_config->pipe_src_h);
11637 DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
11638 crtc->num_scalers,
11639 pipe_config->scaler_state.scaler_users,
11640 pipe_config->scaler_state.scaler_id);
11641 DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
11642 pipe_config->gmch_pfit.control,
11643 pipe_config->gmch_pfit.pgm_ratios,
11644 pipe_config->gmch_pfit.lvds_border_bits);
11645 DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
11646 pipe_config->pch_pfit.pos,
11647 pipe_config->pch_pfit.size,
11648 pipe_config->pch_pfit.enabled ? "enabled" : "disabled");
11649 DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled);
11650 DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide);
11651
11652 if (IS_BROXTON(dev)) {
11653 DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
11654 "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
11655 "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
11656 pipe_config->ddi_pll_sel,
11657 pipe_config->dpll_hw_state.ebb0,
11658 pipe_config->dpll_hw_state.ebb4,
11659 pipe_config->dpll_hw_state.pll0,
11660 pipe_config->dpll_hw_state.pll1,
11661 pipe_config->dpll_hw_state.pll2,
11662 pipe_config->dpll_hw_state.pll3,
11663 pipe_config->dpll_hw_state.pll6,
11664 pipe_config->dpll_hw_state.pll8,
11665 pipe_config->dpll_hw_state.pll9,
11666 pipe_config->dpll_hw_state.pll10,
11667 pipe_config->dpll_hw_state.pcsdw12);
11668 } else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
11669 DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: "
11670 "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
11671 pipe_config->ddi_pll_sel,
11672 pipe_config->dpll_hw_state.ctrl1,
11673 pipe_config->dpll_hw_state.cfgcr1,
11674 pipe_config->dpll_hw_state.cfgcr2);
11675 } else if (HAS_DDI(dev)) {
11676 DRM_DEBUG_KMS("ddi_pll_sel: 0x%x; dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
11677 pipe_config->ddi_pll_sel,
11678 pipe_config->dpll_hw_state.wrpll,
11679 pipe_config->dpll_hw_state.spll);
11680 } else {
11681 DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
11682 "fp0: 0x%x, fp1: 0x%x\n",
11683 pipe_config->dpll_hw_state.dpll,
11684 pipe_config->dpll_hw_state.dpll_md,
11685 pipe_config->dpll_hw_state.fp0,
11686 pipe_config->dpll_hw_state.fp1);
11687 }
11688
11689 DRM_DEBUG_KMS("planes on this crtc\n");
11690 list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
11691 intel_plane = to_intel_plane(plane);
11692 if (intel_plane->pipe != crtc->pipe)
11693 continue;
11694
11695 state = to_intel_plane_state(plane->state);
11696 fb = state->base.fb;
11697 if (!fb) {
11698 DRM_DEBUG_KMS("%s PLANE:%d plane: %u.%u idx: %d "
11699 "disabled, scaler_id = %d\n",
11700 plane->type == DRM_PLANE_TYPE_CURSOR ? "CURSOR" : "STANDARD",
11701 plane->base.id, intel_plane->pipe,
11702 (crtc->base.primary == plane) ? 0 : intel_plane->plane + 1,
11703 drm_plane_index(plane), state->scaler_id);
11704 continue;
11705 }
11706
11707 DRM_DEBUG_KMS("%s PLANE:%d plane: %u.%u idx: %d enabled",
11708 plane->type == DRM_PLANE_TYPE_CURSOR ? "CURSOR" : "STANDARD",
11709 plane->base.id, intel_plane->pipe,
11710 crtc->base.primary == plane ? 0 : intel_plane->plane + 1,
11711 drm_plane_index(plane));
11712 DRM_DEBUG_KMS("\tFB:%d, fb = %ux%u format = 0x%x",
11713 fb->base.id, fb->width, fb->height, fb->pixel_format);
11714 DRM_DEBUG_KMS("\tscaler:%d src (%u, %u) %ux%u dst (%u, %u) %ux%u\n",
11715 state->scaler_id,
11716 state->src.x1 >> 16, state->src.y1 >> 16,
11717 drm_rect_width(&state->src) >> 16,
11718 drm_rect_height(&state->src) >> 16,
11719 state->dst.x1, state->dst.y1,
11720 drm_rect_width(&state->dst), drm_rect_height(&state->dst));
11721 }
11722 }
11723
11724 static bool check_digital_port_conflicts(struct drm_atomic_state *state)
11725 {
11726 struct drm_device *dev = state->dev;
11727 struct drm_connector *connector;
11728 unsigned int used_ports = 0;
11729
11730 /*
11731 * Walk the connector list instead of the encoder
11732 * list to detect the problem on ddi platforms
11733 * where there's just one encoder per digital port.
11734 */
11735 drm_for_each_connector(connector, dev) {
11736 struct drm_connector_state *connector_state;
11737 struct intel_encoder *encoder;
11738
11739 connector_state = drm_atomic_get_existing_connector_state(state, connector);
11740 if (!connector_state)
11741 connector_state = connector->state;
11742
11743 if (!connector_state->best_encoder)
11744 continue;
11745
11746 encoder = to_intel_encoder(connector_state->best_encoder);
11747
11748 WARN_ON(!connector_state->crtc);
11749
11750 switch (encoder->type) {
11751 unsigned int port_mask;
11752 case INTEL_OUTPUT_UNKNOWN:
11753 if (WARN_ON(!HAS_DDI(dev)))
11754 break;
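/* else: fall through */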
11755 case INTEL_OUTPUT_DISPLAYPORT:
11756 case INTEL_OUTPUT_HDMI:
11757 case INTEL_OUTPUT_EDP:
11758 port_mask = 1 << enc_to_dig_port(&encoder->base)->port;
11759
11760 /* the same port mustn't appear more than once */
11761 if (used_ports & port_mask)
11762 return false;
11763
11764 used_ports |= port_mask;
11765 default:
11766 break;
11767 }
11768 }
11769
11770 return true;
11771 }
11772
11773 static void
11774 clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
11775 {
11776 struct drm_crtc_state tmp_state;
11777 struct intel_crtc_scaler_state scaler_state;
11778 struct intel_dpll_hw_state dpll_hw_state;
11779 struct intel_shared_dpll *shared_dpll;
11780 uint32_t ddi_pll_sel;
11781 bool force_thru;
11782
11783 /* FIXME: before the switch to atomic started, a new pipe_config was
11784 * kzalloc'd. Code that depends on any field being zero should be
11785 * fixed, so that the crtc_state can be safely duplicated. For now,
11786 * only fields that are known to not cause problems are preserved. */
11787
11788 tmp_state = crtc_state->base;
11789 scaler_state = crtc_state->scaler_state;
11790 shared_dpll = crtc_state->shared_dpll;
11791 dpll_hw_state = crtc_state->dpll_hw_state;
11792 ddi_pll_sel = crtc_state->ddi_pll_sel;
11793 force_thru = crtc_state->pch_pfit.force_thru;
11794
11795 memset(crtc_state, 0, sizeof(*crtc_state));
11796
11797 crtc_state->base = tmp_state;
11798 crtc_state->scaler_state = scaler_state;
11799 crtc_state->shared_dpll = shared_dpll;
11800 crtc_state->dpll_hw_state = dpll_hw_state;
11801 crtc_state->ddi_pll_sel = ddi_pll_sel;
11802 crtc_state->pch_pfit.force_thru = force_thru;
11803 }
11804
11805 static int
11806 intel_modeset_pipe_config(struct drm_crtc *crtc,
11807 struct intel_crtc_state *pipe_config)
11808 {
11809 struct drm_atomic_state *state = pipe_config->base.state;
11810 struct intel_encoder *encoder;
11811 struct drm_connector *connector;
11812 struct drm_connector_state *connector_state;
11813 int base_bpp, ret = -EINVAL;
11814 int i;
11815 bool retry = true;
11816
11817 clear_intel_crtc_state(pipe_config);
11818
11819 pipe_config->cpu_transcoder =
11820 (enum transcoder) to_intel_crtc(crtc)->pipe;
11821
11822 /*
11823 * Sanitize sync polarity flags based on requested ones. If neither
11824 * positive nor negative polarity is requested, treat this as meaning
11825 * negative polarity.
11826 */
11827 if (!(pipe_config->base.adjusted_mode.flags &
11828 (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
11829 pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
11830
11831 if (!(pipe_config->base.adjusted_mode.flags &
11832 (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
11833 pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
11834
11835 base_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
11836 pipe_config);
11837 if (base_bpp < 0)
11838 goto fail;
11839
11840 /*
11841 * Determine the real pipe dimensions. Note that stereo modes can
11842 * increase the actual pipe size due to the frame doubling and
11843 * insertion of additional space for blanks between the frames. This
11844 * is stored in the crtc timings. We use the requested mode to do this
11845 * computation to clearly distinguish it from the adjusted mode, which
11846 * can be changed by the connectors in the below retry loop.
11847 */
11848 drm_crtc_get_hv_timing(&pipe_config->base.mode,
11849 &pipe_config->pipe_src_w,
11850 &pipe_config->pipe_src_h);
11851
11852 encoder_retry:
11853 /* Ensure the port clock defaults are reset when retrying. */
11854 pipe_config->port_clock = 0;
11855 pipe_config->pixel_multiplier = 1;
11856
11857 /* Fill in default crtc timings, allow encoders to overwrite them. */
11858 drm_mode_set_crtcinfo(&pipe_config->base.adjusted_mode,
11859 CRTC_STEREO_DOUBLE);
11860
11861 /* Pass our mode to the connectors and the CRTC to give them a chance to
11862 * adjust it according to limitations or connector properties, and also
11863 * a chance to reject the mode entirely.
11864 */
11865 for_each_connector_in_state(state, connector, connector_state, i) {
11866 if (connector_state->crtc != crtc)
11867 continue;
11868
11869 encoder = to_intel_encoder(connector_state->best_encoder);
11870
11871 if (!encoder->compute_config(encoder, pipe_config)) {
11872 DRM_DEBUG_KMS("Encoder config failure\n");
11873 goto fail;
11874 }
11875 }
11876
11877 /* Set default port clock if not overwritten by the encoder. Needs to be
11878 * done afterwards in case the encoder adjusts the mode. */
11879 if (!pipe_config->port_clock)
11880 pipe_config->port_clock = pipe_config->base.adjusted_mode.crtc_clock
11881 * pipe_config->pixel_multiplier;
11882
11883 ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
11884 if (ret < 0) {
11885 DRM_DEBUG_KMS("CRTC fixup failed\n");
11886 goto fail;
11887 }
11888
11889 if (ret == RETRY) {
11890 if (WARN(!retry, "loop in pipe configuration computation\n")) {
11891 ret = -EINVAL;
11892 goto fail;
11893 }
11894
11895 DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
11896 retry = false;
11897 goto encoder_retry;
11898 }
11899
11900 /* Dithering seems not to pass bits through correctly when it should, so
11901 * only enable it on 6bpc panels. */
11902 pipe_config->dither = pipe_config->pipe_bpp == 6*3;
11903 DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
11904 base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
11905
11906 fail:
11907 return ret;
11908 }
11909
11910 static void
11911 intel_modeset_update_crtc_state(struct drm_atomic_state *state)
11912 {
11913 struct drm_crtc *crtc;
11914 struct drm_crtc_state *crtc_state;
11915 int i;
11916
11917 /* Double check state. */
11918 for_each_crtc_in_state(state, crtc, crtc_state, i) {
11919 to_intel_crtc(crtc)->config = to_intel_crtc_state(crtc->state);
11920
11921 /* Update hwmode for vblank functions */
11922 if (crtc->state->active)
11923 crtc->hwmode = crtc->state->adjusted_mode;
11924 else
11925 crtc->hwmode.crtc_clock = 0;
11926
11927 /*
11928 * Update legacy state to satisfy fbc code. This can
11929 * be removed when fbc uses the atomic state.
11930 */
11931 if (drm_atomic_get_existing_plane_state(state, crtc->primary)) {
11932 struct drm_plane_state *plane_state = crtc->primary->state;
11933
11934 crtc->primary->fb = plane_state->fb;
11935 crtc->x = plane_state->src_x >> 16;
11936 crtc->y = plane_state->src_y >> 16;
11937 }
11938 }
11939 }
11940
11941 static bool intel_fuzzy_clock_check(int clock1, int clock2)
11942 {
11943 int diff;
11944
11945 if (clock1 == clock2)
11946 return true;
11947
11948 if (!clock1 || !clock2)
11949 return false;
11950
11951 diff = abs(clock1 - clock2);
11952
11953 if ((diff + clock1 + clock2) * 100 / (clock1 + clock2) < 105)
11954 return true;
11955
11956 return false;
11957 }
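/*
 * The check above accepts clocks whose difference is below 5% of their
 * sum, i.e. within roughly 10% of either clock (modulo integer
 * truncation). Worked example: 100000 vs 110000 kHz gives
 * (10000 + 210000) * 100 / 210000 = 104 < 105 and passes, while
 * 100000 vs 112000 kHz gives 105 and is rejected.
 */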
11958
11959 #define for_each_intel_crtc_masked(dev, mask, intel_crtc) \
11960 list_for_each_entry((intel_crtc), \
11961 &(dev)->mode_config.crtc_list, \
11962 base.head) \
11963 for_each_if (mask & (1 << (intel_crtc)->pipe))
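/*
 * Illustrative (hypothetical) use: iterate only the crtcs whose pipe
 * bit is set in a mask, e.g.
 *
 *	for_each_intel_crtc_masked(dev, BIT(PIPE_A) | BIT(PIPE_C), crtc)
 *		DRM_DEBUG_KMS("pipe %c selected\n", pipe_name(crtc->pipe));
 */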
11964
11965 static bool
11966 intel_compare_m_n(unsigned int m, unsigned int n,
11967 unsigned int m2, unsigned int n2,
11968 bool exact)
11969 {
11970 if (m == m2 && n == n2)
11971 return true;
11972
11973 if (exact || !m || !n || !m2 || !n2)
11974 return false;
11975
11976 BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
11977
11978 if (n > n2) {
11979 while (n > n2) {
11980 m2 <<= 1;
11981 n2 <<= 1;
11982 }
11983 } else if (n < n2) {
11984 while (n < n2) {
11985 m <<= 1;
11986 n <<= 1;
11987 }
11988 }
11989
11990 if (n != n2)
11991 return false;
11992
11993 return intel_fuzzy_clock_check(m, m2);
11994 }
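/*
 * The shifting above normalizes the two ratios to a common scale by
 * doubling the m/n pair with the smaller n until the n values line up;
 * if they never match exactly the ratios are not comparable and the
 * check fails. Example: (m=2, n=4) vs (m2=1, n2=2) shifts the second
 * pair to (2, 4), after which the fuzzy comparison of m and m2 passes.
 */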
11995
11996 static bool
11997 intel_compare_link_m_n(const struct intel_link_m_n *m_n,
11998 struct intel_link_m_n *m2_n2,
11999 bool adjust)
12000 {
12001 if (m_n->tu == m2_n2->tu &&
12002 intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
12003 m2_n2->gmch_m, m2_n2->gmch_n, !adjust) &&
12004 intel_compare_m_n(m_n->link_m, m_n->link_n,
12005 m2_n2->link_m, m2_n2->link_n, !adjust)) {
12006 if (adjust)
12007 *m2_n2 = *m_n;
12008
12009 return true;
12010 }
12011
12012 return false;
12013 }
12014
12015 static bool
12016 intel_pipe_config_compare(struct drm_device *dev,
12017 struct intel_crtc_state *current_config,
12018 struct intel_crtc_state *pipe_config,
12019 bool adjust)
12020 {
12021 bool ret = true;
12022
12023 #define INTEL_ERR_OR_DBG_KMS(fmt, ...) \
12024 do { \
12025 if (!adjust) \
12026 DRM_ERROR(fmt, ##__VA_ARGS__); \
12027 else \
12028 DRM_DEBUG_KMS(fmt, ##__VA_ARGS__); \
12029 } while (0)
12030
12031 #define PIPE_CONF_CHECK_X(name) \
12032 if (current_config->name != pipe_config->name) { \
12033 INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
12034 "(expected 0x%08x, found 0x%08x)\n", \
12035 current_config->name, \
12036 pipe_config->name); \
12037 ret = false; \
12038 }
12039
12040 #define PIPE_CONF_CHECK_I(name) \
12041 if (current_config->name != pipe_config->name) { \
12042 INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
12043 "(expected %i, found %i)\n", \
12044 current_config->name, \
12045 pipe_config->name); \
12046 ret = false; \
12047 }
12048
12049 #define PIPE_CONF_CHECK_P(name) \
12050 if (current_config->name != pipe_config->name) { \
12051 INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
12052 "(expected %p, found %p)\n", \
12053 current_config->name, \
12054 pipe_config->name); \
12055 ret = false; \
12056 }
12057
12058 #define PIPE_CONF_CHECK_M_N(name) \
12059 if (!intel_compare_link_m_n(&current_config->name, \
12060 &pipe_config->name,\
12061 adjust)) { \
12062 INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
12063 "(expected tu %i gmch %i/%i link %i/%i, " \
12064 "found tu %i, gmch %i/%i link %i/%i)\n", \
12065 current_config->name.tu, \
12066 current_config->name.gmch_m, \
12067 current_config->name.gmch_n, \
12068 current_config->name.link_m, \
12069 current_config->name.link_n, \
12070 pipe_config->name.tu, \
12071 pipe_config->name.gmch_m, \
12072 pipe_config->name.gmch_n, \
12073 pipe_config->name.link_m, \
12074 pipe_config->name.link_n); \
12075 ret = false; \
12076 }
12077
12078 /* This is required for BDW+ where there is only one set of registers for
12079 * switching between high and low RR.
12080 * This macro can be used whenever a comparison has to be made between one
12081 * hw state and multiple sw state variables.
12082 */
12083 #define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) \
12084 if (!intel_compare_link_m_n(&current_config->name, \
12085 &pipe_config->name, adjust) && \
12086 !intel_compare_link_m_n(&current_config->alt_name, \
12087 &pipe_config->name, adjust)) { \
12088 INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
12089 "(expected tu %i gmch %i/%i link %i/%i, " \
12090 "or tu %i gmch %i/%i link %i/%i, " \
12091 "found tu %i, gmch %i/%i link %i/%i)\n", \
12092 current_config->name.tu, \
12093 current_config->name.gmch_m, \
12094 current_config->name.gmch_n, \
12095 current_config->name.link_m, \
12096 current_config->name.link_n, \
12097 current_config->alt_name.tu, \
12098 current_config->alt_name.gmch_m, \
12099 current_config->alt_name.gmch_n, \
12100 current_config->alt_name.link_m, \
12101 current_config->alt_name.link_n, \
12102 pipe_config->name.tu, \
12103 pipe_config->name.gmch_m, \
12104 pipe_config->name.gmch_n, \
12105 pipe_config->name.link_m, \
12106 pipe_config->name.link_n); \
12107 ret = false; \
12108 }
12109
12110 #define PIPE_CONF_CHECK_FLAGS(name, mask) \
12111 if ((current_config->name ^ pipe_config->name) & (mask)) { \
12112 INTEL_ERR_OR_DBG_KMS("mismatch in " #name "(" #mask ") " \
12113 "(expected %i, found %i)\n", \
12114 current_config->name & (mask), \
12115 pipe_config->name & (mask)); \
12116 ret = false; \
12117 }
12118
12119 #define PIPE_CONF_CHECK_CLOCK_FUZZY(name) \
12120 if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
12121 INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
12122 "(expected %i, found %i)\n", \
12123 current_config->name, \
12124 pipe_config->name); \
12125 ret = false; \
12126 }
12127
12128 #define PIPE_CONF_QUIRK(quirk) \
12129 ((current_config->quirks | pipe_config->quirks) & (quirk))
12130
12131 PIPE_CONF_CHECK_I(cpu_transcoder);
12132
12133 PIPE_CONF_CHECK_I(has_pch_encoder);
12134 PIPE_CONF_CHECK_I(fdi_lanes);
12135 PIPE_CONF_CHECK_M_N(fdi_m_n);
12136
12137 PIPE_CONF_CHECK_I(has_dp_encoder);
12138 PIPE_CONF_CHECK_I(lane_count);
12139
12140 if (INTEL_INFO(dev)->gen < 8) {
12141 PIPE_CONF_CHECK_M_N(dp_m_n);
12142
12143 if (current_config->has_drrs)
12144 PIPE_CONF_CHECK_M_N(dp_m2_n2);
12145 } else
12146 PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
12147
12148 PIPE_CONF_CHECK_I(has_dsi_encoder);
12149
12150 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay);
12151 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal);
12152 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_start);
12153 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_end);
12154 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_start);
12155 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_end);
12156
12157 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vdisplay);
12158 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vtotal);
12159 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_start);
12160 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_end);
12161 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_start);
12162 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end);
12163
12164 PIPE_CONF_CHECK_I(pixel_multiplier);
12165 PIPE_CONF_CHECK_I(has_hdmi_sink);
12166 if ((INTEL_INFO(dev)->gen < 8 && !IS_HASWELL(dev)) ||
12167 IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
12168 PIPE_CONF_CHECK_I(limited_color_range);
12169 PIPE_CONF_CHECK_I(has_infoframe);
12170
12171 PIPE_CONF_CHECK_I(has_audio);
12172
12173 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12174 DRM_MODE_FLAG_INTERLACE);
12175
12176 if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
12177 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12178 DRM_MODE_FLAG_PHSYNC);
12179 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12180 DRM_MODE_FLAG_NHSYNC);
12181 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12182 DRM_MODE_FLAG_PVSYNC);
12183 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
12184 DRM_MODE_FLAG_NVSYNC);
12185 }
12186
12187 PIPE_CONF_CHECK_X(gmch_pfit.control);
12188 /* pfit ratios are autocomputed by the hw on gen4+ */
12189 if (INTEL_INFO(dev)->gen < 4)
12190 PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
12191 PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
12192
12193 if (!adjust) {
12194 PIPE_CONF_CHECK_I(pipe_src_w);
12195 PIPE_CONF_CHECK_I(pipe_src_h);
12196
12197 PIPE_CONF_CHECK_I(pch_pfit.enabled);
12198 if (current_config->pch_pfit.enabled) {
12199 PIPE_CONF_CHECK_X(pch_pfit.pos);
12200 PIPE_CONF_CHECK_X(pch_pfit.size);
12201 }
12202
12203 PIPE_CONF_CHECK_I(scaler_state.scaler_id);
12204 }
12205
12206 /* BDW+ doesn't expose a synchronous way to read the IPS state */
12207 if (IS_HASWELL(dev))
12208 PIPE_CONF_CHECK_I(ips_enabled);
12209
12210 PIPE_CONF_CHECK_I(double_wide);
12211
12212 PIPE_CONF_CHECK_X(ddi_pll_sel);
12213
12214 PIPE_CONF_CHECK_P(shared_dpll);
12215 PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
12216 PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
12217 PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
12218 PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
12219 PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
12220 PIPE_CONF_CHECK_X(dpll_hw_state.spll);
12221 PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
12222 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
12223 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
12224
12225 PIPE_CONF_CHECK_X(dsi_pll.ctrl);
12226 PIPE_CONF_CHECK_X(dsi_pll.div);
12227
12228 if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
12229 PIPE_CONF_CHECK_I(pipe_bpp);
12230
12231 PIPE_CONF_CHECK_CLOCK_FUZZY(base.adjusted_mode.crtc_clock);
12232 PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
12233
12234 #undef PIPE_CONF_CHECK_X
12235 #undef PIPE_CONF_CHECK_I
12236 #undef PIPE_CONF_CHECK_P
12237 #undef PIPE_CONF_CHECK_FLAGS
12238 #undef PIPE_CONF_CHECK_CLOCK_FUZZY
12239 #undef PIPE_CONF_QUIRK
12240 #undef INTEL_ERR_OR_DBG_KMS
12241
12242 return ret;
12243 }
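/*
 * The adjust parameter selects between this function's two users: with
 * adjust == true (the fastset path during atomic check) mismatches are
 * only debug messages and m/n values that differ merely in precision
 * get copied into the software state; with adjust == false (post-commit
 * state verification, see verify_crtc_state()) any mismatch is a driver
 * bug and is reported via DRM_ERROR.
 */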
12244
12245 static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
12246 const struct intel_crtc_state *pipe_config)
12247 {
12248 if (pipe_config->has_pch_encoder) {
12249 int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
12250 &pipe_config->fdi_m_n);
12251 int dotclock = pipe_config->base.adjusted_mode.crtc_clock;
12252
12253 /*
12254 * FDI already provided one idea for the dotclock.
12255 * Yell if the encoder disagrees.
12256 */
12257 WARN(!intel_fuzzy_clock_check(fdi_dotclock, dotclock),
12258 "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
12259 fdi_dotclock, dotclock);
12260 }
12261 }
12262
12263 static void verify_wm_state(struct drm_crtc *crtc,
12264 struct drm_crtc_state *new_state)
12265 {
12266 struct drm_device *dev = crtc->dev;
12267 struct drm_i915_private *dev_priv = dev->dev_private;
12268 struct skl_ddb_allocation hw_ddb, *sw_ddb;
12269 struct skl_ddb_entry *hw_entry, *sw_entry;
12270 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
12271 const enum pipe pipe = intel_crtc->pipe;
12272 int plane;
12273
12274 if (INTEL_INFO(dev)->gen < 9 || !new_state->active)
12275 return;
12276
12277 skl_ddb_get_hw_state(dev_priv, &hw_ddb);
12278 sw_ddb = &dev_priv->wm.skl_hw.ddb;
12279
12280 /* planes */
12281 for_each_plane(dev_priv, pipe, plane) {
12282 hw_entry = &hw_ddb.plane[pipe][plane];
12283 sw_entry = &sw_ddb->plane[pipe][plane];
12284
12285 if (skl_ddb_entry_equal(hw_entry, sw_entry))
12286 continue;
12287
12288 DRM_ERROR("mismatch in DDB state pipe %c plane %d "
12289 "(expected (%u,%u), found (%u,%u))\n",
12290 pipe_name(pipe), plane + 1,
12291 sw_entry->start, sw_entry->end,
12292 hw_entry->start, hw_entry->end);
12293 }
12294
12295 /* cursor */
12296 hw_entry = &hw_ddb.plane[pipe][PLANE_CURSOR];
12297 sw_entry = &sw_ddb->plane[pipe][PLANE_CURSOR];
12298
12299 if (!skl_ddb_entry_equal(hw_entry, sw_entry)) {
12300 DRM_ERROR("mismatch in DDB state pipe %c cursor "
12301 "(expected (%u,%u), found (%u,%u))\n",
12302 pipe_name(pipe),
12303 sw_entry->start, sw_entry->end,
12304 hw_entry->start, hw_entry->end);
12305 }
12306 }
12307
12308 static void
12309 verify_connector_state(struct drm_device *dev, struct drm_crtc *crtc)
12310 {
12311 struct drm_connector *connector;
12312
12313 drm_for_each_connector(connector, dev) {
12314 struct drm_encoder *encoder = connector->encoder;
12315 struct drm_connector_state *state = connector->state;
12316
12317 if (state->crtc != crtc)
12318 continue;
12319
12320 intel_connector_verify_state(to_intel_connector(connector));
12321
12322 I915_STATE_WARN(state->best_encoder != encoder,
12323 "connector's atomic encoder doesn't match legacy encoder\n");
12324 }
12325 }
12326
12327 static void
12328 verify_encoder_state(struct drm_device *dev)
12329 {
12330 struct intel_encoder *encoder;
12331 struct intel_connector *connector;
12332
12333 for_each_intel_encoder(dev, encoder) {
12334 bool enabled = false;
12335 enum pipe pipe;
12336
12337 DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
12338 encoder->base.base.id,
12339 encoder->base.name);
12340
12341 for_each_intel_connector(dev, connector) {
12342 if (connector->base.state->best_encoder != &encoder->base)
12343 continue;
12344 enabled = true;
12345
12346 I915_STATE_WARN(connector->base.state->crtc !=
12347 encoder->base.crtc,
12348 "connector's crtc doesn't match encoder crtc\n");
12349 }
12350
12351 I915_STATE_WARN(!!encoder->base.crtc != enabled,
12352 "encoder's enabled state mismatch "
12353 "(expected %i, found %i)\n",
12354 !!encoder->base.crtc, enabled);
12355
12356 if (!encoder->base.crtc) {
12357 bool active;
12358
12359 active = encoder->get_hw_state(encoder, &pipe);
12360 I915_STATE_WARN(active,
12361 "encoder detached but still enabled on pipe %c.\n",
12362 pipe_name(pipe));
12363 }
12364 }
12365 }
12366
12367 static void
12368 verify_crtc_state(struct drm_crtc *crtc,
12369 struct drm_crtc_state *old_crtc_state,
12370 struct drm_crtc_state *new_crtc_state)
12371 {
12372 struct drm_device *dev = crtc->dev;
12373 struct drm_i915_private *dev_priv = dev->dev_private;
12374 struct intel_encoder *encoder;
12375 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
12376 struct intel_crtc_state *pipe_config, *sw_config;
12377 struct drm_atomic_state *old_state;
12378 bool active;
12379
12380 old_state = old_crtc_state->state;
12381 __drm_atomic_helper_crtc_destroy_state(crtc, old_crtc_state);
12382 pipe_config = to_intel_crtc_state(old_crtc_state);
12383 memset(pipe_config, 0, sizeof(*pipe_config));
12384 pipe_config->base.crtc = crtc;
12385 pipe_config->base.state = old_state;
12386
12387 DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);
12388
12389 active = dev_priv->display.get_pipe_config(intel_crtc, pipe_config);
12390
12391 /* hw state is inconsistent with the pipe quirk */
12392 if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
12393 (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
12394 active = new_crtc_state->active;
12395
12396 I915_STATE_WARN(new_crtc_state->active != active,
12397 "crtc active state doesn't match with hw state "
12398 "(expected %i, found %i)\n", new_crtc_state->active, active);
12399
12400 I915_STATE_WARN(intel_crtc->active != new_crtc_state->active,
12401 "transitional active state does not match atomic hw state "
12402 "(expected %i, found %i)\n", new_crtc_state->active, intel_crtc->active);
12403
12404 for_each_encoder_on_crtc(dev, crtc, encoder) {
12405 enum pipe pipe;
12406
12407 active = encoder->get_hw_state(encoder, &pipe);
12408 I915_STATE_WARN(active != new_crtc_state->active,
12409 "[ENCODER:%i] active %i with crtc active %i\n",
12410 encoder->base.base.id, active, new_crtc_state->active);
12411
12412 I915_STATE_WARN(active && intel_crtc->pipe != pipe,
12413 "Encoder connected to wrong pipe %c\n",
12414 pipe_name(pipe));
12415
12416 if (active)
12417 encoder->get_config(encoder, pipe_config);
12418 }
12419
12420 if (!new_crtc_state->active)
12421 return;
12422
12423 intel_pipe_config_sanity_check(dev_priv, pipe_config);
12424
12425 sw_config = to_intel_crtc_state(crtc->state);
12426 if (!intel_pipe_config_compare(dev, sw_config,
12427 pipe_config, false)) {
12428 I915_STATE_WARN(1, "pipe state doesn't match!\n");
12429 intel_dump_pipe_config(intel_crtc, pipe_config,
12430 "[hw state]");
12431 intel_dump_pipe_config(intel_crtc, sw_config,
12432 "[sw state]");
12433 }
12434 }
12435
12436 static void
12437 verify_single_dpll_state(struct drm_i915_private *dev_priv,
12438 struct intel_shared_dpll *pll,
12439 struct drm_crtc *crtc,
12440 struct drm_crtc_state *new_state)
12441 {
12442 struct intel_dpll_hw_state dpll_hw_state;
12443 unsigned crtc_mask;
12444 bool active;
12445
12446 memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
12447
12448 DRM_DEBUG_KMS("%s\n", pll->name);
12449
12450 active = pll->funcs.get_hw_state(dev_priv, pll, &dpll_hw_state);
12451
12452 if (!(pll->flags & INTEL_DPLL_ALWAYS_ON)) {
12453 I915_STATE_WARN(!pll->on && pll->active_mask,
12454 "pll in active use but not on in sw tracking\n");
12455 I915_STATE_WARN(pll->on && !pll->active_mask,
12456 "pll is on but not used by any active crtc\n");
12457 I915_STATE_WARN(pll->on != active,
12458 "pll on state mismatch (expected %i, found %i)\n",
12459 pll->on, active);
12460 }
12461
12462 if (!crtc) {
12463 I915_STATE_WARN(pll->active_mask & ~pll->config.crtc_mask,
12464 "more active pll users than references: %x vs %x\n",
12465 pll->active_mask, pll->config.crtc_mask);
12466
12467 return;
12468 }
12469
12470 crtc_mask = 1 << drm_crtc_index(crtc);
12471
12472 if (new_state->active)
12473 I915_STATE_WARN(!(pll->active_mask & crtc_mask),
12474 "pll active mismatch (expected pipe %c in active mask 0x%02x)\n",
12475 pipe_name(drm_crtc_index(crtc)), pll->active_mask);
12476 else
12477 I915_STATE_WARN(pll->active_mask & crtc_mask,
12478 "pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n",
12479 pipe_name(drm_crtc_index(crtc)), pll->active_mask);
12480
12481 I915_STATE_WARN(!(pll->config.crtc_mask & crtc_mask),
12482 "pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n",
12483 crtc_mask, pll->config.crtc_mask);
12484
12485 I915_STATE_WARN(pll->on && memcmp(&pll->config.hw_state,
12486 &dpll_hw_state,
12487 sizeof(dpll_hw_state)),
12488 "pll hw state mismatch\n");
12489 }
12490
12491 static void
12492 verify_shared_dpll_state(struct drm_device *dev, struct drm_crtc *crtc,
12493 struct drm_crtc_state *old_crtc_state,
12494 struct drm_crtc_state *new_crtc_state)
12495 {
12496 struct drm_i915_private *dev_priv = dev->dev_private;
12497 struct intel_crtc_state *old_state = to_intel_crtc_state(old_crtc_state);
12498 struct intel_crtc_state *new_state = to_intel_crtc_state(new_crtc_state);
12499
12500 if (new_state->shared_dpll)
12501 verify_single_dpll_state(dev_priv, new_state->shared_dpll, crtc, new_crtc_state);
12502
12503 if (old_state->shared_dpll &&
12504 old_state->shared_dpll != new_state->shared_dpll) {
12505 unsigned crtc_mask = 1 << drm_crtc_index(crtc);
12506 struct intel_shared_dpll *pll = old_state->shared_dpll;
12507
12508 I915_STATE_WARN(pll->active_mask & crtc_mask,
12509 "pll active mismatch (didn't expect pipe %c in active mask)\n",
12510 pipe_name(drm_crtc_index(crtc)));
12511 I915_STATE_WARN(pll->config.crtc_mask & crtc_mask,
12512 "pll enabled crtcs mismatch (found %x in enabled mask)\n",
12513 pipe_name(drm_crtc_index(crtc)));
12514 }
12515 }
12516
12517 static void
12518 intel_modeset_verify_crtc(struct drm_crtc *crtc,
12519 struct drm_crtc_state *old_state,
12520 struct drm_crtc_state *new_state)
12521 {
12522 if (!needs_modeset(new_state) &&
12523 !to_intel_crtc_state(new_state)->update_pipe)
12524 return;
12525
12526 verify_wm_state(crtc, new_state);
12527 verify_connector_state(crtc->dev, crtc);
12528 verify_crtc_state(crtc, old_state, new_state);
12529 verify_shared_dpll_state(crtc->dev, crtc, old_state, new_state);
12530 }
12531
12532 static void
12533 verify_disabled_dpll_state(struct drm_device *dev)
12534 {
12535 struct drm_i915_private *dev_priv = dev->dev_private;
12536 int i;
12537
12538 for (i = 0; i < dev_priv->num_shared_dpll; i++)
12539 verify_single_dpll_state(dev_priv, &dev_priv->shared_dplls[i], NULL, NULL);
12540 }
12541
12542 static void
12543 intel_modeset_verify_disabled(struct drm_device *dev)
12544 {
12545 verify_encoder_state(dev);
12546 verify_connector_state(dev, NULL);
12547 verify_disabled_dpll_state(dev);
12548 }
12549
12550 static void update_scanline_offset(struct intel_crtc *crtc)
12551 {
12552 struct drm_device *dev = crtc->base.dev;
12553
12554 /*
12555 * The scanline counter increments at the leading edge of hsync.
12556 *
12557 * On most platforms it starts counting from vtotal-1 on the
12558 * first active line. That means the scanline counter value is
12559 * always one less than what we would expect. I.e. just after
12560 * start of vblank, which also occurs at start of hsync (on the
12561 * last active line), the scanline counter will read vblank_start-1.
12562 *
12563 * On gen2 the scanline counter starts counting from 1 instead
12564 * of vtotal-1, so we have to subtract one (or rather add vtotal-1
12565 * to keep the value positive), instead of adding one.
12566 *
12567 * On HSW+ the behaviour of the scanline counter depends on the output
12568 * type. For DP ports it behaves like most other platforms, but on HDMI
12569 * there's an extra 1 line difference. So we need to add two instead of
12570 * one to the value.
12571 */
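/*
 * Worked example (illustrative numbers, not from this code): for a mode
 * with crtc_vtotal = 1125, gen2 gets scanline_offset = 1124, so a raw
 * counter reading of 1 on the first active line maps back to line 0 once
 * the offset is added modulo vtotal elsewhere in the driver; other
 * platforms reach the same result with an offset of 1 (or 2 for HSW+ HDMI).
 */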
12572 if (IS_GEN2(dev)) {
12573 const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
12574 int vtotal;
12575
12576 vtotal = adjusted_mode->crtc_vtotal;
12577 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
12578 vtotal /= 2;
12579
12580 crtc->scanline_offset = vtotal - 1;
12581 } else if (HAS_DDI(dev) &&
12582 intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) {
12583 crtc->scanline_offset = 2;
12584 } else
12585 crtc->scanline_offset = 1;
12586 }
12587
12588 static void intel_modeset_clear_plls(struct drm_atomic_state *state)
12589 {
12590 struct drm_device *dev = state->dev;
12591 struct drm_i915_private *dev_priv = to_i915(dev);
12592 struct intel_shared_dpll_config *shared_dpll = NULL;
12593 struct drm_crtc *crtc;
12594 struct drm_crtc_state *crtc_state;
12595 int i;
12596
12597 if (!dev_priv->display.crtc_compute_clock)
12598 return;
12599
12600 for_each_crtc_in_state(state, crtc, crtc_state, i) {
12601 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
12602 struct intel_shared_dpll *old_dpll =
12603 to_intel_crtc_state(crtc->state)->shared_dpll;
12604
12605 if (!needs_modeset(crtc_state))
12606 continue;
12607
12608 to_intel_crtc_state(crtc_state)->shared_dpll = NULL;
12609
12610 if (!old_dpll)
12611 continue;
12612
12613 if (!shared_dpll)
12614 shared_dpll = intel_atomic_get_shared_dpll_state(state);
12615
12616 intel_shared_dpll_config_put(shared_dpll, old_dpll, intel_crtc);
12617 }
12618 }
12619
12620 /*
12621 * This implements the workaround described in the "notes" section of the mode
12622 * set sequence documentation. When going from no pipes or single pipe to
12623 * multiple pipes, and planes are enabled after the pipe, we need to wait at
12624 * least 2 vblanks on the first pipe before enabling planes on the second pipe.
12625 */
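/*
 * Example (hypothetical commit): when two pipes are enabled at once, the
 * second crtc in the state gets hsw_workaround_pipe pointing at the first;
 * when one pipe is enabled while another is already running, the new crtc
 * points at the running pipe. Either way the plane enable waits for two
 * vblanks on the referenced pipe.
 */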
12626 static int haswell_mode_set_planes_workaround(struct drm_atomic_state *state)
12627 {
12628 struct drm_crtc_state *crtc_state;
12629 struct intel_crtc *intel_crtc;
12630 struct drm_crtc *crtc;
12631 struct intel_crtc_state *first_crtc_state = NULL;
12632 struct intel_crtc_state *other_crtc_state = NULL;
12633 enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
12634 int i;
12635
12636 /* look at all crtc's that are going to be enabled during the modeset */
12637 for_each_crtc_in_state(state, crtc, crtc_state, i) {
12638 intel_crtc = to_intel_crtc(crtc);
12639
12640 if (!crtc_state->active || !needs_modeset(crtc_state))
12641 continue;
12642
12643 if (first_crtc_state) {
12644 other_crtc_state = to_intel_crtc_state(crtc_state);
12645 break;
12646 } else {
12647 first_crtc_state = to_intel_crtc_state(crtc_state);
12648 first_pipe = intel_crtc->pipe;
12649 }
12650 }
12651
12652 /* No workaround needed? */
12653 if (!first_crtc_state)
12654 return 0;
12655
12656 /* w/a possibly needed, check how many crtc's are already enabled. */
12657 for_each_intel_crtc(state->dev, intel_crtc) {
12658 struct intel_crtc_state *pipe_config;
12659
12660 pipe_config = intel_atomic_get_crtc_state(state, intel_crtc);
12661 if (IS_ERR(pipe_config))
12662 return PTR_ERR(pipe_config);
12663
12664 pipe_config->hsw_workaround_pipe = INVALID_PIPE;
12665
12666 if (!pipe_config->base.active ||
12667 needs_modeset(&pipe_config->base))
12668 continue;
12669
12670 /* 2 or more enabled crtcs means no need for w/a */
12671 if (enabled_pipe != INVALID_PIPE)
12672 return 0;
12673
12674 enabled_pipe = intel_crtc->pipe;
12675 }
12676
12677 if (enabled_pipe != INVALID_PIPE)
12678 first_crtc_state->hsw_workaround_pipe = enabled_pipe;
12679 else if (other_crtc_state)
12680 other_crtc_state->hsw_workaround_pipe = first_pipe;
12681
12682 return 0;
12683 }
12684
12685 static int intel_modeset_all_pipes(struct drm_atomic_state *state)
12686 {
12687 struct drm_crtc *crtc;
12688 struct drm_crtc_state *crtc_state;
12689 int ret = 0;
12690
12691 /* add all active pipes to the state */
12692 for_each_crtc(state->dev, crtc) {
12693 crtc_state = drm_atomic_get_crtc_state(state, crtc);
12694 if (IS_ERR(crtc_state))
12695 return PTR_ERR(crtc_state);
12696
12697 if (!crtc_state->active || needs_modeset(crtc_state))
12698 continue;
12699
12700 crtc_state->mode_changed = true;
12701
12702 ret = drm_atomic_add_affected_connectors(state, crtc);
12703 if (ret)
12704 break;
12705
12706 ret = drm_atomic_add_affected_planes(state, crtc);
12707 if (ret)
12708 break;
12709 }
12710
12711 return ret;
12712 }
12713
12714 static int intel_modeset_checks(struct drm_atomic_state *state)
12715 {
12716 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
12717 struct drm_i915_private *dev_priv = state->dev->dev_private;
12718 struct drm_crtc *crtc;
12719 struct drm_crtc_state *crtc_state;
12720 int ret = 0, i;
12721
12722 if (!check_digital_port_conflicts(state)) {
12723 DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
12724 return -EINVAL;
12725 }
12726
12727 intel_state->modeset = true;
12728 intel_state->active_crtcs = dev_priv->active_crtcs;
12729
12730 for_each_crtc_in_state(state, crtc, crtc_state, i) {
12731 if (crtc_state->active)
12732 intel_state->active_crtcs |= 1 << i;
12733 else
12734 intel_state->active_crtcs &= ~(1 << i);
12735
12736 if (crtc_state->active != crtc->state->active)
12737 intel_state->active_pipe_changes |= drm_crtc_mask(crtc);
12738 }
12739
12740 /*
12741 * See if the config requires any additional preparation, e.g.
12742 * to adjust global state with pipes off. We need to do this
12743 * here so we can get the modeset_pipe updated config for the new
12744 * mode set on this crtc. For other crtcs we need to use the
12745 * adjusted_mode bits in the crtc directly.
12746 */
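/*
 * E.g. (illustrative): should modeset_calc_cdclk() pick a dev_cdclk that
 * differs from the current cdclk_freq, intel_modeset_all_pipes() below
 * pulls every active pipe into the state so the clock change is applied
 * with all pipes undergoing a full modeset.
 */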
12747 if (dev_priv->display.modeset_calc_cdclk) {
12748 ret = dev_priv->display.modeset_calc_cdclk(state);
12749
12750 if (!ret && intel_state->dev_cdclk != dev_priv->cdclk_freq)
12751 ret = intel_modeset_all_pipes(state);
12752
12753 if (ret < 0)
12754 return ret;
12755
12756 DRM_DEBUG_KMS("New cdclk calculated to be atomic %u, actual %u\n",
12757 intel_state->cdclk, intel_state->dev_cdclk);
12758 } else
12759 to_intel_atomic_state(state)->cdclk = dev_priv->atomic_cdclk_freq;
12760
12761 intel_modeset_clear_plls(state);
12762
12763 if (IS_HASWELL(dev_priv))
12764 return haswell_mode_set_planes_workaround(state);
12765
12766 return 0;
12767 }
12768
12769 /*
12770 * Handle calculation of various watermark data at the end of the atomic check
12771 * phase. The code here should be run after the per-crtc and per-plane 'check'
12772 * handlers to ensure that all derived state has been updated.
12773 */
12774 static int calc_watermark_data(struct drm_atomic_state *state)
12775 {
12776 struct drm_device *dev = state->dev;
12777 struct drm_i915_private *dev_priv = to_i915(dev);
12778
12779 /* Is there platform-specific watermark information to calculate? */
12780 if (dev_priv->display.compute_global_watermarks)
12781 return dev_priv->display.compute_global_watermarks(state);
12782
12783 return 0;
12784 }
12785
12786 /**
12787 * intel_atomic_check - validate state object
12788 * @dev: drm device
12789 * @state: state to validate
12790 */
12791 static int intel_atomic_check(struct drm_device *dev,
12792 struct drm_atomic_state *state)
12793 {
12794 struct drm_i915_private *dev_priv = to_i915(dev);
12795 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
12796 struct drm_crtc *crtc;
12797 struct drm_crtc_state *crtc_state;
12798 int ret, i;
12799 bool any_ms = false;
12800
12801 ret = drm_atomic_helper_check_modeset(dev, state);
12802 if (ret)
12803 return ret;
12804
12805 for_each_crtc_in_state(state, crtc, crtc_state, i) {
12806 struct intel_crtc_state *pipe_config =
12807 to_intel_crtc_state(crtc_state);
12808
12809 /* Catch I915_MODE_FLAG_INHERITED */
12810 if (crtc_state->mode.private_flags != crtc->state->mode.private_flags)
12811 crtc_state->mode_changed = true;
12812
12813 if (!needs_modeset(crtc_state))
12814 continue;
12815
12816 if (!crtc_state->enable) {
12817 any_ms = true;
12818 continue;
12819 }
12820
12821 /* FIXME: For only active_changed we shouldn't need to do any
12822 * state recomputation at all. */
12823
12824 ret = drm_atomic_add_affected_connectors(state, crtc);
12825 if (ret)
12826 return ret;
12827
12828 ret = intel_modeset_pipe_config(crtc, pipe_config);
12829 if (ret) {
12830 intel_dump_pipe_config(to_intel_crtc(crtc),
12831 pipe_config, "[failed]");
12832 return ret;
12833 }
12834
12835 if (i915.fastboot &&
12836 intel_pipe_config_compare(dev,
12837 to_intel_crtc_state(crtc->state),
12838 pipe_config, true)) {
12839 crtc_state->mode_changed = false;
12840 to_intel_crtc_state(crtc_state)->update_pipe = true;
12841 }
12842
12843 if (needs_modeset(crtc_state))
12844 any_ms = true;
12845
12846 ret = drm_atomic_add_affected_planes(state, crtc);
12847 if (ret)
12848 return ret;
12849
12850 intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
12851 needs_modeset(crtc_state) ?
12852 "[modeset]" : "[fastset]");
12853 }
12854
12855 if (any_ms) {
12856 ret = intel_modeset_checks(state);
12857
12858 if (ret)
12859 return ret;
12860 } else
12861 intel_state->cdclk = dev_priv->cdclk_freq;
12862
12863 ret = drm_atomic_helper_check_planes(dev, state);
12864 if (ret)
12865 return ret;
12866
12867 intel_fbc_choose_crtc(dev_priv, state);
12868 return calc_watermark_data(state);
12869 }
12870
12871 static int intel_atomic_prepare_commit(struct drm_device *dev,
12872 struct drm_atomic_state *state,
12873 bool nonblock)
12874 {
12875 struct drm_i915_private *dev_priv = dev->dev_private;
12876 struct drm_plane_state *plane_state;
12877 struct drm_crtc_state *crtc_state;
12878 struct drm_plane *plane;
12879 struct drm_crtc *crtc;
12880 int i, ret;
12881
12882 if (nonblock) {
12883 DRM_DEBUG_KMS("i915 does not yet support nonblocking commit\n");
12884 return -EINVAL;
12885 }
12886
12887 for_each_crtc_in_state(state, crtc, crtc_state, i) {
12888 ret = intel_crtc_wait_for_pending_flips(crtc);
12889 if (ret)
12890 return ret;
12891
12892 if (atomic_read(&to_intel_crtc(crtc)->unpin_work_count) >= 2)
12893 flush_workqueue(dev_priv->wq);
12894 }
12895
12896 ret = mutex_lock_interruptible(&dev->struct_mutex);
12897 if (ret)
12898 return ret;
12899
12900 ret = drm_atomic_helper_prepare_planes(dev, state);
12901 mutex_unlock(&dev->struct_mutex);
12902
12903 if (!ret && !nonblock) {
12904 for_each_plane_in_state(state, plane, plane_state, i) {
12905 struct intel_plane_state *intel_plane_state =
12906 to_intel_plane_state(plane_state);
12907
12908 if (plane_state->fence) {
12909 long lret = fence_wait(plane_state->fence, true);
12910
12911 if (lret < 0) {
12912 ret = lret;
12913 break;
12914 }
12915 }
12916
12917 if (!intel_plane_state->wait_req)
12918 continue;
12919
12920 ret = __i915_wait_request(intel_plane_state->wait_req,
12921 true, NULL, NULL);
12922 if (ret) {
12923 /* Any hang should be swallowed by the wait */
12924 WARN_ON(ret == -EIO);
12925 mutex_lock(&dev->struct_mutex);
12926 drm_atomic_helper_cleanup_planes(dev, state);
12927 mutex_unlock(&dev->struct_mutex);
12928 break;
12929 }
12930 }
12931 }
12932
12933 return ret;
12934 }
12935
12936 u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
12937 {
12938 struct drm_device *dev = crtc->base.dev;
12939
12940 if (!dev->max_vblank_count)
12941 return drm_accurate_vblank_count(&crtc->base);
12942
12943 return dev->driver->get_vblank_counter(dev, crtc->pipe);
12944 }
12945
12946 static void intel_atomic_wait_for_vblanks(struct drm_device *dev,
12947 struct drm_i915_private *dev_priv,
12948 unsigned crtc_mask)
12949 {
12950 unsigned last_vblank_count[I915_MAX_PIPES];
12951 enum pipe pipe;
12952 int ret;
12953
12954 if (!crtc_mask)
12955 return;
12956
12957 for_each_pipe(dev_priv, pipe) {
12958 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
12959
12960 if (!((1 << pipe) & crtc_mask))
12961 continue;
12962
12963 ret = drm_crtc_vblank_get(crtc);
12964 if (WARN_ON(ret != 0)) {
12965 crtc_mask &= ~(1 << pipe);
12966 continue;
12967 }
12968
12969 last_vblank_count[pipe] = drm_crtc_vblank_count(crtc);
12970 }
12971
12972 for_each_pipe(dev_priv, pipe) {
12973 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
12974 long lret;
12975
12976 if (!((1 << pipe) & crtc_mask))
12977 continue;
12978
12979 lret = wait_event_timeout(dev->vblank[pipe].queue,
12980 last_vblank_count[pipe] !=
12981 drm_crtc_vblank_count(crtc),
12982 msecs_to_jiffies(50));
12983
12984 WARN(!lret, "pipe %c vblank wait timed out\n", pipe_name(pipe));
12985
12986 drm_crtc_vblank_put(crtc);
12987 }
12988 }
12989
12990 static bool needs_vblank_wait(struct intel_crtc_state *crtc_state)
12991 {
12992 /* fb updated, need to unpin old fb */
12993 if (crtc_state->fb_changed)
12994 return true;
12995
12996 /* wm changes, need vblank before final wm's */
12997 if (crtc_state->update_wm_post)
12998 return true;
12999
13000 /*
13001 * cxsr is re-enabled after vblank.
13002 * This is already handled by crtc_state->update_wm_post,
13003 * but added for clarity.
13004 */
13005 if (crtc_state->disable_cxsr)
13006 return true;
13007
13008 return false;
13009 }
13010
13011 /**
13012 * intel_atomic_commit - commit validated state object
13013 * @dev: DRM device
13014 * @state: the top-level driver state object
13015 * @nonblock: nonblocking commit
13016 *
13017 * This function commits a top-level state object that has been validated
13018 * with drm_atomic_helper_check().
13019 *
13020 * FIXME: Atomic modeset support for i915 is not yet complete. At the moment
13021 * we can only handle plane-related operations and do not yet support
13022 * nonblocking commit.
13023 *
13024 * RETURNS
13025 * Zero for success or -errno.
13026 */
13027 static int intel_atomic_commit(struct drm_device *dev,
13028 struct drm_atomic_state *state,
13029 bool nonblock)
13030 {
13031 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
13032 struct drm_i915_private *dev_priv = dev->dev_private;
13033 struct drm_crtc_state *old_crtc_state;
13034 struct drm_crtc *crtc;
13035 struct intel_crtc_state *intel_cstate;
13036 int ret = 0, i;
13037 bool hw_check = intel_state->modeset;
13038 unsigned long put_domains[I915_MAX_PIPES] = {};
13039 unsigned crtc_vblank_mask = 0;
13040
13041 ret = intel_atomic_prepare_commit(dev, state, nonblock);
13042 if (ret) {
13043 DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
13044 return ret;
13045 }
13046
13047 drm_atomic_helper_swap_state(dev, state);
13048 dev_priv->wm.distrust_bios_wm = false;
13049 dev_priv->wm.skl_results = intel_state->wm_results;
13050 intel_shared_dpll_commit(state);
13051
13052 if (intel_state->modeset) {
13053 memcpy(dev_priv->min_pixclk, intel_state->min_pixclk,
13054 sizeof(intel_state->min_pixclk));
13055 dev_priv->active_crtcs = intel_state->active_crtcs;
13056 dev_priv->atomic_cdclk_freq = intel_state->cdclk;
13057
13058 intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
13059 }
13060
13061 for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
13062 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
13063
13064 if (needs_modeset(crtc->state) ||
13065 to_intel_crtc_state(crtc->state)->update_pipe) {
13066 hw_check = true;
13067
13068 put_domains[to_intel_crtc(crtc)->pipe] =
13069 modeset_get_crtc_power_domains(crtc,
13070 to_intel_crtc_state(crtc->state));
13071 }
13072
13073 if (!needs_modeset(crtc->state))
13074 continue;
13075
13076 intel_pre_plane_update(to_intel_crtc_state(old_crtc_state));
13077
13078 if (old_crtc_state->active) {
13079 intel_crtc_disable_planes(crtc, old_crtc_state->plane_mask);
13080 dev_priv->display.crtc_disable(crtc);
13081 intel_crtc->active = false;
13082 intel_fbc_disable(intel_crtc);
13083 intel_disable_shared_dpll(intel_crtc);
13084
13085 /*
13086 * Underruns don't always raise
13087 * interrupts, so check manually.
13088 */
13089 intel_check_cpu_fifo_underruns(dev_priv);
13090 intel_check_pch_fifo_underruns(dev_priv);
13091
13092 if (!crtc->state->active)
13093 intel_update_watermarks(crtc);
13094 }
13095 }
13096
13097 /* Only after disabling all output pipelines that will be changed can we
13098 * update the output configuration. */
13099 intel_modeset_update_crtc_state(state);
13100
13101 if (intel_state->modeset) {
13102 drm_atomic_helper_update_legacy_modeset_state(state->dev, state);
13103
13104 if (dev_priv->display.modeset_commit_cdclk &&
13105 intel_state->dev_cdclk != dev_priv->cdclk_freq)
13106 dev_priv->display.modeset_commit_cdclk(state);
13107
13108 intel_modeset_verify_disabled(dev);
13109 }
13110
13111 /* Now enable the clocks, plane, pipe, and connectors that we set up. */
13112 for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
13113 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
13114 bool modeset = needs_modeset(crtc->state);
13115 struct intel_crtc_state *pipe_config =
13116 to_intel_crtc_state(crtc->state);
13117 bool update_pipe = !modeset && pipe_config->update_pipe;
13118
13119 if (modeset && crtc->state->active) {
13120 update_scanline_offset(to_intel_crtc(crtc));
13121 dev_priv->display.crtc_enable(crtc);
13122 }
13123
13124 if (!modeset)
13125 intel_pre_plane_update(to_intel_crtc_state(old_crtc_state));
13126
13127 if (crtc->state->active &&
13128 drm_atomic_get_existing_plane_state(state, crtc->primary))
13129 intel_fbc_enable(intel_crtc);
13130
13131 if (crtc->state->active &&
13132 (crtc->state->planes_changed || update_pipe))
13133 drm_atomic_helper_commit_planes_on_crtc(old_crtc_state);
13134
13135 if (pipe_config->base.active && needs_vblank_wait(pipe_config))
13136 crtc_vblank_mask |= 1 << i;
13137 }
13138
13139 /* FIXME: add subpixel order */
13140
13141 if (!state->legacy_cursor_update)
13142 intel_atomic_wait_for_vblanks(dev, dev_priv, crtc_vblank_mask);
13143
13144 /*
13145 * Now that the vblank has passed, we can go ahead and program the
13146 * optimal watermarks on platforms that need two-step watermark
13147 * programming.
13148 *
13149 * TODO: Move this (and other cleanup) to an async worker eventually.
13150 */
13151 for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
13152 intel_cstate = to_intel_crtc_state(crtc->state);
13153
13154 if (dev_priv->display.optimize_watermarks)
13155 dev_priv->display.optimize_watermarks(intel_cstate);
13156 }
13157
13158 for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
13159 intel_post_plane_update(to_intel_crtc_state(old_crtc_state));
13160
13161 if (put_domains[i])
13162 modeset_put_power_domains(dev_priv, put_domains[i]);
13163
13164 intel_modeset_verify_crtc(crtc, old_crtc_state, crtc->state);
13165 }
13166
13167 if (intel_state->modeset)
13168 intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET);
13169
13170 mutex_lock(&dev->struct_mutex);
13171 drm_atomic_helper_cleanup_planes(dev, state);
13172 mutex_unlock(&dev->struct_mutex);
13173
13174 drm_atomic_state_free(state);
13175
13176 /* As one of the primary mmio accessors, KMS has a high likelihood
13177 * of triggering bugs in unclaimed access. After we finish
13178 * modesetting, see if an error has been flagged, and if so
13179 * enable debugging for the next modeset - and hope we catch
13180 * the culprit.
13181 *
13182 * XXX note that we assume display power is on at this point.
13183 * This might hold true now but we need to add pm helper to check
13184 * unclaimed only when the hardware is on, as atomic commits
13185 * can happen also when the device is completely off.
13186 */
13187 intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
13188
13189 return 0;
13190 }
13191
13192 void intel_crtc_restore_mode(struct drm_crtc *crtc)
13193 {
13194 struct drm_device *dev = crtc->dev;
13195 struct drm_atomic_state *state;
13196 struct drm_crtc_state *crtc_state;
13197 int ret;
13198
13199 state = drm_atomic_state_alloc(dev);
13200 if (!state) {
13201 DRM_DEBUG_KMS("[CRTC:%d] crtc restore failed, out of memory",
13202 crtc->base.id);
13203 return;
13204 }
13205
13206 state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
13207
13208 retry:
13209 crtc_state = drm_atomic_get_crtc_state(state, crtc);
13210 ret = PTR_ERR_OR_ZERO(crtc_state);
13211 if (!ret) {
13212 if (!crtc_state->active)
13213 goto out;
13214
13215 crtc_state->mode_changed = true;
13216 ret = drm_atomic_commit(state);
13217 }
13218
13219 if (ret == -EDEADLK) {
13220 drm_atomic_state_clear(state);
13221 drm_modeset_backoff(state->acquire_ctx);
13222 goto retry;
13223 }
13224
13225 if (ret)
13226 out:
13227 drm_atomic_state_free(state);
13228 }
13229
13230 #undef for_each_intel_crtc_masked
13231
13232 static const struct drm_crtc_funcs intel_crtc_funcs = {
13233 .gamma_set = drm_atomic_helper_legacy_gamma_set,
13234 .set_config = drm_atomic_helper_set_config,
13235 .set_property = drm_atomic_helper_crtc_set_property,
13236 .destroy = intel_crtc_destroy,
13237 .page_flip = intel_crtc_page_flip,
13238 .atomic_duplicate_state = intel_crtc_duplicate_state,
13239 .atomic_destroy_state = intel_crtc_destroy_state,
13240 };
13241
13242 /**
13243 * intel_prepare_plane_fb - Prepare fb for usage on plane
13244 * @plane: drm plane to prepare for
13245 * @new_state: the plane state being prepared
13246 *
13247 * Prepares a framebuffer for usage on a display plane. Generally this
13248 * involves pinning the underlying object and updating the frontbuffer tracking
13249 * bits. Some older platforms need special physical address handling for
13250 * cursor planes.
13251 *
13252 * Must be called with struct_mutex held.
13253 *
13254 * Returns 0 on success, negative error code on failure.
13255 */
13256 int
13257 intel_prepare_plane_fb(struct drm_plane *plane,
13258 const struct drm_plane_state *new_state)
13259 {
13260 struct drm_device *dev = plane->dev;
13261 struct drm_framebuffer *fb = new_state->fb;
13262 struct intel_plane *intel_plane = to_intel_plane(plane);
13263 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
13264 struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
13265 int ret = 0;
13266
13267 if (!obj && !old_obj)
13268 return 0;
13269
13270 if (old_obj) {
13271 struct drm_crtc_state *crtc_state =
13272 drm_atomic_get_existing_crtc_state(new_state->state, plane->state->crtc);
13273
13274 /* Big Hammer, we also need to ensure that any pending
13275 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
13276 * current scanout is retired before unpinning the old
13277 * framebuffer. Note that we rely on userspace rendering
13278 * into the buffer attached to the pipe they are waiting
13279 * on. If not, userspace generates a GPU hang with IPEHR
13280 * pointing to the MI_WAIT_FOR_EVENT.
13281 *
13282 * This should only fail upon a hung GPU, in which case we
13283 * can safely continue.
13284 */
13285 if (needs_modeset(crtc_state))
13286 ret = i915_gem_object_wait_rendering(old_obj, true);
13287 if (ret) {
13288 /* GPU hangs should have been swallowed by the wait */
13289 WARN_ON(ret == -EIO);
13290 return ret;
13291 }
13292 }
13293
13294 if (!obj) {
13295 ret = 0;
13296 } else if (plane->type == DRM_PLANE_TYPE_CURSOR &&
13297 INTEL_INFO(dev)->cursor_needs_physical) {
13298 int align = IS_I830(dev) ? 16 * 1024 : 256;
13299 ret = i915_gem_object_attach_phys(obj, align);
13300 if (ret)
13301 DRM_DEBUG_KMS("failed to attach phys object\n");
13302 } else {
13303 ret = intel_pin_and_fence_fb_obj(fb, new_state->rotation);
13304 }
13305
13306 if (ret == 0) {
13307 if (obj) {
13308 struct intel_plane_state *plane_state =
13309 to_intel_plane_state(new_state);
13310
13311 i915_gem_request_assign(&plane_state->wait_req,
13312 obj->last_write_req);
13313
13314 plane_state->base.fence = intel_get_excl_fence(obj);
13315 }
13316
13317 i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit);
13318 }
13319
13320 return ret;
13321 }
13322
13323 /**
13324 * intel_cleanup_plane_fb - Cleans up an fb after plane use
13325 * @plane: drm plane to clean up for
13326 * @old_state: the state from the previous modeset
13327 *
13328 * Cleans up a framebuffer that has just been removed from a plane.
13329 *
13330 * Must be called with struct_mutex held.
13331 */
13332 void
13333 intel_cleanup_plane_fb(struct drm_plane *plane,
13334 const struct drm_plane_state *old_state)
13335 {
13336 struct drm_device *dev = plane->dev;
13337 struct intel_plane *intel_plane = to_intel_plane(plane);
13338 struct intel_plane_state *old_intel_state;
13339 struct drm_i915_gem_object *old_obj = intel_fb_obj(old_state->fb);
13340 struct drm_i915_gem_object *obj = intel_fb_obj(plane->state->fb);
13341
13342 old_intel_state = to_intel_plane_state(old_state);
13343
13344 if (!obj && !old_obj)
13345 return;
13346
13347 if (old_obj && (plane->type != DRM_PLANE_TYPE_CURSOR ||
13348 !INTEL_INFO(dev)->cursor_needs_physical))
13349 intel_unpin_fb_obj(old_state->fb, old_state->rotation);
13350
13351 /* prepare_fb aborted? */
13352 if ((old_obj && (old_obj->frontbuffer_bits & intel_plane->frontbuffer_bit)) ||
13353 (obj && !(obj->frontbuffer_bits & intel_plane->frontbuffer_bit)))
13354 i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit);
13355
13356 i915_gem_request_assign(&old_intel_state->wait_req, NULL);
13357
13358 fence_put(old_intel_state->base.fence);
13359 old_intel_state->base.fence = NULL;
13360 }
13361
13362 int
13363 skl_max_scale(struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state)
13364 {
13365 int max_scale;
13366 struct drm_device *dev;
13367 struct drm_i915_private *dev_priv;
13368 int crtc_clock, cdclk;
13369
13370 if (!intel_crtc || !crtc_state->base.enable)
13371 return DRM_PLANE_HELPER_NO_SCALING;
13372
13373 dev = intel_crtc->base.dev;
13374 dev_priv = dev->dev_private;
13375 crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
13376 cdclk = to_intel_atomic_state(crtc_state->base.state)->cdclk;
13377
13378 if (WARN_ON_ONCE(!crtc_clock || cdclk < crtc_clock))
13379 return DRM_PLANE_HELPER_NO_SCALING;
13380
13381 /*
13382 * skl max scale is the lower of:
13383 * just under 3.0 (the -1 keeps the 16.16 value below 3.0)
13384 * or
13385 * cdclk/crtc_clock
13386 */
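/*
 * Worked example (illustrative numbers): with cdclk = 540000 and
 * crtc_clock = 148500, (1 << 8) * ((540000 << 8) / 148500) is
 * 256 * 930 = 238080, which min() clamps to (1 << 16) * 3 - 1 = 196607,
 * i.e. just under 3.0 in 16.16 fixed point.
 */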
13387 max_scale = min((1 << 16) * 3 - 1, (1 << 8) * ((cdclk << 8) / crtc_clock));
13388
13389 return max_scale;
13390 }
13391
13392 static int
13393 intel_check_primary_plane(struct drm_plane *plane,
13394 struct intel_crtc_state *crtc_state,
13395 struct intel_plane_state *state)
13396 {
13397 struct drm_crtc *crtc = state->base.crtc;
13398 struct drm_framebuffer *fb = state->base.fb;
13399 int min_scale = DRM_PLANE_HELPER_NO_SCALING;
13400 int max_scale = DRM_PLANE_HELPER_NO_SCALING;
13401 bool can_position = false;
13402
13403 if (INTEL_INFO(plane->dev)->gen >= 9) {
13404 /* use scaler when colorkey is not required */
13405 if (state->ckey.flags == I915_SET_COLORKEY_NONE) {
13406 min_scale = 1;
13407 max_scale = skl_max_scale(to_intel_crtc(crtc), crtc_state);
13408 }
13409 can_position = true;
13410 }
13411
13412 return drm_plane_helper_check_update(plane, crtc, fb, &state->src,
13413 &state->dst, &state->clip,
13414 min_scale, max_scale,
13415 can_position, true,
13416 &state->visible);
13417 }
13418
13419 static void intel_begin_crtc_commit(struct drm_crtc *crtc,
13420 struct drm_crtc_state *old_crtc_state)
13421 {
13422 struct drm_device *dev = crtc->dev;
13423 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
13424 struct intel_crtc_state *old_intel_state =
13425 to_intel_crtc_state(old_crtc_state);
13426 bool modeset = needs_modeset(crtc->state);
13427
13428 /* Perform vblank evasion around commit operation */
13429 intel_pipe_update_start(intel_crtc);
13430
13431 if (modeset)
13432 return;
13433
13434 if (crtc->state->color_mgmt_changed || to_intel_crtc_state(crtc->state)->update_pipe) {
13435 intel_color_set_csc(crtc->state);
13436 intel_color_load_luts(crtc->state);
13437 }
13438
13439 if (to_intel_crtc_state(crtc->state)->update_pipe)
13440 intel_update_pipe_config(intel_crtc, old_intel_state);
13441 else if (INTEL_INFO(dev)->gen >= 9)
13442 skl_detach_scalers(intel_crtc);
13443 }
13444
13445 static void intel_finish_crtc_commit(struct drm_crtc *crtc,
13446 struct drm_crtc_state *old_crtc_state)
13447 {
13448 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
13449
13450 intel_pipe_update_end(intel_crtc, NULL);
13451 }
13452
13453 /**
13454 * intel_plane_destroy - destroy a plane
13455 * @plane: plane to destroy
13456 *
13457 * Common destruction function for all types of planes (primary, cursor,
13458 * sprite).
13459 */
13460 void intel_plane_destroy(struct drm_plane *plane)
13461 {
13462 struct intel_plane *intel_plane = to_intel_plane(plane);
13463 drm_plane_cleanup(plane);
13464 kfree(intel_plane);
13465 }
13466
13467 const struct drm_plane_funcs intel_plane_funcs = {
13468 .update_plane = drm_atomic_helper_update_plane,
13469 .disable_plane = drm_atomic_helper_disable_plane,
13470 .destroy = intel_plane_destroy,
13471 .set_property = drm_atomic_helper_plane_set_property,
13472 .atomic_get_property = intel_plane_atomic_get_property,
13473 .atomic_set_property = intel_plane_atomic_set_property,
13474 .atomic_duplicate_state = intel_plane_duplicate_state,
13475 .atomic_destroy_state = intel_plane_destroy_state,
13476
13477 };
13478
13479 static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
13480 int pipe)
13481 {
13482 struct intel_plane *primary = NULL;
13483 struct intel_plane_state *state = NULL;
13484 const uint32_t *intel_primary_formats;
13485 unsigned int num_formats;
13486 int ret;
13487
13488 primary = kzalloc(sizeof(*primary), GFP_KERNEL);
13489 if (!primary)
13490 goto fail;
13491
13492 state = intel_create_plane_state(&primary->base);
13493 if (!state)
13494 goto fail;
13495 primary->base.state = &state->base;
13496
13497 primary->can_scale = false;
13498 primary->max_downscale = 1;
13499 if (INTEL_INFO(dev)->gen >= 9) {
13500 primary->can_scale = true;
13501 state->scaler_id = -1;
13502 }
13503 primary->pipe = pipe;
13504 primary->plane = pipe;
13505 primary->frontbuffer_bit = INTEL_FRONTBUFFER_PRIMARY(pipe);
13506 primary->check_plane = intel_check_primary_plane;
13507 if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4)
13508 primary->plane = !pipe;
13509
13510 if (INTEL_INFO(dev)->gen >= 9) {
13511 intel_primary_formats = skl_primary_formats;
13512 num_formats = ARRAY_SIZE(skl_primary_formats);
13513
13514 primary->update_plane = skylake_update_primary_plane;
13515 primary->disable_plane = skylake_disable_primary_plane;
13516 } else if (HAS_PCH_SPLIT(dev)) {
13517 intel_primary_formats = i965_primary_formats;
13518 num_formats = ARRAY_SIZE(i965_primary_formats);
13519
13520 primary->update_plane = ironlake_update_primary_plane;
13521 primary->disable_plane = i9xx_disable_primary_plane;
13522 } else if (INTEL_INFO(dev)->gen >= 4) {
13523 intel_primary_formats = i965_primary_formats;
13524 num_formats = ARRAY_SIZE(i965_primary_formats);
13525
13526 primary->update_plane = i9xx_update_primary_plane;
13527 primary->disable_plane = i9xx_disable_primary_plane;
13528 } else {
13529 intel_primary_formats = i8xx_primary_formats;
13530 num_formats = ARRAY_SIZE(i8xx_primary_formats);
13531
13532 primary->update_plane = i9xx_update_primary_plane;
13533 primary->disable_plane = i9xx_disable_primary_plane;
13534 }
13535
13536 ret = drm_universal_plane_init(dev, &primary->base, 0,
13537 &intel_plane_funcs,
13538 intel_primary_formats, num_formats,
13539 DRM_PLANE_TYPE_PRIMARY, NULL);
13540 if (ret)
13541 goto fail;
13542
13543 if (INTEL_INFO(dev)->gen >= 4)
13544 intel_create_rotation_property(dev, primary);
13545
13546 drm_plane_helper_add(&primary->base, &intel_plane_helper_funcs);
13547
13548 return &primary->base;
13549
13550 fail:
13551 kfree(state);
13552 kfree(primary);
13553
13554 return NULL;
13555 }
13556
13557 void intel_create_rotation_property(struct drm_device *dev, struct intel_plane *plane)
13558 {
13559 if (!dev->mode_config.rotation_property) {
13560 unsigned long flags = BIT(DRM_ROTATE_0) |
13561 BIT(DRM_ROTATE_180);
13562
13563 if (INTEL_INFO(dev)->gen >= 9)
13564 flags |= BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270);
13565
13566 dev->mode_config.rotation_property =
13567 drm_mode_create_rotation_property(dev, flags);
13568 }
13569 if (dev->mode_config.rotation_property)
13570 drm_object_attach_property(&plane->base.base,
13571 dev->mode_config.rotation_property,
13572 plane->base.state->rotation);
13573 }
13574
13575 static int
13576 intel_check_cursor_plane(struct drm_plane *plane,
13577 struct intel_crtc_state *crtc_state,
13578 struct intel_plane_state *state)
13579 {
13580 struct drm_crtc *crtc = crtc_state->base.crtc;
13581 struct drm_framebuffer *fb = state->base.fb;
13582 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
13583 enum pipe pipe = to_intel_plane(plane)->pipe;
13584 unsigned stride;
13585 int ret;
13586
13587 ret = drm_plane_helper_check_update(plane, crtc, fb, &state->src,
13588 &state->dst, &state->clip,
13589 DRM_PLANE_HELPER_NO_SCALING,
13590 DRM_PLANE_HELPER_NO_SCALING,
13591 true, true, &state->visible);
13592 if (ret)
13593 return ret;
13594
13595 /* if we want to turn off the cursor, ignore width and height */
13596 if (!obj)
13597 return 0;
13598
13599 /* Check for which cursor types we support */
13600 if (!cursor_size_ok(plane->dev, state->base.crtc_w, state->base.crtc_h)) {
13601 DRM_DEBUG("Cursor dimension %dx%d not supported\n",
13602 state->base.crtc_w, state->base.crtc_h);
13603 return -EINVAL;
13604 }
13605
13606 stride = roundup_pow_of_two(state->base.crtc_w) * 4;
13607 if (obj->base.size < stride * state->base.crtc_h) {
13608 DRM_DEBUG_KMS("buffer is too small\n");
13609 return -ENOMEM;
13610 }
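/*
 * E.g. (illustrative): a 100x100 cursor rounds up to a 128-pixel-wide,
 * 4 bytes/pixel scanout, so the backing object must be at least
 * 128 * 4 * 100 = 51200 bytes.
 */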
13611
13612 if (fb->modifier[0] != DRM_FORMAT_MOD_NONE) {
13613 DRM_DEBUG_KMS("cursor cannot be tiled\n");
13614 return -EINVAL;
13615 }
13616
13617 /*
13618 * There's something wrong with the cursor on CHV pipe C.
13619 * If it straddles the left edge of the screen then
13620 * moving it away from the edge or disabling it often
13621 * results in a pipe underrun, and often that can lead to
13622 * dead pipe (constant underrun reported, and it scans
13623 * out just a solid color). To recover from that, the
13624 * display power well must be turned off and on again.
13625 * Refuse to put the cursor into that compromised position.
13626 */
13627 if (IS_CHERRYVIEW(plane->dev) && pipe == PIPE_C &&
13628 state->visible && state->base.crtc_x < 0) {
13629 DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
13630 return -EINVAL;
13631 }
13632
13633 return 0;
13634 }
13635
13636 static void
13637 intel_disable_cursor_plane(struct drm_plane *plane,
13638 struct drm_crtc *crtc)
13639 {
13640 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
13641
13642 intel_crtc->cursor_addr = 0;
13643 intel_crtc_update_cursor(crtc, NULL);
13644 }
13645
13646 static void
13647 intel_update_cursor_plane(struct drm_plane *plane,
13648 const struct intel_crtc_state *crtc_state,
13649 const struct intel_plane_state *state)
13650 {
13651 struct drm_crtc *crtc = crtc_state->base.crtc;
13652 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
13653 struct drm_device *dev = plane->dev;
13654 struct drm_i915_gem_object *obj = intel_fb_obj(state->base.fb);
13655 uint32_t addr;
13656
13657 if (!obj)
13658 addr = 0;
13659 else if (!INTEL_INFO(dev)->cursor_needs_physical)
13660 addr = i915_gem_obj_ggtt_offset(obj);
13661 else
13662 addr = obj->phys_handle->busaddr;
13663
13664 intel_crtc->cursor_addr = addr;
13665 intel_crtc_update_cursor(crtc, state);
13666 }
13667
13668 static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
13669 int pipe)
13670 {
13671 struct intel_plane *cursor = NULL;
13672 struct intel_plane_state *state = NULL;
13673 int ret;
13674
13675 cursor = kzalloc(sizeof(*cursor), GFP_KERNEL);
13676 if (!cursor)
13677 goto fail;
13678
13679 state = intel_create_plane_state(&cursor->base);
13680 if (!state)
13681 goto fail;
13682 cursor->base.state = &state->base;
13683
13684 cursor->can_scale = false;
13685 cursor->max_downscale = 1;
13686 cursor->pipe = pipe;
13687 cursor->plane = pipe;
13688 cursor->frontbuffer_bit = INTEL_FRONTBUFFER_CURSOR(pipe);
13689 cursor->check_plane = intel_check_cursor_plane;
13690 cursor->update_plane = intel_update_cursor_plane;
13691 cursor->disable_plane = intel_disable_cursor_plane;
13692
13693 ret = drm_universal_plane_init(dev, &cursor->base, 0,
13694 &intel_plane_funcs,
13695 intel_cursor_formats,
13696 ARRAY_SIZE(intel_cursor_formats),
13697 DRM_PLANE_TYPE_CURSOR, NULL);
13698 if (ret)
13699 goto fail;
13700
13701 if (INTEL_INFO(dev)->gen >= 4) {
13702 if (!dev->mode_config.rotation_property)
13703 dev->mode_config.rotation_property =
13704 drm_mode_create_rotation_property(dev,
13705 BIT(DRM_ROTATE_0) |
13706 BIT(DRM_ROTATE_180));
13707 if (dev->mode_config.rotation_property)
13708 drm_object_attach_property(&cursor->base.base,
13709 dev->mode_config.rotation_property,
13710 state->base.rotation);
13711 }
13712
13713 if (INTEL_INFO(dev)->gen >= 9)
13714 state->scaler_id = -1;
13715
13716 drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);
13717
13718 return &cursor->base;
13719
13720 fail:
13721 kfree(state);
13722 kfree(cursor);
13723
13724 return NULL;
13725 }
13726
13727 static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc,
13728 struct intel_crtc_state *crtc_state)
13729 {
13730 int i;
13731 struct intel_scaler *intel_scaler;
13732 struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;
13733
13734 for (i = 0; i < intel_crtc->num_scalers; i++) {
13735 intel_scaler = &scaler_state->scalers[i];
13736 intel_scaler->in_use = 0;
13737 intel_scaler->mode = PS_SCALER_MODE_DYN;
13738 }
13739
13740 scaler_state->scaler_id = -1;
13741 }
13742
13743 static void intel_crtc_init(struct drm_device *dev, int pipe)
13744 {
13745 struct drm_i915_private *dev_priv = dev->dev_private;
13746 struct intel_crtc *intel_crtc;
13747 struct intel_crtc_state *crtc_state = NULL;
13748 struct drm_plane *primary = NULL;
13749 struct drm_plane *cursor = NULL;
13750 int ret;
13751
13752 intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
13753 if (intel_crtc == NULL)
13754 return;
13755
13756 crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
13757 if (!crtc_state)
13758 goto fail;
13759 intel_crtc->config = crtc_state;
13760 intel_crtc->base.state = &crtc_state->base;
13761 crtc_state->base.crtc = &intel_crtc->base;
13762
13763 INIT_LIST_HEAD(&intel_crtc->flip_work);
13764
13765 /* initialize shared scalers */
13766 if (INTEL_INFO(dev)->gen >= 9) {
13767 if (pipe == PIPE_C)
13768 intel_crtc->num_scalers = 1;
13769 else
13770 intel_crtc->num_scalers = SKL_NUM_SCALERS;
13771
13772 skl_init_scalers(dev, intel_crtc, crtc_state);
13773 }
13774
13775 primary = intel_primary_plane_create(dev, pipe);
13776 if (!primary)
13777 goto fail;
13778
13779 cursor = intel_cursor_plane_create(dev, pipe);
13780 if (!cursor)
13781 goto fail;
13782
13783 ret = drm_crtc_init_with_planes(dev, &intel_crtc->base, primary,
13784 cursor, &intel_crtc_funcs, NULL);
13785 if (ret)
13786 goto fail;
13787
13788 /*
13789 * On gen2/3 only plane A can do fbc, but the panel fitter and lvds port
13790 * is hooked to pipe B. Hence we want plane A feeding pipe B.
13791 */
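/*
 * E.g. (illustrative): for pipe B (pipe == 1) on such hardware, the
 * FBC swap below yields plane == 0, i.e. plane A feeding pipe B.
 */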
13792 intel_crtc->pipe = pipe;
13793 intel_crtc->plane = pipe;
13794 if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4) {
13795 DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
13796 intel_crtc->plane = !pipe;
13797 }
13798
13799 intel_crtc->cursor_base = ~0;
13800 intel_crtc->cursor_cntl = ~0;
13801 intel_crtc->cursor_size = ~0;
13802
13803 intel_crtc->wm.cxsr_allowed = true;
13804
13805 BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
13806 dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
13807 dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
13808 dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;
13809
13810 drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
13811
13812 intel_color_init(&intel_crtc->base);
13813
13814 WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
13815 return;
13816
13817 fail:
13818 if (primary)
13819 drm_plane_cleanup(primary);
13820 if (cursor)
13821 drm_plane_cleanup(cursor);
13822 kfree(crtc_state);
13823 kfree(intel_crtc);
13824 }
13825
13826 enum pipe intel_get_pipe_from_connector(struct intel_connector *connector)
13827 {
13828 struct drm_encoder *encoder = connector->base.encoder;
13829 struct drm_device *dev = connector->base.dev;
13830
13831 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
13832
13833 if (!encoder || WARN_ON(!encoder->crtc))
13834 return INVALID_PIPE;
13835
13836 return to_intel_crtc(encoder->crtc)->pipe;
13837 }
13838
13839 int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
13840 struct drm_file *file)
13841 {
13842 struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
13843 struct drm_crtc *drmmode_crtc;
13844 struct intel_crtc *crtc;
13845
13846 drmmode_crtc = drm_crtc_find(dev, pipe_from_crtc_id->crtc_id);
13847
13848 if (!drmmode_crtc) {
13849 DRM_ERROR("no such CRTC id\n");
13850 return -ENOENT;
13851 }
13852
13853 crtc = to_intel_crtc(drmmode_crtc);
13854 pipe_from_crtc_id->pipe = crtc->pipe;
13855
13856 return 0;
13857 }
13858
13859 static int intel_encoder_clones(struct intel_encoder *encoder)
13860 {
13861 struct drm_device *dev = encoder->base.dev;
13862 struct intel_encoder *source_encoder;
13863 int index_mask = 0;
13864 int entry = 0;
13865
13866 for_each_intel_encoder(dev, source_encoder) {
13867 if (encoders_cloneable(encoder, source_encoder))
13868 index_mask |= (1 << entry);
13869
13870 entry++;
13871 }
13872
13873 return index_mask;
13874 }
13875
13876 static bool has_edp_a(struct drm_device *dev)
13877 {
13878 struct drm_i915_private *dev_priv = dev->dev_private;
13879
13880 if (!IS_MOBILE(dev))
13881 return false;
13882
13883 if ((I915_READ(DP_A) & DP_DETECTED) == 0)
13884 return false;
13885
13886 if (IS_GEN5(dev) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
13887 return false;
13888
13889 return true;
13890 }
13891
13892 static bool intel_crt_present(struct drm_device *dev)
13893 {
13894 struct drm_i915_private *dev_priv = dev->dev_private;
13895
13896 if (INTEL_INFO(dev)->gen >= 9)
13897 return false;
13898
13899 if (IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
13900 return false;
13901
13902 if (IS_CHERRYVIEW(dev))
13903 return false;
13904
13905 if (HAS_PCH_LPT_H(dev) && I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
13906 return false;
13907
13908 /* DDI E can't be used if DDI A requires 4 lanes */
13909 if (HAS_DDI(dev) && I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
13910 return false;
13911
13912 if (!dev_priv->vbt.int_crt_support)
13913 return false;
13914
13915 return true;
13916 }
13917
13918 static void intel_setup_outputs(struct drm_device *dev)
13919 {
13920 struct drm_i915_private *dev_priv = dev->dev_private;
13921 struct intel_encoder *encoder;
13922 bool dpd_is_edp = false;
13923
13924 intel_lvds_init(dev);
13925
13926 if (intel_crt_present(dev))
13927 intel_crt_init(dev);
13928
13929 if (IS_BROXTON(dev)) {
13930 /*
13931 * FIXME: Broxton doesn't support port detection via the
13932 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
13933 * detect the ports.
13934 */
13935 intel_ddi_init(dev, PORT_A);
13936 intel_ddi_init(dev, PORT_B);
13937 intel_ddi_init(dev, PORT_C);
13938
13939 intel_dsi_init(dev);
13940 } else if (HAS_DDI(dev)) {
13941 int found;
13942
13943 /*
13944 * Haswell uses DDI functions to detect digital outputs.
13945 * On SKL pre-D0 the strap isn't connected, so we assume
13946 * it's there.
13947 */
13948 found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
13949 /* WaIgnoreDDIAStrap: skl */
13950 if (found || IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
13951 intel_ddi_init(dev, PORT_A);
13952
13953 /* DDI B, C and D detection is indicated by the SFUSE_STRAP
13954 * register */
13955 found = I915_READ(SFUSE_STRAP);
13956
13957 if (found & SFUSE_STRAP_DDIB_DETECTED)
13958 intel_ddi_init(dev, PORT_B);
13959 if (found & SFUSE_STRAP_DDIC_DETECTED)
13960 intel_ddi_init(dev, PORT_C);
13961 if (found & SFUSE_STRAP_DDID_DETECTED)
13962 intel_ddi_init(dev, PORT_D);
13963 /*
13964 * On SKL we don't have a way to detect DDI-E so we rely on VBT.
13965 */
13966 if ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) &&
13967 (dev_priv->vbt.ddi_port_info[PORT_E].supports_dp ||
13968 dev_priv->vbt.ddi_port_info[PORT_E].supports_dvi ||
13969 dev_priv->vbt.ddi_port_info[PORT_E].supports_hdmi))
13970 intel_ddi_init(dev, PORT_E);
13971
13972 } else if (HAS_PCH_SPLIT(dev)) {
13973 int found;
13974 dpd_is_edp = intel_dp_is_edp(dev, PORT_D);
13975
13976 if (has_edp_a(dev))
13977 intel_dp_init(dev, DP_A, PORT_A);
13978
13979 if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
13980 /* PCH SDVOB is multiplexed with HDMIB */
13981 found = intel_sdvo_init(dev, PCH_SDVOB, PORT_B);
13982 if (!found)
13983 intel_hdmi_init(dev, PCH_HDMIB, PORT_B);
13984 if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
13985 intel_dp_init(dev, PCH_DP_B, PORT_B);
13986 }
13987
13988 if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
13989 intel_hdmi_init(dev, PCH_HDMIC, PORT_C);
13990
13991 if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
13992 intel_hdmi_init(dev, PCH_HDMID, PORT_D);
13993
13994 if (I915_READ(PCH_DP_C) & DP_DETECTED)
13995 intel_dp_init(dev, PCH_DP_C, PORT_C);
13996
13997 if (I915_READ(PCH_DP_D) & DP_DETECTED)
13998 intel_dp_init(dev, PCH_DP_D, PORT_D);
13999 } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
14000 /*
14001 * The DP_DETECTED bit is the latched state of the DDC
14002 * SDA pin at boot. However since eDP doesn't require DDC
14003 * (no way to plug in a DP->HDMI dongle) the DDC pins for
14004 * eDP ports may have been muxed to an alternate function.
14005 * Thus we can't rely on the DP_DETECTED bit alone to detect
14006 * eDP ports. Consult the VBT as well as DP_DETECTED to
14007 * detect eDP ports.
14008 */
14009 if (I915_READ(VLV_HDMIB) & SDVO_DETECTED &&
14010 !intel_dp_is_edp(dev, PORT_B))
14011 intel_hdmi_init(dev, VLV_HDMIB, PORT_B);
14012 if (I915_READ(VLV_DP_B) & DP_DETECTED ||
14013 intel_dp_is_edp(dev, PORT_B))
14014 intel_dp_init(dev, VLV_DP_B, PORT_B);
14015
14016 if (I915_READ(VLV_HDMIC) & SDVO_DETECTED &&
14017 !intel_dp_is_edp(dev, PORT_C))
14018 intel_hdmi_init(dev, VLV_HDMIC, PORT_C);
14019 if (I915_READ(VLV_DP_C) & DP_DETECTED ||
14020 intel_dp_is_edp(dev, PORT_C))
14021 intel_dp_init(dev, VLV_DP_C, PORT_C);
14022
14023 if (IS_CHERRYVIEW(dev)) {
14024 /* eDP not supported on port D, so don't check VBT */
14025 if (I915_READ(CHV_HDMID) & SDVO_DETECTED)
14026 intel_hdmi_init(dev, CHV_HDMID, PORT_D);
14027 if (I915_READ(CHV_DP_D) & DP_DETECTED)
14028 intel_dp_init(dev, CHV_DP_D, PORT_D);
14029 }
14030
14031 intel_dsi_init(dev);
14032 } else if (!IS_GEN2(dev) && !IS_PINEVIEW(dev)) {
14033 bool found = false;
14034
14035 if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
14036 DRM_DEBUG_KMS("probing SDVOB\n");
14037 found = intel_sdvo_init(dev, GEN3_SDVOB, PORT_B);
14038 if (!found && IS_G4X(dev)) {
14039 DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
14040 intel_hdmi_init(dev, GEN4_HDMIB, PORT_B);
14041 }
14042
14043 if (!found && IS_G4X(dev))
14044 intel_dp_init(dev, DP_B, PORT_B);
14045 }
14046
14047 /* Before G4X, SDVOC doesn't have its own detect register */
14048
14049 if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
14050 DRM_DEBUG_KMS("probing SDVOC\n");
14051 found = intel_sdvo_init(dev, GEN3_SDVOC, PORT_C);
14052 }
14053
14054 if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {
14055
14056 if (IS_G4X(dev)) {
14057 DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
14058 intel_hdmi_init(dev, GEN4_HDMIC, PORT_C);
14059 }
14060 if (IS_G4X(dev))
14061 intel_dp_init(dev, DP_C, PORT_C);
14062 }
14063
14064 if (IS_G4X(dev) &&
14065 (I915_READ(DP_D) & DP_DETECTED))
14066 intel_dp_init(dev, DP_D, PORT_D);
14067 } else if (IS_GEN2(dev))
14068 intel_dvo_init(dev);
14069
14070 if (SUPPORTS_TV(dev))
14071 intel_tv_init(dev);
14072
14073 intel_psr_init(dev);
14074
14075 for_each_intel_encoder(dev, encoder) {
14076 encoder->base.possible_crtcs = encoder->crtc_mask;
14077 encoder->base.possible_clones =
14078 intel_encoder_clones(encoder);
14079 }
14080
14081 intel_init_pch_refclk(dev);
14082
14083 drm_helper_move_panel_connectors_to_head(dev);
14084 }
14085
14086 static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
14087 {
14088 struct drm_device *dev = fb->dev;
14089 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
14090
14091 drm_framebuffer_cleanup(fb);
14092 mutex_lock(&dev->struct_mutex);
14093 WARN_ON(!intel_fb->obj->framebuffer_references--);
14094 drm_gem_object_unreference(&intel_fb->obj->base);
14095 mutex_unlock(&dev->struct_mutex);
14096 kfree(intel_fb);
14097 }
14098
14099 static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
14100 struct drm_file *file,
14101 unsigned int *handle)
14102 {
14103 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
14104 struct drm_i915_gem_object *obj = intel_fb->obj;
14105
14106 if (obj->userptr.mm) {
14107 DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n");
14108 return -EINVAL;
14109 }
14110
14111 return drm_gem_handle_create(file, &obj->base, handle);
14112 }
14113
14114 static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
14115 struct drm_file *file,
14116 unsigned flags, unsigned color,
14117 struct drm_clip_rect *clips,
14118 unsigned num_clips)
14119 {
14120 struct drm_device *dev = fb->dev;
14121 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
14122 struct drm_i915_gem_object *obj = intel_fb->obj;
14123
14124 mutex_lock(&dev->struct_mutex);
14125 intel_fb_obj_flush(obj, false, ORIGIN_DIRTYFB);
14126 mutex_unlock(&dev->struct_mutex);
14127
14128 return 0;
14129 }
14130
14131 static const struct drm_framebuffer_funcs intel_fb_funcs = {
14132 .destroy = intel_user_framebuffer_destroy,
14133 .create_handle = intel_user_framebuffer_create_handle,
14134 .dirty = intel_user_framebuffer_dirty,
14135 };
14136
14137 static
14138 u32 intel_fb_pitch_limit(struct drm_device *dev, uint64_t fb_modifier,
14139 uint32_t pixel_format)
14140 {
14141 u32 gen = INTEL_INFO(dev)->gen;
14142
14143 if (gen >= 9) {
14144 int cpp = drm_format_plane_cpp(pixel_format, 0);
14145
14146 /* "The stride in bytes must not exceed the of the size of 8K
14147 * pixels and 32K bytes."
14148 */
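/*
 * E.g. (illustrative): a 32bpp format has cpp = 4, hitting the
 * min(8192 * 4, 32768) = 32768 byte cap, while 8bpp C8 is limited
 * to min(8192 * 1, 32768) = 8192 bytes.
 */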
14149 return min(8192 * cpp, 32768);
14150 } else if (gen >= 5 && !IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
14151 return 32*1024;
14152 } else if (gen >= 4) {
14153 if (fb_modifier == I915_FORMAT_MOD_X_TILED)
14154 return 16*1024;
14155 else
14156 return 32*1024;
14157 } else if (gen >= 3) {
14158 if (fb_modifier == I915_FORMAT_MOD_X_TILED)
14159 return 8*1024;
14160 else
14161 return 16*1024;
14162 } else {
14163 /* XXX DSPC is limited to 4k tiled */
14164 return 8*1024;
14165 }
14166 }
14167
14168 static int intel_framebuffer_init(struct drm_device *dev,
14169 struct intel_framebuffer *intel_fb,
14170 struct drm_mode_fb_cmd2 *mode_cmd,
14171 struct drm_i915_gem_object *obj)
14172 {
14173 struct drm_i915_private *dev_priv = to_i915(dev);
14174 unsigned int aligned_height;
14175 int ret;
14176 u32 pitch_limit, stride_alignment;
14177
14178 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
14179
14180 if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
14181 /* Enforce that fb modifier and tiling mode match, but only for
14182 * X-tiled. This is needed for FBC. */
14183 if (!!(obj->tiling_mode == I915_TILING_X) !=
14184 !!(mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED)) {
14185 DRM_DEBUG("tiling_mode doesn't match fb modifier\n");
14186 return -EINVAL;
14187 }
14188 } else {
14189 if (obj->tiling_mode == I915_TILING_X)
14190 mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
14191 else if (obj->tiling_mode == I915_TILING_Y) {
14192 DRM_DEBUG("No Y tiling for legacy addfb\n");
14193 return -EINVAL;
14194 }
14195 }
14196
14197 /* Sanity check the passed in modifier. */
14198 switch (mode_cmd->modifier[0]) {
14199 case I915_FORMAT_MOD_Y_TILED:
14200 case I915_FORMAT_MOD_Yf_TILED:
14201 if (INTEL_INFO(dev)->gen < 9) {
14202 DRM_DEBUG("Unsupported tiling 0x%llx!\n",
14203 mode_cmd->modifier[0]);
14204 return -EINVAL;
14205 }
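/* fall through */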
14206 case DRM_FORMAT_MOD_NONE:
14207 case I915_FORMAT_MOD_X_TILED:
14208 break;
14209 default:
14210 DRM_DEBUG("Unsupported fb modifier 0x%llx!\n",
14211 mode_cmd->modifier[0]);
14212 return -EINVAL;
14213 }
14214
14215 stride_alignment = intel_fb_stride_alignment(dev_priv,
14216 mode_cmd->modifier[0],
14217 mode_cmd->pixel_format);
14218 if (mode_cmd->pitches[0] & (stride_alignment - 1)) {
14219 DRM_DEBUG("pitch (%d) must be at least %u byte aligned\n",
14220 mode_cmd->pitches[0], stride_alignment);
14221 return -EINVAL;
14222 }
14223
14224 pitch_limit = intel_fb_pitch_limit(dev, mode_cmd->modifier[0],
14225 mode_cmd->pixel_format);
14226 if (mode_cmd->pitches[0] > pitch_limit) {
14227 DRM_DEBUG("%s pitch (%u) must be at less than %d\n",
14228 mode_cmd->modifier[0] != DRM_FORMAT_MOD_NONE ?
14229 "tiled" : "linear",
14230 mode_cmd->pitches[0], pitch_limit);
14231 return -EINVAL;
14232 }
14233
14234 if (mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED &&
14235 mode_cmd->pitches[0] != obj->stride) {
14236 DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n",
14237 mode_cmd->pitches[0], obj->stride);
14238 return -EINVAL;
14239 }
14240
14241 /* Reject formats not supported by any plane early. */
14242 switch (mode_cmd->pixel_format) {
14243 case DRM_FORMAT_C8:
14244 case DRM_FORMAT_RGB565:
14245 case DRM_FORMAT_XRGB8888:
14246 case DRM_FORMAT_ARGB8888:
14247 break;
14248 case DRM_FORMAT_XRGB1555:
14249 if (INTEL_INFO(dev)->gen > 3) {
14250 DRM_DEBUG("unsupported pixel format: %s\n",
14251 drm_get_format_name(mode_cmd->pixel_format));
14252 return -EINVAL;
14253 }
14254 break;
14255 case DRM_FORMAT_ABGR8888:
14256 if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) &&
14257 INTEL_INFO(dev)->gen < 9) {
14258 DRM_DEBUG("unsupported pixel format: %s\n",
14259 drm_get_format_name(mode_cmd->pixel_format));
14260 return -EINVAL;
14261 }
14262 break;
14263 case DRM_FORMAT_XBGR8888:
14264 case DRM_FORMAT_XRGB2101010:
14265 case DRM_FORMAT_XBGR2101010:
14266 if (INTEL_INFO(dev)->gen < 4) {
14267 DRM_DEBUG("unsupported pixel format: %s\n",
14268 drm_get_format_name(mode_cmd->pixel_format));
14269 return -EINVAL;
14270 }
14271 break;
14272 case DRM_FORMAT_ABGR2101010:
14273 if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
14274 DRM_DEBUG("unsupported pixel format: %s\n",
14275 drm_get_format_name(mode_cmd->pixel_format));
14276 return -EINVAL;
14277 }
14278 break;
14279 case DRM_FORMAT_YUYV:
14280 case DRM_FORMAT_UYVY:
14281 case DRM_FORMAT_YVYU:
14282 case DRM_FORMAT_VYUY:
14283 if (INTEL_INFO(dev)->gen < 5) {
14284 DRM_DEBUG("unsupported pixel format: %s\n",
14285 drm_get_format_name(mode_cmd->pixel_format));
14286 return -EINVAL;
14287 }
14288 break;
14289 default:
14290 DRM_DEBUG("unsupported pixel format: %s\n",
14291 drm_get_format_name(mode_cmd->pixel_format));
14292 return -EINVAL;
14293 }
14294
14295 /* FIXME need to adjust LINOFF/TILEOFF accordingly. */
14296 if (mode_cmd->offsets[0] != 0)
14297 return -EINVAL;
14298
14299 aligned_height = intel_fb_align_height(dev, mode_cmd->height,
14300 mode_cmd->pixel_format,
14301 mode_cmd->modifier[0]);
14302 /* FIXME drm helper for size checks (especially planar formats)? */
14303 if (obj->base.size < aligned_height * mode_cmd->pitches[0])
14304 return -EINVAL;
14305
14306 drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
14307 intel_fb->obj = obj;
14308
14309 intel_fill_fb_info(dev_priv, &intel_fb->base);
14310
14311 ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
14312 if (ret) {
14313 DRM_ERROR("framebuffer init failed %d\n", ret);
14314 return ret;
14315 }
14316
14317 intel_fb->obj->framebuffer_references++;
14318
14319 return 0;
14320 }
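/*
 * Illustrative sketch of a mode_cmd that passes the checks above
 * (all values hypothetical): a linear 1920x1080 XRGB8888 fb has a
 * 4-byte cpp, so it needs pitches[0] >= 7680, suitably aligned, and
 * offsets[0] == 0:
 *
 *	struct drm_mode_fb_cmd2 cmd = {
 *		.width = 1920, .height = 1080,
 *		.pixel_format = DRM_FORMAT_XRGB8888,
 *		.pitches = { 1920 * 4 },	// 7680 bytes
 *		.offsets = { 0 },		// required above
 *	};
 */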
14321
14322 static struct drm_framebuffer *
14323 intel_user_framebuffer_create(struct drm_device *dev,
14324 struct drm_file *filp,
14325 const struct drm_mode_fb_cmd2 *user_mode_cmd)
14326 {
14327 struct drm_framebuffer *fb;
14328 struct drm_i915_gem_object *obj;
14329 struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
14330
14331 obj = to_intel_bo(drm_gem_object_lookup(dev, filp,
14332 mode_cmd.handles[0]));
14333 if (&obj->base == NULL)
14334 return ERR_PTR(-ENOENT);
14335
14336 fb = intel_framebuffer_create(dev, &mode_cmd, obj);
14337 if (IS_ERR(fb))
14338 drm_gem_object_unreference_unlocked(&obj->base);
14339
14340 return fb;
14341 }
14342
14343 #ifndef CONFIG_DRM_FBDEV_EMULATION
14344 static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
14345 {
14346 }
14347 #endif
14348
14349 static const struct drm_mode_config_funcs intel_mode_funcs = {
14350 .fb_create = intel_user_framebuffer_create,
14351 .output_poll_changed = intel_fbdev_output_poll_changed,
14352 .atomic_check = intel_atomic_check,
14353 .atomic_commit = intel_atomic_commit,
14354 .atomic_state_alloc = intel_atomic_state_alloc,
14355 .atomic_state_clear = intel_atomic_state_clear,
14356 };
14357
14358 /**
14359 * intel_init_display_hooks - initialize the display modesetting hooks
14360 * @dev_priv: device private
14361 */
14362 void intel_init_display_hooks(struct drm_i915_private *dev_priv)
14363 {
14364 if (INTEL_INFO(dev_priv)->gen >= 9) {
14365 dev_priv->display.get_pipe_config = haswell_get_pipe_config;
14366 dev_priv->display.get_initial_plane_config =
14367 skylake_get_initial_plane_config;
14368 dev_priv->display.crtc_compute_clock =
14369 haswell_crtc_compute_clock;
14370 dev_priv->display.crtc_enable = haswell_crtc_enable;
14371 dev_priv->display.crtc_disable = haswell_crtc_disable;
14372 } else if (HAS_DDI(dev_priv)) {
14373 dev_priv->display.get_pipe_config = haswell_get_pipe_config;
14374 dev_priv->display.get_initial_plane_config =
14375 ironlake_get_initial_plane_config;
14376 dev_priv->display.crtc_compute_clock =
14377 haswell_crtc_compute_clock;
14378 dev_priv->display.crtc_enable = haswell_crtc_enable;
14379 dev_priv->display.crtc_disable = haswell_crtc_disable;
14380 } else if (HAS_PCH_SPLIT(dev_priv)) {
14381 dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
14382 dev_priv->display.get_initial_plane_config =
14383 ironlake_get_initial_plane_config;
14384 dev_priv->display.crtc_compute_clock =
14385 ironlake_crtc_compute_clock;
14386 dev_priv->display.crtc_enable = ironlake_crtc_enable;
14387 dev_priv->display.crtc_disable = ironlake_crtc_disable;
14388 } else if (IS_CHERRYVIEW(dev_priv)) {
14389 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
14390 dev_priv->display.get_initial_plane_config =
14391 i9xx_get_initial_plane_config;
14392 dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock;
14393 dev_priv->display.crtc_enable = valleyview_crtc_enable;
14394 dev_priv->display.crtc_disable = i9xx_crtc_disable;
14395 } else if (IS_VALLEYVIEW(dev_priv)) {
14396 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
14397 dev_priv->display.get_initial_plane_config =
14398 i9xx_get_initial_plane_config;
14399 dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock;
14400 dev_priv->display.crtc_enable = valleyview_crtc_enable;
14401 dev_priv->display.crtc_disable = i9xx_crtc_disable;
14402 } else if (IS_G4X(dev_priv)) {
14403 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
14404 dev_priv->display.get_initial_plane_config =
14405 i9xx_get_initial_plane_config;
14406 dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock;
14407 dev_priv->display.crtc_enable = i9xx_crtc_enable;
14408 dev_priv->display.crtc_disable = i9xx_crtc_disable;
14409 } else if (IS_PINEVIEW(dev_priv)) {
14410 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
14411 dev_priv->display.get_initial_plane_config =
14412 i9xx_get_initial_plane_config;
14413 dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock;
14414 dev_priv->display.crtc_enable = i9xx_crtc_enable;
14415 dev_priv->display.crtc_disable = i9xx_crtc_disable;
14416 } else if (!IS_GEN2(dev_priv)) {
14417 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
14418 dev_priv->display.get_initial_plane_config =
14419 i9xx_get_initial_plane_config;
14420 dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
14421 dev_priv->display.crtc_enable = i9xx_crtc_enable;
14422 dev_priv->display.crtc_disable = i9xx_crtc_disable;
14423 } else {
14424 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
14425 dev_priv->display.get_initial_plane_config =
14426 i9xx_get_initial_plane_config;
14427 dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock;
14428 dev_priv->display.crtc_enable = i9xx_crtc_enable;
14429 dev_priv->display.crtc_disable = i9xx_crtc_disable;
14430 }
14431
14432 /* Returns the core display clock speed */
14433 if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
14434 dev_priv->display.get_display_clock_speed =
14435 skylake_get_display_clock_speed;
14436 else if (IS_BROXTON(dev_priv))
14437 dev_priv->display.get_display_clock_speed =
14438 broxton_get_display_clock_speed;
14439 else if (IS_BROADWELL(dev_priv))
14440 dev_priv->display.get_display_clock_speed =
14441 broadwell_get_display_clock_speed;
14442 else if (IS_HASWELL(dev_priv))
14443 dev_priv->display.get_display_clock_speed =
14444 haswell_get_display_clock_speed;
14445 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
14446 dev_priv->display.get_display_clock_speed =
14447 valleyview_get_display_clock_speed;
14448 else if (IS_GEN5(dev_priv))
14449 dev_priv->display.get_display_clock_speed =
14450 ilk_get_display_clock_speed;
14451 else if (IS_I945G(dev_priv) || IS_BROADWATER(dev_priv) ||
14452 IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv))
14453 dev_priv->display.get_display_clock_speed =
14454 i945_get_display_clock_speed;
14455 else if (IS_GM45(dev_priv))
14456 dev_priv->display.get_display_clock_speed =
14457 gm45_get_display_clock_speed;
14458 else if (IS_CRESTLINE(dev_priv))
14459 dev_priv->display.get_display_clock_speed =
14460 i965gm_get_display_clock_speed;
14461 else if (IS_PINEVIEW(dev_priv))
14462 dev_priv->display.get_display_clock_speed =
14463 pnv_get_display_clock_speed;
14464 else if (IS_G33(dev_priv) || IS_G4X(dev_priv))
14465 dev_priv->display.get_display_clock_speed =
14466 g33_get_display_clock_speed;
14467 else if (IS_I915G(dev_priv))
14468 dev_priv->display.get_display_clock_speed =
14469 i915_get_display_clock_speed;
14470 else if (IS_I945GM(dev_priv) || IS_845G(dev_priv))
14471 dev_priv->display.get_display_clock_speed =
14472 i9xx_misc_get_display_clock_speed;
14473 else if (IS_I915GM(dev_priv))
14474 dev_priv->display.get_display_clock_speed =
14475 i915gm_get_display_clock_speed;
14476 else if (IS_I865G(dev_priv))
14477 dev_priv->display.get_display_clock_speed =
14478 i865_get_display_clock_speed;
14479 else if (IS_I85X(dev_priv))
14480 dev_priv->display.get_display_clock_speed =
14481 i85x_get_display_clock_speed;
14482 else { /* 830 */
14483 WARN(!IS_I830(dev_priv), "Unknown platform. Assuming 133 MHz CDCLK\n");
14484 dev_priv->display.get_display_clock_speed =
14485 i830_get_display_clock_speed;
14486 }
14487
14488 if (IS_GEN5(dev_priv)) {
14489 dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
14490 } else if (IS_GEN6(dev_priv)) {
14491 dev_priv->display.fdi_link_train = gen6_fdi_link_train;
14492 } else if (IS_IVYBRIDGE(dev_priv)) {
14493 /* FIXME: detect B0+ stepping and use auto training */
14494 dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
14495 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
14496 dev_priv->display.fdi_link_train = hsw_fdi_link_train;
14497 }
14498
14499 if (IS_BROADWELL(dev_priv)) {
14500 dev_priv->display.modeset_commit_cdclk =
14501 broadwell_modeset_commit_cdclk;
14502 dev_priv->display.modeset_calc_cdclk =
14503 broadwell_modeset_calc_cdclk;
14504 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
14505 dev_priv->display.modeset_commit_cdclk =
14506 valleyview_modeset_commit_cdclk;
14507 dev_priv->display.modeset_calc_cdclk =
14508 valleyview_modeset_calc_cdclk;
14509 } else if (IS_BROXTON(dev_priv)) {
14510 dev_priv->display.modeset_commit_cdclk =
14511 broxton_modeset_commit_cdclk;
14512 dev_priv->display.modeset_calc_cdclk =
14513 broxton_modeset_calc_cdclk;
14514 }
14515 }
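/*
 * The hooks installed above are dispatched later by the modeset
 * code; a sketch of such a call site (the real ones live elsewhere
 * in the driver):
 *
 *	dev_priv->display.crtc_enable(&intel_crtc->base);
 *
 * so each platform family only pays for its own enable/disable and
 * clock-computation paths.
 */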
14516
14517 /*
14518 * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
14519 * resume, or other times. This quirk makes sure that's the case for
14520 * affected systems.
14521 */
14522 static void quirk_pipea_force(struct drm_device *dev)
14523 {
14524 struct drm_i915_private *dev_priv = dev->dev_private;
14525
14526 dev_priv->quirks |= QUIRK_PIPEA_FORCE;
14527 DRM_INFO("applying pipe a force quirk\n");
14528 }
14529
14530 static void quirk_pipeb_force(struct drm_device *dev)
14531 {
14532 struct drm_i915_private *dev_priv = dev->dev_private;
14533
14534 dev_priv->quirks |= QUIRK_PIPEB_FORCE;
14535 DRM_INFO("applying pipe b force quirk\n");
14536 }
14537
14538 /*
14539 * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
14540 */
14541 static void quirk_ssc_force_disable(struct drm_device *dev)
14542 {
14543 struct drm_i915_private *dev_priv = dev->dev_private;
14544 dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
14545 DRM_INFO("applying lvds SSC disable quirk\n");
14546 }
14547
14548 /*
14549 * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
14550 * brightness value
14551 */
14552 static void quirk_invert_brightness(struct drm_device *dev)
14553 {
14554 struct drm_i915_private *dev_priv = dev->dev_private;
14555 dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
14556 DRM_INFO("applying inverted panel brightness quirk\n");
14557 }
14558
14559 /* Some VBTs incorrectly indicate no backlight is present */
14560 static void quirk_backlight_present(struct drm_device *dev)
14561 {
14562 struct drm_i915_private *dev_priv = dev->dev_private;
14563 dev_priv->quirks |= QUIRK_BACKLIGHT_PRESENT;
14564 DRM_INFO("applying backlight present quirk\n");
14565 }
14566
14567 struct intel_quirk {
14568 int device;
14569 int subsystem_vendor;
14570 int subsystem_device;
14571 void (*hook)(struct drm_device *dev);
14572 };
14573
14574 /* For systems that don't have a meaningful PCI subdevice/subvendor ID */
14575 struct intel_dmi_quirk {
14576 void (*hook)(struct drm_device *dev);
14577 const struct dmi_system_id (*dmi_id_list)[];
14578 };
14579
14580 static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
14581 {
14582 DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
14583 return 1;
14584 }
14585
14586 static const struct intel_dmi_quirk intel_dmi_quirks[] = {
14587 {
14588 .dmi_id_list = &(const struct dmi_system_id[]) {
14589 {
14590 .callback = intel_dmi_reverse_brightness,
14591 .ident = "NCR Corporation",
14592 .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
14593 DMI_MATCH(DMI_PRODUCT_NAME, ""),
14594 },
14595 },
14596 { } /* terminating entry */
14597 },
14598 .hook = quirk_invert_brightness,
14599 },
14600 };
14601
14602 static struct intel_quirk intel_quirks[] = {
14603 /* Toshiba Protege R-205, S-209 needs pipe A force quirk */
14604 { 0x2592, 0x1179, 0x0001, quirk_pipea_force },
14605
14606 /* ThinkPad T60 needs pipe A force quirk (bug #16494) */
14607 { 0x2782, 0x17aa, 0x201a, quirk_pipea_force },
14608
14609 /* 830 needs to leave pipe A & dpll A up */
14610 { 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },
14611
14612 /* 830 needs to leave pipe B & dpll B up */
14613 { 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipeb_force },
14614
14615 /* Lenovo U160 cannot use SSC on LVDS */
14616 { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },
14617
14618 /* Sony Vaio Y cannot use SSC on LVDS */
14619 { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
14620
14621 /* Acer Aspire 5734Z must invert backlight brightness */
14622 { 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
14623
14624 /* Acer/eMachines G725 */
14625 { 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },
14626
14627 /* Acer/eMachines e725 */
14628 { 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },
14629
14630 /* Acer/Packard Bell NCL20 */
14631 { 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },
14632
14633 /* Acer Aspire 4736Z */
14634 { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
14635
14636 /* Acer Aspire 5336 */
14637 { 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },
14638
14639 /* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */
14640 { 0x0a06, 0x1025, 0x0a11, quirk_backlight_present },
14641
14642 /* Acer C720 Chromebook (Core i3 4005U) */
14643 { 0x0a16, 0x1025, 0x0a11, quirk_backlight_present },
14644
14645 /* Apple Macbook 2,1 (Core 2 T7400) */
14646 { 0x27a2, 0x8086, 0x7270, quirk_backlight_present },
14647
14648 /* Apple Macbook 4,1 */
14649 { 0x2a02, 0x106b, 0x00a1, quirk_backlight_present },
14650
14651 /* Toshiba CB35 Chromebook (Celeron 2955U) */
14652 { 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },
14653
14654 /* HP Chromebook 14 (Celeron 2955U) */
14655 { 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },
14656
14657 /* Dell Chromebook 11 */
14658 { 0x0a06, 0x1028, 0x0a35, quirk_backlight_present },
14659
14660 /* Dell Chromebook 11 (2015 version) */
14661 { 0x0a16, 0x1028, 0x0a35, quirk_backlight_present },
14662 };
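/*
 * Adding a quirk is a matter of appending an entry with the PCI
 * device ID, subsystem vendor/device IDs (or PCI_ANY_ID) and the
 * hook, e.g. (hypothetical IDs, shown only to illustrate the
 * format):
 *
 *	{ 0x1234, 0xabcd, 0x5678, quirk_backlight_present },
 */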
14663
14664 static void intel_init_quirks(struct drm_device *dev)
14665 {
14666 struct pci_dev *d = dev->pdev;
14667 int i;
14668
14669 for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
14670 struct intel_quirk *q = &intel_quirks[i];
14671
14672 if (d->device == q->device &&
14673 (d->subsystem_vendor == q->subsystem_vendor ||
14674 q->subsystem_vendor == PCI_ANY_ID) &&
14675 (d->subsystem_device == q->subsystem_device ||
14676 q->subsystem_device == PCI_ANY_ID))
14677 q->hook(dev);
14678 }
14679 for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
14680 if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
14681 intel_dmi_quirks[i].hook(dev);
14682 }
14683 }
14684
14685 /* Disable the VGA plane that we never use */
14686 static void i915_disable_vga(struct drm_device *dev)
14687 {
14688 struct drm_i915_private *dev_priv = dev->dev_private;
14689 u8 sr1;
14690 i915_reg_t vga_reg = i915_vgacntrl_reg(dev);
14691
14692 /* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
14693 vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
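/* Read SR01 and set bit 5, the "screen off" bit, to blank the VGA screen. */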
14694 outb(SR01, VGA_SR_INDEX);
14695 sr1 = inb(VGA_SR_DATA);
14696 outb(sr1 | 1<<5, VGA_SR_DATA);
14697 vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
14698 udelay(300);
14699
14700 I915_WRITE(vga_reg, VGA_DISP_DISABLE);
14701 POSTING_READ(vga_reg);
14702 }
14703
14704 void intel_modeset_init_hw(struct drm_device *dev)
14705 {
14706 struct drm_i915_private *dev_priv = dev->dev_private;
14707
14708 intel_update_cdclk(dev);
14709
14710 dev_priv->atomic_cdclk_freq = dev_priv->cdclk_freq;
14711
14712 intel_init_clock_gating(dev);
14713 intel_enable_gt_powersave(dev_priv);
14714 }
14715
14716 /*
14717 * Calculate what we think the watermarks should be for the state we've read
14718 * out of the hardware and then immediately program those watermarks so that
14719 * we ensure the hardware settings match our internal state.
14720 *
14721 * We can calculate what we think WM's should be by creating a duplicate of the
14722 * current state (which was constructed during hardware readout) and running it
14723 * through the atomic check code to calculate new watermark values in the
14724 * state object.
14725 */
14726 static void sanitize_watermarks(struct drm_device *dev)
14727 {
14728 struct drm_i915_private *dev_priv = to_i915(dev);
14729 struct drm_atomic_state *state;
14730 struct drm_crtc *crtc;
14731 struct drm_crtc_state *cstate;
14732 struct drm_modeset_acquire_ctx ctx;
14733 int ret;
14734 int i;
14735
14736 /* Only supported on platforms that use atomic watermark design */
14737 if (!dev_priv->display.optimize_watermarks)
14738 return;
14739
14740 /*
14741 * We need to hold connection_mutex before calling duplicate_state so
14742 * that the connector loop is protected.
14743 */
14744 drm_modeset_acquire_init(&ctx, 0);
14745 retry:
14746 ret = drm_modeset_lock_all_ctx(dev, &ctx);
14747 if (ret == -EDEADLK) {
14748 drm_modeset_backoff(&ctx);
14749 goto retry;
14750 } else if (WARN_ON(ret)) {
14751 goto fail;
14752 }
14753
14754 state = drm_atomic_helper_duplicate_state(dev, &ctx);
14755 if (WARN_ON(IS_ERR(state)))
14756 goto fail;
14757
14758 /*
14759 * Hardware readout is the only time we don't want to calculate
14760 * intermediate watermarks (since we don't trust the current
14761 * watermarks).
14762 */
14763 to_intel_atomic_state(state)->skip_intermediate_wm = true;
14764
14765 ret = intel_atomic_check(dev, state);
14766 if (ret) {
14767 /*
14768 * If we fail here, it means that the hardware appears to be
14769 * programmed in a way that shouldn't be possible, given our
14770 * understanding of watermark requirements. This might mean a
14771 * mistake in the hardware readout code or a mistake in the
14772 * watermark calculations for a given platform. Raise a WARN
14773 * so that this is noticeable.
14774 *
14775 * If this actually happens, we'll have to just leave the
14776 * BIOS-programmed watermarks untouched and hope for the best.
14777 */
14778 WARN(true, "Could not determine valid watermarks for inherited state\n");
14779 goto fail;
14780 }
14781
14782 /* Write calculated watermark values back */
14783 for_each_crtc_in_state(state, crtc, cstate, i) {
14784 struct intel_crtc_state *cs = to_intel_crtc_state(cstate);
14785
14786 cs->wm.need_postvbl_update = true;
14787 dev_priv->display.optimize_watermarks(cs);
14788 }
14789
14790 drm_atomic_state_free(state);
14791 fail:
14792 drm_modeset_drop_locks(&ctx);
14793 drm_modeset_acquire_fini(&ctx);
14794 }
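/*
 * The retry/backoff dance above is the standard drm_modeset_lock
 * idiom; in isolation it looks like this (sketch):
 *
 *	drm_modeset_acquire_init(&ctx, 0);
 * retry:
 *	ret = drm_modeset_lock_all_ctx(dev, &ctx);
 *	if (ret == -EDEADLK) {
 *		drm_modeset_backoff(&ctx);
 *		goto retry;
 *	}
 *	// ... work under the acquired locks ...
 *	drm_modeset_drop_locks(&ctx);
 *	drm_modeset_acquire_fini(&ctx);
 */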
14795
14796 void intel_modeset_init(struct drm_device *dev)
14797 {
14798 struct drm_i915_private *dev_priv = to_i915(dev);
14799 struct i915_ggtt *ggtt = &dev_priv->ggtt;
14800 int sprite, ret;
14801 enum pipe pipe;
14802 struct intel_crtc *crtc;
14803
14804 drm_mode_config_init(dev);
14805
14806 dev->mode_config.min_width = 0;
14807 dev->mode_config.min_height = 0;
14808
14809 dev->mode_config.preferred_depth = 24;
14810 dev->mode_config.prefer_shadow = 1;
14811
14812 dev->mode_config.allow_fb_modifiers = true;
14813
14814 dev->mode_config.funcs = &intel_mode_funcs;
14815
14816 intel_init_quirks(dev);
14817
14818 intel_init_pm(dev);
14819
14820 if (INTEL_INFO(dev)->num_pipes == 0)
14821 return;
14822
14823 /*
14824 * There may be no VBT; and if the BIOS enabled SSC we can
14825 * just keep using it to avoid unnecessary flicker. Conversely, if the
14826 * BIOS isn't using it, don't assume it will work even if the VBT
14827 * indicates as much.
14828 */
14829 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
14830 bool bios_lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
14831 DREF_SSC1_ENABLE);
14832
14833 if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
14834 DRM_DEBUG_KMS("SSC %sabled by BIOS, overriding VBT which says %sabled\n",
14835 bios_lvds_use_ssc ? "en" : "dis",
14836 dev_priv->vbt.lvds_use_ssc ? "en" : "dis");
14837 dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
14838 }
14839 }
14840
14841 if (IS_GEN2(dev)) {
14842 dev->mode_config.max_width = 2048;
14843 dev->mode_config.max_height = 2048;
14844 } else if (IS_GEN3(dev)) {
14845 dev->mode_config.max_width = 4096;
14846 dev->mode_config.max_height = 4096;
14847 } else {
14848 dev->mode_config.max_width = 8192;
14849 dev->mode_config.max_height = 8192;
14850 }
14851
14852 if (IS_845G(dev) || IS_I865G(dev)) {
14853 dev->mode_config.cursor_width = IS_845G(dev) ? 64 : 512;
14854 dev->mode_config.cursor_height = 1023;
14855 } else if (IS_GEN2(dev)) {
14856 dev->mode_config.cursor_width = GEN2_CURSOR_WIDTH;
14857 dev->mode_config.cursor_height = GEN2_CURSOR_HEIGHT;
14858 } else {
14859 dev->mode_config.cursor_width = MAX_CURSOR_WIDTH;
14860 dev->mode_config.cursor_height = MAX_CURSOR_HEIGHT;
14861 }
14862
14863 dev->mode_config.fb_base = ggtt->mappable_base;
14864
14865 DRM_DEBUG_KMS("%d display pipe%s available.\n",
14866 INTEL_INFO(dev)->num_pipes,
14867 INTEL_INFO(dev)->num_pipes > 1 ? "s" : "");
14868
14869 for_each_pipe(dev_priv, pipe) {
14870 intel_crtc_init(dev, pipe);
14871 for_each_sprite(dev_priv, pipe, sprite) {
14872 ret = intel_plane_init(dev, pipe, sprite);
14873 if (ret)
14874 DRM_DEBUG_KMS("pipe %c sprite %c init failed: %d\n",
14875 pipe_name(pipe), sprite_name(pipe, sprite), ret);
14876 }
14877 }
14878
14879 intel_update_czclk(dev_priv);
14880 intel_update_cdclk(dev);
14881
14882 intel_shared_dpll_init(dev);
14883
14884 /* Just disable it once at startup */
14885 i915_disable_vga(dev);
14886 intel_setup_outputs(dev);
14887
14888 drm_modeset_lock_all(dev);
14889 intel_modeset_setup_hw_state(dev);
14890 drm_modeset_unlock_all(dev);
14891
14892 for_each_intel_crtc(dev, crtc) {
14893 struct intel_initial_plane_config plane_config = {};
14894
14895 if (!crtc->active)
14896 continue;
14897
14898 /*
14899 * Note that reserving the BIOS fb up front prevents us
14900 * from stuffing other stolen allocations like the ring
14901 * on top. This prevents some ugliness at boot time, and
14902 * can even allow for smooth boot transitions if the BIOS
14903 * fb is large enough for the active pipe configuration.
14904 */
14905 dev_priv->display.get_initial_plane_config(crtc,
14906 &plane_config);
14907
14908 /*
14909 * If the fb is shared between multiple heads, we'll
14910 * just get the first one.
14911 */
14912 intel_find_initial_plane_obj(crtc, &plane_config);
14913 }
14914
14915 /*
14916 * Make sure hardware watermarks really match the state we read out.
14917 * Note that we need to do this after reconstructing the BIOS fb's
14918 * since the watermark calculation done here will use pstate->fb.
14919 */
14920 sanitize_watermarks(dev);
14921 }
14922
14923 static void intel_enable_pipe_a(struct drm_device *dev)
14924 {
14925 struct intel_connector *connector;
14926 struct drm_connector *crt = NULL;
14927 struct intel_load_detect_pipe load_detect_temp;
14928 struct drm_modeset_acquire_ctx *ctx = dev->mode_config.acquire_ctx;
14929
14930 /* We can't just switch on pipe A; we need to set things up with a
14931 * proper mode and output configuration. As a gross hack, enable pipe A
14932 * by enabling the load detect pipe once. */
14933 for_each_intel_connector(dev, connector) {
14934 if (connector->encoder->type == INTEL_OUTPUT_ANALOG) {
14935 crt = &connector->base;
14936 break;
14937 }
14938 }
14939
14940 if (!crt)
14941 return;
14942
14943 if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, ctx))
14944 intel_release_load_detect_pipe(crt, &load_detect_temp, ctx);
14945 }
14946
14947 static bool
14948 intel_check_plane_mapping(struct intel_crtc *crtc)
14949 {
14950 struct drm_device *dev = crtc->base.dev;
14951 struct drm_i915_private *dev_priv = dev->dev_private;
14952 u32 val;
14953
14954 if (INTEL_INFO(dev)->num_pipes == 1)
14955 return true;
14956
14957 val = I915_READ(DSPCNTR(!crtc->plane));
14958
14959 if ((val & DISPLAY_PLANE_ENABLE) &&
14960 (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
14961 return false;
14962
14963 return true;
14964 }
14965
14966 static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
14967 {
14968 struct drm_device *dev = crtc->base.dev;
14969 struct intel_encoder *encoder;
14970
14971 for_each_encoder_on_crtc(dev, &crtc->base, encoder)
14972 return true;
14973
14974 return false;
14975 }
14976
14977 static bool intel_encoder_has_connectors(struct intel_encoder *encoder)
14978 {
14979 struct drm_device *dev = encoder->base.dev;
14980 struct intel_connector *connector;
14981
14982 for_each_connector_on_encoder(dev, &encoder->base, connector)
14983 return true;
14984
14985 return false;
14986 }
14987
14988 static void intel_sanitize_crtc(struct intel_crtc *crtc)
14989 {
14990 struct drm_device *dev = crtc->base.dev;
14991 struct drm_i915_private *dev_priv = dev->dev_private;
14992 enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
14993
14994 /* Clear any frame start delays used for debugging left by the BIOS */
14995 if (!transcoder_is_dsi(cpu_transcoder)) {
14996 i915_reg_t reg = PIPECONF(cpu_transcoder);
14997
14998 I915_WRITE(reg,
14999 I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
15000 }
15001
15002 /* restore vblank interrupts to correct state */
15003 drm_crtc_vblank_reset(&crtc->base);
15004 if (crtc->active) {
15005 struct intel_plane *plane;
15006
15007 drm_crtc_vblank_on(&crtc->base);
15008
15009 /* Disable everything but the primary plane */
15010 for_each_intel_plane_on_crtc(dev, crtc, plane) {
15011 if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
15012 continue;
15013
15014 plane->disable_plane(&plane->base, &crtc->base);
15015 }
15016 }
15017
15018 /* We need to sanitize the plane -> pipe mapping first because this will
15019 * disable the crtc (and hence change the state) if it is wrong. Note
15020 * that gen4+ has a fixed plane -> pipe mapping. */
15021 if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) {
15022 enum plane plane;
15023
15024 DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n",
15025 crtc->base.base.id);
15026
15027 /* Pipe has the wrong plane attached and the plane is active.
15028 * Temporarily change the plane mapping and disable everything
15029 * ... */
15030 plane = crtc->plane;
15031 to_intel_plane_state(crtc->base.primary->state)->visible = true;
15032 crtc->plane = !plane;
15033 intel_crtc_disable_noatomic(&crtc->base);
15034 crtc->plane = plane;
15035 }
15036
15037 if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
15038 crtc->pipe == PIPE_A && !crtc->active) {
15039 /* The BIOS forgot to enable pipe A; this mostly happens after
15040 * resume. Force-enable the pipe to fix this: the state
15041 * sanitation below restores the pipe to the right state, but
15042 * leaves the required bits on. */
15043 intel_enable_pipe_a(dev);
15044 }
15045
15046 /* Adjust the state of the output pipe according to whether we
15047 * have active connectors/encoders. */
15048 if (crtc->active && !intel_crtc_has_encoders(crtc))
15049 intel_crtc_disable_noatomic(&crtc->base);
15050
15051 if (crtc->active || HAS_GMCH_DISPLAY(dev)) {
15052 /*
15053 * We start out with underrun reporting disabled to avoid races.
15054 * For correct bookkeeping mark this on active crtcs.
15055 *
15056 * Also, on gmch platforms we don't have any hardware bits to
15057 * disable the underrun reporting. Which means we need to start
15058 * out with underrun reporting disabled also on inactive pipes,
15059 * since otherwise we'll complain about the garbage we read when
15060 * e.g. coming up after runtime pm.
15061 *
15062 * No protection against concurrent access is required - at
15063 * worst a fifo underrun happens which also sets this to false.
15064 */
15065 crtc->cpu_fifo_underrun_disabled = true;
15066 crtc->pch_fifo_underrun_disabled = true;
15067 }
15068 }
15069
15070 static void intel_sanitize_encoder(struct intel_encoder *encoder)
15071 {
15072 struct intel_connector *connector;
15073 struct drm_device *dev = encoder->base.dev;
15074
15075 /* We need to check both for a crtc link (meaning that the
15076 * encoder is active and trying to read from a pipe) and the
15077 * pipe itself being active. */
15078 bool has_active_crtc = encoder->base.crtc &&
15079 to_intel_crtc(encoder->base.crtc)->active;
15080
15081 if (intel_encoder_has_connectors(encoder) && !has_active_crtc) {
15082 DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
15083 encoder->base.base.id,
15084 encoder->base.name);
15085
15086 /* Connector is active, but has no active pipe. This is
15087 * fallout from our resume register restoring. Disable
15088 * the encoder manually again. */
15089 if (encoder->base.crtc) {
15090 DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
15091 encoder->base.base.id,
15092 encoder->base.name);
15093 encoder->disable(encoder);
15094 if (encoder->post_disable)
15095 encoder->post_disable(encoder);
15096 }
15097 encoder->base.crtc = NULL;
15098
15099 /* Inconsistent output/port/pipe state happens presumably due to
15100 * a bug in one of the get_hw_state functions. Or someplace else
15101 * in our code, like the register restore mess on resume. Clamp
15102 * things to off as a safer default. */
15103 for_each_intel_connector(dev, connector) {
15104 if (connector->encoder != encoder)
15105 continue;
15106 connector->base.dpms = DRM_MODE_DPMS_OFF;
15107 connector->base.encoder = NULL;
15108 }
15109 }
15110 /* Enabled encoders without active connectors will be fixed in
15111 * the crtc fixup. */
15112 }
15113
15114 void i915_redisable_vga_power_on(struct drm_device *dev)
15115 {
15116 struct drm_i915_private *dev_priv = dev->dev_private;
15117 i915_reg_t vga_reg = i915_vgacntrl_reg(dev);
15118
15119 if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
15120 DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
15121 i915_disable_vga(dev);
15122 }
15123 }
15124
15125 void i915_redisable_vga(struct drm_device *dev)
15126 {
15127 struct drm_i915_private *dev_priv = dev->dev_private;
15128
15129 /* This function can be called either from intel_modeset_setup_hw_state or
15130 * at a very early point in our resume sequence, where the power well
15131 * structures are not yet restored. Since this function is at a very
15132 * paranoid "someone might have enabled VGA while we were not looking"
15133 * level, just check if the power well is enabled instead of trying to
15134 * follow the "don't touch the power well if we don't need it" policy
15135 * the rest of the driver uses. */
15136 if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_VGA))
15137 return;
15138
15139 i915_redisable_vga_power_on(dev);
15140
15141 intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);
15142 }
15143
15144 static bool primary_get_hw_state(struct intel_plane *plane)
15145 {
15146 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
15147
15148 return I915_READ(DSPCNTR(plane->plane)) & DISPLAY_PLANE_ENABLE;
15149 }
15150
15151 /* FIXME read out full plane state for all planes */
15152 static void readout_plane_state(struct intel_crtc *crtc)
15153 {
15154 struct drm_plane *primary = crtc->base.primary;
15155 struct intel_plane_state *plane_state =
15156 to_intel_plane_state(primary->state);
15157
15158 plane_state->visible = crtc->active &&
15159 primary_get_hw_state(to_intel_plane(primary));
15160
15161 if (plane_state->visible)
15162 crtc->base.state->plane_mask |= 1 << drm_plane_index(primary);
15163 }
15164
15165 static void intel_modeset_readout_hw_state(struct drm_device *dev)
15166 {
15167 struct drm_i915_private *dev_priv = dev->dev_private;
15168 enum pipe pipe;
15169 struct intel_crtc *crtc;
15170 struct intel_encoder *encoder;
15171 struct intel_connector *connector;
15172 int i;
15173
15174 dev_priv->active_crtcs = 0;
15175
15176 for_each_intel_crtc(dev, crtc) {
15177 struct intel_crtc_state *crtc_state = crtc->config;
15178 int pixclk = 0;
15179
15180 __drm_atomic_helper_crtc_destroy_state(&crtc->base, &crtc_state->base);
15181 memset(crtc_state, 0, sizeof(*crtc_state));
15182 crtc_state->base.crtc = &crtc->base;
15183
15184 crtc_state->base.active = crtc_state->base.enable =
15185 dev_priv->display.get_pipe_config(crtc, crtc_state);
15186
15187 crtc->base.enabled = crtc_state->base.enable;
15188 crtc->active = crtc_state->base.active;
15189
15190 if (crtc_state->base.active) {
15191 dev_priv->active_crtcs |= 1 << crtc->pipe;
15192
15193 if (IS_BROADWELL(dev_priv)) {
15194 pixclk = ilk_pipe_pixel_rate(crtc_state);
15195
15196 /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW, hence the 100/95 scaling (e.g. 200000 kHz -> DIV_ROUND_UP(200000 * 100, 95) = 210527 kHz) */
15197 if (crtc_state->ips_enabled)
15198 pixclk = DIV_ROUND_UP(pixclk * 100, 95);
15199 } else if (IS_VALLEYVIEW(dev_priv) ||
15200 IS_CHERRYVIEW(dev_priv) ||
15201 IS_BROXTON(dev_priv))
15202 pixclk = crtc_state->base.adjusted_mode.crtc_clock;
15203 else
15204 WARN_ON(dev_priv->display.modeset_calc_cdclk);
15205 }
15206
15207 dev_priv->min_pixclk[crtc->pipe] = pixclk;
15208
15209 readout_plane_state(crtc);
15210
15211 DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n",
15212 crtc->base.base.id,
15213 crtc->active ? "enabled" : "disabled");
15214 }
15215
15216 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
15217 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
15218
15219 pll->on = pll->funcs.get_hw_state(dev_priv, pll,
15220 &pll->config.hw_state);
15221 pll->config.crtc_mask = 0;
15222 for_each_intel_crtc(dev, crtc) {
15223 if (crtc->active && crtc->config->shared_dpll == pll)
15224 pll->config.crtc_mask |= 1 << crtc->pipe;
15225 }
15226 pll->active_mask = pll->config.crtc_mask;
15227
15228 DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
15229 pll->name, pll->config.crtc_mask, pll->on);
15230 }
15231
15232 for_each_intel_encoder(dev, encoder) {
15233 pipe = 0;
15234
15235 if (encoder->get_hw_state(encoder, &pipe)) {
15236 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
15237 encoder->base.crtc = &crtc->base;
15238 encoder->get_config(encoder, crtc->config);
15239 } else {
15240 encoder->base.crtc = NULL;
15241 }
15242
15243 DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
15244 encoder->base.base.id,
15245 encoder->base.name,
15246 encoder->base.crtc ? "enabled" : "disabled",
15247 pipe_name(pipe));
15248 }
15249
15250 for_each_intel_connector(dev, connector) {
15251 if (connector->get_hw_state(connector)) {
15252 connector->base.dpms = DRM_MODE_DPMS_ON;
15253
15254 encoder = connector->encoder;
15255 connector->base.encoder = &encoder->base;
15256
15257 if (encoder->base.crtc &&
15258 encoder->base.crtc->state->active) {
15259 /*
15260 * This has to be done during hardware readout
15261 * because anything calling .crtc_disable may
15262 * rely on the connector_mask being accurate.
15263 */
15264 encoder->base.crtc->state->connector_mask |=
15265 1 << drm_connector_index(&connector->base);
15266 encoder->base.crtc->state->encoder_mask |=
15267 1 << drm_encoder_index(&encoder->base);
15268 }
15269
15270 } else {
15271 connector->base.dpms = DRM_MODE_DPMS_OFF;
15272 connector->base.encoder = NULL;
15273 }
15274 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
15275 connector->base.base.id,
15276 connector->base.name,
15277 connector->base.encoder ? "enabled" : "disabled");
15278 }
15279
15280 for_each_intel_crtc(dev, crtc) {
15281 crtc->base.hwmode = crtc->config->base.adjusted_mode;
15282
15283 memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
15284 if (crtc->base.state->active) {
15285 intel_mode_from_pipe_config(&crtc->base.mode, crtc->config);
15286 intel_mode_from_pipe_config(&crtc->base.state->adjusted_mode, crtc->config);
15287 WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode));
15288
15289 /*
15290 * The initial mode needs to be set in order to keep
15291 * the atomic core happy. It wants a valid mode if the
15292 * crtc's enabled, so we do the above call.
15293 *
15294 * At this point some state updated by the connectors
15295 * in their ->detect() callback has not run yet, so
15296 * no recalculation can be done yet.
15297 *
15298 * Even if we could do a recalculation and modeset
15299 * right now it would cause a double modeset if
15300 * fbdev or userspace chooses a different initial mode.
15301 *
15302 * If that happens, someone indicated they wanted a
15303 * mode change, which means it's safe to do a full
15304 * recalculation.
15305 */
15306 crtc->base.state->mode.private_flags = I915_MODE_FLAG_INHERITED;
15307
15308 drm_calc_timestamping_constants(&crtc->base, &crtc->base.hwmode);
15309 update_scanline_offset(crtc);
15310 }
15311
15312 intel_pipe_config_sanity_check(dev_priv, crtc->config);
15313 }
15314 }
15315
15316 /* Scan out the current hw modeset state and sanitize it to
15317 * match the driver's software state.
15318 */
15319 static void
15320 intel_modeset_setup_hw_state(struct drm_device *dev)
15321 {
15322 struct drm_i915_private *dev_priv = dev->dev_private;
15323 enum pipe pipe;
15324 struct intel_crtc *crtc;
15325 struct intel_encoder *encoder;
15326 int i;
15327
15328 intel_modeset_readout_hw_state(dev);
15329
15330 /* HW state is read out, now we need to sanitize this mess. */
15331 for_each_intel_encoder(dev, encoder) {
15332 intel_sanitize_encoder(encoder);
15333 }
15334
15335 for_each_pipe(dev_priv, pipe) {
15336 crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
15337 intel_sanitize_crtc(crtc);
15338 intel_dump_pipe_config(crtc, crtc->config,
15339 "[setup_hw_state]");
15340 }
15341
15342 intel_modeset_update_connector_atomic_state(dev);
15343
15344 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
15345 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
15346
15347 if (!pll->on || pll->active_mask)
15348 continue;
15349
15350 DRM_DEBUG_KMS("%s enabled but not in use, disabling\n", pll->name);
15351
15352 pll->funcs.disable(dev_priv, pll);
15353 pll->on = false;
15354 }
15355
15356 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
15357 vlv_wm_get_hw_state(dev);
15358 else if (IS_GEN9(dev))
15359 skl_wm_get_hw_state(dev);
15360 else if (HAS_PCH_SPLIT(dev))
15361 ilk_wm_get_hw_state(dev);
15362
15363 for_each_intel_crtc(dev, crtc) {
15364 unsigned long put_domains;
15365
15366 put_domains = modeset_get_crtc_power_domains(&crtc->base, crtc->config);
15367 if (WARN_ON(put_domains))
15368 modeset_put_power_domains(dev_priv, put_domains);
15369 }
15370 intel_display_set_init_power(dev_priv, false);
15371
15372 intel_fbc_init_pipe_state(dev_priv);
15373 }
15374
15375 void intel_display_resume(struct drm_device *dev)
15376 {
15377 struct drm_i915_private *dev_priv = to_i915(dev);
15378 struct drm_atomic_state *state = dev_priv->modeset_restore_state;
15379 struct drm_modeset_acquire_ctx ctx;
15380 int ret;
15381 bool setup = false;
15382
15383 dev_priv->modeset_restore_state = NULL;
15384
15385 /*
15386 * This is a kludge because with real atomic modeset mode_config.mutex
15387 * won't be taken. Unfortunately some probed state like
15388 * audio_codec_enable is still protected by mode_config.mutex, so lock
15389 * it here for now.
15390 */
15391 mutex_lock(&dev->mode_config.mutex);
15392 drm_modeset_acquire_init(&ctx, 0);
15393
15394 retry:
15395 ret = drm_modeset_lock_all_ctx(dev, &ctx);
15396
15397 if (ret == 0 && !setup) {
15398 setup = true;
15399
15400 intel_modeset_setup_hw_state(dev);
15401 i915_redisable_vga(dev);
15402 }
15403
15404 if (ret == 0 && state) {
15405 struct drm_crtc_state *crtc_state;
15406 struct drm_crtc *crtc;
15407 int i;
15408
15409 state->acquire_ctx = &ctx;
15410
15411 /* ignore any reset values/BIOS leftovers in the WM registers */
15412 to_intel_atomic_state(state)->skip_intermediate_wm = true;
15413
15414 for_each_crtc_in_state(state, crtc, crtc_state, i) {
15415 /*
15416 * Force recalculation even if we restore
15417 * current state. With fast modeset this may not result
15418 * in a modeset when the state is compatible.
15419 */
15420 crtc_state->mode_changed = true;
15421 }
15422
15423 ret = drm_atomic_commit(state);
15424 }
15425
15426 if (ret == -EDEADLK) {
15427 drm_modeset_backoff(&ctx);
15428 goto retry;
15429 }
15430
15431 drm_modeset_drop_locks(&ctx);
15432 drm_modeset_acquire_fini(&ctx);
15433 mutex_unlock(&dev->mode_config.mutex);
15434
15435 if (ret) {
15436 DRM_ERROR("Restoring old state failed with %i\n", ret);
15437 drm_atomic_state_free(state);
15438 }
15439 }
15440
15441 void intel_modeset_gem_init(struct drm_device *dev)
15442 {
15443 struct drm_i915_private *dev_priv = to_i915(dev);
15444 struct drm_crtc *c;
15445 struct drm_i915_gem_object *obj;
15446 int ret;
15447
15448 intel_init_gt_powersave(dev_priv);
15449
15450 intel_modeset_init_hw(dev);
15451
15452 intel_setup_overlay(dev_priv);
15453
15454 /*
15455 * Make sure any fbs we allocated at startup are properly
15456 * pinned & fenced. When we do the allocation it's too early
15457 * for this.
15458 */
15459 for_each_crtc(dev, c) {
15460 obj = intel_fb_obj(c->primary->fb);
15461 if (obj == NULL)
15462 continue;
15463
15464 mutex_lock(&dev->struct_mutex);
15465 ret = intel_pin_and_fence_fb_obj(c->primary->fb,
15466 c->primary->state->rotation);
15467 mutex_unlock(&dev->struct_mutex);
15468 if (ret) {
15469 DRM_ERROR("failed to pin boot fb on pipe %d\n",
15470 to_intel_crtc(c)->pipe);
15471 drm_framebuffer_unreference(c->primary->fb);
15472 drm_framebuffer_unreference(c->primary->state->fb);
15473 c->primary->fb = c->primary->state->fb = NULL;
15474 c->primary->crtc = c->primary->state->crtc = NULL;
15475 c->state->plane_mask &= ~(1 << drm_plane_index(c->primary));
15476 }
15477 }
15478
15479 intel_backlight_register(dev);
15480 }
15481
15482 void intel_connector_unregister(struct intel_connector *intel_connector)
15483 {
15484 struct drm_connector *connector = &intel_connector->base;
15485
15486 intel_panel_destroy_backlight(connector);
15487 drm_connector_unregister(connector);
15488 }
15489
15490 void intel_modeset_cleanup(struct drm_device *dev)
15491 {
15492 struct drm_i915_private *dev_priv = dev->dev_private;
15493 struct intel_connector *connector;
15494
15495 intel_disable_gt_powersave(dev_priv);
15496
15497 intel_backlight_unregister(dev);
15498
15499 /*
15500 * Disable interrupts and polling first to avoid creating havoc.
15501 * Too much stuff here (turning off connectors, ...) would
15502 * otherwise experience fancy races.
15503 */
15504 intel_irq_uninstall(dev_priv);
15505
15506 /*
15507 * Due to the hpd irq storm handling the hotplug work can re-arm the
15508 * poll handlers. Hence disable polling after hpd handling is shut down.
15509 */
15510 drm_kms_helper_poll_fini(dev);
15511
15512 intel_unregister_dsm_handler();
15513
15514 intel_fbc_global_disable(dev_priv);
15515
15516 /* flush any delayed tasks or pending work */
15517 flush_scheduled_work();
15518
15519 /* destroy the backlight and sysfs files before encoders/connectors */
15520 for_each_intel_connector(dev, connector)
15521 connector->unregister(connector);
15522
15523 drm_mode_config_cleanup(dev);
15524
15525 intel_cleanup_overlay(dev_priv);
15526
15527 intel_cleanup_gt_powersave(dev_priv);
15528
15529 intel_teardown_gmbus(dev);
15530 }
15531
15532 /*
15533 * Return the encoder currently attached to the connector.
15534 */
15535 struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
15536 {
15537 return &intel_attached_encoder(connector)->base;
15538 }
15539
15540 void intel_connector_attach_encoder(struct intel_connector *connector,
15541 struct intel_encoder *encoder)
15542 {
15543 connector->encoder = encoder;
15544 drm_mode_connector_attach_encoder(&connector->base,
15545 &encoder->base);
15546 }
15547
15548 /*
15549 * set vga decode state - true == enable VGA decode
15550 */
15551 int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
15552 {
15553 struct drm_i915_private *dev_priv = dev->dev_private;
15554 unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
15555 u16 gmch_ctrl;
15556
15557 if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) {
15558 DRM_ERROR("failed to read control word\n");
15559 return -EIO;
15560 }
15561
15562 if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !state)
15563 return 0;
15564
15565 if (state)
15566 gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
15567 else
15568 gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
15569
15570 if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) {
15571 DRM_ERROR("failed to write control word\n");
15572 return -EIO;
15573 }
15574
15575 return 0;
15576 }
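/*
 * Usage sketch (illustrative; the expected caller is the VGA
 * arbiter's decode callback rather than anything in this file):
 *
 *	intel_modeset_vga_set_state(dev, false);	// disable VGA decode
 */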
15577
15578 struct intel_display_error_state {
15579
15580 u32 power_well_driver;
15581
15582 int num_transcoders;
15583
15584 struct intel_cursor_error_state {
15585 u32 control;
15586 u32 position;
15587 u32 base;
15588 u32 size;
15589 } cursor[I915_MAX_PIPES];
15590
15591 struct intel_pipe_error_state {
15592 bool power_domain_on;
15593 u32 source;
15594 u32 stat;
15595 } pipe[I915_MAX_PIPES];
15596
15597 struct intel_plane_error_state {
15598 u32 control;
15599 u32 stride;
15600 u32 size;
15601 u32 pos;
15602 u32 addr;
15603 u32 surface;
15604 u32 tile_offset;
15605 } plane[I915_MAX_PIPES];
15606
15607 struct intel_transcoder_error_state {
15608 bool power_domain_on;
15609 enum transcoder cpu_transcoder;
15610
15611 u32 conf;
15612
15613 u32 htotal;
15614 u32 hblank;
15615 u32 hsync;
15616 u32 vtotal;
15617 u32 vblank;
15618 u32 vsync;
15619 } transcoder[4];
15620 };
15621
15622 struct intel_display_error_state *
15623 intel_display_capture_error_state(struct drm_i915_private *dev_priv)
15624 {
15625 struct intel_display_error_state *error;
15626 int transcoders[] = {
15627 TRANSCODER_A,
15628 TRANSCODER_B,
15629 TRANSCODER_C,
15630 TRANSCODER_EDP,
15631 };
15632 int i;
15633
15634 if (INTEL_INFO(dev_priv)->num_pipes == 0)
15635 return NULL;
15636
15637 error = kzalloc(sizeof(*error), GFP_ATOMIC);
15638 if (error == NULL)
15639 return NULL;
15640
15641 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
15642 error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);
15643
15644 for_each_pipe(dev_priv, i) {
15645 error->pipe[i].power_domain_on =
15646 __intel_display_power_is_enabled(dev_priv,
15647 POWER_DOMAIN_PIPE(i));
15648 if (!error->pipe[i].power_domain_on)
15649 continue;
15650
15651 error->cursor[i].control = I915_READ(CURCNTR(i));
15652 error->cursor[i].position = I915_READ(CURPOS(i));
15653 error->cursor[i].base = I915_READ(CURBASE(i));
15654
15655 error->plane[i].control = I915_READ(DSPCNTR(i));
15656 error->plane[i].stride = I915_READ(DSPSTRIDE(i));
15657 if (INTEL_GEN(dev_priv) <= 3) {
15658 error->plane[i].size = I915_READ(DSPSIZE(i));
15659 error->plane[i].pos = I915_READ(DSPPOS(i));
15660 }
15661 if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
15662 error->plane[i].addr = I915_READ(DSPADDR(i));
15663 if (INTEL_GEN(dev_priv) >= 4) {
15664 error->plane[i].surface = I915_READ(DSPSURF(i));
15665 error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
15666 }
15667
15668 error->pipe[i].source = I915_READ(PIPESRC(i));
15669
15670 if (HAS_GMCH_DISPLAY(dev_priv))
15671 error->pipe[i].stat = I915_READ(PIPESTAT(i));
15672 }
15673
15674 /* Note: this does not include DSI transcoders. */
15675 error->num_transcoders = INTEL_INFO(dev_priv)->num_pipes;
15676 if (HAS_DDI(dev_priv))
15677 error->num_transcoders++; /* Account for eDP. */
15678
15679 for (i = 0; i < error->num_transcoders; i++) {
15680 enum transcoder cpu_transcoder = transcoders[i];
15681
15682 error->transcoder[i].power_domain_on =
15683 __intel_display_power_is_enabled(dev_priv,
15684 POWER_DOMAIN_TRANSCODER(cpu_transcoder));
15685 if (!error->transcoder[i].power_domain_on)
15686 continue;
15687
15688 error->transcoder[i].cpu_transcoder = cpu_transcoder;
15689
15690 error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
15691 error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
15692 error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
15693 error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
15694 error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
15695 error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
15696 error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
15697 }
15698
15699 return error;
15700 }
15701
15702 #define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
15703
15704 void
15705 intel_display_print_error_state(struct drm_i915_error_state_buf *m,
15706 struct drm_device *dev,
15707 struct intel_display_error_state *error)
15708 {
15709 struct drm_i915_private *dev_priv = dev->dev_private;
15710 int i;
15711
15712 if (!error)
15713 return;
15714
15715 err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes);
15716 if (IS_HASWELL(dev) || IS_BROADWELL(dev))
15717 err_printf(m, "PWR_WELL_CTL2: %08x\n",
15718 error->power_well_driver);
15719 for_each_pipe(dev_priv, i) {
15720 err_printf(m, "Pipe [%d]:\n", i);
15721 err_printf(m, " Power: %s\n",
15722 onoff(error->pipe[i].power_domain_on));
15723 err_printf(m, " SRC: %08x\n", error->pipe[i].source);
15724 err_printf(m, " STAT: %08x\n", error->pipe[i].stat);
15725
15726 err_printf(m, "Plane [%d]:\n", i);
15727 err_printf(m, " CNTR: %08x\n", error->plane[i].control);
15728 err_printf(m, " STRIDE: %08x\n", error->plane[i].stride);
15729 if (INTEL_INFO(dev)->gen <= 3) {
15730 err_printf(m, " SIZE: %08x\n", error->plane[i].size);
15731 err_printf(m, " POS: %08x\n", error->plane[i].pos);
15732 }
15733 if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
15734 err_printf(m, " ADDR: %08x\n", error->plane[i].addr);
15735 if (INTEL_INFO(dev)->gen >= 4) {
15736 err_printf(m, " SURF: %08x\n", error->plane[i].surface);
15737 err_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset);
15738 }
15739
15740 err_printf(m, "Cursor [%d]:\n", i);
15741 err_printf(m, " CNTR: %08x\n", error->cursor[i].control);
15742 err_printf(m, " POS: %08x\n", error->cursor[i].position);
15743 err_printf(m, " BASE: %08x\n", error->cursor[i].base);
15744 }
15745
15746 for (i = 0; i < error->num_transcoders; i++) {
15747 err_printf(m, "CPU transcoder: %s\n",
15748 transcoder_name(error->transcoder[i].cpu_transcoder));
15749 err_printf(m, " Power: %s\n",
15750 onoff(error->transcoder[i].power_domain_on));
15751 err_printf(m, " CONF: %08x\n", error->transcoder[i].conf);
15752 err_printf(m, " HTOTAL: %08x\n", error->transcoder[i].htotal);
15753 err_printf(m, " HBLANK: %08x\n", error->transcoder[i].hblank);
15754 err_printf(m, " HSYNC: %08x\n", error->transcoder[i].hsync);
15755 err_printf(m, " VTOTAL: %08x\n", error->transcoder[i].vtotal);
15756 err_printf(m, " VBLANK: %08x\n", error->transcoder[i].vblank);
15757 err_printf(m, " VSYNC: %08x\n", error->transcoder[i].vsync);
15758 }
15759 }