]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - drivers/gpu/drm/i915/intel_display.c
drm/i915: Add output_types bitmask into the crtc state
[mirror_ubuntu-bionic-kernel.git] / drivers / gpu / drm / i915 / intel_display.c
CommitLineData
79e53945
JB
1/*
2 * Copyright © 2006-2007 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 */
26
618563e3 27#include <linux/dmi.h>
c1c7af60
JB
28#include <linux/module.h>
29#include <linux/input.h>
79e53945 30#include <linux/i2c.h>
7662c8bd 31#include <linux/kernel.h>
5a0e3ad6 32#include <linux/slab.h>
9cce37f4 33#include <linux/vgaarb.h>
e0dac65e 34#include <drm/drm_edid.h>
760285e7 35#include <drm/drmP.h>
79e53945 36#include "intel_drv.h"
760285e7 37#include <drm/i915_drm.h>
79e53945 38#include "i915_drv.h"
c37efb99 39#include "i915_gem_dmabuf.h"
db18b6a6 40#include "intel_dsi.h"
e5510fac 41#include "i915_trace.h"
319c1d42 42#include <drm/drm_atomic.h>
c196e1d6 43#include <drm/drm_atomic_helper.h>
760285e7
DH
44#include <drm/drm_dp_helper.h>
45#include <drm/drm_crtc_helper.h>
465c120c
MR
46#include <drm/drm_plane_helper.h>
47#include <drm/drm_rect.h>
c0f372b3 48#include <linux/dma_remapping.h>
fd8e058a 49#include <linux/reservation.h>
79e53945 50
5a21b665
DV
51static bool is_mmio_work(struct intel_flip_work *work)
52{
53 return work->mmio_work.func;
54}
55
465c120c 56/* Primary plane formats for gen <= 3 */
568db4f2 57static const uint32_t i8xx_primary_formats[] = {
67fe7dc5
DL
58 DRM_FORMAT_C8,
59 DRM_FORMAT_RGB565,
465c120c 60 DRM_FORMAT_XRGB1555,
67fe7dc5 61 DRM_FORMAT_XRGB8888,
465c120c
MR
62};
63
64/* Primary plane formats for gen >= 4 */
568db4f2 65static const uint32_t i965_primary_formats[] = {
6c0fd451
DL
66 DRM_FORMAT_C8,
67 DRM_FORMAT_RGB565,
68 DRM_FORMAT_XRGB8888,
69 DRM_FORMAT_XBGR8888,
70 DRM_FORMAT_XRGB2101010,
71 DRM_FORMAT_XBGR2101010,
72};
73
74static const uint32_t skl_primary_formats[] = {
67fe7dc5
DL
75 DRM_FORMAT_C8,
76 DRM_FORMAT_RGB565,
77 DRM_FORMAT_XRGB8888,
465c120c 78 DRM_FORMAT_XBGR8888,
67fe7dc5 79 DRM_FORMAT_ARGB8888,
465c120c
MR
80 DRM_FORMAT_ABGR8888,
81 DRM_FORMAT_XRGB2101010,
465c120c 82 DRM_FORMAT_XBGR2101010,
ea916ea0
KM
83 DRM_FORMAT_YUYV,
84 DRM_FORMAT_YVYU,
85 DRM_FORMAT_UYVY,
86 DRM_FORMAT_VYUY,
465c120c
MR
87};
88
3d7d6510
MR
89/* Cursor formats */
90static const uint32_t intel_cursor_formats[] = {
91 DRM_FORMAT_ARGB8888,
92};
93
f1f644dc 94static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
5cec258b 95 struct intel_crtc_state *pipe_config);
18442d08 96static void ironlake_pch_clock_get(struct intel_crtc *crtc,
5cec258b 97 struct intel_crtc_state *pipe_config);
f1f644dc 98
eb1bfe80
JB
99static int intel_framebuffer_init(struct drm_device *dev,
100 struct intel_framebuffer *ifb,
101 struct drm_mode_fb_cmd2 *mode_cmd,
102 struct drm_i915_gem_object *obj);
5b18e57c
DV
103static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc);
104static void intel_set_pipe_timings(struct intel_crtc *intel_crtc);
bc58be60 105static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc);
29407aab 106static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
f769cd24
VK
107 struct intel_link_m_n *m_n,
108 struct intel_link_m_n *m2_n2);
29407aab 109static void ironlake_set_pipeconf(struct drm_crtc *crtc);
229fca97 110static void haswell_set_pipeconf(struct drm_crtc *crtc);
391bf048 111static void haswell_set_pipemisc(struct drm_crtc *crtc);
d288f65f 112static void vlv_prepare_pll(struct intel_crtc *crtc,
5cec258b 113 const struct intel_crtc_state *pipe_config);
d288f65f 114static void chv_prepare_pll(struct intel_crtc *crtc,
5cec258b 115 const struct intel_crtc_state *pipe_config);
5a21b665
DV
116static void intel_begin_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
117static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
549e2bfb
CK
118static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc,
119 struct intel_crtc_state *crtc_state);
bfd16b2a
ML
120static void skylake_pfit_enable(struct intel_crtc *crtc);
121static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force);
122static void ironlake_pfit_enable(struct intel_crtc *crtc);
043e9bda 123static void intel_modeset_setup_hw_state(struct drm_device *dev);
2622a081 124static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);
4e5ca60f 125static int ilk_max_pixel_rate(struct drm_atomic_state *state);
324513c0 126static int bxt_calc_cdclk(int max_pixclk);
e7457a9a 127
/*
 * Legal min/max ranges for each PLL divider on a given platform, plus the
 * rule for choosing the p2 post divider: p2_slow is used for target dot
 * clocks below p2.dot_limit, p2_fast above it (LVDS instead selects by
 * single vs. dual link — see i9xx_select_p2_div()).
 */
struct intel_limit {
	struct {
		int min, max;
	} dot, vco, n, m, m1, m2, p, p1;

	struct {
		int dot_limit;
		int p2_slow, p2_fast;
	} p2;
};
79e53945 138
bfa7df01
VS
139/* returns HPLL frequency in kHz */
140static int valleyview_get_vco(struct drm_i915_private *dev_priv)
141{
142 int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
143
144 /* Obtain SKU information */
145 mutex_lock(&dev_priv->sb_lock);
146 hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
147 CCK_FUSE_HPLL_FREQ_MASK;
148 mutex_unlock(&dev_priv->sb_lock);
149
150 return vco_freq[hpll_freq] * 1000;
151}
152
c30fec65
VS
153int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
154 const char *name, u32 reg, int ref_freq)
bfa7df01
VS
155{
156 u32 val;
157 int divider;
158
bfa7df01
VS
159 mutex_lock(&dev_priv->sb_lock);
160 val = vlv_cck_read(dev_priv, reg);
161 mutex_unlock(&dev_priv->sb_lock);
162
163 divider = val & CCK_FREQUENCY_VALUES;
164
165 WARN((val & CCK_FREQUENCY_STATUS) !=
166 (divider << CCK_FREQUENCY_STATUS_SHIFT),
167 "%s change in progress\n", name);
168
c30fec65
VS
169 return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
170}
171
172static int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
173 const char *name, u32 reg)
174{
175 if (dev_priv->hpll_freq == 0)
176 dev_priv->hpll_freq = valleyview_get_vco(dev_priv);
177
178 return vlv_get_cck_clock(dev_priv, name, reg,
179 dev_priv->hpll_freq);
bfa7df01
VS
180}
181
e7dc33f3
VS
182static int
183intel_pch_rawclk(struct drm_i915_private *dev_priv)
d2acd215 184{
e7dc33f3
VS
185 return (I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK) * 1000;
186}
d2acd215 187
e7dc33f3
VS
188static int
189intel_vlv_hrawclk(struct drm_i915_private *dev_priv)
190{
19ab4ed3 191 /* RAWCLK_FREQ_VLV register updated from power well code */
35d38d1f
VS
192 return vlv_get_cck_clock_hpll(dev_priv, "hrawclk",
193 CCK_DISPLAY_REF_CLOCK_CONTROL);
d2acd215
DV
194}
195
e7dc33f3
VS
196static int
197intel_g4x_hrawclk(struct drm_i915_private *dev_priv)
79e50a4f 198{
79e50a4f
JN
199 uint32_t clkcfg;
200
e7dc33f3 201 /* hrawclock is 1/4 the FSB frequency */
79e50a4f
JN
202 clkcfg = I915_READ(CLKCFG);
203 switch (clkcfg & CLKCFG_FSB_MASK) {
204 case CLKCFG_FSB_400:
e7dc33f3 205 return 100000;
79e50a4f 206 case CLKCFG_FSB_533:
e7dc33f3 207 return 133333;
79e50a4f 208 case CLKCFG_FSB_667:
e7dc33f3 209 return 166667;
79e50a4f 210 case CLKCFG_FSB_800:
e7dc33f3 211 return 200000;
79e50a4f 212 case CLKCFG_FSB_1067:
e7dc33f3 213 return 266667;
79e50a4f 214 case CLKCFG_FSB_1333:
e7dc33f3 215 return 333333;
79e50a4f
JN
216 /* these two are just a guess; one of them might be right */
217 case CLKCFG_FSB_1600:
218 case CLKCFG_FSB_1600_ALT:
e7dc33f3 219 return 400000;
79e50a4f 220 default:
e7dc33f3 221 return 133333;
79e50a4f
JN
222 }
223}
224
19ab4ed3 225void intel_update_rawclk(struct drm_i915_private *dev_priv)
e7dc33f3
VS
226{
227 if (HAS_PCH_SPLIT(dev_priv))
228 dev_priv->rawclk_freq = intel_pch_rawclk(dev_priv);
229 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
230 dev_priv->rawclk_freq = intel_vlv_hrawclk(dev_priv);
231 else if (IS_G4X(dev_priv) || IS_PINEVIEW(dev_priv))
232 dev_priv->rawclk_freq = intel_g4x_hrawclk(dev_priv);
233 else
234 return; /* no rawclk on other platforms, or no need to know it */
235
236 DRM_DEBUG_DRIVER("rawclk rate: %d kHz\n", dev_priv->rawclk_freq);
237}
238
bfa7df01
VS
239static void intel_update_czclk(struct drm_i915_private *dev_priv)
240{
666a4537 241 if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
bfa7df01
VS
242 return;
243
244 dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
245 CCK_CZ_CLOCK_CONTROL);
246
247 DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
248}
249
021357ac 250static inline u32 /* units of 100MHz */
21a727b3
VS
251intel_fdi_link_freq(struct drm_i915_private *dev_priv,
252 const struct intel_crtc_state *pipe_config)
021357ac 253{
21a727b3
VS
254 if (HAS_DDI(dev_priv))
255 return pipe_config->port_clock; /* SPLL */
256 else if (IS_GEN5(dev_priv))
257 return ((I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2) * 10000;
e3b247da 258 else
21a727b3 259 return 270000;
021357ac
CW
260}
261
1b6f4958 262static const struct intel_limit intel_limits_i8xx_dac = {
0206e353 263 .dot = { .min = 25000, .max = 350000 },
9c333719 264 .vco = { .min = 908000, .max = 1512000 },
91dbe5fb 265 .n = { .min = 2, .max = 16 },
0206e353
AJ
266 .m = { .min = 96, .max = 140 },
267 .m1 = { .min = 18, .max = 26 },
268 .m2 = { .min = 6, .max = 16 },
269 .p = { .min = 4, .max = 128 },
270 .p1 = { .min = 2, .max = 33 },
273e27ca
EA
271 .p2 = { .dot_limit = 165000,
272 .p2_slow = 4, .p2_fast = 2 },
e4b36699
KP
273};
274
1b6f4958 275static const struct intel_limit intel_limits_i8xx_dvo = {
5d536e28 276 .dot = { .min = 25000, .max = 350000 },
9c333719 277 .vco = { .min = 908000, .max = 1512000 },
91dbe5fb 278 .n = { .min = 2, .max = 16 },
5d536e28
DV
279 .m = { .min = 96, .max = 140 },
280 .m1 = { .min = 18, .max = 26 },
281 .m2 = { .min = 6, .max = 16 },
282 .p = { .min = 4, .max = 128 },
283 .p1 = { .min = 2, .max = 33 },
284 .p2 = { .dot_limit = 165000,
285 .p2_slow = 4, .p2_fast = 4 },
286};
287
1b6f4958 288static const struct intel_limit intel_limits_i8xx_lvds = {
0206e353 289 .dot = { .min = 25000, .max = 350000 },
9c333719 290 .vco = { .min = 908000, .max = 1512000 },
91dbe5fb 291 .n = { .min = 2, .max = 16 },
0206e353
AJ
292 .m = { .min = 96, .max = 140 },
293 .m1 = { .min = 18, .max = 26 },
294 .m2 = { .min = 6, .max = 16 },
295 .p = { .min = 4, .max = 128 },
296 .p1 = { .min = 1, .max = 6 },
273e27ca
EA
297 .p2 = { .dot_limit = 165000,
298 .p2_slow = 14, .p2_fast = 7 },
e4b36699 299};
273e27ca 300
1b6f4958 301static const struct intel_limit intel_limits_i9xx_sdvo = {
0206e353
AJ
302 .dot = { .min = 20000, .max = 400000 },
303 .vco = { .min = 1400000, .max = 2800000 },
304 .n = { .min = 1, .max = 6 },
305 .m = { .min = 70, .max = 120 },
4f7dfb67
PJ
306 .m1 = { .min = 8, .max = 18 },
307 .m2 = { .min = 3, .max = 7 },
0206e353
AJ
308 .p = { .min = 5, .max = 80 },
309 .p1 = { .min = 1, .max = 8 },
273e27ca
EA
310 .p2 = { .dot_limit = 200000,
311 .p2_slow = 10, .p2_fast = 5 },
e4b36699
KP
312};
313
1b6f4958 314static const struct intel_limit intel_limits_i9xx_lvds = {
0206e353
AJ
315 .dot = { .min = 20000, .max = 400000 },
316 .vco = { .min = 1400000, .max = 2800000 },
317 .n = { .min = 1, .max = 6 },
318 .m = { .min = 70, .max = 120 },
53a7d2d1
PJ
319 .m1 = { .min = 8, .max = 18 },
320 .m2 = { .min = 3, .max = 7 },
0206e353
AJ
321 .p = { .min = 7, .max = 98 },
322 .p1 = { .min = 1, .max = 8 },
273e27ca
EA
323 .p2 = { .dot_limit = 112000,
324 .p2_slow = 14, .p2_fast = 7 },
e4b36699
KP
325};
326
273e27ca 327
1b6f4958 328static const struct intel_limit intel_limits_g4x_sdvo = {
273e27ca
EA
329 .dot = { .min = 25000, .max = 270000 },
330 .vco = { .min = 1750000, .max = 3500000},
331 .n = { .min = 1, .max = 4 },
332 .m = { .min = 104, .max = 138 },
333 .m1 = { .min = 17, .max = 23 },
334 .m2 = { .min = 5, .max = 11 },
335 .p = { .min = 10, .max = 30 },
336 .p1 = { .min = 1, .max = 3},
337 .p2 = { .dot_limit = 270000,
338 .p2_slow = 10,
339 .p2_fast = 10
044c7c41 340 },
e4b36699
KP
341};
342
1b6f4958 343static const struct intel_limit intel_limits_g4x_hdmi = {
273e27ca
EA
344 .dot = { .min = 22000, .max = 400000 },
345 .vco = { .min = 1750000, .max = 3500000},
346 .n = { .min = 1, .max = 4 },
347 .m = { .min = 104, .max = 138 },
348 .m1 = { .min = 16, .max = 23 },
349 .m2 = { .min = 5, .max = 11 },
350 .p = { .min = 5, .max = 80 },
351 .p1 = { .min = 1, .max = 8},
352 .p2 = { .dot_limit = 165000,
353 .p2_slow = 10, .p2_fast = 5 },
e4b36699
KP
354};
355
1b6f4958 356static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
273e27ca
EA
357 .dot = { .min = 20000, .max = 115000 },
358 .vco = { .min = 1750000, .max = 3500000 },
359 .n = { .min = 1, .max = 3 },
360 .m = { .min = 104, .max = 138 },
361 .m1 = { .min = 17, .max = 23 },
362 .m2 = { .min = 5, .max = 11 },
363 .p = { .min = 28, .max = 112 },
364 .p1 = { .min = 2, .max = 8 },
365 .p2 = { .dot_limit = 0,
366 .p2_slow = 14, .p2_fast = 14
044c7c41 367 },
e4b36699
KP
368};
369
1b6f4958 370static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
273e27ca
EA
371 .dot = { .min = 80000, .max = 224000 },
372 .vco = { .min = 1750000, .max = 3500000 },
373 .n = { .min = 1, .max = 3 },
374 .m = { .min = 104, .max = 138 },
375 .m1 = { .min = 17, .max = 23 },
376 .m2 = { .min = 5, .max = 11 },
377 .p = { .min = 14, .max = 42 },
378 .p1 = { .min = 2, .max = 6 },
379 .p2 = { .dot_limit = 0,
380 .p2_slow = 7, .p2_fast = 7
044c7c41 381 },
e4b36699
KP
382};
383
1b6f4958 384static const struct intel_limit intel_limits_pineview_sdvo = {
0206e353
AJ
385 .dot = { .min = 20000, .max = 400000},
386 .vco = { .min = 1700000, .max = 3500000 },
273e27ca 387 /* Pineview's Ncounter is a ring counter */
0206e353
AJ
388 .n = { .min = 3, .max = 6 },
389 .m = { .min = 2, .max = 256 },
273e27ca 390 /* Pineview only has one combined m divider, which we treat as m2. */
0206e353
AJ
391 .m1 = { .min = 0, .max = 0 },
392 .m2 = { .min = 0, .max = 254 },
393 .p = { .min = 5, .max = 80 },
394 .p1 = { .min = 1, .max = 8 },
273e27ca
EA
395 .p2 = { .dot_limit = 200000,
396 .p2_slow = 10, .p2_fast = 5 },
e4b36699
KP
397};
398
1b6f4958 399static const struct intel_limit intel_limits_pineview_lvds = {
0206e353
AJ
400 .dot = { .min = 20000, .max = 400000 },
401 .vco = { .min = 1700000, .max = 3500000 },
402 .n = { .min = 3, .max = 6 },
403 .m = { .min = 2, .max = 256 },
404 .m1 = { .min = 0, .max = 0 },
405 .m2 = { .min = 0, .max = 254 },
406 .p = { .min = 7, .max = 112 },
407 .p1 = { .min = 1, .max = 8 },
273e27ca
EA
408 .p2 = { .dot_limit = 112000,
409 .p2_slow = 14, .p2_fast = 14 },
e4b36699
KP
410};
411
273e27ca
EA
412/* Ironlake / Sandybridge
413 *
414 * We calculate clock using (register_value + 2) for N/M1/M2, so here
415 * the range value for them is (actual_value - 2).
416 */
1b6f4958 417static const struct intel_limit intel_limits_ironlake_dac = {
273e27ca
EA
418 .dot = { .min = 25000, .max = 350000 },
419 .vco = { .min = 1760000, .max = 3510000 },
420 .n = { .min = 1, .max = 5 },
421 .m = { .min = 79, .max = 127 },
422 .m1 = { .min = 12, .max = 22 },
423 .m2 = { .min = 5, .max = 9 },
424 .p = { .min = 5, .max = 80 },
425 .p1 = { .min = 1, .max = 8 },
426 .p2 = { .dot_limit = 225000,
427 .p2_slow = 10, .p2_fast = 5 },
e4b36699
KP
428};
429
1b6f4958 430static const struct intel_limit intel_limits_ironlake_single_lvds = {
273e27ca
EA
431 .dot = { .min = 25000, .max = 350000 },
432 .vco = { .min = 1760000, .max = 3510000 },
433 .n = { .min = 1, .max = 3 },
434 .m = { .min = 79, .max = 118 },
435 .m1 = { .min = 12, .max = 22 },
436 .m2 = { .min = 5, .max = 9 },
437 .p = { .min = 28, .max = 112 },
438 .p1 = { .min = 2, .max = 8 },
439 .p2 = { .dot_limit = 225000,
440 .p2_slow = 14, .p2_fast = 14 },
b91ad0ec
ZW
441};
442
1b6f4958 443static const struct intel_limit intel_limits_ironlake_dual_lvds = {
273e27ca
EA
444 .dot = { .min = 25000, .max = 350000 },
445 .vco = { .min = 1760000, .max = 3510000 },
446 .n = { .min = 1, .max = 3 },
447 .m = { .min = 79, .max = 127 },
448 .m1 = { .min = 12, .max = 22 },
449 .m2 = { .min = 5, .max = 9 },
450 .p = { .min = 14, .max = 56 },
451 .p1 = { .min = 2, .max = 8 },
452 .p2 = { .dot_limit = 225000,
453 .p2_slow = 7, .p2_fast = 7 },
b91ad0ec
ZW
454};
455
273e27ca 456/* LVDS 100mhz refclk limits. */
1b6f4958 457static const struct intel_limit intel_limits_ironlake_single_lvds_100m = {
273e27ca
EA
458 .dot = { .min = 25000, .max = 350000 },
459 .vco = { .min = 1760000, .max = 3510000 },
460 .n = { .min = 1, .max = 2 },
461 .m = { .min = 79, .max = 126 },
462 .m1 = { .min = 12, .max = 22 },
463 .m2 = { .min = 5, .max = 9 },
464 .p = { .min = 28, .max = 112 },
0206e353 465 .p1 = { .min = 2, .max = 8 },
273e27ca
EA
466 .p2 = { .dot_limit = 225000,
467 .p2_slow = 14, .p2_fast = 14 },
b91ad0ec
ZW
468};
469
1b6f4958 470static const struct intel_limit intel_limits_ironlake_dual_lvds_100m = {
273e27ca
EA
471 .dot = { .min = 25000, .max = 350000 },
472 .vco = { .min = 1760000, .max = 3510000 },
473 .n = { .min = 1, .max = 3 },
474 .m = { .min = 79, .max = 126 },
475 .m1 = { .min = 12, .max = 22 },
476 .m2 = { .min = 5, .max = 9 },
477 .p = { .min = 14, .max = 42 },
0206e353 478 .p1 = { .min = 2, .max = 6 },
273e27ca
EA
479 .p2 = { .dot_limit = 225000,
480 .p2_slow = 7, .p2_fast = 7 },
4547668a
ZY
481};
482
1b6f4958 483static const struct intel_limit intel_limits_vlv = {
f01b7962
VS
484 /*
485 * These are the data rate limits (measured in fast clocks)
486 * since those are the strictest limits we have. The fast
487 * clock and actual rate limits are more relaxed, so checking
488 * them would make no difference.
489 */
490 .dot = { .min = 25000 * 5, .max = 270000 * 5 },
75e53986 491 .vco = { .min = 4000000, .max = 6000000 },
a0c4da24 492 .n = { .min = 1, .max = 7 },
a0c4da24
JB
493 .m1 = { .min = 2, .max = 3 },
494 .m2 = { .min = 11, .max = 156 },
b99ab663 495 .p1 = { .min = 2, .max = 3 },
5fdc9c49 496 .p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
a0c4da24
JB
497};
498
1b6f4958 499static const struct intel_limit intel_limits_chv = {
ef9348c8
CML
500 /*
501 * These are the data rate limits (measured in fast clocks)
502 * since those are the strictest limits we have. The fast
503 * clock and actual rate limits are more relaxed, so checking
504 * them would make no difference.
505 */
506 .dot = { .min = 25000 * 5, .max = 540000 * 5},
17fe1021 507 .vco = { .min = 4800000, .max = 6480000 },
ef9348c8
CML
508 .n = { .min = 1, .max = 1 },
509 .m1 = { .min = 2, .max = 2 },
510 .m2 = { .min = 24 << 22, .max = 175 << 22 },
511 .p1 = { .min = 2, .max = 4 },
512 .p2 = { .p2_slow = 1, .p2_fast = 14 },
513};
514
1b6f4958 515static const struct intel_limit intel_limits_bxt = {
5ab7b0b7
ID
516 /* FIXME: find real dot limits */
517 .dot = { .min = 0, .max = INT_MAX },
e6292556 518 .vco = { .min = 4800000, .max = 6700000 },
5ab7b0b7
ID
519 .n = { .min = 1, .max = 1 },
520 .m1 = { .min = 2, .max = 2 },
521 /* FIXME: find real m2 limits */
522 .m2 = { .min = 2 << 22, .max = 255 << 22 },
523 .p1 = { .min = 2, .max = 4 },
524 .p2 = { .p2_slow = 1, .p2_fast = 20 },
525};
526
cdba954e
ACO
527static bool
528needs_modeset(struct drm_crtc_state *state)
529{
fc596660 530 return drm_atomic_crtc_needs_modeset(state);
cdba954e
ACO
531}
532
e0638cdf
PZ
533/**
534 * Returns whether any output on the specified pipe is of the specified type
535 */
4093561b 536bool intel_pipe_has_type(struct intel_crtc *crtc, enum intel_output_type type)
e0638cdf 537{
253c84c8 538 return crtc->config->output_types & (1 << type);
e0638cdf
PZ
539}
540
d0737e1d
ACO
541/**
542 * Returns whether any output on the specified pipe will have the specified
543 * type after a staged modeset is complete, i.e., the same as
544 * intel_pipe_has_type() but looking at encoder->new_crtc instead of
545 * encoder->crtc.
546 */
a93e255f 547static bool intel_pipe_will_have_type(const struct intel_crtc_state *crtc_state,
253c84c8 548 enum intel_output_type type)
d0737e1d 549{
253c84c8 550 return crtc_state->output_types & (1 << type);
d0737e1d
ACO
551}
552
dccbea3b
ID
553/*
554 * Platform specific helpers to calculate the port PLL loopback- (clock.m),
555 * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
556 * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
557 * The helpers' return value is the rate of the clock that is fed to the
558 * display engine's pipe which can be the above fast dot clock rate or a
559 * divided-down version of it.
560 */
f2b115e6 561/* m1 is reserved as 0 in Pineview, n is a ring counter */
9e2c8475 562static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
79e53945 563{
2177832f
SL
564 clock->m = clock->m2 + 2;
565 clock->p = clock->p1 * clock->p2;
ed5ca77e 566 if (WARN_ON(clock->n == 0 || clock->p == 0))
dccbea3b 567 return 0;
fb03ac01
VS
568 clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
569 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
dccbea3b
ID
570
571 return clock->dot;
2177832f
SL
572}
573
7429e9d4
DV
574static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
575{
576 return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
577}
578
9e2c8475 579static int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
2177832f 580{
7429e9d4 581 clock->m = i9xx_dpll_compute_m(clock);
79e53945 582 clock->p = clock->p1 * clock->p2;
ed5ca77e 583 if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
dccbea3b 584 return 0;
fb03ac01
VS
585 clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
586 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
dccbea3b
ID
587
588 return clock->dot;
79e53945
JB
589}
590
9e2c8475 591static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
589eca67
ID
592{
593 clock->m = clock->m1 * clock->m2;
594 clock->p = clock->p1 * clock->p2;
595 if (WARN_ON(clock->n == 0 || clock->p == 0))
dccbea3b 596 return 0;
589eca67
ID
597 clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
598 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
dccbea3b
ID
599
600 return clock->dot / 5;
589eca67
ID
601}
602
9e2c8475 603int chv_calc_dpll_params(int refclk, struct dpll *clock)
ef9348c8
CML
604{
605 clock->m = clock->m1 * clock->m2;
606 clock->p = clock->p1 * clock->p2;
607 if (WARN_ON(clock->n == 0 || clock->p == 0))
dccbea3b 608 return 0;
ef9348c8
CML
609 clock->vco = DIV_ROUND_CLOSEST_ULL((uint64_t)refclk * clock->m,
610 clock->n << 22);
611 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
dccbea3b
ID
612
613 return clock->dot / 5;
ef9348c8
CML
614}
615
7c04d1d9 616#define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0)
79e53945
JB
617/**
618 * Returns whether the given set of divisors are valid for a given refclk with
619 * the given connectors.
620 */
621
1b894b59 622static bool intel_PLL_is_valid(struct drm_device *dev,
1b6f4958 623 const struct intel_limit *limit,
9e2c8475 624 const struct dpll *clock)
79e53945 625{
f01b7962
VS
626 if (clock->n < limit->n.min || limit->n.max < clock->n)
627 INTELPllInvalid("n out of range\n");
79e53945 628 if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
0206e353 629 INTELPllInvalid("p1 out of range\n");
79e53945 630 if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
0206e353 631 INTELPllInvalid("m2 out of range\n");
79e53945 632 if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
0206e353 633 INTELPllInvalid("m1 out of range\n");
f01b7962 634
666a4537
WB
635 if (!IS_PINEVIEW(dev) && !IS_VALLEYVIEW(dev) &&
636 !IS_CHERRYVIEW(dev) && !IS_BROXTON(dev))
f01b7962
VS
637 if (clock->m1 <= clock->m2)
638 INTELPllInvalid("m1 <= m2\n");
639
666a4537 640 if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) && !IS_BROXTON(dev)) {
f01b7962
VS
641 if (clock->p < limit->p.min || limit->p.max < clock->p)
642 INTELPllInvalid("p out of range\n");
643 if (clock->m < limit->m.min || limit->m.max < clock->m)
644 INTELPllInvalid("m out of range\n");
645 }
646
79e53945 647 if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
0206e353 648 INTELPllInvalid("vco out of range\n");
79e53945
JB
649 /* XXX: We may need to be checking "Dot clock" depending on the multiplier,
650 * connector, etc., rather than just a single range.
651 */
652 if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
0206e353 653 INTELPllInvalid("dot out of range\n");
79e53945
JB
654
655 return true;
656}
657
3b1429d9 658static int
1b6f4958 659i9xx_select_p2_div(const struct intel_limit *limit,
3b1429d9
VS
660 const struct intel_crtc_state *crtc_state,
661 int target)
79e53945 662{
3b1429d9 663 struct drm_device *dev = crtc_state->base.crtc->dev;
79e53945 664
a93e255f 665 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
79e53945 666 /*
a210b028
DV
667 * For LVDS just rely on its current settings for dual-channel.
668 * We haven't figured out how to reliably set up different
669 * single/dual channel state, if we even can.
79e53945 670 */
1974cad0 671 if (intel_is_dual_link_lvds(dev))
3b1429d9 672 return limit->p2.p2_fast;
79e53945 673 else
3b1429d9 674 return limit->p2.p2_slow;
79e53945
JB
675 } else {
676 if (target < limit->p2.dot_limit)
3b1429d9 677 return limit->p2.p2_slow;
79e53945 678 else
3b1429d9 679 return limit->p2.p2_fast;
79e53945 680 }
3b1429d9
VS
681}
682
70e8aa21
ACO
683/*
684 * Returns a set of divisors for the desired target clock with the given
685 * refclk, or FALSE. The returned values represent the clock equation:
686 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
687 *
688 * Target and reference clocks are specified in kHz.
689 *
690 * If match_clock is provided, then best_clock P divider must match the P
691 * divider from @match_clock used for LVDS downclocking.
692 */
3b1429d9 693static bool
1b6f4958 694i9xx_find_best_dpll(const struct intel_limit *limit,
3b1429d9 695 struct intel_crtc_state *crtc_state,
9e2c8475
ACO
696 int target, int refclk, struct dpll *match_clock,
697 struct dpll *best_clock)
3b1429d9
VS
698{
699 struct drm_device *dev = crtc_state->base.crtc->dev;
9e2c8475 700 struct dpll clock;
3b1429d9 701 int err = target;
79e53945 702
0206e353 703 memset(best_clock, 0, sizeof(*best_clock));
79e53945 704
3b1429d9
VS
705 clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
706
42158660
ZY
707 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
708 clock.m1++) {
709 for (clock.m2 = limit->m2.min;
710 clock.m2 <= limit->m2.max; clock.m2++) {
c0efc387 711 if (clock.m2 >= clock.m1)
42158660
ZY
712 break;
713 for (clock.n = limit->n.min;
714 clock.n <= limit->n.max; clock.n++) {
715 for (clock.p1 = limit->p1.min;
716 clock.p1 <= limit->p1.max; clock.p1++) {
79e53945
JB
717 int this_err;
718
dccbea3b 719 i9xx_calc_dpll_params(refclk, &clock);
ac58c3f0
DV
720 if (!intel_PLL_is_valid(dev, limit,
721 &clock))
722 continue;
723 if (match_clock &&
724 clock.p != match_clock->p)
725 continue;
726
727 this_err = abs(clock.dot - target);
728 if (this_err < err) {
729 *best_clock = clock;
730 err = this_err;
731 }
732 }
733 }
734 }
735 }
736
737 return (err != target);
738}
739
70e8aa21
ACO
740/*
741 * Returns a set of divisors for the desired target clock with the given
742 * refclk, or FALSE. The returned values represent the clock equation:
743 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
744 *
745 * Target and reference clocks are specified in kHz.
746 *
747 * If match_clock is provided, then best_clock P divider must match the P
748 * divider from @match_clock used for LVDS downclocking.
749 */
ac58c3f0 750static bool
1b6f4958 751pnv_find_best_dpll(const struct intel_limit *limit,
a93e255f 752 struct intel_crtc_state *crtc_state,
9e2c8475
ACO
753 int target, int refclk, struct dpll *match_clock,
754 struct dpll *best_clock)
79e53945 755{
3b1429d9 756 struct drm_device *dev = crtc_state->base.crtc->dev;
9e2c8475 757 struct dpll clock;
79e53945
JB
758 int err = target;
759
0206e353 760 memset(best_clock, 0, sizeof(*best_clock));
79e53945 761
3b1429d9
VS
762 clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
763
42158660
ZY
764 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
765 clock.m1++) {
766 for (clock.m2 = limit->m2.min;
767 clock.m2 <= limit->m2.max; clock.m2++) {
42158660
ZY
768 for (clock.n = limit->n.min;
769 clock.n <= limit->n.max; clock.n++) {
770 for (clock.p1 = limit->p1.min;
771 clock.p1 <= limit->p1.max; clock.p1++) {
79e53945
JB
772 int this_err;
773
dccbea3b 774 pnv_calc_dpll_params(refclk, &clock);
1b894b59
CW
775 if (!intel_PLL_is_valid(dev, limit,
776 &clock))
79e53945 777 continue;
cec2f356
SP
778 if (match_clock &&
779 clock.p != match_clock->p)
780 continue;
79e53945
JB
781
782 this_err = abs(clock.dot - target);
783 if (this_err < err) {
784 *best_clock = clock;
785 err = this_err;
786 }
787 }
788 }
789 }
790 }
791
792 return (err != target);
793}
794
997c030c
ACO
795/*
796 * Returns a set of divisors for the desired target clock with the given
797 * refclk, or FALSE. The returned values represent the clock equation:
798 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
70e8aa21
ACO
799 *
800 * Target and reference clocks are specified in kHz.
801 *
802 * If match_clock is provided, then best_clock P divider must match the P
803 * divider from @match_clock used for LVDS downclocking.
997c030c 804 */
d4906093 805static bool
1b6f4958 806g4x_find_best_dpll(const struct intel_limit *limit,
a93e255f 807 struct intel_crtc_state *crtc_state,
9e2c8475
ACO
808 int target, int refclk, struct dpll *match_clock,
809 struct dpll *best_clock)
d4906093 810{
3b1429d9 811 struct drm_device *dev = crtc_state->base.crtc->dev;
9e2c8475 812 struct dpll clock;
d4906093 813 int max_n;
3b1429d9 814 bool found = false;
6ba770dc
AJ
815 /* approximately equals target * 0.00585 */
816 int err_most = (target >> 8) + (target >> 9);
d4906093
ML
817
818 memset(best_clock, 0, sizeof(*best_clock));
3b1429d9
VS
819
820 clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
821
d4906093 822 max_n = limit->n.max;
f77f13e2 823 /* based on hardware requirement, prefer smaller n to precision */
d4906093 824 for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
f77f13e2 825 /* based on hardware requirement, prefere larger m1,m2 */
d4906093
ML
826 for (clock.m1 = limit->m1.max;
827 clock.m1 >= limit->m1.min; clock.m1--) {
828 for (clock.m2 = limit->m2.max;
829 clock.m2 >= limit->m2.min; clock.m2--) {
830 for (clock.p1 = limit->p1.max;
831 clock.p1 >= limit->p1.min; clock.p1--) {
832 int this_err;
833
dccbea3b 834 i9xx_calc_dpll_params(refclk, &clock);
1b894b59
CW
835 if (!intel_PLL_is_valid(dev, limit,
836 &clock))
d4906093 837 continue;
1b894b59
CW
838
839 this_err = abs(clock.dot - target);
d4906093
ML
840 if (this_err < err_most) {
841 *best_clock = clock;
842 err_most = this_err;
843 max_n = clock.n;
844 found = true;
845 }
846 }
847 }
848 }
849 }
2c07245f
ZW
850 return found;
851}
852
d5dd62bd
ID
853/*
854 * Check if the calculated PLL configuration is more optimal compared to the
855 * best configuration and error found so far. Return the calculated error.
856 */
857static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
9e2c8475
ACO
858 const struct dpll *calculated_clock,
859 const struct dpll *best_clock,
d5dd62bd
ID
860 unsigned int best_error_ppm,
861 unsigned int *error_ppm)
862{
9ca3ba01
ID
863 /*
864 * For CHV ignore the error and consider only the P value.
865 * Prefer a bigger P value based on HW requirements.
866 */
867 if (IS_CHERRYVIEW(dev)) {
868 *error_ppm = 0;
869
870 return calculated_clock->p > best_clock->p;
871 }
872
24be4e46
ID
873 if (WARN_ON_ONCE(!target_freq))
874 return false;
875
d5dd62bd
ID
876 *error_ppm = div_u64(1000000ULL *
877 abs(target_freq - calculated_clock->dot),
878 target_freq);
879 /*
880 * Prefer a better P value over a better (smaller) error if the error
881 * is small. Ensure this preference for future configurations too by
882 * setting the error to 0.
883 */
884 if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
885 *error_ppm = 0;
886
887 return true;
888 }
889
890 return *error_ppm + 10 < best_error_ppm;
891}
892
65b3d6a9
ACO
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE. The returned values represent the clock equation:
 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 */
static bool
vlv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct dpll clock;
	unsigned int bestppm = 1000000;
	/* min update 19.2 MHz */
	int max_n = min(limit->n.max, refclk / 19200);
	bool found = false;

	target *= 5; /* fast clock */

	memset(best_clock, 0, sizeof(*best_clock));

	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
			for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
			     clock.p2 -= clock.p2 > 10 ? 2 : 1) {
				clock.p = clock.p1 * clock.p2;
				/* based on hardware requirement, prefer bigger m1,m2 values */
				for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
					unsigned int ppm;

					/* solve m2 from the clock equation for this candidate */
					clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
								     refclk * clock.m1);

					vlv_calc_dpll_params(refclk, &clock);

					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;

					if (!vlv_PLL_is_optimal(dev, target,
								&clock,
								best_clock,
								bestppm, &ppm))
						continue;

					*best_clock = clock;
					bestppm = ppm;
					found = true;
				}
			}
		}
	}

	return found;
}
a4fc5ed6 951
65b3d6a9
ACO
952/*
953 * Returns a set of divisors for the desired target clock with the given
954 * refclk, or FALSE. The returned values represent the clock equation:
955 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
956 */
ef9348c8 957static bool
1b6f4958 958chv_find_best_dpll(const struct intel_limit *limit,
a93e255f 959 struct intel_crtc_state *crtc_state,
9e2c8475
ACO
960 int target, int refclk, struct dpll *match_clock,
961 struct dpll *best_clock)
ef9348c8 962{
a93e255f 963 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
a919ff14 964 struct drm_device *dev = crtc->base.dev;
9ca3ba01 965 unsigned int best_error_ppm;
9e2c8475 966 struct dpll clock;
ef9348c8
CML
967 uint64_t m2;
968 int found = false;
969
970 memset(best_clock, 0, sizeof(*best_clock));
9ca3ba01 971 best_error_ppm = 1000000;
ef9348c8
CML
972
973 /*
974 * Based on hardware doc, the n always set to 1, and m1 always
975 * set to 2. If requires to support 200Mhz refclk, we need to
976 * revisit this because n may not 1 anymore.
977 */
978 clock.n = 1, clock.m1 = 2;
979 target *= 5; /* fast clock */
980
981 for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
982 for (clock.p2 = limit->p2.p2_fast;
983 clock.p2 >= limit->p2.p2_slow;
984 clock.p2 -= clock.p2 > 10 ? 2 : 1) {
9ca3ba01 985 unsigned int error_ppm;
ef9348c8
CML
986
987 clock.p = clock.p1 * clock.p2;
988
989 m2 = DIV_ROUND_CLOSEST_ULL(((uint64_t)target * clock.p *
990 clock.n) << 22, refclk * clock.m1);
991
992 if (m2 > INT_MAX/clock.m1)
993 continue;
994
995 clock.m2 = m2;
996
dccbea3b 997 chv_calc_dpll_params(refclk, &clock);
ef9348c8
CML
998
999 if (!intel_PLL_is_valid(dev, limit, &clock))
1000 continue;
1001
9ca3ba01
ID
1002 if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
1003 best_error_ppm, &error_ppm))
1004 continue;
1005
1006 *best_clock = clock;
1007 best_error_ppm = error_ppm;
1008 found = true;
ef9348c8
CML
1009 }
1010 }
1011
1012 return found;
1013}
1014
5ab7b0b7 1015bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
9e2c8475 1016 struct dpll *best_clock)
5ab7b0b7 1017{
65b3d6a9 1018 int refclk = 100000;
1b6f4958 1019 const struct intel_limit *limit = &intel_limits_bxt;
5ab7b0b7 1020
65b3d6a9 1021 return chv_find_best_dpll(limit, crtc_state,
5ab7b0b7
ID
1022 target_clock, refclk, NULL, best_clock);
1023}
1024
/*
 * Report whether @crtc is currently active with a valid mode clock and a
 * framebuffer attached to its primary plane.
 */
bool intel_crtc_active(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	/* Be paranoid as we can arrive here with only partial
	 * state retrieved from the hardware during setup.
	 *
	 * We can ditch the adjusted_mode.crtc_clock check as soon
	 * as Haswell has gained clock readout/fastboot support.
	 *
	 * We can ditch the crtc->primary->fb check as soon as we can
	 * properly reconstruct framebuffers.
	 *
	 * FIXME: The intel_crtc->active here should be switched to
	 * crtc->state->active once we have proper CRTC states wired up
	 * for atomic.
	 */
	return intel_crtc->active && crtc->primary->state->fb &&
		intel_crtc->config->base.adjusted_mode.crtc_clock;
}
1045
/* Look up the CPU transcoder currently driving @pipe via its CRTC state. */
enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
					     enum pipe pipe)
{
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

	return intel_crtc->config->cpu_transcoder;
}
1054
/*
 * Check whether the display scanline counter of @pipe has stopped moving:
 * sample PIPEDSL twice, 5 ms apart, and report whether the value is static.
 */
static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	i915_reg_t reg = PIPEDSL(pipe);
	u32 line1, line2;
	u32 line_mask;

	/* gen2 has a narrower scanline field than gen3+ */
	if (IS_GEN2(dev))
		line_mask = DSL_LINEMASK_GEN2;
	else
		line_mask = DSL_LINEMASK_GEN3;

	line1 = I915_READ(reg) & line_mask;
	msleep(5);
	line2 = I915_READ(reg) & line_mask;

	return line1 == line2;
}
1073
/*
 * intel_wait_for_pipe_off - wait for pipe to turn off
 * @crtc: crtc whose pipe to wait for
 *
 * After disabling a pipe, we can't wait for vblank in the usual way,
 * spinning on the vblank interrupt status bit, since we won't actually
 * see an interrupt when the pipe is disabled.
 *
 * On Gen4 and above:
 * wait for the pipe register state bit to turn off
 *
 * Otherwise:
 * wait for the display line value to settle (it usually
 * ends up stopping at the start of the next frame).
 *
 */
static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
	enum pipe pipe = crtc->pipe;

	if (INTEL_INFO(dev)->gen >= 4) {
		i915_reg_t reg = PIPECONF(cpu_transcoder);

		/* Wait for the Pipe State to go off */
		if (intel_wait_for_register(dev_priv,
					    reg, I965_PIPECONF_ACTIVE, 0,
					    100))
			WARN(1, "pipe_off wait timed out\n");
	} else {
		/* Wait for the display line to settle */
		if (wait_for(pipe_dsl_stopped(dev, pipe), 100))
			WARN(1, "pipe_off wait timed out\n");
	}
}
1111
/* Only for pre-ILK configs */
/* Assert that the DPLL for @pipe is in the expected enabled/disabled @state. */
void assert_pll(struct drm_i915_private *dev_priv,
		enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(DPLL(pipe));
	cur_state = !!(val & DPLL_VCO_ENABLE);
	I915_STATE_WARN(cur_state != state,
	     "PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
b24e7179 1125
/* XXX: the dsi pll is shared between MIPI DSI ports */
/* Assert the DSI PLL is in the expected @state; read via the CCK sideband. */
void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
{
	u32 val;
	bool cur_state;

	/* sideband reads must be serialized against other sideband users */
	mutex_lock(&dev_priv->sb_lock);
	val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
	mutex_unlock(&dev_priv->sb_lock);

	cur_state = val & DSI_PLL_VCO_EN;
	I915_STATE_WARN(cur_state != state,
	     "DSI PLL state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
23538ef1 1141
/* Assert the FDI transmitter for @pipe is in the expected @state. */
static void assert_fdi_tx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	bool cur_state;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	if (HAS_DDI(dev_priv)) {
		/* DDI does not have a specific FDI_TX register */
		u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
		cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
	} else {
		u32 val = I915_READ(FDI_TX_CTL(pipe));
		cur_state = !!(val & FDI_TX_ENABLE);
	}
	I915_STATE_WARN(cur_state != state,
	     "FDI TX state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
1163
/* Assert the FDI receiver for @pipe is in the expected @state. */
static void assert_fdi_rx(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(FDI_RX_CTL(pipe));
	cur_state = !!(val & FDI_RX_ENABLE);
	I915_STATE_WARN(cur_state != state,
	     "FDI RX state assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
1178
/* Assert the FDI TX PLL for @pipe is enabled, where the check applies. */
static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
				      enum pipe pipe)
{
	u32 val;

	/* ILK FDI PLL is always enabled */
	if (IS_GEN5(dev_priv))
		return;

	/* On Haswell, DDI ports are responsible for the FDI PLL setup */
	if (HAS_DDI(dev_priv))
		return;

	val = I915_READ(FDI_TX_CTL(pipe));
	I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
}
1195
/* Assert the FDI RX PLL for @pipe is in the expected @state. */
void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
		       enum pipe pipe, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(FDI_RX_CTL(pipe));
	cur_state = !!(val & FDI_RX_PLL_ENABLE);
	I915_STATE_WARN(cur_state != state,
	     "FDI RX PLL assertion failure (expected %s, current %s)\n",
			onoff(state), onoff(cur_state));
}
1208
/*
 * Assert that the panel power sequencer registers for the panel on @pipe are
 * writable (panel off, or the register-unlock key is set).  Several PLL
 * registers are protected by the panel power sequencer on these platforms.
 */
void assert_panel_unlocked(struct drm_i915_private *dev_priv,
			   enum pipe pipe)
{
	struct drm_device *dev = &dev_priv->drm;
	i915_reg_t pp_reg;
	u32 val;
	enum pipe panel_pipe = PIPE_A;
	bool locked = true;

	/* DDI platforms are not expected to reach this check */
	if (WARN_ON(HAS_DDI(dev)))
		return;

	if (HAS_PCH_SPLIT(dev)) {
		u32 port_sel;

		pp_reg = PCH_PP_CONTROL;
		port_sel = I915_READ(PCH_PP_ON_DELAYS) & PANEL_PORT_SELECT_MASK;

		if (port_sel == PANEL_PORT_SELECT_LVDS &&
		    I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT)
			panel_pipe = PIPE_B;
		/* XXX: else fix for eDP */
	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		/* presumably write lock depends on pipe, not port select */
		pp_reg = VLV_PIPE_PP_CONTROL(pipe);
		panel_pipe = pipe;
	} else {
		pp_reg = PP_CONTROL;
		if (I915_READ(LVDS) & LVDS_PIPEB_SELECT)
			panel_pipe = PIPE_B;
	}

	/* unlocked means panel power off, or the unlock key written */
	val = I915_READ(pp_reg);
	if (!(val & PANEL_POWER_ON) ||
	    ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
		locked = false;

	I915_STATE_WARN(panel_pipe == pipe && locked,
	     "panel assertion failure, pipe %c regs locked\n",
	     pipe_name(pipe));
}
1250
/* Assert the hardware cursor on @pipe is in the expected @state. */
static void assert_cursor(struct drm_i915_private *dev_priv,
			  enum pipe pipe, bool state)
{
	struct drm_device *dev = &dev_priv->drm;
	bool cur_state;

	/* 845/865 use a different cursor control layout (pipe A register) */
	if (IS_845G(dev) || IS_I865G(dev))
		cur_state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
	else
		cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;

	I915_STATE_WARN(cur_state != state,
	     "cursor on pipe %c assertion failure (expected %s, current %s)\n",
			pipe_name(pipe), onoff(state), onoff(cur_state));
}
#define assert_cursor_enabled(d, p) assert_cursor(d, p, true)
#define assert_cursor_disabled(d, p) assert_cursor(d, p, false)
1268
/* Assert @pipe is in the expected enabled/disabled @state. */
void assert_pipe(struct drm_i915_private *dev_priv,
		 enum pipe pipe, bool state)
{
	bool cur_state;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);
	enum intel_display_power_domain power_domain;

	/* if we need the pipe quirk it must be always on */
	if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
	    (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
		state = true;

	/* only touch PIPECONF if its power domain is currently powered */
	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
	if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
		u32 val = I915_READ(PIPECONF(cpu_transcoder));
		cur_state = !!(val & PIPECONF_ENABLE);

		intel_display_power_put(dev_priv, power_domain);
	} else {
		/* power domain off implies the pipe is disabled */
		cur_state = false;
	}

	I915_STATE_WARN(cur_state != state,
	     "pipe %c assertion failure (expected %s, current %s)\n",
			pipe_name(pipe), onoff(state), onoff(cur_state));
}
1296
/* Assert the primary display plane @plane is in the expected @state. */
static void assert_plane(struct drm_i915_private *dev_priv,
			 enum plane plane, bool state)
{
	u32 val;
	bool cur_state;

	val = I915_READ(DSPCNTR(plane));
	cur_state = !!(val & DISPLAY_PLANE_ENABLE);
	I915_STATE_WARN(cur_state != state,
	     "plane %c assertion failure (expected %s, current %s)\n",
			plane_name(plane), onoff(state), onoff(cur_state));
}

#define assert_plane_enabled(d, p) assert_plane(d, p, true)
#define assert_plane_disabled(d, p) assert_plane(d, p, false)
1312
b24e7179
JB
1313static void assert_planes_disabled(struct drm_i915_private *dev_priv,
1314 enum pipe pipe)
1315{
91c8a326 1316 struct drm_device *dev = &dev_priv->drm;
649636ef 1317 int i;
b24e7179 1318
653e1026
VS
1319 /* Primary planes are fixed to pipes on gen4+ */
1320 if (INTEL_INFO(dev)->gen >= 4) {
649636ef 1321 u32 val = I915_READ(DSPCNTR(pipe));
e2c719b7 1322 I915_STATE_WARN(val & DISPLAY_PLANE_ENABLE,
28c05794
AJ
1323 "plane %c assertion failure, should be disabled but not\n",
1324 plane_name(pipe));
19ec1358 1325 return;
28c05794 1326 }
19ec1358 1327
b24e7179 1328 /* Need to check both planes against the pipe */
055e393f 1329 for_each_pipe(dev_priv, i) {
649636ef
VS
1330 u32 val = I915_READ(DSPCNTR(i));
1331 enum pipe cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
b24e7179 1332 DISPPLANE_SEL_PIPE_SHIFT;
e2c719b7 1333 I915_STATE_WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
9db4a9c7
JB
1334 "plane %c assertion failure, should be off on pipe %c but is still active\n",
1335 plane_name(i), pipe_name(pipe));
b24e7179
JB
1336 }
1337}
1338
/*
 * Assert no sprite/overlay plane on @pipe is enabled.  The register layout
 * differs per generation: universal planes (gen9+), VLV/CHV sprites, and
 * the single sprite of gen7/gen5+.
 */
static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	struct drm_device *dev = &dev_priv->drm;
	int sprite;

	if (INTEL_INFO(dev)->gen >= 9) {
		for_each_sprite(dev_priv, pipe, sprite) {
			u32 val = I915_READ(PLANE_CTL(pipe, sprite));
			I915_STATE_WARN(val & PLANE_CTL_ENABLE,
			     "plane %d assertion failure, should be off on pipe %c but is still active\n",
			     sprite, pipe_name(pipe));
		}
	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		for_each_sprite(dev_priv, pipe, sprite) {
			u32 val = I915_READ(SPCNTR(pipe, sprite));
			I915_STATE_WARN(val & SP_ENABLE,
			     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
			     sprite_name(pipe, sprite), pipe_name(pipe));
		}
	} else if (INTEL_INFO(dev)->gen >= 7) {
		u32 val = I915_READ(SPRCTL(pipe));
		I915_STATE_WARN(val & SPRITE_ENABLE,
		     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
		     plane_name(pipe), pipe_name(pipe));
	} else if (INTEL_INFO(dev)->gen >= 5) {
		u32 val = I915_READ(DVSCNTR(pipe));
		I915_STATE_WARN(val & DVS_ENABLE,
		     "sprite %c assertion failure, should be off on pipe %c but is still active\n",
		     plane_name(pipe), pipe_name(pipe));
	}
}
1371
/*
 * Assert vblank interrupts for @crtc are off: drm_crtc_vblank_get() returning
 * 0 means vblanks were (unexpectedly) enabled, so warn and drop the reference
 * we just took.
 */
static void assert_vblank_disabled(struct drm_crtc *crtc)
{
	if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
		drm_crtc_vblank_put(crtc);
}
1377
/* Assert the PCH transcoder for @pipe is disabled. */
void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
				    enum pipe pipe)
{
	u32 val;
	bool enabled;

	val = I915_READ(PCH_TRANSCONF(pipe));
	enabled = !!(val & TRANS_ENABLE);
	I915_STATE_WARN(enabled,
	     "transcoder assertion failed, should be off on pipe %c but is still active\n",
	     pipe_name(pipe));
}
1390
/*
 * Return true if the DP port control value @val indicates the port is
 * enabled and routed to @pipe.  The pipe-select encoding differs between
 * CPT (transcoder register), CHV, and older platforms.
 */
static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
			    enum pipe pipe, u32 port_sel, u32 val)
{
	if ((val & DP_PORT_EN) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv)) {
		u32 trans_dp_ctl = I915_READ(TRANS_DP_CTL(pipe));
		if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
			return false;
	} else if (IS_CHERRYVIEW(dev_priv)) {
		if ((val & DP_PIPE_MASK_CHV) != DP_PIPE_SELECT_CHV(pipe))
			return false;
	} else {
		if ((val & DP_PIPE_MASK) != (pipe << 30))
			return false;
	}
	return true;
}
1410
/*
 * Return true if the SDVO/HDMI port control value @val indicates the port is
 * enabled and routed to @pipe, using the per-platform pipe-select encoding.
 */
static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum pipe pipe, u32 val)
{
	if ((val & SDVO_ENABLE) == 0)
		return false;

	if (HAS_PCH_CPT(dev_priv)) {
		if ((val & SDVO_PIPE_SEL_MASK_CPT) != SDVO_PIPE_SEL_CPT(pipe))
			return false;
	} else if (IS_CHERRYVIEW(dev_priv)) {
		if ((val & SDVO_PIPE_SEL_MASK_CHV) != SDVO_PIPE_SEL_CHV(pipe))
			return false;
	} else {
		if ((val & SDVO_PIPE_SEL_MASK) != SDVO_PIPE_SEL(pipe))
			return false;
	}
	return true;
}
1429
1430static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
1431 enum pipe pipe, u32 val)
1432{
1433 if ((val & LVDS_PORT_EN) == 0)
1434 return false;
1435
2d1fe073 1436 if (HAS_PCH_CPT(dev_priv)) {
1519b995
KP
1437 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1438 return false;
1439 } else {
1440 if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
1441 return false;
1442 }
1443 return true;
1444}
1445
/*
 * Return true if the analog (VGA/ADPA) port control value @val indicates the
 * DAC is enabled and routed to @pipe.
 */
static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
			      enum pipe pipe, u32 val)
{
	if ((val & ADPA_DAC_ENABLE) == 0)
		return false;
	if (HAS_PCH_CPT(dev_priv)) {
		if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
			return false;
	} else {
		if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
			return false;
	}
	return true;
}
1460
/* Assert the PCH DP port at @reg is not enabled on transcoder @pipe. */
static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
				   enum pipe pipe, i915_reg_t reg,
				   u32 port_sel)
{
	u32 val = I915_READ(reg);
	I915_STATE_WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
	     "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
	     i915_mmio_reg_offset(reg), pipe_name(pipe));

	/* IBX: a disabled port must not be left selecting transcoder B */
	I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && (val & DP_PORT_EN) == 0
	     && (val & DP_PIPEB_SELECT),
	     "IBX PCH dp port still using transcoder B\n");
}
1474
1475static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
f0f59a00 1476 enum pipe pipe, i915_reg_t reg)
291906f1 1477{
47a05eca 1478 u32 val = I915_READ(reg);
e2c719b7 1479 I915_STATE_WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
23c99e77 1480 "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
f0f59a00 1481 i915_mmio_reg_offset(reg), pipe_name(pipe));
de9a35ab 1482
2d1fe073 1483 I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && (val & SDVO_ENABLE) == 0
75c5da27 1484 && (val & SDVO_PIPE_B_SELECT),
de9a35ab 1485 "IBX PCH hdmi port still using transcoder B\n");
291906f1
JB
1486}
1487
1488static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
1489 enum pipe pipe)
1490{
291906f1 1491 u32 val;
291906f1 1492
f0575e92
KP
1493 assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
1494 assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
1495 assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
291906f1 1496
649636ef 1497 val = I915_READ(PCH_ADPA);
e2c719b7 1498 I915_STATE_WARN(adpa_pipe_enabled(dev_priv, pipe, val),
291906f1 1499 "PCH VGA enabled on transcoder %c, should be disabled\n",
9db4a9c7 1500 pipe_name(pipe));
291906f1 1501
649636ef 1502 val = I915_READ(PCH_LVDS);
e2c719b7 1503 I915_STATE_WARN(lvds_pipe_enabled(dev_priv, pipe, val),
291906f1 1504 "PCH LVDS enabled on transcoder %c, should be disabled\n",
9db4a9c7 1505 pipe_name(pipe));
291906f1 1506
e2debe91
PZ
1507 assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIB);
1508 assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIC);
1509 assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
291906f1
JB
1510}
1511
/*
 * Low-level VLV PLL enable: write the DPLL register and wait for lock.
 * Callers must already have verified the pipe is disabled and the panel
 * power sequencer is unlocked (see vlv_enable_pll()).
 */
static void _vlv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
	POSTING_READ(DPLL(pipe));
	udelay(150);

	if (intel_wait_for_register(dev_priv,
				    DPLL(pipe),
				    DPLL_LOCK_VLV,
				    DPLL_LOCK_VLV,
				    1))
		DRM_ERROR("DPLL %d failed to lock\n", pipe);
}
1529
/* Enable the VLV DPLL for @crtc per @pipe_config, then program DPLL_MD. */
static void vlv_enable_pll(struct intel_crtc *crtc,
			   const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	assert_pipe_disabled(dev_priv, pipe);

	/* PLL is protected by panel, make sure we can write it */
	assert_panel_unlocked(dev_priv, pipe);

	/* only spin up the VCO when the state actually asks for it */
	if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
		_vlv_enable_pll(crtc, pipe_config);

	I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
	POSTING_READ(DPLL_MD(pipe));
}
1547

/*
 * Low-level CHV PLL enable: turn on the 10-bit clock via the DPIO sideband,
 * then enable the PLL and wait for lock.  Callers must already have verified
 * the pipe is disabled and the panel power sequencer is unlocked.
 */
static void _chv_enable_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 tmp;

	mutex_lock(&dev_priv->sb_lock);

	/* Enable back the 10bit clock to display controller */
	tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	tmp |= DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);

	mutex_unlock(&dev_priv->sb_lock);

	/*
	 * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
	 */
	udelay(1);

	/* Enable PLL */
	I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);

	/* Check PLL is locked */
	if (intel_wait_for_register(dev_priv,
				    DPLL(pipe), DPLL_LOCK_VLV, DPLL_LOCK_VLV,
				    1))
		DRM_ERROR("PLL %d failed to lock\n", pipe);
}
1580
1581static void chv_enable_pll(struct intel_crtc *crtc,
1582 const struct intel_crtc_state *pipe_config)
1583{
1584 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1585 enum pipe pipe = crtc->pipe;
1586
1587 assert_pipe_disabled(dev_priv, pipe);
1588
1589 /* PLL is protected by panel, make sure we can write it */
1590 assert_panel_unlocked(dev_priv, pipe);
1591
1592 if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
1593 _chv_enable_pll(crtc, pipe_config);
9d556c99 1594
c231775c
VS
1595 if (pipe != PIPE_A) {
1596 /*
1597 * WaPixelRepeatModeFixForC0:chv
1598 *
1599 * DPLLCMD is AWOL. Use chicken bits to propagate
1600 * the value from DPLLBMD to either pipe B or C.
1601 */
1602 I915_WRITE(CBR4_VLV, pipe == PIPE_B ? CBR_DPLLBMD_PIPE_B : CBR_DPLLBMD_PIPE_C);
1603 I915_WRITE(DPLL_MD(PIPE_B), pipe_config->dpll_hw_state.dpll_md);
1604 I915_WRITE(CBR4_VLV, 0);
1605 dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;
1606
1607 /*
1608 * DPLLB VGA mode also seems to cause problems.
1609 * We should always have it disabled.
1610 */
1611 WARN_ON((I915_READ(DPLL(PIPE_B)) & DPLL_VGA_MODE_DIS) == 0);
1612 } else {
1613 I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
1614 POSTING_READ(DPLL_MD(pipe));
1615 }
9d556c99
CML
1616}
1617
1c4e0274
VS
1618static int intel_num_dvo_pipes(struct drm_device *dev)
1619{
1620 struct intel_crtc *crtc;
1621 int count = 0;
1622
1623 for_each_intel_crtc(dev, crtc)
3538b9df 1624 count += crtc->base.state->active &&
409ee761 1625 intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO);
1c4e0274
VS
1626
1627 return count;
1628}
1629
/*
 * Enable the pre-ILK DPLL for @crtc.  The write sequence (VGA-mode reset,
 * enable, settle delay, pixel-multiplier rewrite, triple rewrite) follows
 * hardware requirements and must not be reordered.
 */
static void i9xx_enable_pll(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	i915_reg_t reg = DPLL(crtc->pipe);
	u32 dpll = crtc->config->dpll_hw_state.dpll;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	/* PLL is protected by panel, make sure we can write it */
	if (IS_MOBILE(dev) && !IS_I830(dev))
		assert_panel_unlocked(dev_priv, crtc->pipe);

	/* Enable DVO 2x clock on both PLLs if necessary */
	if (IS_I830(dev) && intel_num_dvo_pipes(dev) > 0) {
		/*
		 * It appears to be important that we don't enable this
		 * for the current pipe before otherwise configuring the
		 * PLL. No idea how this should be handled if multiple
		 * DVO outputs are enabled simultaneously.
		 */
		dpll |= DPLL_DVO_2X_MODE;
		I915_WRITE(DPLL(!crtc->pipe),
			   I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE);
	}

	/*
	 * Apparently we need to have VGA mode enabled prior to changing
	 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
	 * dividers, even though the register value does change.
	 */
	I915_WRITE(reg, 0);

	I915_WRITE(reg, dpll);

	/* Wait for the clocks to stabilize. */
	POSTING_READ(reg);
	udelay(150);

	if (INTEL_INFO(dev)->gen >= 4) {
		I915_WRITE(DPLL_MD(crtc->pipe),
			   crtc->config->dpll_hw_state.dpll_md);
	} else {
		/* The pixel multiplier can only be updated once the
		 * DPLL is enabled and the clocks are stable.
		 *
		 * So write it again.
		 */
		I915_WRITE(reg, dpll);
	}

	/* We do this three times for luck */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
	I915_WRITE(reg, dpll);
	POSTING_READ(reg);
	udelay(150); /* wait for warmup */
}
1692
/**
 * i9xx_disable_pll - disable a PLL
 * @crtc: crtc whose pipe's PLL to disable
 *
 * Disable the PLL for @crtc's pipe, making sure the pipe is off first.
 *
 * Note! This is for pre-ILK only.
 */
static void i9xx_disable_pll(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;

	/* Disable DVO 2x clock on both PLLs if necessary */
	if (IS_I830(dev) &&
	    intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO) &&
	    !intel_num_dvo_pipes(dev)) {
		I915_WRITE(DPLL(PIPE_B),
			   I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
		I915_WRITE(DPLL(PIPE_A),
			   I915_READ(DPLL(PIPE_A)) & ~DPLL_DVO_2X_MODE);
	}

	/* Don't disable pipe or pipe PLLs if needed */
	if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
	    (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
		return;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
	POSTING_READ(DPLL(pipe));
}
1729
/*
 * Disable the VLV DPLL for @pipe, leaving the reference clock (and the CRI
 * clock on pipes B/C, needed by the integrated DSI/eDP PHY) running.
 */
static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	val = DPLL_INTEGRATED_REF_CLK_VLV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));
}
1745
/* Disable the DPLL for @pipe on CHV, then gate the 10bit DPIO clock. */
static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	val = DPLL_SSC_REF_CLK_CHV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	/* pipes B/C keep the integrated CRI clock bit set */
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));

	/* DPIO access goes through the sideband and needs sb_lock held */
	mutex_lock(&dev_priv->sb_lock);

	/* Disable 10bit clock to display controller */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	val &= ~DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);

	mutex_unlock(&dev_priv->sb_lock);
}
1771
/*
 * Poll the PHY status bits until the given digital port reports ready,
 * warning (but not failing hard) on timeout.  The register and mask
 * depend on which port is being brought up.
 */
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
			 struct intel_digital_port *dport,
			 unsigned int expected_mask)
{
	u32 port_mask;
	i915_reg_t dpll_reg;

	switch (dport->port) {
	case PORT_B:
		port_mask = DPLL_PORTB_READY_MASK;
		dpll_reg = DPLL(0);
		break;
	case PORT_C:
		port_mask = DPLL_PORTC_READY_MASK;
		dpll_reg = DPLL(0);
		/* port C status bits sit 4 bits above port B's in DPLL(0) */
		expected_mask <<= 4;
		break;
	case PORT_D:
		port_mask = DPLL_PORTD_READY_MASK;
		dpll_reg = DPIO_PHY_STATUS;
		break;
	default:
		BUG();
	}

	if (intel_wait_for_register(dev_priv,
				    dpll_reg, port_mask, expected_mask,
				    1000))
		WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n",
		     port_name(dport->port), I915_READ(dpll_reg) & port_mask, expected_mask);
}
1803
/*
 * Enable the PCH transcoder feeding @pipe.  Requires the shared DPLL and
 * both FDI directions to already be up; applies the CPT timing-override
 * workaround and programs BPC/interlace to match the pipe's PIPECONF.
 */
static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
					   enum pipe pipe)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	i915_reg_t reg;
	uint32_t val, pipeconf_val;

	/* Make sure PCH DPLL is enabled */
	assert_shared_dpll_enabled(dev_priv, intel_crtc->config->shared_dpll);

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, pipe);
	assert_fdi_rx_enabled(dev_priv, pipe);

	if (HAS_PCH_CPT(dev)) {
		/* Workaround: Set the timing override bit before enabling the
		 * pch transcoder. */
		reg = TRANS_CHICKEN2(pipe);
		val = I915_READ(reg);
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
		I915_WRITE(reg, val);
	}

	reg = PCH_TRANSCONF(pipe);
	val = I915_READ(reg);
	pipeconf_val = I915_READ(PIPECONF(pipe));

	if (HAS_PCH_IBX(dev_priv)) {
		/*
		 * Make the BPC in transcoder be consistent with
		 * that in pipeconf reg. For HDMI we must use 8bpc
		 * here for both 8bpc and 12bpc.
		 */
		val &= ~PIPECONF_BPC_MASK;
		if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_HDMI))
			val |= PIPECONF_8BPC;
		else
			val |= pipeconf_val & PIPECONF_BPC_MASK;
	}

	val &= ~TRANS_INTERLACE_MASK;
	/* note: braceless nested if — the else pairs with the inner if */
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
		if (HAS_PCH_IBX(dev_priv) &&
		    intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
			val |= TRANS_LEGACY_INTERLACED_ILK;
		else
			val |= TRANS_INTERLACED;
	else
		val |= TRANS_PROGRESSIVE;

	I915_WRITE(reg, val | TRANS_ENABLE);
	if (intel_wait_for_register(dev_priv,
				    reg, TRANS_STATE_ENABLE, TRANS_STATE_ENABLE,
				    100))
		DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
}
1862
/*
 * Enable the single LPT PCH transcoder for @cpu_transcoder.  On LPT the
 * FDI RX is always transcoder A; interlace mode is copied from PIPECONF.
 */
static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
				      enum transcoder cpu_transcoder)
{
	u32 val, pipeconf_val;

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
	assert_fdi_rx_enabled(dev_priv, TRANSCODER_A);

	/* Workaround: set timing override bit. */
	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);

	val = TRANS_ENABLE;
	pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));

	if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
	    PIPECONF_INTERLACED_ILK)
		val |= TRANS_INTERLACED;
	else
		val |= TRANS_PROGRESSIVE;

	I915_WRITE(LPT_TRANSCONF, val);
	if (intel_wait_for_register(dev_priv,
				    LPT_TRANSCONF,
				    TRANS_STATE_ENABLE,
				    TRANS_STATE_ENABLE,
				    100))
		DRM_ERROR("Failed to enable PCH transcoder\n");
}
1894
/*
 * Disable the PCH transcoder for @pipe.  FDI and the PCH ports must be
 * off first; on CPT the timing-override workaround bit is cleared again.
 */
static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
					    enum pipe pipe)
{
	struct drm_device *dev = &dev_priv->drm;
	i915_reg_t reg;
	uint32_t val;

	/* FDI relies on the transcoder */
	assert_fdi_tx_disabled(dev_priv, pipe);
	assert_fdi_rx_disabled(dev_priv, pipe);

	/* Ports must be off as well */
	assert_pch_ports_disabled(dev_priv, pipe);

	reg = PCH_TRANSCONF(pipe);
	val = I915_READ(reg);
	val &= ~TRANS_ENABLE;
	I915_WRITE(reg, val);
	/* wait for PCH transcoder off, transcoder state */
	if (intel_wait_for_register(dev_priv,
				    reg, TRANS_STATE_ENABLE, 0,
				    50))
		DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));

	if (HAS_PCH_CPT(dev)) {
		/* Workaround: Clear the timing override chicken bit again. */
		reg = TRANS_CHICKEN2(pipe);
		val = I915_READ(reg);
		val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
		I915_WRITE(reg, val);
	}
}
1927
ab4d966c 1928static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
8fb033d7 1929{
8fb033d7
PZ
1930 u32 val;
1931
ab9412ba 1932 val = I915_READ(LPT_TRANSCONF);
8fb033d7 1933 val &= ~TRANS_ENABLE;
ab9412ba 1934 I915_WRITE(LPT_TRANSCONF, val);
8fb033d7 1935 /* wait for PCH transcoder off, transcoder state */
dfdb4749
CW
1936 if (intel_wait_for_register(dev_priv,
1937 LPT_TRANSCONF, TRANS_STATE_ENABLE, 0,
1938 50))
8a52fd9f 1939 DRM_ERROR("Failed to disable PCH transcoder\n");
223a6fdf
PZ
1940
1941 /* Workaround: clear timing override bit. */
36c0d0cf 1942 val = I915_READ(TRANS_CHICKEN2(PIPE_A));
23670b32 1943 val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
36c0d0cf 1944 I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
040484af
JB
1945}
1946
/**
 * intel_enable_pipe - enable a pipe, asserting requirements
 * @crtc: crtc responsible for the pipe
 *
 * Enable @crtc's pipe, making sure that various hardware specific requirements
 * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
 */
static void intel_enable_pipe(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
	enum pipe pch_transcoder;
	i915_reg_t reg;
	u32 val;

	DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));

	/* planes/cursor/sprites must come up after the pipe */
	assert_planes_disabled(dev_priv, pipe);
	assert_cursor_disabled(dev_priv, pipe);
	assert_sprites_disabled(dev_priv, pipe);

	/* LPT has a single PCH transcoder wired to transcoder A */
	if (HAS_PCH_LPT(dev_priv))
		pch_transcoder = TRANSCODER_A;
	else
		pch_transcoder = pipe;

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (HAS_GMCH_DISPLAY(dev_priv))
		if (crtc->config->has_dsi_encoder)
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	else {
		if (crtc->config->has_pch_encoder) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
			assert_fdi_tx_pll_enabled(dev_priv,
						  (enum pipe) cpu_transcoder);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	reg = PIPECONF(cpu_transcoder);
	val = I915_READ(reg);
	if (val & PIPECONF_ENABLE) {
		/* already on is only legitimate on force-quirked pipes */
		WARN_ON(!((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
			  (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE)));
		return;
	}

	I915_WRITE(reg, val | PIPECONF_ENABLE);
	POSTING_READ(reg);

	/*
	 * Until the pipe starts DSL will read as 0, which would cause
	 * an apparent vblank timestamp jump, which messes up also the
	 * frame count when it's derived from the timestamps. So let's
	 * wait for the pipe to start properly before we call
	 * drm_crtc_vblank_on()
	 */
	if (dev->max_vblank_count == 0 &&
	    wait_for(intel_get_crtc_scanline(crtc) != crtc->scanline_offset, 50))
		DRM_ERROR("pipe %c didn't start\n", pipe_name(pipe));
}
2017
/**
 * intel_disable_pipe - disable a pipe, asserting requirements
 * @crtc: crtc whose pipe is to be disabled
 *
 * Disable the pipe of @crtc, making sure that various hardware
 * specific requirements are met, if applicable, e.g. plane
 * disabled, panel fitter off, etc.
 *
 * Will wait until the pipe has shut down before returning.
 */
static void intel_disable_pipe(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe));

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(dev_priv, pipe);
	assert_cursor_disabled(dev_priv, pipe);
	assert_sprites_disabled(dev_priv, pipe);

	reg = PIPECONF(cpu_transcoder);
	val = I915_READ(reg);
	if ((val & PIPECONF_ENABLE) == 0)
		return;

	/*
	 * Double wide has implications for planes
	 * so best keep it disabled when not needed.
	 */
	if (crtc->config->double_wide)
		val &= ~PIPECONF_DOUBLE_WIDE;

	/* Don't disable pipe or pipe PLLs if needed (quirked machines) */
	if (!(pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) &&
	    !(pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
		val &= ~PIPECONF_ENABLE;

	I915_WRITE(reg, val);
	/* only wait for off if we actually cleared the enable bit */
	if ((val & PIPECONF_ENABLE) == 0)
		intel_wait_for_pipe_off(crtc);
}
2067
693db184
CW
2068static bool need_vtd_wa(struct drm_device *dev)
2069{
2070#ifdef CONFIG_INTEL_IOMMU
2071 if (INTEL_INFO(dev)->gen >= 6 && intel_iommu_gfx_mapped)
2072 return true;
2073#endif
2074 return false;
2075}
2076
/* Size of one tile in bytes: 2KiB on gen2, 4KiB everywhere else. */
static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
{
	if (IS_GEN2(dev_priv))
		return 2048;

	return 4096;
}
2081
27ba3910
VS
2082static unsigned int intel_tile_width_bytes(const struct drm_i915_private *dev_priv,
2083 uint64_t fb_modifier, unsigned int cpp)
7b49f948
VS
2084{
2085 switch (fb_modifier) {
2086 case DRM_FORMAT_MOD_NONE:
2087 return cpp;
2088 case I915_FORMAT_MOD_X_TILED:
2089 if (IS_GEN2(dev_priv))
2090 return 128;
2091 else
2092 return 512;
2093 case I915_FORMAT_MOD_Y_TILED:
2094 if (IS_GEN2(dev_priv) || HAS_128_BYTE_Y_TILING(dev_priv))
2095 return 128;
2096 else
2097 return 512;
2098 case I915_FORMAT_MOD_Yf_TILED:
2099 switch (cpp) {
2100 case 1:
2101 return 64;
2102 case 2:
2103 case 4:
2104 return 128;
2105 case 8:
2106 case 16:
2107 return 256;
2108 default:
2109 MISSING_CASE(cpp);
2110 return cpp;
2111 }
2112 break;
2113 default:
2114 MISSING_CASE(fb_modifier);
2115 return cpp;
2116 }
2117}
2118
832be82f
VS
2119unsigned int intel_tile_height(const struct drm_i915_private *dev_priv,
2120 uint64_t fb_modifier, unsigned int cpp)
a57ce0b2 2121{
832be82f
VS
2122 if (fb_modifier == DRM_FORMAT_MOD_NONE)
2123 return 1;
2124 else
2125 return intel_tile_size(dev_priv) /
27ba3910 2126 intel_tile_width_bytes(dev_priv, fb_modifier, cpp);
6761dd31
TU
2127}
2128
8d0deca8
VS
2129/* Return the tile dimensions in pixel units */
2130static void intel_tile_dims(const struct drm_i915_private *dev_priv,
2131 unsigned int *tile_width,
2132 unsigned int *tile_height,
2133 uint64_t fb_modifier,
2134 unsigned int cpp)
2135{
2136 unsigned int tile_width_bytes =
2137 intel_tile_width_bytes(dev_priv, fb_modifier, cpp);
2138
2139 *tile_width = tile_width_bytes / cpp;
2140 *tile_height = intel_tile_size(dev_priv) / tile_width_bytes;
2141}
2142
6761dd31
TU
2143unsigned int
2144intel_fb_align_height(struct drm_device *dev, unsigned int height,
832be82f 2145 uint32_t pixel_format, uint64_t fb_modifier)
6761dd31 2146{
832be82f
VS
2147 unsigned int cpp = drm_format_plane_cpp(pixel_format, 0);
2148 unsigned int tile_height = intel_tile_height(to_i915(dev), fb_modifier, cpp);
2149
2150 return ALIGN(height, tile_height);
a57ce0b2
JB
2151}
2152
1663b9d6
VS
2153unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
2154{
2155 unsigned int size = 0;
2156 int i;
2157
2158 for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
2159 size += rot_info->plane[i].width * rot_info->plane[i].height;
2160
2161 return size;
2162}
2163
75c82a53 2164static void
3465c580
VS
2165intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
2166 const struct drm_framebuffer *fb,
2167 unsigned int rotation)
f64b98cd 2168{
2d7a215f
VS
2169 if (intel_rotation_90_or_270(rotation)) {
2170 *view = i915_ggtt_view_rotated;
2171 view->params.rotated = to_intel_framebuffer(fb)->rot_info;
2172 } else {
2173 *view = i915_ggtt_view_normal;
2174 }
2175}
50470bb0 2176
2d7a215f
VS
2177static void
2178intel_fill_fb_info(struct drm_i915_private *dev_priv,
2179 struct drm_framebuffer *fb)
2180{
2181 struct intel_rotation_info *info = &to_intel_framebuffer(fb)->rot_info;
2182 unsigned int tile_size, tile_width, tile_height, cpp;
50470bb0 2183
d9b3288e
VS
2184 tile_size = intel_tile_size(dev_priv);
2185
2186 cpp = drm_format_plane_cpp(fb->pixel_format, 0);
8d0deca8
VS
2187 intel_tile_dims(dev_priv, &tile_width, &tile_height,
2188 fb->modifier[0], cpp);
d9b3288e 2189
1663b9d6
VS
2190 info->plane[0].width = DIV_ROUND_UP(fb->pitches[0], tile_width * cpp);
2191 info->plane[0].height = DIV_ROUND_UP(fb->height, tile_height);
84fe03f7 2192
89e3e142 2193 if (info->pixel_format == DRM_FORMAT_NV12) {
832be82f 2194 cpp = drm_format_plane_cpp(fb->pixel_format, 1);
8d0deca8
VS
2195 intel_tile_dims(dev_priv, &tile_width, &tile_height,
2196 fb->modifier[1], cpp);
d9b3288e 2197
2d7a215f 2198 info->uv_offset = fb->offsets[1];
1663b9d6
VS
2199 info->plane[1].width = DIV_ROUND_UP(fb->pitches[1], tile_width * cpp);
2200 info->plane[1].height = DIV_ROUND_UP(fb->height / 2, tile_height);
89e3e142 2201 }
f64b98cd
TU
2202}
2203
603525d7 2204static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
4e9a86b6
VS
2205{
2206 if (INTEL_INFO(dev_priv)->gen >= 9)
2207 return 256 * 1024;
985b8bb4 2208 else if (IS_BROADWATER(dev_priv) || IS_CRESTLINE(dev_priv) ||
666a4537 2209 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
4e9a86b6
VS
2210 return 128 * 1024;
2211 else if (INTEL_INFO(dev_priv)->gen >= 4)
2212 return 4 * 1024;
2213 else
44c5905e 2214 return 0;
4e9a86b6
VS
2215}
2216
603525d7
VS
2217static unsigned int intel_surf_alignment(const struct drm_i915_private *dev_priv,
2218 uint64_t fb_modifier)
2219{
2220 switch (fb_modifier) {
2221 case DRM_FORMAT_MOD_NONE:
2222 return intel_linear_alignment(dev_priv);
2223 case I915_FORMAT_MOD_X_TILED:
2224 if (INTEL_INFO(dev_priv)->gen >= 9)
2225 return 256 * 1024;
2226 return 0;
2227 case I915_FORMAT_MOD_Y_TILED:
2228 case I915_FORMAT_MOD_Yf_TILED:
2229 return 1 * 1024 * 1024;
2230 default:
2231 MISSING_CASE(fb_modifier);
2232 return 0;
2233 }
2234}
2235
/*
 * Pin @fb's backing object into the display-capable part of the GGTT (with
 * the view matching @rotation) and install a scan-out fence for normal
 * views.  Returns 0 on success or a negative error code; on failure
 * nothing remains pinned.  Caller must hold struct_mutex.
 */
int
intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
			   unsigned int rotation)
{
	struct drm_device *dev = fb->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct i915_ggtt_view view;
	u32 alignment;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	alignment = intel_surf_alignment(dev_priv, fb->modifier[0]);

	intel_fill_fb_ggtt_view(&view, fb, rotation);

	/* Note that the w/a also requires 64 PTE of padding following the
	 * bo. We currently fill all unused PTE with the shadow page and so
	 * we should always have valid PTE following the scanout preventing
	 * the VT-d warning.
	 */
	if (need_vtd_wa(dev) && alignment < 256 * 1024)
		alignment = 256 * 1024;

	/*
	 * Global gtt pte registers are special registers which actually forward
	 * writes to a chunk of system memory. Which means that there is no risk
	 * that the register values disappear as soon as we call
	 * intel_runtime_pm_put(), so it is correct to wrap only the
	 * pin/unpin/fence and not more.
	 */
	intel_runtime_pm_get(dev_priv);

	ret = i915_gem_object_pin_to_display_plane(obj, alignment,
						   &view);
	if (ret)
		goto err_pm;

	/* Install a fence for tiled scan-out. Pre-i965 always needs a
	 * fence, whereas 965+ only requires a fence if using
	 * framebuffer compression. For simplicity, we always install
	 * a fence as the cost is not that onerous.
	 */
	if (view.type == I915_GGTT_VIEW_NORMAL) {
		ret = i915_gem_object_get_fence(obj);
		if (ret == -EDEADLK) {
			/*
			 * -EDEADLK means there are no free fences
			 * no pending flips.
			 *
			 * This is propagated to atomic, but it uses
			 * -EDEADLK to force a locking recovery, so
			 * change the returned error to -EBUSY.
			 */
			ret = -EBUSY;
			goto err_unpin;
		} else if (ret)
			goto err_unpin;

		i915_gem_object_pin_fence(obj);
	}

	intel_runtime_pm_put(dev_priv);
	return 0;

err_unpin:
	i915_gem_object_unpin_from_display_plane(obj, &view);
err_pm:
	intel_runtime_pm_put(dev_priv);
	return ret;
}
2308
fb4b8ce1 2309void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
1690e1eb 2310{
82bc3b2d 2311 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
f64b98cd 2312 struct i915_ggtt_view view;
82bc3b2d 2313
ebcdd39e
MR
2314 WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));
2315
3465c580 2316 intel_fill_fb_ggtt_view(&view, fb, rotation);
f64b98cd 2317
9807216f
VK
2318 if (view.type == I915_GGTT_VIEW_NORMAL)
2319 i915_gem_object_unpin_fence(obj);
2320
f64b98cd 2321 i915_gem_object_unpin_from_display_plane(obj, &view);
1690e1eb
CW
2322}
2323
29cf9491
VS
2324/*
2325 * Adjust the tile offset by moving the difference into
2326 * the x/y offsets.
2327 *
2328 * Input tile dimensions and pitch must already be
2329 * rotated to match x and y, and in pixel units.
2330 */
2331static u32 intel_adjust_tile_offset(int *x, int *y,
2332 unsigned int tile_width,
2333 unsigned int tile_height,
2334 unsigned int tile_size,
2335 unsigned int pitch_tiles,
2336 u32 old_offset,
2337 u32 new_offset)
2338{
2339 unsigned int tiles;
2340
2341 WARN_ON(old_offset & (tile_size - 1));
2342 WARN_ON(new_offset & (tile_size - 1));
2343 WARN_ON(new_offset > old_offset);
2344
2345 tiles = (old_offset - new_offset) / tile_size;
2346
2347 *y += tiles / pitch_tiles * tile_height;
2348 *x += tiles % pitch_tiles * tile_width;
2349
2350 return new_offset;
2351}
2352
/*
 * Computes the linear offset to the base tile and adjusts
 * x, y. bytes per pixel is assumed to be a power-of-two.
 *
 * In the 90/270 rotated case, x and y are assumed
 * to be already rotated to match the rotated GTT view, and
 * pitch is the tile_height aligned framebuffer height.
 */
u32 intel_compute_tile_offset(int *x, int *y,
			      const struct drm_framebuffer *fb, int plane,
			      unsigned int pitch,
			      unsigned int rotation)
{
	const struct drm_i915_private *dev_priv = to_i915(fb->dev);
	uint64_t fb_modifier = fb->modifier[plane];
	unsigned int cpp = drm_format_plane_cpp(fb->pixel_format, plane);
	u32 offset, offset_aligned, alignment;

	/* turn the alignment into a mask for rounding down below */
	alignment = intel_surf_alignment(dev_priv, fb_modifier);
	if (alignment)
		alignment--;

	if (fb_modifier != DRM_FORMAT_MOD_NONE) {
		unsigned int tile_size, tile_width, tile_height;
		unsigned int tile_rows, tiles, pitch_tiles;

		tile_size = intel_tile_size(dev_priv);
		intel_tile_dims(dev_priv, &tile_width, &tile_height,
				fb_modifier, cpp);

		if (intel_rotation_90_or_270(rotation)) {
			/* rotated view: pitch is in tile_height units */
			pitch_tiles = pitch / tile_height;
			swap(tile_width, tile_height);
		} else {
			pitch_tiles = pitch / (tile_width * cpp);
		}

		/* split x/y into whole tiles and the remainder within a tile */
		tile_rows = *y / tile_height;
		*y %= tile_height;

		tiles = *x / tile_width;
		*x %= tile_width;

		offset = (tile_rows * pitch_tiles + tiles) * tile_size;
		offset_aligned = offset & ~alignment;

		/* fold the alignment slack back into x/y */
		intel_adjust_tile_offset(x, y, tile_width, tile_height,
					 tile_size, pitch_tiles,
					 offset, offset_aligned);
	} else {
		offset = *y * pitch + *x * cpp;
		offset_aligned = offset & ~alignment;

		*y = (offset & alignment) / pitch;
		*x = ((offset & alignment) - *y * pitch) / cpp;
	}

	return offset_aligned;
}
2412
b35d63fa 2413static int i9xx_format_to_fourcc(int format)
46f297fb
JB
2414{
2415 switch (format) {
2416 case DISPPLANE_8BPP:
2417 return DRM_FORMAT_C8;
2418 case DISPPLANE_BGRX555:
2419 return DRM_FORMAT_XRGB1555;
2420 case DISPPLANE_BGRX565:
2421 return DRM_FORMAT_RGB565;
2422 default:
2423 case DISPPLANE_BGRX888:
2424 return DRM_FORMAT_XRGB8888;
2425 case DISPPLANE_RGBX888:
2426 return DRM_FORMAT_XBGR8888;
2427 case DISPPLANE_BGRX101010:
2428 return DRM_FORMAT_XRGB2101010;
2429 case DISPPLANE_RGBX101010:
2430 return DRM_FORMAT_XBGR2101010;
2431 }
2432}
2433
bc8d7dff
DL
2434static int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
2435{
2436 switch (format) {
2437 case PLANE_CTL_FORMAT_RGB_565:
2438 return DRM_FORMAT_RGB565;
2439 default:
2440 case PLANE_CTL_FORMAT_XRGB_8888:
2441 if (rgb_order) {
2442 if (alpha)
2443 return DRM_FORMAT_ABGR8888;
2444 else
2445 return DRM_FORMAT_XBGR8888;
2446 } else {
2447 if (alpha)
2448 return DRM_FORMAT_ARGB8888;
2449 else
2450 return DRM_FORMAT_XRGB8888;
2451 }
2452 case PLANE_CTL_FORMAT_XRGB_2101010:
2453 if (rgb_order)
2454 return DRM_FORMAT_XBGR2101010;
2455 else
2456 return DRM_FORMAT_XRGB2101010;
2457 }
2458}
2459
/*
 * Try to wrap the BIOS-programmed framebuffer (preallocated in stolen
 * memory) in a GEM object and initialize @plane_config->fb around it.
 * Returns true on success, false if the fb is unusable (empty, too big
 * for stolen, or framebuffer init failed).
 */
static bool
intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
			      struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct drm_i915_gem_object *obj = NULL;
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
	struct drm_framebuffer *fb = &plane_config->fb->base;
	u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
	u32 size_aligned = round_up(plane_config->base + plane_config->size,
				    PAGE_SIZE);

	size_aligned -= base_aligned;

	if (plane_config->size == 0)
		return false;

	/* If the FB is too big, just don't use it since fbdev is not very
	 * important and we should probably use that space with FBC or other
	 * features. */
	if (size_aligned * 2 > ggtt->stolen_usable_size)
		return false;

	mutex_lock(&dev->struct_mutex);

	obj = i915_gem_object_create_stolen_for_preallocated(dev,
							     base_aligned,
							     base_aligned,
							     size_aligned);
	if (!obj) {
		mutex_unlock(&dev->struct_mutex);
		return false;
	}

	/* carry over the BIOS's tiling setup */
	obj->tiling_mode = plane_config->tiling;
	if (obj->tiling_mode == I915_TILING_X)
		obj->stride = fb->pitches[0];

	mode_cmd.pixel_format = fb->pixel_format;
	mode_cmd.width = fb->width;
	mode_cmd.height = fb->height;
	mode_cmd.pitches[0] = fb->pitches[0];
	mode_cmd.modifier[0] = fb->modifier[0];
	mode_cmd.flags = DRM_MODE_FB_MODIFIERS;

	if (intel_framebuffer_init(dev, to_intel_framebuffer(fb),
				   &mode_cmd, obj)) {
		DRM_DEBUG_KMS("intel fb init failed\n");
		goto out_unref_obj;
	}

	mutex_unlock(&dev->struct_mutex);

	DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
	return true;

out_unref_obj:
	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);
	return false;
}
2523
5a21b665
DV
2524/* Update plane->state->fb to match plane->fb after driver-internal updates */
2525static void
2526update_state_fb(struct drm_plane *plane)
2527{
2528 if (plane->fb == plane->state->fb)
2529 return;
2530
2531 if (plane->state->fb)
2532 drm_framebuffer_unreference(plane->state->fb);
2533 plane->state->fb = plane->fb;
2534 if (plane->state->fb)
2535 drm_framebuffer_reference(plane->state->fb);
2536}
2537
/*
 * Take over the BIOS-programmed primary plane framebuffer: wrap it in a
 * GEM object, or failing that, share an fb already in use by another
 * CRTC at the same GGTT offset.  If neither works the primary plane is
 * disabled so later state doesn't see a visible plane with a NULL fb.
 */
static void
intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
			     struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *c;
	struct intel_crtc *i;
	struct drm_i915_gem_object *obj;
	struct drm_plane *primary = intel_crtc->base.primary;
	struct drm_plane_state *plane_state = primary->state;
	struct drm_crtc_state *crtc_state = intel_crtc->base.state;
	struct intel_plane *intel_plane = to_intel_plane(primary);
	struct intel_plane_state *intel_state =
		to_intel_plane_state(plane_state);
	struct drm_framebuffer *fb;

	if (!plane_config->fb)
		return;

	if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
		fb = &plane_config->fb->base;
		goto valid_fb;
	}

	kfree(plane_config->fb);

	/*
	 * Failed to alloc the obj, check to see if we should share
	 * an fb with another CRTC instead
	 */
	for_each_crtc(dev, c) {
		i = to_intel_crtc(c);

		if (c == &intel_crtc->base)
			continue;

		if (!i->active)
			continue;

		fb = c->primary->fb;
		if (!fb)
			continue;

		obj = intel_fb_obj(fb);
		/* same GGTT offset means the BIOS used one fb for both */
		if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) {
			drm_framebuffer_reference(fb);
			goto valid_fb;
		}
	}

	/*
	 * We've failed to reconstruct the BIOS FB. Current display state
	 * indicates that the primary plane is visible, but has a NULL FB,
	 * which will lead to problems later if we don't fix it up. The
	 * simplest solution is to just disable the primary plane now and
	 * pretend the BIOS never had it enabled.
	 */
	to_intel_plane_state(plane_state)->visible = false;
	crtc_state->plane_mask &= ~(1 << drm_plane_index(primary));
	intel_pre_disable_primary_noatomic(&intel_crtc->base);
	intel_plane->disable_plane(primary, &intel_crtc->base);

	return;

valid_fb:
	/* full-crtc src/dst rectangles; src coords are 16.16 fixed point */
	plane_state->src_x = 0;
	plane_state->src_y = 0;
	plane_state->src_w = fb->width << 16;
	plane_state->src_h = fb->height << 16;

	plane_state->crtc_x = 0;
	plane_state->crtc_y = 0;
	plane_state->crtc_w = fb->width;
	plane_state->crtc_h = fb->height;

	intel_state->src.x1 = plane_state->src_x;
	intel_state->src.y1 = plane_state->src_y;
	intel_state->src.x2 = plane_state->src_x + plane_state->src_w;
	intel_state->src.y2 = plane_state->src_y + plane_state->src_h;
	intel_state->dst.x1 = plane_state->crtc_x;
	intel_state->dst.y1 = plane_state->crtc_y;
	intel_state->dst.x2 = plane_state->crtc_x + plane_state->crtc_w;
	intel_state->dst.y2 = plane_state->crtc_y + plane_state->crtc_h;

	obj = intel_fb_obj(fb);
	if (obj->tiling_mode != I915_TILING_NONE)
		dev_priv->preserve_bios_swizzle = true;

	drm_framebuffer_reference(fb);
	primary->fb = primary->state->fb = fb;
	primary->crtc = primary->state->crtc = &intel_crtc->base;
	intel_crtc->base.state->plane_mask |= (1 << drm_plane_index(primary));
	obj->frontbuffer_bits |= to_intel_plane(primary)->frontbuffer_bit;
}
2633
a8d201af
ML
/*
 * Program the pre-ILK (gen2-4, VLV, CHV) primary plane from the given
 * CRTC/plane state. Assumes the fb is already pinned into the GGTT; the
 * final DSPSURF/DSPADDR write arms the update for the next vblank.
 */
static void i9xx_update_primary_plane(struct drm_plane *primary,
				      const struct intel_crtc_state *crtc_state,
				      const struct intel_plane_state *plane_state)
{
	struct drm_device *dev = primary->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_framebuffer *fb = plane_state->base.fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	int plane = intel_crtc->plane;
	u32 linear_offset;
	u32 dspcntr;
	i915_reg_t reg = DSPCNTR(plane);
	unsigned int rotation = plane_state->base.rotation;
	int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
	/* src coordinates are 16.16 fixed point; take the integer part */
	int x = plane_state->src.x1 >> 16;
	int y = plane_state->src.y1 >> 16;

	dspcntr = DISPPLANE_GAMMA_ENABLE;

	dspcntr |= DISPLAY_PLANE_ENABLE;

	if (INTEL_INFO(dev)->gen < 4) {
		if (intel_crtc->pipe == PIPE_B)
			dspcntr |= DISPPLANE_SEL_PIPE_B;

		/* pipesrc and dspsize control the size that is scaled from,
		 * which should always be the user's requested size.
		 */
		I915_WRITE(DSPSIZE(plane),
			   ((crtc_state->pipe_src_h - 1) << 16) |
			   (crtc_state->pipe_src_w - 1));
		I915_WRITE(DSPPOS(plane), 0);
	} else if (IS_CHERRYVIEW(dev) && plane == PLANE_B) {
		/* CHV plane B has its own size/pos/alpha registers */
		I915_WRITE(PRIMSIZE(plane),
			   ((crtc_state->pipe_src_h - 1) << 16) |
			   (crtc_state->pipe_src_w - 1));
		I915_WRITE(PRIMPOS(plane), 0);
		I915_WRITE(PRIMCNSTALPHA(plane), 0);
	}

	/* map the DRM fourcc onto the DSPCNTR pixel format field */
	switch (fb->pixel_format) {
	case DRM_FORMAT_C8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case DRM_FORMAT_XRGB1555:
		dspcntr |= DISPPLANE_BGRX555;
		break;
	case DRM_FORMAT_RGB565:
		dspcntr |= DISPPLANE_BGRX565;
		break;
	case DRM_FORMAT_XRGB8888:
		dspcntr |= DISPPLANE_BGRX888;
		break;
	case DRM_FORMAT_XBGR8888:
		dspcntr |= DISPPLANE_RGBX888;
		break;
	case DRM_FORMAT_XRGB2101010:
		dspcntr |= DISPPLANE_BGRX101010;
		break;
	case DRM_FORMAT_XBGR2101010:
		dspcntr |= DISPPLANE_RGBX101010;
		break;
	default:
		/* format was validated earlier; anything else is a driver bug */
		BUG();
	}

	if (INTEL_INFO(dev)->gen >= 4 &&
	    obj->tiling_mode != I915_TILING_NONE)
		dspcntr |= DISPPLANE_TILED;

	if (IS_G4X(dev))
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	linear_offset = y * fb->pitches[0] + x * cpp;

	if (INTEL_INFO(dev)->gen >= 4) {
		/* gen4+ scans out from a tile-aligned base + x/y offset */
		intel_crtc->dspaddr_offset =
			intel_compute_tile_offset(&x, &y, fb, 0,
						  fb->pitches[0], rotation);
		linear_offset -= intel_crtc->dspaddr_offset;
	} else {
		intel_crtc->dspaddr_offset = linear_offset;
	}

	if (rotation == BIT(DRM_ROTATE_180)) {
		dspcntr |= DISPPLANE_ROTATE_180;

		x += (crtc_state->pipe_src_w - 1);
		y += (crtc_state->pipe_src_h - 1);

		/* Finding the last pixel of the last line of the display
		   data and adding to linear_offset*/
		linear_offset +=
			(crtc_state->pipe_src_h - 1) * fb->pitches[0] +
			(crtc_state->pipe_src_w - 1) * cpp;
	}

	intel_crtc->adjusted_x = x;
	intel_crtc->adjusted_y = y;

	I915_WRITE(reg, dspcntr);

	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
	if (INTEL_INFO(dev)->gen >= 4) {
		I915_WRITE(DSPSURF(plane),
			   i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
		I915_WRITE(DSPLINOFF(plane), linear_offset);
	} else
		I915_WRITE(DSPADDR(plane), i915_gem_obj_ggtt_offset(obj) + linear_offset);
	POSTING_READ(reg);
}
2747
a8d201af
ML
/*
 * Disable the pre-SKL primary plane: clear DSPCNTR, then arm the change
 * by writing the gen-dependent surface base register (DSPSURF on gen4+,
 * DSPADDR on older parts).
 */
static void i9xx_disable_primary_plane(struct drm_plane *primary,
				       struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int plane = intel_crtc->plane;

	I915_WRITE(DSPCNTR(plane), 0);
	if (INTEL_INFO(dev_priv)->gen >= 4)
		I915_WRITE(DSPSURF(plane), 0);
	else
		I915_WRITE(DSPADDR(plane), 0);
	POSTING_READ(DSPCNTR(plane));
}
c9ba6fad 2763
a8d201af
ML
/*
 * Program the ILK..BDW primary plane from the given CRTC/plane state.
 * Mirrors i9xx_update_primary_plane() but uses the PCH-split register
 * layout; the DSPSURF write arms the update for the next vblank.
 */
static void ironlake_update_primary_plane(struct drm_plane *primary,
					  const struct intel_crtc_state *crtc_state,
					  const struct intel_plane_state *plane_state)
{
	struct drm_device *dev = primary->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_framebuffer *fb = plane_state->base.fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	int plane = intel_crtc->plane;
	u32 linear_offset;
	u32 dspcntr;
	i915_reg_t reg = DSPCNTR(plane);
	unsigned int rotation = plane_state->base.rotation;
	int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
	/* src coordinates are 16.16 fixed point; take the integer part */
	int x = plane_state->src.x1 >> 16;
	int y = plane_state->src.y1 >> 16;

	dspcntr = DISPPLANE_GAMMA_ENABLE;
	dspcntr |= DISPLAY_PLANE_ENABLE;

	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;

	/* map the DRM fourcc onto the DSPCNTR pixel format field */
	switch (fb->pixel_format) {
	case DRM_FORMAT_C8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case DRM_FORMAT_RGB565:
		dspcntr |= DISPPLANE_BGRX565;
		break;
	case DRM_FORMAT_XRGB8888:
		dspcntr |= DISPPLANE_BGRX888;
		break;
	case DRM_FORMAT_XBGR8888:
		dspcntr |= DISPPLANE_RGBX888;
		break;
	case DRM_FORMAT_XRGB2101010:
		dspcntr |= DISPPLANE_BGRX101010;
		break;
	case DRM_FORMAT_XBGR2101010:
		dspcntr |= DISPPLANE_RGBX101010;
		break;
	default:
		/* format was validated earlier; anything else is a driver bug */
		BUG();
	}

	if (obj->tiling_mode != I915_TILING_NONE)
		dspcntr |= DISPPLANE_TILED;

	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	linear_offset = y * fb->pitches[0] + x * cpp;
	intel_crtc->dspaddr_offset =
		intel_compute_tile_offset(&x, &y, fb, 0,
					  fb->pitches[0], rotation);
	linear_offset -= intel_crtc->dspaddr_offset;
	if (rotation == BIT(DRM_ROTATE_180)) {
		dspcntr |= DISPPLANE_ROTATE_180;

		/* HSW/BDW handle the offset in hardware (DSPOFFSET below) */
		if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
			x += (crtc_state->pipe_src_w - 1);
			y += (crtc_state->pipe_src_h - 1);

			/* Finding the last pixel of the last line of the display
			   data and adding to linear_offset*/
			linear_offset +=
				(crtc_state->pipe_src_h - 1) * fb->pitches[0] +
				(crtc_state->pipe_src_w - 1) * cpp;
		}
	}

	intel_crtc->adjusted_x = x;
	intel_crtc->adjusted_y = y;

	I915_WRITE(reg, dspcntr);

	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
	I915_WRITE(DSPSURF(plane),
		   i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
	} else {
		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
		I915_WRITE(DSPLINOFF(plane), linear_offset);
	}
	POSTING_READ(reg);
}
2853
7b49f948
VS
2854u32 intel_fb_stride_alignment(const struct drm_i915_private *dev_priv,
2855 uint64_t fb_modifier, uint32_t pixel_format)
b321803d 2856{
7b49f948 2857 if (fb_modifier == DRM_FORMAT_MOD_NONE) {
b321803d 2858 return 64;
7b49f948
VS
2859 } else {
2860 int cpp = drm_format_plane_cpp(pixel_format, 0);
2861
27ba3910 2862 return intel_tile_width_bytes(dev_priv, fb_modifier, cpp);
b321803d
DL
2863 }
2864}
2865
44eb0cb9
MK
/*
 * Return the GGTT offset of @obj for scanout by @intel_plane, looking up
 * the vma that matches the plane's current fb and rotation. @plane picks
 * the color plane within the fb: for plane 1 (the UV plane of a rotated
 * NV12-style view) the uv_start_page offset is added.
 *
 * Returns (u32)-1 (with a WARN) if no GGTT vma exists for the view.
 */
u32 intel_plane_obj_offset(struct intel_plane *intel_plane,
			   struct drm_i915_gem_object *obj,
			   unsigned int plane)
{
	struct i915_ggtt_view view;
	struct i915_vma *vma;
	u64 offset;

	intel_fill_fb_ggtt_view(&view, intel_plane->base.state->fb,
				intel_plane->base.state->rotation);

	vma = i915_gem_obj_to_ggtt_view(obj, &view);
	if (WARN(!vma, "ggtt vma for display object not found! (view=%u)\n",
		view.type))
		return -1;

	offset = vma->node.start;

	if (plane == 1) {
		offset += vma->ggtt_view.params.rotated.uv_start_page *
			  PAGE_SIZE;
	}

	/* display registers take a 32 bit offset; the vma must fit below 4G */
	WARN_ON(upper_32_bits(offset));

	return lower_32_bits(offset);
}
2893
e435d6e5
ML
/*
 * Disable (unbind) a single SKL+ pipe scaler by clearing its control,
 * window position and window size registers.
 */
static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
	I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
	I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
}
2903
a1b2278e
CK
2904/*
2905 * This function detaches (aka. unbinds) unused scalers in hardware
2906 */
0583236e 2907static void skl_detach_scalers(struct intel_crtc *intel_crtc)
a1b2278e 2908{
a1b2278e
CK
2909 struct intel_crtc_scaler_state *scaler_state;
2910 int i;
2911
a1b2278e
CK
2912 scaler_state = &intel_crtc->config->scaler_state;
2913
2914 /* loop through and disable scalers that aren't in use */
2915 for (i = 0; i < intel_crtc->num_scalers; i++) {
e435d6e5
ML
2916 if (!scaler_state->scalers[i].in_use)
2917 skl_detach_scaler(intel_crtc, i);
a1b2278e
CK
2918 }
2919}
2920
6156a456 2921u32 skl_plane_ctl_format(uint32_t pixel_format)
70d21f0e 2922{
6156a456 2923 switch (pixel_format) {
d161cf7a 2924 case DRM_FORMAT_C8:
c34ce3d1 2925 return PLANE_CTL_FORMAT_INDEXED;
70d21f0e 2926 case DRM_FORMAT_RGB565:
c34ce3d1 2927 return PLANE_CTL_FORMAT_RGB_565;
70d21f0e 2928 case DRM_FORMAT_XBGR8888:
c34ce3d1 2929 return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
6156a456 2930 case DRM_FORMAT_XRGB8888:
c34ce3d1 2931 return PLANE_CTL_FORMAT_XRGB_8888;
6156a456
CK
2932 /*
2933 * XXX: For ARBG/ABGR formats we default to expecting scanout buffers
2934 * to be already pre-multiplied. We need to add a knob (or a different
2935 * DRM_FORMAT) for user-space to configure that.
2936 */
f75fb42a 2937 case DRM_FORMAT_ABGR8888:
c34ce3d1 2938 return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX |
6156a456 2939 PLANE_CTL_ALPHA_SW_PREMULTIPLY;
6156a456 2940 case DRM_FORMAT_ARGB8888:
c34ce3d1 2941 return PLANE_CTL_FORMAT_XRGB_8888 |
6156a456 2942 PLANE_CTL_ALPHA_SW_PREMULTIPLY;
70d21f0e 2943 case DRM_FORMAT_XRGB2101010:
c34ce3d1 2944 return PLANE_CTL_FORMAT_XRGB_2101010;
70d21f0e 2945 case DRM_FORMAT_XBGR2101010:
c34ce3d1 2946 return PLANE_CTL_ORDER_RGBX | PLANE_CTL_FORMAT_XRGB_2101010;
6156a456 2947 case DRM_FORMAT_YUYV:
c34ce3d1 2948 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
6156a456 2949 case DRM_FORMAT_YVYU:
c34ce3d1 2950 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
6156a456 2951 case DRM_FORMAT_UYVY:
c34ce3d1 2952 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
6156a456 2953 case DRM_FORMAT_VYUY:
c34ce3d1 2954 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
70d21f0e 2955 default:
4249eeef 2956 MISSING_CASE(pixel_format);
70d21f0e 2957 }
8cfcba41 2958
c34ce3d1 2959 return 0;
6156a456 2960}
70d21f0e 2961
6156a456
CK
2962u32 skl_plane_ctl_tiling(uint64_t fb_modifier)
2963{
6156a456 2964 switch (fb_modifier) {
30af77c4 2965 case DRM_FORMAT_MOD_NONE:
70d21f0e 2966 break;
30af77c4 2967 case I915_FORMAT_MOD_X_TILED:
c34ce3d1 2968 return PLANE_CTL_TILED_X;
b321803d 2969 case I915_FORMAT_MOD_Y_TILED:
c34ce3d1 2970 return PLANE_CTL_TILED_Y;
b321803d 2971 case I915_FORMAT_MOD_Yf_TILED:
c34ce3d1 2972 return PLANE_CTL_TILED_YF;
70d21f0e 2973 default:
6156a456 2974 MISSING_CASE(fb_modifier);
70d21f0e 2975 }
8cfcba41 2976
c34ce3d1 2977 return 0;
6156a456 2978}
70d21f0e 2979
6156a456
CK
2980u32 skl_plane_ctl_rotation(unsigned int rotation)
2981{
3b7a5119 2982 switch (rotation) {
6156a456
CK
2983 case BIT(DRM_ROTATE_0):
2984 break;
1e8df167
SJ
2985 /*
2986 * DRM_ROTATE_ is counter clockwise to stay compatible with Xrandr
2987 * while i915 HW rotation is clockwise, thats why this swapping.
2988 */
3b7a5119 2989 case BIT(DRM_ROTATE_90):
1e8df167 2990 return PLANE_CTL_ROTATE_270;
3b7a5119 2991 case BIT(DRM_ROTATE_180):
c34ce3d1 2992 return PLANE_CTL_ROTATE_180;
3b7a5119 2993 case BIT(DRM_ROTATE_270):
1e8df167 2994 return PLANE_CTL_ROTATE_90;
6156a456
CK
2995 default:
2996 MISSING_CASE(rotation);
2997 }
2998
c34ce3d1 2999 return 0;
6156a456
CK
3000}
3001
a8d201af
ML
/*
 * Program the SKL+ universal primary plane (and its scaler, if one was
 * assigned during atomic check) from the given CRTC/plane state. The
 * final PLANE_SURF write arms the whole update for the next vblank.
 */
static void skylake_update_primary_plane(struct drm_plane *plane,
					 const struct intel_crtc_state *crtc_state,
					 const struct intel_plane_state *plane_state)
{
	struct drm_device *dev = plane->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_framebuffer *fb = plane_state->base.fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	int pipe = intel_crtc->pipe;
	u32 plane_ctl, stride_div, stride;
	u32 tile_height, plane_offset, plane_size;
	unsigned int rotation = plane_state->base.rotation;
	int x_offset, y_offset;
	u32 surf_addr;
	int scaler_id = plane_state->scaler_id;
	/* src is 16.16 fixed point, dst is integer pixels */
	int src_x = plane_state->src.x1 >> 16;
	int src_y = plane_state->src.y1 >> 16;
	int src_w = drm_rect_width(&plane_state->src) >> 16;
	int src_h = drm_rect_height(&plane_state->src) >> 16;
	int dst_x = plane_state->dst.x1;
	int dst_y = plane_state->dst.y1;
	int dst_w = drm_rect_width(&plane_state->dst);
	int dst_h = drm_rect_height(&plane_state->dst);

	plane_ctl = PLANE_CTL_ENABLE |
		    PLANE_CTL_PIPE_GAMMA_ENABLE |
		    PLANE_CTL_PIPE_CSC_ENABLE;

	plane_ctl |= skl_plane_ctl_format(fb->pixel_format);
	plane_ctl |= skl_plane_ctl_tiling(fb->modifier[0]);
	plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
	plane_ctl |= skl_plane_ctl_rotation(rotation);

	stride_div = intel_fb_stride_alignment(dev_priv, fb->modifier[0],
					       fb->pixel_format);
	surf_addr = intel_plane_obj_offset(to_intel_plane(plane), obj, 0);

	WARN_ON(drm_rect_width(&plane_state->src) == 0);

	if (intel_rotation_90_or_270(rotation)) {
		int cpp = drm_format_plane_cpp(fb->pixel_format, 0);

		/* stride = Surface height in tiles */
		tile_height = intel_tile_height(dev_priv, fb->modifier[0], cpp);
		stride = DIV_ROUND_UP(fb->height, tile_height);
		/* for 90/270 the x/y offsets are expressed in the rotated view */
		x_offset = stride * tile_height - src_y - src_h;
		y_offset = src_x;
		plane_size = (src_w - 1) << 16 | (src_h - 1);
	} else {
		stride = fb->pitches[0] / stride_div;
		x_offset = src_x;
		y_offset = src_y;
		plane_size = (src_h - 1) << 16 | (src_w - 1);
	}
	plane_offset = y_offset << 16 | x_offset;

	intel_crtc->adjusted_x = x_offset;
	intel_crtc->adjusted_y = y_offset;

	I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl);
	I915_WRITE(PLANE_OFFSET(pipe, 0), plane_offset);
	I915_WRITE(PLANE_SIZE(pipe, 0), plane_size);
	I915_WRITE(PLANE_STRIDE(pipe, 0), stride);

	if (scaler_id >= 0) {
		/* route the plane through its assigned pipe scaler */
		uint32_t ps_ctrl = 0;

		WARN_ON(!dst_w || !dst_h);
		ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(0) |
			crtc_state->scaler_state.scalers[scaler_id].mode;
		I915_WRITE(SKL_PS_CTRL(pipe, scaler_id), ps_ctrl);
		I915_WRITE(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
		I915_WRITE(SKL_PS_WIN_POS(pipe, scaler_id), (dst_x << 16) | dst_y);
		I915_WRITE(SKL_PS_WIN_SZ(pipe, scaler_id), (dst_w << 16) | dst_h);
		I915_WRITE(PLANE_POS(pipe, 0), 0);
	} else {
		I915_WRITE(PLANE_POS(pipe, 0), (dst_y << 16) | dst_x);
	}

	I915_WRITE(PLANE_SURF(pipe, 0), surf_addr);

	POSTING_READ(PLANE_SURF(pipe, 0));
}
3086
a8d201af
ML
/*
 * Disable the SKL+ primary plane: clear PLANE_CTL, then arm the change
 * with a PLANE_SURF write.
 */
static void skylake_disable_primary_plane(struct drm_plane *primary,
					  struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = to_intel_crtc(crtc)->pipe;

	I915_WRITE(PLANE_CTL(pipe, 0), 0);
	I915_WRITE(PLANE_SURF(pipe, 0), 0);
	POSTING_READ(PLANE_SURF(pipe, 0));
}
29b9bde6 3098
a8d201af
ML
/* Assume fb object is pinned & idle & fenced and just update base pointers */
/*
 * Legacy kgdboc/panic entry point; intentionally a stub that always
 * fails. Returns -ENODEV unconditionally.
 */
static int
intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
			   int x, int y, enum mode_set_atomic state)
{
	/* Support for kgdboc is disabled, this needs a major rework. */
	DRM_ERROR("legacy panic handler not supported any more.\n");

	return -ENODEV;
}
3109
5a21b665
DV
/* Complete any outstanding CS-based page flip on every CRTC. */
static void intel_complete_page_flips(struct drm_i915_private *dev_priv)
{
	struct intel_crtc *crtc;

	for_each_intel_crtc(&dev_priv->drm, crtc)
		intel_finish_page_flip_cs(dev_priv, crtc->pipe);
}
3117
7514747d
VS
/*
 * Rewrite the primary plane registers of every visible CRTC from the
 * current plane state. Used after a GPU reset to restore the correct
 * scanout addresses (CS flips may have been lost).
 */
static void intel_update_primary_planes(struct drm_device *dev)
{
	struct drm_crtc *crtc;

	for_each_crtc(dev, crtc) {
		struct intel_plane *plane = to_intel_plane(crtc->primary);
		struct intel_plane_state *plane_state;

		drm_modeset_lock_crtc(crtc, &plane->base);
		plane_state = to_intel_plane_state(plane->base.state);

		if (plane_state->visible)
			plane->update_plane(&plane->base,
					    to_intel_crtc_state(crtc->state),
					    plane_state);

		drm_modeset_unlock_crtc(crtc);
	}
}
3137
c033666a 3138void intel_prepare_reset(struct drm_i915_private *dev_priv)
7514747d
VS
3139{
3140 /* no reset support for gen2 */
c033666a 3141 if (IS_GEN2(dev_priv))
7514747d
VS
3142 return;
3143
3144 /* reset doesn't touch the display */
c033666a 3145 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
7514747d
VS
3146 return;
3147
91c8a326 3148 drm_modeset_lock_all(&dev_priv->drm);
f98ce92f
VS
3149 /*
3150 * Disabling the crtcs gracefully seems nicer. Also the
3151 * g33 docs say we should at least disable all the planes.
3152 */
91c8a326 3153 intel_display_suspend(&dev_priv->drm);
7514747d
VS
3154}
3155
c033666a 3156void intel_finish_reset(struct drm_i915_private *dev_priv)
7514747d 3157{
5a21b665
DV
3158 /*
3159 * Flips in the rings will be nuked by the reset,
3160 * so complete all pending flips so that user space
3161 * will get its events and not get stuck.
3162 */
3163 intel_complete_page_flips(dev_priv);
3164
7514747d 3165 /* no reset support for gen2 */
c033666a 3166 if (IS_GEN2(dev_priv))
7514747d
VS
3167 return;
3168
3169 /* reset doesn't touch the display */
c033666a 3170 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) {
7514747d
VS
3171 /*
3172 * Flips in the rings have been nuked by the reset,
3173 * so update the base address of all primary
3174 * planes to the the last fb to make sure we're
3175 * showing the correct fb after a reset.
11c22da6
ML
3176 *
3177 * FIXME: Atomic will make this obsolete since we won't schedule
3178 * CS-based flips (which might get lost in gpu resets) any more.
7514747d 3179 */
91c8a326 3180 intel_update_primary_planes(&dev_priv->drm);
7514747d
VS
3181 return;
3182 }
3183
3184 /*
3185 * The display has been reset as well,
3186 * so need a full re-initialization.
3187 */
3188 intel_runtime_pm_disable_interrupts(dev_priv);
3189 intel_runtime_pm_enable_interrupts(dev_priv);
3190
91c8a326 3191 intel_modeset_init_hw(&dev_priv->drm);
7514747d
VS
3192
3193 spin_lock_irq(&dev_priv->irq_lock);
3194 if (dev_priv->display.hpd_irq_setup)
91d14251 3195 dev_priv->display.hpd_irq_setup(dev_priv);
7514747d
VS
3196 spin_unlock_irq(&dev_priv->irq_lock);
3197
91c8a326 3198 intel_display_resume(&dev_priv->drm);
7514747d
VS
3199
3200 intel_hpd_init(dev_priv);
3201
91c8a326 3202 drm_modeset_unlock_all(&dev_priv->drm);
7514747d
VS
3203}
3204
7d5e3799
CW
/*
 * Return true if the CRTC still has a page flip outstanding. A flip is
 * not considered pending if a GPU reset has happened since it was
 * queued (the reset counter no longer matches). flip_work is sampled
 * under the event_lock to synchronize with the flip completion path.
 */
static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned reset_counter;
	bool pending;

	reset_counter = i915_reset_counter(&to_i915(dev)->gpu_error);
	if (intel_crtc->reset_counter != reset_counter)
		return false;

	spin_lock_irq(&dev->event_lock);
	pending = to_intel_crtc(crtc)->flip_work != NULL;
	spin_unlock_irq(&dev->event_lock);

	return pending;
}
3222
bfd16b2a
ML
/*
 * Update PIPESRC and the panel fitter for a fastset (flip without a
 * full modeset) where only the pipe source size / pfit state changed.
 */
static void intel_update_pipe_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *old_crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *pipe_config =
		to_intel_crtc_state(crtc->base.state);

	/* drm_atomic_helper_update_legacy_modeset_state might not be called. */
	crtc->base.mode = crtc->base.state->mode;

	DRM_DEBUG_KMS("Updating pipe size %ix%i -> %ix%i\n",
		      old_crtc_state->pipe_src_w, old_crtc_state->pipe_src_h,
		      pipe_config->pipe_src_w, pipe_config->pipe_src_h);

	/*
	 * Update pipe size and adjust fitter if needed: the reason for this is
	 * that in compute_mode_changes we check the native mode (not the pfit
	 * mode) to see if we can flip rather than do a full mode set. In the
	 * fastboot case, we'll flip, but if we don't update the pipesrc and
	 * pfit state, we'll end up with a big fb scanned out into the wrong
	 * sized surface.
	 */

	I915_WRITE(PIPESRC(crtc->pipe),
		   ((pipe_config->pipe_src_w - 1) << 16) |
		   (pipe_config->pipe_src_h - 1));

	/* on skylake this is done by detaching scalers */
	if (INTEL_INFO(dev)->gen >= 9) {
		skl_detach_scalers(crtc);

		if (pipe_config->pch_pfit.enabled)
			skylake_pfit_enable(crtc);
	} else if (HAS_PCH_SPLIT(dev)) {
		if (pipe_config->pch_pfit.enabled)
			ironlake_pfit_enable(crtc);
		else if (old_crtc_state->pch_pfit.enabled)
			ironlake_pfit_disable(crtc, true);
	}
}
3264
5e84e1a4
ZW
/*
 * Switch the FDI link from the training patterns to normal pixel
 * traffic after link training has completed, and enable enhanced
 * framing on both TX and RX ends.
 */
static void intel_fdi_normal_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable normal train */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if (IS_IVYBRIDGE(dev)) {
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;
	}
	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

	/* wait one idle pattern time */
	POSTING_READ(reg);
	udelay(1000);

	/* IVB wants error correction enabled */
	if (IS_IVYBRIDGE(dev))
		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
			   FDI_FE_ERRC_ENABLE);
}
3306
8db9d77b
ZW
/* The FDI link training functions for ILK/Ibexpeak. */
/*
 * Train the FDI link: run pattern 1 until bit lock, then pattern 2
 * until symbol lock, polling FDI_RX_IIR up to 5 times per stage.
 * Failures are logged but not propagated.
 */
static void ironlake_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp, tries;

	/* FDI needs bits from pipe first */
	assert_pipe_enabled(dev_priv, pipe);

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);
	I915_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Ironlake workaround, enable clock pointer after FDI enable*/
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
		   FDI_RX_PHASE_SYNC_POINTER_EN);

	/* poll for bit lock (train 1 result) */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if ((temp & FDI_RX_BIT_LOCK)) {
			DRM_DEBUG_KMS("FDI train 1 done.\n");
			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* poll for symbol lock (train 2 result) */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
			DRM_DEBUG_KMS("FDI train 2 done.\n");
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done\n");

}
3400
0206e353 3401static const int snb_b_fdi_train_param[] = {
8db9d77b
ZW
3402 FDI_LINK_TRAIN_400MV_0DB_SNB_B,
3403 FDI_LINK_TRAIN_400MV_6DB_SNB_B,
3404 FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
3405 FDI_LINK_TRAIN_800MV_0DB_SNB_B,
3406};
3407
/* The FDI link training functions for SNB/Cougarpoint. */
/*
 * Train the FDI link on SNB: for each stage (pattern 1 / pattern 2),
 * step through the snb_b_fdi_train_param vswing/pre-emphasis table,
 * retrying each setting up to 5 times until bit lock resp. symbol lock
 * is reported in FDI_RX_IIR. Failures are logged but not propagated.
 */
static void gen6_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, retry;

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	/* SNB-B */
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	I915_WRITE(FDI_RX_MISC(pipe),
		   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* step the vswing/pre-emphasis table until bit lock */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_BIT_LOCK) {
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	if (IS_GEN6(dev)) {
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* SNB-B */
		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_2;
	}
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* step the vswing/pre-emphasis table until symbol lock */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_SYMBOL_LOCK) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done.\n");
}
3540
/* Manual link training for Ivy Bridge A0 parts */
static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, j;

	/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
		      I915_READ(FDI_RX_IIR(pipe)));

	/* Try each vswing and preemphasis setting twice before moving on */
	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
		/* disable first in case we need to retry */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
		temp &= ~FDI_TX_ENABLE;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_AUTO;
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp &= ~FDI_RX_ENABLE;
		I915_WRITE(reg, temp);

		/* enable CPU FDI TX and PCH FDI RX */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_DP_PORT_WIDTH_MASK;
		temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
		temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* j counts each vswing/preemphasis entry twice, hence j/2 */
		temp |= snb_b_fdi_train_param[j/2];
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_TX_ENABLE);

		I915_WRITE(FDI_RX_MISC(pipe),
			   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_RX_ENABLE);

		POSTING_READ(reg);
		udelay(1); /* should be 0.5us */

		/* Poll up to 4 times for bit lock (pattern 1 result) */
		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			/* re-read once in case the bit latched between reads */
			if (temp & FDI_RX_BIT_LOCK ||
			    (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
					      i);
				break;
			}
			udelay(1); /* should be 0.5us */
		}
		if (i == 4) {
			DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
			continue;
		}

		/* Train 2 */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(2); /* should be 1.5us */

		/* Poll up to 4 times for symbol lock (pattern 2 result) */
		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_SYMBOL_LOCK ||
			    (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
					      i);
				goto train_done;
			}
			udelay(2); /* should be 1.5us */
		}
		if (i == 4)
			DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
	}

train_done:
	DRM_DEBUG_KMS("FDI train done.\n");
}
3660
/* Enable the FDI RX and TX PLLs for the given crtc's pipe. */
static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
	/* mirror the pipe's BPC setting into the FDI RX bpc field (bits 18:16) */
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(200);

	/* Switch from Rawclk to PCDclk */
	temp = I915_READ(reg);
	I915_WRITE(reg, temp | FDI_PCDCLK);

	POSTING_READ(reg);
	udelay(200);

	/* Enable CPU FDI TX PLL, always on for Ironlake */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
		I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);

		POSTING_READ(reg);
		udelay(100);
	}
}
3697
/* Disable the FDI TX and RX PLLs, reversing ironlake_fdi_pll_enable(). */
static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* Switch from PCDclk to Rawclk */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_PCDCLK);

	/* Disable CPU FDI TX PLL */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);

	/* Wait for the clocks to turn off. */
	POSTING_READ(reg);
	udelay(100);
}
3727
/*
 * Disable the CPU FDI transmitter and PCH FDI receiver for the crtc's
 * pipe, leaving both sides parked in training pattern 1.
 */
static void ironlake_fdi_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* disable CPU FDI tx and PCH FDI rx */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
	POSTING_READ(reg);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(0x7 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	/* Ironlake workaround, disable clock pointer after downing FDI */
	if (HAS_PCH_IBX(dev))
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);

	/* still set train pattern 1 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	/* CPT uses dedicated pattern-select bits; older PCHs reuse the TX encoding */
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	/* BPC in FDI rx is consistent with that in PIPECONF */
	temp &= ~(0x07 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(100);
}
3780
/*
 * Returns true if any crtc still holds framebuffers pending unpin; also
 * waits one vblank on a crtc with an outstanding flip to help it along.
 */
bool intel_has_pending_fb_unpin(struct drm_device *dev)
{
	struct intel_crtc *crtc;

	/* Note that we don't need to be called with mode_config.lock here
	 * as our list of CRTC objects is static for the lifetime of the
	 * device and so cannot disappear as we iterate. Similarly, we can
	 * happily treat the predicates as racy, atomic checks as userspace
	 * cannot claim and pin a new fb without at least acquiring the
	 * struct_mutex and so serialising with us.
	 */
	for_each_intel_crtc(dev, crtc) {
		if (atomic_read(&crtc->unpin_work_count) == 0)
			continue;

		if (crtc->flip_work)
			intel_wait_for_vblank(dev, crtc->pipe);

		/* first crtc with pending unpin work decides the answer */
		return true;
	}

	return false;
}
3804
/*
 * Complete the crtc's pending page flip: deliver the vblank event (if
 * requested), release the vblank reference and hand the unpin work off
 * to the driver workqueue.
 *
 * NOTE(review): callers appear to invoke this under dev->event_lock
 * (see intel_crtc_wait_for_pending_flips) — confirm before adding new
 * call sites.
 */
static void page_flip_completed(struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	struct intel_flip_work *work = intel_crtc->flip_work;

	/* claim the work before anything can observe it as pending */
	intel_crtc->flip_work = NULL;

	if (work->event)
		drm_crtc_send_vblank_event(&intel_crtc->base, work->event);

	drm_crtc_vblank_put(&intel_crtc->base);

	wake_up_all(&dev_priv->pending_flip_queue);
	queue_work(dev_priv->wq, &work->unpin_work);

	trace_i915_flip_complete(intel_crtc->plane,
				 work->pending_flip_obj);
}
3823
/*
 * Wait (interruptibly, up to 60s) for the crtc's pending page flip to
 * complete. A flip still outstanding after the timeout is assumed stuck
 * and force-completed. Returns 0 on success or a negative error if the
 * wait was interrupted.
 */
static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	long ret;

	WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));

	ret = wait_event_interruptible_timeout(
					dev_priv->pending_flip_queue,
					!intel_crtc_has_pending_flip(crtc),
					60*HZ);

	if (ret < 0)
		return ret;

	if (ret == 0) {
		/* timed out: forcibly complete a stuck CS flip (mmio flips
		 * are left alone) */
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
		struct intel_flip_work *work;

		spin_lock_irq(&dev->event_lock);
		work = intel_crtc->flip_work;
		if (work && !is_mmio_work(work)) {
			WARN_ONCE(1, "Removing stuck page flip\n");
			page_flip_completed(intel_crtc);
		}
		spin_unlock_irq(&dev->event_lock);
	}

	return 0;
}
3855
/* Gate the iCLKIP pixel clock and disable its SSC modulator via sideband. */
static void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
{
	u32 temp;

	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);

	mutex_lock(&dev_priv->sb_lock);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp |= SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}
3870
/* Program iCLKIP clock to the desired frequency */
static void lpt_program_iclkip(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	int clock = to_intel_crtc(crtc)->config->base.adjusted_mode.crtc_clock;
	u32 divsel, phaseinc, auxdiv, phasedir = 0;
	u32 temp;

	/* must disable the modulator before reprogramming it */
	lpt_disable_iclkip(dev_priv);

	/* The iCLK virtual clock root frequency is in MHz,
	 * but the adjusted_mode->crtc_clock is in KHz. To get the
	 * divisors, it is necessary to divide one by another, so we
	 * convert the virtual clock precision to KHz here for higher
	 * precision.
	 */
	for (auxdiv = 0; auxdiv < 2; auxdiv++) {
		u32 iclk_virtual_root_freq = 172800 * 1000;
		u32 iclk_pi_range = 64;
		u32 desired_divisor;

		desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
						    clock << auxdiv);
		divsel = (desired_divisor / iclk_pi_range) - 2;
		phaseinc = desired_divisor % iclk_pi_range;

		/*
		 * Near 20MHz is a corner case which is
		 * out of range for the 7-bit divisor
		 */
		if (divsel <= 0x7f)
			break;
	}

	/* This should not happen with any sane values */
	WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
		~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
	WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
		~SBI_SSCDIVINTPHASE_INCVAL_MASK);

	DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
			clock,
			auxdiv,
			divsel,
			phasedir,
			phaseinc);

	mutex_lock(&dev_priv->sb_lock);

	/* Program SSCDIVINTPHASE6 */
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);

	/* Program SSCAUXDIV */
	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);

	/* Enable modulator and associated divider */
	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp &= ~SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);

	/* Wait for initialization time */
	udelay(24);

	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
}
3948
/*
 * Read back the currently programmed iCLKIP frequency in KHz, inverting
 * the divisor math of lpt_program_iclkip(). Returns 0 when the clock is
 * gated or the modulator is disabled.
 */
int lpt_get_iclkip(struct drm_i915_private *dev_priv)
{
	u32 divsel, phaseinc, auxdiv;
	u32 iclk_virtual_root_freq = 172800 * 1000;
	u32 iclk_pi_range = 64;
	u32 desired_divisor;
	u32 temp;

	if ((I915_READ(PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
		return 0;

	mutex_lock(&dev_priv->sb_lock);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	if (temp & SBI_SSCCTL_DISABLE) {
		mutex_unlock(&dev_priv->sb_lock);
		return 0;
	}

	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
		SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
	phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
		SBI_SSCDIVINTPHASE_INCVAL_SHIFT;

	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
		SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;

	mutex_unlock(&dev_priv->sb_lock);

	/* inverse of divsel = desired_divisor / range - 2, phaseinc = mod */
	desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;

	return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
				 desired_divisor << auxdiv);
}
3985
/* Copy the CPU transcoder's mode timings into the PCH transcoder registers. */
static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
						enum pipe pch_transcoder)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;

	I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
		   I915_READ(HTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
		   I915_READ(HBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
		   I915_READ(HSYNC(cpu_transcoder)));

	I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
		   I915_READ(VTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
		   I915_READ(VBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
		   I915_READ(VSYNC(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
		   I915_READ(VSYNCSHIFT(cpu_transcoder)));
}
4009
/*
 * Toggle FDI B/C lane bifurcation on CPT. Must only be changed while the
 * FDI receivers on pipes B and C are disabled (WARNed below).
 */
static void cpt_set_fdi_bc_bifurcation(struct drm_device *dev, bool enable)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t temp;

	temp = I915_READ(SOUTH_CHICKEN1);
	/* already in the requested state — nothing to do */
	if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
		return;

	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);

	temp &= ~FDI_BC_BIFURCATION_SELECT;
	if (enable)
		temp |= FDI_BC_BIFURCATION_SELECT;

	DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis");
	I915_WRITE(SOUTH_CHICKEN1, temp);
	POSTING_READ(SOUTH_CHICKEN1);
}
4030
4031static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
4032{
4033 struct drm_device *dev = intel_crtc->base.dev;
1fbc0d78
DV
4034
4035 switch (intel_crtc->pipe) {
4036 case PIPE_A:
4037 break;
4038 case PIPE_B:
6e3c9717 4039 if (intel_crtc->config->fdi_lanes > 2)
003632d9 4040 cpt_set_fdi_bc_bifurcation(dev, false);
1fbc0d78 4041 else
003632d9 4042 cpt_set_fdi_bc_bifurcation(dev, true);
1fbc0d78
DV
4043
4044 break;
4045 case PIPE_C:
003632d9 4046 cpt_set_fdi_bc_bifurcation(dev, true);
1fbc0d78
DV
4047
4048 break;
4049 default:
4050 BUG();
4051 }
4052}
4053
/* Return which DP Port should be selected for Transcoder DP control */
static enum port
intel_trans_dp_port_sel(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct intel_encoder *encoder;

	for_each_encoder_on_crtc(dev, crtc, encoder) {
		if (encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
		    encoder->type == INTEL_OUTPUT_EDP)
			return enc_to_dig_port(&encoder->base)->port;
	}

	/* NOTE(review): -1 is not a named enum port value; the sole caller
	 * (ironlake_pch_enable) BUG()s on it via the switch default. */
	return -1;
}
4069
/*
 * Enable PCH resources required for PCH ports:
 * - PCH PLLs
 * - FDI training & RX/TX
 * - update transcoder timings
 * - DP transcoding bits
 * - transcoder
 */
static void ironlake_pch_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 temp;

	assert_pch_transcoder_disabled(dev_priv, pipe);

	if (IS_IVYBRIDGE(dev))
		ivybridge_update_fdi_bc_bifurcation(intel_crtc);

	/* Write the TU size bits before fdi link training, so that error
	 * detection works. */
	I915_WRITE(FDI_RX_TUSIZE1(pipe),
		   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

	/* For PCH output, training FDI link */
	dev_priv->display.fdi_link_train(crtc);

	/* We need to program the right clock selection before writing the pixel
	 * multiplier into the DPLL. */
	if (HAS_PCH_CPT(dev)) {
		u32 sel;

		temp = I915_READ(PCH_DPLL_SEL);
		temp |= TRANS_DPLL_ENABLE(pipe);
		sel = TRANS_DPLLB_SEL(pipe);
		if (intel_crtc->config->shared_dpll ==
		    intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
			temp |= sel;
		else
			temp &= ~sel;
		I915_WRITE(PCH_DPLL_SEL, temp);
	}

	/* XXX: pch pll's can be enabled any time before we enable the PCH
	 * transcoder, and we actually should do this to not upset any PCH
	 * transcoder that already use the clock when we share it.
	 *
	 * Note that enable_shared_dpll tries to do the right thing, but
	 * get_shared_dpll unconditionally resets the pll - we need that to have
	 * the right LVDS enable sequence. */
	intel_enable_shared_dpll(intel_crtc);

	/* set transcoder timing, panel must allow it */
	assert_panel_unlocked(dev_priv, pipe);
	ironlake_pch_transcoder_set_timings(intel_crtc, pipe);

	intel_fdi_normal_train(crtc);

	/* For PCH DP, enable TRANS_DP_CTL */
	if (HAS_PCH_CPT(dev) && intel_crtc->config->has_dp_encoder) {
		const struct drm_display_mode *adjusted_mode =
			&intel_crtc->config->base.adjusted_mode;
		u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
		i915_reg_t reg = TRANS_DP_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(TRANS_DP_PORT_SEL_MASK |
			  TRANS_DP_SYNC_MASK |
			  TRANS_DP_BPC_MASK);
		temp |= TRANS_DP_OUTPUT_ENABLE;
		temp |= bpc << 9; /* same format but at 11:9 */

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;

		/* route the crtc's DP port to this transcoder */
		switch (intel_trans_dp_port_sel(crtc)) {
		case PORT_B:
			temp |= TRANS_DP_PORT_SEL_B;
			break;
		case PORT_C:
			temp |= TRANS_DP_PORT_SEL_C;
			break;
		case PORT_D:
			temp |= TRANS_DP_PORT_SEL_D;
			break;
		default:
			BUG();
		}

		I915_WRITE(reg, temp);
	}

	ironlake_enable_pch_transcoder(dev_priv, pipe);
}
4167
/*
 * LPT variant of ironlake_pch_enable(): program iCLKIP, copy timings and
 * enable the (single) PCH transcoder. LPT has no FDI training to do here.
 */
static void lpt_pch_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;

	assert_pch_transcoder_disabled(dev_priv, TRANSCODER_A);

	lpt_program_iclkip(crtc);

	/* Set transcoder timing. */
	ironlake_pch_transcoder_set_timings(intel_crtc, PIPE_A);

	lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
}
4184
/*
 * Sanity-check that the pipe is actually running after a modeset by
 * verifying the scanline counter (PIPEDSL) advances; logs an error if it
 * stays stuck after two waits.
 */
static void cpt_verify_modeset(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	i915_reg_t dslreg = PIPEDSL(pipe);
	u32 temp;

	temp = I915_READ(dslreg);
	udelay(500);
	if (wait_for(I915_READ(dslreg) != temp, 5)) {
		/* retry once before declaring the pipe stuck */
		if (wait_for(I915_READ(dslreg) != temp, 5))
			DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
	}
}
4198
/*
 * Stage a scaler allocation (or release) for one scaler user in the
 * crtc state. Only the software bookkeeping in crtc_state->scaler_state
 * is updated here; the actual scaler registers are programmed later in
 * plane/panel-fitter programming. Returns 0 on success, -EINVAL when the
 * requested src/dst sizes are outside the scaler's supported range.
 */
static int
skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
		  unsigned scaler_user, int *scaler_id, unsigned int rotation,
		  int src_w, int src_h, int dst_w, int dst_h)
{
	struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(crtc_state->base.crtc);
	int need_scaling;

	/* with 90/270 rotation the src is compared against the swapped dst */
	need_scaling = intel_rotation_90_or_270(rotation) ?
		(src_h != dst_w || src_w != dst_h):
		(src_w != dst_w || src_h != dst_h);

	/*
	 * if plane is being disabled or scaler is no more required or force detach
	 *  - free scaler bound to this plane/crtc
	 *  - in order to do this, update crtc->scaler_usage
	 *
	 * Here scaler state in crtc_state is set free so that
	 * scaler can be assigned to other user. Actual register
	 * update to free the scaler is done in plane/panel-fit programming.
	 * For this purpose crtc/plane_state->scaler_id isn't reset here.
	 */
	if (force_detach || !need_scaling) {
		if (*scaler_id >= 0) {
			scaler_state->scaler_users &= ~(1 << scaler_user);
			scaler_state->scalers[*scaler_id].in_use = 0;

			DRM_DEBUG_KMS("scaler_user index %u.%u: "
				"Staged freeing scaler id %d scaler_users = 0x%x\n",
				intel_crtc->pipe, scaler_user, *scaler_id,
				scaler_state->scaler_users);
			*scaler_id = -1;
		}
		return 0;
	}

	/* range checks */
	if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
		dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||

		src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
		dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H) {
		DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
			"size is out of scaler range\n",
			intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h);
		return -EINVAL;
	}

	/* mark this plane as a scaler user in crtc_state */
	scaler_state->scaler_users |= (1 << scaler_user);
	DRM_DEBUG_KMS("scaler_user index %u.%u: "
		"staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
		intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
		scaler_state->scaler_users);

	return 0;
}
4259
/**
 * skl_update_scaler_crtc - Stages update to scaler state for a given crtc.
 *
 * @state: crtc state containing the staged scaler state to update
 *
 * Stages a pipe-scaler (panel fitter) request sized from the pipe source
 * to the adjusted mode's active display size.
 *
 * Return
 *     0 - scaler_usage updated successfully
 *     error - requested scaling cannot be supported or other error condition
 */
int skl_update_scaler_crtc(struct intel_crtc_state *state)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(state->base.crtc);
	const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;

	DRM_DEBUG_KMS("Updating scaler for [CRTC:%d:%s] scaler_user index %u.%u\n",
		      intel_crtc->base.base.id, intel_crtc->base.name,
		      intel_crtc->pipe, SKL_CRTC_INDEX);

	/* an inactive crtc forces detach of its scaler */
	return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
		&state->scaler_state.scaler_id, BIT(DRM_ROTATE_0),
		state->pipe_src_w, state->pipe_src_h,
		adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_vdisplay);
}
4283
/**
 * skl_update_scaler_plane - Stages update to scaler state for a given plane.
 *
 * @crtc_state: crtc state whose scaler bookkeeping is updated
 * @plane_state: atomic plane state to update
 *
 * Stages (or detaches) a plane scaler and validates that the plane's
 * color-key setting and framebuffer format are compatible with scaling.
 *
 * Return
 *     0 - scaler_usage updated successfully
 *     error - requested scaling cannot be supported or other error condition
 */
static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
				   struct intel_plane_state *plane_state)
{

	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
	struct intel_plane *intel_plane =
		to_intel_plane(plane_state->base.plane);
	struct drm_framebuffer *fb = plane_state->base.fb;
	int ret;

	/* no fb or invisible plane -> release any scaler it holds */
	bool force_detach = !fb || !plane_state->visible;

	DRM_DEBUG_KMS("Updating scaler for [PLANE:%d:%s] scaler_user index %u.%u\n",
		      intel_plane->base.base.id, intel_plane->base.name,
		      intel_crtc->pipe, drm_plane_index(&intel_plane->base));

	/* src coordinates are 16.16 fixed point, hence the >> 16 */
	ret = skl_update_scaler(crtc_state, force_detach,
				drm_plane_index(&intel_plane->base),
				&plane_state->scaler_id,
				plane_state->base.rotation,
				drm_rect_width(&plane_state->src) >> 16,
				drm_rect_height(&plane_state->src) >> 16,
				drm_rect_width(&plane_state->dst),
				drm_rect_height(&plane_state->dst));

	if (ret || plane_state->scaler_id < 0)
		return ret;

	/* check colorkey */
	if (plane_state->ckey.flags != I915_SET_COLORKEY_NONE) {
		DRM_DEBUG_KMS("[PLANE:%d:%s] scaling with color key not allowed",
			      intel_plane->base.base.id,
			      intel_plane->base.name);
		return -EINVAL;
	}

	/* Check src format */
	switch (fb->pixel_format) {
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ABGR8888:
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_YUYV:
	case DRM_FORMAT_YVYU:
	case DRM_FORMAT_UYVY:
	case DRM_FORMAT_VYUY:
		break;
	default:
		DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
			      intel_plane->base.base.id, intel_plane->base.name,
			      fb->base.id, fb->pixel_format);
		return -EINVAL;
	}

	return 0;
}
4353
/* Detach every scaler from the crtc. */
static void skylake_scaler_disable(struct intel_crtc *crtc)
{
	int i;

	for (i = 0; i < crtc->num_scalers; i++)
		skl_detach_scaler(crtc, i);
}
4361
/*
 * Program the SKL panel-fitter (pipe scaler) using the scaler id staged
 * in the crtc state; no-op if panel fitting is not enabled.
 */
static void skylake_pfit_enable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;
	struct intel_crtc_scaler_state *scaler_state =
		&crtc->config->scaler_state;

	DRM_DEBUG_KMS("for crtc_state = %p\n", crtc->config);

	if (crtc->config->pch_pfit.enabled) {
		int id;

		/* a scaler must have been reserved during atomic check */
		if (WARN_ON(crtc->config->scaler_state.scaler_id < 0)) {
			DRM_ERROR("Requesting pfit without getting a scaler first\n");
			return;
		}

		id = scaler_state->scaler_id;
		I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
			PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
		I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc->config->pch_pfit.pos);
		I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc->config->pch_pfit.size);

		DRM_DEBUG_KMS("for crtc_state = %p scaler_id = %d\n", crtc->config, id);
	}
}
4389
/* Program the ILK/IVB/HSW panel fitter; no-op if pfit is not enabled. */
static void ironlake_pfit_enable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;

	if (crtc->config->pch_pfit.enabled) {
		/* Force use of hard-coded filter coefficients
		 * as some pre-programmed values are broken,
		 * e.g. x201.
		 */
		if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
						 PF_PIPE_SEL_IVB(pipe));
		else
			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
		I915_WRITE(PF_WIN_POS(pipe), crtc->config->pch_pfit.pos);
		I915_WRITE(PF_WIN_SZ(pipe), crtc->config->pch_pfit.size);
	}
}
4410
/*
 * Enable Intermediate Pixel Storage (IPS) on HSW/BDW. On BDW the enable
 * goes through the pcode mailbox; on HSW it is a direct register write
 * followed by a wait for the hardware to confirm.
 */
void hsw_enable_ips(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!crtc->config->ips_enabled)
		return;

	/*
	 * We can only enable IPS after we enable a plane and wait for a vblank
	 * This function is called from post_plane_update, which is run after
	 * a vblank wait.
	 */

	assert_plane_enabled(dev_priv, crtc->plane);
	if (IS_BROADWELL(dev)) {
		mutex_lock(&dev_priv->rps.hw_lock);
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0xc0000000));
		mutex_unlock(&dev_priv->rps.hw_lock);
		/* Quoting Art Runyan: "its not safe to expect any particular
		 * value in IPS_CTL bit 31 after enabling IPS through the
		 * mailbox." Moreover, the mailbox may return a bogus state,
		 * so we need to just enable it and continue on.
		 */
	} else {
		I915_WRITE(IPS_CTL, IPS_ENABLE);
		/* The bit only becomes 1 in the next vblank, so this wait here
		 * is essentially intel_wait_for_vblank. If we don't have this
		 * and don't wait for vblanks until the end of crtc_enable, then
		 * the HW state readout code will complain that the expected
		 * IPS_CTL value is not the one we read. */
		if (intel_wait_for_register(dev_priv,
					    IPS_CTL, IPS_ENABLE, IPS_ENABLE,
					    50))
			DRM_ERROR("Timed out waiting for IPS enable\n");
	}
}
4448
/*
 * Disable IPS for @crtc (no-op if the crtc state never enabled it).
 * BDW disables via the pcode mailbox and polls IPS_CTL until the enable
 * bit clears; HSW clears IPS_CTL directly. Ends with a vblank wait so
 * the caller may safely disable the plane afterwards.
 */
void hsw_disable_ips(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (!crtc->config->ips_enabled)
		return;

	assert_plane_enabled(dev_priv, crtc->plane);
	if (IS_BROADWELL(dev)) {
		mutex_lock(&dev_priv->rps.hw_lock);
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
		mutex_unlock(&dev_priv->rps.hw_lock);
		/* wait for pcode to finish disabling IPS, which may take up to 42ms */
		if (intel_wait_for_register(dev_priv,
					    IPS_CTL, IPS_ENABLE, 0,
					    42))
			DRM_ERROR("Timed out waiting for IPS disable\n");
	} else {
		I915_WRITE(IPS_CTL, 0);
		/* Flush the write before continuing with the teardown */
		POSTING_READ(IPS_CTL);
	}

	/* We need to wait for a vblank before we can disable the plane. */
	intel_wait_for_vblank(dev, crtc->pipe);
}
4475
7cac945f 4476static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
d3eedb1a 4477{
7cac945f 4478 if (intel_crtc->overlay) {
d3eedb1a 4479 struct drm_device *dev = intel_crtc->base.dev;
fac5e23e 4480 struct drm_i915_private *dev_priv = to_i915(dev);
d3eedb1a
VS
4481
4482 mutex_lock(&dev->struct_mutex);
4483 dev_priv->mm.interruptible = false;
4484 (void) intel_overlay_switch_off(intel_crtc->overlay);
4485 dev_priv->mm.interruptible = true;
4486 mutex_unlock(&dev->struct_mutex);
4487 }
4488
4489 /* Let userspace switch the overlay on again. In most cases userspace
4490 * has to recompute where to put it anyway.
4491 */
4492}
4493
87d4300a
ML
/**
 * intel_post_enable_primary - Perform operations after enabling primary plane
 * @crtc: the CRTC whose primary plane was just enabled
 *
 * Performs potentially sleeping operations that must be done after the primary
 * plane is enabled, such as updating FBC and IPS. Note that this may be
 * called due to an explicit primary plane update, or due to an implicit
 * re-enable that is caused when a sprite plane is updated to no longer
 * completely hide the primary plane.
 */
static void
intel_post_enable_primary(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;

	/*
	 * FIXME IPS should be fine as long as one plane is
	 * enabled, but in practice it seems to have problems
	 * when going from primary only to sprite only and vice
	 * versa.
	 */
	hsw_enable_ips(intel_crtc);

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So don't enable underrun reporting before at least some planes
	 * are enabled.
	 * FIXME: Need to fix the logic to work when we turn off all planes
	 * but leave the pipe running.
	 */
	if (IS_GEN2(dev))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	/* Underruns don't always raise interrupts, so check manually. */
	intel_check_cpu_fifo_underruns(dev_priv);
	intel_check_pch_fifo_underruns(dev_priv);
}
4534
/* FIXME move all this to pre_plane_update() with proper state tracking */
/*
 * Counterpart of intel_post_enable_primary(): operations that must run
 * before the primary plane is disabled (suppress gen2 underrun noise,
 * turn IPS off).
 */
static void
intel_pre_disable_primary(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 * FIXME: Need to fix the logic to work when we turn off all planes
	 * but leave the pipe running.
	 */
	if (IS_GEN2(dev))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	/*
	 * FIXME IPS should be fine as long as one plane is
	 * enabled, but in practice it seems to have problems
	 * when going from primary only to sprite only and vice
	 * versa.
	 */
	hsw_disable_ips(intel_crtc);
}
4561
/* FIXME get rid of this and use pre_plane_update */
/*
 * Non-atomic variant of the pre-disable hook: runs the common
 * intel_pre_disable_primary() work and additionally kicks the hardware
 * out of memory self-refresh on GMCH platforms so the plane disable
 * actually latches (see comment below).
 */
static void
intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;

	intel_pre_disable_primary(crtc);

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH_DISPLAY(dev)) {
		intel_set_memory_cxsr(dev_priv, false);
		dev_priv->wm.vlv.cxsr = false;
		intel_wait_for_vblank(dev, pipe);
	}
}
4588
5a21b665
DV
/*
 * Post-commit plane bookkeeping for one crtc: frontbuffer flip notification,
 * re-allowing cxsr, post-update watermarks, FBC, and the post-enable hook
 * for a primary plane that just became visible.
 *
 * @old_crtc_state: the crtc state from before the commit; the new state is
 * read from crtc->base.state.
 */
static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
	struct drm_atomic_state *old_state = old_crtc_state->base.state;
	struct intel_crtc_state *pipe_config =
		to_intel_crtc_state(crtc->base.state);
	struct drm_device *dev = crtc->base.dev;
	struct drm_plane *primary = crtc->base.primary;
	struct drm_plane_state *old_pri_state =
		drm_atomic_get_existing_plane_state(old_state, primary);

	intel_frontbuffer_flip(dev, pipe_config->fb_bits);

	crtc->wm.cxsr_allowed = true;

	if (pipe_config->update_wm_post && pipe_config->base.active)
		intel_update_watermarks(&crtc->base);

	/* Only act on the primary plane if it was part of this commit */
	if (old_pri_state) {
		struct intel_plane_state *primary_state =
			to_intel_plane_state(primary->state);
		struct intel_plane_state *old_primary_state =
			to_intel_plane_state(old_pri_state);

		intel_fbc_post_update(crtc);

		/* Primary just (re)appeared, or a full modeset made it visible */
		if (primary_state->visible &&
		    (needs_modeset(&pipe_config->base) ||
		     !old_primary_state->visible))
			intel_post_enable_primary(&crtc->base);
	}
}
4621
/*
 * Pre-commit plane bookkeeping for one crtc: FBC pre-update, the primary
 * pre-disable hook, cxsr teardown on GMCH platforms, the IVB sprite-scaling
 * LP watermark workaround, and programming of intermediate watermarks.
 *
 * @old_crtc_state: the crtc state from before the commit; the new state is
 * read from crtc->base.state.
 */
static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *pipe_config =
		to_intel_crtc_state(crtc->base.state);
	struct drm_atomic_state *old_state = old_crtc_state->base.state;
	struct drm_plane *primary = crtc->base.primary;
	struct drm_plane_state *old_pri_state =
		drm_atomic_get_existing_plane_state(old_state, primary);
	bool modeset = needs_modeset(&pipe_config->base);

	/* Only act on the primary plane if it was part of this commit */
	if (old_pri_state) {
		struct intel_plane_state *primary_state =
			to_intel_plane_state(primary->state);
		struct intel_plane_state *old_primary_state =
			to_intel_plane_state(old_pri_state);

		intel_fbc_pre_update(crtc, pipe_config, primary_state);

		/* Primary is about to vanish (or a modeset will tear it down) */
		if (old_primary_state->visible &&
		    (modeset || !primary_state->visible))
			intel_pre_disable_primary(&crtc->base);
	}

	if (pipe_config->disable_cxsr && HAS_GMCH_DISPLAY(dev)) {
		crtc->wm.cxsr_allowed = false;

		/*
		 * Vblank time updates from the shadow to live plane control register
		 * are blocked if the memory self-refresh mode is active at that
		 * moment. So to make sure the plane gets truly disabled, disable
		 * first the self-refresh mode. The self-refresh enable bit in turn
		 * will be checked/applied by the HW only at the next frame start
		 * event which is after the vblank start event, so we need to have a
		 * wait-for-vblank between disabling the plane and the pipe.
		 */
		if (old_crtc_state->base.active) {
			intel_set_memory_cxsr(dev_priv, false);
			dev_priv->wm.vlv.cxsr = false;
			intel_wait_for_vblank(dev, crtc->pipe);
		}
	}

	/*
	 * IVB workaround: must disable low power watermarks for at least
	 * one frame before enabling scaling. LP watermarks can be re-enabled
	 * when scaling is disabled.
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 */
	if (pipe_config->disable_lp_wm) {
		ilk_disable_lp_wm(dev);
		intel_wait_for_vblank(dev, crtc->pipe);
	}

	/*
	 * If we're doing a modeset, we're done. No need to do any pre-vblank
	 * watermark programming here.
	 */
	if (needs_modeset(&pipe_config->base))
		return;

	/*
	 * For platforms that support atomic watermarks, program the
	 * 'intermediate' watermarks immediately. On pre-gen9 platforms, these
	 * will be the intermediate values that are safe for both pre- and
	 * post- vblank; when vblank happens, the 'active' values will be set
	 * to the final 'target' values and we'll do this again to get the
	 * optimal watermarks. For gen9+ platforms, the values we program here
	 * will be the final target values which will get automatically latched
	 * at vblank time; no further programming will be necessary.
	 *
	 * If a platform hasn't been transitioned to atomic watermarks yet,
	 * we'll continue to update watermarks the old way, if flags tell
	 * us to.
	 */
	if (dev_priv->display.initial_watermarks != NULL)
		dev_priv->display.initial_watermarks(pipe_config);
	else if (pipe_config->update_wm_pre)
		intel_update_watermarks(&crtc->base);
}
4705
/*
 * Disable every plane selected by @plane_mask on @crtc (plus the legacy
 * overlay) and signal a frontbuffer flip covering the whole pipe.
 */
static void intel_crtc_disable_planes(struct drm_crtc *crtc, unsigned plane_mask)
{
	struct drm_device *dev = crtc->dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_plane *p;
	int pipe = intel_crtc->pipe;

	intel_crtc_dpms_overlay_disable(intel_crtc);

	/* Invoke each plane's disable hook for the planes in the mask */
	drm_for_each_plane_mask(p, dev, plane_mask)
		to_intel_plane(p)->disable_plane(p, crtc);

	/*
	 * FIXME: Once we grow proper nuclear flip support out of this we need
	 * to compute the mask of flip planes precisely. For the time being
	 * consider this a flip to a NULL plane.
	 */
	intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe));
}
4725
f67a559d
JB
/*
 * Full mode-set enable sequence for an ILK-style (PCH) crtc. The ordering
 * of the steps below follows the hardware enable sequence and must not be
 * reshuffled: DPLL prep, pipe timings, pipeconf, encoder pre_enable, FDI
 * PLL, panel fitter, LUT, watermarks, pipe enable, PCH enable, vblank on,
 * encoder enable. FIFO underrun reporting is suppressed across the
 * sequence to hide known-spurious underruns (see comment below).
 */
static void ironlake_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;
	struct intel_crtc_state *pipe_config =
		to_intel_crtc_state(crtc->state);

	if (WARN_ON(intel_crtc->active))
		return;

	/*
	 * Sometimes spurious CPU pipe underruns happen during FDI
	 * training, at least with VGA+HDMI cloning. Suppress them.
	 *
	 * On ILK we get an occasional spurious CPU pipe underruns
	 * between eDP port A enable and vdd enable. Also PCH port
	 * enable seems to result in the occasional CPU pipe underrun.
	 *
	 * Spurious PCH underruns also occur during PCH enabling.
	 */
	if (intel_crtc->config->has_pch_encoder || IS_GEN5(dev_priv))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	if (intel_crtc->config->has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	if (intel_crtc->config->has_pch_encoder)
		intel_prepare_shared_dpll(intel_crtc);

	if (intel_crtc->config->has_dp_encoder)
		intel_dp_set_m_n(intel_crtc, M1_N1);

	intel_set_pipe_timings(intel_crtc);
	intel_set_pipe_src_size(intel_crtc);

	if (intel_crtc->config->has_pch_encoder) {
		intel_cpu_transcoder_set_m_n(intel_crtc,
				     &intel_crtc->config->fdi_m_n, NULL);
	}

	ironlake_set_pipeconf(crtc);

	intel_crtc->active = true;

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->pre_enable)
			encoder->pre_enable(encoder);

	if (intel_crtc->config->has_pch_encoder) {
		/* Note: FDI PLL enabling _must_ be done before we enable the
		 * cpu pipes, hence this is separate from all the other fdi/pch
		 * enabling. */
		ironlake_fdi_pll_enable(intel_crtc);
	} else {
		assert_fdi_tx_disabled(dev_priv, pipe);
		assert_fdi_rx_disabled(dev_priv, pipe);
	}

	ironlake_pfit_enable(intel_crtc);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(&pipe_config->base);

	if (dev_priv->display.initial_watermarks != NULL)
		dev_priv->display.initial_watermarks(intel_crtc->config);
	intel_enable_pipe(intel_crtc);

	if (intel_crtc->config->has_pch_encoder)
		ironlake_pch_enable(crtc);

	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->enable(encoder);

	if (HAS_PCH_CPT(dev))
		cpt_verify_modeset(dev, intel_crtc->pipe);

	/* Must wait for vblank to avoid spurious PCH FIFO underruns */
	if (intel_crtc->config->has_pch_encoder)
		intel_wait_for_vblank(dev, pipe);
	/* Re-arm underrun reporting now that the sequence is complete */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
4816
42db64ef
PZ
4817/* IPS only exists on ULT machines and is tied to pipe A. */
4818static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
4819{
f5adf94e 4820 return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A;
42db64ef
PZ
4821}
4822
4f771f10
PZ
/*
 * Full mode-set enable sequence for a HSW+ (DDI) crtc. Handles the PCH
 * (LPT), DSI, and DP-MST variants via the has_*_encoder/dp_encoder_is_mst
 * flags in the crtc state. Step ordering follows the hardware enable
 * sequence and must be preserved. Underrun reporting is suppressed and
 * re-armed around the sequence, and the HSW dual-vblank workaround runs
 * at the end.
 */
static void haswell_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe, hsw_workaround_pipe;
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
	struct intel_crtc_state *pipe_config =
		to_intel_crtc_state(crtc->state);

	if (WARN_ON(intel_crtc->active))
		return;

	if (intel_crtc->config->has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
						      false);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->pre_pll_enable)
			encoder->pre_pll_enable(encoder);

	if (intel_crtc->config->shared_dpll)
		intel_enable_shared_dpll(intel_crtc);

	if (intel_crtc->config->has_dp_encoder)
		intel_dp_set_m_n(intel_crtc, M1_N1);

	/* DSI transcoders program their own timings */
	if (!intel_crtc->config->has_dsi_encoder)
		intel_set_pipe_timings(intel_crtc);

	intel_set_pipe_src_size(intel_crtc);

	if (cpu_transcoder != TRANSCODER_EDP &&
	    !transcoder_is_dsi(cpu_transcoder)) {
		I915_WRITE(PIPE_MULT(cpu_transcoder),
			   intel_crtc->config->pixel_multiplier - 1);
	}

	if (intel_crtc->config->has_pch_encoder) {
		intel_cpu_transcoder_set_m_n(intel_crtc,
				     &intel_crtc->config->fdi_m_n, NULL);
	}

	if (!intel_crtc->config->has_dsi_encoder)
		haswell_set_pipeconf(crtc);

	haswell_set_pipemisc(crtc);

	intel_color_set_csc(&pipe_config->base);

	intel_crtc->active = true;

	/* PCH case: keep CPU underruns masked until the end of the sequence */
	if (intel_crtc->config->has_pch_encoder)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	else
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	for_each_encoder_on_crtc(dev, crtc, encoder) {
		if (encoder->pre_enable)
			encoder->pre_enable(encoder);
	}

	if (intel_crtc->config->has_pch_encoder)
		dev_priv->display.fdi_link_train(crtc);

	if (!intel_crtc->config->has_dsi_encoder)
		intel_ddi_enable_pipe_clock(intel_crtc);

	if (INTEL_INFO(dev)->gen >= 9)
		skylake_pfit_enable(intel_crtc);
	else
		ironlake_pfit_enable(intel_crtc);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(&pipe_config->base);

	intel_ddi_set_pipe_settings(crtc);
	if (!intel_crtc->config->has_dsi_encoder)
		intel_ddi_enable_transcoder_func(crtc);

	if (dev_priv->display.initial_watermarks != NULL)
		dev_priv->display.initial_watermarks(pipe_config);
	else
		intel_update_watermarks(crtc);

	/* XXX: Do the pipe assertions at the right place for BXT DSI. */
	if (!intel_crtc->config->has_dsi_encoder)
		intel_enable_pipe(intel_crtc);

	if (intel_crtc->config->has_pch_encoder)
		lpt_pch_enable(crtc);

	if (intel_crtc->config->dp_encoder_is_mst)
		intel_ddi_set_vc_payload_alloc(crtc, true);

	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder) {
		encoder->enable(encoder);
		intel_opregion_notify_encoder(encoder, true);
	}

	if (intel_crtc->config->has_pch_encoder) {
		/* Two vblank waits before re-arming underrun reporting */
		intel_wait_for_vblank(dev, pipe);
		intel_wait_for_vblank(dev, pipe);
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
						      true);
	}

	/* If we change the relative order between pipe/planes enabling, we need
	 * to change the workaround. */
	hsw_workaround_pipe = pipe_config->hsw_workaround_pipe;
	if (IS_HASWELL(dev) && hsw_workaround_pipe != INVALID_PIPE) {
		intel_wait_for_vblank(dev, hsw_workaround_pipe);
		intel_wait_for_vblank(dev, hsw_workaround_pipe);
	}
}
4946
/*
 * Disable the ILK-style panel fitter for @crtc. When @force is false the
 * registers are only touched if the fitter is actually in use (see
 * comment below regarding the haswell power well).
 */
static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = crtc->pipe;

	/* To avoid upsetting the power well on haswell only disable the pfit if
	 * it's in use. The hw state code will make sure we get this right. */
	if (force || crtc->config->pch_pfit.enabled) {
		I915_WRITE(PF_CTL(pipe), 0);
		I915_WRITE(PF_WIN_POS(pipe), 0);
		I915_WRITE(PF_WIN_SZ(pipe), 0);
	}
}
4961
6be4a607
JB
/*
 * Full disable sequence for an ILK-style (PCH) crtc: encoders off, vblank
 * off, pipe off, panel fitter off, FDI down, PCH transcoder off (with the
 * CPT-specific TRANS_DP_CTL/DPLL_SEL teardown), then FDI PLL off. Mirrors
 * the enable ordering in ironlake_crtc_enable() in reverse.
 */
static void ironlake_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;

	/*
	 * Sometimes spurious CPU pipe underruns happen when the
	 * pipe is already disabled, but FDI RX/TX is still enabled.
	 * Happens at least with VGA+HDMI cloning. Suppress them.
	 */
	if (intel_crtc->config->has_pch_encoder) {
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
		intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
	}

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->disable(encoder);

	drm_crtc_vblank_off(crtc);
	assert_vblank_disabled(crtc);

	intel_disable_pipe(intel_crtc);

	ironlake_pfit_disable(intel_crtc, false);

	if (intel_crtc->config->has_pch_encoder)
		ironlake_fdi_disable(crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->post_disable)
			encoder->post_disable(encoder);

	if (intel_crtc->config->has_pch_encoder) {
		ironlake_disable_pch_transcoder(dev_priv, pipe);

		if (HAS_PCH_CPT(dev)) {
			i915_reg_t reg;
			u32 temp;

			/* disable TRANS_DP_CTL */
			reg = TRANS_DP_CTL(pipe);
			temp = I915_READ(reg);
			temp &= ~(TRANS_DP_OUTPUT_ENABLE |
				  TRANS_DP_PORT_SEL_MASK);
			temp |= TRANS_DP_PORT_SEL_NONE;
			I915_WRITE(reg, temp);

			/* disable DPLL_SEL */
			temp = I915_READ(PCH_DPLL_SEL);
			temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
			I915_WRITE(PCH_DPLL_SEL, temp);
		}

		ironlake_fdi_pll_disable(intel_crtc);
	}

	/* Re-arm underrun reporting now that everything is off */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
1b3c7a47 5024
/*
 * Full disable sequence for a HSW+ (DDI) crtc, handling the LPT PCH, DSI
 * and DP-MST variants. Mirrors haswell_crtc_enable() in reverse: encoders
 * off (with opregion notification), vblank off, pipe off, transcoder func
 * off, scaler/fitter off, pipe clock off, then LPT transcoder/iCLKIP/FDI
 * teardown.
 */
static void haswell_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;

	if (intel_crtc->config->has_pch_encoder)
		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
						      false);

	for_each_encoder_on_crtc(dev, crtc, encoder) {
		intel_opregion_notify_encoder(encoder, false);
		encoder->disable(encoder);
	}

	drm_crtc_vblank_off(crtc);
	assert_vblank_disabled(crtc);

	/* XXX: Do the pipe assertions at the right place for BXT DSI. */
	if (!intel_crtc->config->has_dsi_encoder)
		intel_disable_pipe(intel_crtc);

	if (intel_crtc->config->dp_encoder_is_mst)
		intel_ddi_set_vc_payload_alloc(crtc, false);

	if (!intel_crtc->config->has_dsi_encoder)
		intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);

	if (INTEL_INFO(dev)->gen >= 9)
		skylake_scaler_disable(intel_crtc);
	else
		ironlake_pfit_disable(intel_crtc, false);

	if (!intel_crtc->config->has_dsi_encoder)
		intel_ddi_disable_pipe_clock(intel_crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->post_disable)
			encoder->post_disable(encoder);

	if (intel_crtc->config->has_pch_encoder) {
		lpt_disable_pch_transcoder(dev_priv);
		lpt_disable_iclkip(dev_priv);
		intel_ddi_fdi_disable(crtc);

		intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
						      true);
	}
}
5076
2dd24552
JB
/*
 * Program and enable the GMCH panel fitter for @crtc, if the crtc state
 * computed a non-zero fitter control value. Must be called while the pipe
 * is still disabled (asserted below).
 */
static void i9xx_pfit_enable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc_state *pipe_config = crtc->config;

	/* A zero control value means no fitting requested */
	if (!pipe_config->gmch_pfit.control)
		return;

	/*
	 * The panel fitter should only be adjusted whilst the pipe is disabled,
	 * according to register description and PRM.
	 */
	WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
	assert_pipe_disabled(dev_priv, crtc->pipe);

	I915_WRITE(PFIT_PGM_RATIOS, pipe_config->gmch_pfit.pgm_ratios);
	I915_WRITE(PFIT_CONTROL, pipe_config->gmch_pfit.control);

	/* Border color in case we don't scale up to the full screen. Black by
	 * default, change to something else for debugging. */
	I915_WRITE(BCLRPAT(crtc->pipe), 0);
}
5100
d05410f9
DA
/*
 * Map a DDI port to the power domain covering its lanes. Unknown ports
 * are logged via MISSING_CASE and fall back to POWER_DOMAIN_PORT_OTHER.
 */
static enum intel_display_power_domain port_to_power_domain(enum port port)
{
	switch (port) {
	case PORT_A:
		return POWER_DOMAIN_PORT_DDI_A_LANES;
	case PORT_B:
		return POWER_DOMAIN_PORT_DDI_B_LANES;
	case PORT_C:
		return POWER_DOMAIN_PORT_DDI_C_LANES;
	case PORT_D:
		return POWER_DOMAIN_PORT_DDI_D_LANES;
	case PORT_E:
		return POWER_DOMAIN_PORT_DDI_E_LANES;
	default:
		MISSING_CASE(port);
		return POWER_DOMAIN_PORT_OTHER;
	}
}
5119
25f78f58
VS
/*
 * Map a DDI port to the power domain covering its AUX channel. Unknown
 * ports are logged via MISSING_CASE and fall back to POWER_DOMAIN_AUX_A.
 */
static enum intel_display_power_domain port_to_aux_power_domain(enum port port)
{
	switch (port) {
	case PORT_A:
		return POWER_DOMAIN_AUX_A;
	case PORT_B:
		return POWER_DOMAIN_AUX_B;
	case PORT_C:
		return POWER_DOMAIN_AUX_C;
	case PORT_D:
		return POWER_DOMAIN_AUX_D;
	case PORT_E:
		/* FIXME: Check VBT for actual wiring of PORT E */
		return POWER_DOMAIN_AUX_D;
	default:
		MISSING_CASE(port);
		return POWER_DOMAIN_AUX_A;
	}
}
5139
319be8ae
ID
/*
 * Return the power domain an encoder's output port depends on, based on
 * the encoder type. Digital outputs resolve through their digital port;
 * analog, DSI and unknown types map to fixed domains.
 */
enum intel_display_power_domain
intel_display_port_power_domain(struct intel_encoder *intel_encoder)
{
	struct drm_device *dev = intel_encoder->base.dev;
	struct intel_digital_port *intel_dig_port;

	switch (intel_encoder->type) {
	case INTEL_OUTPUT_UNKNOWN:
		/* Only DDI platforms should ever use this output type */
		WARN_ON_ONCE(!HAS_DDI(dev));
		/* fall through - treat like the other digital port types */
	case INTEL_OUTPUT_DISPLAYPORT:
	case INTEL_OUTPUT_HDMI:
	case INTEL_OUTPUT_EDP:
		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
		return port_to_power_domain(intel_dig_port->port);
	case INTEL_OUTPUT_DP_MST:
		/* MST encoders resolve through their primary digital port */
		intel_dig_port = enc_to_mst(&intel_encoder->base)->primary;
		return port_to_power_domain(intel_dig_port->port);
	case INTEL_OUTPUT_ANALOG:
		return POWER_DOMAIN_PORT_CRT;
	case INTEL_OUTPUT_DSI:
		return POWER_DOMAIN_PORT_DSI;
	default:
		return POWER_DOMAIN_PORT_OTHER;
	}
}
5166
25f78f58
VS
/*
 * Return the power domain an encoder's AUX channel depends on, based on
 * the encoder type. Like intel_display_port_power_domain() but for the
 * AUX wiring; unknown/HDMI types deliberately fall through to the DP
 * handling (see comment below).
 */
enum intel_display_power_domain
intel_display_port_aux_power_domain(struct intel_encoder *intel_encoder)
{
	struct drm_device *dev = intel_encoder->base.dev;
	struct intel_digital_port *intel_dig_port;

	switch (intel_encoder->type) {
	case INTEL_OUTPUT_UNKNOWN:
	case INTEL_OUTPUT_HDMI:
		/*
		 * Only DDI platforms should ever use these output types.
		 * We can get here after the HDMI detect code has already set
		 * the type of the shared encoder. Since we can't be sure
		 * what's the status of the given connectors, play safe and
		 * run the DP detection too.
		 */
		WARN_ON_ONCE(!HAS_DDI(dev));
		/* fall through - resolve via the digital port like DP/eDP */
	case INTEL_OUTPUT_DISPLAYPORT:
	case INTEL_OUTPUT_EDP:
		intel_dig_port = enc_to_dig_port(&intel_encoder->base);
		return port_to_aux_power_domain(intel_dig_port->port);
	case INTEL_OUTPUT_DP_MST:
		intel_dig_port = enc_to_mst(&intel_encoder->base)->primary;
		return port_to_aux_power_domain(intel_dig_port->port);
	default:
		MISSING_CASE(intel_encoder->type);
		return POWER_DOMAIN_AUX_A;
	}
}
5196
74bff5f9
ML
/*
 * Compute the bitmask of power domains required by @crtc in the given
 * @crtc_state: pipe, transcoder, panel fitter (if used or forced through),
 * every attached encoder's port domain, and the shared-DPLL domain.
 * Returns 0 for an inactive crtc.
 */
static unsigned long get_crtc_power_domains(struct drm_crtc *crtc,
					    struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_encoder *encoder;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum pipe pipe = intel_crtc->pipe;
	unsigned long mask;
	enum transcoder transcoder = crtc_state->cpu_transcoder;

	if (!crtc_state->base.active)
		return 0;

	mask = BIT(POWER_DOMAIN_PIPE(pipe));
	mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
	if (crtc_state->pch_pfit.enabled ||
	    crtc_state->pch_pfit.force_thru)
		mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));

	/* Each encoder driven by this crtc contributes its port domain */
	drm_for_each_encoder_mask(encoder, dev, crtc_state->base.encoder_mask) {
		struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

		mask |= BIT(intel_display_port_power_domain(intel_encoder));
	}

	if (crtc_state->shared_dpll)
		mask |= BIT(POWER_DOMAIN_PLLS);

	return mask;
}
5227
74bff5f9
ML
/*
 * Update the crtc's cached power-domain mask for @crtc_state, grab
 * references for every newly required domain, and return the mask of
 * domains that are no longer needed so the caller can drop them (after
 * the commit) via modeset_put_power_domains().
 */
static unsigned long
modeset_get_crtc_power_domains(struct drm_crtc *crtc,
			       struct intel_crtc_state *crtc_state)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum intel_display_power_domain domain;
	unsigned long domains, new_domains, old_domains;

	old_domains = intel_crtc->enabled_power_domains;
	intel_crtc->enabled_power_domains = new_domains =
		get_crtc_power_domains(crtc, crtc_state);

	/* Domains required now but not held before: take references */
	domains = new_domains & ~old_domains;

	for_each_power_domain(domain, domains)
		intel_display_power_get(dev_priv, domain);

	/* Domains held before but no longer required: caller must put these */
	return old_domains & ~new_domains;
}
5248
/*
 * Drop one power reference for each domain set in @domains; the
 * counterpart to the references taken in modeset_get_crtc_power_domains().
 */
static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
				      unsigned long domains)
{
	enum intel_display_power_domain domain;

	for_each_power_domain(domain, domains)
		intel_display_power_put(dev_priv, domain);
}
77d22dca 5257
adafdc6f
MK
5258static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
5259{
5260 int max_cdclk_freq = dev_priv->max_cdclk_freq;
5261
5262 if (INTEL_INFO(dev_priv)->gen >= 9 ||
5263 IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
5264 return max_cdclk_freq;
5265 else if (IS_CHERRYVIEW(dev_priv))
5266 return max_cdclk_freq*95/100;
5267 else if (INTEL_INFO(dev_priv)->gen < 4)
5268 return 2*max_cdclk_freq*90/100;
5269 else
5270 return max_cdclk_freq*90/100;
5271}
5272
b2045352
VS
5273static int skl_calc_cdclk(int max_pixclk, int vco);
5274
560a7ae4
DL
/*
 * Determine and cache the platform's maximum CD clock (and from it the
 * maximum dot clock) in dev_priv. On SKL/KBL the limit is read from the
 * SKL_DFSM fuse register; other platforms use fixed per-platform values.
 */
static void intel_update_max_cdclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
		u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;
		int max_cdclk, vco;

		vco = dev_priv->skl_preferred_vco_freq;
		WARN_ON(vco != 8100000 && vco != 8640000);

		/*
		 * Use the lower (vco 8640) cdclk values as a
		 * first guess. skl_calc_cdclk() will correct it
		 * if the preferred vco is 8100 instead.
		 */
		if (limit == SKL_DFSM_CDCLK_LIMIT_675)
			max_cdclk = 617143;
		else if (limit == SKL_DFSM_CDCLK_LIMIT_540)
			max_cdclk = 540000;
		else if (limit == SKL_DFSM_CDCLK_LIMIT_450)
			max_cdclk = 432000;
		else
			max_cdclk = 308571;

		dev_priv->max_cdclk_freq = skl_calc_cdclk(max_cdclk, vco);
	} else if (IS_BROXTON(dev)) {
		dev_priv->max_cdclk_freq = 624000;
	} else if (IS_BROADWELL(dev))  {
		/*
		 * FIXME with extra cooling we can allow
		 * 540 MHz for ULX and 675 Mhz for ULT.
		 * How can we know if extra cooling is
		 * available? PCI ID, VTB, something else?
		 */
		if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
			dev_priv->max_cdclk_freq = 450000;
		else if (IS_BDW_ULX(dev))
			dev_priv->max_cdclk_freq = 450000;
		else if (IS_BDW_ULT(dev))
			dev_priv->max_cdclk_freq = 540000;
		else
			dev_priv->max_cdclk_freq = 675000;
	} else if (IS_CHERRYVIEW(dev)) {
		dev_priv->max_cdclk_freq = 320000;
	} else if (IS_VALLEYVIEW(dev)) {
		dev_priv->max_cdclk_freq = 400000;
	} else {
		/* otherwise assume cdclk is fixed */
		dev_priv->max_cdclk_freq = dev_priv->cdclk_freq;
	}

	/* Max dotclock depends on the max cdclk computed above. */
	dev_priv->max_dotclk_freq = intel_compute_max_dotclk(dev_priv);

	DRM_DEBUG_DRIVER("Max CD clock rate: %d kHz\n",
			 dev_priv->max_cdclk_freq);

	DRM_DEBUG_DRIVER("Max dotclock rate: %d kHz\n",
			 dev_priv->max_dotclk_freq);
}
5335
/*
 * Re-read the current CD clock from hardware into dev_priv->cdclk_freq and
 * log it. On VLV/CHV also reprogram the GMBus reference divider, which is
 * derived from cdclk.
 */
static void intel_update_cdclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	dev_priv->cdclk_freq = dev_priv->display.get_display_clock_speed(dev);

	if (INTEL_GEN(dev_priv) >= 9)
		DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz, VCO: %d kHz, ref: %d kHz\n",
				 dev_priv->cdclk_freq, dev_priv->cdclk_pll.vco,
				 dev_priv->cdclk_pll.ref);
	else
		DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz\n",
				 dev_priv->cdclk_freq);

	/*
	 * 9:0 CMBUS [sic] CDCLK frequency (cdfreq):
	 * Programmng [sic] note: bit[9:2] should be programmed to the number
	 * of cdclk that generates 4MHz reference clock freq which is used to
	 * generate GMBus clock. This will vary with the cdclk freq.
	 */
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		I915_WRITE(GMBUSFREQ_VLV, DIV_ROUND_UP(dev_priv->cdclk_freq, 1000));
}
5359
92891e45
VS
/*
 * Convert a cdclk frequency in kHz to the CDCLK_CTL "decimal" field
 * encoding: .1 fixpoint MHz with a -1 MHz offset.
 */
static int skl_cdclk_decimal(int cdclk)
{
	int offset_khz = cdclk - 1000;

	return DIV_ROUND_CLOSEST(offset_khz, 500);
}
5365
5f199dfa
VS
/*
 * Return the DE PLL VCO frequency (kHz) needed to produce @cdclk, or 0 when
 * cdclk equals the PLL reference (PLL bypassed/off). All supported cdclks
 * except 624000 use a ratio of 60.
 */
static int bxt_de_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
{
	int ratio;

	if (cdclk == dev_priv->cdclk_pll.ref)
		return 0;

	switch (cdclk) {
	default:
		MISSING_CASE(cdclk);
		/* fall through - treat unknown cdclk like the ratio-60 group */
	case 144000:
	case 288000:
	case 384000:
	case 576000:
		ratio = 60;
		break;
	case 624000:
		ratio = 65;
		break;
	}

	return dev_priv->cdclk_pll.ref * ratio;
}
5389
2b73001e
VS
/*
 * Disable the BXT DE PLL and wait for it to report unlocked; clear the
 * cached VCO so later code knows the PLL is off.
 */
static void bxt_de_pll_disable(struct drm_i915_private *dev_priv)
{
	I915_WRITE(BXT_DE_PLL_ENABLE, 0);

	/* Timeout 200us */
	if (intel_wait_for_register(dev_priv,
				    BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 0,
				    1))
		DRM_ERROR("timeout waiting for DE PLL unlock\n");

	dev_priv->cdclk_pll.vco = 0;
}
5402
5f199dfa 5403static void bxt_de_pll_enable(struct drm_i915_private *dev_priv, int vco)
2b73001e 5404{
5f199dfa 5405 int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->cdclk_pll.ref);
2b73001e
VS
5406 u32 val;
5407
5408 val = I915_READ(BXT_DE_PLL_CTL);
5409 val &= ~BXT_DE_PLL_RATIO_MASK;
5f199dfa 5410 val |= BXT_DE_PLL_RATIO(ratio);
2b73001e
VS
5411 I915_WRITE(BXT_DE_PLL_CTL, val);
5412
5413 I915_WRITE(BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE);
5414
5415 /* Timeout 200us */
e084e1b9
CW
5416 if (intel_wait_for_register(dev_priv,
5417 BXT_DE_PLL_ENABLE,
5418 BXT_DE_PLL_LOCK,
5419 BXT_DE_PLL_LOCK,
5420 1))
2b73001e 5421 DRM_ERROR("timeout waiting for DE PLL lock\n");
83d7c81f 5422
5f199dfa 5423 dev_priv->cdclk_pll.vco = vco;
2b73001e
VS
5424}
5425
324513c0 5426static void bxt_set_cdclk(struct drm_i915_private *dev_priv, int cdclk)
f8437dd1 5427{
5f199dfa
VS
5428 u32 val, divider;
5429 int vco, ret;
f8437dd1 5430
5f199dfa
VS
5431 vco = bxt_de_pll_vco(dev_priv, cdclk);
5432
5433 DRM_DEBUG_DRIVER("Changing CDCLK to %d kHz (VCO %d kHz)\n", cdclk, vco);
5434
5435 /* cdclk = vco / 2 / div{1,1.5,2,4} */
5436 switch (DIV_ROUND_CLOSEST(vco, cdclk)) {
5437 case 8:
f8437dd1 5438 divider = BXT_CDCLK_CD2X_DIV_SEL_4;
f8437dd1 5439 break;
5f199dfa 5440 case 4:
f8437dd1 5441 divider = BXT_CDCLK_CD2X_DIV_SEL_2;
f8437dd1 5442 break;
5f199dfa 5443 case 3:
f8437dd1 5444 divider = BXT_CDCLK_CD2X_DIV_SEL_1_5;
f8437dd1 5445 break;
5f199dfa 5446 case 2:
f8437dd1 5447 divider = BXT_CDCLK_CD2X_DIV_SEL_1;
f8437dd1
VK
5448 break;
5449 default:
5f199dfa
VS
5450 WARN_ON(cdclk != dev_priv->cdclk_pll.ref);
5451 WARN_ON(vco != 0);
f8437dd1 5452
5f199dfa
VS
5453 divider = BXT_CDCLK_CD2X_DIV_SEL_1;
5454 break;
f8437dd1
VK
5455 }
5456
f8437dd1 5457 /* Inform power controller of upcoming frequency change */
5f199dfa 5458 mutex_lock(&dev_priv->rps.hw_lock);
f8437dd1
VK
5459 ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
5460 0x80000000);
5461 mutex_unlock(&dev_priv->rps.hw_lock);
5462
5463 if (ret) {
5464 DRM_ERROR("PCode CDCLK freq change notify failed (err %d, freq %d)\n",
9ef56154 5465 ret, cdclk);
f8437dd1
VK
5466 return;
5467 }
5468
5f199dfa
VS
5469 if (dev_priv->cdclk_pll.vco != 0 &&
5470 dev_priv->cdclk_pll.vco != vco)
2b73001e 5471 bxt_de_pll_disable(dev_priv);
f8437dd1 5472
5f199dfa
VS
5473 if (dev_priv->cdclk_pll.vco != vco)
5474 bxt_de_pll_enable(dev_priv, vco);
f8437dd1 5475
5f199dfa
VS
5476 val = divider | skl_cdclk_decimal(cdclk);
5477 /*
5478 * FIXME if only the cd2x divider needs changing, it could be done
5479 * without shutting off the pipe (if only one pipe is active).
5480 */
5481 val |= BXT_CDCLK_CD2X_PIPE_NONE;
5482 /*
5483 * Disable SSA Precharge when CD clock frequency < 500 MHz,
5484 * enable otherwise.
5485 */
5486 if (cdclk >= 500000)
5487 val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
5488 I915_WRITE(CDCLK_CTL, val);
f8437dd1
VK
5489
5490 mutex_lock(&dev_priv->rps.hw_lock);
5491 ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
9ef56154 5492 DIV_ROUND_UP(cdclk, 25000));
f8437dd1
VK
5493 mutex_unlock(&dev_priv->rps.hw_lock);
5494
5495 if (ret) {
5496 DRM_ERROR("PCode CDCLK freq set failed, (err %d, freq %d)\n",
9ef56154 5497 ret, cdclk);
f8437dd1
VK
5498 return;
5499 }
5500
91c8a326 5501 intel_update_cdclk(&dev_priv->drm);
f8437dd1
VK
5502}
5503
d66a2194 5504static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
f8437dd1 5505{
d66a2194
ID
5506 u32 cdctl, expected;
5507
91c8a326 5508 intel_update_cdclk(&dev_priv->drm);
f8437dd1 5509
d66a2194
ID
5510 if (dev_priv->cdclk_pll.vco == 0 ||
5511 dev_priv->cdclk_freq == dev_priv->cdclk_pll.ref)
5512 goto sanitize;
5513
5514 /* DPLL okay; verify the cdclock
5515 *
5516 * Some BIOS versions leave an incorrect decimal frequency value and
5517 * set reserved MBZ bits in CDCLK_CTL at least during exiting from S4,
5518 * so sanitize this register.
5519 */
5520 cdctl = I915_READ(CDCLK_CTL);
5521 /*
5522 * Let's ignore the pipe field, since BIOS could have configured the
5523 * dividers both synching to an active pipe, or asynchronously
5524 * (PIPE_NONE).
5525 */
5526 cdctl &= ~BXT_CDCLK_CD2X_PIPE_NONE;
5527
5528 expected = (cdctl & BXT_CDCLK_CD2X_DIV_SEL_MASK) |
5529 skl_cdclk_decimal(dev_priv->cdclk_freq);
5530 /*
5531 * Disable SSA Precharge when CD clock frequency < 500 MHz,
5532 * enable otherwise.
5533 */
5534 if (dev_priv->cdclk_freq >= 500000)
5535 expected |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
5536
5537 if (cdctl == expected)
5538 /* All well; nothing to sanitize */
5539 return;
5540
5541sanitize:
5542 DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n");
5543
5544 /* force cdclk programming */
5545 dev_priv->cdclk_freq = 0;
5546
5547 /* force full PLL disable + enable */
5548 dev_priv->cdclk_pll.vco = -1;
5549}
5550
324513c0 5551void bxt_init_cdclk(struct drm_i915_private *dev_priv)
d66a2194
ID
5552{
5553 bxt_sanitize_cdclk(dev_priv);
5554
5555 if (dev_priv->cdclk_freq != 0 && dev_priv->cdclk_pll.vco != 0)
089c6fd5 5556 return;
c2e001ef 5557
f8437dd1
VK
5558 /*
5559 * FIXME:
5560 * - The initial CDCLK needs to be read from VBT.
5561 * Need to make this change after VBT has changes for BXT.
f8437dd1 5562 */
324513c0 5563 bxt_set_cdclk(dev_priv, bxt_calc_cdclk(0));
f8437dd1
VK
5564}
5565
324513c0 5566void bxt_uninit_cdclk(struct drm_i915_private *dev_priv)
f8437dd1 5567{
324513c0 5568 bxt_set_cdclk(dev_priv, dev_priv->cdclk_pll.ref);
f8437dd1
VK
5569}
5570
a8ca4934
VS
/*
 * Pick the lowest SKL cdclk (kHz) that can carry @max_pixclk for the given
 * DPLL0 VCO. The two VCOs (8640 MHz vs 8100 MHz) each have their own ladder
 * of achievable cdclk steps; walk the ladder bottom-up and return the first
 * step that fits.
 */
static int skl_calc_cdclk(int max_pixclk, int vco)
{
	if (vco == 8640000) {
		if (max_pixclk <= 308571)
			return 308571;
		if (max_pixclk <= 432000)
			return 432000;
		if (max_pixclk <= 540000)
			return 540000;
		return 617143;
	}

	if (max_pixclk <= 337500)
		return 337500;
	if (max_pixclk <= 450000)
		return 450000;
	if (max_pixclk <= 540000)
		return 540000;
	return 675000;
}
5593
ea61791e
VS
/*
 * Read back the DPLL0 hardware state and update the cached cdclk PLL
 * reference/VCO. Leaves vco == 0 when DPLL0 is disabled or in a
 * configuration we don't expect (warned on).
 */
static void
skl_dpll0_update(struct drm_i915_private *dev_priv)
{
	u32 val;

	dev_priv->cdclk_pll.ref = 24000;
	dev_priv->cdclk_pll.vco = 0;

	val = I915_READ(LCPLL1_CTL);
	if ((val & LCPLL_PLL_ENABLE) == 0)
		return;

	if (WARN_ON((val & LCPLL_PLL_LOCK) == 0))
		return;

	val = I915_READ(DPLL_CTRL1);

	/* We only ever program DPLL0 in override (non-HDMI, non-SSC) mode. */
	if (WARN_ON((val & (DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) |
			    DPLL_CTRL1_SSC(SKL_DPLL0) |
			    DPLL_CTRL1_OVERRIDE(SKL_DPLL0))) !=
		    DPLL_CTRL1_OVERRIDE(SKL_DPLL0)))
		return;

	/* The programmed link rate determines which VCO DPLL0 is running at. */
	switch (val & DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)) {
	case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, SKL_DPLL0):
	case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, SKL_DPLL0):
	case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, SKL_DPLL0):
	case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, SKL_DPLL0):
		dev_priv->cdclk_pll.vco = 8100000;
		break;
	case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, SKL_DPLL0):
	case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, SKL_DPLL0):
		dev_priv->cdclk_pll.vco = 8640000;
		break;
	default:
		MISSING_CASE(val & DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0));
		break;
	}
}
5633
b2045352
VS
5634void skl_set_preferred_cdclk_vco(struct drm_i915_private *dev_priv, int vco)
5635{
5636 bool changed = dev_priv->skl_preferred_vco_freq != vco;
5637
5638 dev_priv->skl_preferred_vco_freq = vco;
5639
5640 if (changed)
91c8a326 5641 intel_update_max_cdclk(&dev_priv->drm);
b2045352
VS
5642}
5643
5d96d8af 5644static void
3861fc60 5645skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco)
5d96d8af 5646{
a8ca4934 5647 int min_cdclk = skl_calc_cdclk(0, vco);
5d96d8af
DL
5648 u32 val;
5649
63911d72 5650 WARN_ON(vco != 8100000 && vco != 8640000);
b2045352 5651
5d96d8af 5652 /* select the minimum CDCLK before enabling DPLL 0 */
9ef56154 5653 val = CDCLK_FREQ_337_308 | skl_cdclk_decimal(min_cdclk);
5d96d8af
DL
5654 I915_WRITE(CDCLK_CTL, val);
5655 POSTING_READ(CDCLK_CTL);
5656
5657 /*
5658 * We always enable DPLL0 with the lowest link rate possible, but still
5659 * taking into account the VCO required to operate the eDP panel at the
5660 * desired frequency. The usual DP link rates operate with a VCO of
5661 * 8100 while the eDP 1.4 alternate link rates need a VCO of 8640.
5662 * The modeset code is responsible for the selection of the exact link
5663 * rate later on, with the constraint of choosing a frequency that
a8ca4934 5664 * works with vco.
5d96d8af
DL
5665 */
5666 val = I915_READ(DPLL_CTRL1);
5667
5668 val &= ~(DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) | DPLL_CTRL1_SSC(SKL_DPLL0) |
5669 DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0));
5670 val |= DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
63911d72 5671 if (vco == 8640000)
5d96d8af
DL
5672 val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
5673 SKL_DPLL0);
5674 else
5675 val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
5676 SKL_DPLL0);
5677
5678 I915_WRITE(DPLL_CTRL1, val);
5679 POSTING_READ(DPLL_CTRL1);
5680
5681 I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) | LCPLL_PLL_ENABLE);
5682
e24ca054
CW
5683 if (intel_wait_for_register(dev_priv,
5684 LCPLL1_CTL, LCPLL_PLL_LOCK, LCPLL_PLL_LOCK,
5685 5))
5d96d8af 5686 DRM_ERROR("DPLL0 not locked\n");
1cd593e0 5687
63911d72 5688 dev_priv->cdclk_pll.vco = vco;
b2045352
VS
5689
5690 /* We'll want to keep using the current vco from now on. */
5691 skl_set_preferred_cdclk_vco(dev_priv, vco);
5d96d8af
DL
5692}
5693
430e05de
VS
/* Disable DPLL0, wait for unlock, and clear the cached VCO. */
static void
skl_dpll0_disable(struct drm_i915_private *dev_priv)
{
	I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & ~LCPLL_PLL_ENABLE);
	if (intel_wait_for_register(dev_priv,
				    LCPLL1_CTL, LCPLL_PLL_LOCK, 0,
				    1))
		DRM_ERROR("Couldn't disable DPLL0\n");

	dev_priv->cdclk_pll.vco = 0;
}
5705
5d96d8af
DL
/*
 * Ask the PCU (power control unit) whether it is ready for a cdclk change.
 * Returns true only when the pcode read succeeded and the ready bit is set.
 */
static bool skl_cdclk_pcu_ready(struct drm_i915_private *dev_priv)
{
	int ret;
	u32 val;

	/* inform PCU we want to change CDCLK */
	val = SKL_CDCLK_PREPARE_FOR_CHANGE;
	mutex_lock(&dev_priv->rps.hw_lock);
	ret = sandybridge_pcode_read(dev_priv, SKL_PCODE_CDCLK_CONTROL, &val);
	mutex_unlock(&dev_priv->rps.hw_lock);

	return ret == 0 && (val & SKL_CDCLK_READY_FOR_CHANGE);
}
5719
5720static bool skl_cdclk_wait_for_pcu_ready(struct drm_i915_private *dev_priv)
5721{
5722 unsigned int i;
5723
5724 for (i = 0; i < 15; i++) {
5725 if (skl_cdclk_pcu_ready(dev_priv))
5726 return true;
5727 udelay(10);
5728 }
5729
5730 return false;
5731}
5732
1cd593e0 5733static void skl_set_cdclk(struct drm_i915_private *dev_priv, int cdclk, int vco)
5d96d8af 5734{
91c8a326 5735 struct drm_device *dev = &dev_priv->drm;
5d96d8af
DL
5736 u32 freq_select, pcu_ack;
5737
1cd593e0
VS
5738 WARN_ON((cdclk == 24000) != (vco == 0));
5739
63911d72 5740 DRM_DEBUG_DRIVER("Changing CDCLK to %d kHz (VCO %d kHz)\n", cdclk, vco);
5d96d8af
DL
5741
5742 if (!skl_cdclk_wait_for_pcu_ready(dev_priv)) {
5743 DRM_ERROR("failed to inform PCU about cdclk change\n");
5744 return;
5745 }
5746
5747 /* set CDCLK_CTL */
9ef56154 5748 switch (cdclk) {
5d96d8af
DL
5749 case 450000:
5750 case 432000:
5751 freq_select = CDCLK_FREQ_450_432;
5752 pcu_ack = 1;
5753 break;
5754 case 540000:
5755 freq_select = CDCLK_FREQ_540;
5756 pcu_ack = 2;
5757 break;
487ed2e4 5758 case 308571:
5d96d8af
DL
5759 case 337500:
5760 default:
5761 freq_select = CDCLK_FREQ_337_308;
5762 pcu_ack = 0;
5763 break;
487ed2e4 5764 case 617143:
5d96d8af
DL
5765 case 675000:
5766 freq_select = CDCLK_FREQ_675_617;
5767 pcu_ack = 3;
5768 break;
5769 }
5770
63911d72
VS
5771 if (dev_priv->cdclk_pll.vco != 0 &&
5772 dev_priv->cdclk_pll.vco != vco)
1cd593e0
VS
5773 skl_dpll0_disable(dev_priv);
5774
63911d72 5775 if (dev_priv->cdclk_pll.vco != vco)
1cd593e0
VS
5776 skl_dpll0_enable(dev_priv, vco);
5777
9ef56154 5778 I915_WRITE(CDCLK_CTL, freq_select | skl_cdclk_decimal(cdclk));
5d96d8af
DL
5779 POSTING_READ(CDCLK_CTL);
5780
5781 /* inform PCU of the change */
5782 mutex_lock(&dev_priv->rps.hw_lock);
5783 sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, pcu_ack);
5784 mutex_unlock(&dev_priv->rps.hw_lock);
560a7ae4
DL
5785
5786 intel_update_cdclk(dev);
5d96d8af
DL
5787}
5788
9f7eb31a
VS
5789static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv);
5790
5d96d8af
DL
/* Drop cdclk back to the DPLL0 reference (VCO off) on teardown. */
void skl_uninit_cdclk(struct drm_i915_private *dev_priv)
{
	skl_set_cdclk(dev_priv, dev_priv->cdclk_pll.ref, 0);
}
5795
/*
 * Driver-load cdclk init for SKL: sanitize the pre-OS state; if a valid
 * configuration survived, adopt its VCO as the preferred one, otherwise
 * program the minimal cdclk for the preferred (or default 8100 MHz) VCO.
 */
void skl_init_cdclk(struct drm_i915_private *dev_priv)
{
	int cdclk, vco;

	skl_sanitize_cdclk(dev_priv);

	if (dev_priv->cdclk_freq != 0 && dev_priv->cdclk_pll.vco != 0) {
		/*
		 * Use the current vco as our initial
		 * guess as to what the preferred vco is.
		 */
		if (dev_priv->skl_preferred_vco_freq == 0)
			skl_set_preferred_cdclk_vco(dev_priv,
						    dev_priv->cdclk_pll.vco);
		return;
	}

	vco = dev_priv->skl_preferred_vco_freq;
	if (vco == 0)
		vco = 8100000;
	cdclk = skl_calc_cdclk(0, vco);

	skl_set_cdclk(dev_priv, cdclk, vco);
}
5820
9f7eb31a 5821static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
c73666f3 5822{
09492498 5823 uint32_t cdctl, expected;
c73666f3 5824
f1b391a5
SK
5825 /*
5826 * check if the pre-os intialized the display
5827 * There is SWF18 scratchpad register defined which is set by the
5828 * pre-os which can be used by the OS drivers to check the status
5829 */
5830 if ((I915_READ(SWF_ILK(0x18)) & 0x00FFFFFF) == 0)
5831 goto sanitize;
5832
91c8a326 5833 intel_update_cdclk(&dev_priv->drm);
c73666f3 5834 /* Is PLL enabled and locked ? */
1c3f7700
ID
5835 if (dev_priv->cdclk_pll.vco == 0 ||
5836 dev_priv->cdclk_freq == dev_priv->cdclk_pll.ref)
c73666f3
SK
5837 goto sanitize;
5838
5839 /* DPLL okay; verify the cdclock
5840 *
5841 * Noticed in some instances that the freq selection is correct but
5842 * decimal part is programmed wrong from BIOS where pre-os does not
5843 * enable display. Verify the same as well.
5844 */
09492498
VS
5845 cdctl = I915_READ(CDCLK_CTL);
5846 expected = (cdctl & CDCLK_FREQ_SEL_MASK) |
5847 skl_cdclk_decimal(dev_priv->cdclk_freq);
5848 if (cdctl == expected)
c73666f3 5849 /* All well; nothing to sanitize */
9f7eb31a 5850 return;
c89e39f3 5851
9f7eb31a
VS
5852sanitize:
5853 DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n");
c73666f3 5854
9f7eb31a
VS
5855 /* force cdclk programming */
5856 dev_priv->cdclk_freq = 0;
5857 /* force full PLL disable + enable */
63911d72 5858 dev_priv->cdclk_pll.vco = -1;
c73666f3
SK
5859}
5860
30a970c6
JB
/*
 * Adjust CDclk dividers to allow high res or save power if possible.
 *
 * VLV cdclk change sequence: request the new voltage/frequency point from
 * the Punit and wait for it to ack, then (for 400 MHz) program the CCK
 * display clock divider, and finally bump the BUnit self-refresh exit
 * latency to match the new bandwidth.
 */
static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 val, cmd;

	/* Caller is expected to have kept the cached cdclk in sync with HW. */
	WARN_ON(dev_priv->display.get_display_clock_speed(dev)
					!= dev_priv->cdclk_freq);

	if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */
		cmd = 2;
	else if (cdclk == 266667)
		cmd = 1;
	else
		cmd = 0;

	mutex_lock(&dev_priv->rps.hw_lock);
	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	val &= ~DSPFREQGUAR_MASK;
	val |= (cmd << DSPFREQGUAR_SHIFT);
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
		      DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT),
		     50)) {
		DRM_ERROR("timed out waiting for CDclk change\n");
	}
	mutex_unlock(&dev_priv->rps.hw_lock);

	mutex_lock(&dev_priv->sb_lock);

	if (cdclk == 400000) {
		u32 divider;

		divider = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;

		/* adjust cdclk divider */
		val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
		val &= ~CCK_FREQUENCY_VALUES;
		val |= divider;
		vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val);

		if (wait_for((vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL) &
			      CCK_FREQUENCY_STATUS) == (divider << CCK_FREQUENCY_STATUS_SHIFT),
			     50))
			DRM_ERROR("timed out waiting for CDclk change\n");
	}

	/* adjust self-refresh exit latency value */
	val = vlv_bunit_read(dev_priv, BUNIT_REG_BISOC);
	val &= ~0x7f;

	/*
	 * For high bandwidth configs, we set a higher latency in the bunit
	 * so that the core display fetch happens in time to avoid underruns.
	 */
	if (cdclk == 400000)
		val |= 4500 / 250; /* 4.5 usec */
	else
		val |= 3000 / 250; /* 3.0 usec */
	vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val);

	mutex_unlock(&dev_priv->sb_lock);

	intel_update_cdclk(dev);
}
5926
383c5a6a
VS
/*
 * CHV cdclk change: validate the requested frequency, then write the CCK
 * divider to the Punit and wait for the status field to reflect it.
 */
static void cherryview_set_cdclk(struct drm_device *dev, int cdclk)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 val, cmd;

	/* Caller is expected to have kept the cached cdclk in sync with HW. */
	WARN_ON(dev_priv->display.get_display_clock_speed(dev)
					!= dev_priv->cdclk_freq);

	switch (cdclk) {
	case 333333:
	case 320000:
	case 266667:
	case 200000:
		break;
	default:
		MISSING_CASE(cdclk);
		return;
	}

	/*
	 * Specs are full of misinformation, but testing on actual
	 * hardware has shown that we just need to write the desired
	 * CCK divider into the Punit register.
	 */
	cmd = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;

	mutex_lock(&dev_priv->rps.hw_lock);
	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	val &= ~DSPFREQGUAR_MASK_CHV;
	val |= (cmd << DSPFREQGUAR_SHIFT_CHV);
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
		      DSPFREQSTAT_MASK_CHV) == (cmd << DSPFREQSTAT_SHIFT_CHV),
		     50)) {
		DRM_ERROR("timed out waiting for CDclk change\n");
	}
	mutex_unlock(&dev_priv->rps.hw_lock);

	intel_update_cdclk(dev);
}
5967
30a970c6
JB
/*
 * Pick the lowest VLV/CHV cdclk (kHz) that can carry @max_pixclk. Each bin
 * is usable up to 90% (VLV) or 95% (CHV) of its nominal rate; the 320/333
 * bin depends on whether 2*HPLL divides evenly into 320 MHz. Integer math
 * order in the threshold expressions is significant — do not refactor.
 */
static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
				 int max_pixclk)
{
	int freq_320 = (dev_priv->hpll_freq << 1) % 320000 != 0 ? 333333 : 320000;
	int limit = IS_CHERRYVIEW(dev_priv) ? 95 : 90;

	/*
	 * Really only a few cases to deal with, as only 4 CDclks are supported:
	 *   200MHz
	 *   267MHz
	 *   320/333MHz (depends on HPLL freq)
	 *   400MHz (VLV only)
	 * So we check to see whether we're above 90% (VLV) or 95% (CHV)
	 * of the lower bin and adjust if needed.
	 *
	 * We seem to get an unstable or solid color picture at 200MHz.
	 * Not sure what's wrong. For now use 200MHz only when all pipes
	 * are off.
	 */
	if (!IS_CHERRYVIEW(dev_priv) &&
	    max_pixclk > freq_320*limit/100)
		return 400000;
	else if (max_pixclk > 266667*limit/100)
		return freq_320;
	else if (max_pixclk > 0)
		return 266667;
	else
		return 200000;
}
5997
324513c0 5998static int bxt_calc_cdclk(int max_pixclk)
f8437dd1 5999{
760e1477 6000 if (max_pixclk > 576000)
f8437dd1 6001 return 624000;
760e1477 6002 else if (max_pixclk > 384000)
f8437dd1 6003 return 576000;
760e1477 6004 else if (max_pixclk > 288000)
f8437dd1 6005 return 384000;
760e1477 6006 else if (max_pixclk > 144000)
f8437dd1
VK
6007 return 288000;
6008 else
6009 return 144000;
6010}
6011
e8788cbc 6012/* Compute the max pixel clock for new configuration. */
a821fc46
ACO
6013static int intel_mode_max_pixclk(struct drm_device *dev,
6014 struct drm_atomic_state *state)
30a970c6 6015{
565602d7 6016 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
fac5e23e 6017 struct drm_i915_private *dev_priv = to_i915(dev);
565602d7
ML
6018 struct drm_crtc *crtc;
6019 struct drm_crtc_state *crtc_state;
6020 unsigned max_pixclk = 0, i;
6021 enum pipe pipe;
30a970c6 6022
565602d7
ML
6023 memcpy(intel_state->min_pixclk, dev_priv->min_pixclk,
6024 sizeof(intel_state->min_pixclk));
304603f4 6025
565602d7
ML
6026 for_each_crtc_in_state(state, crtc, crtc_state, i) {
6027 int pixclk = 0;
6028
6029 if (crtc_state->enable)
6030 pixclk = crtc_state->adjusted_mode.crtc_clock;
304603f4 6031
565602d7 6032 intel_state->min_pixclk[i] = pixclk;
30a970c6
JB
6033 }
6034
565602d7
ML
6035 for_each_pipe(dev_priv, pipe)
6036 max_pixclk = max(intel_state->min_pixclk[pipe], max_pixclk);
6037
30a970c6
JB
6038 return max_pixclk;
6039}
6040
27c329ed 6041static int valleyview_modeset_calc_cdclk(struct drm_atomic_state *state)
30a970c6 6042{
27c329ed 6043 struct drm_device *dev = state->dev;
fac5e23e 6044 struct drm_i915_private *dev_priv = to_i915(dev);
27c329ed 6045 int max_pixclk = intel_mode_max_pixclk(dev, state);
1a617b77
ML
6046 struct intel_atomic_state *intel_state =
6047 to_intel_atomic_state(state);
30a970c6 6048
1a617b77 6049 intel_state->cdclk = intel_state->dev_cdclk =
27c329ed 6050 valleyview_calc_cdclk(dev_priv, max_pixclk);
0a9ab303 6051
1a617b77
ML
6052 if (!intel_state->active_crtcs)
6053 intel_state->dev_cdclk = valleyview_calc_cdclk(dev_priv, 0);
6054
27c329ed
ML
6055 return 0;
6056}
304603f4 6057
324513c0 6058static int bxt_modeset_calc_cdclk(struct drm_atomic_state *state)
27c329ed 6059{
4e5ca60f 6060 int max_pixclk = ilk_max_pixel_rate(state);
1a617b77
ML
6061 struct intel_atomic_state *intel_state =
6062 to_intel_atomic_state(state);
85a96e7a 6063
1a617b77 6064 intel_state->cdclk = intel_state->dev_cdclk =
324513c0 6065 bxt_calc_cdclk(max_pixclk);
85a96e7a 6066
1a617b77 6067 if (!intel_state->active_crtcs)
324513c0 6068 intel_state->dev_cdclk = bxt_calc_cdclk(0);
1a617b77 6069
27c329ed 6070 return 0;
30a970c6
JB
6071}
6072
1e69cd74
VS
/*
 * Program the PFI (memory arbiter) credits after a cdclk change on VLV/CHV.
 * Higher credits are used when cdclk >= czclk; the defaults are written
 * first as a workaround before the final value is latched with the resend
 * bit.
 */
static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv)
{
	unsigned int credits, default_credits;

	if (IS_CHERRYVIEW(dev_priv))
		default_credits = PFI_CREDIT(12);
	else
		default_credits = PFI_CREDIT(8);

	if (dev_priv->cdclk_freq >= dev_priv->czclk_freq) {
		/* CHV suggested value is 31 or 63 */
		if (IS_CHERRYVIEW(dev_priv))
			credits = PFI_CREDIT_63;
		else
			credits = PFI_CREDIT(15);
	} else {
		credits = default_credits;
	}

	/*
	 * WA - write default credits before re-programming
	 * FIXME: should we also set the resend bit here?
	 */
	I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE |
		   default_credits);

	I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE |
		   credits | PFI_CREDIT_RESEND);

	/*
	 * FIXME is this guaranteed to clear
	 * immediately or should we poll for it?
	 */
	WARN_ON(I915_READ(GCI_CONTROL) & PFI_CREDIT_RESEND);
}
6108
27c329ed 6109static void valleyview_modeset_commit_cdclk(struct drm_atomic_state *old_state)
30a970c6 6110{
a821fc46 6111 struct drm_device *dev = old_state->dev;
fac5e23e 6112 struct drm_i915_private *dev_priv = to_i915(dev);
1a617b77
ML
6113 struct intel_atomic_state *old_intel_state =
6114 to_intel_atomic_state(old_state);
6115 unsigned req_cdclk = old_intel_state->dev_cdclk;
30a970c6 6116
27c329ed
ML
6117 /*
6118 * FIXME: We can end up here with all power domains off, yet
6119 * with a CDCLK frequency other than the minimum. To account
6120 * for this take the PIPE-A power domain, which covers the HW
6121 * blocks needed for the following programming. This can be
6122 * removed once it's guaranteed that we get here either with
6123 * the minimum CDCLK set, or the required power domains
6124 * enabled.
6125 */
6126 intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
738c05c0 6127
27c329ed
ML
6128 if (IS_CHERRYVIEW(dev))
6129 cherryview_set_cdclk(dev, req_cdclk);
6130 else
6131 valleyview_set_cdclk(dev, req_cdclk);
738c05c0 6132
27c329ed 6133 vlv_program_pfi_credits(dev_priv);
1e69cd74 6134
27c329ed 6135 intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
30a970c6
JB
6136}
6137
89b667f8
JB
6138static void valleyview_crtc_enable(struct drm_crtc *crtc)
6139{
6140 struct drm_device *dev = crtc->dev;
a72e4c9f 6141 struct drm_i915_private *dev_priv = to_i915(dev);
89b667f8
JB
6142 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6143 struct intel_encoder *encoder;
b95c5321
ML
6144 struct intel_crtc_state *pipe_config =
6145 to_intel_crtc_state(crtc->state);
89b667f8 6146 int pipe = intel_crtc->pipe;
89b667f8 6147
53d9f4e9 6148 if (WARN_ON(intel_crtc->active))
89b667f8
JB
6149 return;
6150
6e3c9717 6151 if (intel_crtc->config->has_dp_encoder)
fe3cd48d 6152 intel_dp_set_m_n(intel_crtc, M1_N1);
5b18e57c
DV
6153
6154 intel_set_pipe_timings(intel_crtc);
bc58be60 6155 intel_set_pipe_src_size(intel_crtc);
5b18e57c 6156
c14b0485 6157 if (IS_CHERRYVIEW(dev) && pipe == PIPE_B) {
fac5e23e 6158 struct drm_i915_private *dev_priv = to_i915(dev);
c14b0485
VS
6159
6160 I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
6161 I915_WRITE(CHV_CANVAS(pipe), 0);
6162 }
6163
5b18e57c
DV
6164 i9xx_set_pipeconf(intel_crtc);
6165
89b667f8 6166 intel_crtc->active = true;
89b667f8 6167
a72e4c9f 6168 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
4a3436e8 6169
89b667f8
JB
6170 for_each_encoder_on_crtc(dev, crtc, encoder)
6171 if (encoder->pre_pll_enable)
6172 encoder->pre_pll_enable(encoder);
6173
cd2d34d9
VS
6174 if (IS_CHERRYVIEW(dev)) {
6175 chv_prepare_pll(intel_crtc, intel_crtc->config);
6176 chv_enable_pll(intel_crtc, intel_crtc->config);
6177 } else {
6178 vlv_prepare_pll(intel_crtc, intel_crtc->config);
6179 vlv_enable_pll(intel_crtc, intel_crtc->config);
9d556c99 6180 }
89b667f8
JB
6181
6182 for_each_encoder_on_crtc(dev, crtc, encoder)
6183 if (encoder->pre_enable)
6184 encoder->pre_enable(encoder);
6185
2dd24552
JB
6186 i9xx_pfit_enable(intel_crtc);
6187
b95c5321 6188 intel_color_load_luts(&pipe_config->base);
63cbb074 6189
caed361d 6190 intel_update_watermarks(crtc);
e1fdc473 6191 intel_enable_pipe(intel_crtc);
be6a6f8e 6192
4b3a9526
VS
6193 assert_vblank_disabled(crtc);
6194 drm_crtc_vblank_on(crtc);
6195
f9b61ff6
DV
6196 for_each_encoder_on_crtc(dev, crtc, encoder)
6197 encoder->enable(encoder);
89b667f8
JB
6198}
6199
f13c2ef3
DV
6200static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
6201{
6202 struct drm_device *dev = crtc->base.dev;
fac5e23e 6203 struct drm_i915_private *dev_priv = to_i915(dev);
f13c2ef3 6204
6e3c9717
ACO
6205 I915_WRITE(FP0(crtc->pipe), crtc->config->dpll_hw_state.fp0);
6206 I915_WRITE(FP1(crtc->pipe), crtc->config->dpll_hw_state.fp1);
f13c2ef3
DV
6207}
6208
0b8765c6 6209static void i9xx_crtc_enable(struct drm_crtc *crtc)
79e53945
JB
6210{
6211 struct drm_device *dev = crtc->dev;
a72e4c9f 6212 struct drm_i915_private *dev_priv = to_i915(dev);
79e53945 6213 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
ef9c3aee 6214 struct intel_encoder *encoder;
b95c5321
ML
6215 struct intel_crtc_state *pipe_config =
6216 to_intel_crtc_state(crtc->state);
cd2d34d9 6217 enum pipe pipe = intel_crtc->pipe;
79e53945 6218
53d9f4e9 6219 if (WARN_ON(intel_crtc->active))
f7abfe8b
CW
6220 return;
6221
f13c2ef3
DV
6222 i9xx_set_pll_dividers(intel_crtc);
6223
6e3c9717 6224 if (intel_crtc->config->has_dp_encoder)
fe3cd48d 6225 intel_dp_set_m_n(intel_crtc, M1_N1);
5b18e57c
DV
6226
6227 intel_set_pipe_timings(intel_crtc);
bc58be60 6228 intel_set_pipe_src_size(intel_crtc);
5b18e57c 6229
5b18e57c
DV
6230 i9xx_set_pipeconf(intel_crtc);
6231
f7abfe8b 6232 intel_crtc->active = true;
6b383a7f 6233
4a3436e8 6234 if (!IS_GEN2(dev))
a72e4c9f 6235 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
4a3436e8 6236
9d6d9f19
MK
6237 for_each_encoder_on_crtc(dev, crtc, encoder)
6238 if (encoder->pre_enable)
6239 encoder->pre_enable(encoder);
6240
f6736a1a
DV
6241 i9xx_enable_pll(intel_crtc);
6242
2dd24552
JB
6243 i9xx_pfit_enable(intel_crtc);
6244
b95c5321 6245 intel_color_load_luts(&pipe_config->base);
63cbb074 6246
f37fcc2a 6247 intel_update_watermarks(crtc);
e1fdc473 6248 intel_enable_pipe(intel_crtc);
be6a6f8e 6249
4b3a9526
VS
6250 assert_vblank_disabled(crtc);
6251 drm_crtc_vblank_on(crtc);
6252
f9b61ff6
DV
6253 for_each_encoder_on_crtc(dev, crtc, encoder)
6254 encoder->enable(encoder);
0b8765c6 6255}
79e53945 6256
87476d63
DV
6257static void i9xx_pfit_disable(struct intel_crtc *crtc)
6258{
6259 struct drm_device *dev = crtc->base.dev;
fac5e23e 6260 struct drm_i915_private *dev_priv = to_i915(dev);
87476d63 6261
6e3c9717 6262 if (!crtc->config->gmch_pfit.control)
328d8e82 6263 return;
87476d63 6264
328d8e82 6265 assert_pipe_disabled(dev_priv, crtc->pipe);
87476d63 6266
328d8e82
DV
6267 DRM_DEBUG_DRIVER("disabling pfit, current: 0x%08x\n",
6268 I915_READ(PFIT_CONTROL));
6269 I915_WRITE(PFIT_CONTROL, 0);
87476d63
DV
6270}
6271
0b8765c6
JB
6272static void i9xx_crtc_disable(struct drm_crtc *crtc)
6273{
6274 struct drm_device *dev = crtc->dev;
fac5e23e 6275 struct drm_i915_private *dev_priv = to_i915(dev);
0b8765c6 6276 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
ef9c3aee 6277 struct intel_encoder *encoder;
0b8765c6 6278 int pipe = intel_crtc->pipe;
ef9c3aee 6279
6304cd91
VS
6280 /*
6281 * On gen2 planes are double buffered but the pipe isn't, so we must
6282 * wait for planes to fully turn off before disabling the pipe.
6283 */
90e83e53
ACO
6284 if (IS_GEN2(dev))
6285 intel_wait_for_vblank(dev, pipe);
6304cd91 6286
4b3a9526
VS
6287 for_each_encoder_on_crtc(dev, crtc, encoder)
6288 encoder->disable(encoder);
6289
f9b61ff6
DV
6290 drm_crtc_vblank_off(crtc);
6291 assert_vblank_disabled(crtc);
6292
575f7ab7 6293 intel_disable_pipe(intel_crtc);
24a1f16d 6294
87476d63 6295 i9xx_pfit_disable(intel_crtc);
24a1f16d 6296
89b667f8
JB
6297 for_each_encoder_on_crtc(dev, crtc, encoder)
6298 if (encoder->post_disable)
6299 encoder->post_disable(encoder);
6300
a65347ba 6301 if (!intel_crtc->config->has_dsi_encoder) {
076ed3b2
CML
6302 if (IS_CHERRYVIEW(dev))
6303 chv_disable_pll(dev_priv, pipe);
6304 else if (IS_VALLEYVIEW(dev))
6305 vlv_disable_pll(dev_priv, pipe);
6306 else
1c4e0274 6307 i9xx_disable_pll(intel_crtc);
076ed3b2 6308 }
0b8765c6 6309
d6db995f
VS
6310 for_each_encoder_on_crtc(dev, crtc, encoder)
6311 if (encoder->post_pll_disable)
6312 encoder->post_pll_disable(encoder);
6313
4a3436e8 6314 if (!IS_GEN2(dev))
a72e4c9f 6315 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
0b8765c6
JB
6316}
6317
b17d48e2
ML
6318static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
6319{
842e0307 6320 struct intel_encoder *encoder;
b17d48e2
ML
6321 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6322 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
6323 enum intel_display_power_domain domain;
6324 unsigned long domains;
6325
6326 if (!intel_crtc->active)
6327 return;
6328
a539205a 6329 if (to_intel_plane_state(crtc->primary->state)->visible) {
5a21b665 6330 WARN_ON(intel_crtc->flip_work);
fc32b1fd 6331
2622a081 6332 intel_pre_disable_primary_noatomic(crtc);
54a41961
ML
6333
6334 intel_crtc_disable_planes(crtc, 1 << drm_plane_index(crtc->primary));
6335 to_intel_plane_state(crtc->primary->state)->visible = false;
a539205a
ML
6336 }
6337
b17d48e2 6338 dev_priv->display.crtc_disable(crtc);
842e0307 6339
78108b7c
VS
6340 DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
6341 crtc->base.id, crtc->name);
842e0307
ML
6342
6343 WARN_ON(drm_atomic_set_mode_for_crtc(crtc->state, NULL) < 0);
6344 crtc->state->active = false;
37d9078b 6345 intel_crtc->active = false;
842e0307
ML
6346 crtc->enabled = false;
6347 crtc->state->connector_mask = 0;
6348 crtc->state->encoder_mask = 0;
6349
6350 for_each_encoder_on_crtc(crtc->dev, crtc, encoder)
6351 encoder->base.crtc = NULL;
6352
58f9c0bc 6353 intel_fbc_disable(intel_crtc);
37d9078b 6354 intel_update_watermarks(crtc);
1f7457b1 6355 intel_disable_shared_dpll(intel_crtc);
b17d48e2
ML
6356
6357 domains = intel_crtc->enabled_power_domains;
6358 for_each_power_domain(domain, domains)
6359 intel_display_power_put(dev_priv, domain);
6360 intel_crtc->enabled_power_domains = 0;
565602d7
ML
6361
6362 dev_priv->active_crtcs &= ~(1 << intel_crtc->pipe);
6363 dev_priv->min_pixclk[intel_crtc->pipe] = 0;
b17d48e2
ML
6364}
6365
6b72d486
ML
6366/*
6367 * turn all crtc's off, but do not adjust state
6368 * This has to be paired with a call to intel_modeset_setup_hw_state.
6369 */
70e0bd74 6370int intel_display_suspend(struct drm_device *dev)
ee7b9f93 6371{
e2c8b870 6372 struct drm_i915_private *dev_priv = to_i915(dev);
70e0bd74 6373 struct drm_atomic_state *state;
e2c8b870 6374 int ret;
70e0bd74 6375
e2c8b870
ML
6376 state = drm_atomic_helper_suspend(dev);
6377 ret = PTR_ERR_OR_ZERO(state);
70e0bd74
ML
6378 if (ret)
6379 DRM_ERROR("Suspending crtc's failed with %i\n", ret);
e2c8b870
ML
6380 else
6381 dev_priv->modeset_restore_state = state;
70e0bd74 6382 return ret;
ee7b9f93
JB
6383}
6384
ea5b213a 6385void intel_encoder_destroy(struct drm_encoder *encoder)
7e7d76c3 6386{
4ef69c7a 6387 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
ea5b213a 6388
ea5b213a
CW
6389 drm_encoder_cleanup(encoder);
6390 kfree(intel_encoder);
7e7d76c3
JB
6391}
6392
0a91ca29
DV
6393/* Cross check the actual hw state with our own modeset state tracking (and it's
6394 * internal consistency). */
5a21b665 6395static void intel_connector_verify_state(struct intel_connector *connector)
79e53945 6396{
5a21b665 6397 struct drm_crtc *crtc = connector->base.state->crtc;
35dd3c64
ML
6398
6399 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
6400 connector->base.base.id,
6401 connector->base.name);
6402
0a91ca29 6403 if (connector->get_hw_state(connector)) {
e85376cb 6404 struct intel_encoder *encoder = connector->encoder;
5a21b665 6405 struct drm_connector_state *conn_state = connector->base.state;
0a91ca29 6406
35dd3c64
ML
6407 I915_STATE_WARN(!crtc,
6408 "connector enabled without attached crtc\n");
0a91ca29 6409
35dd3c64
ML
6410 if (!crtc)
6411 return;
6412
6413 I915_STATE_WARN(!crtc->state->active,
6414 "connector is active, but attached crtc isn't\n");
6415
e85376cb 6416 if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
35dd3c64
ML
6417 return;
6418
e85376cb 6419 I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
35dd3c64
ML
6420 "atomic encoder doesn't match attached encoder\n");
6421
e85376cb 6422 I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
35dd3c64
ML
6423 "attached encoder crtc differs from connector crtc\n");
6424 } else {
4d688a2a
ML
6425 I915_STATE_WARN(crtc && crtc->state->active,
6426 "attached crtc is active, but connector isn't\n");
5a21b665 6427 I915_STATE_WARN(!crtc && connector->base.state->best_encoder,
35dd3c64 6428 "best encoder set without crtc!\n");
0a91ca29 6429 }
79e53945
JB
6430}
6431
08d9bc92
ACO
6432int intel_connector_init(struct intel_connector *connector)
6433{
5350a031 6434 drm_atomic_helper_connector_reset(&connector->base);
08d9bc92 6435
5350a031 6436 if (!connector->base.state)
08d9bc92
ACO
6437 return -ENOMEM;
6438
08d9bc92
ACO
6439 return 0;
6440}
6441
6442struct intel_connector *intel_connector_alloc(void)
6443{
6444 struct intel_connector *connector;
6445
6446 connector = kzalloc(sizeof *connector, GFP_KERNEL);
6447 if (!connector)
6448 return NULL;
6449
6450 if (intel_connector_init(connector) < 0) {
6451 kfree(connector);
6452 return NULL;
6453 }
6454
6455 return connector;
6456}
6457
f0947c37
DV
6458/* Simple connector->get_hw_state implementation for encoders that support only
6459 * one connector and no cloning and hence the encoder state determines the state
6460 * of the connector. */
6461bool intel_connector_get_hw_state(struct intel_connector *connector)
ea5b213a 6462{
24929352 6463 enum pipe pipe = 0;
f0947c37 6464 struct intel_encoder *encoder = connector->encoder;
ea5b213a 6465
f0947c37 6466 return encoder->get_hw_state(encoder, &pipe);
ea5b213a
CW
6467}
6468
6d293983 6469static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
d272ddfa 6470{
6d293983
ACO
6471 if (crtc_state->base.enable && crtc_state->has_pch_encoder)
6472 return crtc_state->fdi_lanes;
d272ddfa
VS
6473
6474 return 0;
6475}
6476
6d293983 6477static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
5cec258b 6478 struct intel_crtc_state *pipe_config)
1857e1da 6479{
6d293983
ACO
6480 struct drm_atomic_state *state = pipe_config->base.state;
6481 struct intel_crtc *other_crtc;
6482 struct intel_crtc_state *other_crtc_state;
6483
1857e1da
DV
6484 DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
6485 pipe_name(pipe), pipe_config->fdi_lanes);
6486 if (pipe_config->fdi_lanes > 4) {
6487 DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
6488 pipe_name(pipe), pipe_config->fdi_lanes);
6d293983 6489 return -EINVAL;
1857e1da
DV
6490 }
6491
bafb6553 6492 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
1857e1da
DV
6493 if (pipe_config->fdi_lanes > 2) {
6494 DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
6495 pipe_config->fdi_lanes);
6d293983 6496 return -EINVAL;
1857e1da 6497 } else {
6d293983 6498 return 0;
1857e1da
DV
6499 }
6500 }
6501
6502 if (INTEL_INFO(dev)->num_pipes == 2)
6d293983 6503 return 0;
1857e1da
DV
6504
6505 /* Ivybridge 3 pipe is really complicated */
6506 switch (pipe) {
6507 case PIPE_A:
6d293983 6508 return 0;
1857e1da 6509 case PIPE_B:
6d293983
ACO
6510 if (pipe_config->fdi_lanes <= 2)
6511 return 0;
6512
6513 other_crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, PIPE_C));
6514 other_crtc_state =
6515 intel_atomic_get_crtc_state(state, other_crtc);
6516 if (IS_ERR(other_crtc_state))
6517 return PTR_ERR(other_crtc_state);
6518
6519 if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
1857e1da
DV
6520 DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
6521 pipe_name(pipe), pipe_config->fdi_lanes);
6d293983 6522 return -EINVAL;
1857e1da 6523 }
6d293983 6524 return 0;
1857e1da 6525 case PIPE_C:
251cc67c
VS
6526 if (pipe_config->fdi_lanes > 2) {
6527 DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n",
6528 pipe_name(pipe), pipe_config->fdi_lanes);
6d293983 6529 return -EINVAL;
251cc67c 6530 }
6d293983
ACO
6531
6532 other_crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, PIPE_B));
6533 other_crtc_state =
6534 intel_atomic_get_crtc_state(state, other_crtc);
6535 if (IS_ERR(other_crtc_state))
6536 return PTR_ERR(other_crtc_state);
6537
6538 if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
1857e1da 6539 DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
6d293983 6540 return -EINVAL;
1857e1da 6541 }
6d293983 6542 return 0;
1857e1da
DV
6543 default:
6544 BUG();
6545 }
6546}
6547
e29c22c0
DV
6548#define RETRY 1
6549static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
5cec258b 6550 struct intel_crtc_state *pipe_config)
877d48d5 6551{
1857e1da 6552 struct drm_device *dev = intel_crtc->base.dev;
7c5f93b0 6553 const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
6d293983
ACO
6554 int lane, link_bw, fdi_dotclock, ret;
6555 bool needs_recompute = false;
877d48d5 6556
e29c22c0 6557retry:
877d48d5
DV
6558 /* FDI is a binary signal running at ~2.7GHz, encoding
6559 * each output octet as 10 bits. The actual frequency
6560 * is stored as a divider into a 100MHz clock, and the
6561 * mode pixel clock is stored in units of 1KHz.
6562 * Hence the bw of each lane in terms of the mode signal
6563 * is:
6564 */
21a727b3 6565 link_bw = intel_fdi_link_freq(to_i915(dev), pipe_config);
877d48d5 6566
241bfc38 6567 fdi_dotclock = adjusted_mode->crtc_clock;
877d48d5 6568
2bd89a07 6569 lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
877d48d5
DV
6570 pipe_config->pipe_bpp);
6571
6572 pipe_config->fdi_lanes = lane;
6573
2bd89a07 6574 intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
877d48d5 6575 link_bw, &pipe_config->fdi_m_n);
1857e1da 6576
e3b247da 6577 ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
6d293983 6578 if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
e29c22c0
DV
6579 pipe_config->pipe_bpp -= 2*3;
6580 DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
6581 pipe_config->pipe_bpp);
6582 needs_recompute = true;
6583 pipe_config->bw_constrained = true;
6584
6585 goto retry;
6586 }
6587
6588 if (needs_recompute)
6589 return RETRY;
6590
6d293983 6591 return ret;
877d48d5
DV
6592}
6593
8cfb3407
VS
6594static bool pipe_config_supports_ips(struct drm_i915_private *dev_priv,
6595 struct intel_crtc_state *pipe_config)
6596{
6597 if (pipe_config->pipe_bpp > 24)
6598 return false;
6599
6600 /* HSW can handle pixel rate up to cdclk? */
2d1fe073 6601 if (IS_HASWELL(dev_priv))
8cfb3407
VS
6602 return true;
6603
6604 /*
b432e5cf
VS
6605 * We compare against max which means we must take
6606 * the increased cdclk requirement into account when
6607 * calculating the new cdclk.
6608 *
6609 * Should measure whether using a lower cdclk w/o IPS
8cfb3407
VS
6610 */
6611 return ilk_pipe_pixel_rate(pipe_config) <=
6612 dev_priv->max_cdclk_freq * 95 / 100;
6613}
6614
42db64ef 6615static void hsw_compute_ips_config(struct intel_crtc *crtc,
5cec258b 6616 struct intel_crtc_state *pipe_config)
42db64ef 6617{
8cfb3407 6618 struct drm_device *dev = crtc->base.dev;
fac5e23e 6619 struct drm_i915_private *dev_priv = to_i915(dev);
8cfb3407 6620
d330a953 6621 pipe_config->ips_enabled = i915.enable_ips &&
8cfb3407
VS
6622 hsw_crtc_supports_ips(crtc) &&
6623 pipe_config_supports_ips(dev_priv, pipe_config);
42db64ef
PZ
6624}
6625
39acb4aa
VS
6626static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
6627{
6628 const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6629
6630 /* GDG double wide on either pipe, otherwise pipe A only */
6631 return INTEL_INFO(dev_priv)->gen < 4 &&
6632 (crtc->pipe == PIPE_A || IS_I915G(dev_priv));
6633}
6634
a43f6e0f 6635static int intel_crtc_compute_config(struct intel_crtc *crtc,
5cec258b 6636 struct intel_crtc_state *pipe_config)
79e53945 6637{
a43f6e0f 6638 struct drm_device *dev = crtc->base.dev;
fac5e23e 6639 struct drm_i915_private *dev_priv = to_i915(dev);
7c5f93b0 6640 const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
f3261156 6641 int clock_limit = dev_priv->max_dotclk_freq;
89749350 6642
cf532bb2 6643 if (INTEL_INFO(dev)->gen < 4) {
f3261156 6644 clock_limit = dev_priv->max_cdclk_freq * 9 / 10;
cf532bb2
VS
6645
6646 /*
39acb4aa 6647 * Enable double wide mode when the dot clock
cf532bb2 6648 * is > 90% of the (display) core speed.
cf532bb2 6649 */
39acb4aa
VS
6650 if (intel_crtc_supports_double_wide(crtc) &&
6651 adjusted_mode->crtc_clock > clock_limit) {
f3261156 6652 clock_limit = dev_priv->max_dotclk_freq;
cf532bb2 6653 pipe_config->double_wide = true;
ad3a4479 6654 }
f3261156 6655 }
ad3a4479 6656
f3261156
VS
6657 if (adjusted_mode->crtc_clock > clock_limit) {
6658 DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
6659 adjusted_mode->crtc_clock, clock_limit,
6660 yesno(pipe_config->double_wide));
6661 return -EINVAL;
2c07245f 6662 }
89749350 6663
1d1d0e27
VS
6664 /*
6665 * Pipe horizontal size must be even in:
6666 * - DVO ganged mode
6667 * - LVDS dual channel mode
6668 * - Double wide pipe
6669 */
a93e255f 6670 if ((intel_pipe_will_have_type(pipe_config, INTEL_OUTPUT_LVDS) &&
1d1d0e27
VS
6671 intel_is_dual_link_lvds(dev)) || pipe_config->double_wide)
6672 pipe_config->pipe_src_w &= ~1;
6673
8693a824
DL
6674 /* Cantiga+ cannot handle modes with a hsync front porch of 0.
6675 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
44f46b42
CW
6676 */
6677 if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) &&
aad941d5 6678 adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay)
e29c22c0 6679 return -EINVAL;
44f46b42 6680
f5adf94e 6681 if (HAS_IPS(dev))
a43f6e0f
DV
6682 hsw_compute_ips_config(crtc, pipe_config);
6683
877d48d5 6684 if (pipe_config->has_pch_encoder)
a43f6e0f 6685 return ironlake_fdi_compute_config(crtc, pipe_config);
877d48d5 6686
cf5a15be 6687 return 0;
79e53945
JB
6688}
6689
1652d19e
VS
6690static int skylake_get_display_clock_speed(struct drm_device *dev)
6691{
6692 struct drm_i915_private *dev_priv = to_i915(dev);
ea61791e 6693 uint32_t cdctl;
1652d19e 6694
ea61791e 6695 skl_dpll0_update(dev_priv);
1652d19e 6696
63911d72 6697 if (dev_priv->cdclk_pll.vco == 0)
709e05c3 6698 return dev_priv->cdclk_pll.ref;
1652d19e 6699
ea61791e 6700 cdctl = I915_READ(CDCLK_CTL);
1652d19e 6701
63911d72 6702 if (dev_priv->cdclk_pll.vco == 8640000) {
1652d19e
VS
6703 switch (cdctl & CDCLK_FREQ_SEL_MASK) {
6704 case CDCLK_FREQ_450_432:
6705 return 432000;
6706 case CDCLK_FREQ_337_308:
487ed2e4 6707 return 308571;
ea61791e
VS
6708 case CDCLK_FREQ_540:
6709 return 540000;
1652d19e 6710 case CDCLK_FREQ_675_617:
487ed2e4 6711 return 617143;
1652d19e 6712 default:
ea61791e 6713 MISSING_CASE(cdctl & CDCLK_FREQ_SEL_MASK);
1652d19e
VS
6714 }
6715 } else {
1652d19e
VS
6716 switch (cdctl & CDCLK_FREQ_SEL_MASK) {
6717 case CDCLK_FREQ_450_432:
6718 return 450000;
6719 case CDCLK_FREQ_337_308:
6720 return 337500;
ea61791e
VS
6721 case CDCLK_FREQ_540:
6722 return 540000;
1652d19e
VS
6723 case CDCLK_FREQ_675_617:
6724 return 675000;
6725 default:
ea61791e 6726 MISSING_CASE(cdctl & CDCLK_FREQ_SEL_MASK);
1652d19e
VS
6727 }
6728 }
6729
709e05c3 6730 return dev_priv->cdclk_pll.ref;
1652d19e
VS
6731}
6732
83d7c81f
VS
6733static void bxt_de_pll_update(struct drm_i915_private *dev_priv)
6734{
6735 u32 val;
6736
6737 dev_priv->cdclk_pll.ref = 19200;
1c3f7700 6738 dev_priv->cdclk_pll.vco = 0;
83d7c81f
VS
6739
6740 val = I915_READ(BXT_DE_PLL_ENABLE);
1c3f7700 6741 if ((val & BXT_DE_PLL_PLL_ENABLE) == 0)
83d7c81f 6742 return;
83d7c81f 6743
1c3f7700
ID
6744 if (WARN_ON((val & BXT_DE_PLL_LOCK) == 0))
6745 return;
83d7c81f
VS
6746
6747 val = I915_READ(BXT_DE_PLL_CTL);
6748 dev_priv->cdclk_pll.vco = (val & BXT_DE_PLL_RATIO_MASK) *
6749 dev_priv->cdclk_pll.ref;
6750}
6751
acd3f3d3
BP
6752static int broxton_get_display_clock_speed(struct drm_device *dev)
6753{
6754 struct drm_i915_private *dev_priv = to_i915(dev);
f5986242
VS
6755 u32 divider;
6756 int div, vco;
acd3f3d3 6757
83d7c81f
VS
6758 bxt_de_pll_update(dev_priv);
6759
f5986242
VS
6760 vco = dev_priv->cdclk_pll.vco;
6761 if (vco == 0)
6762 return dev_priv->cdclk_pll.ref;
acd3f3d3 6763
f5986242 6764 divider = I915_READ(CDCLK_CTL) & BXT_CDCLK_CD2X_DIV_SEL_MASK;
acd3f3d3 6765
f5986242 6766 switch (divider) {
acd3f3d3 6767 case BXT_CDCLK_CD2X_DIV_SEL_1:
f5986242
VS
6768 div = 2;
6769 break;
acd3f3d3 6770 case BXT_CDCLK_CD2X_DIV_SEL_1_5:
f5986242
VS
6771 div = 3;
6772 break;
acd3f3d3 6773 case BXT_CDCLK_CD2X_DIV_SEL_2:
f5986242
VS
6774 div = 4;
6775 break;
acd3f3d3 6776 case BXT_CDCLK_CD2X_DIV_SEL_4:
f5986242
VS
6777 div = 8;
6778 break;
6779 default:
6780 MISSING_CASE(divider);
6781 return dev_priv->cdclk_pll.ref;
acd3f3d3
BP
6782 }
6783
f5986242 6784 return DIV_ROUND_CLOSEST(vco, div);
acd3f3d3
BP
6785}
6786
1652d19e
VS
6787static int broadwell_get_display_clock_speed(struct drm_device *dev)
6788{
fac5e23e 6789 struct drm_i915_private *dev_priv = to_i915(dev);
1652d19e
VS
6790 uint32_t lcpll = I915_READ(LCPLL_CTL);
6791 uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
6792
6793 if (lcpll & LCPLL_CD_SOURCE_FCLK)
6794 return 800000;
6795 else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
6796 return 450000;
6797 else if (freq == LCPLL_CLK_FREQ_450)
6798 return 450000;
6799 else if (freq == LCPLL_CLK_FREQ_54O_BDW)
6800 return 540000;
6801 else if (freq == LCPLL_CLK_FREQ_337_5_BDW)
6802 return 337500;
6803 else
6804 return 675000;
6805}
6806
6807static int haswell_get_display_clock_speed(struct drm_device *dev)
6808{
fac5e23e 6809 struct drm_i915_private *dev_priv = to_i915(dev);
1652d19e
VS
6810 uint32_t lcpll = I915_READ(LCPLL_CTL);
6811 uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
6812
6813 if (lcpll & LCPLL_CD_SOURCE_FCLK)
6814 return 800000;
6815 else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
6816 return 450000;
6817 else if (freq == LCPLL_CLK_FREQ_450)
6818 return 450000;
6819 else if (IS_HSW_ULT(dev))
6820 return 337500;
6821 else
6822 return 540000;
79e53945
JB
6823}
6824
25eb05fc
JB
6825static int valleyview_get_display_clock_speed(struct drm_device *dev)
6826{
bfa7df01
VS
6827 return vlv_get_cck_clock_hpll(to_i915(dev), "cdclk",
6828 CCK_DISPLAY_CLOCK_CONTROL);
25eb05fc
JB
6829}
6830
b37a6434
VS
6831static int ilk_get_display_clock_speed(struct drm_device *dev)
6832{
6833 return 450000;
6834}
6835
e70236a8
JB
6836static int i945_get_display_clock_speed(struct drm_device *dev)
6837{
6838 return 400000;
6839}
79e53945 6840
e70236a8 6841static int i915_get_display_clock_speed(struct drm_device *dev)
79e53945 6842{
e907f170 6843 return 333333;
e70236a8 6844}
79e53945 6845
e70236a8
JB
6846static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
6847{
6848 return 200000;
6849}
79e53945 6850
257a7ffc
DV
6851static int pnv_get_display_clock_speed(struct drm_device *dev)
6852{
6853 u16 gcfgc = 0;
6854
6855 pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
6856
6857 switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
6858 case GC_DISPLAY_CLOCK_267_MHZ_PNV:
e907f170 6859 return 266667;
257a7ffc 6860 case GC_DISPLAY_CLOCK_333_MHZ_PNV:
e907f170 6861 return 333333;
257a7ffc 6862 case GC_DISPLAY_CLOCK_444_MHZ_PNV:
e907f170 6863 return 444444;
257a7ffc
DV
6864 case GC_DISPLAY_CLOCK_200_MHZ_PNV:
6865 return 200000;
6866 default:
6867 DRM_ERROR("Unknown pnv display core clock 0x%04x\n", gcfgc);
6868 case GC_DISPLAY_CLOCK_133_MHZ_PNV:
e907f170 6869 return 133333;
257a7ffc 6870 case GC_DISPLAY_CLOCK_167_MHZ_PNV:
e907f170 6871 return 166667;
257a7ffc
DV
6872 }
6873}
6874
e70236a8
JB
6875static int i915gm_get_display_clock_speed(struct drm_device *dev)
6876{
6877 u16 gcfgc = 0;
79e53945 6878
e70236a8
JB
6879 pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
6880
6881 if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
e907f170 6882 return 133333;
e70236a8
JB
6883 else {
6884 switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
6885 case GC_DISPLAY_CLOCK_333_MHZ:
e907f170 6886 return 333333;
e70236a8
JB
6887 default:
6888 case GC_DISPLAY_CLOCK_190_200_MHZ:
6889 return 190000;
79e53945 6890 }
e70236a8
JB
6891 }
6892}
6893
6894static int i865_get_display_clock_speed(struct drm_device *dev)
6895{
e907f170 6896 return 266667;
e70236a8
JB
6897}
6898
1b1d2716 6899static int i85x_get_display_clock_speed(struct drm_device *dev)
e70236a8
JB
6900{
6901 u16 hpllcc = 0;
1b1d2716 6902
65cd2b3f
VS
6903 /*
6904 * 852GM/852GMV only supports 133 MHz and the HPLLCC
6905 * encoding is different :(
6906 * FIXME is this the right way to detect 852GM/852GMV?
6907 */
6908 if (dev->pdev->revision == 0x1)
6909 return 133333;
6910
1b1d2716
VS
6911 pci_bus_read_config_word(dev->pdev->bus,
6912 PCI_DEVFN(0, 3), HPLLCC, &hpllcc);
6913
e70236a8
JB
6914 /* Assume that the hardware is in the high speed state. This
6915 * should be the default.
6916 */
6917 switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
6918 case GC_CLOCK_133_200:
1b1d2716 6919 case GC_CLOCK_133_200_2:
e70236a8
JB
6920 case GC_CLOCK_100_200:
6921 return 200000;
6922 case GC_CLOCK_166_250:
6923 return 250000;
6924 case GC_CLOCK_100_133:
e907f170 6925 return 133333;
1b1d2716
VS
6926 case GC_CLOCK_133_266:
6927 case GC_CLOCK_133_266_2:
6928 case GC_CLOCK_166_266:
6929 return 266667;
e70236a8 6930 }
79e53945 6931
e70236a8
JB
6932 /* Shouldn't happen */
6933 return 0;
6934}
79e53945 6935
e70236a8
JB
6936static int i830_get_display_clock_speed(struct drm_device *dev)
6937{
e907f170 6938 return 133333;
79e53945
JB
6939}
6940
34edce2f
VS
6941static unsigned int intel_hpll_vco(struct drm_device *dev)
6942{
fac5e23e 6943 struct drm_i915_private *dev_priv = to_i915(dev);
34edce2f
VS
6944 static const unsigned int blb_vco[8] = {
6945 [0] = 3200000,
6946 [1] = 4000000,
6947 [2] = 5333333,
6948 [3] = 4800000,
6949 [4] = 6400000,
6950 };
6951 static const unsigned int pnv_vco[8] = {
6952 [0] = 3200000,
6953 [1] = 4000000,
6954 [2] = 5333333,
6955 [3] = 4800000,
6956 [4] = 2666667,
6957 };
6958 static const unsigned int cl_vco[8] = {
6959 [0] = 3200000,
6960 [1] = 4000000,
6961 [2] = 5333333,
6962 [3] = 6400000,
6963 [4] = 3333333,
6964 [5] = 3566667,
6965 [6] = 4266667,
6966 };
6967 static const unsigned int elk_vco[8] = {
6968 [0] = 3200000,
6969 [1] = 4000000,
6970 [2] = 5333333,
6971 [3] = 4800000,
6972 };
6973 static const unsigned int ctg_vco[8] = {
6974 [0] = 3200000,
6975 [1] = 4000000,
6976 [2] = 5333333,
6977 [3] = 6400000,
6978 [4] = 2666667,
6979 [5] = 4266667,
6980 };
6981 const unsigned int *vco_table;
6982 unsigned int vco;
6983 uint8_t tmp = 0;
6984
6985 /* FIXME other chipsets? */
6986 if (IS_GM45(dev))
6987 vco_table = ctg_vco;
6988 else if (IS_G4X(dev))
6989 vco_table = elk_vco;
6990 else if (IS_CRESTLINE(dev))
6991 vco_table = cl_vco;
6992 else if (IS_PINEVIEW(dev))
6993 vco_table = pnv_vco;
6994 else if (IS_G33(dev))
6995 vco_table = blb_vco;
6996 else
6997 return 0;
6998
6999 tmp = I915_READ(IS_MOBILE(dev) ? HPLLVCO_MOBILE : HPLLVCO);
7000
7001 vco = vco_table[tmp & 0x7];
7002 if (vco == 0)
7003 DRM_ERROR("Bad HPLL VCO (HPLLVCO=0x%02x)\n", tmp);
7004 else
7005 DRM_DEBUG_KMS("HPLL VCO %u kHz\n", vco);
7006
7007 return vco;
7008}
7009
7010static int gm45_get_display_clock_speed(struct drm_device *dev)
7011{
7012 unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
7013 uint16_t tmp = 0;
7014
7015 pci_read_config_word(dev->pdev, GCFGC, &tmp);
7016
7017 cdclk_sel = (tmp >> 12) & 0x1;
7018
7019 switch (vco) {
7020 case 2666667:
7021 case 4000000:
7022 case 5333333:
7023 return cdclk_sel ? 333333 : 222222;
7024 case 3200000:
7025 return cdclk_sel ? 320000 : 228571;
7026 default:
7027 DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u, CFGC=0x%04x\n", vco, tmp);
7028 return 222222;
7029 }
7030}
7031
7032static int i965gm_get_display_clock_speed(struct drm_device *dev)
7033{
7034 static const uint8_t div_3200[] = { 16, 10, 8 };
7035 static const uint8_t div_4000[] = { 20, 12, 10 };
7036 static const uint8_t div_5333[] = { 24, 16, 14 };
7037 const uint8_t *div_table;
7038 unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
7039 uint16_t tmp = 0;
7040
7041 pci_read_config_word(dev->pdev, GCFGC, &tmp);
7042
7043 cdclk_sel = ((tmp >> 8) & 0x1f) - 1;
7044
7045 if (cdclk_sel >= ARRAY_SIZE(div_3200))
7046 goto fail;
7047
7048 switch (vco) {
7049 case 3200000:
7050 div_table = div_3200;
7051 break;
7052 case 4000000:
7053 div_table = div_4000;
7054 break;
7055 case 5333333:
7056 div_table = div_5333;
7057 break;
7058 default:
7059 goto fail;
7060 }
7061
7062 return DIV_ROUND_CLOSEST(vco, div_table[cdclk_sel]);
7063
caf4e252 7064fail:
34edce2f
VS
7065 DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%04x\n", vco, tmp);
7066 return 200000;
7067}
7068
7069static int g33_get_display_clock_speed(struct drm_device *dev)
7070{
7071 static const uint8_t div_3200[] = { 12, 10, 8, 7, 5, 16 };
7072 static const uint8_t div_4000[] = { 14, 12, 10, 8, 6, 20 };
7073 static const uint8_t div_4800[] = { 20, 14, 12, 10, 8, 24 };
7074 static const uint8_t div_5333[] = { 20, 16, 12, 12, 8, 28 };
7075 const uint8_t *div_table;
7076 unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
7077 uint16_t tmp = 0;
7078
7079 pci_read_config_word(dev->pdev, GCFGC, &tmp);
7080
7081 cdclk_sel = (tmp >> 4) & 0x7;
7082
7083 if (cdclk_sel >= ARRAY_SIZE(div_3200))
7084 goto fail;
7085
7086 switch (vco) {
7087 case 3200000:
7088 div_table = div_3200;
7089 break;
7090 case 4000000:
7091 div_table = div_4000;
7092 break;
7093 case 4800000:
7094 div_table = div_4800;
7095 break;
7096 case 5333333:
7097 div_table = div_5333;
7098 break;
7099 default:
7100 goto fail;
7101 }
7102
7103 return DIV_ROUND_CLOSEST(vco, div_table[cdclk_sel]);
7104
caf4e252 7105fail:
34edce2f
VS
7106 DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%08x\n", vco, tmp);
7107 return 190476;
7108}
7109
2c07245f 7110static void
a65851af 7111intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
2c07245f 7112{
a65851af
VS
7113 while (*num > DATA_LINK_M_N_MASK ||
7114 *den > DATA_LINK_M_N_MASK) {
2c07245f
ZW
7115 *num >>= 1;
7116 *den >>= 1;
7117 }
7118}
7119
a65851af
VS
7120static void compute_m_n(unsigned int m, unsigned int n,
7121 uint32_t *ret_m, uint32_t *ret_n)
7122{
7123 *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
7124 *ret_m = div_u64((uint64_t) m * *ret_n, n);
7125 intel_reduce_m_n_ratio(ret_m, ret_n);
7126}
7127
e69d0bc1
DV
7128void
7129intel_link_compute_m_n(int bits_per_pixel, int nlanes,
7130 int pixel_clock, int link_clock,
7131 struct intel_link_m_n *m_n)
2c07245f 7132{
e69d0bc1 7133 m_n->tu = 64;
a65851af
VS
7134
7135 compute_m_n(bits_per_pixel * pixel_clock,
7136 link_clock * nlanes * 8,
7137 &m_n->gmch_m, &m_n->gmch_n);
7138
7139 compute_m_n(pixel_clock, link_clock,
7140 &m_n->link_m, &m_n->link_n);
2c07245f
ZW
7141}
7142
a7615030
CW
7143static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
7144{
d330a953
JN
7145 if (i915.panel_use_ssc >= 0)
7146 return i915.panel_use_ssc != 0;
41aa3448 7147 return dev_priv->vbt.lvds_use_ssc
435793df 7148 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
a7615030
CW
7149}
7150
7429e9d4 7151static uint32_t pnv_dpll_compute_fp(struct dpll *dpll)
c65d77d8 7152{
7df00d7a 7153 return (1 << dpll->n) << 16 | dpll->m2;
7429e9d4 7154}
f47709a9 7155
7429e9d4
DV
7156static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll)
7157{
7158 return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
c65d77d8
JB
7159}
7160
f47709a9 7161static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
190f68c5 7162 struct intel_crtc_state *crtc_state,
9e2c8475 7163 struct dpll *reduced_clock)
a7516a05 7164{
f47709a9 7165 struct drm_device *dev = crtc->base.dev;
a7516a05
JB
7166 u32 fp, fp2 = 0;
7167
7168 if (IS_PINEVIEW(dev)) {
190f68c5 7169 fp = pnv_dpll_compute_fp(&crtc_state->dpll);
a7516a05 7170 if (reduced_clock)
7429e9d4 7171 fp2 = pnv_dpll_compute_fp(reduced_clock);
a7516a05 7172 } else {
190f68c5 7173 fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
a7516a05 7174 if (reduced_clock)
7429e9d4 7175 fp2 = i9xx_dpll_compute_fp(reduced_clock);
a7516a05
JB
7176 }
7177
190f68c5 7178 crtc_state->dpll_hw_state.fp0 = fp;
a7516a05 7179
f47709a9 7180 crtc->lowfreq_avail = false;
a93e255f 7181 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
ab585dea 7182 reduced_clock) {
190f68c5 7183 crtc_state->dpll_hw_state.fp1 = fp2;
f47709a9 7184 crtc->lowfreq_avail = true;
a7516a05 7185 } else {
190f68c5 7186 crtc_state->dpll_hw_state.fp1 = fp;
a7516a05
JB
7187 }
7188}
7189
5e69f97f
CML
7190static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
7191 pipe)
89b667f8
JB
7192{
7193 u32 reg_val;
7194
7195 /*
7196 * PLLB opamp always calibrates to max value of 0x3f, force enable it
7197 * and set it to a reasonable value instead.
7198 */
ab3c759a 7199 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
89b667f8
JB
7200 reg_val &= 0xffffff00;
7201 reg_val |= 0x00000030;
ab3c759a 7202 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
89b667f8 7203
ab3c759a 7204 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
89b667f8
JB
7205 reg_val &= 0x8cffffff;
7206 reg_val = 0x8c000000;
ab3c759a 7207 vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
89b667f8 7208
ab3c759a 7209 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
89b667f8 7210 reg_val &= 0xffffff00;
ab3c759a 7211 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
89b667f8 7212
ab3c759a 7213 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
89b667f8
JB
7214 reg_val &= 0x00ffffff;
7215 reg_val |= 0xb0000000;
ab3c759a 7216 vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
89b667f8
JB
7217}
7218
b551842d
DV
7219static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
7220 struct intel_link_m_n *m_n)
7221{
7222 struct drm_device *dev = crtc->base.dev;
fac5e23e 7223 struct drm_i915_private *dev_priv = to_i915(dev);
b551842d
DV
7224 int pipe = crtc->pipe;
7225
e3b95f1e
DV
7226 I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
7227 I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
7228 I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
7229 I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
b551842d
DV
7230}
7231
7232static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
f769cd24
VK
7233 struct intel_link_m_n *m_n,
7234 struct intel_link_m_n *m2_n2)
b551842d
DV
7235{
7236 struct drm_device *dev = crtc->base.dev;
fac5e23e 7237 struct drm_i915_private *dev_priv = to_i915(dev);
b551842d 7238 int pipe = crtc->pipe;
6e3c9717 7239 enum transcoder transcoder = crtc->config->cpu_transcoder;
b551842d
DV
7240
7241 if (INTEL_INFO(dev)->gen >= 5) {
7242 I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
7243 I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
7244 I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
7245 I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
f769cd24
VK
7246 /* M2_N2 registers to be set only for gen < 8 (M2_N2 available
7247 * for gen < 8) and if DRRS is supported (to make sure the
7248 * registers are not unnecessarily accessed).
7249 */
44395bfe 7250 if (m2_n2 && (IS_CHERRYVIEW(dev) || INTEL_INFO(dev)->gen < 8) &&
6e3c9717 7251 crtc->config->has_drrs) {
f769cd24
VK
7252 I915_WRITE(PIPE_DATA_M2(transcoder),
7253 TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
7254 I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
7255 I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m);
7256 I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n);
7257 }
b551842d 7258 } else {
e3b95f1e
DV
7259 I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
7260 I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
7261 I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
7262 I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
b551842d
DV
7263 }
7264}
7265
fe3cd48d 7266void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n)
03afc4a2 7267{
fe3cd48d
R
7268 struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
7269
7270 if (m_n == M1_N1) {
7271 dp_m_n = &crtc->config->dp_m_n;
7272 dp_m2_n2 = &crtc->config->dp_m2_n2;
7273 } else if (m_n == M2_N2) {
7274
7275 /*
7276 * M2_N2 registers are not supported. Hence m2_n2 divider value
7277 * needs to be programmed into M1_N1.
7278 */
7279 dp_m_n = &crtc->config->dp_m2_n2;
7280 } else {
7281 DRM_ERROR("Unsupported divider value\n");
7282 return;
7283 }
7284
6e3c9717
ACO
7285 if (crtc->config->has_pch_encoder)
7286 intel_pch_transcoder_set_m_n(crtc, &crtc->config->dp_m_n);
03afc4a2 7287 else
fe3cd48d 7288 intel_cpu_transcoder_set_m_n(crtc, dp_m_n, dp_m2_n2);
03afc4a2
DV
7289}
7290
251ac862
DV
7291static void vlv_compute_dpll(struct intel_crtc *crtc,
7292 struct intel_crtc_state *pipe_config)
bdd4b6a6 7293{
03ed5cbf 7294 pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
cd2d34d9 7295 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
03ed5cbf
VS
7296 if (crtc->pipe != PIPE_A)
7297 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
bdd4b6a6 7298
cd2d34d9 7299 /* DPLL not used with DSI, but still need the rest set up */
187a1c07 7300 if (!pipe_config->has_dsi_encoder)
cd2d34d9
VS
7301 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
7302 DPLL_EXT_BUFFER_ENABLE_VLV;
7303
03ed5cbf
VS
7304 pipe_config->dpll_hw_state.dpll_md =
7305 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
7306}
bdd4b6a6 7307
03ed5cbf
VS
7308static void chv_compute_dpll(struct intel_crtc *crtc,
7309 struct intel_crtc_state *pipe_config)
7310{
7311 pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
cd2d34d9 7312 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
03ed5cbf
VS
7313 if (crtc->pipe != PIPE_A)
7314 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
7315
cd2d34d9 7316 /* DPLL not used with DSI, but still need the rest set up */
187a1c07 7317 if (!pipe_config->has_dsi_encoder)
cd2d34d9
VS
7318 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;
7319
03ed5cbf
VS
7320 pipe_config->dpll_hw_state.dpll_md =
7321 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
bdd4b6a6
DV
7322}
7323
d288f65f 7324static void vlv_prepare_pll(struct intel_crtc *crtc,
5cec258b 7325 const struct intel_crtc_state *pipe_config)
a0c4da24 7326{
f47709a9 7327 struct drm_device *dev = crtc->base.dev;
fac5e23e 7328 struct drm_i915_private *dev_priv = to_i915(dev);
cd2d34d9 7329 enum pipe pipe = crtc->pipe;
bdd4b6a6 7330 u32 mdiv;
a0c4da24 7331 u32 bestn, bestm1, bestm2, bestp1, bestp2;
bdd4b6a6 7332 u32 coreclk, reg_val;
a0c4da24 7333
cd2d34d9
VS
7334 /* Enable Refclk */
7335 I915_WRITE(DPLL(pipe),
7336 pipe_config->dpll_hw_state.dpll &
7337 ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));
7338
7339 /* No need to actually set up the DPLL with DSI */
7340 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
7341 return;
7342
a580516d 7343 mutex_lock(&dev_priv->sb_lock);
09153000 7344
d288f65f
VS
7345 bestn = pipe_config->dpll.n;
7346 bestm1 = pipe_config->dpll.m1;
7347 bestm2 = pipe_config->dpll.m2;
7348 bestp1 = pipe_config->dpll.p1;
7349 bestp2 = pipe_config->dpll.p2;
a0c4da24 7350
89b667f8
JB
7351 /* See eDP HDMI DPIO driver vbios notes doc */
7352
7353 /* PLL B needs special handling */
bdd4b6a6 7354 if (pipe == PIPE_B)
5e69f97f 7355 vlv_pllb_recal_opamp(dev_priv, pipe);
89b667f8
JB
7356
7357 /* Set up Tx target for periodic Rcomp update */
ab3c759a 7358 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);
89b667f8
JB
7359
7360 /* Disable target IRef on PLL */
ab3c759a 7361 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
89b667f8 7362 reg_val &= 0x00ffffff;
ab3c759a 7363 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);
89b667f8
JB
7364
7365 /* Disable fast lock */
ab3c759a 7366 vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);
89b667f8
JB
7367
7368 /* Set idtafcrecal before PLL is enabled */
a0c4da24
JB
7369 mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
7370 mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
7371 mdiv |= ((bestn << DPIO_N_SHIFT));
a0c4da24 7372 mdiv |= (1 << DPIO_K_SHIFT);
7df5080b
JB
7373
7374 /*
7375 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
7376 * but we don't support that).
7377 * Note: don't use the DAC post divider as it seems unstable.
7378 */
7379 mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
ab3c759a 7380 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
a0c4da24 7381
a0c4da24 7382 mdiv |= DPIO_ENABLE_CALIBRATION;
ab3c759a 7383 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
a0c4da24 7384
89b667f8 7385 /* Set HBR and RBR LPF coefficients */
d288f65f 7386 if (pipe_config->port_clock == 162000 ||
409ee761
ACO
7387 intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG) ||
7388 intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
ab3c759a 7389 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
885b0120 7390 0x009f0003);
89b667f8 7391 else
ab3c759a 7392 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
89b667f8
JB
7393 0x00d0000f);
7394
681a8504 7395 if (pipe_config->has_dp_encoder) {
89b667f8 7396 /* Use SSC source */
bdd4b6a6 7397 if (pipe == PIPE_A)
ab3c759a 7398 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
89b667f8
JB
7399 0x0df40000);
7400 else
ab3c759a 7401 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
89b667f8
JB
7402 0x0df70000);
7403 } else { /* HDMI or VGA */
7404 /* Use bend source */
bdd4b6a6 7405 if (pipe == PIPE_A)
ab3c759a 7406 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
89b667f8
JB
7407 0x0df70000);
7408 else
ab3c759a 7409 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
89b667f8
JB
7410 0x0df40000);
7411 }
a0c4da24 7412
ab3c759a 7413 coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
89b667f8 7414 coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
409ee761
ACO
7415 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
7416 intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
89b667f8 7417 coreclk |= 0x01000000;
ab3c759a 7418 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
a0c4da24 7419
ab3c759a 7420 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
a580516d 7421 mutex_unlock(&dev_priv->sb_lock);
a0c4da24
JB
7422}
7423
d288f65f 7424static void chv_prepare_pll(struct intel_crtc *crtc,
5cec258b 7425 const struct intel_crtc_state *pipe_config)
9d556c99
CML
7426{
7427 struct drm_device *dev = crtc->base.dev;
fac5e23e 7428 struct drm_i915_private *dev_priv = to_i915(dev);
cd2d34d9 7429 enum pipe pipe = crtc->pipe;
9d556c99 7430 enum dpio_channel port = vlv_pipe_to_channel(pipe);
9cbe40c1 7431 u32 loopfilter, tribuf_calcntr;
9d556c99 7432 u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
a945ce7e 7433 u32 dpio_val;
9cbe40c1 7434 int vco;
9d556c99 7435
cd2d34d9
VS
7436 /* Enable Refclk and SSC */
7437 I915_WRITE(DPLL(pipe),
7438 pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
7439
7440 /* No need to actually set up the DPLL with DSI */
7441 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
7442 return;
7443
d288f65f
VS
7444 bestn = pipe_config->dpll.n;
7445 bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
7446 bestm1 = pipe_config->dpll.m1;
7447 bestm2 = pipe_config->dpll.m2 >> 22;
7448 bestp1 = pipe_config->dpll.p1;
7449 bestp2 = pipe_config->dpll.p2;
9cbe40c1 7450 vco = pipe_config->dpll.vco;
a945ce7e 7451 dpio_val = 0;
9cbe40c1 7452 loopfilter = 0;
9d556c99 7453
a580516d 7454 mutex_lock(&dev_priv->sb_lock);
9d556c99 7455
9d556c99
CML
7456 /* p1 and p2 divider */
7457 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
7458 5 << DPIO_CHV_S1_DIV_SHIFT |
7459 bestp1 << DPIO_CHV_P1_DIV_SHIFT |
7460 bestp2 << DPIO_CHV_P2_DIV_SHIFT |
7461 1 << DPIO_CHV_K_DIV_SHIFT);
7462
7463 /* Feedback post-divider - m2 */
7464 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);
7465
7466 /* Feedback refclk divider - n and m1 */
7467 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
7468 DPIO_CHV_M1_DIV_BY_2 |
7469 1 << DPIO_CHV_N_DIV_SHIFT);
7470
7471 /* M2 fraction division */
25a25dfc 7472 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);
9d556c99
CML
7473
7474 /* M2 fraction division enable */
a945ce7e
VP
7475 dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
7476 dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
7477 dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
7478 if (bestm2_frac)
7479 dpio_val |= DPIO_CHV_FRAC_DIV_EN;
7480 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);
9d556c99 7481
de3a0fde
VP
7482 /* Program digital lock detect threshold */
7483 dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
7484 dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
7485 DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
7486 dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
7487 if (!bestm2_frac)
7488 dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
7489 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);
7490
9d556c99 7491 /* Loop filter */
9cbe40c1
VP
7492 if (vco == 5400000) {
7493 loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
7494 loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
7495 loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
7496 tribuf_calcntr = 0x9;
7497 } else if (vco <= 6200000) {
7498 loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
7499 loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
7500 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
7501 tribuf_calcntr = 0x9;
7502 } else if (vco <= 6480000) {
7503 loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
7504 loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
7505 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
7506 tribuf_calcntr = 0x8;
7507 } else {
7508 /* Not supported. Apply the same limits as in the max case */
7509 loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
7510 loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
7511 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
7512 tribuf_calcntr = 0;
7513 }
9d556c99
CML
7514 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);
7515
968040b2 7516 dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
9cbe40c1
VP
7517 dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
7518 dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
7519 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);
7520
9d556c99
CML
7521 /* AFC Recal */
7522 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
7523 vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
7524 DPIO_AFC_RECAL);
7525
a580516d 7526 mutex_unlock(&dev_priv->sb_lock);
9d556c99
CML
7527}
7528
d288f65f
VS
7529/**
7530 * vlv_force_pll_on - forcibly enable just the PLL
7531 * @dev_priv: i915 private structure
7532 * @pipe: pipe PLL to enable
7533 * @dpll: PLL configuration
7534 *
7535 * Enable the PLL for @pipe using the supplied @dpll config. To be used
7536 * in cases where we need the PLL enabled even when @pipe is not going to
7537 * be enabled.
7538 */
3f36b937
TU
7539int vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
7540 const struct dpll *dpll)
d288f65f
VS
7541{
7542 struct intel_crtc *crtc =
7543 to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
3f36b937
TU
7544 struct intel_crtc_state *pipe_config;
7545
7546 pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
7547 if (!pipe_config)
7548 return -ENOMEM;
7549
7550 pipe_config->base.crtc = &crtc->base;
7551 pipe_config->pixel_multiplier = 1;
7552 pipe_config->dpll = *dpll;
d288f65f
VS
7553
7554 if (IS_CHERRYVIEW(dev)) {
3f36b937
TU
7555 chv_compute_dpll(crtc, pipe_config);
7556 chv_prepare_pll(crtc, pipe_config);
7557 chv_enable_pll(crtc, pipe_config);
d288f65f 7558 } else {
3f36b937
TU
7559 vlv_compute_dpll(crtc, pipe_config);
7560 vlv_prepare_pll(crtc, pipe_config);
7561 vlv_enable_pll(crtc, pipe_config);
d288f65f 7562 }
3f36b937
TU
7563
7564 kfree(pipe_config);
7565
7566 return 0;
d288f65f
VS
7567}
7568
7569/**
7570 * vlv_force_pll_off - forcibly disable just the PLL
7571 * @dev_priv: i915 private structure
7572 * @pipe: pipe PLL to disable
7573 *
7574 * Disable the PLL for @pipe. To be used in cases where we need
7575 * the PLL enabled even when @pipe is not going to be enabled.
7576 */
7577void vlv_force_pll_off(struct drm_device *dev, enum pipe pipe)
7578{
7579 if (IS_CHERRYVIEW(dev))
7580 chv_disable_pll(to_i915(dev), pipe);
7581 else
7582 vlv_disable_pll(to_i915(dev), pipe);
7583}
7584
251ac862
DV
7585static void i9xx_compute_dpll(struct intel_crtc *crtc,
7586 struct intel_crtc_state *crtc_state,
9e2c8475 7587 struct dpll *reduced_clock)
eb1cbe48 7588{
f47709a9 7589 struct drm_device *dev = crtc->base.dev;
fac5e23e 7590 struct drm_i915_private *dev_priv = to_i915(dev);
eb1cbe48
DV
7591 u32 dpll;
7592 bool is_sdvo;
190f68c5 7593 struct dpll *clock = &crtc_state->dpll;
eb1cbe48 7594
190f68c5 7595 i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
2a8f64ca 7596
a93e255f
ACO
7597 is_sdvo = intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO) ||
7598 intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI);
eb1cbe48
DV
7599
7600 dpll = DPLL_VGA_MODE_DIS;
7601
a93e255f 7602 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
eb1cbe48
DV
7603 dpll |= DPLLB_MODE_LVDS;
7604 else
7605 dpll |= DPLLB_MODE_DAC_SERIAL;
6cc5f341 7606
ef1b460d 7607 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
190f68c5 7608 dpll |= (crtc_state->pixel_multiplier - 1)
198a037f 7609 << SDVO_MULTIPLIER_SHIFT_HIRES;
eb1cbe48 7610 }
198a037f
DV
7611
7612 if (is_sdvo)
4a33e48d 7613 dpll |= DPLL_SDVO_HIGH_SPEED;
198a037f 7614
190f68c5 7615 if (crtc_state->has_dp_encoder)
4a33e48d 7616 dpll |= DPLL_SDVO_HIGH_SPEED;
eb1cbe48
DV
7617
7618 /* compute bitmask from p1 value */
7619 if (IS_PINEVIEW(dev))
7620 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
7621 else {
7622 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7623 if (IS_G4X(dev) && reduced_clock)
7624 dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
7625 }
7626 switch (clock->p2) {
7627 case 5:
7628 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
7629 break;
7630 case 7:
7631 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
7632 break;
7633 case 10:
7634 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
7635 break;
7636 case 14:
7637 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
7638 break;
7639 }
7640 if (INTEL_INFO(dev)->gen >= 4)
7641 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
7642
190f68c5 7643 if (crtc_state->sdvo_tv_clock)
eb1cbe48 7644 dpll |= PLL_REF_INPUT_TVCLKINBC;
a93e255f 7645 else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
ceb41007 7646 intel_panel_use_ssc(dev_priv))
eb1cbe48
DV
7647 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
7648 else
7649 dpll |= PLL_REF_INPUT_DREFCLK;
7650
7651 dpll |= DPLL_VCO_ENABLE;
190f68c5 7652 crtc_state->dpll_hw_state.dpll = dpll;
8bcc2795 7653
eb1cbe48 7654 if (INTEL_INFO(dev)->gen >= 4) {
190f68c5 7655 u32 dpll_md = (crtc_state->pixel_multiplier - 1)
ef1b460d 7656 << DPLL_MD_UDI_MULTIPLIER_SHIFT;
190f68c5 7657 crtc_state->dpll_hw_state.dpll_md = dpll_md;
eb1cbe48
DV
7658 }
7659}
7660
251ac862
DV
7661static void i8xx_compute_dpll(struct intel_crtc *crtc,
7662 struct intel_crtc_state *crtc_state,
9e2c8475 7663 struct dpll *reduced_clock)
eb1cbe48 7664{
f47709a9 7665 struct drm_device *dev = crtc->base.dev;
fac5e23e 7666 struct drm_i915_private *dev_priv = to_i915(dev);
eb1cbe48 7667 u32 dpll;
190f68c5 7668 struct dpll *clock = &crtc_state->dpll;
eb1cbe48 7669
190f68c5 7670 i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
2a8f64ca 7671
eb1cbe48
DV
7672 dpll = DPLL_VGA_MODE_DIS;
7673
a93e255f 7674 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
eb1cbe48
DV
7675 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7676 } else {
7677 if (clock->p1 == 2)
7678 dpll |= PLL_P1_DIVIDE_BY_TWO;
7679 else
7680 dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7681 if (clock->p2 == 4)
7682 dpll |= PLL_P2_DIVIDE_BY_4;
7683 }
7684
a93e255f 7685 if (!IS_I830(dev) && intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO))
4a33e48d
DV
7686 dpll |= DPLL_DVO_2X_MODE;
7687
a93e255f 7688 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
ceb41007 7689 intel_panel_use_ssc(dev_priv))
eb1cbe48
DV
7690 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
7691 else
7692 dpll |= PLL_REF_INPUT_DREFCLK;
7693
7694 dpll |= DPLL_VCO_ENABLE;
190f68c5 7695 crtc_state->dpll_hw_state.dpll = dpll;
eb1cbe48
DV
7696}
7697
8a654f3b 7698static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
b0e77b9c
PZ
7699{
7700 struct drm_device *dev = intel_crtc->base.dev;
fac5e23e 7701 struct drm_i915_private *dev_priv = to_i915(dev);
b0e77b9c 7702 enum pipe pipe = intel_crtc->pipe;
6e3c9717 7703 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
7c5f93b0 7704 const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
1caea6e9
VS
7705 uint32_t crtc_vtotal, crtc_vblank_end;
7706 int vsyncshift = 0;
4d8a62ea
DV
7707
7708 /* We need to be careful not to changed the adjusted mode, for otherwise
7709 * the hw state checker will get angry at the mismatch. */
7710 crtc_vtotal = adjusted_mode->crtc_vtotal;
7711 crtc_vblank_end = adjusted_mode->crtc_vblank_end;
b0e77b9c 7712
609aeaca 7713 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
b0e77b9c 7714 /* the chip adds 2 halflines automatically */
4d8a62ea
DV
7715 crtc_vtotal -= 1;
7716 crtc_vblank_end -= 1;
609aeaca 7717
409ee761 7718 if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
609aeaca
VS
7719 vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
7720 else
7721 vsyncshift = adjusted_mode->crtc_hsync_start -
7722 adjusted_mode->crtc_htotal / 2;
1caea6e9
VS
7723 if (vsyncshift < 0)
7724 vsyncshift += adjusted_mode->crtc_htotal;
b0e77b9c
PZ
7725 }
7726
7727 if (INTEL_INFO(dev)->gen > 3)
fe2b8f9d 7728 I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);
b0e77b9c 7729
fe2b8f9d 7730 I915_WRITE(HTOTAL(cpu_transcoder),
b0e77b9c
PZ
7731 (adjusted_mode->crtc_hdisplay - 1) |
7732 ((adjusted_mode->crtc_htotal - 1) << 16));
fe2b8f9d 7733 I915_WRITE(HBLANK(cpu_transcoder),
b0e77b9c
PZ
7734 (adjusted_mode->crtc_hblank_start - 1) |
7735 ((adjusted_mode->crtc_hblank_end - 1) << 16));
fe2b8f9d 7736 I915_WRITE(HSYNC(cpu_transcoder),
b0e77b9c
PZ
7737 (adjusted_mode->crtc_hsync_start - 1) |
7738 ((adjusted_mode->crtc_hsync_end - 1) << 16));
7739
fe2b8f9d 7740 I915_WRITE(VTOTAL(cpu_transcoder),
b0e77b9c 7741 (adjusted_mode->crtc_vdisplay - 1) |
4d8a62ea 7742 ((crtc_vtotal - 1) << 16));
fe2b8f9d 7743 I915_WRITE(VBLANK(cpu_transcoder),
b0e77b9c 7744 (adjusted_mode->crtc_vblank_start - 1) |
4d8a62ea 7745 ((crtc_vblank_end - 1) << 16));
fe2b8f9d 7746 I915_WRITE(VSYNC(cpu_transcoder),
b0e77b9c
PZ
7747 (adjusted_mode->crtc_vsync_start - 1) |
7748 ((adjusted_mode->crtc_vsync_end - 1) << 16));
7749
b5e508d4
PZ
7750 /* Workaround: when the EDP input selection is B, the VTOTAL_B must be
7751 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
7752 * documented on the DDI_FUNC_CTL register description, EDP Input Select
7753 * bits. */
7754 if (IS_HASWELL(dev) && cpu_transcoder == TRANSCODER_EDP &&
7755 (pipe == PIPE_B || pipe == PIPE_C))
7756 I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));
7757
bc58be60
JN
7758}
7759
7760static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc)
7761{
7762 struct drm_device *dev = intel_crtc->base.dev;
fac5e23e 7763 struct drm_i915_private *dev_priv = to_i915(dev);
bc58be60
JN
7764 enum pipe pipe = intel_crtc->pipe;
7765
b0e77b9c
PZ
7766 /* pipesrc controls the size that is scaled from, which should
7767 * always be the user's requested size.
7768 */
7769 I915_WRITE(PIPESRC(pipe),
6e3c9717
ACO
7770 ((intel_crtc->config->pipe_src_w - 1) << 16) |
7771 (intel_crtc->config->pipe_src_h - 1));
b0e77b9c
PZ
7772}
7773
1bd1bd80 7774static void intel_get_pipe_timings(struct intel_crtc *crtc,
5cec258b 7775 struct intel_crtc_state *pipe_config)
1bd1bd80
DV
7776{
7777 struct drm_device *dev = crtc->base.dev;
fac5e23e 7778 struct drm_i915_private *dev_priv = to_i915(dev);
1bd1bd80
DV
7779 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
7780 uint32_t tmp;
7781
7782 tmp = I915_READ(HTOTAL(cpu_transcoder));
2d112de7
ACO
7783 pipe_config->base.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
7784 pipe_config->base.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
1bd1bd80 7785 tmp = I915_READ(HBLANK(cpu_transcoder));
2d112de7
ACO
7786 pipe_config->base.adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1;
7787 pipe_config->base.adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1;
1bd1bd80 7788 tmp = I915_READ(HSYNC(cpu_transcoder));
2d112de7
ACO
7789 pipe_config->base.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
7790 pipe_config->base.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
1bd1bd80
DV
7791
7792 tmp = I915_READ(VTOTAL(cpu_transcoder));
2d112de7
ACO
7793 pipe_config->base.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
7794 pipe_config->base.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
1bd1bd80 7795 tmp = I915_READ(VBLANK(cpu_transcoder));
2d112de7
ACO
7796 pipe_config->base.adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1;
7797 pipe_config->base.adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 1;
1bd1bd80 7798 tmp = I915_READ(VSYNC(cpu_transcoder));
2d112de7
ACO
7799 pipe_config->base.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
7800 pipe_config->base.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
1bd1bd80
DV
7801
7802 if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) {
2d112de7
ACO
7803 pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
7804 pipe_config->base.adjusted_mode.crtc_vtotal += 1;
7805 pipe_config->base.adjusted_mode.crtc_vblank_end += 1;
1bd1bd80 7806 }
bc58be60
JN
7807}
7808
7809static void intel_get_pipe_src_size(struct intel_crtc *crtc,
7810 struct intel_crtc_state *pipe_config)
7811{
7812 struct drm_device *dev = crtc->base.dev;
fac5e23e 7813 struct drm_i915_private *dev_priv = to_i915(dev);
bc58be60 7814 u32 tmp;
1bd1bd80
DV
7815
7816 tmp = I915_READ(PIPESRC(crtc->pipe));
37327abd
VS
7817 pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
7818 pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
7819
2d112de7
ACO
7820 pipe_config->base.mode.vdisplay = pipe_config->pipe_src_h;
7821 pipe_config->base.mode.hdisplay = pipe_config->pipe_src_w;
1bd1bd80
DV
7822}
7823
f6a83288 7824void intel_mode_from_pipe_config(struct drm_display_mode *mode,
5cec258b 7825 struct intel_crtc_state *pipe_config)
babea61d 7826{
2d112de7
ACO
7827 mode->hdisplay = pipe_config->base.adjusted_mode.crtc_hdisplay;
7828 mode->htotal = pipe_config->base.adjusted_mode.crtc_htotal;
7829 mode->hsync_start = pipe_config->base.adjusted_mode.crtc_hsync_start;
7830 mode->hsync_end = pipe_config->base.adjusted_mode.crtc_hsync_end;
babea61d 7831
2d112de7
ACO
7832 mode->vdisplay = pipe_config->base.adjusted_mode.crtc_vdisplay;
7833 mode->vtotal = pipe_config->base.adjusted_mode.crtc_vtotal;
7834 mode->vsync_start = pipe_config->base.adjusted_mode.crtc_vsync_start;
7835 mode->vsync_end = pipe_config->base.adjusted_mode.crtc_vsync_end;
babea61d 7836
2d112de7 7837 mode->flags = pipe_config->base.adjusted_mode.flags;
cd13f5ab 7838 mode->type = DRM_MODE_TYPE_DRIVER;
babea61d 7839
2d112de7
ACO
7840 mode->clock = pipe_config->base.adjusted_mode.crtc_clock;
7841 mode->flags |= pipe_config->base.adjusted_mode.flags;
cd13f5ab
ML
7842
7843 mode->hsync = drm_mode_hsync(mode);
7844 mode->vrefresh = drm_mode_vrefresh(mode);
7845 drm_mode_set_name(mode);
babea61d
JB
7846}
7847
84b046f3
DV
7848static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
7849{
7850 struct drm_device *dev = intel_crtc->base.dev;
fac5e23e 7851 struct drm_i915_private *dev_priv = to_i915(dev);
84b046f3
DV
7852 uint32_t pipeconf;
7853
9f11a9e4 7854 pipeconf = 0;
84b046f3 7855
b6b5d049
VS
7856 if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
7857 (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
7858 pipeconf |= I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE;
67c72a12 7859
6e3c9717 7860 if (intel_crtc->config->double_wide)
cf532bb2 7861 pipeconf |= PIPECONF_DOUBLE_WIDE;
84b046f3 7862
ff9ce46e 7863 /* only g4x and later have fancy bpc/dither controls */
666a4537 7864 if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
ff9ce46e 7865 /* Bspec claims that we can't use dithering for 30bpp pipes. */
6e3c9717 7866 if (intel_crtc->config->dither && intel_crtc->config->pipe_bpp != 30)
ff9ce46e 7867 pipeconf |= PIPECONF_DITHER_EN |
84b046f3 7868 PIPECONF_DITHER_TYPE_SP;
84b046f3 7869
6e3c9717 7870 switch (intel_crtc->config->pipe_bpp) {
ff9ce46e
DV
7871 case 18:
7872 pipeconf |= PIPECONF_6BPC;
7873 break;
7874 case 24:
7875 pipeconf |= PIPECONF_8BPC;
7876 break;
7877 case 30:
7878 pipeconf |= PIPECONF_10BPC;
7879 break;
7880 default:
7881 /* Case prevented by intel_choose_pipe_bpp_dither. */
7882 BUG();
84b046f3
DV
7883 }
7884 }
7885
7886 if (HAS_PIPE_CXSR(dev)) {
7887 if (intel_crtc->lowfreq_avail) {
7888 DRM_DEBUG_KMS("enabling CxSR downclocking\n");
7889 pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
7890 } else {
7891 DRM_DEBUG_KMS("disabling CxSR downclocking\n");
84b046f3
DV
7892 }
7893 }
7894
6e3c9717 7895 if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
efc2cfff 7896 if (INTEL_INFO(dev)->gen < 4 ||
409ee761 7897 intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
efc2cfff
VS
7898 pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
7899 else
7900 pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
7901 } else
84b046f3
DV
7902 pipeconf |= PIPECONF_PROGRESSIVE;
7903
666a4537
WB
7904 if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
7905 intel_crtc->config->limited_color_range)
9f11a9e4 7906 pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
9c8e09b7 7907
84b046f3
DV
7908 I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf);
7909 POSTING_READ(PIPECONF(intel_crtc->pipe));
7910}
7911
81c97f52
ACO
7912static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
7913 struct intel_crtc_state *crtc_state)
7914{
7915 struct drm_device *dev = crtc->base.dev;
fac5e23e 7916 struct drm_i915_private *dev_priv = to_i915(dev);
1b6f4958 7917 const struct intel_limit *limit;
81c97f52
ACO
7918 int refclk = 48000;
7919
7920 memset(&crtc_state->dpll_hw_state, 0,
7921 sizeof(crtc_state->dpll_hw_state));
7922
7923 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7924 if (intel_panel_use_ssc(dev_priv)) {
7925 refclk = dev_priv->vbt.lvds_ssc_freq;
7926 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7927 }
7928
7929 limit = &intel_limits_i8xx_lvds;
7930 } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO)) {
7931 limit = &intel_limits_i8xx_dvo;
7932 } else {
7933 limit = &intel_limits_i8xx_dac;
7934 }
7935
7936 if (!crtc_state->clock_set &&
7937 !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7938 refclk, NULL, &crtc_state->dpll)) {
7939 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7940 return -EINVAL;
7941 }
7942
7943 i8xx_compute_dpll(crtc, crtc_state, NULL);
7944
7945 return 0;
7946}
7947
19ec6693
ACO
7948static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
7949 struct intel_crtc_state *crtc_state)
7950{
7951 struct drm_device *dev = crtc->base.dev;
fac5e23e 7952 struct drm_i915_private *dev_priv = to_i915(dev);
1b6f4958 7953 const struct intel_limit *limit;
19ec6693
ACO
7954 int refclk = 96000;
7955
7956 memset(&crtc_state->dpll_hw_state, 0,
7957 sizeof(crtc_state->dpll_hw_state));
7958
7959 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7960 if (intel_panel_use_ssc(dev_priv)) {
7961 refclk = dev_priv->vbt.lvds_ssc_freq;
7962 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7963 }
7964
7965 if (intel_is_dual_link_lvds(dev))
7966 limit = &intel_limits_g4x_dual_channel_lvds;
7967 else
7968 limit = &intel_limits_g4x_single_channel_lvds;
7969 } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI) ||
7970 intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
7971 limit = &intel_limits_g4x_hdmi;
7972 } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO)) {
7973 limit = &intel_limits_g4x_sdvo;
7974 } else {
7975 /* The option is for other outputs */
7976 limit = &intel_limits_i9xx_sdvo;
7977 }
7978
7979 if (!crtc_state->clock_set &&
7980 !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7981 refclk, NULL, &crtc_state->dpll)) {
7982 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7983 return -EINVAL;
7984 }
7985
7986 i9xx_compute_dpll(crtc, crtc_state, NULL);
7987
7988 return 0;
7989}
7990
70e8aa21
ACO
7991static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
7992 struct intel_crtc_state *crtc_state)
7993{
7994 struct drm_device *dev = crtc->base.dev;
fac5e23e 7995 struct drm_i915_private *dev_priv = to_i915(dev);
1b6f4958 7996 const struct intel_limit *limit;
70e8aa21
ACO
7997 int refclk = 96000;
7998
7999 memset(&crtc_state->dpll_hw_state, 0,
8000 sizeof(crtc_state->dpll_hw_state));
8001
8002 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8003 if (intel_panel_use_ssc(dev_priv)) {
8004 refclk = dev_priv->vbt.lvds_ssc_freq;
8005 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
8006 }
8007
8008 limit = &intel_limits_pineview_lvds;
8009 } else {
8010 limit = &intel_limits_pineview_sdvo;
8011 }
8012
8013 if (!crtc_state->clock_set &&
8014 !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8015 refclk, NULL, &crtc_state->dpll)) {
8016 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8017 return -EINVAL;
8018 }
8019
8020 i9xx_compute_dpll(crtc, crtc_state, NULL);
8021
8022 return 0;
8023}
8024
190f68c5
ACO
8025static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
8026 struct intel_crtc_state *crtc_state)
79e53945 8027{
c7653199 8028 struct drm_device *dev = crtc->base.dev;
fac5e23e 8029 struct drm_i915_private *dev_priv = to_i915(dev);
1b6f4958 8030 const struct intel_limit *limit;
81c97f52 8031 int refclk = 96000;
79e53945 8032
dd3cd74a
ACO
8033 memset(&crtc_state->dpll_hw_state, 0,
8034 sizeof(crtc_state->dpll_hw_state));
8035
70e8aa21
ACO
8036 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8037 if (intel_panel_use_ssc(dev_priv)) {
8038 refclk = dev_priv->vbt.lvds_ssc_freq;
8039 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
8040 }
43565a06 8041
70e8aa21
ACO
8042 limit = &intel_limits_i9xx_lvds;
8043 } else {
8044 limit = &intel_limits_i9xx_sdvo;
81c97f52 8045 }
79e53945 8046
70e8aa21
ACO
8047 if (!crtc_state->clock_set &&
8048 !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8049 refclk, NULL, &crtc_state->dpll)) {
8050 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8051 return -EINVAL;
f47709a9 8052 }
7026d4ac 8053
81c97f52 8054 i9xx_compute_dpll(crtc, crtc_state, NULL);
79e53945 8055
c8f7a0db 8056 return 0;
f564048e
EA
8057}
8058
65b3d6a9
ACO
8059static int chv_crtc_compute_clock(struct intel_crtc *crtc,
8060 struct intel_crtc_state *crtc_state)
8061{
8062 int refclk = 100000;
1b6f4958 8063 const struct intel_limit *limit = &intel_limits_chv;
65b3d6a9
ACO
8064
8065 memset(&crtc_state->dpll_hw_state, 0,
8066 sizeof(crtc_state->dpll_hw_state));
8067
65b3d6a9
ACO
8068 if (!crtc_state->clock_set &&
8069 !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8070 refclk, NULL, &crtc_state->dpll)) {
8071 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8072 return -EINVAL;
8073 }
8074
8075 chv_compute_dpll(crtc, crtc_state);
8076
8077 return 0;
8078}
8079
8080static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
8081 struct intel_crtc_state *crtc_state)
8082{
8083 int refclk = 100000;
1b6f4958 8084 const struct intel_limit *limit = &intel_limits_vlv;
65b3d6a9
ACO
8085
8086 memset(&crtc_state->dpll_hw_state, 0,
8087 sizeof(crtc_state->dpll_hw_state));
8088
65b3d6a9
ACO
8089 if (!crtc_state->clock_set &&
8090 !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8091 refclk, NULL, &crtc_state->dpll)) {
8092 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8093 return -EINVAL;
8094 }
8095
8096 vlv_compute_dpll(crtc, crtc_state);
8097
8098 return 0;
8099}
8100
2fa2fe9a 8101static void i9xx_get_pfit_config(struct intel_crtc *crtc,
5cec258b 8102 struct intel_crtc_state *pipe_config)
2fa2fe9a
DV
8103{
8104 struct drm_device *dev = crtc->base.dev;
fac5e23e 8105 struct drm_i915_private *dev_priv = to_i915(dev);
2fa2fe9a
DV
8106 uint32_t tmp;
8107
dc9e7dec
VS
8108 if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev)))
8109 return;
8110
2fa2fe9a 8111 tmp = I915_READ(PFIT_CONTROL);
06922821
DV
8112 if (!(tmp & PFIT_ENABLE))
8113 return;
2fa2fe9a 8114
06922821 8115 /* Check whether the pfit is attached to our pipe. */
2fa2fe9a
DV
8116 if (INTEL_INFO(dev)->gen < 4) {
8117 if (crtc->pipe != PIPE_B)
8118 return;
2fa2fe9a
DV
8119 } else {
8120 if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
8121 return;
8122 }
8123
06922821 8124 pipe_config->gmch_pfit.control = tmp;
2fa2fe9a 8125 pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
2fa2fe9a
DV
8126}
8127
acbec814 8128static void vlv_crtc_clock_get(struct intel_crtc *crtc,
5cec258b 8129 struct intel_crtc_state *pipe_config)
acbec814
JB
8130{
8131 struct drm_device *dev = crtc->base.dev;
fac5e23e 8132 struct drm_i915_private *dev_priv = to_i915(dev);
acbec814 8133 int pipe = pipe_config->cpu_transcoder;
9e2c8475 8134 struct dpll clock;
acbec814 8135 u32 mdiv;
662c6ecb 8136 int refclk = 100000;
acbec814 8137
b521973b
VS
8138 /* In case of DSI, DPLL will not be used */
8139 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
f573de5a
SK
8140 return;
8141
a580516d 8142 mutex_lock(&dev_priv->sb_lock);
ab3c759a 8143 mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
a580516d 8144 mutex_unlock(&dev_priv->sb_lock);
acbec814
JB
8145
8146 clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
8147 clock.m2 = mdiv & DPIO_M2DIV_MASK;
8148 clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
8149 clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
8150 clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
8151
dccbea3b 8152 pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
acbec814
JB
8153}
8154
5724dbd1
DL
8155static void
8156i9xx_get_initial_plane_config(struct intel_crtc *crtc,
8157 struct intel_initial_plane_config *plane_config)
1ad292b5
JB
8158{
8159 struct drm_device *dev = crtc->base.dev;
fac5e23e 8160 struct drm_i915_private *dev_priv = to_i915(dev);
1ad292b5
JB
8161 u32 val, base, offset;
8162 int pipe = crtc->pipe, plane = crtc->plane;
8163 int fourcc, pixel_format;
6761dd31 8164 unsigned int aligned_height;
b113d5ee 8165 struct drm_framebuffer *fb;
1b842c89 8166 struct intel_framebuffer *intel_fb;
1ad292b5 8167
42a7b088
DL
8168 val = I915_READ(DSPCNTR(plane));
8169 if (!(val & DISPLAY_PLANE_ENABLE))
8170 return;
8171
d9806c9f 8172 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
1b842c89 8173 if (!intel_fb) {
1ad292b5
JB
8174 DRM_DEBUG_KMS("failed to alloc fb\n");
8175 return;
8176 }
8177
1b842c89
DL
8178 fb = &intel_fb->base;
8179
18c5247e
DV
8180 if (INTEL_INFO(dev)->gen >= 4) {
8181 if (val & DISPPLANE_TILED) {
49af449b 8182 plane_config->tiling = I915_TILING_X;
18c5247e
DV
8183 fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
8184 }
8185 }
1ad292b5
JB
8186
8187 pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
b35d63fa 8188 fourcc = i9xx_format_to_fourcc(pixel_format);
b113d5ee
DL
8189 fb->pixel_format = fourcc;
8190 fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;
1ad292b5
JB
8191
8192 if (INTEL_INFO(dev)->gen >= 4) {
49af449b 8193 if (plane_config->tiling)
1ad292b5
JB
8194 offset = I915_READ(DSPTILEOFF(plane));
8195 else
8196 offset = I915_READ(DSPLINOFF(plane));
8197 base = I915_READ(DSPSURF(plane)) & 0xfffff000;
8198 } else {
8199 base = I915_READ(DSPADDR(plane));
8200 }
8201 plane_config->base = base;
8202
8203 val = I915_READ(PIPESRC(pipe));
b113d5ee
DL
8204 fb->width = ((val >> 16) & 0xfff) + 1;
8205 fb->height = ((val >> 0) & 0xfff) + 1;
1ad292b5
JB
8206
8207 val = I915_READ(DSPSTRIDE(pipe));
b113d5ee 8208 fb->pitches[0] = val & 0xffffffc0;
1ad292b5 8209
b113d5ee 8210 aligned_height = intel_fb_align_height(dev, fb->height,
091df6cb
DV
8211 fb->pixel_format,
8212 fb->modifier[0]);
1ad292b5 8213
f37b5c2b 8214 plane_config->size = fb->pitches[0] * aligned_height;
1ad292b5 8215
2844a921
DL
8216 DRM_DEBUG_KMS("pipe/plane %c/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
8217 pipe_name(pipe), plane, fb->width, fb->height,
8218 fb->bits_per_pixel, base, fb->pitches[0],
8219 plane_config->size);
1ad292b5 8220
2d14030b 8221 plane_config->fb = intel_fb;
1ad292b5
JB
8222}
8223
70b23a98 8224static void chv_crtc_clock_get(struct intel_crtc *crtc,
5cec258b 8225 struct intel_crtc_state *pipe_config)
70b23a98
VS
8226{
8227 struct drm_device *dev = crtc->base.dev;
fac5e23e 8228 struct drm_i915_private *dev_priv = to_i915(dev);
70b23a98
VS
8229 int pipe = pipe_config->cpu_transcoder;
8230 enum dpio_channel port = vlv_pipe_to_channel(pipe);
9e2c8475 8231 struct dpll clock;
0d7b6b11 8232 u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
70b23a98
VS
8233 int refclk = 100000;
8234
b521973b
VS
8235 /* In case of DSI, DPLL will not be used */
8236 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
8237 return;
8238
a580516d 8239 mutex_lock(&dev_priv->sb_lock);
70b23a98
VS
8240 cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
8241 pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
8242 pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
8243 pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
0d7b6b11 8244 pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
a580516d 8245 mutex_unlock(&dev_priv->sb_lock);
70b23a98
VS
8246
8247 clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
0d7b6b11
ID
8248 clock.m2 = (pll_dw0 & 0xff) << 22;
8249 if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
8250 clock.m2 |= pll_dw2 & 0x3fffff;
70b23a98
VS
8251 clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
8252 clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
8253 clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;
8254
dccbea3b 8255 pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
70b23a98
VS
8256}
8257
0e8ffe1b 8258static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
5cec258b 8259 struct intel_crtc_state *pipe_config)
0e8ffe1b
DV
8260{
8261 struct drm_device *dev = crtc->base.dev;
fac5e23e 8262 struct drm_i915_private *dev_priv = to_i915(dev);
1729050e 8263 enum intel_display_power_domain power_domain;
0e8ffe1b 8264 uint32_t tmp;
1729050e 8265 bool ret;
0e8ffe1b 8266
1729050e
ID
8267 power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
8268 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
b5482bd0
ID
8269 return false;
8270
e143a21c 8271 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
8106ddbd 8272 pipe_config->shared_dpll = NULL;
eccb140b 8273
1729050e
ID
8274 ret = false;
8275
0e8ffe1b
DV
8276 tmp = I915_READ(PIPECONF(crtc->pipe));
8277 if (!(tmp & PIPECONF_ENABLE))
1729050e 8278 goto out;
0e8ffe1b 8279
666a4537 8280 if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
42571aef
VS
8281 switch (tmp & PIPECONF_BPC_MASK) {
8282 case PIPECONF_6BPC:
8283 pipe_config->pipe_bpp = 18;
8284 break;
8285 case PIPECONF_8BPC:
8286 pipe_config->pipe_bpp = 24;
8287 break;
8288 case PIPECONF_10BPC:
8289 pipe_config->pipe_bpp = 30;
8290 break;
8291 default:
8292 break;
8293 }
8294 }
8295
666a4537
WB
8296 if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
8297 (tmp & PIPECONF_COLOR_RANGE_SELECT))
b5a9fa09
DV
8298 pipe_config->limited_color_range = true;
8299
282740f7
VS
8300 if (INTEL_INFO(dev)->gen < 4)
8301 pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
8302
1bd1bd80 8303 intel_get_pipe_timings(crtc, pipe_config);
bc58be60 8304 intel_get_pipe_src_size(crtc, pipe_config);
1bd1bd80 8305
2fa2fe9a
DV
8306 i9xx_get_pfit_config(crtc, pipe_config);
8307
6c49f241 8308 if (INTEL_INFO(dev)->gen >= 4) {
c231775c
VS
8309 /* No way to read it out on pipes B and C */
8310 if (IS_CHERRYVIEW(dev) && crtc->pipe != PIPE_A)
8311 tmp = dev_priv->chv_dpll_md[crtc->pipe];
8312 else
8313 tmp = I915_READ(DPLL_MD(crtc->pipe));
6c49f241
DV
8314 pipe_config->pixel_multiplier =
8315 ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
8316 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
8bcc2795 8317 pipe_config->dpll_hw_state.dpll_md = tmp;
6c49f241
DV
8318 } else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
8319 tmp = I915_READ(DPLL(crtc->pipe));
8320 pipe_config->pixel_multiplier =
8321 ((tmp & SDVO_MULTIPLIER_MASK)
8322 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
8323 } else {
8324 /* Note that on i915G/GM the pixel multiplier is in the sdvo
8325 * port and will be fixed up in the encoder->get_config
8326 * function. */
8327 pipe_config->pixel_multiplier = 1;
8328 }
8bcc2795 8329 pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
666a4537 8330 if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
1c4e0274
VS
8331 /*
8332 * DPLL_DVO_2X_MODE must be enabled for both DPLLs
8333 * on 830. Filter it out here so that we don't
8334 * report errors due to that.
8335 */
8336 if (IS_I830(dev))
8337 pipe_config->dpll_hw_state.dpll &= ~DPLL_DVO_2X_MODE;
8338
8bcc2795
DV
8339 pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
8340 pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
165e901c
VS
8341 } else {
8342 /* Mask out read-only status bits. */
8343 pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
8344 DPLL_PORTC_READY_MASK |
8345 DPLL_PORTB_READY_MASK);
8bcc2795 8346 }
6c49f241 8347
70b23a98
VS
8348 if (IS_CHERRYVIEW(dev))
8349 chv_crtc_clock_get(crtc, pipe_config);
8350 else if (IS_VALLEYVIEW(dev))
acbec814
JB
8351 vlv_crtc_clock_get(crtc, pipe_config);
8352 else
8353 i9xx_crtc_clock_get(crtc, pipe_config);
18442d08 8354
0f64614d
VS
8355 /*
8356 * Normally the dotclock is filled in by the encoder .get_config()
8357 * but in case the pipe is enabled w/o any ports we need a sane
8358 * default.
8359 */
8360 pipe_config->base.adjusted_mode.crtc_clock =
8361 pipe_config->port_clock / pipe_config->pixel_multiplier;
8362
1729050e
ID
8363 ret = true;
8364
8365out:
8366 intel_display_power_put(dev_priv, power_domain);
8367
8368 return ret;
0e8ffe1b
DV
8369}
8370
dde86e2d 8371static void ironlake_init_pch_refclk(struct drm_device *dev)
13d83a67 8372{
fac5e23e 8373 struct drm_i915_private *dev_priv = to_i915(dev);
13d83a67 8374 struct intel_encoder *encoder;
1c1a24d2 8375 int i;
74cfd7ac 8376 u32 val, final;
13d83a67 8377 bool has_lvds = false;
199e5d79 8378 bool has_cpu_edp = false;
199e5d79 8379 bool has_panel = false;
99eb6a01
KP
8380 bool has_ck505 = false;
8381 bool can_ssc = false;
1c1a24d2 8382 bool using_ssc_source = false;
13d83a67
JB
8383
8384 /* We need to take the global config into account */
b2784e15 8385 for_each_intel_encoder(dev, encoder) {
199e5d79
KP
8386 switch (encoder->type) {
8387 case INTEL_OUTPUT_LVDS:
8388 has_panel = true;
8389 has_lvds = true;
8390 break;
8391 case INTEL_OUTPUT_EDP:
8392 has_panel = true;
2de6905f 8393 if (enc_to_dig_port(&encoder->base)->port == PORT_A)
199e5d79
KP
8394 has_cpu_edp = true;
8395 break;
6847d71b
PZ
8396 default:
8397 break;
13d83a67
JB
8398 }
8399 }
8400
99eb6a01 8401 if (HAS_PCH_IBX(dev)) {
41aa3448 8402 has_ck505 = dev_priv->vbt.display_clock_mode;
99eb6a01
KP
8403 can_ssc = has_ck505;
8404 } else {
8405 has_ck505 = false;
8406 can_ssc = true;
8407 }
8408
1c1a24d2
L
8409 /* Check if any DPLLs are using the SSC source */
8410 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
8411 u32 temp = I915_READ(PCH_DPLL(i));
8412
8413 if (!(temp & DPLL_VCO_ENABLE))
8414 continue;
8415
8416 if ((temp & PLL_REF_INPUT_MASK) ==
8417 PLLB_REF_INPUT_SPREADSPECTRUMIN) {
8418 using_ssc_source = true;
8419 break;
8420 }
8421 }
8422
8423 DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
8424 has_panel, has_lvds, has_ck505, using_ssc_source);
13d83a67
JB
8425
8426 /* Ironlake: try to setup display ref clock before DPLL
8427 * enabling. This is only under driver's control after
8428 * PCH B stepping, previous chipset stepping should be
8429 * ignoring this setting.
8430 */
74cfd7ac
CW
8431 val = I915_READ(PCH_DREF_CONTROL);
8432
8433 /* As we must carefully and slowly disable/enable each source in turn,
8434 * compute the final state we want first and check if we need to
8435 * make any changes at all.
8436 */
8437 final = val;
8438 final &= ~DREF_NONSPREAD_SOURCE_MASK;
8439 if (has_ck505)
8440 final |= DREF_NONSPREAD_CK505_ENABLE;
8441 else
8442 final |= DREF_NONSPREAD_SOURCE_ENABLE;
8443
8c07eb68 8444 final &= ~DREF_SSC_SOURCE_MASK;
74cfd7ac 8445 final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
8c07eb68 8446 final &= ~DREF_SSC1_ENABLE;
74cfd7ac
CW
8447
8448 if (has_panel) {
8449 final |= DREF_SSC_SOURCE_ENABLE;
8450
8451 if (intel_panel_use_ssc(dev_priv) && can_ssc)
8452 final |= DREF_SSC1_ENABLE;
8453
8454 if (has_cpu_edp) {
8455 if (intel_panel_use_ssc(dev_priv) && can_ssc)
8456 final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
8457 else
8458 final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
8459 } else
8460 final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
1c1a24d2
L
8461 } else if (using_ssc_source) {
8462 final |= DREF_SSC_SOURCE_ENABLE;
8463 final |= DREF_SSC1_ENABLE;
74cfd7ac
CW
8464 }
8465
8466 if (final == val)
8467 return;
8468
13d83a67 8469 /* Always enable nonspread source */
74cfd7ac 8470 val &= ~DREF_NONSPREAD_SOURCE_MASK;
13d83a67 8471
99eb6a01 8472 if (has_ck505)
74cfd7ac 8473 val |= DREF_NONSPREAD_CK505_ENABLE;
99eb6a01 8474 else
74cfd7ac 8475 val |= DREF_NONSPREAD_SOURCE_ENABLE;
13d83a67 8476
199e5d79 8477 if (has_panel) {
74cfd7ac
CW
8478 val &= ~DREF_SSC_SOURCE_MASK;
8479 val |= DREF_SSC_SOURCE_ENABLE;
13d83a67 8480
199e5d79 8481 /* SSC must be turned on before enabling the CPU output */
99eb6a01 8482 if (intel_panel_use_ssc(dev_priv) && can_ssc) {
199e5d79 8483 DRM_DEBUG_KMS("Using SSC on panel\n");
74cfd7ac 8484 val |= DREF_SSC1_ENABLE;
e77166b5 8485 } else
74cfd7ac 8486 val &= ~DREF_SSC1_ENABLE;
199e5d79
KP
8487
8488 /* Get SSC going before enabling the outputs */
74cfd7ac 8489 I915_WRITE(PCH_DREF_CONTROL, val);
199e5d79
KP
8490 POSTING_READ(PCH_DREF_CONTROL);
8491 udelay(200);
8492
74cfd7ac 8493 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
13d83a67
JB
8494
8495 /* Enable CPU source on CPU attached eDP */
199e5d79 8496 if (has_cpu_edp) {
99eb6a01 8497 if (intel_panel_use_ssc(dev_priv) && can_ssc) {
199e5d79 8498 DRM_DEBUG_KMS("Using SSC on eDP\n");
74cfd7ac 8499 val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
eba905b2 8500 } else
74cfd7ac 8501 val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
199e5d79 8502 } else
74cfd7ac 8503 val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
199e5d79 8504
74cfd7ac 8505 I915_WRITE(PCH_DREF_CONTROL, val);
199e5d79
KP
8506 POSTING_READ(PCH_DREF_CONTROL);
8507 udelay(200);
8508 } else {
1c1a24d2 8509 DRM_DEBUG_KMS("Disabling CPU source output\n");
199e5d79 8510
74cfd7ac 8511 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
199e5d79
KP
8512
8513 /* Turn off CPU output */
74cfd7ac 8514 val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
199e5d79 8515
74cfd7ac 8516 I915_WRITE(PCH_DREF_CONTROL, val);
199e5d79
KP
8517 POSTING_READ(PCH_DREF_CONTROL);
8518 udelay(200);
8519
1c1a24d2
L
8520 if (!using_ssc_source) {
8521 DRM_DEBUG_KMS("Disabling SSC source\n");
199e5d79 8522
1c1a24d2
L
8523 /* Turn off the SSC source */
8524 val &= ~DREF_SSC_SOURCE_MASK;
8525 val |= DREF_SSC_SOURCE_DISABLE;
f165d283 8526
1c1a24d2
L
8527 /* Turn off SSC1 */
8528 val &= ~DREF_SSC1_ENABLE;
8529
8530 I915_WRITE(PCH_DREF_CONTROL, val);
8531 POSTING_READ(PCH_DREF_CONTROL);
8532 udelay(200);
8533 }
13d83a67 8534 }
74cfd7ac
CW
8535
8536 BUG_ON(val != final);
13d83a67
JB
8537}
8538
f31f2d55 8539static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
dde86e2d 8540{
f31f2d55 8541 uint32_t tmp;
dde86e2d 8542
0ff066a9
PZ
8543 tmp = I915_READ(SOUTH_CHICKEN2);
8544 tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
8545 I915_WRITE(SOUTH_CHICKEN2, tmp);
dde86e2d 8546
cf3598c2
ID
8547 if (wait_for_us(I915_READ(SOUTH_CHICKEN2) &
8548 FDI_MPHY_IOSFSB_RESET_STATUS, 100))
0ff066a9 8549 DRM_ERROR("FDI mPHY reset assert timeout\n");
dde86e2d 8550
0ff066a9
PZ
8551 tmp = I915_READ(SOUTH_CHICKEN2);
8552 tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
8553 I915_WRITE(SOUTH_CHICKEN2, tmp);
dde86e2d 8554
cf3598c2
ID
8555 if (wait_for_us((I915_READ(SOUTH_CHICKEN2) &
8556 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
0ff066a9 8557 DRM_ERROR("FDI mPHY reset de-assert timeout\n");
f31f2d55
PZ
8558}
8559
8560/* WaMPhyProgramming:hsw */
8561static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
8562{
8563 uint32_t tmp;
dde86e2d
PZ
8564
8565 tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
8566 tmp &= ~(0xFF << 24);
8567 tmp |= (0x12 << 24);
8568 intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
8569
dde86e2d
PZ
8570 tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
8571 tmp |= (1 << 11);
8572 intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
8573
8574 tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
8575 tmp |= (1 << 11);
8576 intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
8577
dde86e2d
PZ
8578 tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
8579 tmp |= (1 << 24) | (1 << 21) | (1 << 18);
8580 intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
8581
8582 tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
8583 tmp |= (1 << 24) | (1 << 21) | (1 << 18);
8584 intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
8585
0ff066a9
PZ
8586 tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
8587 tmp &= ~(7 << 13);
8588 tmp |= (5 << 13);
8589 intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
dde86e2d 8590
0ff066a9
PZ
8591 tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
8592 tmp &= ~(7 << 13);
8593 tmp |= (5 << 13);
8594 intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
dde86e2d
PZ
8595
8596 tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
8597 tmp &= ~0xFF;
8598 tmp |= 0x1C;
8599 intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
8600
8601 tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
8602 tmp &= ~0xFF;
8603 tmp |= 0x1C;
8604 intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
8605
8606 tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
8607 tmp &= ~(0xFF << 16);
8608 tmp |= (0x1C << 16);
8609 intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
8610
8611 tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
8612 tmp &= ~(0xFF << 16);
8613 tmp |= (0x1C << 16);
8614 intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
8615
0ff066a9
PZ
8616 tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
8617 tmp |= (1 << 27);
8618 intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
dde86e2d 8619
0ff066a9
PZ
8620 tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
8621 tmp |= (1 << 27);
8622 intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
dde86e2d 8623
0ff066a9
PZ
8624 tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
8625 tmp &= ~(0xF << 28);
8626 tmp |= (4 << 28);
8627 intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
dde86e2d 8628
0ff066a9
PZ
8629 tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
8630 tmp &= ~(0xF << 28);
8631 tmp |= (4 << 28);
8632 intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
f31f2d55
PZ
8633}
8634
2fa86a1f
PZ
8635/* Implements 3 different sequences from BSpec chapter "Display iCLK
8636 * Programming" based on the parameters passed:
8637 * - Sequence to enable CLKOUT_DP
8638 * - Sequence to enable CLKOUT_DP without spread
8639 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
8640 */
8641static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread,
8642 bool with_fdi)
f31f2d55 8643{
fac5e23e 8644 struct drm_i915_private *dev_priv = to_i915(dev);
2fa86a1f
PZ
8645 uint32_t reg, tmp;
8646
8647 if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
8648 with_spread = true;
c2699524 8649 if (WARN(HAS_PCH_LPT_LP(dev) && with_fdi, "LP PCH doesn't have FDI\n"))
2fa86a1f 8650 with_fdi = false;
f31f2d55 8651
a580516d 8652 mutex_lock(&dev_priv->sb_lock);
f31f2d55
PZ
8653
8654 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
8655 tmp &= ~SBI_SSCCTL_DISABLE;
8656 tmp |= SBI_SSCCTL_PATHALT;
8657 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
8658
8659 udelay(24);
8660
2fa86a1f
PZ
8661 if (with_spread) {
8662 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
8663 tmp &= ~SBI_SSCCTL_PATHALT;
8664 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
f31f2d55 8665
2fa86a1f
PZ
8666 if (with_fdi) {
8667 lpt_reset_fdi_mphy(dev_priv);
8668 lpt_program_fdi_mphy(dev_priv);
8669 }
8670 }
dde86e2d 8671
c2699524 8672 reg = HAS_PCH_LPT_LP(dev) ? SBI_GEN0 : SBI_DBUFF0;
2fa86a1f
PZ
8673 tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
8674 tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
8675 intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
c00db246 8676
a580516d 8677 mutex_unlock(&dev_priv->sb_lock);
dde86e2d
PZ
8678}
8679
47701c3b
PZ
8680/* Sequence to disable CLKOUT_DP */
8681static void lpt_disable_clkout_dp(struct drm_device *dev)
8682{
fac5e23e 8683 struct drm_i915_private *dev_priv = to_i915(dev);
47701c3b
PZ
8684 uint32_t reg, tmp;
8685
a580516d 8686 mutex_lock(&dev_priv->sb_lock);
47701c3b 8687
c2699524 8688 reg = HAS_PCH_LPT_LP(dev) ? SBI_GEN0 : SBI_DBUFF0;
47701c3b
PZ
8689 tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
8690 tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
8691 intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
8692
8693 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
8694 if (!(tmp & SBI_SSCCTL_DISABLE)) {
8695 if (!(tmp & SBI_SSCCTL_PATHALT)) {
8696 tmp |= SBI_SSCCTL_PATHALT;
8697 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
8698 udelay(32);
8699 }
8700 tmp |= SBI_SSCCTL_DISABLE;
8701 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
8702 }
8703
a580516d 8704 mutex_unlock(&dev_priv->sb_lock);
47701c3b
PZ
8705}
8706
f7be2c21
VS
8707#define BEND_IDX(steps) ((50 + (steps)) / 5)
8708
8709static const uint16_t sscdivintphase[] = {
8710 [BEND_IDX( 50)] = 0x3B23,
8711 [BEND_IDX( 45)] = 0x3B23,
8712 [BEND_IDX( 40)] = 0x3C23,
8713 [BEND_IDX( 35)] = 0x3C23,
8714 [BEND_IDX( 30)] = 0x3D23,
8715 [BEND_IDX( 25)] = 0x3D23,
8716 [BEND_IDX( 20)] = 0x3E23,
8717 [BEND_IDX( 15)] = 0x3E23,
8718 [BEND_IDX( 10)] = 0x3F23,
8719 [BEND_IDX( 5)] = 0x3F23,
8720 [BEND_IDX( 0)] = 0x0025,
8721 [BEND_IDX( -5)] = 0x0025,
8722 [BEND_IDX(-10)] = 0x0125,
8723 [BEND_IDX(-15)] = 0x0125,
8724 [BEND_IDX(-20)] = 0x0225,
8725 [BEND_IDX(-25)] = 0x0225,
8726 [BEND_IDX(-30)] = 0x0325,
8727 [BEND_IDX(-35)] = 0x0325,
8728 [BEND_IDX(-40)] = 0x0425,
8729 [BEND_IDX(-45)] = 0x0425,
8730 [BEND_IDX(-50)] = 0x0525,
8731};
8732
8733/*
8734 * Bend CLKOUT_DP
8735 * steps -50 to 50 inclusive, in steps of 5
8736 * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
8737 * change in clock period = -(steps / 10) * 5.787 ps
8738 */
8739static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
8740{
8741 uint32_t tmp;
8742 int idx = BEND_IDX(steps);
8743
8744 if (WARN_ON(steps % 5 != 0))
8745 return;
8746
8747 if (WARN_ON(idx >= ARRAY_SIZE(sscdivintphase)))
8748 return;
8749
8750 mutex_lock(&dev_priv->sb_lock);
8751
8752 if (steps % 10 != 0)
8753 tmp = 0xAAAAAAAB;
8754 else
8755 tmp = 0x00000000;
8756 intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);
8757
8758 tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
8759 tmp &= 0xffff0000;
8760 tmp |= sscdivintphase[idx];
8761 intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);
8762
8763 mutex_unlock(&dev_priv->sb_lock);
8764}
8765
8766#undef BEND_IDX
8767
bf8fa3d3
PZ
8768static void lpt_init_pch_refclk(struct drm_device *dev)
8769{
bf8fa3d3
PZ
8770 struct intel_encoder *encoder;
8771 bool has_vga = false;
8772
b2784e15 8773 for_each_intel_encoder(dev, encoder) {
bf8fa3d3
PZ
8774 switch (encoder->type) {
8775 case INTEL_OUTPUT_ANALOG:
8776 has_vga = true;
8777 break;
6847d71b
PZ
8778 default:
8779 break;
bf8fa3d3
PZ
8780 }
8781 }
8782
f7be2c21
VS
8783 if (has_vga) {
8784 lpt_bend_clkout_dp(to_i915(dev), 0);
47701c3b 8785 lpt_enable_clkout_dp(dev, true, true);
f7be2c21 8786 } else {
47701c3b 8787 lpt_disable_clkout_dp(dev);
f7be2c21 8788 }
bf8fa3d3
PZ
8789}
/*
 * Initialize reference clocks when the driver loads, dispatching to the
 * sequence that matches the detected PCH generation.
 */
void intel_init_pch_refclk(struct drm_device *dev)
{
	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
		ironlake_init_pch_refclk(dev);
	else if (HAS_PCH_LPT(dev))
		lpt_init_pch_refclk(dev);
}
8801
6ff93609 8802static void ironlake_set_pipeconf(struct drm_crtc *crtc)
79e53945 8803{
fac5e23e 8804 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
79e53945
JB
8805 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8806 int pipe = intel_crtc->pipe;
c8203565
PZ
8807 uint32_t val;
8808
78114071 8809 val = 0;
c8203565 8810
6e3c9717 8811 switch (intel_crtc->config->pipe_bpp) {
c8203565 8812 case 18:
dfd07d72 8813 val |= PIPECONF_6BPC;
c8203565
PZ
8814 break;
8815 case 24:
dfd07d72 8816 val |= PIPECONF_8BPC;
c8203565
PZ
8817 break;
8818 case 30:
dfd07d72 8819 val |= PIPECONF_10BPC;
c8203565
PZ
8820 break;
8821 case 36:
dfd07d72 8822 val |= PIPECONF_12BPC;
c8203565
PZ
8823 break;
8824 default:
cc769b62
PZ
8825 /* Case prevented by intel_choose_pipe_bpp_dither. */
8826 BUG();
c8203565
PZ
8827 }
8828
6e3c9717 8829 if (intel_crtc->config->dither)
c8203565
PZ
8830 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
8831
6e3c9717 8832 if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
c8203565
PZ
8833 val |= PIPECONF_INTERLACED_ILK;
8834 else
8835 val |= PIPECONF_PROGRESSIVE;
8836
6e3c9717 8837 if (intel_crtc->config->limited_color_range)
3685a8f3 8838 val |= PIPECONF_COLOR_RANGE_SELECT;
3685a8f3 8839
c8203565
PZ
8840 I915_WRITE(PIPECONF(pipe), val);
8841 POSTING_READ(PIPECONF(pipe));
8842}
8843
6ff93609 8844static void haswell_set_pipeconf(struct drm_crtc *crtc)
ee2b0b38 8845{
fac5e23e 8846 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
ee2b0b38 8847 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6e3c9717 8848 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
391bf048 8849 u32 val = 0;
ee2b0b38 8850
391bf048 8851 if (IS_HASWELL(dev_priv) && intel_crtc->config->dither)
ee2b0b38
PZ
8852 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
8853
6e3c9717 8854 if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
ee2b0b38
PZ
8855 val |= PIPECONF_INTERLACED_ILK;
8856 else
8857 val |= PIPECONF_PROGRESSIVE;
8858
702e7a56
PZ
8859 I915_WRITE(PIPECONF(cpu_transcoder), val);
8860 POSTING_READ(PIPECONF(cpu_transcoder));
391bf048
JN
8861}
8862
391bf048
JN
8863static void haswell_set_pipemisc(struct drm_crtc *crtc)
8864{
fac5e23e 8865 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
391bf048 8866 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
756f85cf 8867
391bf048
JN
8868 if (IS_BROADWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 9) {
8869 u32 val = 0;
756f85cf 8870
6e3c9717 8871 switch (intel_crtc->config->pipe_bpp) {
756f85cf
PZ
8872 case 18:
8873 val |= PIPEMISC_DITHER_6_BPC;
8874 break;
8875 case 24:
8876 val |= PIPEMISC_DITHER_8_BPC;
8877 break;
8878 case 30:
8879 val |= PIPEMISC_DITHER_10_BPC;
8880 break;
8881 case 36:
8882 val |= PIPEMISC_DITHER_12_BPC;
8883 break;
8884 default:
8885 /* Case prevented by pipe_config_set_bpp. */
8886 BUG();
8887 }
8888
6e3c9717 8889 if (intel_crtc->config->dither)
756f85cf
PZ
8890 val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
8891
391bf048 8892 I915_WRITE(PIPEMISC(intel_crtc->pipe), val);
756f85cf 8893 }
ee2b0b38
PZ
8894}
/*
 * Compute the number of FDI lanes needed to carry @target_clock (kHz)
 * at @bpp bits per pixel over a link running at @link_bw (kHz).
 *
 * The payload is padded by 5% to leave headroom for spread spectrum
 * clocking (max center spread is 2.5%; use double for safety's sake),
 * then divided by the per-lane byte rate, rounding up to whole lanes.
 */
int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
{
	uint32_t bps = target_clock * bpp * 21 / 20;
	uint32_t lane_bw = link_bw * 8;

	return (bps + lane_bw - 1) / lane_bw; /* open-coded DIV_ROUND_UP */
}
8906
7429e9d4 8907static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
6cf86a5e 8908{
7429e9d4 8909 return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
f48d8f23
PZ
8910}
8911
/*
 * Assemble the ILK-style DPLL, FP0 and FP1 register values for this crtc
 * and store them in crtc_state->dpll_hw_state.  Purely computes state;
 * nothing is written to the hardware here.
 *
 * @reduced_clock: optional lower-power clock; when NULL, FP1 mirrors FP0.
 */
static void ironlake_compute_dpll(struct intel_crtc *intel_crtc,
				  struct intel_crtc_state *crtc_state,
				  struct dpll *reduced_clock)
{
	struct drm_crtc *crtc = &intel_crtc->base;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state = crtc_state->base.state;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	struct intel_encoder *encoder;
	u32 dpll, fp, fp2;
	int factor, i;
	bool is_lvds = false, is_sdvo = false;

	/* Classify the encoders feeding this crtc: LVDS vs SDVO/HDMI. */
	for_each_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != crtc_state->base.crtc)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			is_lvds = true;
			break;
		case INTEL_OUTPUT_SDVO:
		case INTEL_OUTPUT_HDMI:
			is_sdvo = true;
			break;
		default:
			break;
		}
	}

	/* Enable autotuning of the PLL clock (if permissible) */
	factor = 21;
	if (is_lvds) {
		if ((intel_panel_use_ssc(dev_priv) &&
		     dev_priv->vbt.lvds_ssc_freq == 100000) ||
		    (HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev)))
			factor = 25;
	} else if (crtc_state->sdvo_tv_clock)
		factor = 20;

	fp = i9xx_dpll_compute_fp(&crtc_state->dpll);

	if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor))
		fp |= FP_CB_TUNE;

	if (reduced_clock) {
		fp2 = i9xx_dpll_compute_fp(reduced_clock);

		/* Same M < factor * N tuning test as for the primary clock. */
		if (reduced_clock->m < factor * reduced_clock->n)
			fp2 |= FP_CB_TUNE;
	} else {
		fp2 = fp;
	}

	dpll = 0;

	if (is_lvds)
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	/* Hardware field holds the multiplier minus one. */
	dpll |= (crtc_state->pixel_multiplier - 1)
		<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;

	/* Both SDVO/HDMI and DP links need the high-speed clock mode. */
	if (is_sdvo)
		dpll |= DPLL_SDVO_HIGH_SPEED;
	if (crtc_state->has_dp_encoder)
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	/* also FPA1 */
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;

	switch (crtc_state->dpll.p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}

	/* LVDS panels with SSC use the spread-spectrum reference input. */
	if (is_lvds && intel_panel_use_ssc(dev_priv))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;

	crtc_state->dpll_hw_state.dpll = dpll;
	crtc_state->dpll_hw_state.fp0 = fp;
	crtc_state->dpll_hw_state.fp1 = fp2;
}
9016
/*
 * Compute the PLL configuration for an ILK crtc: pick the appropriate
 * divider limits, find a divider set for the target port clock, fill in
 * the dpll_hw_state and claim a shared PCH PLL.
 *
 * Returns 0 on success or -EINVAL if no divider/PLL can be found.
 */
static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
				       struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct dpll reduced_clock;
	/* NOTE: never set in this function, so no reduced clock is used. */
	bool has_reduced_clock = false;
	struct intel_shared_dpll *pll;
	const struct intel_limit *limit;
	int refclk = 120000;

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	crtc->lowfreq_avail = false;

	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
	if (!crtc_state->has_pch_encoder)
		return 0;

	/* LVDS may switch to the (possibly 100 MHz) SSC reference clock. */
	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if (intel_panel_use_ssc(dev_priv)) {
			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
				      dev_priv->vbt.lvds_ssc_freq);
			refclk = dev_priv->vbt.lvds_ssc_freq;
		}

		/* Divider limits depend on single vs dual link and refclk. */
		if (intel_is_dual_link_lvds(dev)) {
			if (refclk == 100000)
				limit = &intel_limits_ironlake_dual_lvds_100m;
			else
				limit = &intel_limits_ironlake_dual_lvds;
		} else {
			if (refclk == 100000)
				limit = &intel_limits_ironlake_single_lvds_100m;
			else
				limit = &intel_limits_ironlake_single_lvds;
		}
	} else {
		limit = &intel_limits_ironlake_dac;
	}

	if (!crtc_state->clock_set &&
	    !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
				refclk, NULL, &crtc_state->dpll)) {
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}

	ironlake_compute_dpll(crtc, crtc_state,
			      has_reduced_clock ? &reduced_clock : NULL);

	pll = intel_get_shared_dpll(crtc, crtc_state, NULL);
	if (pll == NULL) {
		DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
				 pipe_name(crtc->pipe));
		return -EINVAL;
	}

	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    has_reduced_clock)
		crtc->lowfreq_avail = true;

	return 0;
}
9082
/*
 * Read back the link/data M/N values currently programmed in the PCH
 * transcoder registers for this crtc's pipe.
 */
static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;

	m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
	m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
	/* The DATA_M1 register also carries the TU size field; mask it off. */
	m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
		& ~TU_SIZE_MASK;
	m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
	/* +1: the hardware field stores TU size minus one. */
	m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
		    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
}
9098
/*
 * Read back the link/data M/N values for a CPU transcoder.  On gen5+ the
 * registers are indexed by transcoder; on older hardware by pipe.  When
 * @m2_n2 is non-NULL the secondary (DRRS) M2/N2 set is read as well.
 */
static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
					 enum transcoder transcoder,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe = crtc->pipe;

	if (INTEL_INFO(dev)->gen >= 5) {
		m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
		m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
		/* DATA_M1 also holds the TU size field; mask it off here. */
		m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
		/* +1: the hardware field stores TU size minus one. */
		m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
		/* Read M2_N2 registers only for gen < 8 (M2_N2 available for
		 * gen < 8) and if DRRS is supported (to make sure the
		 * registers are not unnecessarily read).
		 */
		if (m2_n2 && INTEL_INFO(dev)->gen < 8 &&
			crtc->config->has_drrs) {
			m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
			m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder));
			m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder))
					& ~TU_SIZE_MASK;
			m2_n2->gmch_n = I915_READ(PIPE_DATA_N2(transcoder));
			m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder))
					& TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
		}
	} else {
		/* Pre-gen5: per-pipe G4X-style registers. */
		m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
		m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
		m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
		m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
	}
}
9140
9141void intel_dp_get_m_n(struct intel_crtc *crtc,
5cec258b 9142 struct intel_crtc_state *pipe_config)
eb14cb74 9143{
681a8504 9144 if (pipe_config->has_pch_encoder)
eb14cb74
VS
9145 intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
9146 else
9147 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
b95af8be
VK
9148 &pipe_config->dp_m_n,
9149 &pipe_config->dp_m2_n2);
eb14cb74 9150}
72419203 9151
eb14cb74 9152static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
5cec258b 9153 struct intel_crtc_state *pipe_config)
eb14cb74
VS
9154{
9155 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
b95af8be 9156 &pipe_config->fdi_m_n, NULL);
72419203
DV
9157}
9158
bd2e244f 9159static void skylake_get_pfit_config(struct intel_crtc *crtc,
5cec258b 9160 struct intel_crtc_state *pipe_config)
bd2e244f
JB
9161{
9162 struct drm_device *dev = crtc->base.dev;
fac5e23e 9163 struct drm_i915_private *dev_priv = to_i915(dev);
a1b2278e
CK
9164 struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state;
9165 uint32_t ps_ctrl = 0;
9166 int id = -1;
9167 int i;
bd2e244f 9168
a1b2278e
CK
9169 /* find scaler attached to this pipe */
9170 for (i = 0; i < crtc->num_scalers; i++) {
9171 ps_ctrl = I915_READ(SKL_PS_CTRL(crtc->pipe, i));
9172 if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) {
9173 id = i;
9174 pipe_config->pch_pfit.enabled = true;
9175 pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i));
9176 pipe_config->pch_pfit.size = I915_READ(SKL_PS_WIN_SZ(crtc->pipe, i));
9177 break;
9178 }
9179 }
bd2e244f 9180
a1b2278e
CK
9181 scaler_state->scaler_id = id;
9182 if (id >= 0) {
9183 scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
9184 } else {
9185 scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
bd2e244f
JB
9186 }
9187}
9188
5724dbd1
DL
9189static void
9190skylake_get_initial_plane_config(struct intel_crtc *crtc,
9191 struct intel_initial_plane_config *plane_config)
bc8d7dff
DL
9192{
9193 struct drm_device *dev = crtc->base.dev;
fac5e23e 9194 struct drm_i915_private *dev_priv = to_i915(dev);
40f46283 9195 u32 val, base, offset, stride_mult, tiling;
bc8d7dff
DL
9196 int pipe = crtc->pipe;
9197 int fourcc, pixel_format;
6761dd31 9198 unsigned int aligned_height;
bc8d7dff 9199 struct drm_framebuffer *fb;
1b842c89 9200 struct intel_framebuffer *intel_fb;
bc8d7dff 9201
d9806c9f 9202 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
1b842c89 9203 if (!intel_fb) {
bc8d7dff
DL
9204 DRM_DEBUG_KMS("failed to alloc fb\n");
9205 return;
9206 }
9207
1b842c89
DL
9208 fb = &intel_fb->base;
9209
bc8d7dff 9210 val = I915_READ(PLANE_CTL(pipe, 0));
42a7b088
DL
9211 if (!(val & PLANE_CTL_ENABLE))
9212 goto error;
9213
bc8d7dff
DL
9214 pixel_format = val & PLANE_CTL_FORMAT_MASK;
9215 fourcc = skl_format_to_fourcc(pixel_format,
9216 val & PLANE_CTL_ORDER_RGBX,
9217 val & PLANE_CTL_ALPHA_MASK);
9218 fb->pixel_format = fourcc;
9219 fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;
9220
40f46283
DL
9221 tiling = val & PLANE_CTL_TILED_MASK;
9222 switch (tiling) {
9223 case PLANE_CTL_TILED_LINEAR:
9224 fb->modifier[0] = DRM_FORMAT_MOD_NONE;
9225 break;
9226 case PLANE_CTL_TILED_X:
9227 plane_config->tiling = I915_TILING_X;
9228 fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
9229 break;
9230 case PLANE_CTL_TILED_Y:
9231 fb->modifier[0] = I915_FORMAT_MOD_Y_TILED;
9232 break;
9233 case PLANE_CTL_TILED_YF:
9234 fb->modifier[0] = I915_FORMAT_MOD_Yf_TILED;
9235 break;
9236 default:
9237 MISSING_CASE(tiling);
9238 goto error;
9239 }
9240
bc8d7dff
DL
9241 base = I915_READ(PLANE_SURF(pipe, 0)) & 0xfffff000;
9242 plane_config->base = base;
9243
9244 offset = I915_READ(PLANE_OFFSET(pipe, 0));
9245
9246 val = I915_READ(PLANE_SIZE(pipe, 0));
9247 fb->height = ((val >> 16) & 0xfff) + 1;
9248 fb->width = ((val >> 0) & 0x1fff) + 1;
9249
9250 val = I915_READ(PLANE_STRIDE(pipe, 0));
7b49f948 9251 stride_mult = intel_fb_stride_alignment(dev_priv, fb->modifier[0],
40f46283 9252 fb->pixel_format);
bc8d7dff
DL
9253 fb->pitches[0] = (val & 0x3ff) * stride_mult;
9254
9255 aligned_height = intel_fb_align_height(dev, fb->height,
091df6cb
DV
9256 fb->pixel_format,
9257 fb->modifier[0]);
bc8d7dff 9258
f37b5c2b 9259 plane_config->size = fb->pitches[0] * aligned_height;
bc8d7dff
DL
9260
9261 DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
9262 pipe_name(pipe), fb->width, fb->height,
9263 fb->bits_per_pixel, base, fb->pitches[0],
9264 plane_config->size);
9265
2d14030b 9266 plane_config->fb = intel_fb;
bc8d7dff
DL
9267 return;
9268
9269error:
9270 kfree(fb);
9271}
9272
2fa2fe9a 9273static void ironlake_get_pfit_config(struct intel_crtc *crtc,
5cec258b 9274 struct intel_crtc_state *pipe_config)
2fa2fe9a
DV
9275{
9276 struct drm_device *dev = crtc->base.dev;
fac5e23e 9277 struct drm_i915_private *dev_priv = to_i915(dev);
2fa2fe9a
DV
9278 uint32_t tmp;
9279
9280 tmp = I915_READ(PF_CTL(crtc->pipe));
9281
9282 if (tmp & PF_ENABLE) {
fd4daa9c 9283 pipe_config->pch_pfit.enabled = true;
2fa2fe9a
DV
9284 pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
9285 pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));
cb8b2a30
DV
9286
9287 /* We currently do not free assignements of panel fitters on
9288 * ivb/hsw (since we don't use the higher upscaling modes which
9289 * differentiates them) so just WARN about this case for now. */
9290 if (IS_GEN7(dev)) {
9291 WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
9292 PF_PIPE_SEL_IVB(crtc->pipe));
9293 }
2fa2fe9a 9294 }
79e53945
JB
9295}
9296
/*
 * Read back the primary plane configuration left by the BIOS/boot
 * firmware on ILK-era hardware, filling @plane_config with a framebuffer
 * describing the scanout surface.  Bails out silently if the plane is
 * disabled or the framebuffer allocation fails.
 */
static void
ironlake_get_initial_plane_config(struct intel_crtc *crtc,
				  struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 val, base, offset;
	int pipe = crtc->pipe;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	val = I915_READ(DSPCNTR(pipe));
	if (!(val & DISPLAY_PLANE_ENABLE))
		return;

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		DRM_DEBUG_KMS("failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	/* Tiling is only readable from DSPCNTR on gen4+. */
	if (INTEL_INFO(dev)->gen >= 4) {
		if (val & DISPPLANE_TILED) {
			plane_config->tiling = I915_TILING_X;
			fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
		}
	}

	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
	fourcc = i9xx_format_to_fourcc(pixel_format);
	fb->pixel_format = fourcc;
	fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;

	base = I915_READ(DSPSURF(pipe)) & 0xfffff000;
	/* The offset register differs by platform and tiling mode. */
	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		offset = I915_READ(DSPOFFSET(pipe));
	} else {
		if (plane_config->tiling)
			offset = I915_READ(DSPTILEOFF(pipe));
		else
			offset = I915_READ(DSPLINOFF(pipe));
	}
	plane_config->base = base;

	/* PIPESRC fields store the dimensions minus one. */
	val = I915_READ(PIPESRC(pipe));
	fb->width = ((val >> 16) & 0xfff) + 1;
	fb->height = ((val >> 0) & 0xfff) + 1;

	val = I915_READ(DSPSTRIDE(pipe));
	fb->pitches[0] = val & 0xffffffc0;

	aligned_height = intel_fb_align_height(dev, fb->height,
					       fb->pixel_format,
					       fb->modifier[0]);

	plane_config->size = fb->pitches[0] * aligned_height;

	DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		      pipe_name(pipe), fb->width, fb->height,
		      fb->bits_per_pixel, base, fb->pitches[0],
		      plane_config->size);

	plane_config->fb = intel_fb;
}
9365
0e8ffe1b 9366static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
5cec258b 9367 struct intel_crtc_state *pipe_config)
0e8ffe1b
DV
9368{
9369 struct drm_device *dev = crtc->base.dev;
fac5e23e 9370 struct drm_i915_private *dev_priv = to_i915(dev);
1729050e 9371 enum intel_display_power_domain power_domain;
0e8ffe1b 9372 uint32_t tmp;
1729050e 9373 bool ret;
0e8ffe1b 9374
1729050e
ID
9375 power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
9376 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
930e8c9e
PZ
9377 return false;
9378
e143a21c 9379 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
8106ddbd 9380 pipe_config->shared_dpll = NULL;
eccb140b 9381
1729050e 9382 ret = false;
0e8ffe1b
DV
9383 tmp = I915_READ(PIPECONF(crtc->pipe));
9384 if (!(tmp & PIPECONF_ENABLE))
1729050e 9385 goto out;
0e8ffe1b 9386
42571aef
VS
9387 switch (tmp & PIPECONF_BPC_MASK) {
9388 case PIPECONF_6BPC:
9389 pipe_config->pipe_bpp = 18;
9390 break;
9391 case PIPECONF_8BPC:
9392 pipe_config->pipe_bpp = 24;
9393 break;
9394 case PIPECONF_10BPC:
9395 pipe_config->pipe_bpp = 30;
9396 break;
9397 case PIPECONF_12BPC:
9398 pipe_config->pipe_bpp = 36;
9399 break;
9400 default:
9401 break;
9402 }
9403
b5a9fa09
DV
9404 if (tmp & PIPECONF_COLOR_RANGE_SELECT)
9405 pipe_config->limited_color_range = true;
9406
ab9412ba 9407 if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
66e985c0 9408 struct intel_shared_dpll *pll;
8106ddbd 9409 enum intel_dpll_id pll_id;
66e985c0 9410
88adfff1
DV
9411 pipe_config->has_pch_encoder = true;
9412
627eb5a3
DV
9413 tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
9414 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
9415 FDI_DP_PORT_WIDTH_SHIFT) + 1;
72419203
DV
9416
9417 ironlake_get_fdi_m_n_config(crtc, pipe_config);
6c49f241 9418
2d1fe073 9419 if (HAS_PCH_IBX(dev_priv)) {
d9a7bc67
ID
9420 /*
9421 * The pipe->pch transcoder and pch transcoder->pll
9422 * mapping is fixed.
9423 */
8106ddbd 9424 pll_id = (enum intel_dpll_id) crtc->pipe;
c0d43d62
DV
9425 } else {
9426 tmp = I915_READ(PCH_DPLL_SEL);
9427 if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
8106ddbd 9428 pll_id = DPLL_ID_PCH_PLL_B;
c0d43d62 9429 else
8106ddbd 9430 pll_id= DPLL_ID_PCH_PLL_A;
c0d43d62 9431 }
66e985c0 9432
8106ddbd
ACO
9433 pipe_config->shared_dpll =
9434 intel_get_shared_dpll_by_id(dev_priv, pll_id);
9435 pll = pipe_config->shared_dpll;
66e985c0 9436
2edd6443
ACO
9437 WARN_ON(!pll->funcs.get_hw_state(dev_priv, pll,
9438 &pipe_config->dpll_hw_state));
c93f54cf
DV
9439
9440 tmp = pipe_config->dpll_hw_state.dpll;
9441 pipe_config->pixel_multiplier =
9442 ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
9443 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
18442d08
VS
9444
9445 ironlake_pch_clock_get(crtc, pipe_config);
6c49f241
DV
9446 } else {
9447 pipe_config->pixel_multiplier = 1;
627eb5a3
DV
9448 }
9449
1bd1bd80 9450 intel_get_pipe_timings(crtc, pipe_config);
bc58be60 9451 intel_get_pipe_src_size(crtc, pipe_config);
1bd1bd80 9452
2fa2fe9a
DV
9453 ironlake_get_pfit_config(crtc, pipe_config);
9454
1729050e
ID
9455 ret = true;
9456
9457out:
9458 intel_display_power_put(dev_priv, power_domain);
9459
9460 return ret;
0e8ffe1b
DV
9461}
9462
/*
 * Sanity-check (via I915_STATE_WARN) that everything that must be off
 * before LCPLL can be disabled really is off: all crtcs, the power well,
 * SPLL/WRPLLs, panel power, the backlight PWMs, the utility pin, PCH GTC
 * and interrupts.
 */
static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	struct intel_crtc *crtc;

	for_each_intel_crtc(dev, crtc)
		I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
			pipe_name(crtc->pipe));

	I915_STATE_WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
	I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n");
	I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
	I915_STATE_WARN(I915_READ(WRPLL_CTL(1)) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n");
	I915_STATE_WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n");
	I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
	     "CPU PWM1 enabled\n");
	/* PWM2 only exists on HSW. */
	if (IS_HASWELL(dev))
		I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
		     "CPU PWM2 enabled\n");
	I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
	     "PCH PWM1 enabled\n");
	I915_STATE_WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
	     "Utility pin enabled\n");
	I915_STATE_WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");

	/*
	 * In theory we can still leave IRQs enabled, as long as only the HPD
	 * interrupts remain enabled. We used to check for that, but since it's
	 * gen-specific and since we only disable LCPLL after we fully disable
	 * the interrupts, the check below should be enough.
	 */
	I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
}
9496
9ccd5aeb
PZ
9497static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
9498{
91c8a326 9499 struct drm_device *dev = &dev_priv->drm;
9ccd5aeb
PZ
9500
9501 if (IS_HASWELL(dev))
9502 return I915_READ(D_COMP_HSW);
9503 else
9504 return I915_READ(D_COMP_BDW);
9505}
9506
/*
 * Write the D_COMP register.  On HSW this goes through the pcode mailbox
 * (under the rps hw_lock); on BDW it is a plain MMIO write with a
 * posting read.
 */
static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
{
	struct drm_device *dev = &dev_priv->drm;

	if (IS_HASWELL(dev)) {
		mutex_lock(&dev_priv->rps.hw_lock);
		if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP,
					    val))
			DRM_ERROR("Failed to write to D_COMP\n");
		mutex_unlock(&dev_priv->rps.hw_lock);
	} else {
		I915_WRITE(D_COMP_BDW, val);
		POSTING_READ(D_COMP_BDW);
	}
}
9522
/*
 * This function implements pieces of two sequences from BSpec:
 * - Sequence for display software to disable LCPLL
 * - Sequence for display software to allow package C8+
 * The steps implemented here are just the steps that actually touch the LCPLL
 * register. Callers should take care of disabling all the display engine
 * functions, doing the mode unset, fixing interrupts, etc.
 *
 * @switch_to_fclk: route the CD clock to FCLK before disabling the PLL.
 * @allow_power_down: set LCPLL_POWER_DOWN_ALLOW once the PLL is unlocked.
 */
static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
			      bool switch_to_fclk, bool allow_power_down)
{
	uint32_t val;

	assert_can_disable_lcpll(dev_priv);

	val = I915_READ(LCPLL_CTL);

	if (switch_to_fclk) {
		val |= LCPLL_CD_SOURCE_FCLK;
		I915_WRITE(LCPLL_CTL, val);

		if (wait_for_us(I915_READ(LCPLL_CTL) &
				LCPLL_CD_SOURCE_FCLK_DONE, 1))
			DRM_ERROR("Switching to FCLK failed\n");

		/* Re-read: the register was just modified above. */
		val = I915_READ(LCPLL_CTL);
	}

	val |= LCPLL_PLL_DISABLE;
	I915_WRITE(LCPLL_CTL, val);
	POSTING_READ(LCPLL_CTL);

	/* The lock bit must drop within 1 us. */
	if (intel_wait_for_register(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 0, 1))
		DRM_ERROR("LCPLL still locked\n");

	val = hsw_read_dcomp(dev_priv);
	val |= D_COMP_COMP_DISABLE;
	hsw_write_dcomp(dev_priv, val);
	ndelay(100);

	if (wait_for((hsw_read_dcomp(dev_priv) & D_COMP_RCOMP_IN_PROGRESS) == 0,
		     1))
		DRM_ERROR("D_COMP RCOMP still in progress\n");

	if (allow_power_down) {
		val = I915_READ(LCPLL_CTL);
		val |= LCPLL_POWER_DOWN_ALLOW;
		I915_WRITE(LCPLL_CTL, val);
		POSTING_READ(LCPLL_CTL);
	}
}
9574
/*
 * Fully restores LCPLL, disallowing power down and switching back to LCPLL
 * source.
 */
static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
{
	uint32_t val;

	val = I915_READ(LCPLL_CTL);

	/* Already fully up (locked, enabled, on LCPLL, no power down)? */
	if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
		    LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
		return;

	/*
	 * Make sure we're not on PC8 state before disabling PC8, otherwise
	 * we'll hang the machine. To prevent PC8 state, just enable force_wake.
	 */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	if (val & LCPLL_POWER_DOWN_ALLOW) {
		val &= ~LCPLL_POWER_DOWN_ALLOW;
		I915_WRITE(LCPLL_CTL, val);
		POSTING_READ(LCPLL_CTL);
	}

	/* Re-enable compensation before bringing the PLL back. */
	val = hsw_read_dcomp(dev_priv);
	val |= D_COMP_COMP_FORCE;
	val &= ~D_COMP_COMP_DISABLE;
	hsw_write_dcomp(dev_priv, val);

	val = I915_READ(LCPLL_CTL);
	val &= ~LCPLL_PLL_DISABLE;
	I915_WRITE(LCPLL_CTL, val);

	/* Lock may take up to 5 us. */
	if (intel_wait_for_register(dev_priv,
				    LCPLL_CTL, LCPLL_PLL_LOCK, LCPLL_PLL_LOCK,
				    5))
		DRM_ERROR("LCPLL not locked yet\n");

	/* Switch the CD clock back from FCLK to LCPLL if needed. */
	if (val & LCPLL_CD_SOURCE_FCLK) {
		val = I915_READ(LCPLL_CTL);
		val &= ~LCPLL_CD_SOURCE_FCLK;
		I915_WRITE(LCPLL_CTL, val);

		if (wait_for_us((I915_READ(LCPLL_CTL) &
				 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
			DRM_ERROR("Switching back to LCPLL failed\n");
	}

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
	intel_update_cdclk(&dev_priv->drm);
}
9628
/*
 * Package states C8 and deeper are really deep PC states that can only be
 * reached when all the devices on the system allow it, so even if the graphics
 * device allows PC8+, it doesn't mean the system will actually get to these
 * states. Our driver only allows PC8+ when going into runtime PM.
 *
 * The requirements for PC8+ are that all the outputs are disabled, the power
 * well is disabled and most interrupts are disabled, and these are also
 * requirements for runtime PM. When these conditions are met, we manually do
 * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
 * to Fclk. If we're in PC8+ and we get an non-hotplug interrupt, we can hard
 * hang the machine.
 *
 * When we really reach PC8 or deeper states (not just when we allow it) we lose
 * the state of some registers, so when we come back from PC8+ we need to
 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
 * need to take care of the registers kept by RC6. Notice that this happens even
 * if we don't put the device in PCI D3 state (which is what currently happens
 * because of the runtime PM support).
 *
 * For more, read "Display Sequences for Package C8" on the hardware
 * documentation.
 */
void hsw_enable_pc8(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = &dev_priv->drm;
	uint32_t val;

	DRM_DEBUG_KMS("Enabling package C8+\n");

	/* LPT-LP: drop the PCH partition-level disable before entering PC8. */
	if (HAS_PCH_LPT_LP(dev)) {
		val = I915_READ(SOUTH_DSPCLK_GATE_D);
		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
	}

	lpt_disable_clkout_dp(dev);
	hsw_disable_lcpll(dev_priv, true, true);
}
9668
a14cb6fc 9669void hsw_disable_pc8(struct drm_i915_private *dev_priv)
c67a470b 9670{
91c8a326 9671 struct drm_device *dev = &dev_priv->drm;
c67a470b
PZ
9672 uint32_t val;
9673
c67a470b
PZ
9674 DRM_DEBUG_KMS("Disabling package C8+\n");
9675
9676 hsw_restore_lcpll(dev_priv);
c67a470b
PZ
9677 lpt_init_pch_refclk(dev);
9678
c2699524 9679 if (HAS_PCH_LPT_LP(dev)) {
c67a470b
PZ
9680 val = I915_READ(SOUTH_DSPCLK_GATE_D);
9681 val |= PCH_LP_PARTITION_LEVEL_DISABLE;
9682 I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
9683 }
c67a470b
PZ
9684}
9685
324513c0 9686static void bxt_modeset_commit_cdclk(struct drm_atomic_state *old_state)
f8437dd1 9687{
a821fc46 9688 struct drm_device *dev = old_state->dev;
1a617b77
ML
9689 struct intel_atomic_state *old_intel_state =
9690 to_intel_atomic_state(old_state);
9691 unsigned int req_cdclk = old_intel_state->dev_cdclk;
f8437dd1 9692
324513c0 9693 bxt_set_cdclk(to_i915(dev), req_cdclk);
f8437dd1
VK
9694}
9695
b432e5cf 9696/* compute the max rate for new configuration */
27c329ed 9697static int ilk_max_pixel_rate(struct drm_atomic_state *state)
b432e5cf 9698{
565602d7 9699 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
fac5e23e 9700 struct drm_i915_private *dev_priv = to_i915(state->dev);
565602d7
ML
9701 struct drm_crtc *crtc;
9702 struct drm_crtc_state *cstate;
27c329ed 9703 struct intel_crtc_state *crtc_state;
565602d7
ML
9704 unsigned max_pixel_rate = 0, i;
9705 enum pipe pipe;
b432e5cf 9706
565602d7
ML
9707 memcpy(intel_state->min_pixclk, dev_priv->min_pixclk,
9708 sizeof(intel_state->min_pixclk));
27c329ed 9709
565602d7
ML
9710 for_each_crtc_in_state(state, crtc, cstate, i) {
9711 int pixel_rate;
27c329ed 9712
565602d7
ML
9713 crtc_state = to_intel_crtc_state(cstate);
9714 if (!crtc_state->base.enable) {
9715 intel_state->min_pixclk[i] = 0;
b432e5cf 9716 continue;
565602d7 9717 }
b432e5cf 9718
27c329ed 9719 pixel_rate = ilk_pipe_pixel_rate(crtc_state);
b432e5cf
VS
9720
9721 /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
565602d7 9722 if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
b432e5cf
VS
9723 pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95);
9724
565602d7 9725 intel_state->min_pixclk[i] = pixel_rate;
b432e5cf
VS
9726 }
9727
565602d7
ML
9728 for_each_pipe(dev_priv, pipe)
9729 max_pixel_rate = max(intel_state->min_pixclk[pipe], max_pixel_rate);
9730
b432e5cf
VS
9731 return max_pixel_rate;
9732}
9733
/*
 * Reprogram the BDW CD clock to @cdclk kHz: notify pcode, park the CD
 * clock on FCLK, rewrite the LCPLL frequency select, switch back, and
 * tell pcode the resulting display frequency.  @cdclk must be one of the
 * four legal BDW values (337500/450000/540000/675000).
 */
static void broadwell_set_cdclk(struct drm_device *dev, int cdclk)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	uint32_t val, data;
	int ret;

	/* Changing cdclk is only legal with LCPLL locked and fully enabled. */
	if (WARN((I915_READ(LCPLL_CTL) &
		  (LCPLL_PLL_DISABLE | LCPLL_PLL_LOCK |
		   LCPLL_CD_CLOCK_DISABLE | LCPLL_ROOT_CD_CLOCK_DISABLE |
		   LCPLL_CD2X_CLOCK_DISABLE | LCPLL_POWER_DOWN_ALLOW |
		   LCPLL_CD_SOURCE_FCLK)) != LCPLL_PLL_LOCK,
		 "trying to change cdclk frequency with cdclk not enabled\n"))
		return;

	/* Ask pcode for permission before touching the display frequency. */
	mutex_lock(&dev_priv->rps.hw_lock);
	ret = sandybridge_pcode_write(dev_priv,
				      BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0);
	mutex_unlock(&dev_priv->rps.hw_lock);
	if (ret) {
		DRM_ERROR("failed to inform pcode about cdclk change\n");
		return;
	}

	/* Park the CD clock on FCLK while the PLL frequency changes. */
	val = I915_READ(LCPLL_CTL);
	val |= LCPLL_CD_SOURCE_FCLK;
	I915_WRITE(LCPLL_CTL, val);

	if (wait_for_us(I915_READ(LCPLL_CTL) &
			LCPLL_CD_SOURCE_FCLK_DONE, 1))
		DRM_ERROR("Switching to FCLK failed\n");

	val = I915_READ(LCPLL_CTL);
	val &= ~LCPLL_CLK_FREQ_MASK;

	/* 'data' is the frequency code reported back to pcode below. */
	switch (cdclk) {
	case 450000:
		val |= LCPLL_CLK_FREQ_450;
		data = 0;
		break;
	case 540000:
		val |= LCPLL_CLK_FREQ_54O_BDW;
		data = 1;
		break;
	case 337500:
		val |= LCPLL_CLK_FREQ_337_5_BDW;
		data = 2;
		break;
	case 675000:
		val |= LCPLL_CLK_FREQ_675_BDW;
		data = 3;
		break;
	default:
		WARN(1, "invalid cdclk frequency\n");
		return;
	}

	I915_WRITE(LCPLL_CTL, val);

	/* Switch the CD clock back from FCLK to the new LCPLL frequency. */
	val = I915_READ(LCPLL_CTL);
	val &= ~LCPLL_CD_SOURCE_FCLK;
	I915_WRITE(LCPLL_CTL, val);

	if (wait_for_us((I915_READ(LCPLL_CTL) &
			LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
		DRM_ERROR("Switching back to LCPLL failed\n");

	mutex_lock(&dev_priv->rps.hw_lock);
	sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, data);
	mutex_unlock(&dev_priv->rps.hw_lock);

	/* CDCLK_FREQ stores the frequency in MHz minus one. */
	I915_WRITE(CDCLK_FREQ, DIV_ROUND_CLOSEST(cdclk, 1000) - 1);

	intel_update_cdclk(dev);

	WARN(cdclk != dev_priv->cdclk_freq,
	     "cdclk requested %d kHz but got %d kHz\n",
	     cdclk, dev_priv->cdclk_freq);
}
9812
587c7914
VS
/* Pick the lowest legal BDW cdclk (kHz) that can feed @max_pixclk. */
static int broadwell_calc_cdclk(int max_pixclk)
{
	static const int bdw_cdclk_table[] = { 337500, 450000, 540000, 675000 };
	int i;

	for (i = 0; i < (int)(sizeof(bdw_cdclk_table) / sizeof(bdw_cdclk_table[0])) - 1; i++) {
		if (max_pixclk <= bdw_cdclk_table[i])
			return bdw_cdclk_table[i];
	}

	/* Anything faster needs the maximum cdclk. */
	return 675000;
}
9824
27c329ed 9825static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state)
b432e5cf 9826{
27c329ed 9827 struct drm_i915_private *dev_priv = to_i915(state->dev);
1a617b77 9828 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
27c329ed 9829 int max_pixclk = ilk_max_pixel_rate(state);
b432e5cf
VS
9830 int cdclk;
9831
9832 /*
9833 * FIXME should also account for plane ratio
9834 * once 64bpp pixel formats are supported.
9835 */
587c7914 9836 cdclk = broadwell_calc_cdclk(max_pixclk);
b432e5cf 9837
b432e5cf 9838 if (cdclk > dev_priv->max_cdclk_freq) {
63ba534e
ML
9839 DRM_DEBUG_KMS("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
9840 cdclk, dev_priv->max_cdclk_freq);
9841 return -EINVAL;
b432e5cf
VS
9842 }
9843
1a617b77
ML
9844 intel_state->cdclk = intel_state->dev_cdclk = cdclk;
9845 if (!intel_state->active_crtcs)
587c7914 9846 intel_state->dev_cdclk = broadwell_calc_cdclk(0);
b432e5cf
VS
9847
9848 return 0;
9849}
9850
27c329ed 9851static void broadwell_modeset_commit_cdclk(struct drm_atomic_state *old_state)
b432e5cf 9852{
27c329ed 9853 struct drm_device *dev = old_state->dev;
1a617b77
ML
9854 struct intel_atomic_state *old_intel_state =
9855 to_intel_atomic_state(old_state);
9856 unsigned req_cdclk = old_intel_state->dev_cdclk;
b432e5cf 9857
27c329ed 9858 broadwell_set_cdclk(dev, req_cdclk);
b432e5cf
VS
9859}
9860
c89e39f3
CT
/*
 * Atomic-check hook: compute the SKL cdclk needed by @state for the
 * currently selected cdclk PLL VCO, clamping (with an error) rather
 * than failing when the request exceeds the hardware maximum.
 */
static int skl_modeset_calc_cdclk(struct drm_atomic_state *state)
{
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_i915_private *dev_priv = to_i915(state->dev);
	const int max_pixclk = ilk_max_pixel_rate(state);
	int vco = intel_state->cdclk_pll_vco;
	int cdclk;

	/*
	 * FIXME should also account for plane ratio
	 * once 64bpp pixel formats are supported.
	 */
	cdclk = skl_calc_cdclk(max_pixclk, vco);

	/*
	 * FIXME move the cdclk calculation to
	 * compute_config() so we can fail gracefully.
	 */
	if (cdclk > dev_priv->max_cdclk_freq) {
		DRM_ERROR("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
			  cdclk, dev_priv->max_cdclk_freq);
		cdclk = dev_priv->max_cdclk_freq;
	}

	intel_state->cdclk = intel_state->dev_cdclk = cdclk;
	/* With no active pipes, drop to the minimum cdclk for this VCO. */
	if (!intel_state->active_crtcs)
		intel_state->dev_cdclk = skl_calc_cdclk(0, vco);

	return 0;
}
9891
9892static void skl_modeset_commit_cdclk(struct drm_atomic_state *old_state)
9893{
1cd593e0
VS
9894 struct drm_i915_private *dev_priv = to_i915(old_state->dev);
9895 struct intel_atomic_state *intel_state = to_intel_atomic_state(old_state);
9896 unsigned int req_cdclk = intel_state->dev_cdclk;
9897 unsigned int req_vco = intel_state->cdclk_pll_vco;
c89e39f3 9898
1cd593e0 9899 skl_set_cdclk(dev_priv, req_cdclk, req_vco);
c89e39f3
CT
9900}
9901
190f68c5
ACO
9902static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
9903 struct intel_crtc_state *crtc_state)
09b4ddf9 9904{
af3997b5
MK
9905 struct intel_encoder *intel_encoder =
9906 intel_ddi_get_crtc_new_encoder(crtc_state);
9907
9908 if (intel_encoder->type != INTEL_OUTPUT_DSI) {
9909 if (!intel_ddi_pll_select(crtc, crtc_state))
9910 return -EINVAL;
9911 }
716c2e55 9912
c7653199 9913 crtc->lowfreq_avail = false;
644cef34 9914
c8f7a0db 9915 return 0;
79e53945
JB
9916}
9917
3760b59c
S
9918static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
9919 enum port port,
9920 struct intel_crtc_state *pipe_config)
9921{
8106ddbd
ACO
9922 enum intel_dpll_id id;
9923
3760b59c
S
9924 switch (port) {
9925 case PORT_A:
9926 pipe_config->ddi_pll_sel = SKL_DPLL0;
08250c4b 9927 id = DPLL_ID_SKL_DPLL0;
3760b59c
S
9928 break;
9929 case PORT_B:
9930 pipe_config->ddi_pll_sel = SKL_DPLL1;
08250c4b 9931 id = DPLL_ID_SKL_DPLL1;
3760b59c
S
9932 break;
9933 case PORT_C:
9934 pipe_config->ddi_pll_sel = SKL_DPLL2;
08250c4b 9935 id = DPLL_ID_SKL_DPLL2;
3760b59c
S
9936 break;
9937 default:
9938 DRM_ERROR("Incorrect port type\n");
8106ddbd 9939 return;
3760b59c 9940 }
8106ddbd
ACO
9941
9942 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
3760b59c
S
9943}
9944
96b7dfb7
S
/*
 * Read back from DPLL_CTRL2 which shared DPLL the given DDI @port is
 * using on SKL/KBL and record it in @pipe_config.
 */
static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
				enum port port,
				struct intel_crtc_state *pipe_config)
{
	enum intel_dpll_id id;
	u32 temp;

	/* Each port has a 3-bit field; the clock select starts at bit 1 of it. */
	temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
	pipe_config->ddi_pll_sel = temp >> (port * 3 + 1);

	switch (pipe_config->ddi_pll_sel) {
	case SKL_DPLL0:
		id = DPLL_ID_SKL_DPLL0;
		break;
	case SKL_DPLL1:
		id = DPLL_ID_SKL_DPLL1;
		break;
	case SKL_DPLL2:
		id = DPLL_ID_SKL_DPLL2;
		break;
	case SKL_DPLL3:
		id = DPLL_ID_SKL_DPLL3;
		break;
	default:
		/* Unknown selector: leave shared_dpll untouched. */
		MISSING_CASE(pipe_config->ddi_pll_sel);
		return;
	}

	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
}
9975
7d2c8175
DL
/*
 * Read back from PORT_CLK_SEL which PLL drives DDI @port on HSW/BDW and
 * record the matching shared DPLL in @pipe_config.
 */
static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
				enum port port,
				struct intel_crtc_state *pipe_config)
{
	enum intel_dpll_id id;

	pipe_config->ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));

	switch (pipe_config->ddi_pll_sel) {
	case PORT_CLK_SEL_WRPLL1:
		id = DPLL_ID_WRPLL1;
		break;
	case PORT_CLK_SEL_WRPLL2:
		id = DPLL_ID_WRPLL2;
		break;
	case PORT_CLK_SEL_SPLL:
		id = DPLL_ID_SPLL;
		break;
	case PORT_CLK_SEL_LCPLL_810:
		id = DPLL_ID_LCPLL_810;
		break;
	case PORT_CLK_SEL_LCPLL_1350:
		id = DPLL_ID_LCPLL_1350;
		break;
	case PORT_CLK_SEL_LCPLL_2700:
		id = DPLL_ID_LCPLL_2700;
		break;
	default:
		MISSING_CASE(pipe_config->ddi_pll_sel);
		/* fall through */
	case PORT_CLK_SEL_NONE:
		/* Port not clocked by any PLL; leave shared_dpll untouched. */
		return;
	}

	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
}
10012
cf30429e
JN
/*
 * Determine which CPU transcoder feeds this crtc (handling the movable
 * eDP transcoder) and whether that transcoder's pipe is enabled. Any
 * power domain grabbed here is recorded in *power_domain_mask for the
 * caller to release.
 */
static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config,
				     unsigned long *power_domain_mask)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	u32 tmp;

	/*
	 * The pipe->transcoder mapping is fixed with the exception of the eDP
	 * transcoder handled below.
	 */
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;

	/*
	 * XXX: Do intel_display_power_get_if_enabled before reading this (for
	 * consistency and less surprising code; it's in always on power).
	 */
	tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
	if (tmp & TRANS_DDI_FUNC_ENABLE) {
		enum pipe trans_edp_pipe;
		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
		default:
			/* Unknown encoding: warn and assume pipe A. */
			WARN(1, "unknown pipe linked to edp transcoder\n");
			/* fall through */
		case TRANS_DDI_EDP_INPUT_A_ONOFF:
		case TRANS_DDI_EDP_INPUT_A_ON:
			trans_edp_pipe = PIPE_A;
			break;
		case TRANS_DDI_EDP_INPUT_B_ONOFF:
			trans_edp_pipe = PIPE_B;
			break;
		case TRANS_DDI_EDP_INPUT_C_ONOFF:
			trans_edp_pipe = PIPE_C;
			break;
		}

		if (trans_edp_pipe == crtc->pipe)
			pipe_config->cpu_transcoder = TRANSCODER_EDP;
	}

	power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return false;
	*power_domain_mask |= BIT(power_domain);

	tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));

	return tmp & PIPECONF_ENABLE;
}
10063
4d1de975
JN
/*
 * Detect whether a BXT DSI transcoder (port A or C) drives this crtc.
 * Fills in cpu_transcoder/has_dsi_encoder on a match and returns the
 * resulting has_dsi_encoder. Power domains grabbed along the way are
 * recorded in *power_domain_mask for the caller to release.
 */
static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
					 struct intel_crtc_state *pipe_config,
					 unsigned long *power_domain_mask)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum intel_display_power_domain power_domain;
	enum port port;
	enum transcoder cpu_transcoder;
	u32 tmp;

	pipe_config->has_dsi_encoder = false;

	for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
		if (port == PORT_A)
			cpu_transcoder = TRANSCODER_DSI_A;
		else
			cpu_transcoder = TRANSCODER_DSI_C;

		power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
		if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
			continue;
		*power_domain_mask |= BIT(power_domain);

		/*
		 * The PLL needs to be enabled with a valid divider
		 * configuration, otherwise accessing DSI registers will hang
		 * the machine. See BSpec North Display Engine
		 * registers/MIPI[BXT]. We can break out here early, since we
		 * need the same DSI PLL to be enabled for both DSI ports.
		 */
		if (!intel_dsi_pll_is_enabled(dev_priv))
			break;

		/* XXX: this works for video mode only */
		tmp = I915_READ(BXT_MIPI_PORT_CTRL(port));
		if (!(tmp & DPI_ENABLE))
			continue;

		/* The port must be routed to this crtc's pipe. */
		tmp = I915_READ(MIPI_CTRL(port));
		if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
			continue;

		pipe_config->cpu_transcoder = cpu_transcoder;
		pipe_config->has_dsi_encoder = true;
		break;
	}

	return pipe_config->has_dsi_encoder;
}
10114
/*
 * Read back DDI port state for an active transcoder: which port drives
 * it, which shared DPLL that port uses, and — on pre-gen9 with port E —
 * the PCH/FDI configuration.
 */
static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
				       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_shared_dpll *pll;
	enum port port;
	uint32_t tmp;

	tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));

	port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;

	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
		skylake_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_BROXTON(dev))
		bxt_get_ddi_pll(dev_priv, port, pipe_config);
	else
		haswell_get_ddi_pll(dev_priv, port, pipe_config);

	pll = pipe_config->shared_dpll;
	if (pll) {
		/* A PLL in use by a port had better report itself enabled. */
		WARN_ON(!pll->funcs.get_hw_state(dev_priv, pll,
						 &pipe_config->dpll_hw_state));
	}

	/*
	 * Haswell has only one FDI/PCH transcoder A, which is connected to
	 * DDI E. So just check whether this pipe is wired to DDI E and whether
	 * the PCH transcoder is on.
	 */
	if (INTEL_INFO(dev)->gen < 9 &&
	    (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
		pipe_config->has_pch_encoder = true;

		tmp = I915_READ(FDI_RX_CTL(PIPE_A));
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;

		ironlake_get_fdi_m_n_config(crtc, pipe_config);
	}
}
10157
0e8ffe1b 10158static bool haswell_get_pipe_config(struct intel_crtc *crtc,
5cec258b 10159 struct intel_crtc_state *pipe_config)
0e8ffe1b
DV
10160{
10161 struct drm_device *dev = crtc->base.dev;
fac5e23e 10162 struct drm_i915_private *dev_priv = to_i915(dev);
1729050e
ID
10163 enum intel_display_power_domain power_domain;
10164 unsigned long power_domain_mask;
cf30429e 10165 bool active;
0e8ffe1b 10166
1729050e
ID
10167 power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
10168 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
b5482bd0 10169 return false;
1729050e
ID
10170 power_domain_mask = BIT(power_domain);
10171
8106ddbd 10172 pipe_config->shared_dpll = NULL;
c0d43d62 10173
cf30429e 10174 active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_mask);
eccb140b 10175
4d1de975
JN
10176 if (IS_BROXTON(dev_priv)) {
10177 bxt_get_dsi_transcoder_state(crtc, pipe_config,
10178 &power_domain_mask);
10179 WARN_ON(active && pipe_config->has_dsi_encoder);
10180 if (pipe_config->has_dsi_encoder)
10181 active = true;
10182 }
10183
cf30429e 10184 if (!active)
1729050e 10185 goto out;
0e8ffe1b 10186
4d1de975
JN
10187 if (!pipe_config->has_dsi_encoder) {
10188 haswell_get_ddi_port_state(crtc, pipe_config);
10189 intel_get_pipe_timings(crtc, pipe_config);
10190 }
627eb5a3 10191
bc58be60 10192 intel_get_pipe_src_size(crtc, pipe_config);
1bd1bd80 10193
05dc698c
LL
10194 pipe_config->gamma_mode =
10195 I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK;
10196
a1b2278e
CK
10197 if (INTEL_INFO(dev)->gen >= 9) {
10198 skl_init_scalers(dev, crtc, pipe_config);
10199 }
10200
af99ceda
CK
10201 if (INTEL_INFO(dev)->gen >= 9) {
10202 pipe_config->scaler_state.scaler_id = -1;
10203 pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX);
10204 }
10205
1729050e
ID
10206 power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
10207 if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
10208 power_domain_mask |= BIT(power_domain);
1c132b44 10209 if (INTEL_INFO(dev)->gen >= 9)
bd2e244f 10210 skylake_get_pfit_config(crtc, pipe_config);
ff6d9f55 10211 else
1c132b44 10212 ironlake_get_pfit_config(crtc, pipe_config);
bd2e244f 10213 }
88adfff1 10214
e59150dc
JB
10215 if (IS_HASWELL(dev))
10216 pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
10217 (I915_READ(IPS_CTL) & IPS_ENABLE);
42db64ef 10218
4d1de975
JN
10219 if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
10220 !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
ebb69c95
CT
10221 pipe_config->pixel_multiplier =
10222 I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
10223 } else {
10224 pipe_config->pixel_multiplier = 1;
10225 }
6c49f241 10226
1729050e
ID
10227out:
10228 for_each_power_domain(power_domain, power_domain_mask)
10229 intel_display_power_put(dev_priv, power_domain);
10230
cf30429e 10231 return active;
0e8ffe1b
DV
10232}
10233
55a08b3f
ML
/*
 * Program the 845G/865G cursor. These chips can only change the
 * base/size/stride registers while the cursor is disabled, hence the
 * explicit disable step before reprogramming.
 */
static void i845_update_cursor(struct drm_crtc *crtc, u32 base,
			       const struct intel_plane_state *plane_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t cntl = 0, size = 0;

	if (plane_state && plane_state->visible) {
		unsigned int width = plane_state->base.crtc_w;
		unsigned int height = plane_state->base.crtc_h;
		unsigned int stride = roundup_pow_of_two(width) * 4;

		/* Only a few power-of-two strides are legal. */
		switch (stride) {
		default:
			WARN_ONCE(1, "Invalid cursor width/stride, width=%u, stride=%u\n",
				  width, stride);
			stride = 256;
			/* fallthrough */
		case 256:
		case 512:
		case 1024:
		case 2048:
			break;
		}

		cntl |= CURSOR_ENABLE |
			CURSOR_GAMMA_ENABLE |
			CURSOR_FORMAT_ARGB |
			CURSOR_STRIDE(stride);

		size = (height << 12) | width;
	}

	if (intel_crtc->cursor_cntl != 0 &&
	    (intel_crtc->cursor_base != base ||
	     intel_crtc->cursor_size != size ||
	     intel_crtc->cursor_cntl != cntl)) {
		/* On these chipsets we can only modify the base/size/stride
		 * whilst the cursor is disabled.
		 */
		I915_WRITE(CURCNTR(PIPE_A), 0);
		POSTING_READ(CURCNTR(PIPE_A));
		intel_crtc->cursor_cntl = 0;
	}

	if (intel_crtc->cursor_base != base) {
		I915_WRITE(CURBASE(PIPE_A), base);
		intel_crtc->cursor_base = base;
	}

	if (intel_crtc->cursor_size != size) {
		I915_WRITE(CURSIZE, size);
		intel_crtc->cursor_size = size;
	}

	/* Re-enable (or leave disabled) with the final control value. */
	if (intel_crtc->cursor_cntl != cntl) {
		I915_WRITE(CURCNTR(PIPE_A), cntl);
		POSTING_READ(CURCNTR(PIPE_A));
		intel_crtc->cursor_cntl = cntl;
	}
}
10296
55a08b3f
ML
/*
 * Program the gen2+ (non-845/865) cursor. The base address write
 * latches on the next vblank.
 */
static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base,
			       const struct intel_plane_state *plane_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	uint32_t cntl = 0;

	if (plane_state && plane_state->visible) {
		cntl = MCURSOR_GAMMA_ENABLE;
		/* Only 64/128/256 ARGB cursor modes are supported. */
		switch (plane_state->base.crtc_w) {
		case 64:
			cntl |= CURSOR_MODE_64_ARGB_AX;
			break;
		case 128:
			cntl |= CURSOR_MODE_128_ARGB_AX;
			break;
		case 256:
			cntl |= CURSOR_MODE_256_ARGB_AX;
			break;
		default:
			MISSING_CASE(plane_state->base.crtc_w);
			return;
		}
		cntl |= pipe << 28; /* Connect to correct pipe */

		if (HAS_DDI(dev))
			cntl |= CURSOR_PIPE_CSC_ENABLE;

		if (plane_state->base.rotation == BIT(DRM_ROTATE_180))
			cntl |= CURSOR_ROTATE_180;
	}

	if (intel_crtc->cursor_cntl != cntl) {
		I915_WRITE(CURCNTR(pipe), cntl);
		POSTING_READ(CURCNTR(pipe));
		intel_crtc->cursor_cntl = cntl;
	}

	/* and commit changes on next vblank */
	I915_WRITE(CURBASE(pipe), base);
	POSTING_READ(CURBASE(pipe));

	intel_crtc->cursor_base = base;
}
10343
/* If no part of the cursor is visible on the framebuffer, then the GPU may hang... */
static void intel_crtc_update_cursor(struct drm_crtc *crtc,
				     const struct intel_plane_state *plane_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 base = intel_crtc->cursor_addr;
	u32 pos = 0;

	if (plane_state) {
		int x = plane_state->base.crtc_x;
		int y = plane_state->base.crtc_y;

		/* CURPOS encodes negative coordinates as sign + magnitude. */
		if (x < 0) {
			pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
			x = -x;
		}
		pos |= x << CURSOR_X_SHIFT;

		if (y < 0) {
			pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
			y = -y;
		}
		pos |= y << CURSOR_Y_SHIFT;

		/* ILK+ do this automagically */
		if (HAS_GMCH_DISPLAY(dev) &&
		    plane_state->base.rotation == BIT(DRM_ROTATE_180)) {
			/* For 180° rotation point base at the last pixel (4 bytes/px). */
			base += (plane_state->base.crtc_h *
				 plane_state->base.crtc_w - 1) * 4;
		}
	}

	I915_WRITE(CURPOS(pipe), pos);

	/* 845G/865G have their own cursor programming sequence. */
	if (IS_845G(dev) || IS_I865G(dev))
		i845_update_cursor(crtc, base, plane_state);
	else
		i9xx_update_cursor(crtc, base, plane_state);
}
10386
dc41c154
VS
/*
 * Validate a requested cursor size for this device. Returns true when
 * the hardware can display a cursor of @width x @height.
 */
static bool cursor_size_ok(struct drm_device *dev,
			   uint32_t width, uint32_t height)
{
	if (width == 0 || height == 0)
		return false;

	/*
	 * 845g/865g are special in that they are only limited by
	 * the width of their cursors, the height is arbitrary up to
	 * the precision of the register. Everything else requires
	 * square cursors, limited to a few power-of-two sizes.
	 */
	if (IS_845G(dev) || IS_I865G(dev)) {
		if ((width & 63) != 0)
			return false;

		if (width > (IS_845G(dev) ? 64 : 512))
			return false;

		if (height > 1023)
			return false;
	} else {
		/* width | height only hits a case label for square sizes */
		switch (width | height) {
		case 256:
		case 128:
			if (IS_GEN2(dev))
				return false;
			/* fallthrough */
		case 64:
			break;
		default:
			return false;
		}
	}

	return true;
}
10423
79e53945
JB
/* VESA 640x480x72Hz mode to set on the pipe while running load detection */
static struct drm_display_mode load_detect_mode = {
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};
10429
a8bb6818
DV
10430struct drm_framebuffer *
10431__intel_framebuffer_create(struct drm_device *dev,
10432 struct drm_mode_fb_cmd2 *mode_cmd,
10433 struct drm_i915_gem_object *obj)
d2dff872
CW
10434{
10435 struct intel_framebuffer *intel_fb;
10436 int ret;
10437
10438 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
dcb1394e 10439 if (!intel_fb)
d2dff872 10440 return ERR_PTR(-ENOMEM);
d2dff872
CW
10441
10442 ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
dd4916c5
DV
10443 if (ret)
10444 goto err;
d2dff872
CW
10445
10446 return &intel_fb->base;
dcb1394e 10447
dd4916c5 10448err:
dd4916c5 10449 kfree(intel_fb);
dd4916c5 10450 return ERR_PTR(ret);
d2dff872
CW
10451}
10452
b5ea642a 10453static struct drm_framebuffer *
a8bb6818
DV
10454intel_framebuffer_create(struct drm_device *dev,
10455 struct drm_mode_fb_cmd2 *mode_cmd,
10456 struct drm_i915_gem_object *obj)
10457{
10458 struct drm_framebuffer *fb;
10459 int ret;
10460
10461 ret = i915_mutex_lock_interruptible(dev);
10462 if (ret)
10463 return ERR_PTR(ret);
10464 fb = __intel_framebuffer_create(dev, mode_cmd, obj);
10465 mutex_unlock(&dev->struct_mutex);
10466
10467 return fb;
10468}
10469
d2dff872
CW
10470static u32
10471intel_framebuffer_pitch_for_width(int width, int bpp)
10472{
10473 u32 pitch = DIV_ROUND_UP(width * bpp, 8);
10474 return ALIGN(pitch, 64);
10475}
10476
10477static u32
10478intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
10479{
10480 u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
1267a26b 10481 return PAGE_ALIGN(pitch * mode->vdisplay);
d2dff872
CW
10482}
10483
10484static struct drm_framebuffer *
10485intel_framebuffer_create_for_mode(struct drm_device *dev,
10486 struct drm_display_mode *mode,
10487 int depth, int bpp)
10488{
dcb1394e 10489 struct drm_framebuffer *fb;
d2dff872 10490 struct drm_i915_gem_object *obj;
0fed39bd 10491 struct drm_mode_fb_cmd2 mode_cmd = { 0 };
d2dff872 10492
d37cd8a8 10493 obj = i915_gem_object_create(dev,
d2dff872 10494 intel_framebuffer_size_for_mode(mode, bpp));
fe3db79b
CW
10495 if (IS_ERR(obj))
10496 return ERR_CAST(obj);
d2dff872
CW
10497
10498 mode_cmd.width = mode->hdisplay;
10499 mode_cmd.height = mode->vdisplay;
308e5bcb
JB
10500 mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width,
10501 bpp);
5ca0c34a 10502 mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
d2dff872 10503
dcb1394e
LW
10504 fb = intel_framebuffer_create(dev, &mode_cmd, obj);
10505 if (IS_ERR(fb))
10506 drm_gem_object_unreference_unlocked(&obj->base);
10507
10508 return fb;
d2dff872
CW
10509}
10510
/*
 * If the fbdev framebuffer is large enough to display @mode, return it
 * with an extra reference taken (caller must drop it); otherwise return
 * NULL. Always NULL when fbdev emulation is compiled out.
 */
static struct drm_framebuffer *
mode_fits_in_fbdev(struct drm_device *dev,
		   struct drm_display_mode *mode)
{
#ifdef CONFIG_DRM_FBDEV_EMULATION
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj;
	struct drm_framebuffer *fb;

	if (!dev_priv->fbdev)
		return NULL;

	if (!dev_priv->fbdev->fb)
		return NULL;

	obj = dev_priv->fbdev->fb->obj;
	BUG_ON(!obj);

	fb = &dev_priv->fbdev->fb->base;
	/* A narrower pitch can't hold one scanline of the requested mode. */
	if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
							       fb->bits_per_pixel))
		return NULL;

	if (obj->base.size < mode->vdisplay * fb->pitches[0])
		return NULL;

	/* Caller inherits this reference. */
	drm_framebuffer_reference(fb);
	return fb;
#else
	return NULL;
#endif
}
10543
d3a40d1b
ACO
10544static int intel_modeset_setup_plane_state(struct drm_atomic_state *state,
10545 struct drm_crtc *crtc,
10546 struct drm_display_mode *mode,
10547 struct drm_framebuffer *fb,
10548 int x, int y)
10549{
10550 struct drm_plane_state *plane_state;
10551 int hdisplay, vdisplay;
10552 int ret;
10553
10554 plane_state = drm_atomic_get_plane_state(state, crtc->primary);
10555 if (IS_ERR(plane_state))
10556 return PTR_ERR(plane_state);
10557
10558 if (mode)
10559 drm_crtc_get_hv_timing(mode, &hdisplay, &vdisplay);
10560 else
10561 hdisplay = vdisplay = 0;
10562
10563 ret = drm_atomic_set_crtc_for_plane(plane_state, fb ? crtc : NULL);
10564 if (ret)
10565 return ret;
10566 drm_atomic_set_fb_for_plane(plane_state, fb);
10567 plane_state->crtc_x = 0;
10568 plane_state->crtc_y = 0;
10569 plane_state->crtc_w = hdisplay;
10570 plane_state->crtc_h = vdisplay;
10571 plane_state->src_x = x << 16;
10572 plane_state->src_y = y << 16;
10573 plane_state->src_w = hdisplay << 16;
10574 plane_state->src_h = vdisplay << 16;
10575
10576 return 0;
10577}
10578
/*
 * intel_get_load_detect_pipe - temporarily light up a pipe for load detection
 * @connector: connector to probe
 * @mode: mode to use, or NULL for the built-in 640x480 load_detect_mode
 * @old: storage for the state needed to undo this later
 * @ctx: modeset acquire context; locking conflicts retry via backoff
 *
 * Commits a minimal atomic state that drives @connector from its current
 * crtc (or the first idle one) so load detection can run. On success the
 * state needed to undo everything is stashed in @old->restore_state for
 * intel_release_load_detect_pipe(). Returns true on success.
 */
bool intel_get_load_detect_pipe(struct drm_connector *connector,
				struct drm_display_mode *mode,
				struct intel_load_detect_pipe *old,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_crtc *intel_crtc;
	struct intel_encoder *intel_encoder =
		intel_attached_encoder(connector);
	struct drm_crtc *possible_crtc;
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_crtc *crtc = NULL;
	struct drm_device *dev = encoder->dev;
	struct drm_framebuffer *fb;
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_atomic_state *state = NULL, *restore_state = NULL;
	struct drm_connector_state *connector_state;
	struct intel_crtc_state *crtc_state;
	int ret, i = -1;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, connector->name,
		      encoder->base.id, encoder->name);

	old->restore_state = NULL;

retry:
	ret = drm_modeset_lock(&config->connection_mutex, ctx);
	if (ret)
		goto fail;

	/*
	 * Algorithm gets a little messy:
	 *
	 * - if the connector already has an assigned crtc, use it (but make
	 *   sure it's on first)
	 *
	 * - try to find the first unused crtc that can drive this connector,
	 *   and use that if we find one
	 */

	/* See if we already have a CRTC for this connector */
	if (connector->state->crtc) {
		crtc = connector->state->crtc;

		ret = drm_modeset_lock(&crtc->mutex, ctx);
		if (ret)
			goto fail;

		/* Make sure the crtc and connector are running */
		goto found;
	}

	/* Find an unused one (if possible) */
	for_each_crtc(dev, possible_crtc) {
		i++;
		/* Skip crtcs this encoder cannot drive. */
		if (!(encoder->possible_crtcs & (1 << i)))
			continue;

		ret = drm_modeset_lock(&possible_crtc->mutex, ctx);
		if (ret)
			goto fail;

		if (possible_crtc->state->enable) {
			drm_modeset_unlock(&possible_crtc->mutex);
			continue;
		}

		crtc = possible_crtc;
		break;
	}

	/*
	 * If we didn't find an unused CRTC, don't use any.
	 */
	if (!crtc) {
		DRM_DEBUG_KMS("no pipe available for load-detect\n");
		goto fail;
	}

found:
	intel_crtc = to_intel_crtc(crtc);

	ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
	if (ret)
		goto fail;

	/* One state to commit now, one to restore the old setup later. */
	state = drm_atomic_state_alloc(dev);
	restore_state = drm_atomic_state_alloc(dev);
	if (!state || !restore_state) {
		ret = -ENOMEM;
		goto fail;
	}

	state->acquire_ctx = ctx;
	restore_state->acquire_ctx = ctx;

	connector_state = drm_atomic_get_connector_state(state, connector);
	if (IS_ERR(connector_state)) {
		ret = PTR_ERR(connector_state);
		goto fail;
	}

	ret = drm_atomic_set_crtc_for_connector(connector_state, crtc);
	if (ret)
		goto fail;

	crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
	if (IS_ERR(crtc_state)) {
		ret = PTR_ERR(crtc_state);
		goto fail;
	}

	crtc_state->base.active = crtc_state->base.enable = true;

	if (!mode)
		mode = &load_detect_mode;

	/* We need a framebuffer large enough to accommodate all accesses
	 * that the plane may generate whilst we perform load detection.
	 * We can not rely on the fbcon either being present (we get called
	 * during its initialisation to detect all boot displays, or it may
	 * not even exist) or that it is large enough to satisfy the
	 * requested mode.
	 */
	fb = mode_fits_in_fbdev(dev, mode);
	if (fb == NULL) {
		DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
		fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
	} else
		DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
	if (IS_ERR(fb)) {
		DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
		goto fail;
	}

	/* NOTE(review): failure here goes to 'fail' without dropping the fb
	 * reference taken above — looks like a leak; verify. */
	ret = intel_modeset_setup_plane_state(state, crtc, mode, fb, 0, 0);
	if (ret)
		goto fail;

	/* The plane state now holds its own reference on fb. */
	drm_framebuffer_unreference(fb);

	ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode);
	if (ret)
		goto fail;

	/* Duplicate the current connector/crtc/plane state for restoration. */
	ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
	if (!ret)
		ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
	if (!ret)
		ret = PTR_ERR_OR_ZERO(drm_atomic_get_plane_state(restore_state, crtc->primary));
	if (ret) {
		DRM_DEBUG_KMS("Failed to create a copy of old state to restore: %i\n", ret);
		goto fail;
	}

	ret = drm_atomic_commit(state);
	if (ret) {
		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
		goto fail;
	}

	old->restore_state = restore_state;

	/* let the connector get through one full cycle before testing */
	intel_wait_for_vblank(dev, intel_crtc->pipe);
	return true;

fail:
	drm_atomic_state_free(state);
	drm_atomic_state_free(restore_state);
	restore_state = state = NULL;

	/* Deadlock on a modeset lock: back off and retry from scratch. */
	if (ret == -EDEADLK) {
		drm_modeset_backoff(ctx);
		goto retry;
	}

	return false;
}
10758
/*
 * intel_release_load_detect_pipe - undo a temporary load-detection modeset
 * @connector: connector that was borrowed for load detection
 * @old: per-detection state; old->restore_state holds the atomic state
 *       saved before the detection modeset (NULL if nothing was changed)
 * @ctx: modeset acquire context held by the caller
 *
 * Commits the saved restore state to put the display back the way it was.
 * On commit failure the state is freed here; on success it is apparently
 * consumed by drm_atomic_commit() (only the failure path frees it) —
 * matches the drm_atomic_commit() contract of this kernel version.
 */
void intel_release_load_detect_pipe(struct drm_connector *connector,
				    struct intel_load_detect_pipe *old,
				    struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_encoder *intel_encoder =
		intel_attached_encoder(connector);
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_atomic_state *state = old->restore_state;
	int ret;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, connector->name,
		      encoder->base.id, encoder->name);

	/* No saved state means load detection changed nothing: done. */
	if (!state)
		return;

	ret = drm_atomic_commit(state);
	if (ret) {
		DRM_DEBUG_KMS("Couldn't release load detect pipe: %i\n", ret);
		/* Commit did not take ownership, so free the state here. */
		drm_atomic_state_free(state);
	}
}
10782
da4a1efa 10783static int i9xx_pll_refclk(struct drm_device *dev,
5cec258b 10784 const struct intel_crtc_state *pipe_config)
da4a1efa 10785{
fac5e23e 10786 struct drm_i915_private *dev_priv = to_i915(dev);
da4a1efa
VS
10787 u32 dpll = pipe_config->dpll_hw_state.dpll;
10788
10789 if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
e91e941b 10790 return dev_priv->vbt.lvds_ssc_freq;
da4a1efa
VS
10791 else if (HAS_PCH_SPLIT(dev))
10792 return 120000;
10793 else if (!IS_GEN2(dev))
10794 return 96000;
10795 else
10796 return 48000;
10797}
10798
/*
 * i9xx_crtc_clock_get - decode the programmed DPLL state into a port clock
 * @crtc: crtc being read out
 * @pipe_config: state whose dpll_hw_state (dpll/fp0/fp1) has already been
 *               captured from hardware; port_clock is written on success
 *
 * Reverse-engineers the m/n/p divisors from the FP and DPLL register
 * values and computes the resulting port clock. On an unknown DPLL mode
 * the function bails out leaving port_clock untouched.
 */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	int pipe = pipe_config->cpu_transcoder;
	u32 dpll = pipe_config->dpll_hw_state.dpll;
	u32 fp;
	struct dpll clock;
	int port_clock;
	int refclk = i9xx_pll_refclk(dev, pipe_config);

	/* FPA1 select bit decides which of the two divisor registers is live. */
	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
		fp = pipe_config->dpll_hw_state.fp0;
	else
		fp = pipe_config->dpll_hw_state.fp1;

	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
	/* Pineview encodes n/m2 differently from the other i9xx parts. */
	if (IS_PINEVIEW(dev)) {
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
	} else {
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
	}

	if (!IS_GEN2(dev)) {
		/* p1 is stored one-hot; ffs() recovers the divisor value. */
		if (IS_PINEVIEW(dev))
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
		else
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
			       DPLL_FPA01_P1_POST_DIV_SHIFT);

		switch (dpll & DPLL_MODE_MASK) {
		case DPLLB_MODE_DAC_SERIAL:
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
				5 : 10;
			break;
		case DPLLB_MODE_LVDS:
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
				7 : 14;
			break;
		default:
			DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
				  "mode\n", (int)(dpll & DPLL_MODE_MASK));
			return;
		}

		if (IS_PINEVIEW(dev))
			port_clock = pnv_calc_dpll_params(refclk, &clock);
		else
			port_clock = i9xx_calc_dpll_params(refclk, &clock);
	} else {
		/* Gen2: i830 has no LVDS register at all. */
		u32 lvds = IS_I830(dev) ? 0 : I915_READ(LVDS);
		bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);

		if (is_lvds) {
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);

			if (lvds & LVDS_CLKB_POWER_UP)
				clock.p2 = 7;
			else
				clock.p2 = 14;
		} else {
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
				clock.p1 = 2;
			else {
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
			}
			if (dpll & PLL_P2_DIVIDE_BY_4)
				clock.p2 = 4;
			else
				clock.p2 = 2;
		}

		port_clock = i9xx_calc_dpll_params(refclk, &clock);
	}

	/*
	 * This value includes pixel_multiplier. We will use
	 * port_clock to compute adjusted_mode.crtc_clock in the
	 * encoder's get_config() function.
	 */
	pipe_config->port_clock = port_clock;
}
10888
6878da05
VS
10889int intel_dotclock_calculate(int link_freq,
10890 const struct intel_link_m_n *m_n)
f1f644dc 10891{
f1f644dc
JB
10892 /*
10893 * The calculation for the data clock is:
1041a02f 10894 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
f1f644dc 10895 * But we want to avoid losing precison if possible, so:
1041a02f 10896 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
f1f644dc
JB
10897 *
10898 * and the link clock is simpler:
1041a02f 10899 * link_clock = (m * link_clock) / n
f1f644dc
JB
10900 */
10901
6878da05
VS
10902 if (!m_n->link_n)
10903 return 0;
f1f644dc 10904
6878da05
VS
10905 return div_u64((u64)m_n->link_m * link_freq, m_n->link_n);
10906}
f1f644dc 10907
/*
 * ironlake_pch_clock_get - read out the PCH-side clock configuration
 * @crtc: crtc being read out
 * @pipe_config: state to fill; port_clock and adjusted_mode.crtc_clock
 *               are written
 */
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* read out port_clock from the DPLL */
	i9xx_crtc_clock_get(crtc, pipe_config);

	/*
	 * In case there is an active pipe without active ports,
	 * we may need some idea for the dotclock anyway.
	 * Calculate one based on the FDI configuration.
	 */
	pipe_config->base.adjusted_mode.crtc_clock =
		intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
					 &pipe_config->fdi_m_n);
}
10925
/**
 * intel_crtc_mode_get - read back the currently programmed mode of a pipe
 * @dev: drm device
 * @crtc: crtc to query
 *
 * Builds a drm_display_mode from the live timing registers plus a clock
 * readout done through a throwaway pipe_config. Returns a kzalloc'd mode
 * (presumably freed by the caller — confirm against callers) or NULL on
 * allocation failure.
 */
struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
					     struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
	struct drm_display_mode *mode;
	struct intel_crtc_state *pipe_config;
	int htot = I915_READ(HTOTAL(cpu_transcoder));
	int hsync = I915_READ(HSYNC(cpu_transcoder));
	int vtot = I915_READ(VTOTAL(cpu_transcoder));
	int vsync = I915_READ(VSYNC(cpu_transcoder));
	enum pipe pipe = intel_crtc->pipe;

	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
	if (!mode)
		return NULL;

	pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
	if (!pipe_config) {
		kfree(mode);
		return NULL;
	}

	/*
	 * Construct a pipe_config sufficient for getting the clock info
	 * back out of crtc_clock_get.
	 *
	 * Note, if LVDS ever uses a non-1 pixel multiplier, we'll need
	 * to use a real value here instead.
	 */
	pipe_config->cpu_transcoder = (enum transcoder) pipe;
	pipe_config->pixel_multiplier = 1;
	pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(pipe));
	pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(pipe));
	pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(pipe));
	i9xx_crtc_clock_get(intel_crtc, pipe_config);

	mode->clock = pipe_config->port_clock / pipe_config->pixel_multiplier;
	/* Hardware timing registers store value-minus-one; undo that here. */
	mode->hdisplay = (htot & 0xffff) + 1;
	mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
	mode->hsync_start = (hsync & 0xffff) + 1;
	mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
	mode->vdisplay = (vtot & 0xffff) + 1;
	mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
	mode->vsync_start = (vsync & 0xffff) + 1;
	mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;

	drm_mode_set_name(mode);

	kfree(pipe_config);

	return mode;
}
10981
/*
 * intel_crtc_destroy - tear down an intel_crtc
 * @crtc: crtc being destroyed
 *
 * Detaches and cancels any pending flip work, then releases the drm crtc
 * and the containing intel_crtc allocation.
 */
static void intel_crtc_destroy(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct intel_flip_work *work;

	/* Claim the flip work under the event lock ... */
	spin_lock_irq(&dev->event_lock);
	work = intel_crtc->flip_work;
	intel_crtc->flip_work = NULL;
	spin_unlock_irq(&dev->event_lock);

	/* ... and cancel it outside the lock: cancel_work_sync() can sleep. */
	if (work) {
		cancel_work_sync(&work->mmio_work);
		cancel_work_sync(&work->unpin_work);
		kfree(work);
	}

	drm_crtc_cleanup(crtc);

	kfree(intel_crtc);
}
11003
/*
 * intel_unpin_work_fn - deferred completion of a page flip
 * @__work: embedded unpin_work item inside an intel_flip_work
 *
 * Waits for any associated mmio flip work, unpins the old framebuffer,
 * drops the GEM/request/framebuffer references taken when the flip was
 * queued, and finally frees the flip work itself.
 */
static void intel_unpin_work_fn(struct work_struct *__work)
{
	struct intel_flip_work *work =
		container_of(__work, struct intel_flip_work, unpin_work);
	struct intel_crtc *crtc = to_intel_crtc(work->crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_plane *primary = crtc->base.primary;

	/* The mmio flip must have finished programming before we unpin. */
	if (is_mmio_work(work))
		flush_work(&work->mmio_work);

	/* struct_mutex protects the unpin and the reference drops below. */
	mutex_lock(&dev->struct_mutex);
	intel_unpin_fb_obj(work->old_fb, primary->state->rotation);
	drm_gem_object_unreference(&work->pending_flip_obj->base);

	if (work->flip_queued_req)
		i915_gem_request_assign(&work->flip_queued_req, NULL);
	mutex_unlock(&dev->struct_mutex);

	intel_frontbuffer_flip_complete(dev, to_intel_plane(primary)->frontbuffer_bit);
	intel_fbc_post_update(crtc);
	drm_framebuffer_unreference(work->old_fb);

	/* Must mirror the increment done when the flip was queued. */
	BUG_ON(atomic_read(&crtc->unpin_work_count) == 0);
	atomic_dec(&crtc->unpin_work_count);

	kfree(work);
}
d9e86c0e 11032
5a21b665
DV
11033/* Is 'a' after or equal to 'b'? */
11034static bool g4x_flip_count_after_eq(u32 a, u32 b)
11035{
11036 return !((a - b) & 0x80000000);
11037}
143f73b3 11038
/*
 * __pageflip_finished_cs - has a CS (ring-emitted) page flip completed?
 * @crtc: crtc the flip was queued on
 * @work: the flip work being checked
 *
 * Returns true when the flip can be considered done, using the display
 * surface address and the g4x+ flip counter as evidence. A GPU reset
 * since the flip was queued also counts as "finished".
 */
static bool __pageflip_finished_cs(struct intel_crtc *crtc,
				   struct intel_flip_work *work)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned reset_counter;

	/* A reset in between means the ring flip never executes: report done. */
	reset_counter = i915_reset_counter(&dev_priv->gpu_error);
	if (crtc->reset_counter != reset_counter)
		return true;

	/*
	 * The relevant registers don't exist on pre-ctg.
	 * As the flip done interrupt doesn't trigger for mmio
	 * flips on gmch platforms, a flip count check isn't
	 * really needed there. But since ctg has the registers,
	 * include it in the check anyway.
	 */
	if (INTEL_INFO(dev)->gen < 5 && !IS_G4X(dev))
		return true;

	/*
	 * BDW signals flip done immediately if the plane
	 * is disabled, even if the plane enable is already
	 * armed to occur at the next vblank :(
	 */

	/*
	 * A DSPSURFLIVE check isn't enough in case the mmio and CS flips
	 * used the same base address. In that case the mmio flip might
	 * have completed, but the CS hasn't even executed the flip yet.
	 *
	 * A flip count check isn't enough as the CS might have updated
	 * the base address just after start of vblank, but before we
	 * managed to process the interrupt. This means we'd complete the
	 * CS flip too soon.
	 *
	 * Combining both checks should get us a good enough result. It may
	 * still happen that the CS flip has been executed, but has not
	 * yet actually completed. But in case the base address is the same
	 * anyway, we don't really care.
	 */
	return (I915_READ(DSPSURFLIVE(crtc->plane)) & ~0xfff) ==
		crtc->flip_work->gtt_offset &&
		g4x_flip_count_after_eq(I915_READ(PIPE_FLIPCOUNT_G4X(crtc->pipe)),
				       crtc->flip_work->flip_count);
}
b4a98e57 11086
5a21b665
DV
11087static bool
11088__pageflip_finished_mmio(struct intel_crtc *crtc,
11089 struct intel_flip_work *work)
11090{
11091 /*
11092 * MMIO work completes when vblank is different from
11093 * flip_queued_vblank.
11094 *
11095 * Reset counter value doesn't matter, this is handled by
11096 * i915_wait_request finishing early, so no need to handle
11097 * reset here.
11098 */
11099 return intel_crtc_get_vblank_counter(crtc) != work->flip_queued_vblank;
6b95a207
KH
11100}
11101
/*
 * pageflip_finished - has the given flip work completed?
 * @crtc: crtc the flip was queued on
 * @work: the flip work being checked
 *
 * Returns false while the flip has not even been activated yet, then
 * dispatches to the mmio or CS completion check.
 */
static bool pageflip_finished(struct intel_crtc *crtc,
			      struct intel_flip_work *work)
{
	if (!atomic_read(&work->pending))
		return false;

	/* Pairs with smp_mb__before_atomic() in intel_mark_page_flip_active(). */
	smp_rmb();

	if (is_mmio_work(work))
		return __pageflip_finished_mmio(crtc, work);
	else
		return __pageflip_finished_cs(crtc, work);
}
11116
/*
 * intel_finish_page_flip_cs - complete a finished CS page flip on @pipe
 * @dev_priv: i915 device
 * @pipe: pipe whose flip-done / vblank fired
 *
 * Called from interrupt and reset paths; completes the pending CS flip
 * if the hardware state says it is done.
 */
void intel_finish_page_flip_cs(struct drm_i915_private *dev_priv, int pipe)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_flip_work *work;
	unsigned long flags;

	/* Ignore early vblank irqs */
	if (!crtc)
		return;

	/*
	 * This is called both by irq handlers and the reset code (to complete
	 * lost pageflips) so needs the full irqsave spinlocks.
	 */
	spin_lock_irqsave(&dev->event_lock, flags);
	work = intel_crtc->flip_work;

	/* Only CS flips are completed here; mmio flips have their own path. */
	if (work != NULL &&
	    !is_mmio_work(work) &&
	    pageflip_finished(intel_crtc, work))
		page_flip_completed(intel_crtc);

	spin_unlock_irqrestore(&dev->event_lock, flags);
}
11143
/*
 * intel_finish_page_flip_mmio - complete a finished MMIO page flip on @pipe
 * @dev_priv: i915 device
 * @pipe: pipe whose vblank fired
 *
 * Mirror of intel_finish_page_flip_cs() for mmio flips: called from
 * interrupt and reset paths, completes the pending flip if done.
 */
void intel_finish_page_flip_mmio(struct drm_i915_private *dev_priv, int pipe)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_flip_work *work;
	unsigned long flags;

	/* Ignore early vblank irqs */
	if (!crtc)
		return;

	/*
	 * This is called both by irq handlers and the reset code (to complete
	 * lost pageflips) so needs the full irqsave spinlocks.
	 */
	spin_lock_irqsave(&dev->event_lock, flags);
	work = intel_crtc->flip_work;

	/* Only mmio flips are completed here; CS flips have their own path. */
	if (work != NULL &&
	    is_mmio_work(work) &&
	    pageflip_finished(intel_crtc, work))
		page_flip_completed(intel_crtc);

	spin_unlock_irqrestore(&dev->event_lock, flags);
}
11170
/*
 * intel_mark_page_flip_active - arm a queued flip for completion checks
 * @crtc: crtc the flip belongs to
 * @work: the flip work to activate
 *
 * Records the vblank at which the flip was queued, then publishes the
 * work as pending. The barrier pairs with the smp_rmb() in
 * pageflip_finished() so readers of @work->pending see a fully
 * initialized work item.
 */
static inline void intel_mark_page_flip_active(struct intel_crtc *crtc,
					       struct intel_flip_work *work)
{
	work->flip_queued_vblank = intel_crtc_get_vblank_counter(crtc);

	/* Ensure that the work item is consistent when activating it ... */
	smp_mb__before_atomic();
	atomic_set(&work->pending, 1);
}
a6747b73 11180
/*
 * intel_gen2_queue_flip - emit a gen2 MI_DISPLAY_FLIP into @req's ring
 * @dev: drm device
 * @crtc: crtc to flip
 * @fb: new framebuffer
 * @obj: backing GEM object (unused on gen2)
 * @req: request whose ring receives the commands
 * @flags: page flip flags (unused here)
 *
 * Emits exactly 6 dwords; the emission order is fixed by the command
 * format and must not be changed.
 */
static int intel_gen2_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct drm_i915_gem_request *req,
				 uint32_t flags)
{
	struct intel_engine_cs *engine = req->engine;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 flip_mask;
	int ret;

	ret = intel_ring_begin(req, 6);
	if (ret)
		return ret;

	/* Can't queue multiple flips, so wait for the previous
	 * one to finish before executing the next.
	 */
	if (intel_crtc->plane)
		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
	else
		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
	intel_ring_emit(engine, MI_WAIT_FOR_EVENT | flip_mask);
	intel_ring_emit(engine, MI_NOOP);
	intel_ring_emit(engine, MI_DISPLAY_FLIP |
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	intel_ring_emit(engine, fb->pitches[0]);
	intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset);
	intel_ring_emit(engine, 0); /* aux display base address, unused */

	return 0;
}
84c33a64 11214
/*
 * intel_gen3_queue_flip - emit a gen3 MI_DISPLAY_FLIP_I915 into @req's ring
 * @dev: drm device
 * @crtc: crtc to flip
 * @fb: new framebuffer
 * @obj: backing GEM object (unused on gen3)
 * @req: request whose ring receives the commands
 * @flags: page flip flags (unused here)
 *
 * Same structure as the gen2 variant but with the i915-style flip opcode
 * and a trailing MI_NOOP instead of the aux base address dword.
 */
static int intel_gen3_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct drm_i915_gem_request *req,
				 uint32_t flags)
{
	struct intel_engine_cs *engine = req->engine;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 flip_mask;
	int ret;

	ret = intel_ring_begin(req, 6);
	if (ret)
		return ret;

	/* Serialize against the previous flip on this plane. */
	if (intel_crtc->plane)
		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
	else
		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
	intel_ring_emit(engine, MI_WAIT_FOR_EVENT | flip_mask);
	intel_ring_emit(engine, MI_NOOP);
	intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 |
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	intel_ring_emit(engine, fb->pitches[0]);
	intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset);
	intel_ring_emit(engine, MI_NOOP);

	return 0;
}
84c33a64 11245
/*
 * intel_gen4_queue_flip - emit an i965-class MI_DISPLAY_FLIP into @req's ring
 * @dev: drm device
 * @crtc: crtc to flip
 * @fb: new framebuffer
 * @obj: backing GEM object; its tiling mode is OR'd into the base address
 * @req: request whose ring receives the commands
 * @flags: page flip flags (unused here)
 */
static int intel_gen4_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct drm_i915_gem_request *req,
				 uint32_t flags)
{
	struct intel_engine_cs *engine = req->engine;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t pf, pipesrc;
	int ret;

	ret = intel_ring_begin(req, 4);
	if (ret)
		return ret;

	/* i965+ uses the linear or tiled offsets from the
	 * Display Registers (which do not change across a page-flip)
	 * so we need only reprogram the base address.
	 */
	intel_ring_emit(engine, MI_DISPLAY_FLIP |
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	intel_ring_emit(engine, fb->pitches[0]);
	intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset |
			obj->tiling_mode);

	/* XXX Enabling the panel-fitter across page-flip is so far
	 * untested on non-native modes, so ignore it for now.
	 * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
	 */
	pf = 0;
	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
	intel_ring_emit(engine, pf | pipesrc);

	return 0;
}
11283
/*
 * intel_gen6_queue_flip - emit a gen6 MI_DISPLAY_FLIP into @req's ring
 * @dev: drm device
 * @crtc: crtc to flip
 * @fb: new framebuffer; tiling mode is OR'd into the pitch dword on gen6
 * @obj: backing GEM object
 * @req: request whose ring receives the commands
 * @flags: page flip flags (unused here)
 */
static int intel_gen6_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct drm_i915_gem_request *req,
				 uint32_t flags)
{
	struct intel_engine_cs *engine = req->engine;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t pf, pipesrc;
	int ret;

	ret = intel_ring_begin(req, 4);
	if (ret)
		return ret;

	intel_ring_emit(engine, MI_DISPLAY_FLIP |
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	intel_ring_emit(engine, fb->pitches[0] | obj->tiling_mode);
	intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset);

	/* Contrary to the suggestions in the documentation,
	 * "Enable Panel Fitter" does not seem to be required when page
	 * flipping with a non-native mode, and worse causes a normal
	 * modeset to fail.
	 * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
	 */
	pf = 0;
	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
	intel_ring_emit(engine, pf | pipesrc);

	return 0;
}
11318
/*
 * intel_gen7_queue_flip - emit an IVB+ MI_DISPLAY_FLIP into @req's ring
 * @dev: drm device
 * @crtc: crtc to flip; its plane selects the IVB plane bit
 * @fb: new framebuffer
 * @obj: backing GEM object; tiling mode is OR'd into the pitch dword
 * @req: request whose ring receives the commands
 * @flags: page flip flags (unused here)
 *
 * On the render ring this also emits the DERRMR unmask + save/restore
 * sequence required around MI_DISPLAY_FLIP; the packet must stay within
 * one cacheline (see comment below), so the emission length and
 * alignment handling here are deliberate and order-critical.
 */
static int intel_gen7_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct drm_i915_gem_request *req,
				 uint32_t flags)
{
	struct intel_engine_cs *engine = req->engine;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t plane_bit = 0;
	int len, ret;

	switch (intel_crtc->plane) {
	case PLANE_A:
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A;
		break;
	case PLANE_B:
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B;
		break;
	case PLANE_C:
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C;
		break;
	default:
		WARN_ONCE(1, "unknown plane in flip command\n");
		return -ENODEV;
	}

	len = 4;
	if (engine->id == RCS) {
		len += 6;
		/*
		 * On Gen 8, SRM is now taking an extra dword to accommodate
		 * 48bits addresses, and we need a NOOP for the batch size to
		 * stay even.
		 */
		if (IS_GEN8(dev))
			len += 2;
	}

	/*
	 * BSpec MI_DISPLAY_FLIP for IVB:
	 * "The full packet must be contained within the same cache line."
	 *
	 * Currently the LRI+SRM+MI_DISPLAY_FLIP all fit within the same
	 * cacheline, if we ever start emitting more commands before
	 * the MI_DISPLAY_FLIP we may need to first emit everything else,
	 * then do the cacheline alignment, and finally emit the
	 * MI_DISPLAY_FLIP.
	 */
	ret = intel_ring_cacheline_align(req);
	if (ret)
		return ret;

	ret = intel_ring_begin(req, len);
	if (ret)
		return ret;

	/* Unmask the flip-done completion message. Note that the bspec says that
	 * we should do this for both the BCS and RCS, and that we must not unmask
	 * more than one flip event at any time (or ensure that one flip message
	 * can be sent by waiting for flip-done prior to queueing new flips).
	 * Experimentation says that BCS works despite DERRMR masking all
	 * flip-done completion events and that unmasking all planes at once
	 * for the RCS also doesn't appear to drop events. Setting the DERRMR
	 * to zero does lead to lockups within MI_DISPLAY_FLIP.
	 */
	if (engine->id == RCS) {
		intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit_reg(engine, DERRMR);
		intel_ring_emit(engine, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
					  DERRMR_PIPEB_PRI_FLIP_DONE |
					  DERRMR_PIPEC_PRI_FLIP_DONE));
		if (IS_GEN8(dev))
			intel_ring_emit(engine, MI_STORE_REGISTER_MEM_GEN8 |
					      MI_SRM_LRM_GLOBAL_GTT);
		else
			intel_ring_emit(engine, MI_STORE_REGISTER_MEM |
					      MI_SRM_LRM_GLOBAL_GTT);
		intel_ring_emit_reg(engine, DERRMR);
		intel_ring_emit(engine, engine->scratch.gtt_offset + 256);
		if (IS_GEN8(dev)) {
			intel_ring_emit(engine, 0);
			intel_ring_emit(engine, MI_NOOP);
		}
	}

	intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 | plane_bit);
	intel_ring_emit(engine, (fb->pitches[0] | obj->tiling_mode));
	intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset);
	intel_ring_emit(engine, (MI_NOOP));

	return 0;
}
11412
11413static bool use_mmio_flip(struct intel_engine_cs *engine,
11414 struct drm_i915_gem_object *obj)
11415{
c37efb99
CW
11416 struct reservation_object *resv;
11417
5a21b665
DV
11418 /*
11419 * This is not being used for older platforms, because
11420 * non-availability of flip done interrupt forces us to use
11421 * CS flips. Older platforms derive flip done using some clever
11422 * tricks involving the flip_pending status bits and vblank irqs.
11423 * So using MMIO flips there would disrupt this mechanism.
11424 */
11425
11426 if (engine == NULL)
11427 return true;
11428
11429 if (INTEL_GEN(engine->i915) < 5)
11430 return false;
11431
11432 if (i915.use_mmio_flip < 0)
11433 return false;
11434 else if (i915.use_mmio_flip > 0)
11435 return true;
11436 else if (i915.enable_execlists)
11437 return true;
c37efb99
CW
11438
11439 resv = i915_gem_object_get_dmabuf_resv(obj);
11440 if (resv && !reservation_object_test_signaled_rcu(resv, false))
5a21b665 11441 return true;
c37efb99
CW
11442
11443 return engine != i915_gem_request_get_engine(obj->last_write_req);
5a21b665
DV
11444}
11445
/*
 * skl_do_mmio_flip - program a gen9+ universal-plane mmio flip
 * @intel_crtc: crtc to flip
 * @rotation: plane rotation, used to pick the stride encoding
 * @work: flip work carrying the new surface's gtt_offset
 *
 * Rewrites PLANE_CTL/PLANE_STRIDE for the new fb's tiling, then arms the
 * flip by writing PLANE_SURF (see comment below on atomicity).
 */
static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
			     unsigned int rotation,
			     struct intel_flip_work *work)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_framebuffer *fb = intel_crtc->base.primary->fb;
	const enum pipe pipe = intel_crtc->pipe;
	u32 ctl, stride, tile_height;

	ctl = I915_READ(PLANE_CTL(pipe, 0));
	ctl &= ~PLANE_CTL_TILED_MASK;
	switch (fb->modifier[0]) {
	case DRM_FORMAT_MOD_NONE:
		break;
	case I915_FORMAT_MOD_X_TILED:
		ctl |= PLANE_CTL_TILED_X;
		break;
	case I915_FORMAT_MOD_Y_TILED:
		ctl |= PLANE_CTL_TILED_Y;
		break;
	case I915_FORMAT_MOD_Yf_TILED:
		ctl |= PLANE_CTL_TILED_YF;
		break;
	default:
		MISSING_CASE(fb->modifier[0]);
	}

	/*
	 * The stride is either expressed as a multiple of 64 bytes chunks for
	 * linear buffers or in number of tiles for tiled buffers.
	 */
	if (intel_rotation_90_or_270(rotation)) {
		/* stride = Surface height in tiles */
		tile_height = intel_tile_height(dev_priv, fb->modifier[0], 0);
		stride = DIV_ROUND_UP(fb->height, tile_height);
	} else {
		stride = fb->pitches[0] /
			intel_fb_stride_alignment(dev_priv, fb->modifier[0],
						  fb->pixel_format);
	}

	/*
	 * Both PLANE_CTL and PLANE_STRIDE are not updated on vblank but on
	 * PLANE_SURF updates, the update is then guaranteed to be atomic.
	 */
	I915_WRITE(PLANE_CTL(pipe, 0), ctl);
	I915_WRITE(PLANE_STRIDE(pipe, 0), stride);

	I915_WRITE(PLANE_SURF(pipe, 0), work->gtt_offset);
	POSTING_READ(PLANE_SURF(pipe, 0));
}
11498
/*
 * ilk_do_mmio_flip - program an ilk-style mmio flip
 * @intel_crtc: crtc to flip
 * @work: flip work carrying the new surface's gtt_offset
 *
 * Updates the tiling bit in DSPCNTR for the new fb, then arms the flip
 * by writing DSPSURF (latched on the next vblank).
 */
static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc,
			     struct intel_flip_work *work)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_framebuffer *intel_fb =
		to_intel_framebuffer(intel_crtc->base.primary->fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	i915_reg_t reg = DSPCNTR(intel_crtc->plane);
	u32 dspcntr;

	dspcntr = I915_READ(reg);

	if (obj->tiling_mode != I915_TILING_NONE)
		dspcntr |= DISPPLANE_TILED;
	else
		dspcntr &= ~DISPPLANE_TILED;

	I915_WRITE(reg, dspcntr);

	I915_WRITE(DSPSURF(intel_crtc->plane), work->gtt_offset);
	POSTING_READ(DSPSURF(intel_crtc->plane));
}
11522
/*
 * intel_mmio_flip_work_func - worker that performs an mmio page flip
 * @w: embedded mmio_work item inside an intel_flip_work
 *
 * Waits for the GPU (queued request and any dma-buf fence) to finish with
 * the new framebuffer, then programs the flip registers inside the
 * vblank-evasion critical section.
 */
static void intel_mmio_flip_work_func(struct work_struct *w)
{
	struct intel_flip_work *work =
		container_of(w, struct intel_flip_work, mmio_work);
	struct intel_crtc *crtc = to_intel_crtc(work->crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_framebuffer *intel_fb =
		to_intel_framebuffer(crtc->base.primary->fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	struct reservation_object *resv;

	if (work->flip_queued_req)
		WARN_ON(__i915_wait_request(work->flip_queued_req,
					    false, NULL,
					    &dev_priv->rps.mmioflips));

	/* For framebuffer backed by dmabuf, wait for fence */
	resv = i915_gem_object_get_dmabuf_resv(obj);
	if (resv)
		WARN_ON(reservation_object_wait_timeout_rcu(resv, false, false,
							    MAX_SCHEDULE_TIMEOUT) < 0);

	intel_pipe_update_start(crtc);

	if (INTEL_GEN(dev_priv) >= 9)
		skl_do_mmio_flip(crtc, work->rotation, work);
	else
		/* use_mmio_flip() restricts MMIO flips to ilk+ */
		ilk_do_mmio_flip(crtc, work);

	intel_pipe_update_end(crtc, work);
}
11555
/*
 * intel_default_queue_flip - fallback queue_flip hook
 *
 * Always returns -ENODEV: used on platforms where CS page flips are not
 * implemented, so queuing a ring flip is never possible.
 */
static int intel_default_queue_flip(struct drm_device *dev,
				    struct drm_crtc *crtc,
				    struct drm_framebuffer *fb,
				    struct drm_i915_gem_object *obj,
				    struct drm_i915_gem_request *req,
				    uint32_t flags)
{
	return -ENODEV;
}
11565
/*
 * __pageflip_stall_check_cs - heuristically detect a stuck CS page flip
 * @dev_priv: i915 device
 * @intel_crtc: crtc with a pending CS flip
 * @work: the pending flip work
 *
 * Returns true when the flip has apparently executed (surface address
 * matches) but no flip-done interrupt arrived for several vblanks,
 * suggesting a missed interrupt. Called from vblank interrupt context.
 */
static bool __pageflip_stall_check_cs(struct drm_i915_private *dev_priv,
				      struct intel_crtc *intel_crtc,
				      struct intel_flip_work *work)
{
	u32 addr, vblank;

	if (!atomic_read(&work->pending))
		return false;

	/* Pairs with the publish barrier in intel_mark_page_flip_active(). */
	smp_rmb();

	vblank = intel_crtc_get_vblank_counter(intel_crtc);
	if (work->flip_ready_vblank == 0) {
		if (work->flip_queued_req &&
		    !i915_gem_request_completed(work->flip_queued_req))
			return false;

		work->flip_ready_vblank = vblank;
	}

	/* Give the flip a grace period of a few vblanks before suspecting a stall. */
	if (vblank - work->flip_ready_vblank < 3)
		return false;

	/* Potential stall - if we see that the flip has happened,
	 * assume a missed interrupt. */
	if (INTEL_GEN(dev_priv) >= 4)
		addr = I915_HI_DISPBASE(I915_READ(DSPSURF(intel_crtc->plane)));
	else
		addr = I915_READ(DSPADDR(intel_crtc->plane));

	/* There is a potential issue here with a false positive after a flip
	 * to the same address. We could address this by checking for a
	 * non-incrementing frame counter.
	 */
	return addr == work->gtt_offset;
}
11602
/*
 * intel_check_page_flip - per-vblank watchdog for pending CS page flips
 * @dev_priv: i915 device
 * @pipe: pipe whose vblank fired
 *
 * Called from the vblank interrupt: kicks a flip that appears stuck
 * (missed flip-done interrupt) and RPS-boosts the request of a flip
 * that is taking more than one vblank to complete.
 */
void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe)
{
	struct drm_device *dev = &dev_priv->drm;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_flip_work *work;

	WARN_ON(!in_interrupt());

	if (crtc == NULL)
		return;

	spin_lock(&dev->event_lock);
	work = intel_crtc->flip_work;

	if (work != NULL && !is_mmio_work(work) &&
	    __pageflip_stall_check_cs(dev_priv, intel_crtc, work)) {
		WARN_ONCE(1,
			  "Kicking stuck page flip: queued at %d, now %d\n",
			work->flip_queued_vblank, intel_crtc_get_vblank_counter(intel_crtc));
		page_flip_completed(intel_crtc);
		work = NULL;
	}

	/* Boost the GPU if the flip is lagging behind by more than a vblank. */
	if (work != NULL && !is_mmio_work(work) &&
	    intel_crtc_get_vblank_counter(intel_crtc) - work->flip_queued_vblank > 1)
		intel_queue_rps_boost_for_request(work->flip_queued_req);
	spin_unlock(&dev->event_lock);
}
11632
/*
 * intel_crtc_page_flip - legacy (non-atomic) page-flip entry point for the
 * primary plane.
 *
 * Validates that the new fb is flippable (same pixel format; same
 * offsets/pitches on gen4+), allocates an intel_flip_work tracked in
 * intel_crtc->flip_work, pins the new framebuffer, and then queues the
 * flip either via a scheduled mmio worker or via a CS DISPLAY_FLIP
 * emission on the selected engine.
 *
 * Returns 0 on success; -EBUSY if a flip is already pending (or the old
 * fb has no object), -EINVAL for changes the flip hardware cannot apply,
 * -ENOMEM on allocation failure, or another negative error code.  On a
 * terminally wedged GPU it falls back to a synchronous atomic commit of
 * the new fb (out_hang path).
 */
11633 static int intel_crtc_page_flip(struct drm_crtc *crtc,
11634 struct drm_framebuffer *fb,
11635 struct drm_pending_vblank_event *event,
11636 uint32_t page_flip_flags)
11637 {
11638 struct drm_device *dev = crtc->dev;
fac5e23e 11639 struct drm_i915_private *dev_priv = to_i915(dev);
5a21b665
DV
11640 struct drm_framebuffer *old_fb = crtc->primary->fb;
11641 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
11642 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11643 struct drm_plane *primary = crtc->primary;
11644 enum pipe pipe = intel_crtc->pipe;
11645 struct intel_flip_work *work;
11646 struct intel_engine_cs *engine;
11647 bool mmio_flip;
11648 struct drm_i915_gem_request *request = NULL;
11649 int ret;
11650 
11651 /*
11652 * drm_mode_page_flip_ioctl() should already catch this, but double
11653 * check to be safe. In the future we may enable pageflipping from
11654 * a disabled primary plane.
11655 */
11656 if (WARN_ON(intel_fb_obj(old_fb) == NULL))
11657 return -EBUSY;
11658 
11659 /* Can't change pixel format via MI display flips. */
11660 if (fb->pixel_format != crtc->primary->fb->pixel_format)
11661 return -EINVAL;
11662 
11663 /*
11664 * TILEOFF/LINOFF registers can't be changed via MI display flips.
11665 * Note that pitch changes could also affect these register.
11666 */
11667 if (INTEL_INFO(dev)->gen > 3 &&
11668 (fb->offsets[0] != crtc->primary->fb->offsets[0] ||
11669 fb->pitches[0] != crtc->primary->fb->pitches[0]))
11670 return -EINVAL;
11671 
11672 if (i915_terminally_wedged(&dev_priv->gpu_error))
11673 goto out_hang;
11674 
11675 work = kzalloc(sizeof(*work), GFP_KERNEL);
11676 if (work == NULL)
11677 return -ENOMEM;
11678 
11679 work->event = event;
11680 work->crtc = crtc;
11681 work->old_fb = old_fb;
11682 INIT_WORK(&work->unpin_work, intel_unpin_work_fn);
11683 
11684 ret = drm_crtc_vblank_get(crtc);
11685 if (ret)
11686 goto free_work;
11687 
11688 /* We borrow the event spin lock for protecting flip_work */
11689 spin_lock_irq(&dev->event_lock);
11690 if (intel_crtc->flip_work) {
11691 /* Before declaring the flip queue wedged, check if
11692 * the hardware completed the operation behind our backs.
11693 */
11694 if (pageflip_finished(intel_crtc, intel_crtc->flip_work)) {
11695 DRM_DEBUG_DRIVER("flip queue: previous flip completed, continuing\n");
11696 page_flip_completed(intel_crtc);
11697 } else {
11698 DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
11699 spin_unlock_irq(&dev->event_lock);
11700 
11701 drm_crtc_vblank_put(crtc);
11702 kfree(work);
11703 return -EBUSY;
11704 }
11705 }
11706 intel_crtc->flip_work = work;
11707 spin_unlock_irq(&dev->event_lock);
11708 
/* Throttle: with two flips already awaiting unpin, drain the workqueue. */
11709 if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
11710 flush_workqueue(dev_priv->wq);
11711 
11712 /* Reference the objects for the scheduled work. */
11713 drm_framebuffer_reference(work->old_fb);
11714 drm_gem_object_reference(&obj->base);
11715 
11716 crtc->primary->fb = fb;
11717 update_state_fb(crtc->primary);
faf68d92
ML
11718 
11719 intel_fbc_pre_update(intel_crtc, intel_crtc->config,
11720 to_intel_plane_state(primary->state));
5a21b665
DV
11721 
11722 work->pending_flip_obj = obj;
11723 
11724 ret = i915_mutex_lock_interruptible(dev);
11725 if (ret)
11726 goto cleanup;
11727 
11728 intel_crtc->reset_counter = i915_reset_counter(&dev_priv->gpu_error);
11729 if (__i915_reset_in_progress_or_wedged(intel_crtc->reset_counter)) {
11730 ret = -EIO;
11731 goto cleanup;
11732 }
11733 
11734 atomic_inc(&intel_crtc->unpin_work_count);
11735 
11736 if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
11737 work->flip_count = I915_READ(PIPE_FLIPCOUNT_G4X(pipe)) + 1;
11738 
/*
 * Select the engine for a CS flip.  NOTE(review): engine may end up
 * NULL (vlv tiling change) - presumably use_mmio_flip() then forces
 * the mmio path; confirm against use_mmio_flip().
 */
11739 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
11740 engine = &dev_priv->engine[BCS];
11741 if (obj->tiling_mode != intel_fb_obj(work->old_fb)->tiling_mode)
11742 /* vlv: DISPLAY_FLIP fails to change tiling */
11743 engine = NULL;
11744 } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
11745 engine = &dev_priv->engine[BCS];
11746 } else if (INTEL_INFO(dev)->gen >= 7) {
11747 engine = i915_gem_request_get_engine(obj->last_write_req);
11748 if (engine == NULL || engine->id != RCS)
11749 engine = &dev_priv->engine[BCS];
11750 } else {
11751 engine = &dev_priv->engine[RCS];
11752 }
11753 
11754 mmio_flip = use_mmio_flip(engine, obj);
11755 
11756 /* When using CS flips, we want to emit semaphores between rings.
11757 * However, when using mmio flips we will create a task to do the
11758 * synchronisation, so all we want here is to pin the framebuffer
11759 * into the display plane and skip any waits.
11760 */
11761 if (!mmio_flip) {
11762 ret = i915_gem_object_sync(obj, engine, &request);
11763 if (!ret && !request) {
11764 request = i915_gem_request_alloc(engine, NULL);
11765 ret = PTR_ERR_OR_ZERO(request);
11766 }
11767 
11768 if (ret)
11769 goto cleanup_pending;
11770 }
11771 
11772 ret = intel_pin_and_fence_fb_obj(fb, primary->state->rotation);
11773 if (ret)
11774 goto cleanup_pending;
11775 
11776 work->gtt_offset = intel_plane_obj_offset(to_intel_plane(primary),
11777 obj, 0);
11778 work->gtt_offset += intel_crtc->dspaddr_offset;
11779 work->rotation = crtc->primary->state->rotation;
11780 
11781 if (mmio_flip) {
11782 INIT_WORK(&work->mmio_work, intel_mmio_flip_work_func);
11783 
11784 i915_gem_request_assign(&work->flip_queued_req,
11785 obj->last_write_req);
11786 
11787 schedule_work(&work->mmio_work);
11788 } else {
11789 i915_gem_request_assign(&work->flip_queued_req, request);
11790 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, request,
11791 page_flip_flags);
11792 if (ret)
11793 goto cleanup_unpin;
11794 
11795 intel_mark_page_flip_active(intel_crtc, work);
11796 
11797 i915_add_request_no_flush(request);
11798 }
11799 
11800 i915_gem_track_fb(intel_fb_obj(old_fb), obj,
11801 to_intel_plane(primary)->frontbuffer_bit);
11802 mutex_unlock(&dev->struct_mutex);
11803 
11804 intel_frontbuffer_flip_prepare(dev,
11805 to_intel_plane(primary)->frontbuffer_bit);
11806 
11807 trace_i915_flip_request(intel_crtc->plane, obj);
11808 
11809 return 0;
11810 
/* Error unwind: release resources in reverse order of acquisition. */
11811cleanup_unpin:
11812 intel_unpin_fb_obj(fb, crtc->primary->state->rotation);
11813cleanup_pending:
11814 if (!IS_ERR_OR_NULL(request))
11815 i915_add_request_no_flush(request);
11816 atomic_dec(&intel_crtc->unpin_work_count);
11817 mutex_unlock(&dev->struct_mutex);
11818cleanup:
11819 crtc->primary->fb = old_fb;
11820 update_state_fb(crtc->primary);
11821 
11822 drm_gem_object_unreference_unlocked(&obj->base);
11823 drm_framebuffer_unreference(work->old_fb);
11824 
11825 spin_lock_irq(&dev->event_lock);
11826 intel_crtc->flip_work = NULL;
11827 spin_unlock_irq(&dev->event_lock);
11828 
11829 drm_crtc_vblank_put(crtc);
11830free_work:
11831 kfree(work);
11832 
/* GPU wedged: try to show the new fb via a synchronous atomic commit. */
11833 if (ret == -EIO) {
11834 struct drm_atomic_state *state;
11835 struct drm_plane_state *plane_state;
11836 
11837out_hang:
11838 state = drm_atomic_state_alloc(dev);
11839 if (!state)
11840 return -ENOMEM;
11841 state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
11842 
11843retry:
11844 plane_state = drm_atomic_get_plane_state(state, primary);
11845 ret = PTR_ERR_OR_ZERO(plane_state);
11846 if (!ret) {
11847 drm_atomic_set_fb_for_plane(plane_state, fb);
11848 
11849 ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
11850 if (!ret)
11851 ret = drm_atomic_commit(state);
11852 }
11853 
11854 if (ret == -EDEADLK) {
11855 drm_modeset_backoff(state->acquire_ctx);
11856 drm_atomic_state_clear(state);
11857 goto retry;
11858 }
11859 
11860 if (ret)
11861 drm_atomic_state_free(state);
11862 
11863 if (ret == 0 && event) {
11864 spin_lock_irq(&dev->event_lock);
11865 drm_crtc_send_vblank_event(crtc, event);
11866 spin_unlock_irq(&dev->event_lock);
11867 }
11868 }
11869 return ret;
11870}
11871
11872
11873/**
11874 * intel_wm_need_update - Check whether watermarks need updating
11875 * @plane: drm plane
11876 * @state: new plane state
11877 *
11878 * Check current plane state versus the new one to determine whether
11879 * watermarks need to be recalculated.
11880 *
11881 * Returns true or false.
11882 */
11883static bool intel_wm_need_update(struct drm_plane *plane,
11884 struct drm_plane_state *state)
11885{
11886 struct intel_plane_state *new = to_intel_plane_state(state);
11887 struct intel_plane_state *cur = to_intel_plane_state(plane->state);
11888
11889 /* Update watermarks on tiling or size changes. */
11890 if (new->visible != cur->visible)
11891 return true;
11892
11893 if (!cur->base.fb || !new->base.fb)
11894 return false;
11895
11896 if (cur->base.fb->modifier[0] != new->base.fb->modifier[0] ||
11897 cur->base.rotation != new->base.rotation ||
11898 drm_rect_width(&new->src) != drm_rect_width(&cur->src) ||
11899 drm_rect_height(&new->src) != drm_rect_height(&cur->src) ||
11900 drm_rect_width(&new->dst) != drm_rect_width(&cur->dst) ||
11901 drm_rect_height(&new->dst) != drm_rect_height(&cur->dst))
11902 return true;
11903
11904 return false;
11905}
11906
11907static bool needs_scaling(struct intel_plane_state *state)
11908{
11909 int src_w = drm_rect_width(&state->src) >> 16;
11910 int src_h = drm_rect_height(&state->src) >> 16;
11911 int dst_w = drm_rect_width(&state->dst);
11912 int dst_h = drm_rect_height(&state->dst);
11913
11914 return (src_w != dst_w || src_h != dst_h);
11915}
d21fbe87 11916
da20eabd
ML
/*
 * intel_plane_atomic_calc_changes - derive pipe-level flags for a plane update
 * @crtc_state: new crtc state; its intel fields are updated in place
 * @plane_state: new plane state being checked
 *
 * Compares the old and new plane state and sets the pipe_config flags this
 * commit needs: update_wm_pre/update_wm_post, disable_cxsr, fb_changed,
 * fb_bits (frontbuffer tracking) and disable_lp_wm (ivb sprite-scaling
 * workaround).  On gen9+ also runs the skl plane scaler check first.
 *
 * Returns 0 on success or a negative error code from the scaler check.
 */
11917 int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
11918 struct drm_plane_state *plane_state)
11919 {
ab1d3a0e 11920 struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc_state);
da20eabd
ML
11921 struct drm_crtc *crtc = crtc_state->crtc;
11922 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11923 struct drm_plane *plane = plane_state->plane;
11924 struct drm_device *dev = crtc->dev;
ed4a6a7c 11925 struct drm_i915_private *dev_priv = to_i915(dev);
da20eabd
ML
11926 struct intel_plane_state *old_plane_state =
11927 to_intel_plane_state(plane->state);
da20eabd
ML
11928 bool mode_changed = needs_modeset(crtc_state);
11929 bool was_crtc_enabled = crtc->state->active;
11930 bool is_crtc_enabled = crtc_state->active;
da20eabd
ML
11931 bool turn_off, turn_on, visible, was_visible;
11932 struct drm_framebuffer *fb = plane_state->fb;
78108b7c 11933 int ret;
da20eabd 11934
84114990 11935 if (INTEL_GEN(dev) >= 9 && plane->type != DRM_PLANE_TYPE_CURSOR) {
da20eabd
ML
11936 ret = skl_update_scaler_plane(
11937 to_intel_crtc_state(crtc_state),
11938 to_intel_plane_state(plane_state));
11939 if (ret)
11940 return ret;
11941 }
11942
da20eabd
ML
11943 was_visible = old_plane_state->visible;
11944 visible = to_intel_plane_state(plane_state)->visible;
11945
11946 if (!was_crtc_enabled && WARN_ON(was_visible))
11947 was_visible = false;
11948
35c08f43
ML
11949 /*
11950 * Visibility is calculated as if the crtc was on, but
11951 * after scaler setup everything depends on it being off
11952 * when the crtc isn't active.
f818ffea
VS
11953 *
11954 * FIXME this is wrong for watermarks. Watermarks should also
11955 * be computed as if the pipe would be active. Perhaps move
11956 * per-plane wm computation to the .check_plane() hook, and
11957 * only combine the results from all planes in the current place?
35c08f43
ML
11958 */
11959 if (!is_crtc_enabled)
11960 to_intel_plane_state(plane_state)->visible = visible = false;
da20eabd
ML
11961
11962 if (!was_visible && !visible)
11963 return 0;
11964
e8861675
ML
11965 if (fb != old_plane_state->base.fb)
11966 pipe_config->fb_changed = true;
11967
/* Does this commit switch the plane off or on (incl. full modesets)? */
da20eabd
ML
11968 turn_off = was_visible && (!visible || mode_changed);
11969 turn_on = visible && (!was_visible || mode_changed);
11970
72660ce0 11971 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] has [PLANE:%d:%s] with fb %i\n",
78108b7c
VS
11972 intel_crtc->base.base.id,
11973 intel_crtc->base.name,
72660ce0
VS
11974 plane->base.id, plane->name,
11975 fb ? fb->base.id : -1);
da20eabd 11976
72660ce0
VS
11977 DRM_DEBUG_ATOMIC("[PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
11978 plane->base.id, plane->name,
11979 was_visible, visible,
da20eabd
ML
11980 turn_off, turn_on, mode_changed);
11981
caed361d
VS
11982 if (turn_on) {
11983 pipe_config->update_wm_pre = true;
11984
11985 /* must disable cxsr around plane enable/disable */
11986 if (plane->type != DRM_PLANE_TYPE_CURSOR)
11987 pipe_config->disable_cxsr = true;
11988 } else if (turn_off) {
11989 pipe_config->update_wm_post = true;
92826fcd 11990
852eb00d 11991 /* must disable cxsr around plane enable/disable */
e8861675 11992 if (plane->type != DRM_PLANE_TYPE_CURSOR)
ab1d3a0e 11993 pipe_config->disable_cxsr = true;
852eb00d 11994 } else if (intel_wm_need_update(plane, plane_state)) {
caed361d
VS
11995 /* FIXME bollocks */
11996 pipe_config->update_wm_pre = true;
11997 pipe_config->update_wm_post = true;
852eb00d 11998 }
da20eabd 11999

ed4a6a7c 12000 /* Pre-gen9 platforms need two-step watermark updates */
caed361d
VS
12001 if ((pipe_config->update_wm_pre || pipe_config->update_wm_post) &&
12002 INTEL_INFO(dev)->gen < 9 && dev_priv->display.optimize_watermarks)
ed4a6a7c
MR
12003 to_intel_crtc_state(crtc_state)->wm.need_postvbl_update = true;
12004
8be6ca85 12005 if (visible || was_visible)
cd202f69 12006 pipe_config->fb_bits |= to_intel_plane(plane)->frontbuffer_bit;
a9ff8714 12007
31ae71fc
ML
12008 /*
12009 * WaCxSRDisabledForSpriteScaling:ivb
12010 *
12011 * cstate->update_wm was already set above, so this flag will
12012 * take effect when we commit and program watermarks.
12013 */
12014 if (plane->type == DRM_PLANE_TYPE_OVERLAY && IS_IVYBRIDGE(dev) &&
12015 needs_scaling(to_intel_plane_state(plane_state)) &&
12016 !needs_scaling(old_plane_state))
12017 pipe_config->disable_lp_wm = true;
d21fbe87 12018
da20eabd
ML
12019 return 0;
12020}
12021
6d3a1ce7
ML
12022static bool encoders_cloneable(const struct intel_encoder *a,
12023 const struct intel_encoder *b)
12024{
12025 /* masks could be asymmetric, so check both ways */
12026 return a == b || (a->cloneable & (1 << b->type) &&
12027 b->cloneable & (1 << a->type));
12028}
12029
12030static bool check_single_encoder_cloning(struct drm_atomic_state *state,
12031 struct intel_crtc *crtc,
12032 struct intel_encoder *encoder)
12033{
12034 struct intel_encoder *source_encoder;
12035 struct drm_connector *connector;
12036 struct drm_connector_state *connector_state;
12037 int i;
12038
12039 for_each_connector_in_state(state, connector, connector_state, i) {
12040 if (connector_state->crtc != &crtc->base)
12041 continue;
12042
12043 source_encoder =
12044 to_intel_encoder(connector_state->best_encoder);
12045 if (!encoders_cloneable(encoder, source_encoder))
12046 return false;
12047 }
12048
12049 return true;
12050}
12051
12052static bool check_encoder_cloning(struct drm_atomic_state *state,
12053 struct intel_crtc *crtc)
12054{
12055 struct intel_encoder *encoder;
12056 struct drm_connector *connector;
12057 struct drm_connector_state *connector_state;
12058 int i;
12059
12060 for_each_connector_in_state(state, connector, connector_state, i) {
12061 if (connector_state->crtc != &crtc->base)
12062 continue;
12063
12064 encoder = to_intel_encoder(connector_state->best_encoder);
12065 if (!check_single_encoder_cloning(state, crtc, encoder))
12066 return false;
12067 }
12068
12069 return true;
12070}
12071
/*
 * Atomic .atomic_check hook for intel crtcs.
 *
 * On a modeset: rejects invalid encoder cloning, flags a post-commit
 * watermark update when the pipe turns off, and computes the clock/dpll
 * state.  Always: runs color-management checks when needed, computes the
 * optimal pipe watermarks, then either computes 'intermediate' watermarks
 * or (when two-step updates are skipped) copies the optimal ilk watermarks
 * into the intermediate slot.  On gen9+ also updates the crtc scaler and
 * assigns scalers.  Returns 0 or a negative error code.
 */
12072 static int intel_crtc_atomic_check(struct drm_crtc *crtc,
12073 struct drm_crtc_state *crtc_state)
12074 {
cf5a15be 12075 struct drm_device *dev = crtc->dev;
fac5e23e 12076 struct drm_i915_private *dev_priv = to_i915(dev);
6d3a1ce7 12077 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
cf5a15be
ML
12078 struct intel_crtc_state *pipe_config =
12079 to_intel_crtc_state(crtc_state);
6d3a1ce7 12080 struct drm_atomic_state *state = crtc_state->state;
4d20cd86 12081 int ret;
6d3a1ce7
ML
12082 bool mode_changed = needs_modeset(crtc_state);
12083 
12084 if (mode_changed && !check_encoder_cloning(state, intel_crtc)) {
12085 DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
12086 return -EINVAL;
12087 }
12088 
12089 if (mode_changed && !crtc_state->active)
12090 pipe_config->update_wm_post = true;
12091
12092 if (mode_changed && crtc_state->enable &&
12093 dev_priv->display.crtc_compute_clock &&
12094 !WARN_ON(pipe_config->shared_dpll)) {
12095 ret = dev_priv->display.crtc_compute_clock(intel_crtc,
12096 pipe_config);
12097 if (ret)
12098 return ret;
12099 }
12100 
12101 if (crtc_state->color_mgmt_changed) {
12102 ret = intel_color_check(crtc, crtc_state);
12103 if (ret)
12104 return ret;
12105 }
12106 
12107 ret = 0;
12108 if (dev_priv->display.compute_pipe_wm) {
12109 ret = dev_priv->display.compute_pipe_wm(pipe_config);
12110 if (ret) {
12111 DRM_DEBUG_KMS("Target pipe watermarks are invalid\n");
12112 return ret;
12113 }
12114 }
12115 
12116 if (dev_priv->display.compute_intermediate_wm &&
12117 !to_intel_atomic_state(state)->skip_intermediate_wm) {
12118 if (WARN_ON(!dev_priv->display.compute_pipe_wm))
12119 return 0;
12120 
12121 /*
12122 * Calculate 'intermediate' watermarks that satisfy both the
12123 * old state and the new state. We can program these
12124 * immediately.
12125 */
12126 ret = dev_priv->display.compute_intermediate_wm(crtc->dev,
12127 intel_crtc,
12128 pipe_config);
12129 if (ret) {
12130 DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n");
12131 return ret;
12132 }
/* Two-step update skipped: use the optimal ilk watermarks directly. */
12133 } else if (dev_priv->display.compute_intermediate_wm) {
12134 if (HAS_PCH_SPLIT(dev_priv) && INTEL_GEN(dev_priv) < 9)
12135 pipe_config->wm.ilk.intermediate = pipe_config->wm.ilk.optimal;
12136 }
12137 
12138 if (INTEL_INFO(dev)->gen >= 9) {
12139 if (mode_changed)
12140 ret = skl_update_scaler_crtc(pipe_config);
12141 
12142 if (!ret)
12143 ret = intel_atomic_setup_scalers(dev, intel_crtc,
12144 pipe_config);
12145 }
12146 
12147 return ret;
12148}
12149
/*
 * CRTC helper vtable: the legacy mode_set_base_atomic hook plus the
 * atomic begin/flush/check hooks used by the atomic commit machinery.
 */
65b38e0d 12150 static const struct drm_crtc_helper_funcs intel_helper_funcs = {
f6e5b160 12151 .mode_set_base_atomic = intel_pipe_set_base_atomic,
5a21b665
DV
12152 .atomic_begin = intel_begin_crtc_commit,
12153 .atomic_flush = intel_finish_crtc_commit,
6d3a1ce7 12154 .atomic_check = intel_crtc_atomic_check,
f6e5b160
CW
12155};
12156
d29b2f9d
ACO
/*
 * Resynchronize every connector's atomic state (best_encoder/crtc) with
 * the legacy connector->encoder pointers.  Drops the connector reference
 * held for a previously bound crtc, and takes a new reference when the
 * connector ends up bound to an encoder's crtc.
 */
12157 static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
12158 {
12159 struct intel_connector *connector;
12160 
12161 for_each_intel_connector(dev, connector) {
8863dc7f
DV
12162 if (connector->base.state->crtc)
12163 drm_connector_unreference(&connector->base);
12164 
d29b2f9d
ACO
12165 if (connector->base.encoder) {
12166 connector->base.state->best_encoder =
12167 connector->base.encoder;
12168 connector->base.state->crtc =
12169 connector->base.encoder->crtc;
8863dc7f
DV
12170 
12171 drm_connector_reference(&connector->base);
d29b2f9d
ACO
12172 } else {
12173 connector->base.state->best_encoder = NULL;
12174 connector->base.state->crtc = NULL;
12175 }
12176 }
12177}
12178
050f7aeb 12179static void
eba905b2 12180connected_sink_compute_bpp(struct intel_connector *connector,
5cec258b 12181 struct intel_crtc_state *pipe_config)
050f7aeb
DV
12182{
12183 int bpp = pipe_config->pipe_bpp;
12184
12185 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] checking for sink bpp constrains\n",
12186 connector->base.base.id,
c23cc417 12187 connector->base.name);
050f7aeb
DV
12188
12189 /* Don't use an invalid EDID bpc value */
12190 if (connector->base.display_info.bpc &&
12191 connector->base.display_info.bpc * 3 < bpp) {
12192 DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n",
12193 bpp, connector->base.display_info.bpc*3);
12194 pipe_config->pipe_bpp = connector->base.display_info.bpc*3;
12195 }
12196
013dd9e0
JN
12197 /* Clamp bpp to default limit on screens without EDID 1.4 */
12198 if (connector->base.display_info.bpc == 0) {
12199 int type = connector->base.connector_type;
12200 int clamp_bpp = 24;
12201
12202 /* Fall back to 18 bpp when DP sink capability is unknown. */
12203 if (type == DRM_MODE_CONNECTOR_DisplayPort ||
12204 type == DRM_MODE_CONNECTOR_eDP)
12205 clamp_bpp = 18;
12206
12207 if (bpp > clamp_bpp) {
12208 DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of %d\n",
12209 bpp, clamp_bpp);
12210 pipe_config->pipe_bpp = clamp_bpp;
12211 }
050f7aeb
DV
12212 }
12213}
12214
4e53c2e0 12215static int
050f7aeb 12216compute_baseline_pipe_bpp(struct intel_crtc *crtc,
5cec258b 12217 struct intel_crtc_state *pipe_config)
4e53c2e0 12218{
050f7aeb 12219 struct drm_device *dev = crtc->base.dev;
1486017f 12220 struct drm_atomic_state *state;
da3ced29
ACO
12221 struct drm_connector *connector;
12222 struct drm_connector_state *connector_state;
1486017f 12223 int bpp, i;
4e53c2e0 12224
666a4537 12225 if ((IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)))
4e53c2e0 12226 bpp = 10*3;
d328c9d7
DV
12227 else if (INTEL_INFO(dev)->gen >= 5)
12228 bpp = 12*3;
12229 else
12230 bpp = 8*3;
12231
4e53c2e0 12232
4e53c2e0
DV
12233 pipe_config->pipe_bpp = bpp;
12234
1486017f
ACO
12235 state = pipe_config->base.state;
12236
4e53c2e0 12237 /* Clamp display bpp to EDID value */
da3ced29
ACO
12238 for_each_connector_in_state(state, connector, connector_state, i) {
12239 if (connector_state->crtc != &crtc->base)
4e53c2e0
DV
12240 continue;
12241
da3ced29
ACO
12242 connected_sink_compute_bpp(to_intel_connector(connector),
12243 pipe_config);
4e53c2e0
DV
12244 }
12245
12246 return bpp;
12247}
12248
644db711
DV
12249static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
12250{
12251 DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
12252 "type: 0x%x flags: 0x%x\n",
1342830c 12253 mode->crtc_clock,
644db711
DV
12254 mode->crtc_hdisplay, mode->crtc_hsync_start,
12255 mode->crtc_hsync_end, mode->crtc_htotal,
12256 mode->crtc_vdisplay, mode->crtc_vsync_start,
12257 mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags);
12258}
12259
c0b03411 12260static void intel_dump_pipe_config(struct intel_crtc *crtc,
5cec258b 12261 struct intel_crtc_state *pipe_config,
c0b03411
DV
12262 const char *context)
12263{
6a60cd87
CK
12264 struct drm_device *dev = crtc->base.dev;
12265 struct drm_plane *plane;
12266 struct intel_plane *intel_plane;
12267 struct intel_plane_state *state;
12268 struct drm_framebuffer *fb;
12269
78108b7c
VS
12270 DRM_DEBUG_KMS("[CRTC:%d:%s]%s config %p for pipe %c\n",
12271 crtc->base.base.id, crtc->base.name,
6a60cd87 12272 context, pipe_config, pipe_name(crtc->pipe));
c0b03411 12273
da205630 12274 DRM_DEBUG_KMS("cpu_transcoder: %s\n", transcoder_name(pipe_config->cpu_transcoder));
c0b03411
DV
12275 DRM_DEBUG_KMS("pipe bpp: %i, dithering: %i\n",
12276 pipe_config->pipe_bpp, pipe_config->dither);
12277 DRM_DEBUG_KMS("fdi/pch: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
12278 pipe_config->has_pch_encoder,
12279 pipe_config->fdi_lanes,
12280 pipe_config->fdi_m_n.gmch_m, pipe_config->fdi_m_n.gmch_n,
12281 pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n,
12282 pipe_config->fdi_m_n.tu);
90a6b7b0 12283 DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
eb14cb74 12284 pipe_config->has_dp_encoder,
90a6b7b0 12285 pipe_config->lane_count,
eb14cb74
VS
12286 pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n,
12287 pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n,
12288 pipe_config->dp_m_n.tu);
b95af8be 12289
90a6b7b0 12290 DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m2: %u, gmch_n2: %u, link_m2: %u, link_n2: %u, tu2: %u\n",
b95af8be 12291 pipe_config->has_dp_encoder,
90a6b7b0 12292 pipe_config->lane_count,
b95af8be
VK
12293 pipe_config->dp_m2_n2.gmch_m,
12294 pipe_config->dp_m2_n2.gmch_n,
12295 pipe_config->dp_m2_n2.link_m,
12296 pipe_config->dp_m2_n2.link_n,
12297 pipe_config->dp_m2_n2.tu);
12298
55072d19
DV
12299 DRM_DEBUG_KMS("audio: %i, infoframes: %i\n",
12300 pipe_config->has_audio,
12301 pipe_config->has_infoframe);
12302
c0b03411 12303 DRM_DEBUG_KMS("requested mode:\n");
2d112de7 12304 drm_mode_debug_printmodeline(&pipe_config->base.mode);
c0b03411 12305 DRM_DEBUG_KMS("adjusted mode:\n");
2d112de7
ACO
12306 drm_mode_debug_printmodeline(&pipe_config->base.adjusted_mode);
12307 intel_dump_crtc_timings(&pipe_config->base.adjusted_mode);
d71b8d4a 12308 DRM_DEBUG_KMS("port clock: %d\n", pipe_config->port_clock);
37327abd
VS
12309 DRM_DEBUG_KMS("pipe src size: %dx%d\n",
12310 pipe_config->pipe_src_w, pipe_config->pipe_src_h);
0ec463d3
TU
12311 DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
12312 crtc->num_scalers,
12313 pipe_config->scaler_state.scaler_users,
12314 pipe_config->scaler_state.scaler_id);
c0b03411
DV
12315 DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
12316 pipe_config->gmch_pfit.control,
12317 pipe_config->gmch_pfit.pgm_ratios,
12318 pipe_config->gmch_pfit.lvds_border_bits);
fd4daa9c 12319 DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
c0b03411 12320 pipe_config->pch_pfit.pos,
fd4daa9c
CW
12321 pipe_config->pch_pfit.size,
12322 pipe_config->pch_pfit.enabled ? "enabled" : "disabled");
42db64ef 12323 DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled);
cf532bb2 12324 DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide);
6a60cd87 12325
415ff0f6 12326 if (IS_BROXTON(dev)) {
05712c15 12327 DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
415ff0f6 12328 "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
c8453338 12329 "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
415ff0f6
TU
12330 pipe_config->ddi_pll_sel,
12331 pipe_config->dpll_hw_state.ebb0,
05712c15 12332 pipe_config->dpll_hw_state.ebb4,
415ff0f6
TU
12333 pipe_config->dpll_hw_state.pll0,
12334 pipe_config->dpll_hw_state.pll1,
12335 pipe_config->dpll_hw_state.pll2,
12336 pipe_config->dpll_hw_state.pll3,
12337 pipe_config->dpll_hw_state.pll6,
12338 pipe_config->dpll_hw_state.pll8,
05712c15 12339 pipe_config->dpll_hw_state.pll9,
c8453338 12340 pipe_config->dpll_hw_state.pll10,
415ff0f6 12341 pipe_config->dpll_hw_state.pcsdw12);
ef11bdb3 12342 } else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
415ff0f6
TU
12343 DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: "
12344 "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
12345 pipe_config->ddi_pll_sel,
12346 pipe_config->dpll_hw_state.ctrl1,
12347 pipe_config->dpll_hw_state.cfgcr1,
12348 pipe_config->dpll_hw_state.cfgcr2);
12349 } else if (HAS_DDI(dev)) {
1260f07e 12350 DRM_DEBUG_KMS("ddi_pll_sel: 0x%x; dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
415ff0f6 12351 pipe_config->ddi_pll_sel,
00490c22
ML
12352 pipe_config->dpll_hw_state.wrpll,
12353 pipe_config->dpll_hw_state.spll);
415ff0f6
TU
12354 } else {
12355 DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
12356 "fp0: 0x%x, fp1: 0x%x\n",
12357 pipe_config->dpll_hw_state.dpll,
12358 pipe_config->dpll_hw_state.dpll_md,
12359 pipe_config->dpll_hw_state.fp0,
12360 pipe_config->dpll_hw_state.fp1);
12361 }
12362
6a60cd87
CK
12363 DRM_DEBUG_KMS("planes on this crtc\n");
12364 list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
12365 intel_plane = to_intel_plane(plane);
12366 if (intel_plane->pipe != crtc->pipe)
12367 continue;
12368
12369 state = to_intel_plane_state(plane->state);
12370 fb = state->base.fb;
12371 if (!fb) {
1d577e02
VS
12372 DRM_DEBUG_KMS("[PLANE:%d:%s] disabled, scaler_id = %d\n",
12373 plane->base.id, plane->name, state->scaler_id);
6a60cd87
CK
12374 continue;
12375 }
12376
1d577e02
VS
12377 DRM_DEBUG_KMS("[PLANE:%d:%s] enabled",
12378 plane->base.id, plane->name);
12379 DRM_DEBUG_KMS("\tFB:%d, fb = %ux%u format = %s",
12380 fb->base.id, fb->width, fb->height,
12381 drm_get_format_name(fb->pixel_format));
12382 DRM_DEBUG_KMS("\tscaler:%d src %dx%d+%d+%d dst %dx%d+%d+%d\n",
12383 state->scaler_id,
12384 state->src.x1 >> 16, state->src.y1 >> 16,
12385 drm_rect_width(&state->src) >> 16,
12386 drm_rect_height(&state->src) >> 16,
12387 state->dst.x1, state->dst.y1,
12388 drm_rect_width(&state->dst),
12389 drm_rect_height(&state->dst));
6a60cd87 12390 }
c0b03411
DV
12391}
12392
5448a00d 12393static bool check_digital_port_conflicts(struct drm_atomic_state *state)
00f0b378 12394{
5448a00d 12395 struct drm_device *dev = state->dev;
da3ced29 12396 struct drm_connector *connector;
00f0b378
VS
12397 unsigned int used_ports = 0;
12398
12399 /*
12400 * Walk the connector list instead of the encoder
12401 * list to detect the problem on ddi platforms
12402 * where there's just one encoder per digital port.
12403 */
0bff4858
VS
12404 drm_for_each_connector(connector, dev) {
12405 struct drm_connector_state *connector_state;
12406 struct intel_encoder *encoder;
12407
12408 connector_state = drm_atomic_get_existing_connector_state(state, connector);
12409 if (!connector_state)
12410 connector_state = connector->state;
12411
5448a00d 12412 if (!connector_state->best_encoder)
00f0b378
VS
12413 continue;
12414
5448a00d
ACO
12415 encoder = to_intel_encoder(connector_state->best_encoder);
12416
12417 WARN_ON(!connector_state->crtc);
00f0b378
VS
12418
12419 switch (encoder->type) {
12420 unsigned int port_mask;
12421 case INTEL_OUTPUT_UNKNOWN:
12422 if (WARN_ON(!HAS_DDI(dev)))
12423 break;
12424 case INTEL_OUTPUT_DISPLAYPORT:
12425 case INTEL_OUTPUT_HDMI:
12426 case INTEL_OUTPUT_EDP:
12427 port_mask = 1 << enc_to_dig_port(&encoder->base)->port;
12428
12429 /* the same port mustn't appear more than once */
12430 if (used_ports & port_mask)
12431 return false;
12432
12433 used_ports |= port_mask;
12434 default:
12435 break;
12436 }
12437 }
12438
12439 return true;
12440}
12441
83a57153
ACO
/*
 * Zero @crtc_state while preserving the fields that must survive a
 * recompute: the base drm state, the scaler state, the shared dpll
 * selection and its hw state, ddi_pll_sel and the pch pfit force_thru
 * flag.  Save / memset / restore order is deliberate.
 */
12442 static void
12443 clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
12444 {
12445 struct drm_crtc_state tmp_state;
663a3640 12446 struct intel_crtc_scaler_state scaler_state;
4978cc93 12447 struct intel_dpll_hw_state dpll_hw_state;
8106ddbd 12448 struct intel_shared_dpll *shared_dpll;
8504c74c 12449 uint32_t ddi_pll_sel;
c4e2d043 12450 bool force_thru;
83a57153 12451
7546a384
ACO
12452 /* FIXME: before the switch to atomic started, a new pipe_config was
12453 * kzalloc'd. Code that depends on any field being zero should be
12454 * fixed, so that the crtc_state can be safely duplicated. For now,
12455 * only fields that are know to not cause problems are preserved. */
12456 
12457 tmp_state = crtc_state->base;
663a3640 12458 scaler_state = crtc_state->scaler_state;
4978cc93
ACO
12459 shared_dpll = crtc_state->shared_dpll;
12460 dpll_hw_state = crtc_state->dpll_hw_state;
8504c74c 12461 ddi_pll_sel = crtc_state->ddi_pll_sel;
c4e2d043 12462 force_thru = crtc_state->pch_pfit.force_thru;
4978cc93 12463
83a57153 12464 memset(crtc_state, 0, sizeof *crtc_state);
4978cc93 12465
83a57153 12466 crtc_state->base = tmp_state;
663a3640 12467 crtc_state->scaler_state = scaler_state;
4978cc93
ACO
12468 crtc_state->shared_dpll = shared_dpll;
12469 crtc_state->dpll_hw_state = dpll_hw_state;
8504c74c 12470 crtc_state->ddi_pll_sel = ddi_pll_sel;
c4e2d043 12471 crtc_state->pch_pfit.force_thru = force_thru;
83a57153
ACO
12472}
12473
/*
 * Compute the software pipe configuration for @crtc from the atomic state:
 * sanitize sync polarity, pick a baseline pipe bpp, record the attached
 * encoder output types, then let the encoder and crtc .compute_config()
 * hooks adjust (or reject) the mode. The whole encoder pass is retried
 * once if the crtc hook returns RETRY (bandwidth constrained).
 *
 * Returns 0 on success or a negative error code on failure.
 */
static int
intel_modeset_pipe_config(struct drm_crtc *crtc,
			  struct intel_crtc_state *pipe_config)
{
	struct drm_atomic_state *state = pipe_config->base.state;
	struct intel_encoder *encoder;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int base_bpp, ret = -EINVAL;
	int i;
	bool retry = true;	/* only one RETRY round is allowed */

	clear_intel_crtc_state(pipe_config);

	/* Default the transcoder to the pipe; hooks may override later. */
	pipe_config->cpu_transcoder =
		(enum transcoder) to_intel_crtc(crtc)->pipe;

	/*
	 * Sanitize sync polarity flags based on requested ones. If neither
	 * positive or negative polarity is requested, treat this as meaning
	 * negative polarity.
	 */
	if (!(pipe_config->base.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;

	if (!(pipe_config->base.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;

	base_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
					     pipe_config);
	if (base_bpp < 0)
		goto fail;

	/*
	 * Determine the real pipe dimensions. Note that stereo modes can
	 * increase the actual pipe size due to the frame doubling and
	 * insertion of additional space for blanks between the frame. This
	 * is stored in the crtc timings. We use the requested mode to do this
	 * computation to clearly distinguish it from the adjusted mode, which
	 * can be changed by the connectors in the below retry loop.
	 */
	drm_crtc_get_hv_timing(&pipe_config->base.mode,
			       &pipe_config->pipe_src_w,
			       &pipe_config->pipe_src_h);

	for_each_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != crtc)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		/*
		 * Determine output_types before calling the .compute_config()
		 * hooks so that the hooks can use this information safely.
		 */
		pipe_config->output_types |= 1 << encoder->type;
	}

encoder_retry:
	/* Ensure the port clock defaults are reset when retrying. */
	pipe_config->port_clock = 0;
	pipe_config->pixel_multiplier = 1;

	/* Fill in default crtc timings, allow encoders to overwrite them. */
	drm_mode_set_crtcinfo(&pipe_config->base.adjusted_mode,
			      CRTC_STEREO_DOUBLE);

	/* Pass our mode to the connectors and the CRTC to give them a chance to
	 * adjust it according to limitations or connector properties, and also
	 * a chance to reject the mode entirely.
	 */
	for_each_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != crtc)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		if (!(encoder->compute_config(encoder, pipe_config))) {
			DRM_DEBUG_KMS("Encoder config failure\n");
			goto fail;
		}
	}

	/* Set default port clock if not overwritten by the encoder. Needs to be
	 * done afterwards in case the encoder adjusts the mode. */
	if (!pipe_config->port_clock)
		pipe_config->port_clock = pipe_config->base.adjusted_mode.crtc_clock
			* pipe_config->pixel_multiplier;

	ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
	if (ret < 0) {
		DRM_DEBUG_KMS("CRTC fixup failed\n");
		goto fail;
	}

	if (ret == RETRY) {
		/* A second RETRY would loop forever; bail out instead. */
		if (WARN(!retry, "loop in pipe configuration computation\n")) {
			ret = -EINVAL;
			goto fail;
		}

		DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
		retry = false;
		goto encoder_retry;
	}

	/* Dithering seems to not pass-through bits correctly when it should, so
	 * only enable it on 6bpc panels. */
	pipe_config->dither = pipe_config->pipe_bpp == 6*3;
	DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
		      base_bpp, pipe_config->pipe_bpp, pipe_config->dither);

fail:
	return ret;
}
47f1c6c9 12591
/*
 * After a commit, resync the various legacy/transitional crtc fields
 * (intel_crtc->config, crtc->hwmode, primary plane fb/x/y) with the new
 * atomic state for every crtc in @state.
 */
static void
intel_modeset_update_crtc_state(struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int i;

	/* Double check state. */
	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		to_intel_crtc(crtc)->config = to_intel_crtc_state(crtc->state);

		/* Update hwmode for vblank functions */
		if (crtc->state->active)
			crtc->hwmode = crtc->state->adjusted_mode;
		else
			crtc->hwmode.crtc_clock = 0;

		/*
		 * Update legacy state to satisfy fbc code. This can
		 * be removed when fbc uses the atomic state.
		 */
		if (drm_atomic_get_existing_plane_state(state, crtc->primary)) {
			struct drm_plane_state *plane_state = crtc->primary->state;

			crtc->primary->fb = plane_state->fb;
			crtc->x = plane_state->src_x >> 16;
			crtc->y = plane_state->src_y >> 16;
		}
	}
}
12622
/*
 * Fuzzy clock comparison: two clocks match if they are identical, or if
 * their difference is within roughly 5% of their sum (about 10% of either
 * clock). A zero clock never matches a non-zero one.
 */
static bool intel_fuzzy_clock_check(int clock1, int clock2)
{
	int delta, sum;

	if (clock1 == clock2)
		return true;

	if (!clock1 || !clock2)
		return false;

	delta = abs(clock1 - clock2);
	sum = clock1 + clock2;

	return ((delta + sum) * 100) / sum < 105;
}
12640
/*
 * Iterate over every intel_crtc on @dev whose pipe bit is set in @mask
 * (bit N selects pipe N).
 */
#define for_each_intel_crtc_masked(dev, mask, intel_crtc) \
	list_for_each_entry((intel_crtc), \
			    &(dev)->mode_config.crtc_list, \
			    base.head) \
		for_each_if (mask & (1 <<(intel_crtc)->pipe))
cfb23ed6
ML
12647static bool
12648intel_compare_m_n(unsigned int m, unsigned int n,
12649 unsigned int m2, unsigned int n2,
12650 bool exact)
12651{
12652 if (m == m2 && n == n2)
12653 return true;
12654
12655 if (exact || !m || !n || !m2 || !n2)
12656 return false;
12657
12658 BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
12659
31d10b57
ML
12660 if (n > n2) {
12661 while (n > n2) {
cfb23ed6
ML
12662 m2 <<= 1;
12663 n2 <<= 1;
12664 }
31d10b57
ML
12665 } else if (n < n2) {
12666 while (n < n2) {
cfb23ed6
ML
12667 m <<= 1;
12668 n <<= 1;
12669 }
12670 }
12671
31d10b57
ML
12672 if (n != n2)
12673 return false;
12674
12675 return intel_fuzzy_clock_check(m, m2);
cfb23ed6
ML
12676}
12677
12678static bool
12679intel_compare_link_m_n(const struct intel_link_m_n *m_n,
12680 struct intel_link_m_n *m2_n2,
12681 bool adjust)
12682{
12683 if (m_n->tu == m2_n2->tu &&
12684 intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
12685 m2_n2->gmch_m, m2_n2->gmch_n, !adjust) &&
12686 intel_compare_m_n(m_n->link_m, m_n->link_n,
12687 m2_n2->link_m, m2_n2->link_n, !adjust)) {
12688 if (adjust)
12689 *m2_n2 = *m_n;
12690
12691 return true;
12692 }
12693
12694 return false;
12695}
12696
0e8ffe1b 12697static bool
2fa2fe9a 12698intel_pipe_config_compare(struct drm_device *dev,
5cec258b 12699 struct intel_crtc_state *current_config,
cfb23ed6
ML
12700 struct intel_crtc_state *pipe_config,
12701 bool adjust)
0e8ffe1b 12702{
cfb23ed6
ML
12703 bool ret = true;
12704
12705#define INTEL_ERR_OR_DBG_KMS(fmt, ...) \
12706 do { \
12707 if (!adjust) \
12708 DRM_ERROR(fmt, ##__VA_ARGS__); \
12709 else \
12710 DRM_DEBUG_KMS(fmt, ##__VA_ARGS__); \
12711 } while (0)
12712
66e985c0
DV
12713#define PIPE_CONF_CHECK_X(name) \
12714 if (current_config->name != pipe_config->name) { \
cfb23ed6 12715 INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
66e985c0
DV
12716 "(expected 0x%08x, found 0x%08x)\n", \
12717 current_config->name, \
12718 pipe_config->name); \
cfb23ed6 12719 ret = false; \
66e985c0
DV
12720 }
12721
08a24034
DV
12722#define PIPE_CONF_CHECK_I(name) \
12723 if (current_config->name != pipe_config->name) { \
cfb23ed6 12724 INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
08a24034
DV
12725 "(expected %i, found %i)\n", \
12726 current_config->name, \
12727 pipe_config->name); \
cfb23ed6
ML
12728 ret = false; \
12729 }
12730
8106ddbd
ACO
12731#define PIPE_CONF_CHECK_P(name) \
12732 if (current_config->name != pipe_config->name) { \
12733 INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
12734 "(expected %p, found %p)\n", \
12735 current_config->name, \
12736 pipe_config->name); \
12737 ret = false; \
12738 }
12739
cfb23ed6
ML
12740#define PIPE_CONF_CHECK_M_N(name) \
12741 if (!intel_compare_link_m_n(&current_config->name, \
12742 &pipe_config->name,\
12743 adjust)) { \
12744 INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
12745 "(expected tu %i gmch %i/%i link %i/%i, " \
12746 "found tu %i, gmch %i/%i link %i/%i)\n", \
12747 current_config->name.tu, \
12748 current_config->name.gmch_m, \
12749 current_config->name.gmch_n, \
12750 current_config->name.link_m, \
12751 current_config->name.link_n, \
12752 pipe_config->name.tu, \
12753 pipe_config->name.gmch_m, \
12754 pipe_config->name.gmch_n, \
12755 pipe_config->name.link_m, \
12756 pipe_config->name.link_n); \
12757 ret = false; \
12758 }
12759
55c561a7
DV
12760/* This is required for BDW+ where there is only one set of registers for
12761 * switching between high and low RR.
12762 * This macro can be used whenever a comparison has to be made between one
12763 * hw state and multiple sw state variables.
12764 */
cfb23ed6
ML
12765#define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) \
12766 if (!intel_compare_link_m_n(&current_config->name, \
12767 &pipe_config->name, adjust) && \
12768 !intel_compare_link_m_n(&current_config->alt_name, \
12769 &pipe_config->name, adjust)) { \
12770 INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
12771 "(expected tu %i gmch %i/%i link %i/%i, " \
12772 "or tu %i gmch %i/%i link %i/%i, " \
12773 "found tu %i, gmch %i/%i link %i/%i)\n", \
12774 current_config->name.tu, \
12775 current_config->name.gmch_m, \
12776 current_config->name.gmch_n, \
12777 current_config->name.link_m, \
12778 current_config->name.link_n, \
12779 current_config->alt_name.tu, \
12780 current_config->alt_name.gmch_m, \
12781 current_config->alt_name.gmch_n, \
12782 current_config->alt_name.link_m, \
12783 current_config->alt_name.link_n, \
12784 pipe_config->name.tu, \
12785 pipe_config->name.gmch_m, \
12786 pipe_config->name.gmch_n, \
12787 pipe_config->name.link_m, \
12788 pipe_config->name.link_n); \
12789 ret = false; \
88adfff1
DV
12790 }
12791
1bd1bd80
DV
12792#define PIPE_CONF_CHECK_FLAGS(name, mask) \
12793 if ((current_config->name ^ pipe_config->name) & (mask)) { \
cfb23ed6 12794 INTEL_ERR_OR_DBG_KMS("mismatch in " #name "(" #mask ") " \
1bd1bd80
DV
12795 "(expected %i, found %i)\n", \
12796 current_config->name & (mask), \
12797 pipe_config->name & (mask)); \
cfb23ed6 12798 ret = false; \
1bd1bd80
DV
12799 }
12800
5e550656
VS
12801#define PIPE_CONF_CHECK_CLOCK_FUZZY(name) \
12802 if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
cfb23ed6 12803 INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
5e550656
VS
12804 "(expected %i, found %i)\n", \
12805 current_config->name, \
12806 pipe_config->name); \
cfb23ed6 12807 ret = false; \
5e550656
VS
12808 }
12809
bb760063
DV
12810#define PIPE_CONF_QUIRK(quirk) \
12811 ((current_config->quirks | pipe_config->quirks) & (quirk))
12812
eccb140b
DV
12813 PIPE_CONF_CHECK_I(cpu_transcoder);
12814
08a24034
DV
12815 PIPE_CONF_CHECK_I(has_pch_encoder);
12816 PIPE_CONF_CHECK_I(fdi_lanes);
cfb23ed6 12817 PIPE_CONF_CHECK_M_N(fdi_m_n);
08a24034 12818
eb14cb74 12819 PIPE_CONF_CHECK_I(has_dp_encoder);
90a6b7b0 12820 PIPE_CONF_CHECK_I(lane_count);
95a7a2ae 12821 PIPE_CONF_CHECK_X(lane_lat_optim_mask);
b95af8be
VK
12822
12823 if (INTEL_INFO(dev)->gen < 8) {
cfb23ed6
ML
12824 PIPE_CONF_CHECK_M_N(dp_m_n);
12825
cfb23ed6
ML
12826 if (current_config->has_drrs)
12827 PIPE_CONF_CHECK_M_N(dp_m2_n2);
12828 } else
12829 PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
eb14cb74 12830
a65347ba 12831 PIPE_CONF_CHECK_I(has_dsi_encoder);
253c84c8 12832 PIPE_CONF_CHECK_X(output_types);
a65347ba 12833
2d112de7
ACO
12834 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay);
12835 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal);
12836 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_start);
12837 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_end);
12838 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_start);
12839 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_end);
1bd1bd80 12840
2d112de7
ACO
12841 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vdisplay);
12842 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vtotal);
12843 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_start);
12844 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_end);
12845 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_start);
12846 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end);
1bd1bd80 12847
c93f54cf 12848 PIPE_CONF_CHECK_I(pixel_multiplier);
6897b4b5 12849 PIPE_CONF_CHECK_I(has_hdmi_sink);
b5a9fa09 12850 if ((INTEL_INFO(dev)->gen < 8 && !IS_HASWELL(dev)) ||
666a4537 12851 IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
b5a9fa09 12852 PIPE_CONF_CHECK_I(limited_color_range);
e43823ec 12853 PIPE_CONF_CHECK_I(has_infoframe);
6c49f241 12854
9ed109a7
DV
12855 PIPE_CONF_CHECK_I(has_audio);
12856
2d112de7 12857 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
1bd1bd80
DV
12858 DRM_MODE_FLAG_INTERLACE);
12859
bb760063 12860 if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
2d112de7 12861 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
bb760063 12862 DRM_MODE_FLAG_PHSYNC);
2d112de7 12863 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
bb760063 12864 DRM_MODE_FLAG_NHSYNC);
2d112de7 12865 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
bb760063 12866 DRM_MODE_FLAG_PVSYNC);
2d112de7 12867 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
bb760063
DV
12868 DRM_MODE_FLAG_NVSYNC);
12869 }
045ac3b5 12870
333b8ca8 12871 PIPE_CONF_CHECK_X(gmch_pfit.control);
e2ff2d4a
DV
12872 /* pfit ratios are autocomputed by the hw on gen4+ */
12873 if (INTEL_INFO(dev)->gen < 4)
7f7d8dd6 12874 PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
333b8ca8 12875 PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
9953599b 12876
bfd16b2a
ML
12877 if (!adjust) {
12878 PIPE_CONF_CHECK_I(pipe_src_w);
12879 PIPE_CONF_CHECK_I(pipe_src_h);
12880
12881 PIPE_CONF_CHECK_I(pch_pfit.enabled);
12882 if (current_config->pch_pfit.enabled) {
12883 PIPE_CONF_CHECK_X(pch_pfit.pos);
12884 PIPE_CONF_CHECK_X(pch_pfit.size);
12885 }
2fa2fe9a 12886
7aefe2b5
ML
12887 PIPE_CONF_CHECK_I(scaler_state.scaler_id);
12888 }
a1b2278e 12889
e59150dc
JB
12890 /* BDW+ don't expose a synchronous way to read the state */
12891 if (IS_HASWELL(dev))
12892 PIPE_CONF_CHECK_I(ips_enabled);
42db64ef 12893
282740f7
VS
12894 PIPE_CONF_CHECK_I(double_wide);
12895
26804afd
DV
12896 PIPE_CONF_CHECK_X(ddi_pll_sel);
12897
8106ddbd 12898 PIPE_CONF_CHECK_P(shared_dpll);
66e985c0 12899 PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
8bcc2795 12900 PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
66e985c0
DV
12901 PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
12902 PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
d452c5b6 12903 PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
00490c22 12904 PIPE_CONF_CHECK_X(dpll_hw_state.spll);
3f4cd19f
DL
12905 PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
12906 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
12907 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
c0d43d62 12908
47eacbab
VS
12909 PIPE_CONF_CHECK_X(dsi_pll.ctrl);
12910 PIPE_CONF_CHECK_X(dsi_pll.div);
12911
42571aef
VS
12912 if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
12913 PIPE_CONF_CHECK_I(pipe_bpp);
12914
2d112de7 12915 PIPE_CONF_CHECK_CLOCK_FUZZY(base.adjusted_mode.crtc_clock);
a9a7e98a 12916 PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
5e550656 12917
66e985c0 12918#undef PIPE_CONF_CHECK_X
08a24034 12919#undef PIPE_CONF_CHECK_I
8106ddbd 12920#undef PIPE_CONF_CHECK_P
1bd1bd80 12921#undef PIPE_CONF_CHECK_FLAGS
5e550656 12922#undef PIPE_CONF_CHECK_CLOCK_FUZZY
bb760063 12923#undef PIPE_CONF_QUIRK
cfb23ed6 12924#undef INTEL_ERR_OR_DBG_KMS
88adfff1 12925
cfb23ed6 12926 return ret;
0e8ffe1b
DV
12927}
12928
e3b247da
VS
12929static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
12930 const struct intel_crtc_state *pipe_config)
12931{
12932 if (pipe_config->has_pch_encoder) {
21a727b3 12933 int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
e3b247da
VS
12934 &pipe_config->fdi_m_n);
12935 int dotclock = pipe_config->base.adjusted_mode.crtc_clock;
12936
12937 /*
12938 * FDI already provided one idea for the dotclock.
12939 * Yell if the encoder disagrees.
12940 */
12941 WARN(!intel_fuzzy_clock_check(fdi_dotclock, dotclock),
12942 "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
12943 fdi_dotclock, dotclock);
12944 }
12945}
12946
/*
 * Verify that the SKL+ DDB (display data buffer) allocation read back
 * from hardware matches the software watermark state, for every plane
 * and the cursor of @crtc. No-op on pre-gen9 or inactive crtcs.
 */
static void verify_wm_state(struct drm_crtc *crtc,
			    struct drm_crtc_state *new_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct skl_ddb_allocation hw_ddb, *sw_ddb;
	struct skl_ddb_entry *hw_entry, *sw_entry;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	const enum pipe pipe = intel_crtc->pipe;
	int plane;

	if (INTEL_INFO(dev)->gen < 9 || !new_state->active)
		return;

	skl_ddb_get_hw_state(dev_priv, &hw_ddb);
	sw_ddb = &dev_priv->wm.skl_hw.ddb;

	/* planes */
	for_each_plane(dev_priv, pipe, plane) {
		hw_entry = &hw_ddb.plane[pipe][plane];
		sw_entry = &sw_ddb->plane[pipe][plane];

		if (skl_ddb_entry_equal(hw_entry, sw_entry))
			continue;

		DRM_ERROR("mismatch in DDB state pipe %c plane %d "
			  "(expected (%u,%u), found (%u,%u))\n",
			  pipe_name(pipe), plane + 1,
			  sw_entry->start, sw_entry->end,
			  hw_entry->start, hw_entry->end);
	}

	/* cursor */
	hw_entry = &hw_ddb.plane[pipe][PLANE_CURSOR];
	sw_entry = &sw_ddb->plane[pipe][PLANE_CURSOR];

	if (!skl_ddb_entry_equal(hw_entry, sw_entry)) {
		DRM_ERROR("mismatch in DDB state pipe %c cursor "
			  "(expected (%u,%u), found (%u,%u))\n",
			  pipe_name(pipe),
			  sw_entry->start, sw_entry->end,
			  hw_entry->start, hw_entry->end);
	}
}
12991
91d1b4bd 12992static void
c0ead703 12993verify_connector_state(struct drm_device *dev, struct drm_crtc *crtc)
8af6cf88 12994{
35dd3c64 12995 struct drm_connector *connector;
8af6cf88 12996
e7c84544 12997 drm_for_each_connector(connector, dev) {
35dd3c64
ML
12998 struct drm_encoder *encoder = connector->encoder;
12999 struct drm_connector_state *state = connector->state;
ad3c558f 13000
e7c84544
ML
13001 if (state->crtc != crtc)
13002 continue;
13003
5a21b665 13004 intel_connector_verify_state(to_intel_connector(connector));
8af6cf88 13005
ad3c558f 13006 I915_STATE_WARN(state->best_encoder != encoder,
35dd3c64 13007 "connector's atomic encoder doesn't match legacy encoder\n");
8af6cf88 13008 }
91d1b4bd
DV
13009}
13010
/*
 * Verify encoder bookkeeping for the whole device: an encoder's enabled
 * state (base.crtc set) must agree with whether any connector points at
 * it, every such connector must target the encoder's crtc, and a detached
 * encoder must read back as disabled from hardware.
 */
static void
verify_encoder_state(struct drm_device *dev)
{
	struct intel_encoder *encoder;
	struct intel_connector *connector;

	for_each_intel_encoder(dev, encoder) {
		bool enabled = false;
		enum pipe pipe;

		DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
			      encoder->base.base.id,
			      encoder->base.name);

		for_each_intel_connector(dev, connector) {
			if (connector->base.state->best_encoder != &encoder->base)
				continue;
			enabled = true;

			I915_STATE_WARN(connector->base.state->crtc !=
					encoder->base.crtc,
			     "connector's crtc doesn't match encoder crtc\n");
		}

		I915_STATE_WARN(!!encoder->base.crtc != enabled,
		     "encoder's enabled state mismatch "
		     "(expected %i, found %i)\n",
		     !!encoder->base.crtc, enabled);

		/* Detached encoders must also be off in hardware. */
		if (!encoder->base.crtc) {
			bool active;

			active = encoder->get_hw_state(encoder, &pipe);
			I915_STATE_WARN(active,
			     "encoder detached but still enabled on pipe %c.\n",
			     pipe_name(pipe));
		}
	}
}
13050
/*
 * Verify one crtc after a commit: read the pipe config back from hardware
 * into the (now unneeded) old crtc state and compare it against the
 * committed software state. NOTE: @old_crtc_state is destroyed and reused
 * in place as scratch space for the hw readout.
 */
static void
verify_crtc_state(struct drm_crtc *crtc,
		  struct drm_crtc_state *old_crtc_state,
		  struct drm_crtc_state *new_crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_encoder *encoder;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *pipe_config, *sw_config;
	struct drm_atomic_state *old_state;
	bool active;

	/* Recycle the old state object as the hw-readout destination. */
	old_state = old_crtc_state->state;
	__drm_atomic_helper_crtc_destroy_state(old_crtc_state);
	pipe_config = to_intel_crtc_state(old_crtc_state);
	memset(pipe_config, 0, sizeof(*pipe_config));
	pipe_config->base.crtc = crtc;
	pipe_config->base.state = old_state;

	DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);

	active = dev_priv->display.get_pipe_config(intel_crtc, pipe_config);

	/* hw state is inconsistent with the pipe quirk */
	if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
	    (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
		active = new_crtc_state->active;

	I915_STATE_WARN(new_crtc_state->active != active,
	     "crtc active state doesn't match with hw state "
	     "(expected %i, found %i)\n", new_crtc_state->active, active);

	I915_STATE_WARN(intel_crtc->active != new_crtc_state->active,
	     "transitional active state does not match atomic hw state "
	     "(expected %i, found %i)\n", new_crtc_state->active, intel_crtc->active);

	for_each_encoder_on_crtc(dev, crtc, encoder) {
		enum pipe pipe;

		active = encoder->get_hw_state(encoder, &pipe);
		I915_STATE_WARN(active != new_crtc_state->active,
			"[ENCODER:%i] active %i with crtc active %i\n",
			encoder->base.base.id, active, new_crtc_state->active);

		I915_STATE_WARN(active && intel_crtc->pipe != pipe,
				"Encoder connected to wrong pipe %c\n",
				pipe_name(pipe));

		/* output_types must be known before .get_config() runs. */
		if (active) {
			pipe_config->output_types |= 1 << encoder->type;
			encoder->get_config(encoder, pipe_config);
		}
	}

	/* Inactive pipes have nothing further to compare. */
	if (!new_crtc_state->active)
		return;

	intel_pipe_config_sanity_check(dev_priv, pipe_config);

	sw_config = to_intel_crtc_state(crtc->state);
	if (!intel_pipe_config_compare(dev, sw_config,
				       pipe_config, false)) {
		I915_STATE_WARN(1, "pipe state doesn't match!\n");
		intel_dump_pipe_config(intel_crtc, pipe_config,
				       "[hw state]");
		intel_dump_pipe_config(intel_crtc, sw_config,
				       "[sw state]");
	}
}
13121
/*
 * Verify one shared DPLL's software bookkeeping against its hardware
 * state. With @crtc == NULL only the global on/active/reference-mask
 * invariants are checked; otherwise the given crtc's membership in the
 * pll's active and enabled masks is verified against @new_state.
 */
static void
verify_single_dpll_state(struct drm_i915_private *dev_priv,
			 struct intel_shared_dpll *pll,
			 struct drm_crtc *crtc,
			 struct drm_crtc_state *new_state)
{
	struct intel_dpll_hw_state dpll_hw_state;
	unsigned crtc_mask;
	bool active;

	memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));

	DRM_DEBUG_KMS("%s\n", pll->name);

	active = pll->funcs.get_hw_state(dev_priv, pll, &dpll_hw_state);

	/* Always-on plls have no meaningful sw on/off tracking to check. */
	if (!(pll->flags & INTEL_DPLL_ALWAYS_ON)) {
		I915_STATE_WARN(!pll->on && pll->active_mask,
				"pll in active use but not on in sw tracking\n");
		I915_STATE_WARN(pll->on && !pll->active_mask,
				"pll is on but not used by any active crtc\n");
		I915_STATE_WARN(pll->on != active,
				"pll on state mismatch (expected %i, found %i)\n",
				pll->on, active);
	}

	if (!crtc) {
		/* Global check: active users must be a subset of references. */
		I915_STATE_WARN(pll->active_mask & ~pll->config.crtc_mask,
				"more active pll users than references: %x vs %x\n",
				pll->active_mask, pll->config.crtc_mask);

		return;
	}

	crtc_mask = 1 << drm_crtc_index(crtc);

	if (new_state->active)
		I915_STATE_WARN(!(pll->active_mask & crtc_mask),
				"pll active mismatch (expected pipe %c in active mask 0x%02x)\n",
				pipe_name(drm_crtc_index(crtc)), pll->active_mask);
	else
		I915_STATE_WARN(pll->active_mask & crtc_mask,
				"pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n",
				pipe_name(drm_crtc_index(crtc)), pll->active_mask);

	I915_STATE_WARN(!(pll->config.crtc_mask & crtc_mask),
			"pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n",
			crtc_mask, pll->config.crtc_mask);

	/* Only compare register state while the pll is actually on. */
	I915_STATE_WARN(pll->on && memcmp(&pll->config.hw_state,
					  &dpll_hw_state,
					  sizeof(dpll_hw_state)),
			"pll hw state mismatch\n");
}
13176
13177static void
c0ead703
ML
13178verify_shared_dpll_state(struct drm_device *dev, struct drm_crtc *crtc,
13179 struct drm_crtc_state *old_crtc_state,
13180 struct drm_crtc_state *new_crtc_state)
e7c84544 13181{
fac5e23e 13182 struct drm_i915_private *dev_priv = to_i915(dev);
e7c84544
ML
13183 struct intel_crtc_state *old_state = to_intel_crtc_state(old_crtc_state);
13184 struct intel_crtc_state *new_state = to_intel_crtc_state(new_crtc_state);
13185
13186 if (new_state->shared_dpll)
c0ead703 13187 verify_single_dpll_state(dev_priv, new_state->shared_dpll, crtc, new_crtc_state);
e7c84544
ML
13188
13189 if (old_state->shared_dpll &&
13190 old_state->shared_dpll != new_state->shared_dpll) {
13191 unsigned crtc_mask = 1 << drm_crtc_index(crtc);
13192 struct intel_shared_dpll *pll = old_state->shared_dpll;
13193
13194 I915_STATE_WARN(pll->active_mask & crtc_mask,
13195 "pll active mismatch (didn't expect pipe %c in active mask)\n",
13196 pipe_name(drm_crtc_index(crtc)));
13197 I915_STATE_WARN(pll->config.crtc_mask & crtc_mask,
13198 "pll enabled crtcs mismatch (found %x in enabled mask)\n",
13199 pipe_name(drm_crtc_index(crtc)));
5358901f 13200 }
8af6cf88
DV
13201}
13202
e7c84544 13203static void
c0ead703 13204intel_modeset_verify_crtc(struct drm_crtc *crtc,
e7c84544
ML
13205 struct drm_crtc_state *old_state,
13206 struct drm_crtc_state *new_state)
13207{
5a21b665
DV
13208 if (!needs_modeset(new_state) &&
13209 !to_intel_crtc_state(new_state)->update_pipe)
13210 return;
13211
c0ead703 13212 verify_wm_state(crtc, new_state);
5a21b665 13213 verify_connector_state(crtc->dev, crtc);
c0ead703
ML
13214 verify_crtc_state(crtc, old_state, new_state);
13215 verify_shared_dpll_state(crtc->dev, crtc, old_state, new_state);
e7c84544
ML
13216}
13217
13218static void
c0ead703 13219verify_disabled_dpll_state(struct drm_device *dev)
e7c84544 13220{
fac5e23e 13221 struct drm_i915_private *dev_priv = to_i915(dev);
e7c84544
ML
13222 int i;
13223
13224 for (i = 0; i < dev_priv->num_shared_dpll; i++)
c0ead703 13225 verify_single_dpll_state(dev_priv, &dev_priv->shared_dplls[i], NULL, NULL);
e7c84544
ML
13226}
13227
/*
 * Verify everything not tied to an enabled crtc: encoder bookkeeping,
 * connectors without a crtc (NULL), and the shared DPLL global state.
 */
static void
intel_modeset_verify_disabled(struct drm_device *dev)
{
	verify_encoder_state(dev);
	verify_connector_state(dev, NULL);
	verify_disabled_dpll_state(dev);
}
13235
/* Recompute crtc->scanline_offset, the correction applied to the hw
 * scanline counter so that it lines up with the software's notion of
 * the current line (platform- and output-dependent, see below). */
static void update_scanline_offset(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;

	/*
	 * The scanline counter increments at the leading edge of hsync.
	 *
	 * On most platforms it starts counting from vtotal-1 on the
	 * first active line. That means the scanline counter value is
	 * always one less than what we would expect. Ie. just after
	 * start of vblank, which also occurs at start of hsync (on the
	 * last active line), the scanline counter will read vblank_start-1.
	 *
	 * On gen2 the scanline counter starts counting from 1 instead
	 * of vtotal-1, so we have to subtract one (or rather add vtotal-1
	 * to keep the value positive), instead of adding one.
	 *
	 * On HSW+ the behaviour of the scanline counter depends on the output
	 * type. For DP ports it behaves like most other platforms, but on HDMI
	 * there's an extra 1 line difference. So we need to add two instead of
	 * one to the value.
	 */
	if (IS_GEN2(dev)) {
		const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
		int vtotal;

		vtotal = adjusted_mode->crtc_vtotal;
		if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
			vtotal /= 2;

		crtc->scanline_offset = vtotal - 1;
	} else if (HAS_DDI(dev) &&
		   intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) {
		crtc->scanline_offset = 2;
	} else
		crtc->scanline_offset = 1;
}
13273
/*
 * Drop the shared DPLL reference of every crtc undergoing a modeset in
 * @state, so a fresh pll can be (re)assigned during compute. No-op on
 * platforms without a .crtc_compute_clock hook.
 */
static void intel_modeset_clear_plls(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_shared_dpll_config *shared_dpll = NULL;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int i;

	if (!dev_priv->display.crtc_compute_clock)
		return;

	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
		/* Old pll comes from the current (pre-swap) crtc->state. */
		struct intel_shared_dpll *old_dpll =
			to_intel_crtc_state(crtc->state)->shared_dpll;

		if (!needs_modeset(crtc_state))
			continue;

		to_intel_crtc_state(crtc_state)->shared_dpll = NULL;

		if (!old_dpll)
			continue;

		/* Fetch the pll state copy lazily, only when first needed. */
		if (!shared_dpll)
			shared_dpll = intel_atomic_get_shared_dpll_state(state);

		intel_shared_dpll_config_put(shared_dpll, old_dpll, intel_crtc);
	}
}
13305
/*
 * This implements the workaround described in the "notes" section of the mode
 * set sequence documentation. When going from no pipes or single pipe to
 * multiple pipes, and planes are enabled after the pipe, we need to wait at
 * least 2 vblanks on the first pipe before enabling planes on the second pipe.
 *
 * Returns 0 on success or a negative error code from acquiring crtc state.
 */
static int haswell_mode_set_planes_workaround(struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state;
	struct intel_crtc *intel_crtc;
	struct drm_crtc *crtc;
	struct intel_crtc_state *first_crtc_state = NULL;
	struct intel_crtc_state *other_crtc_state = NULL;
	enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
	int i;

	/* look at all crtc's that are going to be enabled in during modeset */
	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		intel_crtc = to_intel_crtc(crtc);

		if (!crtc_state->active || !needs_modeset(crtc_state))
			continue;

		if (first_crtc_state) {
			other_crtc_state = to_intel_crtc_state(crtc_state);
			break;
		} else {
			first_crtc_state = to_intel_crtc_state(crtc_state);
			first_pipe = intel_crtc->pipe;
		}
	}

	/* No workaround needed? */
	if (!first_crtc_state)
		return 0;

	/* w/a possibly needed, check how many crtc's are already enabled. */
	for_each_intel_crtc(state->dev, intel_crtc) {
		struct intel_crtc_state *pipe_config;

		pipe_config = intel_atomic_get_crtc_state(state, intel_crtc);
		if (IS_ERR(pipe_config))
			return PTR_ERR(pipe_config);

		pipe_config->hsw_workaround_pipe = INVALID_PIPE;

		if (!pipe_config->base.active ||
		    needs_modeset(&pipe_config->base))
			continue;

		/* 2 or more enabled crtcs means no need for w/a */
		if (enabled_pipe != INVALID_PIPE)
			return 0;

		enabled_pipe = intel_crtc->pipe;
	}

	/* Record which pipe must wait for which (at most one pair). */
	if (enabled_pipe != INVALID_PIPE)
		first_crtc_state->hsw_workaround_pipe = enabled_pipe;
	else if (other_crtc_state)
		other_crtc_state->hsw_workaround_pipe = first_pipe;

	return 0;
}
13370
27c329ed
ML
13371static int intel_modeset_all_pipes(struct drm_atomic_state *state)
13372{
13373 struct drm_crtc *crtc;
13374 struct drm_crtc_state *crtc_state;
13375 int ret = 0;
13376
13377 /* add all active pipes to the state */
13378 for_each_crtc(state->dev, crtc) {
13379 crtc_state = drm_atomic_get_crtc_state(state, crtc);
13380 if (IS_ERR(crtc_state))
13381 return PTR_ERR(crtc_state);
13382
13383 if (!crtc_state->active || needs_modeset(crtc_state))
13384 continue;
13385
13386 crtc_state->mode_changed = true;
13387
13388 ret = drm_atomic_add_affected_connectors(state, crtc);
13389 if (ret)
13390 break;
13391
13392 ret = drm_atomic_add_affected_planes(state, crtc);
13393 if (ret)
13394 break;
13395 }
13396
13397 return ret;
13398}
13399
c347a676 13400static int intel_modeset_checks(struct drm_atomic_state *state)
054518dd 13401{
565602d7 13402 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
fac5e23e 13403 struct drm_i915_private *dev_priv = to_i915(state->dev);
565602d7
ML
13404 struct drm_crtc *crtc;
13405 struct drm_crtc_state *crtc_state;
13406 int ret = 0, i;
054518dd 13407
b359283a
ML
13408 if (!check_digital_port_conflicts(state)) {
13409 DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
13410 return -EINVAL;
13411 }
13412
565602d7
ML
13413 intel_state->modeset = true;
13414 intel_state->active_crtcs = dev_priv->active_crtcs;
13415
13416 for_each_crtc_in_state(state, crtc, crtc_state, i) {
13417 if (crtc_state->active)
13418 intel_state->active_crtcs |= 1 << i;
13419 else
13420 intel_state->active_crtcs &= ~(1 << i);
8b4a7d05
MR
13421
13422 if (crtc_state->active != crtc->state->active)
13423 intel_state->active_pipe_changes |= drm_crtc_mask(crtc);
565602d7
ML
13424 }
13425
054518dd
ACO
13426 /*
13427 * See if the config requires any additional preparation, e.g.
13428 * to adjust global state with pipes off. We need to do this
13429 * here so we can get the modeset_pipe updated config for the new
13430 * mode set on this crtc. For other crtcs we need to use the
13431 * adjusted_mode bits in the crtc directly.
13432 */
27c329ed 13433 if (dev_priv->display.modeset_calc_cdclk) {
c89e39f3 13434 if (!intel_state->cdclk_pll_vco)
63911d72 13435 intel_state->cdclk_pll_vco = dev_priv->cdclk_pll.vco;
b2045352
VS
13436 if (!intel_state->cdclk_pll_vco)
13437 intel_state->cdclk_pll_vco = dev_priv->skl_preferred_vco_freq;
c89e39f3 13438
27c329ed 13439 ret = dev_priv->display.modeset_calc_cdclk(state);
c89e39f3
CT
13440 if (ret < 0)
13441 return ret;
27c329ed 13442
c89e39f3 13443 if (intel_state->dev_cdclk != dev_priv->cdclk_freq ||
63911d72 13444 intel_state->cdclk_pll_vco != dev_priv->cdclk_pll.vco)
27c329ed
ML
13445 ret = intel_modeset_all_pipes(state);
13446
13447 if (ret < 0)
054518dd 13448 return ret;
e8788cbc
ML
13449
13450 DRM_DEBUG_KMS("New cdclk calculated to be atomic %u, actual %u\n",
13451 intel_state->cdclk, intel_state->dev_cdclk);
27c329ed 13452 } else
1a617b77 13453 to_intel_atomic_state(state)->cdclk = dev_priv->atomic_cdclk_freq;
054518dd 13454
ad421372 13455 intel_modeset_clear_plls(state);
054518dd 13456
565602d7 13457 if (IS_HASWELL(dev_priv))
ad421372 13458 return haswell_mode_set_planes_workaround(state);
99d736a2 13459
ad421372 13460 return 0;
c347a676
ACO
13461}
13462
aa363136
MR
13463/*
13464 * Handle calculation of various watermark data at the end of the atomic check
13465 * phase. The code here should be run after the per-crtc and per-plane 'check'
13466 * handlers to ensure that all derived state has been updated.
13467 */
55994c2c 13468static int calc_watermark_data(struct drm_atomic_state *state)
aa363136
MR
13469{
13470 struct drm_device *dev = state->dev;
98d39494 13471 struct drm_i915_private *dev_priv = to_i915(dev);
98d39494
MR
13472
13473 /* Is there platform-specific watermark information to calculate? */
13474 if (dev_priv->display.compute_global_watermarks)
55994c2c
MR
13475 return dev_priv->display.compute_global_watermarks(state);
13476
13477 return 0;
aa363136
MR
13478}
13479
74c090b1
ML
13480/**
13481 * intel_atomic_check - validate state object
13482 * @dev: drm device
13483 * @state: state to validate
13484 */
13485static int intel_atomic_check(struct drm_device *dev,
13486 struct drm_atomic_state *state)
c347a676 13487{
dd8b3bdb 13488 struct drm_i915_private *dev_priv = to_i915(dev);
aa363136 13489 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
c347a676
ACO
13490 struct drm_crtc *crtc;
13491 struct drm_crtc_state *crtc_state;
13492 int ret, i;
61333b60 13493 bool any_ms = false;
c347a676 13494
74c090b1 13495 ret = drm_atomic_helper_check_modeset(dev, state);
054518dd
ACO
13496 if (ret)
13497 return ret;
13498
c347a676 13499 for_each_crtc_in_state(state, crtc, crtc_state, i) {
cfb23ed6
ML
13500 struct intel_crtc_state *pipe_config =
13501 to_intel_crtc_state(crtc_state);
1ed51de9
DV
13502
13503 /* Catch I915_MODE_FLAG_INHERITED */
13504 if (crtc_state->mode.private_flags != crtc->state->mode.private_flags)
13505 crtc_state->mode_changed = true;
cfb23ed6 13506
af4a879e 13507 if (!needs_modeset(crtc_state))
c347a676
ACO
13508 continue;
13509
af4a879e
DV
13510 if (!crtc_state->enable) {
13511 any_ms = true;
cfb23ed6 13512 continue;
af4a879e 13513 }
cfb23ed6 13514
26495481
DV
13515 /* FIXME: For only active_changed we shouldn't need to do any
13516 * state recomputation at all. */
13517
1ed51de9
DV
13518 ret = drm_atomic_add_affected_connectors(state, crtc);
13519 if (ret)
13520 return ret;
b359283a 13521
cfb23ed6 13522 ret = intel_modeset_pipe_config(crtc, pipe_config);
25aa1c39
ML
13523 if (ret) {
13524 intel_dump_pipe_config(to_intel_crtc(crtc),
13525 pipe_config, "[failed]");
c347a676 13526 return ret;
25aa1c39 13527 }
c347a676 13528
73831236 13529 if (i915.fastboot &&
dd8b3bdb 13530 intel_pipe_config_compare(dev,
cfb23ed6 13531 to_intel_crtc_state(crtc->state),
1ed51de9 13532 pipe_config, true)) {
26495481 13533 crtc_state->mode_changed = false;
bfd16b2a 13534 to_intel_crtc_state(crtc_state)->update_pipe = true;
26495481
DV
13535 }
13536
af4a879e 13537 if (needs_modeset(crtc_state))
26495481 13538 any_ms = true;
cfb23ed6 13539
af4a879e
DV
13540 ret = drm_atomic_add_affected_planes(state, crtc);
13541 if (ret)
13542 return ret;
61333b60 13543
26495481
DV
13544 intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
13545 needs_modeset(crtc_state) ?
13546 "[modeset]" : "[fastset]");
c347a676
ACO
13547 }
13548
61333b60
ML
13549 if (any_ms) {
13550 ret = intel_modeset_checks(state);
13551
13552 if (ret)
13553 return ret;
27c329ed 13554 } else
dd8b3bdb 13555 intel_state->cdclk = dev_priv->cdclk_freq;
76305b1a 13556
dd8b3bdb 13557 ret = drm_atomic_helper_check_planes(dev, state);
aa363136
MR
13558 if (ret)
13559 return ret;
13560
f51be2e0 13561 intel_fbc_choose_crtc(dev_priv, state);
55994c2c 13562 return calc_watermark_data(state);
054518dd
ACO
13563}
13564
5008e874
ML
13565static int intel_atomic_prepare_commit(struct drm_device *dev,
13566 struct drm_atomic_state *state,
81072bfd 13567 bool nonblock)
5008e874 13568{
fac5e23e 13569 struct drm_i915_private *dev_priv = to_i915(dev);
7580d774 13570 struct drm_plane_state *plane_state;
5008e874 13571 struct drm_crtc_state *crtc_state;
7580d774 13572 struct drm_plane *plane;
5008e874
ML
13573 struct drm_crtc *crtc;
13574 int i, ret;
13575
5a21b665
DV
13576 for_each_crtc_in_state(state, crtc, crtc_state, i) {
13577 if (state->legacy_cursor_update)
a6747b73
ML
13578 continue;
13579
5a21b665
DV
13580 ret = intel_crtc_wait_for_pending_flips(crtc);
13581 if (ret)
13582 return ret;
5008e874 13583
5a21b665
DV
13584 if (atomic_read(&to_intel_crtc(crtc)->unpin_work_count) >= 2)
13585 flush_workqueue(dev_priv->wq);
d55dbd06
ML
13586 }
13587
f935675f
ML
13588 ret = mutex_lock_interruptible(&dev->struct_mutex);
13589 if (ret)
13590 return ret;
13591
5008e874 13592 ret = drm_atomic_helper_prepare_planes(dev, state);
f7e5838b 13593 mutex_unlock(&dev->struct_mutex);
7580d774 13594
21daaeee 13595 if (!ret && !nonblock) {
7580d774
ML
13596 for_each_plane_in_state(state, plane, plane_state, i) {
13597 struct intel_plane_state *intel_plane_state =
13598 to_intel_plane_state(plane_state);
13599
13600 if (!intel_plane_state->wait_req)
13601 continue;
13602
13603 ret = __i915_wait_request(intel_plane_state->wait_req,
299259a3 13604 true, NULL, NULL);
f7e5838b 13605 if (ret) {
f4457ae7
CW
13606 /* Any hang should be swallowed by the wait */
13607 WARN_ON(ret == -EIO);
f7e5838b
CW
13608 mutex_lock(&dev->struct_mutex);
13609 drm_atomic_helper_cleanup_planes(dev, state);
13610 mutex_unlock(&dev->struct_mutex);
7580d774 13611 break;
f7e5838b 13612 }
7580d774 13613 }
7580d774 13614 }
5008e874
ML
13615
13616 return ret;
13617}
13618
a2991414
ML
13619u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
13620{
13621 struct drm_device *dev = crtc->base.dev;
13622
13623 if (!dev->max_vblank_count)
13624 return drm_accurate_vblank_count(&crtc->base);
13625
13626 return dev->driver->get_vblank_counter(dev, crtc->pipe);
13627}
13628
5a21b665
DV
13629static void intel_atomic_wait_for_vblanks(struct drm_device *dev,
13630 struct drm_i915_private *dev_priv,
13631 unsigned crtc_mask)
e8861675 13632{
5a21b665
DV
13633 unsigned last_vblank_count[I915_MAX_PIPES];
13634 enum pipe pipe;
13635 int ret;
e8861675 13636
5a21b665
DV
13637 if (!crtc_mask)
13638 return;
e8861675 13639
5a21b665
DV
13640 for_each_pipe(dev_priv, pipe) {
13641 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
e8861675 13642
5a21b665 13643 if (!((1 << pipe) & crtc_mask))
e8861675
ML
13644 continue;
13645
5a21b665
DV
13646 ret = drm_crtc_vblank_get(crtc);
13647 if (WARN_ON(ret != 0)) {
13648 crtc_mask &= ~(1 << pipe);
13649 continue;
e8861675
ML
13650 }
13651
5a21b665 13652 last_vblank_count[pipe] = drm_crtc_vblank_count(crtc);
e8861675
ML
13653 }
13654
5a21b665
DV
13655 for_each_pipe(dev_priv, pipe) {
13656 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
13657 long lret;
e8861675 13658
5a21b665
DV
13659 if (!((1 << pipe) & crtc_mask))
13660 continue;
d55dbd06 13661
5a21b665
DV
13662 lret = wait_event_timeout(dev->vblank[pipe].queue,
13663 last_vblank_count[pipe] !=
13664 drm_crtc_vblank_count(crtc),
13665 msecs_to_jiffies(50));
d55dbd06 13666
5a21b665 13667 WARN(!lret, "pipe %c vblank wait timed out\n", pipe_name(pipe));
d55dbd06 13668
5a21b665 13669 drm_crtc_vblank_put(crtc);
d55dbd06
ML
13670 }
13671}
13672
5a21b665 13673static bool needs_vblank_wait(struct intel_crtc_state *crtc_state)
a6747b73 13674{
5a21b665
DV
13675 /* fb updated, need to unpin old fb */
13676 if (crtc_state->fb_changed)
13677 return true;
a6747b73 13678
5a21b665
DV
13679 /* wm changes, need vblank before final wm's */
13680 if (crtc_state->update_wm_post)
13681 return true;
a6747b73 13682
5a21b665
DV
13683 /*
13684 * cxsr is re-enabled after vblank.
13685 * This is already handled by crtc_state->update_wm_post,
13686 * but added for clarity.
13687 */
13688 if (crtc_state->disable_cxsr)
13689 return true;
a6747b73 13690
5a21b665 13691 return false;
e8861675
ML
13692}
13693
94f05024 13694static void intel_atomic_commit_tail(struct drm_atomic_state *state)
a6778b3c 13695{
94f05024 13696 struct drm_device *dev = state->dev;
565602d7 13697 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
fac5e23e 13698 struct drm_i915_private *dev_priv = to_i915(dev);
29ceb0e6 13699 struct drm_crtc_state *old_crtc_state;
7580d774 13700 struct drm_crtc *crtc;
5a21b665 13701 struct intel_crtc_state *intel_cstate;
94f05024
DV
13702 struct drm_plane *plane;
13703 struct drm_plane_state *plane_state;
5a21b665
DV
13704 bool hw_check = intel_state->modeset;
13705 unsigned long put_domains[I915_MAX_PIPES] = {};
13706 unsigned crtc_vblank_mask = 0;
94f05024 13707 int i, ret;
a6778b3c 13708
94f05024
DV
13709 for_each_plane_in_state(state, plane, plane_state, i) {
13710 struct intel_plane_state *intel_plane_state =
13711 to_intel_plane_state(plane_state);
ea0000f0 13712
94f05024
DV
13713 if (!intel_plane_state->wait_req)
13714 continue;
d4afb8cc 13715
94f05024
DV
13716 ret = __i915_wait_request(intel_plane_state->wait_req,
13717 true, NULL, NULL);
13718 /* EIO should be eaten, and we can't get interrupted in the
13719 * worker, and blocking commits have waited already. */
13720 WARN_ON(ret);
13721 }
1c5e19f8 13722
ea0000f0
DV
13723 drm_atomic_helper_wait_for_dependencies(state);
13724
565602d7
ML
13725 if (intel_state->modeset) {
13726 memcpy(dev_priv->min_pixclk, intel_state->min_pixclk,
13727 sizeof(intel_state->min_pixclk));
13728 dev_priv->active_crtcs = intel_state->active_crtcs;
1a617b77 13729 dev_priv->atomic_cdclk_freq = intel_state->cdclk;
5a21b665
DV
13730
13731 intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
565602d7
ML
13732 }
13733
29ceb0e6 13734 for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
a539205a
ML
13735 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
13736
5a21b665
DV
13737 if (needs_modeset(crtc->state) ||
13738 to_intel_crtc_state(crtc->state)->update_pipe) {
13739 hw_check = true;
13740
13741 put_domains[to_intel_crtc(crtc)->pipe] =
13742 modeset_get_crtc_power_domains(crtc,
13743 to_intel_crtc_state(crtc->state));
13744 }
13745
61333b60
ML
13746 if (!needs_modeset(crtc->state))
13747 continue;
13748
29ceb0e6 13749 intel_pre_plane_update(to_intel_crtc_state(old_crtc_state));
460da916 13750
29ceb0e6
VS
13751 if (old_crtc_state->active) {
13752 intel_crtc_disable_planes(crtc, old_crtc_state->plane_mask);
a539205a 13753 dev_priv->display.crtc_disable(crtc);
eddfcbcd 13754 intel_crtc->active = false;
58f9c0bc 13755 intel_fbc_disable(intel_crtc);
eddfcbcd 13756 intel_disable_shared_dpll(intel_crtc);
9bbc8258
VS
13757
13758 /*
13759 * Underruns don't always raise
13760 * interrupts, so check manually.
13761 */
13762 intel_check_cpu_fifo_underruns(dev_priv);
13763 intel_check_pch_fifo_underruns(dev_priv);
b9001114
ML
13764
13765 if (!crtc->state->active)
13766 intel_update_watermarks(crtc);
a539205a 13767 }
b8cecdf5 13768 }
7758a113 13769
ea9d758d
DV
13770 /* Only after disabling all output pipelines that will be changed can we
13771 * update the the output configuration. */
4740b0f2 13772 intel_modeset_update_crtc_state(state);
f6e5b160 13773
565602d7 13774 if (intel_state->modeset) {
4740b0f2 13775 drm_atomic_helper_update_legacy_modeset_state(state->dev, state);
33c8df89
ML
13776
13777 if (dev_priv->display.modeset_commit_cdclk &&
c89e39f3 13778 (intel_state->dev_cdclk != dev_priv->cdclk_freq ||
63911d72 13779 intel_state->cdclk_pll_vco != dev_priv->cdclk_pll.vco))
33c8df89 13780 dev_priv->display.modeset_commit_cdclk(state);
f6d1973d 13781
c0ead703 13782 intel_modeset_verify_disabled(dev);
4740b0f2 13783 }
47fab737 13784
a6778b3c 13785 /* Now enable the clocks, plane, pipe, and connectors that we set up. */
29ceb0e6 13786 for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
f6ac4b2a
ML
13787 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
13788 bool modeset = needs_modeset(crtc->state);
5a21b665
DV
13789 struct intel_crtc_state *pipe_config =
13790 to_intel_crtc_state(crtc->state);
9f836f90 13791
f6ac4b2a 13792 if (modeset && crtc->state->active) {
a539205a
ML
13793 update_scanline_offset(to_intel_crtc(crtc));
13794 dev_priv->display.crtc_enable(crtc);
13795 }
80715b2f 13796
1f7528c4
DV
13797 /* Complete events for now disable pipes here. */
13798 if (modeset && !crtc->state->active && crtc->state->event) {
13799 spin_lock_irq(&dev->event_lock);
13800 drm_crtc_send_vblank_event(crtc, crtc->state->event);
13801 spin_unlock_irq(&dev->event_lock);
13802
13803 crtc->state->event = NULL;
13804 }
13805
f6ac4b2a 13806 if (!modeset)
29ceb0e6 13807 intel_pre_plane_update(to_intel_crtc_state(old_crtc_state));
f6ac4b2a 13808
5a21b665
DV
13809 if (crtc->state->active &&
13810 drm_atomic_get_existing_plane_state(state, crtc->primary))
faf68d92 13811 intel_fbc_enable(intel_crtc, pipe_config, to_intel_plane_state(crtc->primary->state));
5a21b665 13812
1f7528c4 13813 if (crtc->state->active)
5a21b665 13814 drm_atomic_helper_commit_planes_on_crtc(old_crtc_state);
f6d1973d 13815
5a21b665
DV
13816 if (pipe_config->base.active && needs_vblank_wait(pipe_config))
13817 crtc_vblank_mask |= 1 << i;
177246a8
MR
13818 }
13819
94f05024
DV
13820 /* FIXME: We should call drm_atomic_helper_commit_hw_done() here
13821 * already, but still need the state for the delayed optimization. To
13822 * fix this:
13823 * - wrap the optimization/post_plane_update stuff into a per-crtc work.
13824 * - schedule that vblank worker _before_ calling hw_done
13825 * - at the start of commit_tail, cancel it _synchrously
13826 * - switch over to the vblank wait helper in the core after that since
13827 * we don't need out special handling any more.
13828 */
5a21b665
DV
13829 if (!state->legacy_cursor_update)
13830 intel_atomic_wait_for_vblanks(dev, dev_priv, crtc_vblank_mask);
13831
13832 /*
13833 * Now that the vblank has passed, we can go ahead and program the
13834 * optimal watermarks on platforms that need two-step watermark
13835 * programming.
13836 *
13837 * TODO: Move this (and other cleanup) to an async worker eventually.
13838 */
13839 for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
13840 intel_cstate = to_intel_crtc_state(crtc->state);
13841
13842 if (dev_priv->display.optimize_watermarks)
13843 dev_priv->display.optimize_watermarks(intel_cstate);
13844 }
13845
13846 for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
13847 intel_post_plane_update(to_intel_crtc_state(old_crtc_state));
13848
13849 if (put_domains[i])
13850 modeset_put_power_domains(dev_priv, put_domains[i]);
13851
13852 intel_modeset_verify_crtc(crtc, old_crtc_state, crtc->state);
13853 }
13854
94f05024
DV
13855 drm_atomic_helper_commit_hw_done(state);
13856
5a21b665
DV
13857 if (intel_state->modeset)
13858 intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET);
13859
13860 mutex_lock(&dev->struct_mutex);
13861 drm_atomic_helper_cleanup_planes(dev, state);
13862 mutex_unlock(&dev->struct_mutex);
13863
ea0000f0
DV
13864 drm_atomic_helper_commit_cleanup_done(state);
13865
ee165b1a 13866 drm_atomic_state_free(state);
f30da187 13867
75714940
MK
13868 /* As one of the primary mmio accessors, KMS has a high likelihood
13869 * of triggering bugs in unclaimed access. After we finish
13870 * modesetting, see if an error has been flagged, and if so
13871 * enable debugging for the next modeset - and hope we catch
13872 * the culprit.
13873 *
13874 * XXX note that we assume display power is on at this point.
13875 * This might hold true now but we need to add pm helper to check
13876 * unclaimed only when the hardware is on, as atomic commits
13877 * can happen also when the device is completely off.
13878 */
13879 intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
94f05024
DV
13880}
13881
13882static void intel_atomic_commit_work(struct work_struct *work)
13883{
13884 struct drm_atomic_state *state = container_of(work,
13885 struct drm_atomic_state,
13886 commit_work);
13887 intel_atomic_commit_tail(state);
13888}
13889
6c9c1b38
DV
13890static void intel_atomic_track_fbs(struct drm_atomic_state *state)
13891{
13892 struct drm_plane_state *old_plane_state;
13893 struct drm_plane *plane;
13894 struct drm_i915_gem_object *obj, *old_obj;
13895 struct intel_plane *intel_plane;
13896 int i;
13897
13898 mutex_lock(&state->dev->struct_mutex);
13899 for_each_plane_in_state(state, plane, old_plane_state, i) {
13900 obj = intel_fb_obj(plane->state->fb);
13901 old_obj = intel_fb_obj(old_plane_state->fb);
13902 intel_plane = to_intel_plane(plane);
13903
13904 i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit);
13905 }
13906 mutex_unlock(&state->dev->struct_mutex);
13907}
13908
94f05024
DV
13909/**
13910 * intel_atomic_commit - commit validated state object
13911 * @dev: DRM device
13912 * @state: the top-level driver state object
13913 * @nonblock: nonblocking commit
13914 *
13915 * This function commits a top-level state object that has been validated
13916 * with drm_atomic_helper_check().
13917 *
13918 * FIXME: Atomic modeset support for i915 is not yet complete. At the moment
13919 * nonblocking commits are only safe for pure plane updates. Everything else
13920 * should work though.
13921 *
13922 * RETURNS
13923 * Zero for success or -errno.
13924 */
13925static int intel_atomic_commit(struct drm_device *dev,
13926 struct drm_atomic_state *state,
13927 bool nonblock)
13928{
13929 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
fac5e23e 13930 struct drm_i915_private *dev_priv = to_i915(dev);
94f05024
DV
13931 int ret = 0;
13932
13933 if (intel_state->modeset && nonblock) {
13934 DRM_DEBUG_KMS("nonblocking commit for modeset not yet implemented.\n");
13935 return -EINVAL;
13936 }
13937
13938 ret = drm_atomic_helper_setup_commit(state, nonblock);
13939 if (ret)
13940 return ret;
13941
13942 INIT_WORK(&state->commit_work, intel_atomic_commit_work);
13943
13944 ret = intel_atomic_prepare_commit(dev, state, nonblock);
13945 if (ret) {
13946 DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
13947 return ret;
13948 }
13949
13950 drm_atomic_helper_swap_state(state, true);
13951 dev_priv->wm.distrust_bios_wm = false;
13952 dev_priv->wm.skl_results = intel_state->wm_results;
13953 intel_shared_dpll_commit(state);
6c9c1b38 13954 intel_atomic_track_fbs(state);
94f05024
DV
13955
13956 if (nonblock)
13957 queue_work(system_unbound_wq, &state->commit_work);
13958 else
13959 intel_atomic_commit_tail(state);
75714940 13960
74c090b1 13961 return 0;
7f27126e
JB
13962}
13963
c0c36b94
CW
13964void intel_crtc_restore_mode(struct drm_crtc *crtc)
13965{
83a57153
ACO
13966 struct drm_device *dev = crtc->dev;
13967 struct drm_atomic_state *state;
e694eb02 13968 struct drm_crtc_state *crtc_state;
2bfb4627 13969 int ret;
83a57153
ACO
13970
13971 state = drm_atomic_state_alloc(dev);
13972 if (!state) {
78108b7c
VS
13973 DRM_DEBUG_KMS("[CRTC:%d:%s] crtc restore failed, out of memory",
13974 crtc->base.id, crtc->name);
83a57153
ACO
13975 return;
13976 }
13977
e694eb02 13978 state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
83a57153 13979
e694eb02
ML
13980retry:
13981 crtc_state = drm_atomic_get_crtc_state(state, crtc);
13982 ret = PTR_ERR_OR_ZERO(crtc_state);
13983 if (!ret) {
13984 if (!crtc_state->active)
13985 goto out;
83a57153 13986
e694eb02 13987 crtc_state->mode_changed = true;
74c090b1 13988 ret = drm_atomic_commit(state);
83a57153
ACO
13989 }
13990
e694eb02
ML
13991 if (ret == -EDEADLK) {
13992 drm_atomic_state_clear(state);
13993 drm_modeset_backoff(state->acquire_ctx);
13994 goto retry;
4ed9fb37 13995 }
4be07317 13996
2bfb4627 13997 if (ret)
e694eb02 13998out:
2bfb4627 13999 drm_atomic_state_free(state);
c0c36b94
CW
14000}
14001
25c5b266
DV
14002#undef for_each_intel_crtc_masked
14003
f6e5b160 14004static const struct drm_crtc_funcs intel_crtc_funcs = {
82cf435b 14005 .gamma_set = drm_atomic_helper_legacy_gamma_set,
74c090b1 14006 .set_config = drm_atomic_helper_set_config,
82cf435b 14007 .set_property = drm_atomic_helper_crtc_set_property,
f6e5b160 14008 .destroy = intel_crtc_destroy,
527b6abe 14009 .page_flip = intel_crtc_page_flip,
1356837e
MR
14010 .atomic_duplicate_state = intel_crtc_duplicate_state,
14011 .atomic_destroy_state = intel_crtc_destroy_state,
f6e5b160
CW
14012};
14013
6beb8c23
MR
14014/**
14015 * intel_prepare_plane_fb - Prepare fb for usage on plane
14016 * @plane: drm plane to prepare for
14017 * @fb: framebuffer to prepare for presentation
14018 *
14019 * Prepares a framebuffer for usage on a display plane. Generally this
14020 * involves pinning the underlying object and updating the frontbuffer tracking
14021 * bits. Some older platforms need special physical address handling for
14022 * cursor planes.
14023 *
f935675f
ML
14024 * Must be called with struct_mutex held.
14025 *
6beb8c23
MR
14026 * Returns 0 on success, negative error code on failure.
14027 */
14028int
14029intel_prepare_plane_fb(struct drm_plane *plane,
d136dfee 14030 const struct drm_plane_state *new_state)
465c120c
MR
14031{
14032 struct drm_device *dev = plane->dev;
844f9111 14033 struct drm_framebuffer *fb = new_state->fb;
6beb8c23 14034 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
1ee49399 14035 struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
c37efb99 14036 struct reservation_object *resv;
6beb8c23 14037 int ret = 0;
465c120c 14038
1ee49399 14039 if (!obj && !old_obj)
465c120c
MR
14040 return 0;
14041
5008e874
ML
14042 if (old_obj) {
14043 struct drm_crtc_state *crtc_state =
14044 drm_atomic_get_existing_crtc_state(new_state->state, plane->state->crtc);
14045
14046 /* Big Hammer, we also need to ensure that any pending
14047 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
14048 * current scanout is retired before unpinning the old
14049 * framebuffer. Note that we rely on userspace rendering
14050 * into the buffer attached to the pipe they are waiting
14051 * on. If not, userspace generates a GPU hang with IPEHR
14052 * point to the MI_WAIT_FOR_EVENT.
14053 *
14054 * This should only fail upon a hung GPU, in which case we
14055 * can safely continue.
14056 */
14057 if (needs_modeset(crtc_state))
14058 ret = i915_gem_object_wait_rendering(old_obj, true);
f4457ae7
CW
14059 if (ret) {
14060 /* GPU hangs should have been swallowed by the wait */
14061 WARN_ON(ret == -EIO);
f935675f 14062 return ret;
f4457ae7 14063 }
5008e874
ML
14064 }
14065
c37efb99
CW
14066 if (!obj)
14067 return 0;
14068
5a21b665 14069 /* For framebuffer backed by dmabuf, wait for fence */
c37efb99
CW
14070 resv = i915_gem_object_get_dmabuf_resv(obj);
14071 if (resv) {
5a21b665
DV
14072 long lret;
14073
c37efb99 14074 lret = reservation_object_wait_timeout_rcu(resv, false, true,
5a21b665
DV
14075 MAX_SCHEDULE_TIMEOUT);
14076 if (lret == -ERESTARTSYS)
14077 return lret;
14078
14079 WARN(lret < 0, "waiting returns %li\n", lret);
14080 }
14081
c37efb99 14082 if (plane->type == DRM_PLANE_TYPE_CURSOR &&
6beb8c23
MR
14083 INTEL_INFO(dev)->cursor_needs_physical) {
14084 int align = IS_I830(dev) ? 16 * 1024 : 256;
14085 ret = i915_gem_object_attach_phys(obj, align);
14086 if (ret)
14087 DRM_DEBUG_KMS("failed to attach phys object\n");
14088 } else {
3465c580 14089 ret = intel_pin_and_fence_fb_obj(fb, new_state->rotation);
6beb8c23 14090 }
465c120c 14091
c37efb99 14092 if (ret == 0) {
6c9c1b38
DV
14093 struct intel_plane_state *plane_state =
14094 to_intel_plane_state(new_state);
7580d774 14095
6c9c1b38
DV
14096 i915_gem_request_assign(&plane_state->wait_req,
14097 obj->last_write_req);
7580d774 14098 }
fdd508a6 14099
6beb8c23
MR
14100 return ret;
14101}
14102
38f3ce3a
MR
14103/**
14104 * intel_cleanup_plane_fb - Cleans up an fb after plane use
14105 * @plane: drm plane to clean up for
14106 * @fb: old framebuffer that was on plane
14107 *
14108 * Cleans up a framebuffer that has just been removed from a plane.
f935675f
ML
14109 *
14110 * Must be called with struct_mutex held.
38f3ce3a
MR
14111 */
14112void
14113intel_cleanup_plane_fb(struct drm_plane *plane,
d136dfee 14114 const struct drm_plane_state *old_state)
38f3ce3a
MR
14115{
14116 struct drm_device *dev = plane->dev;
7580d774 14117 struct intel_plane_state *old_intel_state;
1ee49399
ML
14118 struct drm_i915_gem_object *old_obj = intel_fb_obj(old_state->fb);
14119 struct drm_i915_gem_object *obj = intel_fb_obj(plane->state->fb);
38f3ce3a 14120
7580d774
ML
14121 old_intel_state = to_intel_plane_state(old_state);
14122
1ee49399 14123 if (!obj && !old_obj)
38f3ce3a
MR
14124 return;
14125
1ee49399
ML
14126 if (old_obj && (plane->type != DRM_PLANE_TYPE_CURSOR ||
14127 !INTEL_INFO(dev)->cursor_needs_physical))
3465c580 14128 intel_unpin_fb_obj(old_state->fb, old_state->rotation);
1ee49399 14129
7580d774 14130 i915_gem_request_assign(&old_intel_state->wait_req, NULL);
465c120c
MR
14131}
14132
6156a456
CK
14133int
14134skl_max_scale(struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state)
14135{
14136 int max_scale;
6156a456
CK
14137 int crtc_clock, cdclk;
14138
bf8a0af0 14139 if (!intel_crtc || !crtc_state->base.enable)
6156a456
CK
14140 return DRM_PLANE_HELPER_NO_SCALING;
14141
6156a456 14142 crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
27c329ed 14143 cdclk = to_intel_atomic_state(crtc_state->base.state)->cdclk;
6156a456 14144
54bf1ce6 14145 if (WARN_ON_ONCE(!crtc_clock || cdclk < crtc_clock))
6156a456
CK
14146 return DRM_PLANE_HELPER_NO_SCALING;
14147
14148 /*
14149 * skl max scale is lower of:
14150 * close to 3 but not 3, -1 is for that purpose
14151 * or
14152 * cdclk/crtc_clock
14153 */
14154 max_scale = min((1 << 16) * 3 - 1, (1 << 8) * ((cdclk << 8) / crtc_clock));
14155
14156 return max_scale;
14157}
14158
465c120c 14159static int
3c692a41 14160intel_check_primary_plane(struct drm_plane *plane,
061e4b8d 14161 struct intel_crtc_state *crtc_state,
3c692a41
GP
14162 struct intel_plane_state *state)
14163{
2b875c22
MR
14164 struct drm_crtc *crtc = state->base.crtc;
14165 struct drm_framebuffer *fb = state->base.fb;
6156a456 14166 int min_scale = DRM_PLANE_HELPER_NO_SCALING;
061e4b8d
ML
14167 int max_scale = DRM_PLANE_HELPER_NO_SCALING;
14168 bool can_position = false;
465c120c 14169
693bdc28
VS
14170 if (INTEL_INFO(plane->dev)->gen >= 9) {
14171 /* use scaler when colorkey is not required */
14172 if (state->ckey.flags == I915_SET_COLORKEY_NONE) {
14173 min_scale = 1;
14174 max_scale = skl_max_scale(to_intel_crtc(crtc), crtc_state);
14175 }
d8106366 14176 can_position = true;
6156a456 14177 }
d8106366 14178
061e4b8d
ML
14179 return drm_plane_helper_check_update(plane, crtc, fb, &state->src,
14180 &state->dst, &state->clip,
9b8b013d 14181 state->base.rotation,
da20eabd
ML
14182 min_scale, max_scale,
14183 can_position, true,
14184 &state->visible);
14af293f
GP
14185}
14186
5a21b665
DV
14187static void intel_begin_crtc_commit(struct drm_crtc *crtc,
14188 struct drm_crtc_state *old_crtc_state)
14189{
14190 struct drm_device *dev = crtc->dev;
14191 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
14192 struct intel_crtc_state *old_intel_state =
14193 to_intel_crtc_state(old_crtc_state);
14194 bool modeset = needs_modeset(crtc->state);
14195
14196 /* Perform vblank evasion around commit operation */
14197 intel_pipe_update_start(intel_crtc);
14198
14199 if (modeset)
14200 return;
14201
14202 if (crtc->state->color_mgmt_changed || to_intel_crtc_state(crtc->state)->update_pipe) {
14203 intel_color_set_csc(crtc->state);
14204 intel_color_load_luts(crtc->state);
14205 }
14206
14207 if (to_intel_crtc_state(crtc->state)->update_pipe)
14208 intel_update_pipe_config(intel_crtc, old_intel_state);
14209 else if (INTEL_INFO(dev)->gen >= 9)
14210 skl_detach_scalers(intel_crtc);
14211}
14212
14213static void intel_finish_crtc_commit(struct drm_crtc *crtc,
14214 struct drm_crtc_state *old_crtc_state)
14215{
14216 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
14217
14218 intel_pipe_update_end(intel_crtc, NULL);
14219}
14220
/**
 * intel_plane_destroy - destroy a plane
 * @plane: plane to destroy
 *
 * Common destruction function for all types of planes (primary, cursor,
 * sprite). Safe to call with a NULL plane.
 */
void intel_plane_destroy(struct drm_plane *plane)
{
	if (!plane)
		return;

	/* drm_plane_cleanup() must run before the embedding struct goes. */
	drm_plane_cleanup(plane);
	kfree(to_intel_plane(plane));
}
14236
/*
 * Plane funcs shared by all intel plane types (primary, cursor, sprite).
 * Legacy update/disable entry points are routed through the atomic
 * helpers; destroy and the property hooks are i915-specific.
 */
const struct drm_plane_funcs intel_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = intel_plane_destroy,
	.set_property = drm_atomic_helper_plane_set_property,
	.atomic_get_property = intel_plane_atomic_get_property,
	.atomic_set_property = intel_plane_atomic_set_property,
	.atomic_duplicate_state = intel_plane_duplicate_state,
	.atomic_destroy_state = intel_plane_destroy_state,
};
14248
/*
 * Allocate and register the primary plane for @pipe.
 *
 * Picks the per-generation format list and update/disable vfuncs, then
 * registers the plane with the DRM core. Returns the new plane, or NULL
 * on allocation/registration failure (partially constructed state is
 * freed on the fail path).
 */
static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
						    int pipe)
{
	struct intel_plane *primary = NULL;
	struct intel_plane_state *state = NULL;
	const uint32_t *intel_primary_formats;
	unsigned int num_formats;
	int ret;

	primary = kzalloc(sizeof(*primary), GFP_KERNEL);
	if (!primary)
		goto fail;

	state = intel_create_plane_state(&primary->base);
	if (!state)
		goto fail;
	primary->base.state = &state->base;

	primary->can_scale = false;
	primary->max_downscale = 1;
	if (INTEL_INFO(dev)->gen >= 9) {
		/* gen9+ can route the primary through a pipe scaler. */
		primary->can_scale = true;
		state->scaler_id = -1;
	}
	primary->pipe = pipe;
	primary->plane = pipe;
	primary->frontbuffer_bit = INTEL_FRONTBUFFER_PRIMARY(pipe);
	primary->check_plane = intel_check_primary_plane;
	/* gen2/3 FBC: plane A must feed pipe B (see intel_crtc_init). */
	if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4)
		primary->plane = !pipe;

	/* Select format table and plane programming vfuncs per platform. */
	if (INTEL_INFO(dev)->gen >= 9) {
		intel_primary_formats = skl_primary_formats;
		num_formats = ARRAY_SIZE(skl_primary_formats);

		primary->update_plane = skylake_update_primary_plane;
		primary->disable_plane = skylake_disable_primary_plane;
	} else if (HAS_PCH_SPLIT(dev)) {
		intel_primary_formats = i965_primary_formats;
		num_formats = ARRAY_SIZE(i965_primary_formats);

		primary->update_plane = ironlake_update_primary_plane;
		primary->disable_plane = i9xx_disable_primary_plane;
	} else if (INTEL_INFO(dev)->gen >= 4) {
		intel_primary_formats = i965_primary_formats;
		num_formats = ARRAY_SIZE(i965_primary_formats);

		primary->update_plane = i9xx_update_primary_plane;
		primary->disable_plane = i9xx_disable_primary_plane;
	} else {
		intel_primary_formats = i8xx_primary_formats;
		num_formats = ARRAY_SIZE(i8xx_primary_formats);

		primary->update_plane = i9xx_update_primary_plane;
		primary->disable_plane = i9xx_disable_primary_plane;
	}

	/* Plane name format differs per generation (matches hw naming). */
	if (INTEL_INFO(dev)->gen >= 9)
		ret = drm_universal_plane_init(dev, &primary->base, 0,
					       &intel_plane_funcs,
					       intel_primary_formats, num_formats,
					       DRM_PLANE_TYPE_PRIMARY,
					       "plane 1%c", pipe_name(pipe));
	else if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
		ret = drm_universal_plane_init(dev, &primary->base, 0,
					       &intel_plane_funcs,
					       intel_primary_formats, num_formats,
					       DRM_PLANE_TYPE_PRIMARY,
					       "primary %c", pipe_name(pipe));
	else
		ret = drm_universal_plane_init(dev, &primary->base, 0,
					       &intel_plane_funcs,
					       intel_primary_formats, num_formats,
					       DRM_PLANE_TYPE_PRIMARY,
					       "plane %c", plane_name(primary->plane));
	if (ret)
		goto fail;

	if (INTEL_INFO(dev)->gen >= 4)
		intel_create_rotation_property(dev, primary);

	drm_plane_helper_add(&primary->base, &intel_plane_helper_funcs);

	return &primary->base;

fail:
	kfree(state);
	kfree(primary);

	return NULL;
}
14340
3b7a5119
SJ
14341void intel_create_rotation_property(struct drm_device *dev, struct intel_plane *plane)
14342{
14343 if (!dev->mode_config.rotation_property) {
14344 unsigned long flags = BIT(DRM_ROTATE_0) |
14345 BIT(DRM_ROTATE_180);
14346
14347 if (INTEL_INFO(dev)->gen >= 9)
14348 flags |= BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270);
14349
14350 dev->mode_config.rotation_property =
14351 drm_mode_create_rotation_property(dev, flags);
14352 }
14353 if (dev->mode_config.rotation_property)
14354 drm_object_attach_property(&plane->base.base,
14355 dev->mode_config.rotation_property,
14356 plane->base.state->rotation);
14357}
14358
/*
 * Validate a proposed cursor plane update.
 *
 * Checks position/clipping via the DRM helper (no scaling allowed),
 * then the cursor dimensions, backing object size, tiling, and the CHV
 * pipe C quirk. Returns 0 on success or a negative errno.
 */
static int
intel_check_cursor_plane(struct drm_plane *plane,
			 struct intel_crtc_state *crtc_state,
			 struct intel_plane_state *state)
{
	struct drm_crtc *crtc = crtc_state->base.crtc;
	struct drm_framebuffer *fb = state->base.fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	enum pipe pipe = to_intel_plane(plane)->pipe;
	unsigned stride;
	int ret;

	ret = drm_plane_helper_check_update(plane, crtc, fb, &state->src,
					    &state->dst, &state->clip,
					    state->base.rotation,
					    DRM_PLANE_HELPER_NO_SCALING,
					    DRM_PLANE_HELPER_NO_SCALING,
					    true, true, &state->visible);
	if (ret)
		return ret;

	/* if we want to turn off the cursor ignore width and height */
	if (!obj)
		return 0;

	/* Check for which cursor types we support */
	if (!cursor_size_ok(plane->dev, state->base.crtc_w, state->base.crtc_h)) {
		DRM_DEBUG("Cursor dimension %dx%d not supported\n",
			  state->base.crtc_w, state->base.crtc_h);
		return -EINVAL;
	}

	/* Hw rounds the cursor stride up to a power of two (ARGB = 4 bpp). */
	stride = roundup_pow_of_two(state->base.crtc_w) * 4;
	if (obj->base.size < stride * state->base.crtc_h) {
		DRM_DEBUG_KMS("buffer is too small\n");
		return -ENOMEM;
	}

	if (fb->modifier[0] != DRM_FORMAT_MOD_NONE) {
		DRM_DEBUG_KMS("cursor cannot be tiled\n");
		return -EINVAL;
	}

	/*
	 * There's something wrong with the cursor on CHV pipe C.
	 * If it straddles the left edge of the screen then
	 * moving it away from the edge or disabling it often
	 * results in a pipe underrun, and often that can lead to
	 * dead pipe (constant underrun reported, and it scans
	 * out just a solid color). To recover from that, the
	 * display power well must be turned off and on again.
	 * Refuse the put the cursor into that compromised position.
	 */
	if (IS_CHERRYVIEW(plane->dev) && pipe == PIPE_C &&
	    state->visible && state->base.crtc_x < 0) {
		DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
		return -EINVAL;
	}

	return 0;
}
3d7d6510 14420
a8ad0d8e
ML
14421static void
14422intel_disable_cursor_plane(struct drm_plane *plane,
7fabf5ef 14423 struct drm_crtc *crtc)
a8ad0d8e 14424{
f2858021
ML
14425 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
14426
14427 intel_crtc->cursor_addr = 0;
55a08b3f 14428 intel_crtc_update_cursor(crtc, NULL);
a8ad0d8e
ML
14429}
14430
f4a2cf29 14431static void
55a08b3f
ML
14432intel_update_cursor_plane(struct drm_plane *plane,
14433 const struct intel_crtc_state *crtc_state,
14434 const struct intel_plane_state *state)
852e787c 14435{
55a08b3f
ML
14436 struct drm_crtc *crtc = crtc_state->base.crtc;
14437 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
ea2c67bb 14438 struct drm_device *dev = plane->dev;
2b875c22 14439 struct drm_i915_gem_object *obj = intel_fb_obj(state->base.fb);
a912f12f 14440 uint32_t addr;
852e787c 14441
f4a2cf29 14442 if (!obj)
a912f12f 14443 addr = 0;
f4a2cf29 14444 else if (!INTEL_INFO(dev)->cursor_needs_physical)
a912f12f 14445 addr = i915_gem_obj_ggtt_offset(obj);
f4a2cf29 14446 else
a912f12f 14447 addr = obj->phys_handle->busaddr;
852e787c 14448
a912f12f 14449 intel_crtc->cursor_addr = addr;
55a08b3f 14450 intel_crtc_update_cursor(crtc, state);
852e787c
GP
14451}
14452
/*
 * Allocate and register the cursor plane for @pipe.
 *
 * The cursor cannot scale; only 0/180 degree rotation is exposed (the
 * cursor does not participate in gen9 90/270 rotation). Returns the new
 * plane or NULL on failure.
 */
static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
						   int pipe)
{
	struct intel_plane *cursor = NULL;
	struct intel_plane_state *state = NULL;
	int ret;

	cursor = kzalloc(sizeof(*cursor), GFP_KERNEL);
	if (!cursor)
		goto fail;

	state = intel_create_plane_state(&cursor->base);
	if (!state)
		goto fail;
	cursor->base.state = &state->base;

	cursor->can_scale = false;
	cursor->max_downscale = 1;
	cursor->pipe = pipe;
	cursor->plane = pipe;
	cursor->frontbuffer_bit = INTEL_FRONTBUFFER_CURSOR(pipe);
	cursor->check_plane = intel_check_cursor_plane;
	cursor->update_plane = intel_update_cursor_plane;
	cursor->disable_plane = intel_disable_cursor_plane;

	ret = drm_universal_plane_init(dev, &cursor->base, 0,
				       &intel_plane_funcs,
				       intel_cursor_formats,
				       ARRAY_SIZE(intel_cursor_formats),
				       DRM_PLANE_TYPE_CURSOR,
				       "cursor %c", pipe_name(pipe));
	if (ret)
		goto fail;

	/*
	 * Open-coded rather than intel_create_rotation_property() so the
	 * device-wide property is created with only 0/180 if the cursor
	 * plane is registered first.
	 */
	if (INTEL_INFO(dev)->gen >= 4) {
		if (!dev->mode_config.rotation_property)
			dev->mode_config.rotation_property =
				drm_mode_create_rotation_property(dev,
							BIT(DRM_ROTATE_0) |
							BIT(DRM_ROTATE_180));
		if (dev->mode_config.rotation_property)
			drm_object_attach_property(&cursor->base.base,
				dev->mode_config.rotation_property,
				state->base.rotation);
	}

	if (INTEL_INFO(dev)->gen >=9)
		state->scaler_id = -1;

	drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);

	return &cursor->base;

fail:
	kfree(state);
	kfree(cursor);

	return NULL;
}
14512
549e2bfb
CK
14513static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc,
14514 struct intel_crtc_state *crtc_state)
14515{
14516 int i;
14517 struct intel_scaler *intel_scaler;
14518 struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;
14519
14520 for (i = 0; i < intel_crtc->num_scalers; i++) {
14521 intel_scaler = &scaler_state->scalers[i];
14522 intel_scaler->in_use = 0;
549e2bfb
CK
14523 intel_scaler->mode = PS_SCALER_MODE_DYN;
14524 }
14525
14526 scaler_state->scaler_id = -1;
14527}
14528
/*
 * Allocate and register the crtc for @pipe together with its primary and
 * cursor planes, and wire up the pipe/plane -> crtc lookup tables.
 *
 * On any failure everything allocated so far is torn down and the crtc
 * is silently not registered.
 */
static void intel_crtc_init(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc;
	struct intel_crtc_state *crtc_state = NULL;
	struct drm_plane *primary = NULL;
	struct drm_plane *cursor = NULL;
	int ret;

	intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
	if (intel_crtc == NULL)
		return;

	crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
	if (!crtc_state)
		goto fail;
	intel_crtc->config = crtc_state;
	intel_crtc->base.state = &crtc_state->base;
	crtc_state->base.crtc = &intel_crtc->base;

	/* initialize shared scalers (pipe C has fewer scalers on SKL) */
	if (INTEL_INFO(dev)->gen >= 9) {
		if (pipe == PIPE_C)
			intel_crtc->num_scalers = 1;
		else
			intel_crtc->num_scalers = SKL_NUM_SCALERS;

		skl_init_scalers(dev, intel_crtc, crtc_state);
	}

	primary = intel_primary_plane_create(dev, pipe);
	if (!primary)
		goto fail;

	cursor = intel_cursor_plane_create(dev, pipe);
	if (!cursor)
		goto fail;

	ret = drm_crtc_init_with_planes(dev, &intel_crtc->base, primary,
					cursor, &intel_crtc_funcs,
					"pipe %c", pipe_name(pipe));
	if (ret)
		goto fail;

	/*
	 * On gen2/3 only plane A can do fbc, but the panel fitter and lvds port
	 * is hooked to pipe B. Hence we want plane A feeding pipe B.
	 */
	intel_crtc->pipe = pipe;
	intel_crtc->plane = pipe;
	if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4) {
		DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
		intel_crtc->plane = !pipe;
	}

	/* ~0 = "unknown", forces a full reprogram on first cursor update. */
	intel_crtc->cursor_base = ~0;
	intel_crtc->cursor_cntl = ~0;
	intel_crtc->cursor_size = ~0;

	intel_crtc->wm.cxsr_allowed = true;

	/* Each plane/pipe must map to exactly one crtc. */
	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
	       dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
	dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
	dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;

	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);

	intel_color_init(&intel_crtc->base);

	WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
	return;

fail:
	intel_plane_destroy(primary);
	intel_plane_destroy(cursor);
	kfree(crtc_state);
	kfree(intel_crtc);
}
14608
752aa88a
JB
14609enum pipe intel_get_pipe_from_connector(struct intel_connector *connector)
14610{
14611 struct drm_encoder *encoder = connector->base.encoder;
6e9f798d 14612 struct drm_device *dev = connector->base.dev;
752aa88a 14613
51fd371b 14614 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
752aa88a 14615
d3babd3f 14616 if (!encoder || WARN_ON(!encoder->crtc))
752aa88a
JB
14617 return INVALID_PIPE;
14618
14619 return to_intel_crtc(encoder->crtc)->pipe;
14620}
14621
08d7b3d1 14622int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
05394f39 14623 struct drm_file *file)
08d7b3d1 14624{
08d7b3d1 14625 struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
7707e653 14626 struct drm_crtc *drmmode_crtc;
c05422d5 14627 struct intel_crtc *crtc;
08d7b3d1 14628
7707e653 14629 drmmode_crtc = drm_crtc_find(dev, pipe_from_crtc_id->crtc_id);
71240ed2 14630 if (!drmmode_crtc)
3f2c2057 14631 return -ENOENT;
08d7b3d1 14632
7707e653 14633 crtc = to_intel_crtc(drmmode_crtc);
c05422d5 14634 pipe_from_crtc_id->pipe = crtc->pipe;
08d7b3d1 14635
c05422d5 14636 return 0;
08d7b3d1
CW
14637}
14638
66a9278e 14639static int intel_encoder_clones(struct intel_encoder *encoder)
79e53945 14640{
66a9278e
DV
14641 struct drm_device *dev = encoder->base.dev;
14642 struct intel_encoder *source_encoder;
79e53945 14643 int index_mask = 0;
79e53945
JB
14644 int entry = 0;
14645
b2784e15 14646 for_each_intel_encoder(dev, source_encoder) {
bc079e8b 14647 if (encoders_cloneable(encoder, source_encoder))
66a9278e
DV
14648 index_mask |= (1 << entry);
14649
79e53945
JB
14650 entry++;
14651 }
4ef69c7a 14652
79e53945
JB
14653 return index_mask;
14654}
14655
4d302442
CW
14656static bool has_edp_a(struct drm_device *dev)
14657{
fac5e23e 14658 struct drm_i915_private *dev_priv = to_i915(dev);
4d302442
CW
14659
14660 if (!IS_MOBILE(dev))
14661 return false;
14662
14663 if ((I915_READ(DP_A) & DP_DETECTED) == 0)
14664 return false;
14665
e3589908 14666 if (IS_GEN5(dev) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
4d302442
CW
14667 return false;
14668
14669 return true;
14670}
14671
84b4e042
JB
14672static bool intel_crt_present(struct drm_device *dev)
14673{
fac5e23e 14674 struct drm_i915_private *dev_priv = to_i915(dev);
84b4e042 14675
884497ed
DL
14676 if (INTEL_INFO(dev)->gen >= 9)
14677 return false;
14678
cf404ce4 14679 if (IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
84b4e042
JB
14680 return false;
14681
14682 if (IS_CHERRYVIEW(dev))
14683 return false;
14684
65e472e4
VS
14685 if (HAS_PCH_LPT_H(dev) && I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
14686 return false;
14687
70ac54d0
VS
14688 /* DDI E can't be used if DDI A requires 4 lanes */
14689 if (HAS_DDI(dev) && I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
14690 return false;
14691
e4abb733 14692 if (!dev_priv->vbt.int_crt_support)
84b4e042
JB
14693 return false;
14694
14695 return true;
14696}
14697
/*
 * Probe and register all display outputs (encoders/connectors) for the
 * device, using per-platform strap registers and VBT data to decide
 * which ports exist. Must run before encoder mask fixup below; LVDS must
 * be probed first (see comment).
 */
static void intel_setup_outputs(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_encoder *encoder;
	bool dpd_is_edp = false;

	/*
	 * intel_edp_init_connector() depends on this completing first, to
	 * prevent the registeration of both eDP and LVDS and the incorrect
	 * sharing of the PPS.
	 */
	intel_lvds_init(dev);

	if (intel_crt_present(dev))
		intel_crt_init(dev);

	if (IS_BROXTON(dev)) {
		/*
		 * FIXME: Broxton doesn't support port detection via the
		 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
		 * detect the ports.
		 */
		intel_ddi_init(dev, PORT_A);
		intel_ddi_init(dev, PORT_B);
		intel_ddi_init(dev, PORT_C);

		intel_dsi_init(dev);
	} else if (HAS_DDI(dev)) {
		int found;

		/*
		 * Haswell uses DDI functions to detect digital outputs.
		 * On SKL pre-D0 the strap isn't connected, so we assume
		 * it's there.
		 */
		found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
		/* WaIgnoreDDIAStrap: skl */
		if (found || IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
			intel_ddi_init(dev, PORT_A);

		/* DDI B, C and D detection is indicated by the SFUSE_STRAP
		 * register */
		found = I915_READ(SFUSE_STRAP);

		if (found & SFUSE_STRAP_DDIB_DETECTED)
			intel_ddi_init(dev, PORT_B);
		if (found & SFUSE_STRAP_DDIC_DETECTED)
			intel_ddi_init(dev, PORT_C);
		if (found & SFUSE_STRAP_DDID_DETECTED)
			intel_ddi_init(dev, PORT_D);
		/*
		 * On SKL we don't have a way to detect DDI-E so we rely on VBT.
		 */
		if ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) &&
		    (dev_priv->vbt.ddi_port_info[PORT_E].supports_dp ||
		     dev_priv->vbt.ddi_port_info[PORT_E].supports_dvi ||
		     dev_priv->vbt.ddi_port_info[PORT_E].supports_hdmi))
			intel_ddi_init(dev, PORT_E);

	} else if (HAS_PCH_SPLIT(dev)) {
		int found;
		dpd_is_edp = intel_dp_is_edp(dev, PORT_D);

		if (has_edp_a(dev))
			intel_dp_init(dev, DP_A, PORT_A);

		if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
			/* PCH SDVOB multiplex with HDMIB */
			found = intel_sdvo_init(dev, PCH_SDVOB, PORT_B);
			if (!found)
				intel_hdmi_init(dev, PCH_HDMIB, PORT_B);
			if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
				intel_dp_init(dev, PCH_DP_B, PORT_B);
		}

		if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
			intel_hdmi_init(dev, PCH_HDMIC, PORT_C);

		if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
			intel_hdmi_init(dev, PCH_HDMID, PORT_D);

		if (I915_READ(PCH_DP_C) & DP_DETECTED)
			intel_dp_init(dev, PCH_DP_C, PORT_C);

		if (I915_READ(PCH_DP_D) & DP_DETECTED)
			intel_dp_init(dev, PCH_DP_D, PORT_D);
	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		bool has_edp, has_port;

		/*
		 * The DP_DETECTED bit is the latched state of the DDC
		 * SDA pin at boot. However since eDP doesn't require DDC
		 * (no way to plug in a DP->HDMI dongle) the DDC pins for
		 * eDP ports may have been muxed to an alternate function.
		 * Thus we can't rely on the DP_DETECTED bit alone to detect
		 * eDP ports. Consult the VBT as well as DP_DETECTED to
		 * detect eDP ports.
		 *
		 * Sadly the straps seem to be missing sometimes even for HDMI
		 * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
		 * and VBT for the presence of the port. Additionally we can't
		 * trust the port type the VBT declares as we've seen at least
		 * HDMI ports that the VBT claim are DP or eDP.
		 */
		has_edp = intel_dp_is_edp(dev, PORT_B);
		has_port = intel_bios_is_port_present(dev_priv, PORT_B);
		if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
			has_edp &= intel_dp_init(dev, VLV_DP_B, PORT_B);
		if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
			intel_hdmi_init(dev, VLV_HDMIB, PORT_B);

		has_edp = intel_dp_is_edp(dev, PORT_C);
		has_port = intel_bios_is_port_present(dev_priv, PORT_C);
		if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port)
			has_edp &= intel_dp_init(dev, VLV_DP_C, PORT_C);
		if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
			intel_hdmi_init(dev, VLV_HDMIC, PORT_C);

		if (IS_CHERRYVIEW(dev)) {
			/*
			 * eDP not supported on port D,
			 * so no need to worry about it
			 */
			has_port = intel_bios_is_port_present(dev_priv, PORT_D);
			if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port)
				intel_dp_init(dev, CHV_DP_D, PORT_D);
			if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port)
				intel_hdmi_init(dev, CHV_HDMID, PORT_D);
		}

		intel_dsi_init(dev);
	} else if (!IS_GEN2(dev) && !IS_PINEVIEW(dev)) {
		bool found = false;

		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOB\n");
			found = intel_sdvo_init(dev, GEN3_SDVOB, PORT_B);
			if (!found && IS_G4X(dev)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
				intel_hdmi_init(dev, GEN4_HDMIB, PORT_B);
			}

			if (!found && IS_G4X(dev))
				intel_dp_init(dev, DP_B, PORT_B);
		}

		/* Before G4X SDVOC doesn't have its own detect register */

		/*
		 * NOTE(review): the SDVOB detect bit is deliberately
		 * re-checked here to gate the SDVOC probe (SDVOC shares
		 * SDVOB's detect on pre-G4X per the comment above) —
		 * confirm against hw docs before "fixing" to GEN3_SDVOC.
		 */
		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOC\n");
			found = intel_sdvo_init(dev, GEN3_SDVOC, PORT_C);
		}

		if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {

			if (IS_G4X(dev)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
				intel_hdmi_init(dev, GEN4_HDMIC, PORT_C);
			}
			if (IS_G4X(dev))
				intel_dp_init(dev, DP_C, PORT_C);
		}

		if (IS_G4X(dev) &&
		    (I915_READ(DP_D) & DP_DETECTED))
			intel_dp_init(dev, DP_D, PORT_D);
	} else if (IS_GEN2(dev))
		intel_dvo_init(dev);

	if (SUPPORTS_TV(dev))
		intel_tv_init(dev);

	intel_psr_init(dev);

	/* Now that all encoders exist, compute crtc and clone masks. */
	for_each_intel_encoder(dev, encoder) {
		encoder->base.possible_crtcs = encoder->crtc_mask;
		encoder->base.possible_clones =
			intel_encoder_clones(encoder);
	}

	intel_init_pch_refclk(dev);

	drm_helper_move_panel_connectors_to_head(dev);
}
14882
14883static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
14884{
60a5ca01 14885 struct drm_device *dev = fb->dev;
79e53945 14886 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
79e53945 14887
ef2d633e 14888 drm_framebuffer_cleanup(fb);
60a5ca01 14889 mutex_lock(&dev->struct_mutex);
ef2d633e 14890 WARN_ON(!intel_fb->obj->framebuffer_references--);
60a5ca01
VS
14891 drm_gem_object_unreference(&intel_fb->obj->base);
14892 mutex_unlock(&dev->struct_mutex);
79e53945
JB
14893 kfree(intel_fb);
14894}
14895
14896static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
05394f39 14897 struct drm_file *file,
79e53945
JB
14898 unsigned int *handle)
14899{
14900 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
05394f39 14901 struct drm_i915_gem_object *obj = intel_fb->obj;
79e53945 14902
cc917ab4
CW
14903 if (obj->userptr.mm) {
14904 DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n");
14905 return -EINVAL;
14906 }
14907
05394f39 14908 return drm_gem_handle_create(file, &obj->base, handle);
79e53945
JB
14909}
14910
86c98588
RV
14911static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
14912 struct drm_file *file,
14913 unsigned flags, unsigned color,
14914 struct drm_clip_rect *clips,
14915 unsigned num_clips)
14916{
14917 struct drm_device *dev = fb->dev;
14918 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
14919 struct drm_i915_gem_object *obj = intel_fb->obj;
14920
14921 mutex_lock(&dev->struct_mutex);
74b4ea1e 14922 intel_fb_obj_flush(obj, false, ORIGIN_DIRTYFB);
86c98588
RV
14923 mutex_unlock(&dev->struct_mutex);
14924
14925 return 0;
14926}
14927
/* Framebuffer vtable for user-created (addfb) framebuffers. */
static const struct drm_framebuffer_funcs intel_fb_funcs = {
	.destroy = intel_user_framebuffer_destroy,
	.create_handle = intel_user_framebuffer_create_handle,
	.dirty = intel_user_framebuffer_dirty,
};
14933
b321803d
DL
14934static
14935u32 intel_fb_pitch_limit(struct drm_device *dev, uint64_t fb_modifier,
14936 uint32_t pixel_format)
14937{
14938 u32 gen = INTEL_INFO(dev)->gen;
14939
14940 if (gen >= 9) {
ac484963
VS
14941 int cpp = drm_format_plane_cpp(pixel_format, 0);
14942
b321803d
DL
14943 /* "The stride in bytes must not exceed the of the size of 8K
14944 * pixels and 32K bytes."
14945 */
ac484963 14946 return min(8192 * cpp, 32768);
666a4537 14947 } else if (gen >= 5 && !IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
b321803d
DL
14948 return 32*1024;
14949 } else if (gen >= 4) {
14950 if (fb_modifier == I915_FORMAT_MOD_X_TILED)
14951 return 16*1024;
14952 else
14953 return 32*1024;
14954 } else if (gen >= 3) {
14955 if (fb_modifier == I915_FORMAT_MOD_X_TILED)
14956 return 8*1024;
14957 else
14958 return 16*1024;
14959 } else {
14960 /* XXX DSPC is limited to 4k tiled */
14961 return 8*1024;
14962 }
14963}
14964
/*
 * Validate @mode_cmd against @obj and the platform's limits, then fill
 * in and register @intel_fb. Caller must hold struct_mutex. Returns 0 or
 * a negative errno; on success the fb takes a framebuffer reference on
 * the object.
 */
static int intel_framebuffer_init(struct drm_device *dev,
				  struct intel_framebuffer *intel_fb,
				  struct drm_mode_fb_cmd2 *mode_cmd,
				  struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned int aligned_height;
	int ret;
	u32 pitch_limit, stride_alignment;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
		/* Enforce that fb modifier and tiling mode match, but only for
		 * X-tiled. This is needed for FBC. */
		if (!!(obj->tiling_mode == I915_TILING_X) !=
		    !!(mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED)) {
			DRM_DEBUG("tiling_mode doesn't match fb modifier\n");
			return -EINVAL;
		}
	} else {
		/* Legacy addfb: derive the modifier from the tiling mode. */
		if (obj->tiling_mode == I915_TILING_X)
			mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
		else if (obj->tiling_mode == I915_TILING_Y) {
			DRM_DEBUG("No Y tiling for legacy addfb\n");
			return -EINVAL;
		}
	}

	/* Passed in modifier sanity checking. */
	switch (mode_cmd->modifier[0]) {
	case I915_FORMAT_MOD_Y_TILED:
	case I915_FORMAT_MOD_Yf_TILED:
		if (INTEL_INFO(dev)->gen < 9) {
			DRM_DEBUG("Unsupported tiling 0x%llx!\n",
				  mode_cmd->modifier[0]);
			return -EINVAL;
		}
		/* fall through - Y/Yf tiling is valid on gen9+ */
	case DRM_FORMAT_MOD_NONE:
	case I915_FORMAT_MOD_X_TILED:
		break;
	default:
		DRM_DEBUG("Unsupported fb modifier 0x%llx!\n",
			  mode_cmd->modifier[0]);
		return -EINVAL;
	}

	stride_alignment = intel_fb_stride_alignment(dev_priv,
						     mode_cmd->modifier[0],
						     mode_cmd->pixel_format);
	if (mode_cmd->pitches[0] & (stride_alignment - 1)) {
		DRM_DEBUG("pitch (%d) must be at least %u byte aligned\n",
			  mode_cmd->pitches[0], stride_alignment);
		return -EINVAL;
	}

	pitch_limit = intel_fb_pitch_limit(dev, mode_cmd->modifier[0],
					   mode_cmd->pixel_format);
	if (mode_cmd->pitches[0] > pitch_limit) {
		DRM_DEBUG("%s pitch (%u) must be at less than %d\n",
			  mode_cmd->modifier[0] != DRM_FORMAT_MOD_NONE ?
			  "tiled" : "linear",
			  mode_cmd->pitches[0], pitch_limit);
		return -EINVAL;
	}

	/* X-tiled scanout must use the object's fence stride. */
	if (mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED &&
	    mode_cmd->pitches[0] != obj->stride) {
		DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n",
			  mode_cmd->pitches[0], obj->stride);
		return -EINVAL;
	}

	/* Reject formats not supported by any plane early. */
	switch (mode_cmd->pixel_format) {
	case DRM_FORMAT_C8:
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		break;
	case DRM_FORMAT_XRGB1555:
		if (INTEL_INFO(dev)->gen > 3) {
			DRM_DEBUG("unsupported pixel format: %s\n",
				  drm_get_format_name(mode_cmd->pixel_format));
			return -EINVAL;
		}
		break;
	case DRM_FORMAT_ABGR8888:
		if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) &&
		    INTEL_INFO(dev)->gen < 9) {
			DRM_DEBUG("unsupported pixel format: %s\n",
				  drm_get_format_name(mode_cmd->pixel_format));
			return -EINVAL;
		}
		break;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_XBGR2101010:
		if (INTEL_INFO(dev)->gen < 4) {
			DRM_DEBUG("unsupported pixel format: %s\n",
				  drm_get_format_name(mode_cmd->pixel_format));
			return -EINVAL;
		}
		break;
	case DRM_FORMAT_ABGR2101010:
		if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
			DRM_DEBUG("unsupported pixel format: %s\n",
				  drm_get_format_name(mode_cmd->pixel_format));
			return -EINVAL;
		}
		break;
	case DRM_FORMAT_YUYV:
	case DRM_FORMAT_UYVY:
	case DRM_FORMAT_YVYU:
	case DRM_FORMAT_VYUY:
		if (INTEL_INFO(dev)->gen < 5) {
			DRM_DEBUG("unsupported pixel format: %s\n",
				  drm_get_format_name(mode_cmd->pixel_format));
			return -EINVAL;
		}
		break;
	default:
		DRM_DEBUG("unsupported pixel format: %s\n",
			  drm_get_format_name(mode_cmd->pixel_format));
		return -EINVAL;
	}

	/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
	if (mode_cmd->offsets[0] != 0)
		return -EINVAL;

	aligned_height = intel_fb_align_height(dev, mode_cmd->height,
					       mode_cmd->pixel_format,
					       mode_cmd->modifier[0]);
	/* FIXME drm helper for size checks (especially planar formats)? */
	if (obj->base.size < aligned_height * mode_cmd->pitches[0])
		return -EINVAL;

	drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
	intel_fb->obj = obj;

	intel_fill_fb_info(dev_priv, &intel_fb->base);

	ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
	if (ret) {
		DRM_ERROR("framebuffer init failed %d\n", ret);
		return ret;
	}

	intel_fb->obj->framebuffer_references++;

	return 0;
}
15118
79e53945
JB
/*
 * Userspace framebuffer creation entry point (drm_mode_config_funcs.fb_create).
 *
 * Looks up the GEM object named by the first fb handle, wraps it in an
 * intel framebuffer, and on failure drops the reference the lookup took.
 * On success the created framebuffer keeps that object reference.
 */
static struct drm_framebuffer *
intel_user_framebuffer_create(struct drm_device *dev,
			      struct drm_file *filp,
			      const struct drm_mode_fb_cmd2 *user_mode_cmd)
{
	struct drm_framebuffer *fb;
	struct drm_i915_gem_object *obj;
	/* Local copy: intel_framebuffer_create() takes a non-const pointer. */
	struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;

	obj = to_intel_bo(drm_gem_object_lookup(filp, mode_cmd.handles[0]));
	/*
	 * NOTE(review): this NULL check assumes ->base is laid out such that
	 * to_intel_bo(NULL) yields a pointer whose &obj->base is NULL —
	 * confirm against the struct layout before touching it.
	 */
	if (&obj->base == NULL)
		return ERR_PTR(-ENOENT);

	fb = intel_framebuffer_create(dev, &mode_cmd, obj);
	if (IS_ERR(fb))
		/* Creation failed: drop the lookup reference ourselves. */
		drm_gem_object_unreference_unlocked(&obj->base);

	return fb;
}
15138
#ifndef CONFIG_DRM_FBDEV_EMULATION
/* No fbdev emulation compiled in: output-poll notification is a no-op. */
static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
{
}
#endif
15144
/* Mode-config entry points handed to the DRM core (atomic modeset path). */
static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,
	.output_poll_changed = intel_fbdev_output_poll_changed,
	.atomic_check = intel_atomic_check,
	.atomic_commit = intel_atomic_commit,
	.atomic_state_alloc = intel_atomic_state_alloc,
	.atomic_state_clear = intel_atomic_state_clear,
};
15153
/**
 * intel_init_display_hooks - initialize the display modesetting hooks
 * @dev_priv: device private
 *
 * Fills in dev_priv->display with the platform-specific implementations of
 * the modeset vfuncs: pipe-config readout, initial (BIOS) plane readout,
 * clock computation, crtc enable/disable, cdclk handling, FDI link training
 * and the legacy page-flip queueing path.
 */
void intel_init_display_hooks(struct drm_i915_private *dev_priv)
{
	/* Pipe config / plane readout / clock / crtc hooks, newest first. */
	if (INTEL_INFO(dev_priv)->gen >= 9) {
		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			skylake_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock =
			haswell_crtc_compute_clock;
		dev_priv->display.crtc_enable = haswell_crtc_enable;
		dev_priv->display.crtc_disable = haswell_crtc_disable;
	} else if (HAS_DDI(dev_priv)) {
		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			ironlake_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock =
			haswell_crtc_compute_clock;
		dev_priv->display.crtc_enable = haswell_crtc_enable;
		dev_priv->display.crtc_disable = haswell_crtc_disable;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			ironlake_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock =
			ironlake_crtc_compute_clock;
		dev_priv->display.crtc_enable = ironlake_crtc_enable;
		dev_priv->display.crtc_disable = ironlake_crtc_disable;
	} else if (IS_CHERRYVIEW(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock;
		dev_priv->display.crtc_enable = valleyview_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock;
		dev_priv->display.crtc_enable = valleyview_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (IS_G4X(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (IS_PINEVIEW(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (!IS_GEN2(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else {
		/* Gen2 */
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	}

	/* Returns the core display clock speed */
	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
		dev_priv->display.get_display_clock_speed =
			skylake_get_display_clock_speed;
	else if (IS_BROXTON(dev_priv))
		dev_priv->display.get_display_clock_speed =
			broxton_get_display_clock_speed;
	else if (IS_BROADWELL(dev_priv))
		dev_priv->display.get_display_clock_speed =
			broadwell_get_display_clock_speed;
	else if (IS_HASWELL(dev_priv))
		dev_priv->display.get_display_clock_speed =
			haswell_get_display_clock_speed;
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		dev_priv->display.get_display_clock_speed =
			valleyview_get_display_clock_speed;
	else if (IS_GEN5(dev_priv))
		dev_priv->display.get_display_clock_speed =
			ilk_get_display_clock_speed;
	else if (IS_I945G(dev_priv) || IS_BROADWATER(dev_priv) ||
		 IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv))
		dev_priv->display.get_display_clock_speed =
			i945_get_display_clock_speed;
	else if (IS_GM45(dev_priv))
		dev_priv->display.get_display_clock_speed =
			gm45_get_display_clock_speed;
	else if (IS_CRESTLINE(dev_priv))
		dev_priv->display.get_display_clock_speed =
			i965gm_get_display_clock_speed;
	else if (IS_PINEVIEW(dev_priv))
		dev_priv->display.get_display_clock_speed =
			pnv_get_display_clock_speed;
	else if (IS_G33(dev_priv) || IS_G4X(dev_priv))
		dev_priv->display.get_display_clock_speed =
			g33_get_display_clock_speed;
	else if (IS_I915G(dev_priv))
		dev_priv->display.get_display_clock_speed =
			i915_get_display_clock_speed;
	else if (IS_I945GM(dev_priv) || IS_845G(dev_priv))
		dev_priv->display.get_display_clock_speed =
			i9xx_misc_get_display_clock_speed;
	else if (IS_I915GM(dev_priv))
		dev_priv->display.get_display_clock_speed =
			i915gm_get_display_clock_speed;
	else if (IS_I865G(dev_priv))
		dev_priv->display.get_display_clock_speed =
			i865_get_display_clock_speed;
	else if (IS_I85X(dev_priv))
		dev_priv->display.get_display_clock_speed =
			i85x_get_display_clock_speed;
	else { /* 830 */
		WARN(!IS_I830(dev_priv), "Unknown platform. Assuming 133 MHz CDCLK\n");
		dev_priv->display.get_display_clock_speed =
			i830_get_display_clock_speed;
	}

	/* FDI link training hook (PCH-split platforms). */
	if (IS_GEN5(dev_priv)) {
		dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
	} else if (IS_GEN6(dev_priv)) {
		dev_priv->display.fdi_link_train = gen6_fdi_link_train;
	} else if (IS_IVYBRIDGE(dev_priv)) {
		/* FIXME: detect B0+ stepping and use auto training */
		dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		dev_priv->display.fdi_link_train = hsw_fdi_link_train;
	}

	/* cdclk reprogramming hooks for platforms that support it. */
	if (IS_BROADWELL(dev_priv)) {
		dev_priv->display.modeset_commit_cdclk =
			broadwell_modeset_commit_cdclk;
		dev_priv->display.modeset_calc_cdclk =
			broadwell_modeset_calc_cdclk;
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		dev_priv->display.modeset_commit_cdclk =
			valleyview_modeset_commit_cdclk;
		dev_priv->display.modeset_calc_cdclk =
			valleyview_modeset_calc_cdclk;
	} else if (IS_BROXTON(dev_priv)) {
		dev_priv->display.modeset_commit_cdclk =
			bxt_modeset_commit_cdclk;
		dev_priv->display.modeset_calc_cdclk =
			bxt_modeset_calc_cdclk;
	} else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
		dev_priv->display.modeset_commit_cdclk =
			skl_modeset_commit_cdclk;
		dev_priv->display.modeset_calc_cdclk =
			skl_modeset_calc_cdclk;
	}

	/* CS-based (ring) page flip implementation, selected by gen. */
	switch (INTEL_INFO(dev_priv)->gen) {
	case 2:
		dev_priv->display.queue_flip = intel_gen2_queue_flip;
		break;

	case 3:
		dev_priv->display.queue_flip = intel_gen3_queue_flip;
		break;

	case 4:
	case 5:
		dev_priv->display.queue_flip = intel_gen4_queue_flip;
		break;

	case 6:
		dev_priv->display.queue_flip = intel_gen6_queue_flip;
		break;
	case 7:
	case 8: /* FIXME(BDW): Check that the gen8 RCS flip works. */
		dev_priv->display.queue_flip = intel_gen7_queue_flip;
		break;
	case 9:
		/* Drop through - unsupported since execlist only. */
	default:
		/* Default just returns -ENODEV to indicate unsupported */
		dev_priv->display.queue_flip = intel_default_queue_flip;
	}
}
15345
b690e96c
JB
15346/*
15347 * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
15348 * resume, or other times. This quirk makes sure that's the case for
15349 * affected systems.
15350 */
0206e353 15351static void quirk_pipea_force(struct drm_device *dev)
b690e96c 15352{
fac5e23e 15353 struct drm_i915_private *dev_priv = to_i915(dev);
b690e96c
JB
15354
15355 dev_priv->quirks |= QUIRK_PIPEA_FORCE;
bc0daf48 15356 DRM_INFO("applying pipe a force quirk\n");
b690e96c
JB
15357}
15358
b6b5d049
VS
15359static void quirk_pipeb_force(struct drm_device *dev)
15360{
fac5e23e 15361 struct drm_i915_private *dev_priv = to_i915(dev);
b6b5d049
VS
15362
15363 dev_priv->quirks |= QUIRK_PIPEB_FORCE;
15364 DRM_INFO("applying pipe b force quirk\n");
15365}
15366
435793df
KP
15367/*
15368 * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
15369 */
15370static void quirk_ssc_force_disable(struct drm_device *dev)
15371{
fac5e23e 15372 struct drm_i915_private *dev_priv = to_i915(dev);
435793df 15373 dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
bc0daf48 15374 DRM_INFO("applying lvds SSC disable quirk\n");
435793df
KP
15375}
15376
4dca20ef 15377/*
5a15ab5b
CE
15378 * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
15379 * brightness value
4dca20ef
CE
15380 */
15381static void quirk_invert_brightness(struct drm_device *dev)
15382{
fac5e23e 15383 struct drm_i915_private *dev_priv = to_i915(dev);
4dca20ef 15384 dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
bc0daf48 15385 DRM_INFO("applying inverted panel brightness quirk\n");
435793df
KP
15386}
15387
9c72cc6f
SD
15388/* Some VBT's incorrectly indicate no backlight is present */
15389static void quirk_backlight_present(struct drm_device *dev)
15390{
fac5e23e 15391 struct drm_i915_private *dev_priv = to_i915(dev);
9c72cc6f
SD
15392 dev_priv->quirks |= QUIRK_BACKLIGHT_PRESENT;
15393 DRM_INFO("applying backlight present quirk\n");
15394}
15395
/*
 * PCI-ID keyed quirk entry, matched against the GPU's PCI device ID and
 * subsystem vendor/device IDs by intel_init_quirks(). PCI_ANY_ID acts as
 * a wildcard for the subsystem fields.
 */
struct intel_quirk {
	int device;				/* PCI device ID */
	int subsystem_vendor;			/* or PCI_ANY_ID */
	int subsystem_device;			/* or PCI_ANY_ID */
	void (*hook)(struct drm_device *dev);	/* applied on match */
};
15402
/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
struct intel_dmi_quirk {
	void (*hook)(struct drm_device *dev);
	/* NULL-terminated DMI match table, see dmi_check_system() */
	const struct dmi_system_id (*dmi_id_list)[];
};
15408
/* DMI match callback: log the affected system and accept the match. */
static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
{
	DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
	return 1;
}
15414
/* DMI-matched quirks, applied by intel_init_quirks() after the PCI table. */
static const struct intel_dmi_quirk intel_dmi_quirks[] = {
	{
		.dmi_id_list = &(const struct dmi_system_id[]) {
			{
				.callback = intel_dmi_reverse_brightness,
				.ident = "NCR Corporation",
				.matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
					    DMI_MATCH(DMI_PRODUCT_NAME, ""),
				},
			},
			{ }  /* terminating entry */
		},
		.hook = quirk_invert_brightness,
	},
};
15430
/* PCI device/subsystem keyed quirk table, scanned by intel_init_quirks(). */
static struct intel_quirk intel_quirks[] = {
	/* Toshiba Protege R-205, S-209 needs pipe A force quirk */
	{ 0x2592, 0x1179, 0x0001, quirk_pipea_force },

	/* ThinkPad T60 needs pipe A force quirk (bug #16494) */
	{ 0x2782, 0x17aa, 0x201a, quirk_pipea_force },

	/* 830 needs to leave pipe A & dpll A up */
	{ 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },

	/* 830 needs to leave pipe B & dpll B up */
	{ 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipeb_force },

	/* Lenovo U160 cannot use SSC on LVDS */
	{ 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },

	/* Sony Vaio Y cannot use SSC on LVDS */
	{ 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },

	/* Acer Aspire 5734Z must invert backlight brightness */
	{ 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },

	/* Acer/eMachines G725 */
	{ 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },

	/* Acer/eMachines e725 */
	{ 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },

	/* Acer/Packard Bell NCL20 */
	{ 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },

	/* Acer Aspire 4736Z */
	{ 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },

	/* Acer Aspire 5336 */
	{ 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },

	/* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */
	{ 0x0a06, 0x1025, 0x0a11, quirk_backlight_present },

	/* Acer C720 Chromebook (Core i3 4005U) */
	{ 0x0a16, 0x1025, 0x0a11, quirk_backlight_present },

	/* Apple Macbook 2,1 (Core 2 T7400) */
	{ 0x27a2, 0x8086, 0x7270, quirk_backlight_present },

	/* Apple Macbook 4,1 */
	{ 0x2a02, 0x106b, 0x00a1, quirk_backlight_present },

	/* Toshiba CB35 Chromebook (Celeron 2955U) */
	{ 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },

	/* HP Chromebook 14 (Celeron 2955U) */
	{ 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },

	/* Dell Chromebook 11 */
	{ 0x0a06, 0x1028, 0x0a35, quirk_backlight_present },

	/* Dell Chromebook 11 (2015 version) */
	{ 0x0a16, 0x1028, 0x0a35, quirk_backlight_present },
};
15492
15493static void intel_init_quirks(struct drm_device *dev)
15494{
15495 struct pci_dev *d = dev->pdev;
15496 int i;
15497
15498 for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
15499 struct intel_quirk *q = &intel_quirks[i];
15500
15501 if (d->device == q->device &&
15502 (d->subsystem_vendor == q->subsystem_vendor ||
15503 q->subsystem_vendor == PCI_ANY_ID) &&
15504 (d->subsystem_device == q->subsystem_device ||
15505 q->subsystem_device == PCI_ANY_ID))
15506 q->hook(dev);
15507 }
5f85f176
EE
15508 for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
15509 if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
15510 intel_dmi_quirks[i].hook(dev);
15511 }
b690e96c
JB
15512}
15513
/* Disable the VGA plane that we never use */
static void i915_disable_vga(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	u8 sr1;
	i915_reg_t vga_reg = i915_vgacntrl_reg(dev);

	/* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
	/*
	 * Read-modify-write VGA sequencer register SR01 through the legacy
	 * index/data port pair; bit 5 is presumably the screen-off bit
	 * (per the standard VGA SR01 layout — confirm against the PRM).
	 */
	outb(SR01, VGA_SR_INDEX);
	sr1 = inb(VGA_SR_DATA);
	outb(sr1 | 1<<5, VGA_SR_DATA);
	vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
	/* Let the sequencer write settle before disabling the plane. */
	udelay(300);

	I915_WRITE(vga_reg, VGA_DISP_DISABLE);
	POSTING_READ(vga_reg);
}
15532
/*
 * Hardware (re)initialization done at init and resume: refresh the cached
 * cdclk, snapshot it for the atomic state, then set up clock gating and GT
 * power saving. Ordering matters: the cdclk readout must precede the
 * atomic_cdclk_freq snapshot.
 */
void intel_modeset_init_hw(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	intel_update_cdclk(dev);

	dev_priv->atomic_cdclk_freq = dev_priv->cdclk_freq;

	intel_init_clock_gating(dev);
	intel_enable_gt_powersave(dev_priv);
}
15544
d93c0372
MR
15545/*
15546 * Calculate what we think the watermarks should be for the state we've read
15547 * out of the hardware and then immediately program those watermarks so that
15548 * we ensure the hardware settings match our internal state.
15549 *
15550 * We can calculate what we think WM's should be by creating a duplicate of the
15551 * current state (which was constructed during hardware readout) and running it
15552 * through the atomic check code to calculate new watermark values in the
15553 * state object.
15554 */
15555static void sanitize_watermarks(struct drm_device *dev)
15556{
15557 struct drm_i915_private *dev_priv = to_i915(dev);
15558 struct drm_atomic_state *state;
15559 struct drm_crtc *crtc;
15560 struct drm_crtc_state *cstate;
15561 struct drm_modeset_acquire_ctx ctx;
15562 int ret;
15563 int i;
15564
15565 /* Only supported on platforms that use atomic watermark design */
ed4a6a7c 15566 if (!dev_priv->display.optimize_watermarks)
d93c0372
MR
15567 return;
15568
15569 /*
15570 * We need to hold connection_mutex before calling duplicate_state so
15571 * that the connector loop is protected.
15572 */
15573 drm_modeset_acquire_init(&ctx, 0);
15574retry:
0cd1262d 15575 ret = drm_modeset_lock_all_ctx(dev, &ctx);
d93c0372
MR
15576 if (ret == -EDEADLK) {
15577 drm_modeset_backoff(&ctx);
15578 goto retry;
15579 } else if (WARN_ON(ret)) {
0cd1262d 15580 goto fail;
d93c0372
MR
15581 }
15582
15583 state = drm_atomic_helper_duplicate_state(dev, &ctx);
15584 if (WARN_ON(IS_ERR(state)))
0cd1262d 15585 goto fail;
d93c0372 15586
ed4a6a7c
MR
15587 /*
15588 * Hardware readout is the only time we don't want to calculate
15589 * intermediate watermarks (since we don't trust the current
15590 * watermarks).
15591 */
15592 to_intel_atomic_state(state)->skip_intermediate_wm = true;
15593
d93c0372
MR
15594 ret = intel_atomic_check(dev, state);
15595 if (ret) {
15596 /*
15597 * If we fail here, it means that the hardware appears to be
15598 * programmed in a way that shouldn't be possible, given our
15599 * understanding of watermark requirements. This might mean a
15600 * mistake in the hardware readout code or a mistake in the
15601 * watermark calculations for a given platform. Raise a WARN
15602 * so that this is noticeable.
15603 *
15604 * If this actually happens, we'll have to just leave the
15605 * BIOS-programmed watermarks untouched and hope for the best.
15606 */
15607 WARN(true, "Could not determine valid watermarks for inherited state\n");
0cd1262d 15608 goto fail;
d93c0372
MR
15609 }
15610
15611 /* Write calculated watermark values back */
d93c0372
MR
15612 for_each_crtc_in_state(state, crtc, cstate, i) {
15613 struct intel_crtc_state *cs = to_intel_crtc_state(cstate);
15614
ed4a6a7c
MR
15615 cs->wm.need_postvbl_update = true;
15616 dev_priv->display.optimize_watermarks(cs);
d93c0372
MR
15617 }
15618
15619 drm_atomic_state_free(state);
0cd1262d 15620fail:
d93c0372
MR
15621 drm_modeset_drop_locks(&ctx);
15622 drm_modeset_acquire_fini(&ctx);
15623}
15624
/*
 * One-time modeset initialization: registers the DRM mode config, applies
 * quirks, creates crtcs/planes, reads out the BIOS hardware state, reserves
 * the BIOS framebuffer, and finally sanitizes the watermarks. The ordering
 * of the steps below is load-bearing (e.g. watermark sanitization must run
 * after the BIOS fb readout).
 */
void intel_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	int sprite, ret;
	enum pipe pipe;
	struct intel_crtc *crtc;

	drm_mode_config_init(dev);

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;

	dev->mode_config.preferred_depth = 24;
	dev->mode_config.prefer_shadow = 1;

	dev->mode_config.allow_fb_modifiers = true;

	dev->mode_config.funcs = &intel_mode_funcs;

	intel_init_quirks(dev);

	intel_init_pm(dev);

	/* Nothing further to set up without display pipes. */
	if (INTEL_INFO(dev)->num_pipes == 0)
		return;

	/*
	 * There may be no VBT; and if the BIOS enabled SSC we can
	 * just keep using it to avoid unnecessary flicker. Whereas if the
	 * BIOS isn't using it, don't assume it will work even if the VBT
	 * indicates as much.
	 */
	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
		bool bios_lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
					    DREF_SSC1_ENABLE);

		if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
			DRM_DEBUG_KMS("SSC %sabled by BIOS, overriding VBT which says %sabled\n",
				      bios_lvds_use_ssc ? "en" : "dis",
				      dev_priv->vbt.lvds_use_ssc ? "en" : "dis");
			dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
		}
	}

	/* Maximum framebuffer dimensions by hardware generation. */
	if (IS_GEN2(dev)) {
		dev->mode_config.max_width = 2048;
		dev->mode_config.max_height = 2048;
	} else if (IS_GEN3(dev)) {
		dev->mode_config.max_width = 4096;
		dev->mode_config.max_height = 4096;
	} else {
		dev->mode_config.max_width = 8192;
		dev->mode_config.max_height = 8192;
	}

	/* Cursor plane size limits, also generation dependent. */
	if (IS_845G(dev) || IS_I865G(dev)) {
		dev->mode_config.cursor_width = IS_845G(dev) ? 64 : 512;
		dev->mode_config.cursor_height = 1023;
	} else if (IS_GEN2(dev)) {
		dev->mode_config.cursor_width = GEN2_CURSOR_WIDTH;
		dev->mode_config.cursor_height = GEN2_CURSOR_HEIGHT;
	} else {
		dev->mode_config.cursor_width = MAX_CURSOR_WIDTH;
		dev->mode_config.cursor_height = MAX_CURSOR_HEIGHT;
	}

	dev->mode_config.fb_base = ggtt->mappable_base;

	DRM_DEBUG_KMS("%d display pipe%s available.\n",
		      INTEL_INFO(dev)->num_pipes,
		      INTEL_INFO(dev)->num_pipes > 1 ? "s" : "");

	/* Create a crtc per pipe and its sprite planes. */
	for_each_pipe(dev_priv, pipe) {
		intel_crtc_init(dev, pipe);
		for_each_sprite(dev_priv, pipe, sprite) {
			ret = intel_plane_init(dev, pipe, sprite);
			if (ret)
				DRM_DEBUG_KMS("pipe %c sprite %c init failed: %d\n",
					      pipe_name(pipe), sprite_name(pipe, sprite), ret);
		}
	}

	intel_update_czclk(dev_priv);
	intel_update_cdclk(dev);

	intel_shared_dpll_init(dev);

	if (dev_priv->max_cdclk_freq == 0)
		intel_update_max_cdclk(dev);

	/* Just disable it once at startup */
	i915_disable_vga(dev);
	intel_setup_outputs(dev);

	/* Read out the state the BIOS/GOP left the hardware in. */
	drm_modeset_lock_all(dev);
	intel_modeset_setup_hw_state(dev);
	drm_modeset_unlock_all(dev);

	for_each_intel_crtc(dev, crtc) {
		struct intel_initial_plane_config plane_config = {};

		if (!crtc->active)
			continue;

		/*
		 * Note that reserving the BIOS fb up front prevents us
		 * from stuffing other stolen allocations like the ring
		 * on top. This prevents some ugliness at boot time, and
		 * can even allow for smooth boot transitions if the BIOS
		 * fb is large enough for the active pipe configuration.
		 */
		dev_priv->display.get_initial_plane_config(crtc,
							   &plane_config);

		/*
		 * If the fb is shared between multiple heads, we'll
		 * just get the first one.
		 */
		intel_find_initial_plane_obj(crtc, &plane_config);
	}

	/*
	 * Make sure hardware watermarks really match the state we read out.
	 * Note that we need to do this after reconstructing the BIOS fb's
	 * since the watermark calculation done here will use pstate->fb.
	 */
	sanitize_watermarks(dev);
}
15754
/*
 * Force pipe A on by running one load-detect cycle on the first analog
 * (CRT) connector found; used by intel_sanitize_crtc() for the
 * QUIRK_PIPEA_FORCE machines. Silently does nothing if no analog
 * connector exists.
 */
static void intel_enable_pipe_a(struct drm_device *dev)
{
	struct intel_connector *connector;
	struct drm_connector *crt = NULL;
	struct intel_load_detect_pipe load_detect_temp;
	struct drm_modeset_acquire_ctx *ctx = dev->mode_config.acquire_ctx;

	/* We can't just switch on the pipe A, we need to set things up with a
	 * proper mode and output configuration. As a gross hack, enable pipe A
	 * by enabling the load detect pipe once. */
	for_each_intel_connector(dev, connector) {
		/*
		 * NOTE(review): assumes every connector has a non-NULL
		 * ->encoder at this point — confirm against connector init.
		 */
		if (connector->encoder->type == INTEL_OUTPUT_ANALOG) {
			crt = &connector->base;
			break;
		}
	}

	if (!crt)
		return;

	/* Grab the load-detect pipe and release it again right away. */
	if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, ctx))
		intel_release_load_detect_pipe(crt, &load_detect_temp, ctx);
}
15778
fa555837
DV
15779static bool
15780intel_check_plane_mapping(struct intel_crtc *crtc)
15781{
7eb552ae 15782 struct drm_device *dev = crtc->base.dev;
fac5e23e 15783 struct drm_i915_private *dev_priv = to_i915(dev);
649636ef 15784 u32 val;
fa555837 15785
7eb552ae 15786 if (INTEL_INFO(dev)->num_pipes == 1)
fa555837
DV
15787 return true;
15788
649636ef 15789 val = I915_READ(DSPCNTR(!crtc->plane));
fa555837
DV
15790
15791 if ((val & DISPLAY_PLANE_ENABLE) &&
15792 (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
15793 return false;
15794
15795 return true;
15796}
15797
02e93c35
VS
15798static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
15799{
15800 struct drm_device *dev = crtc->base.dev;
15801 struct intel_encoder *encoder;
15802
15803 for_each_encoder_on_crtc(dev, &crtc->base, encoder)
15804 return true;
15805
15806 return false;
15807}
15808
dd756198
VS
15809static bool intel_encoder_has_connectors(struct intel_encoder *encoder)
15810{
15811 struct drm_device *dev = encoder->base.dev;
15812 struct intel_connector *connector;
15813
15814 for_each_connector_on_encoder(dev, &encoder->base, connector)
15815 return true;
15816
15817 return false;
15818}
15819
/*
 * Bring one crtc's software state and hardware state into agreement after
 * BIOS/resume readout: clear debug frame-start delays, fix up vblank
 * bookkeeping, kill stray non-primary planes, repair a crossed plane->pipe
 * mapping (pre-gen4), honour QUIRK_PIPEA_FORCE, and disable crtcs that
 * have no encoders. The ordering of these steps is load-bearing — see the
 * inline comments.
 */
static void intel_sanitize_crtc(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;

	/* Clear any frame start delays used for debugging left by the BIOS */
	if (!transcoder_is_dsi(cpu_transcoder)) {
		i915_reg_t reg = PIPECONF(cpu_transcoder);

		I915_WRITE(reg,
			   I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
	}

	/* restore vblank interrupts to correct state */
	drm_crtc_vblank_reset(&crtc->base);
	if (crtc->active) {
		struct intel_plane *plane;

		drm_crtc_vblank_on(&crtc->base);

		/* Disable everything but the primary plane */
		for_each_intel_plane_on_crtc(dev, crtc, plane) {
			if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
				continue;

			plane->disable_plane(&plane->base, &crtc->base);
		}
	}

	/* We need to sanitize the plane -> pipe mapping first because this will
	 * disable the crtc (and hence change the state) if it is wrong. Note
	 * that gen4+ has a fixed plane -> pipe mapping. */
	if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) {
		bool plane;

		DRM_DEBUG_KMS("[CRTC:%d:%s] wrong plane connection detected!\n",
			      crtc->base.base.id, crtc->base.name);

		/* Pipe has the wrong plane attached and the plane is active.
		 * Temporarily change the plane mapping and disable everything
		 * ... */
		plane = crtc->plane;
		to_intel_plane_state(crtc->base.primary->state)->visible = true;
		crtc->plane = !plane;
		intel_crtc_disable_noatomic(&crtc->base);
		crtc->plane = plane;
	}

	if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
	    crtc->pipe == PIPE_A && !crtc->active) {
		/* BIOS forgot to enable pipe A, this mostly happens after
		 * resume. Force-enable the pipe to fix this, the update_dpms
		 * call below we restore the pipe to the right state, but leave
		 * the required bits on. */
		intel_enable_pipe_a(dev);
	}

	/* Adjust the state of the output pipe according to whether we
	 * have active connectors/encoders. */
	if (crtc->active && !intel_crtc_has_encoders(crtc))
		intel_crtc_disable_noatomic(&crtc->base);

	if (crtc->active || HAS_GMCH_DISPLAY(dev)) {
		/*
		 * We start out with underrun reporting disabled to avoid races.
		 * For correct bookkeeping mark this on active crtcs.
		 *
		 * Also on gmch platforms we dont have any hardware bits to
		 * disable the underrun reporting. Which means we need to start
		 * out with underrun reporting disabled also on inactive pipes,
		 * since otherwise we'll complain about the garbage we read when
		 * e.g. coming up after runtime pm.
		 *
		 * No protection against concurrent access is required - at
		 * worst a fifo underrun happens which also sets this to false.
		 */
		crtc->cpu_fifo_underrun_disabled = true;
		crtc->pch_fifo_underrun_disabled = true;
	}
}
15901
/*
 * Bring an encoder's software state back in line with the hardware state
 * read out earlier.  An encoder that still has connectors attached but no
 * active pipe behind it (typically leftover register state after resume)
 * is disabled manually and its connectors are clamped to DPMS_OFF.
 */
static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
	struct intel_connector *connector;
	struct drm_device *dev = encoder->base.dev;

	/* We need to check both for a crtc link (meaning that the
	 * encoder is active and trying to read from a pipe) and the
	 * pipe itself being active. */
	bool has_active_crtc = encoder->base.crtc &&
		to_intel_crtc(encoder->base.crtc)->active;

	if (intel_encoder_has_connectors(encoder) && !has_active_crtc) {
		DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
			      encoder->base.base.id,
			      encoder->base.name);

		/* Connector is active, but has no active pipe. This is
		 * fallout from our resume register restoring. Disable
		 * the encoder manually again. */
		if (encoder->base.crtc) {
			DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
				      encoder->base.base.id,
				      encoder->base.name);
			/* ->disable must precede ->post_disable, mirroring the
			 * normal modeset disable sequence. */
			encoder->disable(encoder);
			if (encoder->post_disable)
				encoder->post_disable(encoder);
		}
		encoder->base.crtc = NULL;

		/* Inconsistent output/port/pipe state happens presumably due to
		 * a bug in one of the get_hw_state functions. Or someplace else
		 * in our code, like the register restore mess on resume. Clamp
		 * things to off as a safer default. */
		for_each_intel_connector(dev, connector) {
			if (connector->encoder != encoder)
				continue;
			connector->base.dpms = DRM_MODE_DPMS_OFF;
			connector->base.encoder = NULL;
		}
	}
	/* Enabled encoders without active connectors will be fixed in
	 * the crtc fixup. */
}
15945
04098753 15946void i915_redisable_vga_power_on(struct drm_device *dev)
0fde901f 15947{
fac5e23e 15948 struct drm_i915_private *dev_priv = to_i915(dev);
f0f59a00 15949 i915_reg_t vga_reg = i915_vgacntrl_reg(dev);
0fde901f 15950
04098753
ID
15951 if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
15952 DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
15953 i915_disable_vga(dev);
15954 }
15955}
15956
/*
 * Paranoid re-check that nothing re-enabled the legacy VGA plane, safe to
 * call even very early in resume before power-well bookkeeping is restored.
 */
void i915_redisable_vga(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	/* This function can be called both from intel_modeset_setup_hw_state or
	 * at a very early point in our resume sequence, where the power well
	 * structures are not yet restored. Since this function is at a very
	 * paranoid "someone might have enabled VGA while we were not looking"
	 * level, just check if the power well is enabled instead of trying to
	 * follow the "don't touch the power well if we don't need it" policy
	 * the rest of the driver uses. */
	if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_VGA))
		return;

	i915_redisable_vga_power_on(dev);

	/* Balance the conditional get above. */
	intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);
}
15975
f9cd7b88 15976static bool primary_get_hw_state(struct intel_plane *plane)
98ec7739 15977{
f9cd7b88 15978 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
98ec7739 15979
f9cd7b88 15980 return I915_READ(DSPCNTR(plane->plane)) & DISPLAY_PLANE_ENABLE;
d032ffa0
ML
15981}
15982
f9cd7b88
VS
15983/* FIXME read out full plane state for all planes */
15984static void readout_plane_state(struct intel_crtc *crtc)
d032ffa0 15985{
b26d3ea3 15986 struct drm_plane *primary = crtc->base.primary;
f9cd7b88 15987 struct intel_plane_state *plane_state =
b26d3ea3 15988 to_intel_plane_state(primary->state);
d032ffa0 15989
19b8d387 15990 plane_state->visible = crtc->active &&
b26d3ea3
ML
15991 primary_get_hw_state(to_intel_plane(primary));
15992
15993 if (plane_state->visible)
15994 crtc->base.state->plane_mask |= 1 << drm_plane_index(primary);
98ec7739
VS
15995}
15996
/*
 * Read the current display hardware state (crtcs, shared DPLLs, encoders,
 * connectors) into the software state structures.  No sanitization happens
 * here; intel_modeset_setup_hw_state() fixes up inconsistencies afterwards.
 */
static void intel_modeset_readout_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	struct intel_connector *connector;
	int i;

	dev_priv->active_crtcs = 0;

	/* Pass 1: per-crtc pipe config, active mask and minimum pixel clock. */
	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state = crtc->config;
		int pixclk = 0;

		/* Throw away any stale state before re-reading from hardware. */
		__drm_atomic_helper_crtc_destroy_state(&crtc_state->base);
		memset(crtc_state, 0, sizeof(*crtc_state));
		crtc_state->base.crtc = &crtc->base;

		crtc_state->base.active = crtc_state->base.enable =
			dev_priv->display.get_pipe_config(crtc, crtc_state);

		crtc->base.enabled = crtc_state->base.enable;
		crtc->active = crtc_state->base.active;

		if (crtc_state->base.active) {
			dev_priv->active_crtcs |= 1 << crtc->pipe;

			if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
				pixclk = ilk_pipe_pixel_rate(crtc_state);
			else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
				pixclk = crtc_state->base.adjusted_mode.crtc_clock;
			else
				WARN_ON(dev_priv->display.modeset_calc_cdclk);

			/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
			if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
				pixclk = DIV_ROUND_UP(pixclk * 100, 95);
		}

		dev_priv->min_pixclk[crtc->pipe] = pixclk;

		readout_plane_state(crtc);

		DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n",
			      crtc->base.base.id, crtc->base.name,
			      crtc->active ? "enabled" : "disabled");
	}

	/* Pass 2: shared DPLL state and which crtcs currently use each PLL. */
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		pll->on = pll->funcs.get_hw_state(dev_priv, pll,
						  &pll->config.hw_state);
		pll->config.crtc_mask = 0;
		for_each_intel_crtc(dev, crtc) {
			if (crtc->active && crtc->config->shared_dpll == pll)
				pll->config.crtc_mask |= 1 << crtc->pipe;
		}
		pll->active_mask = pll->config.crtc_mask;

		DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
			      pll->name, pll->config.crtc_mask, pll->on);
	}

	/* Pass 3: encoder -> crtc links and per-encoder config. */
	for_each_intel_encoder(dev, encoder) {
		pipe = 0;

		if (encoder->get_hw_state(encoder, &pipe)) {
			crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
			encoder->base.crtc = &crtc->base;
			crtc->config->output_types |= 1 << encoder->type;
			encoder->get_config(encoder, crtc->config);
		} else {
			encoder->base.crtc = NULL;
		}

		DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
			      encoder->base.base.id,
			      encoder->base.name,
			      encoder->base.crtc ? "enabled" : "disabled",
			      pipe_name(pipe));
	}

	/* Pass 4: connector DPMS state and crtc connector/encoder masks. */
	for_each_intel_connector(dev, connector) {
		if (connector->get_hw_state(connector)) {
			connector->base.dpms = DRM_MODE_DPMS_ON;

			encoder = connector->encoder;
			connector->base.encoder = &encoder->base;

			if (encoder->base.crtc &&
			    encoder->base.crtc->state->active) {
				/*
				 * This has to be done during hardware readout
				 * because anything calling .crtc_disable may
				 * rely on the connector_mask being accurate.
				 */
				encoder->base.crtc->state->connector_mask |=
					1 << drm_connector_index(&connector->base);
				encoder->base.crtc->state->encoder_mask |=
					1 << drm_encoder_index(&encoder->base);
			}

		} else {
			connector->base.dpms = DRM_MODE_DPMS_OFF;
			connector->base.encoder = NULL;
		}
		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
			      connector->base.base.id,
			      connector->base.name,
			      connector->base.encoder ? "enabled" : "disabled");
	}

	/* Pass 5: derive modes and timestamping constants for active crtcs. */
	for_each_intel_crtc(dev, crtc) {
		crtc->base.hwmode = crtc->config->base.adjusted_mode;

		memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
		if (crtc->base.state->active) {
			intel_mode_from_pipe_config(&crtc->base.mode, crtc->config);
			intel_mode_from_pipe_config(&crtc->base.state->adjusted_mode, crtc->config);
			WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode));

			/*
			 * The initial mode needs to be set in order to keep
			 * the atomic core happy. It wants a valid mode if the
			 * crtc's enabled, so we do the above call.
			 *
			 * At this point some state updated by the connectors
			 * in their ->detect() callback has not run yet, so
			 * no recalculation can be done yet.
			 *
			 * Even if we could do a recalculation and modeset
			 * right now it would cause a double modeset if
			 * fbdev or userspace chooses a different initial mode.
			 *
			 * If that happens, someone indicated they wanted a
			 * mode change, which means it's safe to do a full
			 * recalculation.
			 */
			crtc->base.state->mode.private_flags = I915_MODE_FLAG_INHERITED;

			drm_calc_timestamping_constants(&crtc->base, &crtc->base.hwmode);
			update_scanline_offset(crtc);
		}

		intel_pipe_config_sanity_check(dev_priv, crtc->config);
	}
}
16146
/* Scan out the current hw modeset state,
 * and sanitizes it to the current state
 */
static void
intel_modeset_setup_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum pipe pipe;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	int i;

	intel_modeset_readout_hw_state(dev);

	/* HW state is read out, now we need to sanitize this mess. */
	for_each_intel_encoder(dev, encoder) {
		intel_sanitize_encoder(encoder);
	}

	for_each_pipe(dev_priv, pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
		intel_sanitize_crtc(crtc);
		intel_dump_pipe_config(crtc, crtc->config,
				       "[setup_hw_state]");
	}

	intel_modeset_update_connector_atomic_state(dev);

	/* Shut down any shared DPLL that is powered on but unused after the
	 * crtc sanitization above. */
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		if (!pll->on || pll->active_mask)
			continue;

		DRM_DEBUG_KMS("%s enabled but not in use, disabling\n", pll->name);

		pll->funcs.disable(dev_priv, pll);
		pll->on = false;
	}

	/* Read back the platform-specific watermark state. */
	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
		vlv_wm_get_hw_state(dev);
	else if (IS_GEN9(dev))
		skl_wm_get_hw_state(dev);
	else if (HAS_PCH_SPLIT(dev))
		ilk_wm_get_hw_state(dev);

	for_each_intel_crtc(dev, crtc) {
		unsigned long put_domains;

		/* A non-zero result here would indicate unbalanced power
		 * domain references left over from the readout. */
		put_domains = modeset_get_crtc_power_domains(&crtc->base, crtc->config);
		if (WARN_ON(put_domains))
			modeset_put_power_domains(dev_priv, put_domains);
	}
	intel_display_set_init_power(dev_priv, false);

	intel_fbc_init_pipe_state(dev_priv);
}
7d0bc1ea 16205
/*
 * Restore the display state on resume: re-read and sanitize the hardware
 * state, then replay the atomic state saved at suspend time (if any),
 * retrying the whole locking sequence on -EDEADLK.
 */
void intel_display_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state = dev_priv->modeset_restore_state;
	struct drm_modeset_acquire_ctx ctx;
	int ret;
	bool setup = false;

	dev_priv->modeset_restore_state = NULL;

	/*
	 * This is a cludge because with real atomic modeset mode_config.mutex
	 * won't be taken. Unfortunately some probed state like
	 * audio_codec_enable is still protected by mode_config.mutex, so lock
	 * it here for now.
	 */
	mutex_lock(&dev->mode_config.mutex);
	drm_modeset_acquire_init(&ctx, 0);

retry:
	ret = drm_modeset_lock_all_ctx(dev, &ctx);

	/* Only run the hw-state setup once, even if we loop on -EDEADLK. */
	if (ret == 0 && !setup) {
		setup = true;

		intel_modeset_setup_hw_state(dev);
		i915_redisable_vga(dev);
	}

	if (ret == 0 && state) {
		struct drm_crtc_state *crtc_state;
		struct drm_crtc *crtc;
		int i;

		state->acquire_ctx = &ctx;

		/* ignore any reset values/BIOS leftovers in the WM registers */
		to_intel_atomic_state(state)->skip_intermediate_wm = true;

		for_each_crtc_in_state(state, crtc, crtc_state, i) {
			/*
			 * Force recalculation even if we restore
			 * current state. With fast modeset this may not result
			 * in a modeset when the state is compatible.
			 */
			crtc_state->mode_changed = true;
		}

		ret = drm_atomic_commit(state);
	}

	/* Standard w/w mutex backoff dance before retrying the locks. */
	if (ret == -EDEADLK) {
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	mutex_unlock(&dev->mode_config.mutex);

	if (ret) {
		DRM_ERROR("Restoring old state failed with %i\n", ret);
		drm_atomic_state_free(state);
	}
}
16271
/*
 * Late modeset init that needs GEM: bring up GT power saving and hardware,
 * set up the overlay, and pin/fence any framebuffers taken over from the
 * BIOS.  A framebuffer that cannot be pinned is dropped from the crtc.
 */
void intel_modeset_gem_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *c;
	struct drm_i915_gem_object *obj;
	int ret;

	intel_init_gt_powersave(dev_priv);

	intel_modeset_init_hw(dev);

	intel_setup_overlay(dev_priv);

	/*
	 * Make sure any fbs we allocated at startup are properly
	 * pinned & fenced. When we do the allocation it's too early
	 * for this.
	 */
	for_each_crtc(dev, c) {
		obj = intel_fb_obj(c->primary->fb);
		if (obj == NULL)
			continue;

		mutex_lock(&dev->struct_mutex);
		ret = intel_pin_and_fence_fb_obj(c->primary->fb,
						 c->primary->state->rotation);
		mutex_unlock(&dev->struct_mutex);
		if (ret) {
			/* Pinning failed: detach the inherited fb from this
			 * crtc entirely so later modesets start from scratch. */
			DRM_ERROR("failed to pin boot fb on pipe %d\n",
				  to_intel_crtc(c)->pipe);
			drm_framebuffer_unreference(c->primary->fb);
			c->primary->fb = NULL;
			c->primary->crtc = c->primary->state->crtc = NULL;
			update_state_fb(c->primary);
			c->state->plane_mask &= ~(1 << drm_plane_index(c->primary));
		}
	}
}
16310
/*
 * Register the userspace-visible parts of a connector; currently only the
 * backlight device.  Returns 0 on success or a negative error code.
 */
int intel_connector_register(struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);

	return intel_backlight_device_register(intel_connector);
}
16325
/* Tear down the userspace-visible parts of a connector (backlight). */
void intel_connector_unregister(struct drm_connector *connector)
{
	intel_backlight_device_unregister(to_intel_connector(connector));
	intel_panel_destroy_backlight(connector);
}
16333
/*
 * Full modeset teardown on driver unload.  The ordering here is load-bearing:
 * interrupts and polling must die first, work must be flushed before the
 * mode config is destroyed, and GT powersave/gmbus go last.
 */
void intel_modeset_cleanup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);

	intel_disable_gt_powersave(dev_priv);

	/*
	 * Interrupts and polling as the first thing to avoid creating havoc.
	 * Too much stuff here (turning of connectors, ...) would
	 * experience fancy races otherwise.
	 */
	intel_irq_uninstall(dev_priv);

	/*
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
	 * poll handlers. Hence disable polling after hpd handling is shut down.
	 */
	drm_kms_helper_poll_fini(dev);

	intel_unregister_dsm_handler();

	intel_fbc_global_disable(dev_priv);

	/* flush any delayed tasks or pending work */
	flush_scheduled_work();

	drm_mode_config_cleanup(dev);

	intel_cleanup_overlay(dev_priv);

	intel_cleanup_gt_powersave(dev_priv);

	intel_teardown_gmbus(dev);
}
16368
df0e9248
CW
16369void intel_connector_attach_encoder(struct intel_connector *connector,
16370 struct intel_encoder *encoder)
16371{
16372 connector->encoder = encoder;
16373 drm_mode_connector_attach_encoder(&connector->base,
16374 &encoder->base);
79e53945 16375}
28d52043
DA
16376
16377/*
16378 * set vga decode state - true == enable VGA decode
16379 */
16380int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
16381{
fac5e23e 16382 struct drm_i915_private *dev_priv = to_i915(dev);
a885b3cc 16383 unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
28d52043
DA
16384 u16 gmch_ctrl;
16385
75fa041d
CW
16386 if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) {
16387 DRM_ERROR("failed to read control word\n");
16388 return -EIO;
16389 }
16390
c0cc8a55
CW
16391 if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !state)
16392 return 0;
16393
28d52043
DA
16394 if (state)
16395 gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
16396 else
16397 gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
75fa041d
CW
16398
16399 if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) {
16400 DRM_ERROR("failed to write control word\n");
16401 return -EIO;
16402 }
16403
28d52043
DA
16404 return 0;
16405}
c4a1d9e4 16406
/*
 * Snapshot of display-related registers captured at GPU error time, one
 * entry per pipe/plane/cursor plus up to four transcoders (A/B/C + eDP).
 * Filled by intel_display_capture_error_state() and printed by
 * intel_display_print_error_state().
 */
struct intel_display_error_state {

	u32 power_well_driver;		/* HSW/BDW power well driver control */

	int num_transcoders;		/* valid entries in transcoder[] */

	struct intel_cursor_error_state {
		u32 control;
		u32 position;
		u32 base;
		u32 size;
	} cursor[I915_MAX_PIPES];

	struct intel_pipe_error_state {
		bool power_domain_on;	/* registers below only valid if true */
		u32 source;
		u32 stat;
	} pipe[I915_MAX_PIPES];

	struct intel_plane_error_state {
		u32 control;
		u32 stride;
		u32 size;
		u32 pos;
		u32 addr;
		u32 surface;
		u32 tile_offset;
	} plane[I915_MAX_PIPES];

	struct intel_transcoder_error_state {
		bool power_domain_on;	/* registers below only valid if true */
		enum transcoder cpu_transcoder;

		u32 conf;

		u32 htotal;
		u32 hblank;
		u32 hsync;
		u32 vtotal;
		u32 vblank;
		u32 vsync;
	} transcoder[4];
};
16450
/*
 * Capture the display register state for error reporting.  Allocated with
 * GFP_ATOMIC since this runs from the error-capture path; returns NULL on
 * zero-pipe hardware or allocation failure.  Powered-down pipes and
 * transcoders are skipped (their power_domain_on flag stays false).
 */
struct intel_display_error_state *
intel_display_capture_error_state(struct drm_i915_private *dev_priv)
{
	struct intel_display_error_state *error;
	int transcoders[] = {
		TRANSCODER_A,
		TRANSCODER_B,
		TRANSCODER_C,
		TRANSCODER_EDP,
	};
	int i;

	if (INTEL_INFO(dev_priv)->num_pipes == 0)
		return NULL;

	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (error == NULL)
		return NULL;

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);

	for_each_pipe(dev_priv, i) {
		error->pipe[i].power_domain_on =
			__intel_display_power_is_enabled(dev_priv,
							 POWER_DOMAIN_PIPE(i));
		/* Don't touch registers of a powered-down pipe. */
		if (!error->pipe[i].power_domain_on)
			continue;

		error->cursor[i].control = I915_READ(CURCNTR(i));
		error->cursor[i].position = I915_READ(CURPOS(i));
		error->cursor[i].base = I915_READ(CURBASE(i));

		error->plane[i].control = I915_READ(DSPCNTR(i));
		error->plane[i].stride = I915_READ(DSPSTRIDE(i));
		if (INTEL_GEN(dev_priv) <= 3) {
			error->plane[i].size = I915_READ(DSPSIZE(i));
			error->plane[i].pos = I915_READ(DSPPOS(i));
		}
		if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
			error->plane[i].addr = I915_READ(DSPADDR(i));
		if (INTEL_GEN(dev_priv) >= 4) {
			error->plane[i].surface = I915_READ(DSPSURF(i));
			error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
		}

		error->pipe[i].source = I915_READ(PIPESRC(i));

		if (HAS_GMCH_DISPLAY(dev_priv))
			error->pipe[i].stat = I915_READ(PIPESTAT(i));
	}

	/* Note: this does not include DSI transcoders. */
	error->num_transcoders = INTEL_INFO(dev_priv)->num_pipes;
	if (HAS_DDI(dev_priv))
		error->num_transcoders++; /* Account for eDP. */

	for (i = 0; i < error->num_transcoders; i++) {
		enum transcoder cpu_transcoder = transcoders[i];

		error->transcoder[i].power_domain_on =
			__intel_display_power_is_enabled(dev_priv,
				POWER_DOMAIN_TRANSCODER(cpu_transcoder));
		/* Skip powered-down transcoders, same as pipes above. */
		if (!error->transcoder[i].power_domain_on)
			continue;

		error->transcoder[i].cpu_transcoder = cpu_transcoder;

		error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
		error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
		error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
		error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
		error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
		error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
		error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
	}

	return error;
}
16530
#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)

/*
 * Pretty-print a previously captured intel_display_error_state into the
 * error-state buffer.  Safe to call with error == NULL (no-op).  The
 * gen-based conditionals must mirror those in
 * intel_display_capture_error_state() so only captured fields are printed.
 */
void
intel_display_print_error_state(struct drm_i915_error_state_buf *m,
				struct drm_device *dev,
				struct intel_display_error_state *error)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	int i;

	if (!error)
		return;

	err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes);
	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		err_printf(m, "PWR_WELL_CTL2: %08x\n",
			   error->power_well_driver);
	for_each_pipe(dev_priv, i) {
		err_printf(m, "Pipe [%d]:\n", i);
		err_printf(m, "  Power: %s\n",
			   onoff(error->pipe[i].power_domain_on));
		err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
		err_printf(m, "  STAT: %08x\n", error->pipe[i].stat);

		err_printf(m, "Plane [%d]:\n", i);
		err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
		err_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
		if (INTEL_INFO(dev)->gen <= 3) {
			err_printf(m, "  SIZE: %08x\n", error->plane[i].size);
			err_printf(m, "  POS: %08x\n", error->plane[i].pos);
		}
		if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
			err_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
		if (INTEL_INFO(dev)->gen >= 4) {
			err_printf(m, "  SURF: %08x\n", error->plane[i].surface);
			err_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
		}

		err_printf(m, "Cursor [%d]:\n", i);
		err_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
		err_printf(m, "  POS: %08x\n", error->cursor[i].position);
		err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
	}

	for (i = 0; i < error->num_transcoders; i++) {
		err_printf(m, "CPU transcoder: %s\n",
			   transcoder_name(error->transcoder[i].cpu_transcoder));
		err_printf(m, "  Power: %s\n",
			   onoff(error->transcoder[i].power_domain_on));
		err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
		err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
		err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
		err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
		err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
		err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
		err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
	}
}