]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - drivers/gpu/drm/i915/intel_display.c
drm/i915: Convert wait_for(I915_READ(reg)) to intel_wait_for_register()
[mirror_ubuntu-bionic-kernel.git] / drivers / gpu / drm / i915 / intel_display.c
CommitLineData
79e53945
JB
1/*
2 * Copyright © 2006-2007 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 */
26
618563e3 27#include <linux/dmi.h>
c1c7af60
JB
28#include <linux/module.h>
29#include <linux/input.h>
79e53945 30#include <linux/i2c.h>
7662c8bd 31#include <linux/kernel.h>
5a0e3ad6 32#include <linux/slab.h>
9cce37f4 33#include <linux/vgaarb.h>
e0dac65e 34#include <drm/drm_edid.h>
760285e7 35#include <drm/drmP.h>
79e53945 36#include "intel_drv.h"
760285e7 37#include <drm/i915_drm.h>
79e53945 38#include "i915_drv.h"
c37efb99 39#include "i915_gem_dmabuf.h"
db18b6a6 40#include "intel_dsi.h"
e5510fac 41#include "i915_trace.h"
319c1d42 42#include <drm/drm_atomic.h>
c196e1d6 43#include <drm/drm_atomic_helper.h>
760285e7
DH
44#include <drm/drm_dp_helper.h>
45#include <drm/drm_crtc_helper.h>
465c120c
MR
46#include <drm/drm_plane_helper.h>
47#include <drm/drm_rect.h>
c0f372b3 48#include <linux/dma_remapping.h>
fd8e058a 49#include <linux/reservation.h>
79e53945 50
5a21b665
DV
51static bool is_mmio_work(struct intel_flip_work *work)
52{
53 return work->mmio_work.func;
54}
55
/* Primary plane formats for gen <= 3 */
static const uint32_t i8xx_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB1555,
	DRM_FORMAT_XRGB8888,
};
63
/* Primary plane formats for gen >= 4 */
static const uint32_t i965_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	/* 10bpc formats, only on gen >= 4 */
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
};
73
/* Primary plane formats for Skylake+: adds alpha and packed YUV variants. */
static const uint32_t skl_primary_formats[] = {
	DRM_FORMAT_C8,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_XRGB2101010,
	DRM_FORMAT_XBGR2101010,
	DRM_FORMAT_YUYV,
	DRM_FORMAT_YVYU,
	DRM_FORMAT_UYVY,
	DRM_FORMAT_VYUY,
};
88
/* Cursor formats */
static const uint32_t intel_cursor_formats[] = {
	DRM_FORMAT_ARGB8888,
};
93
/* Forward declarations for helpers defined later in this file. */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config);
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config);

static int intel_framebuffer_init(struct drm_device *dev,
				  struct intel_framebuffer *ifb,
				  struct drm_mode_fb_cmd2 *mode_cmd,
				  struct drm_i915_gem_object *obj);
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc);
static void intel_set_pipe_timings(struct intel_crtc *intel_crtc);
static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc);
static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2);
static void ironlake_set_pipeconf(struct drm_crtc *crtc);
static void haswell_set_pipeconf(struct drm_crtc *crtc);
static void haswell_set_pipemisc(struct drm_crtc *crtc);
static void vlv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void chv_prepare_pll(struct intel_crtc *crtc,
			    const struct intel_crtc_state *pipe_config);
static void intel_begin_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc,
			     struct intel_crtc_state *crtc_state);
static void skylake_pfit_enable(struct intel_crtc *crtc);
static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force);
static void ironlake_pfit_enable(struct intel_crtc *crtc);
static void intel_modeset_setup_hw_state(struct drm_device *dev);
static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);
static int ilk_max_pixel_rate(struct drm_atomic_state *state);
static int bxt_calc_cdclk(int max_pixclk);
e7457a9a 127
/*
 * Valid ranges for the DPLL dividers of a given platform/output combination.
 * All dot/vco values are in kHz; the m/n/p members bound the individual
 * divider values used by the find_best_dpll routines below.
 */
struct intel_limit {
	struct {
		int min, max;
	} dot, vco, n, m, m1, m2, p, p1;

	/* p2 is chosen by dot clock: slow below dot_limit, fast above it */
	struct {
		int dot_limit;
		int p2_slow, p2_fast;
	} p2;
};
79e53945 138
bfa7df01
VS
139/* returns HPLL frequency in kHz */
140static int valleyview_get_vco(struct drm_i915_private *dev_priv)
141{
142 int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
143
144 /* Obtain SKU information */
145 mutex_lock(&dev_priv->sb_lock);
146 hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
147 CCK_FUSE_HPLL_FREQ_MASK;
148 mutex_unlock(&dev_priv->sb_lock);
149
150 return vco_freq[hpll_freq] * 1000;
151}
152
/*
 * Read a CCK clock-control register and derive the resulting clock rate
 * from the given reference frequency, in kHz.
 *
 * The WARN fires if the divider status field disagrees with the requested
 * divider value, i.e. a frequency change is still in progress.
 */
int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
		      const char *name, u32 reg, int ref_freq)
{
	u32 val;
	int divider;

	mutex_lock(&dev_priv->sb_lock);
	val = vlv_cck_read(dev_priv, reg);
	mutex_unlock(&dev_priv->sb_lock);

	divider = val & CCK_FREQUENCY_VALUES;

	WARN((val & CCK_FREQUENCY_STATUS) !=
	     (divider << CCK_FREQUENCY_STATUS_SHIFT),
	     "%s change in progress\n", name);

	/* rate = ref * 2 / (divider + 1), rounded to nearest */
	return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
}
171
172static int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
173 const char *name, u32 reg)
174{
175 if (dev_priv->hpll_freq == 0)
176 dev_priv->hpll_freq = valleyview_get_vco(dev_priv);
177
178 return vlv_get_cck_clock(dev_priv, name, reg,
179 dev_priv->hpll_freq);
bfa7df01
VS
180}
181
e7dc33f3
VS
182static int
183intel_pch_rawclk(struct drm_i915_private *dev_priv)
d2acd215 184{
e7dc33f3
VS
185 return (I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK) * 1000;
186}
d2acd215 187
/* hrawclk rate for VLV/CHV, in kHz, derived from the CCK display ref clock. */
static int
intel_vlv_hrawclk(struct drm_i915_private *dev_priv)
{
	/* RAWCLK_FREQ_VLV register updated from power well code */
	return vlv_get_cck_clock_hpll(dev_priv, "hrawclk",
				      CCK_DISPLAY_REF_CLOCK_CONTROL);
}
195
/*
 * hrawclk rate for g4x-class hardware, in kHz.
 *
 * Decodes the FSB frequency strap from CLKCFG and returns a quarter of it,
 * since hrawclock is 1/4 the FSB frequency.
 */
static int
intel_g4x_hrawclk(struct drm_i915_private *dev_priv)
{
	uint32_t clkcfg;

	/* hrawclock is 1/4 the FSB frequency */
	clkcfg = I915_READ(CLKCFG);
	switch (clkcfg & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_400:
		return 100000;
	case CLKCFG_FSB_533:
		return 133333;
	case CLKCFG_FSB_667:
		return 166667;
	case CLKCFG_FSB_800:
		return 200000;
	case CLKCFG_FSB_1067:
		return 266667;
	case CLKCFG_FSB_1333:
		return 333333;
	/* these two are just a guess; one of them might be right */
	case CLKCFG_FSB_1600:
	case CLKCFG_FSB_1600_ALT:
		return 400000;
	default:
		/* fall back to the 533 MHz FSB value */
		return 133333;
	}
}
224
/*
 * Determine and cache the platform's raw clock frequency in
 * dev_priv->rawclk_freq (kHz). Platforms without a rawclk (or where it is
 * not needed) leave the field untouched.
 */
void intel_update_rawclk(struct drm_i915_private *dev_priv)
{
	if (HAS_PCH_SPLIT(dev_priv))
		dev_priv->rawclk_freq = intel_pch_rawclk(dev_priv);
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		dev_priv->rawclk_freq = intel_vlv_hrawclk(dev_priv);
	else if (IS_G4X(dev_priv) || IS_PINEVIEW(dev_priv))
		dev_priv->rawclk_freq = intel_g4x_hrawclk(dev_priv);
	else
		return; /* no rawclk on other platforms, or no need to know it */

	DRM_DEBUG_DRIVER("rawclk rate: %d kHz\n", dev_priv->rawclk_freq);
}
238
bfa7df01
VS
239static void intel_update_czclk(struct drm_i915_private *dev_priv)
240{
666a4537 241 if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
bfa7df01
VS
242 return;
243
244 dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
245 CCK_CZ_CLOCK_CONTROL);
246
247 DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
248}
249
/*
 * FDI link frequency. On DDI platforms this is the SPLL-derived port clock;
 * on gen5 it is read from the BIOS-programmed FDI PLL; otherwise 2.7 GHz.
 */
static inline u32 /* units of 100MHz */
intel_fdi_link_freq(struct drm_i915_private *dev_priv,
		    const struct intel_crtc_state *pipe_config)
{
	if (HAS_DDI(dev_priv))
		return pipe_config->port_clock; /* SPLL */
	else if (IS_GEN5(dev_priv))
		return ((I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2) * 10000;
	else
		return 270000;
}
261
/* gen2 (i8xx) DPLL limits for DAC outputs. */
static const struct intel_limit intel_limits_i8xx_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 2 },
};

/* gen2 DPLL limits for DVO: same as DAC except p2 stays at /4 above the dot limit. */
static const struct intel_limit intel_limits_i8xx_dvo = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 2, .max = 33 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 4, .p2_fast = 4 },
};

/* gen2 DPLL limits for LVDS: narrower p1 range, p2 of 14 (slow) / 7 (fast). */
static const struct intel_limit intel_limits_i8xx_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 908000, .max = 1512000 },
	.n = { .min = 2, .max = 16 },
	.m = { .min = 96, .max = 140 },
	.m1 = { .min = 18, .max = 26 },
	.m2 = { .min = 6, .max = 16 },
	.p = { .min = 4, .max = 128 },
	.p1 = { .min = 1, .max = 6 },
	.p2 = { .dot_limit = 165000,
		.p2_slow = 14, .p2_fast = 7 },
};
273e27ca 300
/* gen3/4 (i9xx) DPLL limits for SDVO outputs. */
static const struct intel_limit intel_limits_i9xx_sdvo = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

/* gen3/4 (i9xx) DPLL limits for LVDS outputs. */
static const struct intel_limit intel_limits_i9xx_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1400000, .max = 2800000 },
	.n = { .min = 1, .max = 6 },
	.m = { .min = 70, .max = 120 },
	.m1 = { .min = 8, .max = 18 },
	.m2 = { .min = 3, .max = 7 },
	.p = { .min = 7, .max = 98 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 7 },
};
326
273e27ca 327
/* g4x DPLL limits for SDVO outputs. */
static const struct intel_limit intel_limits_g4x_sdvo = {
	.dot = { .min = 25000, .max = 270000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 10, .max = 30 },
	.p1 = { .min = 1, .max = 3},
	.p2 = { .dot_limit = 270000,
		.p2_slow = 10,
		.p2_fast = 10
	},
};

/* g4x DPLL limits for HDMI outputs. */
static const struct intel_limit intel_limits_g4x_hdmi = {
	.dot = { .min = 22000, .max = 400000 },
	.vco = { .min = 1750000, .max = 3500000},
	.n = { .min = 1, .max = 4 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 16, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8},
	.p2 = { .dot_limit = 165000,
		.p2_slow = 10, .p2_fast = 5 },
};

/* g4x DPLL limits for single-channel LVDS (p2 is always /14). */
static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
	.dot = { .min = 20000, .max = 115000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 14, .p2_fast = 14
	},
};

/* g4x DPLL limits for dual-channel LVDS (p2 is always /7). */
static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
	.dot = { .min = 80000, .max = 224000 },
	.vco = { .min = 1750000, .max = 3500000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 104, .max = 138 },
	.m1 = { .min = 17, .max = 23 },
	.m2 = { .min = 5, .max = 11 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 0,
		.p2_slow = 7, .p2_fast = 7
	},
};
383
/* Pineview DPLL limits for SDVO outputs. */
static const struct intel_limit intel_limits_pineview_sdvo = {
	.dot = { .min = 20000, .max = 400000},
	.vco = { .min = 1700000, .max = 3500000 },
	/* Pineview's Ncounter is a ring counter */
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	/* Pineview only has one combined m divider, which we treat as m2. */
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 200000,
		.p2_slow = 10, .p2_fast = 5 },
};

/* Pineview DPLL limits for LVDS outputs. */
static const struct intel_limit intel_limits_pineview_lvds = {
	.dot = { .min = 20000, .max = 400000 },
	.vco = { .min = 1700000, .max = 3500000 },
	.n = { .min = 3, .max = 6 },
	.m = { .min = 2, .max = 256 },
	.m1 = { .min = 0, .max = 0 },
	.m2 = { .min = 0, .max = 254 },
	.p = { .min = 7, .max = 112 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 112000,
		.p2_slow = 14, .p2_fast = 14 },
};
411
/* Ironlake / Sandybridge
 *
 * We calculate clock using (register_value + 2) for N/M1/M2, so here
 * the range value for them is (actual_value - 2).
 */
static const struct intel_limit intel_limits_ironlake_dac = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 5 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 5, .max = 80 },
	.p1 = { .min = 1, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 10, .p2_fast = 5 },
};

/* Ironlake single-channel LVDS limits (p2 always /14). */
static const struct intel_limit intel_limits_ironlake_single_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 118 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

/* Ironlake dual-channel LVDS limits (p2 always /7). */
static const struct intel_limit intel_limits_ironlake_dual_lvds = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 127 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 56 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};

/* LVDS 100mhz refclk limits. */
static const struct intel_limit intel_limits_ironlake_single_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 2 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 28, .max = 112 },
	.p1 = { .min = 2, .max = 8 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 14, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_ironlake_dual_lvds_100m = {
	.dot = { .min = 25000, .max = 350000 },
	.vco = { .min = 1760000, .max = 3510000 },
	.n = { .min = 1, .max = 3 },
	.m = { .min = 79, .max = 126 },
	.m1 = { .min = 12, .max = 22 },
	.m2 = { .min = 5, .max = 9 },
	.p = { .min = 14, .max = 42 },
	.p1 = { .min = 2, .max = 6 },
	.p2 = { .dot_limit = 225000,
		.p2_slow = 7, .p2_fast = 7 },
};
482
static const struct intel_limit intel_limits_vlv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have. The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 270000 * 5 },
	.vco = { .min = 4000000, .max = 6000000 },
	.n = { .min = 1, .max = 7 },
	.m1 = { .min = 2, .max = 3 },
	.m2 = { .min = 11, .max = 156 },
	.p1 = { .min = 2, .max = 3 },
	.p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
};

static const struct intel_limit intel_limits_chv = {
	/*
	 * These are the data rate limits (measured in fast clocks)
	 * since those are the strictest limits we have. The fast
	 * clock and actual rate limits are more relaxed, so checking
	 * them would make no difference.
	 */
	.dot = { .min = 25000 * 5, .max = 540000 * 5},
	.vco = { .min = 4800000, .max = 6480000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* CHV's m2 is a fixed-point value (integer part shifted by 22). */
	.m2 = { .min = 24 << 22, .max = 175 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 14 },
};

static const struct intel_limit intel_limits_bxt = {
	/* FIXME: find real dot limits */
	.dot = { .min = 0, .max = INT_MAX },
	.vco = { .min = 4800000, .max = 6700000 },
	.n = { .min = 1, .max = 1 },
	.m1 = { .min = 2, .max = 2 },
	/* FIXME: find real m2 limits */
	.m2 = { .min = 2 << 22, .max = 255 << 22 },
	.p1 = { .min = 2, .max = 4 },
	.p2 = { .p2_slow = 1, .p2_fast = 20 },
};
526
/* Thin wrapper: does this CRTC state require a full modeset? */
static bool
needs_modeset(struct drm_crtc_state *state)
{
	return drm_atomic_crtc_needs_modeset(state);
}
532
e0638cdf
PZ
533/**
534 * Returns whether any output on the specified pipe is of the specified type
535 */
4093561b 536bool intel_pipe_has_type(struct intel_crtc *crtc, enum intel_output_type type)
e0638cdf 537{
409ee761 538 struct drm_device *dev = crtc->base.dev;
e0638cdf
PZ
539 struct intel_encoder *encoder;
540
409ee761 541 for_each_encoder_on_crtc(dev, &crtc->base, encoder)
e0638cdf
PZ
542 if (encoder->type == type)
543 return true;
544
545 return false;
546}
547
d0737e1d
ACO
548/**
549 * Returns whether any output on the specified pipe will have the specified
550 * type after a staged modeset is complete, i.e., the same as
551 * intel_pipe_has_type() but looking at encoder->new_crtc instead of
552 * encoder->crtc.
553 */
a93e255f
ACO
554static bool intel_pipe_will_have_type(const struct intel_crtc_state *crtc_state,
555 int type)
d0737e1d 556{
a93e255f 557 struct drm_atomic_state *state = crtc_state->base.state;
da3ced29 558 struct drm_connector *connector;
a93e255f 559 struct drm_connector_state *connector_state;
d0737e1d 560 struct intel_encoder *encoder;
a93e255f
ACO
561 int i, num_connectors = 0;
562
da3ced29 563 for_each_connector_in_state(state, connector, connector_state, i) {
a93e255f
ACO
564 if (connector_state->crtc != crtc_state->base.crtc)
565 continue;
566
567 num_connectors++;
d0737e1d 568
a93e255f
ACO
569 encoder = to_intel_encoder(connector_state->best_encoder);
570 if (encoder->type == type)
d0737e1d 571 return true;
a93e255f
ACO
572 }
573
574 WARN_ON(num_connectors == 0);
d0737e1d
ACO
575
576 return false;
577}
578
dccbea3b
ID
579/*
580 * Platform specific helpers to calculate the port PLL loopback- (clock.m),
581 * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
582 * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
583 * The helpers' return value is the rate of the clock that is fed to the
584 * display engine's pipe which can be the above fast dot clock rate or a
585 * divided-down version of it.
586 */
f2b115e6 587/* m1 is reserved as 0 in Pineview, n is a ring counter */
9e2c8475 588static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
79e53945 589{
2177832f
SL
590 clock->m = clock->m2 + 2;
591 clock->p = clock->p1 * clock->p2;
ed5ca77e 592 if (WARN_ON(clock->n == 0 || clock->p == 0))
dccbea3b 593 return 0;
fb03ac01
VS
594 clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
595 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
dccbea3b
ID
596
597 return clock->dot;
2177832f
SL
598}
599
7429e9d4
DV
600static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
601{
602 return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
603}
604
/*
 * i9xx flavour of the dpll parameter calculation; N is stored as
 * (register value), so (n + 2) is the real divider.  Returns the pipe
 * clock rate (== dot clock here), or 0 on divider misconfiguration.
 */
static int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
{
	clock->m = i9xx_dpll_compute_m(clock);
	clock->p = clock->p1 * clock->p2;
	if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
		return 0;
	clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
	clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);

	return clock->dot;
}
616
9e2c8475 617static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
589eca67
ID
618{
619 clock->m = clock->m1 * clock->m2;
620 clock->p = clock->p1 * clock->p2;
621 if (WARN_ON(clock->n == 0 || clock->p == 0))
dccbea3b 622 return 0;
589eca67
ID
623 clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
624 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
dccbea3b
ID
625
626 return clock->dot / 5;
589eca67
ID
627}
628
9e2c8475 629int chv_calc_dpll_params(int refclk, struct dpll *clock)
ef9348c8
CML
630{
631 clock->m = clock->m1 * clock->m2;
632 clock->p = clock->p1 * clock->p2;
633 if (WARN_ON(clock->n == 0 || clock->p == 0))
dccbea3b 634 return 0;
ef9348c8
CML
635 clock->vco = DIV_ROUND_CLOSEST_ULL((uint64_t)refclk * clock->m,
636 clock->n << 22);
637 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
dccbea3b
ID
638
639 return clock->dot / 5;
ef9348c8
CML
640}
641
/* NOTE: performs an early return from the *caller* on an invalid divisor. */
#define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0)
/**
 * Returns whether the given set of divisors are valid for a given refclk with
 * the given connectors.
 */

static bool intel_PLL_is_valid(struct drm_device *dev,
			       const struct intel_limit *limit,
			       const struct dpll *clock)
{
	if (clock->n < limit->n.min || limit->n.max < clock->n)
		INTELPllInvalid("n out of range\n");
	if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
		INTELPllInvalid("p1 out of range\n");
	if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
		INTELPllInvalid("m2 out of range\n");
	if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
		INTELPllInvalid("m1 out of range\n");

	/* m1 must exceed m2 except on platforms with a combined m divider. */
	if (!IS_PINEVIEW(dev) && !IS_VALLEYVIEW(dev) &&
	    !IS_CHERRYVIEW(dev) && !IS_BROXTON(dev))
		if (clock->m1 <= clock->m2)
			INTELPllInvalid("m1 <= m2\n");

	/* VLV/CHV/BXT limit tables do not populate .m and .p ranges. */
	if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) && !IS_BROXTON(dev)) {
		if (clock->p < limit->p.min || limit->p.max < clock->p)
			INTELPllInvalid("p out of range\n");
		if (clock->m < limit->m.min || limit->m.max < clock->m)
			INTELPllInvalid("m out of range\n");
	}

	if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
		INTELPllInvalid("vco out of range\n");
	/* XXX: We may need to be checking "Dot clock" depending on the multiplier,
	 * connector, etc., rather than just a single range.
	 */
	if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
		INTELPllInvalid("dot out of range\n");

	return true;
}
683
3b1429d9 684static int
1b6f4958 685i9xx_select_p2_div(const struct intel_limit *limit,
3b1429d9
VS
686 const struct intel_crtc_state *crtc_state,
687 int target)
79e53945 688{
3b1429d9 689 struct drm_device *dev = crtc_state->base.crtc->dev;
79e53945 690
a93e255f 691 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
79e53945 692 /*
a210b028
DV
693 * For LVDS just rely on its current settings for dual-channel.
694 * We haven't figured out how to reliably set up different
695 * single/dual channel state, if we even can.
79e53945 696 */
1974cad0 697 if (intel_is_dual_link_lvds(dev))
3b1429d9 698 return limit->p2.p2_fast;
79e53945 699 else
3b1429d9 700 return limit->p2.p2_slow;
79e53945
JB
701 } else {
702 if (target < limit->p2.dot_limit)
3b1429d9 703 return limit->p2.p2_slow;
79e53945 704 else
3b1429d9 705 return limit->p2.p2_fast;
79e53945 706 }
3b1429d9
VS
707}
708
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE. The returned values represent the clock equation:
 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
i9xx_find_best_dpll(const struct intel_limit *limit,
		    struct intel_crtc_state *crtc_state,
		    int target, int refclk, struct dpll *match_clock,
		    struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	struct dpll clock;
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	/* Exhaustive search over m1/m2/n/p1, keeping the smallest dot error. */
	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			/* m2 must stay below m1 (see intel_PLL_is_valid) */
			if (clock.m2 >= clock.m1)
				break;
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
				     clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	/* err was initialized to target, so any improvement means a hit. */
	return (err != target);
}
765
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE. The returned values represent the clock equation:
 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
pnv_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	struct dpll clock;
	int err = target;

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	/*
	 * Same exhaustive search as i9xx_find_best_dpll(), but without the
	 * m2 < m1 restriction (Pineview has a single combined m divider)
	 * and using the pnv dpll math.
	 */
	for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
	     clock.m1++) {
		for (clock.m2 = limit->m2.min;
		     clock.m2 <= limit->m2.max; clock.m2++) {
			for (clock.n = limit->n.min;
			     clock.n <= limit->n.max; clock.n++) {
				for (clock.p1 = limit->p1.min;
				     clock.p1 <= limit->p1.max; clock.p1++) {
					int this_err;

					pnv_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;
					if (match_clock &&
					    clock.p != match_clock->p)
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err) {
						*best_clock = clock;
						err = this_err;
					}
				}
			}
		}
	}

	/* err was initialized to target, so any improvement means a hit. */
	return (err != target);
}
820
/*
 * Returns a set of divisors for the desired target clock with the given
 * refclk, or FALSE. The returned values represent the clock equation:
 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
 *
 * Target and reference clocks are specified in kHz.
 *
 * If match_clock is provided, then best_clock P divider must match the P
 * divider from @match_clock used for LVDS downclocking.
 */
static bool
g4x_find_best_dpll(const struct intel_limit *limit,
		   struct intel_crtc_state *crtc_state,
		   int target, int refclk, struct dpll *match_clock,
		   struct dpll *best_clock)
{
	struct drm_device *dev = crtc_state->base.crtc->dev;
	struct dpll clock;
	int max_n;
	bool found = false;
	/* approximately equals target * 0.00585 */
	int err_most = (target >> 8) + (target >> 9);

	memset(best_clock, 0, sizeof(*best_clock));

	clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);

	/* NOTE(review): match_clock is accepted but not used in this variant. */
	max_n = limit->n.max;
	/* based on hardware requirement, prefer smaller n to precision */
	for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
		/* based on hardware requirement, prefer larger m1,m2 */
		for (clock.m1 = limit->m1.max;
		     clock.m1 >= limit->m1.min; clock.m1--) {
			for (clock.m2 = limit->m2.max;
			     clock.m2 >= limit->m2.min; clock.m2--) {
				for (clock.p1 = limit->p1.max;
				     clock.p1 >= limit->p1.min; clock.p1--) {
					int this_err;

					i9xx_calc_dpll_params(refclk, &clock);
					if (!intel_PLL_is_valid(dev, limit,
								&clock))
						continue;

					this_err = abs(clock.dot - target);
					if (this_err < err_most) {
						*best_clock = clock;
						err_most = this_err;
						/* don't search larger n than the best hit's */
						max_n = clock.n;
						found = true;
					}
				}
			}
		}
	}
	return found;
}
878
d5dd62bd
ID
879/*
880 * Check if the calculated PLL configuration is more optimal compared to the
881 * best configuration and error found so far. Return the calculated error.
882 */
883static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
9e2c8475
ACO
884 const struct dpll *calculated_clock,
885 const struct dpll *best_clock,
d5dd62bd
ID
886 unsigned int best_error_ppm,
887 unsigned int *error_ppm)
888{
9ca3ba01
ID
889 /*
890 * For CHV ignore the error and consider only the P value.
891 * Prefer a bigger P value based on HW requirements.
892 */
893 if (IS_CHERRYVIEW(dev)) {
894 *error_ppm = 0;
895
896 return calculated_clock->p > best_clock->p;
897 }
898
24be4e46
ID
899 if (WARN_ON_ONCE(!target_freq))
900 return false;
901
d5dd62bd
ID
902 *error_ppm = div_u64(1000000ULL *
903 abs(target_freq - calculated_clock->dot),
904 target_freq);
905 /*
906 * Prefer a better P value over a better (smaller) error if the error
907 * is small. Ensure this preference for future configurations too by
908 * setting the error to 0.
909 */
910 if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
911 *error_ppm = 0;
912
913 return true;
914 }
915
916 return *error_ppm + 10 < best_error_ppm;
917}
918
65b3d6a9
ACO
919/*
920 * Returns a set of divisors for the desired target clock with the given
921 * refclk, or FALSE. The returned values represent the clock equation:
922 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
923 */
a0c4da24 924static bool
1b6f4958 925vlv_find_best_dpll(const struct intel_limit *limit,
a93e255f 926 struct intel_crtc_state *crtc_state,
9e2c8475
ACO
927 int target, int refclk, struct dpll *match_clock,
928 struct dpll *best_clock)
a0c4da24 929{
a93e255f 930 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
a919ff14 931 struct drm_device *dev = crtc->base.dev;
9e2c8475 932 struct dpll clock;
69e4f900 933 unsigned int bestppm = 1000000;
27e639bf
VS
934 /* min update 19.2 MHz */
935 int max_n = min(limit->n.max, refclk / 19200);
49e497ef 936 bool found = false;
a0c4da24 937
6b4bf1c4
VS
938 target *= 5; /* fast clock */
939
940 memset(best_clock, 0, sizeof(*best_clock));
a0c4da24
JB
941
942 /* based on hardware requirement, prefer smaller n to precision */
27e639bf 943 for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
811bbf05 944 for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
889059d8 945 for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
c1a9ae43 946 clock.p2 -= clock.p2 > 10 ? 2 : 1) {
6b4bf1c4 947 clock.p = clock.p1 * clock.p2;
a0c4da24 948 /* based on hardware requirement, prefer bigger m1,m2 values */
6b4bf1c4 949 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
d5dd62bd 950 unsigned int ppm;
69e4f900 951
6b4bf1c4
VS
952 clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
953 refclk * clock.m1);
954
dccbea3b 955 vlv_calc_dpll_params(refclk, &clock);
43b0ac53 956
f01b7962
VS
957 if (!intel_PLL_is_valid(dev, limit,
958 &clock))
43b0ac53
VS
959 continue;
960
d5dd62bd
ID
961 if (!vlv_PLL_is_optimal(dev, target,
962 &clock,
963 best_clock,
964 bestppm, &ppm))
965 continue;
6b4bf1c4 966
d5dd62bd
ID
967 *best_clock = clock;
968 bestppm = ppm;
969 found = true;
a0c4da24
JB
970 }
971 }
972 }
973 }
a0c4da24 974
49e497ef 975 return found;
a0c4da24 976}
a4fc5ed6 977
65b3d6a9
ACO
978/*
979 * Returns a set of divisors for the desired target clock with the given
980 * refclk, or FALSE. The returned values represent the clock equation:
981 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
982 */
ef9348c8 983static bool
1b6f4958 984chv_find_best_dpll(const struct intel_limit *limit,
a93e255f 985 struct intel_crtc_state *crtc_state,
9e2c8475
ACO
986 int target, int refclk, struct dpll *match_clock,
987 struct dpll *best_clock)
ef9348c8 988{
a93e255f 989 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
a919ff14 990 struct drm_device *dev = crtc->base.dev;
9ca3ba01 991 unsigned int best_error_ppm;
9e2c8475 992 struct dpll clock;
ef9348c8
CML
993 uint64_t m2;
994 int found = false;
995
996 memset(best_clock, 0, sizeof(*best_clock));
9ca3ba01 997 best_error_ppm = 1000000;
ef9348c8
CML
998
999 /*
1000 * Based on hardware doc, the n always set to 1, and m1 always
1001 * set to 2. If requires to support 200Mhz refclk, we need to
1002 * revisit this because n may not 1 anymore.
1003 */
1004 clock.n = 1, clock.m1 = 2;
1005 target *= 5; /* fast clock */
1006
1007 for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
1008 for (clock.p2 = limit->p2.p2_fast;
1009 clock.p2 >= limit->p2.p2_slow;
1010 clock.p2 -= clock.p2 > 10 ? 2 : 1) {
9ca3ba01 1011 unsigned int error_ppm;
ef9348c8
CML
1012
1013 clock.p = clock.p1 * clock.p2;
1014
1015 m2 = DIV_ROUND_CLOSEST_ULL(((uint64_t)target * clock.p *
1016 clock.n) << 22, refclk * clock.m1);
1017
1018 if (m2 > INT_MAX/clock.m1)
1019 continue;
1020
1021 clock.m2 = m2;
1022
dccbea3b 1023 chv_calc_dpll_params(refclk, &clock);
ef9348c8
CML
1024
1025 if (!intel_PLL_is_valid(dev, limit, &clock))
1026 continue;
1027
9ca3ba01
ID
1028 if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
1029 best_error_ppm, &error_ppm))
1030 continue;
1031
1032 *best_clock = clock;
1033 best_error_ppm = error_ppm;
1034 found = true;
ef9348c8
CML
1035 }
1036 }
1037
1038 return found;
1039}
1040
5ab7b0b7 1041bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
9e2c8475 1042 struct dpll *best_clock)
5ab7b0b7 1043{
65b3d6a9 1044 int refclk = 100000;
1b6f4958 1045 const struct intel_limit *limit = &intel_limits_bxt;
5ab7b0b7 1046
65b3d6a9 1047 return chv_find_best_dpll(limit, crtc_state,
5ab7b0b7
ID
1048 target_clock, refclk, NULL, best_clock);
1049}
1050
20ddf665
VS
1051bool intel_crtc_active(struct drm_crtc *crtc)
1052{
1053 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1054
1055 /* Be paranoid as we can arrive here with only partial
1056 * state retrieved from the hardware during setup.
1057 *
241bfc38 1058 * We can ditch the adjusted_mode.crtc_clock check as soon
20ddf665
VS
1059 * as Haswell has gained clock readout/fastboot support.
1060 *
66e514c1 1061 * We can ditch the crtc->primary->fb check as soon as we can
20ddf665 1062 * properly reconstruct framebuffers.
c3d1f436
MR
1063 *
1064 * FIXME: The intel_crtc->active here should be switched to
1065 * crtc->state->active once we have proper CRTC states wired up
1066 * for atomic.
20ddf665 1067 */
c3d1f436 1068 return intel_crtc->active && crtc->primary->state->fb &&
6e3c9717 1069 intel_crtc->config->base.adjusted_mode.crtc_clock;
20ddf665
VS
1070}
1071
a5c961d1
PZ
1072enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
1073 enum pipe pipe)
1074{
1075 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
1076 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1077
6e3c9717 1078 return intel_crtc->config->cpu_transcoder;
a5c961d1
PZ
1079}
1080
fbf49ea2
VS
1081static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
1082{
1083 struct drm_i915_private *dev_priv = dev->dev_private;
f0f59a00 1084 i915_reg_t reg = PIPEDSL(pipe);
fbf49ea2
VS
1085 u32 line1, line2;
1086 u32 line_mask;
1087
1088 if (IS_GEN2(dev))
1089 line_mask = DSL_LINEMASK_GEN2;
1090 else
1091 line_mask = DSL_LINEMASK_GEN3;
1092
1093 line1 = I915_READ(reg) & line_mask;
6adfb1ef 1094 msleep(5);
fbf49ea2
VS
1095 line2 = I915_READ(reg) & line_mask;
1096
1097 return line1 == line2;
1098}
1099
ab7ad7f6
KP
1100/*
1101 * intel_wait_for_pipe_off - wait for pipe to turn off
575f7ab7 1102 * @crtc: crtc whose pipe to wait for
9d0498a2
JB
1103 *
1104 * After disabling a pipe, we can't wait for vblank in the usual way,
1105 * spinning on the vblank interrupt status bit, since we won't actually
1106 * see an interrupt when the pipe is disabled.
1107 *
ab7ad7f6
KP
1108 * On Gen4 and above:
1109 * wait for the pipe register state bit to turn off
1110 *
1111 * Otherwise:
1112 * wait for the display line value to settle (it usually
1113 * ends up stopping at the start of the next frame).
58e10eb9 1114 *
9d0498a2 1115 */
575f7ab7 1116static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
9d0498a2 1117{
575f7ab7 1118 struct drm_device *dev = crtc->base.dev;
9d0498a2 1119 struct drm_i915_private *dev_priv = dev->dev_private;
6e3c9717 1120 enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
575f7ab7 1121 enum pipe pipe = crtc->pipe;
ab7ad7f6
KP
1122
1123 if (INTEL_INFO(dev)->gen >= 4) {
f0f59a00 1124 i915_reg_t reg = PIPECONF(cpu_transcoder);
ab7ad7f6
KP
1125
1126 /* Wait for the Pipe State to go off */
b8511f53
CW
1127 if (intel_wait_for_register(dev_priv,
1128 reg, I965_PIPECONF_ACTIVE, 0,
1129 100))
284637d9 1130 WARN(1, "pipe_off wait timed out\n");
ab7ad7f6 1131 } else {
ab7ad7f6 1132 /* Wait for the display line to settle */
fbf49ea2 1133 if (wait_for(pipe_dsl_stopped(dev, pipe), 100))
284637d9 1134 WARN(1, "pipe_off wait timed out\n");
ab7ad7f6 1135 }
79e53945
JB
1136}
1137
b24e7179 1138/* Only for pre-ILK configs */
55607e8a
DV
1139void assert_pll(struct drm_i915_private *dev_priv,
1140 enum pipe pipe, bool state)
b24e7179 1141{
b24e7179
JB
1142 u32 val;
1143 bool cur_state;
1144
649636ef 1145 val = I915_READ(DPLL(pipe));
b24e7179 1146 cur_state = !!(val & DPLL_VCO_ENABLE);
e2c719b7 1147 I915_STATE_WARN(cur_state != state,
b24e7179 1148 "PLL state assertion failure (expected %s, current %s)\n",
87ad3212 1149 onoff(state), onoff(cur_state));
b24e7179 1150}
b24e7179 1151
23538ef1 1152/* XXX: the dsi pll is shared between MIPI DSI ports */
8563b1e8 1153void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
23538ef1
JN
1154{
1155 u32 val;
1156 bool cur_state;
1157
a580516d 1158 mutex_lock(&dev_priv->sb_lock);
23538ef1 1159 val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
a580516d 1160 mutex_unlock(&dev_priv->sb_lock);
23538ef1
JN
1161
1162 cur_state = val & DSI_PLL_VCO_EN;
e2c719b7 1163 I915_STATE_WARN(cur_state != state,
23538ef1 1164 "DSI PLL state assertion failure (expected %s, current %s)\n",
87ad3212 1165 onoff(state), onoff(cur_state));
23538ef1 1166}
23538ef1 1167
040484af
JB
1168static void assert_fdi_tx(struct drm_i915_private *dev_priv,
1169 enum pipe pipe, bool state)
1170{
040484af 1171 bool cur_state;
ad80a810
PZ
1172 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1173 pipe);
040484af 1174
2d1fe073 1175 if (HAS_DDI(dev_priv)) {
affa9354 1176 /* DDI does not have a specific FDI_TX register */
649636ef 1177 u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
ad80a810 1178 cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
bf507ef7 1179 } else {
649636ef 1180 u32 val = I915_READ(FDI_TX_CTL(pipe));
bf507ef7
ED
1181 cur_state = !!(val & FDI_TX_ENABLE);
1182 }
e2c719b7 1183 I915_STATE_WARN(cur_state != state,
040484af 1184 "FDI TX state assertion failure (expected %s, current %s)\n",
87ad3212 1185 onoff(state), onoff(cur_state));
040484af
JB
1186}
1187#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
1188#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
1189
1190static void assert_fdi_rx(struct drm_i915_private *dev_priv,
1191 enum pipe pipe, bool state)
1192{
040484af
JB
1193 u32 val;
1194 bool cur_state;
1195
649636ef 1196 val = I915_READ(FDI_RX_CTL(pipe));
d63fa0dc 1197 cur_state = !!(val & FDI_RX_ENABLE);
e2c719b7 1198 I915_STATE_WARN(cur_state != state,
040484af 1199 "FDI RX state assertion failure (expected %s, current %s)\n",
87ad3212 1200 onoff(state), onoff(cur_state));
040484af
JB
1201}
1202#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
1203#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
1204
1205static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
1206 enum pipe pipe)
1207{
040484af
JB
1208 u32 val;
1209
1210 /* ILK FDI PLL is always enabled */
7e22dbbb 1211 if (IS_GEN5(dev_priv))
040484af
JB
1212 return;
1213
bf507ef7 1214 /* On Haswell, DDI ports are responsible for the FDI PLL setup */
2d1fe073 1215 if (HAS_DDI(dev_priv))
bf507ef7
ED
1216 return;
1217
649636ef 1218 val = I915_READ(FDI_TX_CTL(pipe));
e2c719b7 1219 I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
040484af
JB
1220}
1221
55607e8a
DV
1222void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
1223 enum pipe pipe, bool state)
040484af 1224{
040484af 1225 u32 val;
55607e8a 1226 bool cur_state;
040484af 1227
649636ef 1228 val = I915_READ(FDI_RX_CTL(pipe));
55607e8a 1229 cur_state = !!(val & FDI_RX_PLL_ENABLE);
e2c719b7 1230 I915_STATE_WARN(cur_state != state,
55607e8a 1231 "FDI RX PLL assertion failure (expected %s, current %s)\n",
87ad3212 1232 onoff(state), onoff(cur_state));
040484af
JB
1233}
1234
b680c37a
DV
1235void assert_panel_unlocked(struct drm_i915_private *dev_priv,
1236 enum pipe pipe)
ea0760cf 1237{
bedd4dba 1238 struct drm_device *dev = dev_priv->dev;
f0f59a00 1239 i915_reg_t pp_reg;
ea0760cf
JB
1240 u32 val;
1241 enum pipe panel_pipe = PIPE_A;
0de3b485 1242 bool locked = true;
ea0760cf 1243
bedd4dba
JN
1244 if (WARN_ON(HAS_DDI(dev)))
1245 return;
1246
1247 if (HAS_PCH_SPLIT(dev)) {
1248 u32 port_sel;
1249
ea0760cf 1250 pp_reg = PCH_PP_CONTROL;
bedd4dba
JN
1251 port_sel = I915_READ(PCH_PP_ON_DELAYS) & PANEL_PORT_SELECT_MASK;
1252
1253 if (port_sel == PANEL_PORT_SELECT_LVDS &&
1254 I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT)
1255 panel_pipe = PIPE_B;
1256 /* XXX: else fix for eDP */
666a4537 1257 } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
bedd4dba
JN
1258 /* presumably write lock depends on pipe, not port select */
1259 pp_reg = VLV_PIPE_PP_CONTROL(pipe);
1260 panel_pipe = pipe;
ea0760cf
JB
1261 } else {
1262 pp_reg = PP_CONTROL;
bedd4dba
JN
1263 if (I915_READ(LVDS) & LVDS_PIPEB_SELECT)
1264 panel_pipe = PIPE_B;
ea0760cf
JB
1265 }
1266
1267 val = I915_READ(pp_reg);
1268 if (!(val & PANEL_POWER_ON) ||
ec49ba2d 1269 ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
ea0760cf
JB
1270 locked = false;
1271
e2c719b7 1272 I915_STATE_WARN(panel_pipe == pipe && locked,
ea0760cf 1273 "panel assertion failure, pipe %c regs locked\n",
9db4a9c7 1274 pipe_name(pipe));
ea0760cf
JB
1275}
1276
93ce0ba6
JN
1277static void assert_cursor(struct drm_i915_private *dev_priv,
1278 enum pipe pipe, bool state)
1279{
1280 struct drm_device *dev = dev_priv->dev;
1281 bool cur_state;
1282
d9d82081 1283 if (IS_845G(dev) || IS_I865G(dev))
0b87c24e 1284 cur_state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
d9d82081 1285 else
5efb3e28 1286 cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
93ce0ba6 1287
e2c719b7 1288 I915_STATE_WARN(cur_state != state,
93ce0ba6 1289 "cursor on pipe %c assertion failure (expected %s, current %s)\n",
87ad3212 1290 pipe_name(pipe), onoff(state), onoff(cur_state));
93ce0ba6
JN
1291}
1292#define assert_cursor_enabled(d, p) assert_cursor(d, p, true)
1293#define assert_cursor_disabled(d, p) assert_cursor(d, p, false)
1294
b840d907
JB
1295void assert_pipe(struct drm_i915_private *dev_priv,
1296 enum pipe pipe, bool state)
b24e7179 1297{
63d7bbe9 1298 bool cur_state;
702e7a56
PZ
1299 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1300 pipe);
4feed0eb 1301 enum intel_display_power_domain power_domain;
b24e7179 1302
b6b5d049
VS
1303 /* if we need the pipe quirk it must be always on */
1304 if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
1305 (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
8e636784
DV
1306 state = true;
1307
4feed0eb
ID
1308 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
1309 if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
649636ef 1310 u32 val = I915_READ(PIPECONF(cpu_transcoder));
69310161 1311 cur_state = !!(val & PIPECONF_ENABLE);
4feed0eb
ID
1312
1313 intel_display_power_put(dev_priv, power_domain);
1314 } else {
1315 cur_state = false;
69310161
PZ
1316 }
1317
e2c719b7 1318 I915_STATE_WARN(cur_state != state,
63d7bbe9 1319 "pipe %c assertion failure (expected %s, current %s)\n",
87ad3212 1320 pipe_name(pipe), onoff(state), onoff(cur_state));
b24e7179
JB
1321}
1322
931872fc
CW
1323static void assert_plane(struct drm_i915_private *dev_priv,
1324 enum plane plane, bool state)
b24e7179 1325{
b24e7179 1326 u32 val;
931872fc 1327 bool cur_state;
b24e7179 1328
649636ef 1329 val = I915_READ(DSPCNTR(plane));
931872fc 1330 cur_state = !!(val & DISPLAY_PLANE_ENABLE);
e2c719b7 1331 I915_STATE_WARN(cur_state != state,
931872fc 1332 "plane %c assertion failure (expected %s, current %s)\n",
87ad3212 1333 plane_name(plane), onoff(state), onoff(cur_state));
b24e7179
JB
1334}
1335
931872fc
CW
1336#define assert_plane_enabled(d, p) assert_plane(d, p, true)
1337#define assert_plane_disabled(d, p) assert_plane(d, p, false)
1338
b24e7179
JB
1339static void assert_planes_disabled(struct drm_i915_private *dev_priv,
1340 enum pipe pipe)
1341{
653e1026 1342 struct drm_device *dev = dev_priv->dev;
649636ef 1343 int i;
b24e7179 1344
653e1026
VS
1345 /* Primary planes are fixed to pipes on gen4+ */
1346 if (INTEL_INFO(dev)->gen >= 4) {
649636ef 1347 u32 val = I915_READ(DSPCNTR(pipe));
e2c719b7 1348 I915_STATE_WARN(val & DISPLAY_PLANE_ENABLE,
28c05794
AJ
1349 "plane %c assertion failure, should be disabled but not\n",
1350 plane_name(pipe));
19ec1358 1351 return;
28c05794 1352 }
19ec1358 1353
b24e7179 1354 /* Need to check both planes against the pipe */
055e393f 1355 for_each_pipe(dev_priv, i) {
649636ef
VS
1356 u32 val = I915_READ(DSPCNTR(i));
1357 enum pipe cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
b24e7179 1358 DISPPLANE_SEL_PIPE_SHIFT;
e2c719b7 1359 I915_STATE_WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
9db4a9c7
JB
1360 "plane %c assertion failure, should be off on pipe %c but is still active\n",
1361 plane_name(i), pipe_name(pipe));
b24e7179
JB
1362 }
1363}
1364
19332d7a
JB
1365static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
1366 enum pipe pipe)
1367{
20674eef 1368 struct drm_device *dev = dev_priv->dev;
649636ef 1369 int sprite;
19332d7a 1370
7feb8b88 1371 if (INTEL_INFO(dev)->gen >= 9) {
3bdcfc0c 1372 for_each_sprite(dev_priv, pipe, sprite) {
649636ef 1373 u32 val = I915_READ(PLANE_CTL(pipe, sprite));
e2c719b7 1374 I915_STATE_WARN(val & PLANE_CTL_ENABLE,
7feb8b88
DL
1375 "plane %d assertion failure, should be off on pipe %c but is still active\n",
1376 sprite, pipe_name(pipe));
1377 }
666a4537 1378 } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
3bdcfc0c 1379 for_each_sprite(dev_priv, pipe, sprite) {
649636ef 1380 u32 val = I915_READ(SPCNTR(pipe, sprite));
e2c719b7 1381 I915_STATE_WARN(val & SP_ENABLE,
20674eef 1382 "sprite %c assertion failure, should be off on pipe %c but is still active\n",
1fe47785 1383 sprite_name(pipe, sprite), pipe_name(pipe));
20674eef
VS
1384 }
1385 } else if (INTEL_INFO(dev)->gen >= 7) {
649636ef 1386 u32 val = I915_READ(SPRCTL(pipe));
e2c719b7 1387 I915_STATE_WARN(val & SPRITE_ENABLE,
06da8da2 1388 "sprite %c assertion failure, should be off on pipe %c but is still active\n",
20674eef
VS
1389 plane_name(pipe), pipe_name(pipe));
1390 } else if (INTEL_INFO(dev)->gen >= 5) {
649636ef 1391 u32 val = I915_READ(DVSCNTR(pipe));
e2c719b7 1392 I915_STATE_WARN(val & DVS_ENABLE,
06da8da2 1393 "sprite %c assertion failure, should be off on pipe %c but is still active\n",
20674eef 1394 plane_name(pipe), pipe_name(pipe));
19332d7a
JB
1395 }
1396}
1397
static void assert_vblank_disabled(struct drm_crtc *crtc)
{
	/*
	 * drm_crtc_vblank_get() returning 0 means vblank interrupts are
	 * still enabled on this crtc (the assertion failure); drop the
	 * reference we just took.
	 */
	if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
		drm_crtc_vblank_put(crtc);
}
1403
7abd4b35
ACO
1404void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
1405 enum pipe pipe)
92f2584a 1406{
92f2584a
JB
1407 u32 val;
1408 bool enabled;
1409
649636ef 1410 val = I915_READ(PCH_TRANSCONF(pipe));
92f2584a 1411 enabled = !!(val & TRANS_ENABLE);
e2c719b7 1412 I915_STATE_WARN(enabled,
9db4a9c7
JB
1413 "transcoder assertion failed, should be off on pipe %c but is still active\n",
1414 pipe_name(pipe));
92f2584a
JB
1415}
1416
4e634389
KP
1417static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
1418 enum pipe pipe, u32 port_sel, u32 val)
f0575e92
KP
1419{
1420 if ((val & DP_PORT_EN) == 0)
1421 return false;
1422
2d1fe073 1423 if (HAS_PCH_CPT(dev_priv)) {
f0f59a00 1424 u32 trans_dp_ctl = I915_READ(TRANS_DP_CTL(pipe));
f0575e92
KP
1425 if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
1426 return false;
2d1fe073 1427 } else if (IS_CHERRYVIEW(dev_priv)) {
44f37d1f
CML
1428 if ((val & DP_PIPE_MASK_CHV) != DP_PIPE_SELECT_CHV(pipe))
1429 return false;
f0575e92
KP
1430 } else {
1431 if ((val & DP_PIPE_MASK) != (pipe << 30))
1432 return false;
1433 }
1434 return true;
1435}
1436
1519b995
KP
1437static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
1438 enum pipe pipe, u32 val)
1439{
dc0fa718 1440 if ((val & SDVO_ENABLE) == 0)
1519b995
KP
1441 return false;
1442
2d1fe073 1443 if (HAS_PCH_CPT(dev_priv)) {
dc0fa718 1444 if ((val & SDVO_PIPE_SEL_MASK_CPT) != SDVO_PIPE_SEL_CPT(pipe))
1519b995 1445 return false;
2d1fe073 1446 } else if (IS_CHERRYVIEW(dev_priv)) {
44f37d1f
CML
1447 if ((val & SDVO_PIPE_SEL_MASK_CHV) != SDVO_PIPE_SEL_CHV(pipe))
1448 return false;
1519b995 1449 } else {
dc0fa718 1450 if ((val & SDVO_PIPE_SEL_MASK) != SDVO_PIPE_SEL(pipe))
1519b995
KP
1451 return false;
1452 }
1453 return true;
1454}
1455
1456static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
1457 enum pipe pipe, u32 val)
1458{
1459 if ((val & LVDS_PORT_EN) == 0)
1460 return false;
1461
2d1fe073 1462 if (HAS_PCH_CPT(dev_priv)) {
1519b995
KP
1463 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1464 return false;
1465 } else {
1466 if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
1467 return false;
1468 }
1469 return true;
1470}
1471
1472static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
1473 enum pipe pipe, u32 val)
1474{
1475 if ((val & ADPA_DAC_ENABLE) == 0)
1476 return false;
2d1fe073 1477 if (HAS_PCH_CPT(dev_priv)) {
1519b995
KP
1478 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1479 return false;
1480 } else {
1481 if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
1482 return false;
1483 }
1484 return true;
1485}
1486
291906f1 1487static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
f0f59a00
VS
1488 enum pipe pipe, i915_reg_t reg,
1489 u32 port_sel)
291906f1 1490{
47a05eca 1491 u32 val = I915_READ(reg);
e2c719b7 1492 I915_STATE_WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
291906f1 1493 "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
f0f59a00 1494 i915_mmio_reg_offset(reg), pipe_name(pipe));
de9a35ab 1495
2d1fe073 1496 I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && (val & DP_PORT_EN) == 0
75c5da27 1497 && (val & DP_PIPEB_SELECT),
de9a35ab 1498 "IBX PCH dp port still using transcoder B\n");
291906f1
JB
1499}
1500
1501static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
f0f59a00 1502 enum pipe pipe, i915_reg_t reg)
291906f1 1503{
47a05eca 1504 u32 val = I915_READ(reg);
e2c719b7 1505 I915_STATE_WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
23c99e77 1506 "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
f0f59a00 1507 i915_mmio_reg_offset(reg), pipe_name(pipe));
de9a35ab 1508
2d1fe073 1509 I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && (val & SDVO_ENABLE) == 0
75c5da27 1510 && (val & SDVO_PIPE_B_SELECT),
de9a35ab 1511 "IBX PCH hdmi port still using transcoder B\n");
291906f1
JB
1512}
1513
1514static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
1515 enum pipe pipe)
1516{
291906f1 1517 u32 val;
291906f1 1518
f0575e92
KP
1519 assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
1520 assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
1521 assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
291906f1 1522
649636ef 1523 val = I915_READ(PCH_ADPA);
e2c719b7 1524 I915_STATE_WARN(adpa_pipe_enabled(dev_priv, pipe, val),
291906f1 1525 "PCH VGA enabled on transcoder %c, should be disabled\n",
9db4a9c7 1526 pipe_name(pipe));
291906f1 1527
649636ef 1528 val = I915_READ(PCH_LVDS);
e2c719b7 1529 I915_STATE_WARN(lvds_pipe_enabled(dev_priv, pipe, val),
291906f1 1530 "PCH LVDS enabled on transcoder %c, should be disabled\n",
9db4a9c7 1531 pipe_name(pipe));
291906f1 1532
e2debe91
PZ
1533 assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIB);
1534 assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIC);
1535 assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
291906f1
JB
1536}
1537
cd2d34d9
VS
1538static void _vlv_enable_pll(struct intel_crtc *crtc,
1539 const struct intel_crtc_state *pipe_config)
1540{
1541 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1542 enum pipe pipe = crtc->pipe;
1543
1544 I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
1545 POSTING_READ(DPLL(pipe));
1546 udelay(150);
1547
2c30b43b
CW
1548 if (intel_wait_for_register(dev_priv,
1549 DPLL(pipe),
1550 DPLL_LOCK_VLV,
1551 DPLL_LOCK_VLV,
1552 1))
cd2d34d9
VS
1553 DRM_ERROR("DPLL %d failed to lock\n", pipe);
1554}
1555
d288f65f 1556static void vlv_enable_pll(struct intel_crtc *crtc,
5cec258b 1557 const struct intel_crtc_state *pipe_config)
87442f73 1558{
cd2d34d9 1559 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8bd3f301 1560 enum pipe pipe = crtc->pipe;
87442f73 1561
8bd3f301 1562 assert_pipe_disabled(dev_priv, pipe);
87442f73 1563
87442f73 1564 /* PLL is protected by panel, make sure we can write it */
7d1a83cb 1565 assert_panel_unlocked(dev_priv, pipe);
87442f73 1566
cd2d34d9
VS
1567 if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
1568 _vlv_enable_pll(crtc, pipe_config);
426115cf 1569
8bd3f301
VS
1570 I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
1571 POSTING_READ(DPLL_MD(pipe));
87442f73
DV
1572}
1573
cd2d34d9
VS
1574
1575static void _chv_enable_pll(struct intel_crtc *crtc,
1576 const struct intel_crtc_state *pipe_config)
9d556c99 1577{
cd2d34d9 1578 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8bd3f301 1579 enum pipe pipe = crtc->pipe;
9d556c99 1580 enum dpio_channel port = vlv_pipe_to_channel(pipe);
9d556c99
CML
1581 u32 tmp;
1582
a580516d 1583 mutex_lock(&dev_priv->sb_lock);
9d556c99
CML
1584
1585 /* Enable back the 10bit clock to display controller */
1586 tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
1587 tmp |= DPIO_DCLKP_EN;
1588 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);
1589
54433e91
VS
1590 mutex_unlock(&dev_priv->sb_lock);
1591
9d556c99
CML
1592 /*
1593 * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
1594 */
1595 udelay(1);
1596
1597 /* Enable PLL */
d288f65f 1598 I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
9d556c99
CML
1599
1600 /* Check PLL is locked */
a11b0703 1601 if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
9d556c99 1602 DRM_ERROR("PLL %d failed to lock\n", pipe);
cd2d34d9
VS
1603}
1604
1605static void chv_enable_pll(struct intel_crtc *crtc,
1606 const struct intel_crtc_state *pipe_config)
1607{
1608 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1609 enum pipe pipe = crtc->pipe;
1610
1611 assert_pipe_disabled(dev_priv, pipe);
1612
1613 /* PLL is protected by panel, make sure we can write it */
1614 assert_panel_unlocked(dev_priv, pipe);
1615
1616 if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
1617 _chv_enable_pll(crtc, pipe_config);
9d556c99 1618
c231775c
VS
1619 if (pipe != PIPE_A) {
1620 /*
1621 * WaPixelRepeatModeFixForC0:chv
1622 *
1623 * DPLLCMD is AWOL. Use chicken bits to propagate
1624 * the value from DPLLBMD to either pipe B or C.
1625 */
1626 I915_WRITE(CBR4_VLV, pipe == PIPE_B ? CBR_DPLLBMD_PIPE_B : CBR_DPLLBMD_PIPE_C);
1627 I915_WRITE(DPLL_MD(PIPE_B), pipe_config->dpll_hw_state.dpll_md);
1628 I915_WRITE(CBR4_VLV, 0);
1629 dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;
1630
1631 /*
1632 * DPLLB VGA mode also seems to cause problems.
1633 * We should always have it disabled.
1634 */
1635 WARN_ON((I915_READ(DPLL(PIPE_B)) & DPLL_VGA_MODE_DIS) == 0);
1636 } else {
1637 I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
1638 POSTING_READ(DPLL_MD(pipe));
1639 }
9d556c99
CML
1640}
1641
1c4e0274
VS
1642static int intel_num_dvo_pipes(struct drm_device *dev)
1643{
1644 struct intel_crtc *crtc;
1645 int count = 0;
1646
1647 for_each_intel_crtc(dev, crtc)
3538b9df 1648 count += crtc->base.state->active &&
409ee761 1649 intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO);
1c4e0274
VS
1650
1651 return count;
1652}
1653
66e3d5c0 1654static void i9xx_enable_pll(struct intel_crtc *crtc)
63d7bbe9 1655{
66e3d5c0
DV
1656 struct drm_device *dev = crtc->base.dev;
1657 struct drm_i915_private *dev_priv = dev->dev_private;
f0f59a00 1658 i915_reg_t reg = DPLL(crtc->pipe);
6e3c9717 1659 u32 dpll = crtc->config->dpll_hw_state.dpll;
63d7bbe9 1660
66e3d5c0 1661 assert_pipe_disabled(dev_priv, crtc->pipe);
58c6eaa2 1662
63d7bbe9 1663 /* PLL is protected by panel, make sure we can write it */
66e3d5c0
DV
1664 if (IS_MOBILE(dev) && !IS_I830(dev))
1665 assert_panel_unlocked(dev_priv, crtc->pipe);
63d7bbe9 1666
1c4e0274
VS
1667 /* Enable DVO 2x clock on both PLLs if necessary */
1668 if (IS_I830(dev) && intel_num_dvo_pipes(dev) > 0) {
1669 /*
1670 * It appears to be important that we don't enable this
1671 * for the current pipe before otherwise configuring the
1672 * PLL. No idea how this should be handled if multiple
1673 * DVO outputs are enabled simultaneosly.
1674 */
1675 dpll |= DPLL_DVO_2X_MODE;
1676 I915_WRITE(DPLL(!crtc->pipe),
1677 I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE);
1678 }
66e3d5c0 1679
c2b63374
VS
1680 /*
1681 * Apparently we need to have VGA mode enabled prior to changing
1682 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
1683 * dividers, even though the register value does change.
1684 */
1685 I915_WRITE(reg, 0);
1686
8e7a65aa
VS
1687 I915_WRITE(reg, dpll);
1688
66e3d5c0
DV
1689 /* Wait for the clocks to stabilize. */
1690 POSTING_READ(reg);
1691 udelay(150);
1692
1693 if (INTEL_INFO(dev)->gen >= 4) {
1694 I915_WRITE(DPLL_MD(crtc->pipe),
6e3c9717 1695 crtc->config->dpll_hw_state.dpll_md);
66e3d5c0
DV
1696 } else {
1697 /* The pixel multiplier can only be updated once the
1698 * DPLL is enabled and the clocks are stable.
1699 *
1700 * So write it again.
1701 */
1702 I915_WRITE(reg, dpll);
1703 }
63d7bbe9
JB
1704
1705 /* We do this three times for luck */
66e3d5c0 1706 I915_WRITE(reg, dpll);
63d7bbe9
JB
1707 POSTING_READ(reg);
1708 udelay(150); /* wait for warmup */
66e3d5c0 1709 I915_WRITE(reg, dpll);
63d7bbe9
JB
1710 POSTING_READ(reg);
1711 udelay(150); /* wait for warmup */
66e3d5c0 1712 I915_WRITE(reg, dpll);
63d7bbe9
JB
1713 POSTING_READ(reg);
1714 udelay(150); /* wait for warmup */
1715}
1716
/**
 * i9xx_disable_pll - disable a PLL
 * @crtc: crtc whose pipe PLL is to be disabled
 *
 * Disable the PLL for @crtc's pipe, making sure the pipe is off first.
 *
 * Note! This is for pre-ILK only.
 */
static void i9xx_disable_pll(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe = crtc->pipe;

	/*
	 * Disable DVO 2x clock on both PLLs if necessary: on i830 the DVO
	 * 2x mode bit was set on both pipes' DPLLs at enable time, so only
	 * clear it once no pipe drives a DVO output any more.
	 */
	if (IS_I830(dev) &&
	    intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO) &&
	    !intel_num_dvo_pipes(dev)) {
		I915_WRITE(DPLL(PIPE_B),
			   I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
		I915_WRITE(DPLL(PIPE_A),
			   I915_READ(DPLL(PIPE_A)) & ~DPLL_DVO_2X_MODE);
	}

	/* Don't disable pipe or pipe PLLs if needed (quirked BIOS setups) */
	if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
	    (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
		return;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	/* Keep VGA mode disable set while the PLL itself is turned off */
	I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
	POSTING_READ(DPLL(pipe));
}
1753
f6071166
JB
/*
 * Disable the pipe PLL on VLV, leaving the reference clock and CRI clock
 * (needed by the DPIO PHY on pipes B/C) running. Pre-ILK style DPLL.
 */
static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	val = DPLL_INTEGRATED_REF_CLK_VLV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	/* Leave integrated CRI clock enabled for DPIO on the other pipes */
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));
}
1769
/*
 * Disable the pipe PLL on CHV: turn off the DPLL (keeping the SSC
 * reference and CRI clocks alive) and then gate the 10-bit display
 * clock via the DPIO sideband.
 */
static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	val = DPLL_SSC_REF_CLK_CHV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	/* CRI clock stays on for DPIO on pipes other than A */
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));

	/* DPIO register access requires the sideband lock */
	mutex_lock(&dev_priv->sb_lock);

	/* Disable 10bit clock to display controller */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	val &= ~DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);

	mutex_unlock(&dev_priv->sb_lock);
}
1795
e4607fcf 1796void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
9b6de0a1
VS
1797 struct intel_digital_port *dport,
1798 unsigned int expected_mask)
89b667f8
JB
1799{
1800 u32 port_mask;
f0f59a00 1801 i915_reg_t dpll_reg;
89b667f8 1802
e4607fcf
CML
1803 switch (dport->port) {
1804 case PORT_B:
89b667f8 1805 port_mask = DPLL_PORTB_READY_MASK;
00fc31b7 1806 dpll_reg = DPLL(0);
e4607fcf
CML
1807 break;
1808 case PORT_C:
89b667f8 1809 port_mask = DPLL_PORTC_READY_MASK;
00fc31b7 1810 dpll_reg = DPLL(0);
9b6de0a1 1811 expected_mask <<= 4;
00fc31b7
CML
1812 break;
1813 case PORT_D:
1814 port_mask = DPLL_PORTD_READY_MASK;
1815 dpll_reg = DPIO_PHY_STATUS;
e4607fcf
CML
1816 break;
1817 default:
1818 BUG();
1819 }
89b667f8 1820
9b6de0a1
VS
1821 if (wait_for((I915_READ(dpll_reg) & port_mask) == expected_mask, 1000))
1822 WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n",
1823 port_name(dport->port), I915_READ(dpll_reg) & port_mask, expected_mask);
89b667f8
JB
1824}
1825
b8a4f404
PZ
1826static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
1827 enum pipe pipe)
040484af 1828{
23670b32 1829 struct drm_device *dev = dev_priv->dev;
7c26e5c6 1830 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
e2b78267 1831 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
f0f59a00
VS
1832 i915_reg_t reg;
1833 uint32_t val, pipeconf_val;
040484af 1834
040484af 1835 /* Make sure PCH DPLL is enabled */
8106ddbd 1836 assert_shared_dpll_enabled(dev_priv, intel_crtc->config->shared_dpll);
040484af
JB
1837
1838 /* FDI must be feeding us bits for PCH ports */
1839 assert_fdi_tx_enabled(dev_priv, pipe);
1840 assert_fdi_rx_enabled(dev_priv, pipe);
1841
23670b32
DV
1842 if (HAS_PCH_CPT(dev)) {
1843 /* Workaround: Set the timing override bit before enabling the
1844 * pch transcoder. */
1845 reg = TRANS_CHICKEN2(pipe);
1846 val = I915_READ(reg);
1847 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1848 I915_WRITE(reg, val);
59c859d6 1849 }
23670b32 1850
ab9412ba 1851 reg = PCH_TRANSCONF(pipe);
040484af 1852 val = I915_READ(reg);
5f7f726d 1853 pipeconf_val = I915_READ(PIPECONF(pipe));
e9bcff5c 1854
2d1fe073 1855 if (HAS_PCH_IBX(dev_priv)) {
e9bcff5c 1856 /*
c5de7c6f
VS
1857 * Make the BPC in transcoder be consistent with
1858 * that in pipeconf reg. For HDMI we must use 8bpc
1859 * here for both 8bpc and 12bpc.
e9bcff5c 1860 */
dfd07d72 1861 val &= ~PIPECONF_BPC_MASK;
c5de7c6f
VS
1862 if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_HDMI))
1863 val |= PIPECONF_8BPC;
1864 else
1865 val |= pipeconf_val & PIPECONF_BPC_MASK;
e9bcff5c 1866 }
5f7f726d
PZ
1867
1868 val &= ~TRANS_INTERLACE_MASK;
1869 if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
2d1fe073 1870 if (HAS_PCH_IBX(dev_priv) &&
409ee761 1871 intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
7c26e5c6
PZ
1872 val |= TRANS_LEGACY_INTERLACED_ILK;
1873 else
1874 val |= TRANS_INTERLACED;
5f7f726d
PZ
1875 else
1876 val |= TRANS_PROGRESSIVE;
1877
040484af
JB
1878 I915_WRITE(reg, val | TRANS_ENABLE);
1879 if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
4bb6f1f3 1880 DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
040484af
JB
1881}
1882
8fb033d7 1883static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
937bb610 1884 enum transcoder cpu_transcoder)
040484af 1885{
8fb033d7 1886 u32 val, pipeconf_val;
8fb033d7 1887
8fb033d7 1888 /* FDI must be feeding us bits for PCH ports */
1a240d4d 1889 assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
937bb610 1890 assert_fdi_rx_enabled(dev_priv, TRANSCODER_A);
8fb033d7 1891
223a6fdf 1892 /* Workaround: set timing override bit. */
36c0d0cf 1893 val = I915_READ(TRANS_CHICKEN2(PIPE_A));
23670b32 1894 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
36c0d0cf 1895 I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
223a6fdf 1896
25f3ef11 1897 val = TRANS_ENABLE;
937bb610 1898 pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));
8fb033d7 1899
9a76b1c6
PZ
1900 if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
1901 PIPECONF_INTERLACED_ILK)
a35f2679 1902 val |= TRANS_INTERLACED;
8fb033d7
PZ
1903 else
1904 val |= TRANS_PROGRESSIVE;
1905
ab9412ba
DV
1906 I915_WRITE(LPT_TRANSCONF, val);
1907 if (wait_for(I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE, 100))
937bb610 1908 DRM_ERROR("Failed to enable PCH transcoder\n");
8fb033d7
PZ
1909}
1910
b8a4f404
PZ
1911static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
1912 enum pipe pipe)
040484af 1913{
23670b32 1914 struct drm_device *dev = dev_priv->dev;
f0f59a00
VS
1915 i915_reg_t reg;
1916 uint32_t val;
040484af
JB
1917
1918 /* FDI relies on the transcoder */
1919 assert_fdi_tx_disabled(dev_priv, pipe);
1920 assert_fdi_rx_disabled(dev_priv, pipe);
1921
291906f1
JB
1922 /* Ports must be off as well */
1923 assert_pch_ports_disabled(dev_priv, pipe);
1924
ab9412ba 1925 reg = PCH_TRANSCONF(pipe);
040484af
JB
1926 val = I915_READ(reg);
1927 val &= ~TRANS_ENABLE;
1928 I915_WRITE(reg, val);
1929 /* wait for PCH transcoder off, transcoder state */
1930 if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
4bb6f1f3 1931 DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));
23670b32 1932
c465613b 1933 if (HAS_PCH_CPT(dev)) {
23670b32
DV
1934 /* Workaround: Clear the timing override chicken bit again. */
1935 reg = TRANS_CHICKEN2(pipe);
1936 val = I915_READ(reg);
1937 val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
1938 I915_WRITE(reg, val);
1939 }
040484af
JB
1940}
1941
ab4d966c 1942static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
8fb033d7 1943{
8fb033d7
PZ
1944 u32 val;
1945
ab9412ba 1946 val = I915_READ(LPT_TRANSCONF);
8fb033d7 1947 val &= ~TRANS_ENABLE;
ab9412ba 1948 I915_WRITE(LPT_TRANSCONF, val);
8fb033d7 1949 /* wait for PCH transcoder off, transcoder state */
ab9412ba 1950 if (wait_for((I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE) == 0, 50))
8a52fd9f 1951 DRM_ERROR("Failed to disable PCH transcoder\n");
223a6fdf
PZ
1952
1953 /* Workaround: clear timing override bit. */
36c0d0cf 1954 val = I915_READ(TRANS_CHICKEN2(PIPE_A));
23670b32 1955 val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
36c0d0cf 1956 I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
040484af
JB
1957}
1958
/**
 * intel_enable_pipe - enable a pipe, asserting requirements
 * @crtc: crtc responsible for the pipe
 *
 * Enable @crtc's pipe, making sure that various hardware specific requirements
 * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
 */
static void intel_enable_pipe(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe = crtc->pipe;
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
	enum pipe pch_transcoder;
	i915_reg_t reg;
	u32 val;

	DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));

	/* Planes/cursor/sprites must be enabled only after the pipe */
	assert_planes_disabled(dev_priv, pipe);
	assert_cursor_disabled(dev_priv, pipe);
	assert_sprites_disabled(dev_priv, pipe);

	/* LPT has a single PCH transcoder, hardwired to transcoder A */
	if (HAS_PCH_LPT(dev_priv))
		pch_transcoder = TRANSCODER_A;
	else
		pch_transcoder = pipe;

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane. On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (HAS_GMCH_DISPLAY(dev_priv))
		if (crtc->config->has_dsi_encoder)
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	else {
		if (crtc->config->has_pch_encoder) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
			assert_fdi_tx_pll_enabled(dev_priv,
						  (enum pipe) cpu_transcoder);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	reg = PIPECONF(cpu_transcoder);
	val = I915_READ(reg);
	if (val & PIPECONF_ENABLE) {
		/* Already on: only legitimate for the force-quirked pipes */
		WARN_ON(!((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
			  (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE)));
		return;
	}

	I915_WRITE(reg, val | PIPECONF_ENABLE);
	POSTING_READ(reg);

	/*
	 * Until the pipe starts DSL will read as 0, which would cause
	 * an apparent vblank timestamp jump, which messes up also the
	 * frame count when it's derived from the timestamps. So let's
	 * wait for the pipe to start properly before we call
	 * drm_crtc_vblank_on()
	 */
	if (dev->max_vblank_count == 0 &&
	    wait_for(intel_get_crtc_scanline(crtc) != crtc->scanline_offset, 50))
		DRM_ERROR("pipe %c didn't start\n", pipe_name(pipe));
}
2029
/**
 * intel_disable_pipe - disable a pipe, asserting requirements
 * @crtc: crtc whose pipe is to be disabled
 *
 * Disable the pipe of @crtc, making sure that various hardware
 * specific requirements are met, if applicable, e.g. plane
 * disabled, panel fitter off, etc.
 *
 * Will wait until the pipe has shut down before returning.
 */
static void intel_disable_pipe(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe));

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(dev_priv, pipe);
	assert_cursor_disabled(dev_priv, pipe);
	assert_sprites_disabled(dev_priv, pipe);

	reg = PIPECONF(cpu_transcoder);
	val = I915_READ(reg);
	if ((val & PIPECONF_ENABLE) == 0)
		return;

	/*
	 * Double wide has implications for planes
	 * so best keep it disabled when not needed.
	 */
	if (crtc->config->double_wide)
		val &= ~PIPECONF_DOUBLE_WIDE;

	/* Don't disable pipe or pipe PLLs if needed (quirked pipes stay on) */
	if (!(pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) &&
	    !(pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
		val &= ~PIPECONF_ENABLE;

	I915_WRITE(reg, val);
	/* Only wait for shutdown if we actually cleared the enable bit */
	if ((val & PIPECONF_ENABLE) == 0)
		intel_wait_for_pipe_off(crtc);
}
2079
693db184
CW
2080static bool need_vtd_wa(struct drm_device *dev)
2081{
2082#ifdef CONFIG_INTEL_IOMMU
2083 if (INTEL_INFO(dev)->gen >= 6 && intel_iommu_gfx_mapped)
2084 return true;
2085#endif
2086 return false;
2087}
2088
832be82f
VS
/* Size of one tile in bytes: 2KiB on gen2, 4KiB on everything newer. */
static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
{
	if (IS_GEN2(dev_priv))
		return 2048;

	return 4096;
}
2093
/*
 * Return the width of one tile row in bytes for the given tiling
 * modifier and bytes-per-pixel (@cpp). Linear "tiles" are one pixel
 * wide, i.e. @cpp bytes.
 */
static unsigned int intel_tile_width_bytes(const struct drm_i915_private *dev_priv,
					   uint64_t fb_modifier, unsigned int cpp)
{
	switch (fb_modifier) {
	case DRM_FORMAT_MOD_NONE:
		return cpp;
	case I915_FORMAT_MOD_X_TILED:
		if (IS_GEN2(dev_priv))
			return 128;
		else
			return 512;
	case I915_FORMAT_MOD_Y_TILED:
		if (IS_GEN2(dev_priv) || HAS_128_BYTE_Y_TILING(dev_priv))
			return 128;
		else
			return 512;
	case I915_FORMAT_MOD_Yf_TILED:
		/* Yf tile width varies with the pixel size */
		switch (cpp) {
		case 1:
			return 64;
		case 2:
		case 4:
			return 128;
		case 8:
		case 16:
			return 256;
		default:
			MISSING_CASE(cpp);
			return cpp;
		}
		break;
	default:
		MISSING_CASE(fb_modifier);
		return cpp;
	}
}
2130
832be82f
VS
2131unsigned int intel_tile_height(const struct drm_i915_private *dev_priv,
2132 uint64_t fb_modifier, unsigned int cpp)
a57ce0b2 2133{
832be82f
VS
2134 if (fb_modifier == DRM_FORMAT_MOD_NONE)
2135 return 1;
2136 else
2137 return intel_tile_size(dev_priv) /
27ba3910 2138 intel_tile_width_bytes(dev_priv, fb_modifier, cpp);
6761dd31
TU
2139}
2140
/* Return the tile dimensions in pixel units */
static void intel_tile_dims(const struct drm_i915_private *dev_priv,
			    unsigned int *tile_width,
			    unsigned int *tile_height,
			    uint64_t fb_modifier,
			    unsigned int cpp)
{
	unsigned int tile_width_bytes =
		intel_tile_width_bytes(dev_priv, fb_modifier, cpp);

	/* Convert the byte width to pixels; height follows from tile size */
	*tile_width = tile_width_bytes / cpp;
	*tile_height = intel_tile_size(dev_priv) / tile_width_bytes;
}
2154
6761dd31
TU
2155unsigned int
2156intel_fb_align_height(struct drm_device *dev, unsigned int height,
832be82f 2157 uint32_t pixel_format, uint64_t fb_modifier)
6761dd31 2158{
832be82f
VS
2159 unsigned int cpp = drm_format_plane_cpp(pixel_format, 0);
2160 unsigned int tile_height = intel_tile_height(to_i915(dev), fb_modifier, cpp);
2161
2162 return ALIGN(height, tile_height);
a57ce0b2
JB
2163}
2164
1663b9d6
VS
2165unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
2166{
2167 unsigned int size = 0;
2168 int i;
2169
2170 for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
2171 size += rot_info->plane[i].width * rot_info->plane[i].height;
2172
2173 return size;
2174}
2175
/*
 * Pick the GGTT view used to scan out @fb: the rotated view (carrying
 * the fb's precomputed rotation info) for 90/270 degree rotation, the
 * normal view otherwise.
 */
static void
intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
			const struct drm_framebuffer *fb,
			unsigned int rotation)
{
	if (intel_rotation_90_or_270(rotation)) {
		*view = i915_ggtt_view_rotated;
		view->params.rotated = to_intel_framebuffer(fb)->rot_info;
	} else {
		*view = i915_ggtt_view_normal;
	}
}
50470bb0 2188
2d7a215f
VS
2189static void
2190intel_fill_fb_info(struct drm_i915_private *dev_priv,
2191 struct drm_framebuffer *fb)
2192{
2193 struct intel_rotation_info *info = &to_intel_framebuffer(fb)->rot_info;
2194 unsigned int tile_size, tile_width, tile_height, cpp;
50470bb0 2195
d9b3288e
VS
2196 tile_size = intel_tile_size(dev_priv);
2197
2198 cpp = drm_format_plane_cpp(fb->pixel_format, 0);
8d0deca8
VS
2199 intel_tile_dims(dev_priv, &tile_width, &tile_height,
2200 fb->modifier[0], cpp);
d9b3288e 2201
1663b9d6
VS
2202 info->plane[0].width = DIV_ROUND_UP(fb->pitches[0], tile_width * cpp);
2203 info->plane[0].height = DIV_ROUND_UP(fb->height, tile_height);
84fe03f7 2204
89e3e142 2205 if (info->pixel_format == DRM_FORMAT_NV12) {
832be82f 2206 cpp = drm_format_plane_cpp(fb->pixel_format, 1);
8d0deca8
VS
2207 intel_tile_dims(dev_priv, &tile_width, &tile_height,
2208 fb->modifier[1], cpp);
d9b3288e 2209
2d7a215f 2210 info->uv_offset = fb->offsets[1];
1663b9d6
VS
2211 info->plane[1].width = DIV_ROUND_UP(fb->pitches[1], tile_width * cpp);
2212 info->plane[1].height = DIV_ROUND_UP(fb->height / 2, tile_height);
89e3e142 2213 }
f64b98cd
TU
2214}
2215
603525d7 2216static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
4e9a86b6
VS
2217{
2218 if (INTEL_INFO(dev_priv)->gen >= 9)
2219 return 256 * 1024;
985b8bb4 2220 else if (IS_BROADWATER(dev_priv) || IS_CRESTLINE(dev_priv) ||
666a4537 2221 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
4e9a86b6
VS
2222 return 128 * 1024;
2223 else if (INTEL_INFO(dev_priv)->gen >= 4)
2224 return 4 * 1024;
2225 else
44c5905e 2226 return 0;
4e9a86b6
VS
2227}
2228
/*
 * Required GGTT alignment for a scanout surface with the given tiling
 * modifier. Zero means no extra alignment beyond tiling constraints.
 */
static unsigned int intel_surf_alignment(const struct drm_i915_private *dev_priv,
					 uint64_t fb_modifier)
{
	switch (fb_modifier) {
	case DRM_FORMAT_MOD_NONE:
		return intel_linear_alignment(dev_priv);
	case I915_FORMAT_MOD_X_TILED:
		if (INTEL_INFO(dev_priv)->gen >= 9)
			return 256 * 1024;
		return 0;
	case I915_FORMAT_MOD_Y_TILED:
	case I915_FORMAT_MOD_Yf_TILED:
		return 1 * 1024 * 1024;
	default:
		MISSING_CASE(fb_modifier);
		return 0;
	}
}
2247
/*
 * Pin @fb's backing object into the GGTT for scanout (using the view
 * implied by @rotation) and install a fence for the normal view.
 * Returns 0 on success or a negative error code; on error nothing is
 * left pinned. Caller must hold struct_mutex.
 */
int
intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
			   unsigned int rotation)
{
	struct drm_device *dev = fb->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct i915_ggtt_view view;
	u32 alignment;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	alignment = intel_surf_alignment(dev_priv, fb->modifier[0]);

	intel_fill_fb_ggtt_view(&view, fb, rotation);

	/* Note that the w/a also requires 64 PTE of padding following the
	 * bo. We currently fill all unused PTE with the shadow page and so
	 * we should always have valid PTE following the scanout preventing
	 * the VT-d warning.
	 */
	if (need_vtd_wa(dev) && alignment < 256 * 1024)
		alignment = 256 * 1024;

	/*
	 * Global gtt pte registers are special registers which actually forward
	 * writes to a chunk of system memory. Which means that there is no risk
	 * that the register values disappear as soon as we call
	 * intel_runtime_pm_put(), so it is correct to wrap only the
	 * pin/unpin/fence and not more.
	 */
	intel_runtime_pm_get(dev_priv);

	ret = i915_gem_object_pin_to_display_plane(obj, alignment,
						   &view);
	if (ret)
		goto err_pm;

	/* Install a fence for tiled scan-out. Pre-i965 always needs a
	 * fence, whereas 965+ only requires a fence if using
	 * framebuffer compression. For simplicity, we always install
	 * a fence as the cost is not that onerous.
	 */
	if (view.type == I915_GGTT_VIEW_NORMAL) {
		ret = i915_gem_object_get_fence(obj);
		if (ret == -EDEADLK) {
			/*
			 * -EDEADLK means there are no free fences
			 * no pending flips.
			 *
			 * This is propagated to atomic, but it uses
			 * -EDEADLK to force a locking recovery, so
			 * change the returned error to -EBUSY.
			 */
			ret = -EBUSY;
			goto err_unpin;
		} else if (ret)
			goto err_unpin;

		i915_gem_object_pin_fence(obj);
	}

	intel_runtime_pm_put(dev_priv);
	return 0;

err_unpin:
	i915_gem_object_unpin_from_display_plane(obj, &view);
err_pm:
	intel_runtime_pm_put(dev_priv);
	return ret;
}
2320
/*
 * Counterpart to intel_pin_and_fence_fb_obj(): release the fence
 * (normal view only) and unpin @fb's backing object from the display
 * plane. Caller must hold struct_mutex.
 */
void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
{
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct i915_ggtt_view view;

	WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));

	intel_fill_fb_ggtt_view(&view, fb, rotation);

	/* A fence is only installed for the normal (unrotated) view */
	if (view.type == I915_GGTT_VIEW_NORMAL)
		i915_gem_object_unpin_fence(obj);

	i915_gem_object_unpin_from_display_plane(obj, &view);
}
2335
/*
 * Adjust the tile offset by moving the difference into
 * the x/y offsets.
 *
 * Input tile dimensions and pitch must already be
 * rotated to match x and y, and in pixel units.
 *
 * Returns @new_offset; the whole tiles between @new_offset and
 * @old_offset are folded back into *@x/*@y. Both offsets must be
 * tile aligned and @new_offset must not exceed @old_offset.
 */
static u32 intel_adjust_tile_offset(int *x, int *y,
				    unsigned int tile_width,
				    unsigned int tile_height,
				    unsigned int tile_size,
				    unsigned int pitch_tiles,
				    u32 old_offset,
				    u32 new_offset)
{
	unsigned int tiles;

	WARN_ON(old_offset & (tile_size - 1));
	WARN_ON(new_offset & (tile_size - 1));
	WARN_ON(new_offset > old_offset);

	tiles = (old_offset - new_offset) / tile_size;

	/* Distribute the tile delta over full rows and remaining columns */
	*y += tiles / pitch_tiles * tile_height;
	*x += tiles % pitch_tiles * tile_width;

	return new_offset;
}
2364
/*
 * Computes the linear offset to the base tile and adjusts
 * x, y. bytes per pixel is assumed to be a power-of-two.
 *
 * In the 90/270 rotated case, x and y are assumed
 * to be already rotated to match the rotated GTT view, and
 * pitch is the tile_height aligned framebuffer height.
 *
 * Returns the surface-alignment-rounded byte offset; the remainder is
 * folded into *@x/*@y so the plane registers can address the pixel.
 */
u32 intel_compute_tile_offset(int *x, int *y,
			      const struct drm_framebuffer *fb, int plane,
			      unsigned int pitch,
			      unsigned int rotation)
{
	const struct drm_i915_private *dev_priv = to_i915(fb->dev);
	uint64_t fb_modifier = fb->modifier[plane];
	unsigned int cpp = drm_format_plane_cpp(fb->pixel_format, plane);
	u32 offset, offset_aligned, alignment;

	/* alignment is a power of two; turn it into a mask (0 stays 0) */
	alignment = intel_surf_alignment(dev_priv, fb_modifier);
	if (alignment)
		alignment--;

	if (fb_modifier != DRM_FORMAT_MOD_NONE) {
		unsigned int tile_size, tile_width, tile_height;
		unsigned int tile_rows, tiles, pitch_tiles;

		tile_size = intel_tile_size(dev_priv);
		intel_tile_dims(dev_priv, &tile_width, &tile_height,
				fb_modifier, cpp);

		if (intel_rotation_90_or_270(rotation)) {
			/* pitch is the rotated height here; swap tile dims */
			pitch_tiles = pitch / tile_height;
			swap(tile_width, tile_height);
		} else {
			pitch_tiles = pitch / (tile_width * cpp);
		}

		/* Split x/y into whole tiles and the intra-tile remainder */
		tile_rows = *y / tile_height;
		*y %= tile_height;

		tiles = *x / tile_width;
		*x %= tile_width;

		offset = (tile_rows * pitch_tiles + tiles) * tile_size;
		offset_aligned = offset & ~alignment;

		/* Fold the alignment remainder back into x/y */
		intel_adjust_tile_offset(x, y, tile_width, tile_height,
					 tile_size, pitch_tiles,
					 offset, offset_aligned);
	} else {
		offset = *y * pitch + *x * cpp;
		offset_aligned = offset & ~alignment;

		*y = (offset & alignment) / pitch;
		*x = ((offset & alignment) - *y * pitch) / cpp;
	}

	return offset_aligned;
}
2424
b35d63fa 2425static int i9xx_format_to_fourcc(int format)
46f297fb
JB
2426{
2427 switch (format) {
2428 case DISPPLANE_8BPP:
2429 return DRM_FORMAT_C8;
2430 case DISPPLANE_BGRX555:
2431 return DRM_FORMAT_XRGB1555;
2432 case DISPPLANE_BGRX565:
2433 return DRM_FORMAT_RGB565;
2434 default:
2435 case DISPPLANE_BGRX888:
2436 return DRM_FORMAT_XRGB8888;
2437 case DISPPLANE_RGBX888:
2438 return DRM_FORMAT_XBGR8888;
2439 case DISPPLANE_BGRX101010:
2440 return DRM_FORMAT_XRGB2101010;
2441 case DISPPLANE_RGBX101010:
2442 return DRM_FORMAT_XBGR2101010;
2443 }
2444}
2445
bc8d7dff
DL
2446static int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
2447{
2448 switch (format) {
2449 case PLANE_CTL_FORMAT_RGB_565:
2450 return DRM_FORMAT_RGB565;
2451 default:
2452 case PLANE_CTL_FORMAT_XRGB_8888:
2453 if (rgb_order) {
2454 if (alpha)
2455 return DRM_FORMAT_ABGR8888;
2456 else
2457 return DRM_FORMAT_XBGR8888;
2458 } else {
2459 if (alpha)
2460 return DRM_FORMAT_ARGB8888;
2461 else
2462 return DRM_FORMAT_XRGB8888;
2463 }
2464 case PLANE_CTL_FORMAT_XRGB_2101010:
2465 if (rgb_order)
2466 return DRM_FORMAT_XBGR2101010;
2467 else
2468 return DRM_FORMAT_XRGB2101010;
2469 }
2470}
2471
/*
 * Try to wrap the BIOS-programmed framebuffer (preallocated in stolen
 * memory) in a GEM object and initialize @plane_config->fb around it.
 * Returns true on success, false if the BIOS fb can't be reused.
 */
static bool
intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
			      struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct drm_i915_gem_object *obj = NULL;
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
	struct drm_framebuffer *fb = &plane_config->fb->base;
	/* Base/size may not be page aligned; round out to whole pages */
	u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
	u32 size_aligned = round_up(plane_config->base + plane_config->size,
				    PAGE_SIZE);

	size_aligned -= base_aligned;

	if (plane_config->size == 0)
		return false;

	/* If the FB is too big, just don't use it since fbdev is not very
	 * important and we should probably use that space with FBC or other
	 * features. */
	if (size_aligned * 2 > ggtt->stolen_usable_size)
		return false;

	mutex_lock(&dev->struct_mutex);

	obj = i915_gem_object_create_stolen_for_preallocated(dev,
							     base_aligned,
							     base_aligned,
							     size_aligned);
	if (!obj) {
		mutex_unlock(&dev->struct_mutex);
		return false;
	}

	/* Propagate the BIOS tiling mode onto the new object */
	obj->tiling_mode = plane_config->tiling;
	if (obj->tiling_mode == I915_TILING_X)
		obj->stride = fb->pitches[0];

	mode_cmd.pixel_format = fb->pixel_format;
	mode_cmd.width = fb->width;
	mode_cmd.height = fb->height;
	mode_cmd.pitches[0] = fb->pitches[0];
	mode_cmd.modifier[0] = fb->modifier[0];
	mode_cmd.flags = DRM_MODE_FB_MODIFIERS;

	if (intel_framebuffer_init(dev, to_intel_framebuffer(fb),
				   &mode_cmd, obj)) {
		DRM_DEBUG_KMS("intel fb init failed\n");
		goto out_unref_obj;
	}

	mutex_unlock(&dev->struct_mutex);

	DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
	return true;

out_unref_obj:
	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);
	return false;
}
2535
5a21b665
DV
2536/* Update plane->state->fb to match plane->fb after driver-internal updates */
2537static void
2538update_state_fb(struct drm_plane *plane)
2539{
2540 if (plane->fb == plane->state->fb)
2541 return;
2542
2543 if (plane->state->fb)
2544 drm_framebuffer_unreference(plane->state->fb);
2545 plane->state->fb = plane->fb;
2546 if (plane->state->fb)
2547 drm_framebuffer_reference(plane->state->fb);
2548}
2549
/*
 * Hook the BIOS framebuffer up to the crtc's primary plane state: either
 * wrap the preallocated stolen-memory fb, or share another crtc's fb at
 * the same GTT offset. If neither works, disable the primary plane so
 * later state doesn't see a visible plane with a NULL fb.
 */
static void
intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
			     struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *c;
	struct intel_crtc *i;
	struct drm_i915_gem_object *obj;
	struct drm_plane *primary = intel_crtc->base.primary;
	struct drm_plane_state *plane_state = primary->state;
	struct drm_crtc_state *crtc_state = intel_crtc->base.state;
	struct intel_plane *intel_plane = to_intel_plane(primary);
	struct intel_plane_state *intel_state =
		to_intel_plane_state(plane_state);
	struct drm_framebuffer *fb;

	if (!plane_config->fb)
		return;

	if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
		fb = &plane_config->fb->base;
		goto valid_fb;
	}

	kfree(plane_config->fb);

	/*
	 * Failed to alloc the obj, check to see if we should share
	 * an fb with another CRTC instead
	 */
	for_each_crtc(dev, c) {
		i = to_intel_crtc(c);

		if (c == &intel_crtc->base)
			continue;

		if (!i->active)
			continue;

		fb = c->primary->fb;
		if (!fb)
			continue;

		obj = intel_fb_obj(fb);
		/* Same GTT offset means the BIOS cloned one fb to both pipes */
		if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) {
			drm_framebuffer_reference(fb);
			goto valid_fb;
		}
	}

	/*
	 * We've failed to reconstruct the BIOS FB. Current display state
	 * indicates that the primary plane is visible, but has a NULL FB,
	 * which will lead to problems later if we don't fix it up. The
	 * simplest solution is to just disable the primary plane now and
	 * pretend the BIOS never had it enabled.
	 */
	to_intel_plane_state(plane_state)->visible = false;
	crtc_state->plane_mask &= ~(1 << drm_plane_index(primary));
	intel_pre_disable_primary_noatomic(&intel_crtc->base);
	intel_plane->disable_plane(primary, &intel_crtc->base);

	return;

valid_fb:
	/* Populate the plane state as a full-fb, unscaled scanout */
	plane_state->src_x = 0;
	plane_state->src_y = 0;
	plane_state->src_w = fb->width << 16;
	plane_state->src_h = fb->height << 16;

	plane_state->crtc_x = 0;
	plane_state->crtc_y = 0;
	plane_state->crtc_w = fb->width;
	plane_state->crtc_h = fb->height;

	intel_state->src.x1 = plane_state->src_x;
	intel_state->src.y1 = plane_state->src_y;
	intel_state->src.x2 = plane_state->src_x + plane_state->src_w;
	intel_state->src.y2 = plane_state->src_y + plane_state->src_h;
	intel_state->dst.x1 = plane_state->crtc_x;
	intel_state->dst.y1 = plane_state->crtc_y;
	intel_state->dst.x2 = plane_state->crtc_x + plane_state->crtc_w;
	intel_state->dst.y2 = plane_state->crtc_y + plane_state->crtc_h;

	obj = intel_fb_obj(fb);
	/* Keep the BIOS swizzle setup if it handed us a tiled fb */
	if (obj->tiling_mode != I915_TILING_NONE)
		dev_priv->preserve_bios_swizzle = true;

	drm_framebuffer_reference(fb);
	primary->fb = primary->state->fb = fb;
	primary->crtc = primary->state->crtc = &intel_crtc->base;
	intel_crtc->base.state->plane_mask |= (1 << drm_plane_index(primary));
	obj->frontbuffer_bits |= to_intel_plane(primary)->frontbuffer_bit;
}
2645
a8d201af
ML
/*
 * Program the pre-SKL (gen2..gen8 non-universal) primary plane registers for
 * the framebuffer described by @plane_state on the CRTC of @crtc_state.
 * NOTE(review): assumes the fb object is already pinned into the GGTT
 * (i915_gem_obj_ggtt_offset() is read without pinning here) — confirm with
 * callers.
 */
static void i9xx_update_primary_plane(struct drm_plane *primary,
				      const struct intel_crtc_state *crtc_state,
				      const struct intel_plane_state *plane_state)
{
	struct drm_device *dev = primary->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_framebuffer *fb = plane_state->base.fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	int plane = intel_crtc->plane;
	u32 linear_offset;
	u32 dspcntr;
	i915_reg_t reg = DSPCNTR(plane);
	unsigned int rotation = plane_state->base.rotation;
	int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
	/* src coordinates are 16.16 fixed point; keep only the integer part */
	int x = plane_state->src.x1 >> 16;
	int y = plane_state->src.y1 >> 16;

	dspcntr = DISPPLANE_GAMMA_ENABLE;

	dspcntr |= DISPLAY_PLANE_ENABLE;

	if (INTEL_INFO(dev)->gen < 4) {
		if (intel_crtc->pipe == PIPE_B)
			dspcntr |= DISPPLANE_SEL_PIPE_B;

		/* pipesrc and dspsize control the size that is scaled from,
		 * which should always be the user's requested size.
		 */
		I915_WRITE(DSPSIZE(plane),
			   ((crtc_state->pipe_src_h - 1) << 16) |
			   (crtc_state->pipe_src_w - 1));
		I915_WRITE(DSPPOS(plane), 0);
	} else if (IS_CHERRYVIEW(dev) && plane == PLANE_B) {
		/* CHV plane B has its own size/pos/const-alpha registers */
		I915_WRITE(PRIMSIZE(plane),
			   ((crtc_state->pipe_src_h - 1) << 16) |
			   (crtc_state->pipe_src_w - 1));
		I915_WRITE(PRIMPOS(plane), 0);
		I915_WRITE(PRIMCNSTALPHA(plane), 0);
	}

	/* translate the drm fourcc into the hardware pixel format field */
	switch (fb->pixel_format) {
	case DRM_FORMAT_C8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case DRM_FORMAT_XRGB1555:
		dspcntr |= DISPPLANE_BGRX555;
		break;
	case DRM_FORMAT_RGB565:
		dspcntr |= DISPPLANE_BGRX565;
		break;
	case DRM_FORMAT_XRGB8888:
		dspcntr |= DISPPLANE_BGRX888;
		break;
	case DRM_FORMAT_XBGR8888:
		dspcntr |= DISPPLANE_RGBX888;
		break;
	case DRM_FORMAT_XRGB2101010:
		dspcntr |= DISPPLANE_BGRX101010;
		break;
	case DRM_FORMAT_XBGR2101010:
		dspcntr |= DISPPLANE_RGBX101010;
		break;
	default:
		/* format was validated earlier; anything else is a driver bug */
		BUG();
	}

	if (INTEL_INFO(dev)->gen >= 4 &&
	    obj->tiling_mode != I915_TILING_NONE)
		dspcntr |= DISPPLANE_TILED;

	if (IS_G4X(dev))
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	linear_offset = y * fb->pitches[0] + x * cpp;

	if (INTEL_INFO(dev)->gen >= 4) {
		/* gen4+ scans out from a tile-aligned surface base;
		 * x/y are adjusted to point into that tile */
		intel_crtc->dspaddr_offset =
			intel_compute_tile_offset(&x, &y, fb, 0,
						  fb->pitches[0], rotation);
		linear_offset -= intel_crtc->dspaddr_offset;
	} else {
		intel_crtc->dspaddr_offset = linear_offset;
	}

	if (rotation == BIT(DRM_ROTATE_180)) {
		dspcntr |= DISPPLANE_ROTATE_180;

		x += (crtc_state->pipe_src_w - 1);
		y += (crtc_state->pipe_src_h - 1);

		/* Finding the last pixel of the last line of the display
		   data and adding to linear_offset*/
		linear_offset +=
			(crtc_state->pipe_src_h - 1) * fb->pitches[0] +
			(crtc_state->pipe_src_w - 1) * cpp;
	}

	intel_crtc->adjusted_x = x;
	intel_crtc->adjusted_y = y;

	I915_WRITE(reg, dspcntr);

	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
	if (INTEL_INFO(dev)->gen >= 4) {
		/* DSPSURF is the double-buffered trigger register on gen4+ */
		I915_WRITE(DSPSURF(plane),
			   i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
		I915_WRITE(DSPLINOFF(plane), linear_offset);
	} else
		I915_WRITE(DSPADDR(plane), i915_gem_obj_ggtt_offset(obj) + linear_offset);
	POSTING_READ(reg);
}
2759
a8d201af
ML
2760static void i9xx_disable_primary_plane(struct drm_plane *primary,
2761 struct drm_crtc *crtc)
17638cd6
JB
2762{
2763 struct drm_device *dev = crtc->dev;
2764 struct drm_i915_private *dev_priv = dev->dev_private;
2765 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
17638cd6 2766 int plane = intel_crtc->plane;
f45651ba 2767
a8d201af
ML
2768 I915_WRITE(DSPCNTR(plane), 0);
2769 if (INTEL_INFO(dev_priv)->gen >= 4)
fdd508a6 2770 I915_WRITE(DSPSURF(plane), 0);
a8d201af
ML
2771 else
2772 I915_WRITE(DSPADDR(plane), 0);
2773 POSTING_READ(DSPCNTR(plane));
2774}
c9ba6fad 2775
a8d201af
ML
/*
 * Program the ILK..BDW primary plane registers for the framebuffer described
 * by @plane_state.  Same contract as i9xx_update_primary_plane(), but all
 * supported generations here have the gen4+ tiled-surface base registers.
 */
static void ironlake_update_primary_plane(struct drm_plane *primary,
					  const struct intel_crtc_state *crtc_state,
					  const struct intel_plane_state *plane_state)
{
	struct drm_device *dev = primary->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_framebuffer *fb = plane_state->base.fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	int plane = intel_crtc->plane;
	u32 linear_offset;
	u32 dspcntr;
	i915_reg_t reg = DSPCNTR(plane);
	unsigned int rotation = plane_state->base.rotation;
	int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
	/* src coordinates are 16.16 fixed point; keep only the integer part */
	int x = plane_state->src.x1 >> 16;
	int y = plane_state->src.y1 >> 16;

	dspcntr = DISPPLANE_GAMMA_ENABLE;
	dspcntr |= DISPLAY_PLANE_ENABLE;

	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;

	/* translate the drm fourcc into the hardware pixel format field */
	switch (fb->pixel_format) {
	case DRM_FORMAT_C8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case DRM_FORMAT_RGB565:
		dspcntr |= DISPPLANE_BGRX565;
		break;
	case DRM_FORMAT_XRGB8888:
		dspcntr |= DISPPLANE_BGRX888;
		break;
	case DRM_FORMAT_XBGR8888:
		dspcntr |= DISPPLANE_RGBX888;
		break;
	case DRM_FORMAT_XRGB2101010:
		dspcntr |= DISPPLANE_BGRX101010;
		break;
	case DRM_FORMAT_XBGR2101010:
		dspcntr |= DISPPLANE_RGBX101010;
		break;
	default:
		/* format was validated earlier; anything else is a driver bug */
		BUG();
	}

	if (obj->tiling_mode != I915_TILING_NONE)
		dspcntr |= DISPPLANE_TILED;

	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	linear_offset = y * fb->pitches[0] + x * cpp;
	/* scan out from a tile-aligned base; x/y are adjusted into the tile */
	intel_crtc->dspaddr_offset =
		intel_compute_tile_offset(&x, &y, fb, 0,
					  fb->pitches[0], rotation);
	linear_offset -= intel_crtc->dspaddr_offset;
	if (rotation == BIT(DRM_ROTATE_180)) {
		dspcntr |= DISPPLANE_ROTATE_180;

		/* HSW/BDW rotate in hardware without offset fixup */
		if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
			x += (crtc_state->pipe_src_w - 1);
			y += (crtc_state->pipe_src_h - 1);

			/* Finding the last pixel of the last line of the display
			   data and adding to linear_offset*/
			linear_offset +=
				(crtc_state->pipe_src_h - 1) * fb->pitches[0] +
				(crtc_state->pipe_src_w - 1) * cpp;
		}
	}

	intel_crtc->adjusted_x = x;
	intel_crtc->adjusted_y = y;

	I915_WRITE(reg, dspcntr);

	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
	I915_WRITE(DSPSURF(plane),
		   i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
	} else {
		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
		I915_WRITE(DSPLINOFF(plane), linear_offset);
	}
	POSTING_READ(reg);
}
2865
7b49f948
VS
2866u32 intel_fb_stride_alignment(const struct drm_i915_private *dev_priv,
2867 uint64_t fb_modifier, uint32_t pixel_format)
b321803d 2868{
7b49f948 2869 if (fb_modifier == DRM_FORMAT_MOD_NONE) {
b321803d 2870 return 64;
7b49f948
VS
2871 } else {
2872 int cpp = drm_format_plane_cpp(pixel_format, 0);
2873
27ba3910 2874 return intel_tile_width_bytes(dev_priv, fb_modifier, cpp);
b321803d
DL
2875 }
2876}
2877
44eb0cb9
MK
/*
 * Look up the GGTT offset at which @obj is mapped for scanout by
 * @intel_plane, taking the plane's fb and rotation into account.
 *
 * @plane: color plane index within the fb; plane 1 selects the UV pages of
 *         a rotated NV12-style mapping.
 *
 * Returns the low 32 bits of the offset, or -1 (as u32) if no suitable
 * GGTT VMA exists (which also triggers a WARN).
 */
u32 intel_plane_obj_offset(struct intel_plane *intel_plane,
			   struct drm_i915_gem_object *obj,
			   unsigned int plane)
{
	struct i915_ggtt_view view;
	struct i915_vma *vma;
	u64 offset;

	intel_fill_fb_ggtt_view(&view, intel_plane->base.state->fb,
				intel_plane->base.state->rotation);

	vma = i915_gem_obj_to_ggtt_view(obj, &view);
	if (WARN(!vma, "ggtt vma for display object not found! (view=%u)\n",
		view.type))
		return -1;

	offset = vma->node.start;

	if (plane == 1) {
		/* UV data starts uv_start_page pages into the rotated view */
		offset += vma->ggtt_view.params.rotated.uv_start_page *
			  PAGE_SIZE;
	}

	/* display registers only take a 32 bit offset */
	WARN_ON(upper_32_bits(offset));

	return lower_32_bits(offset);
}
2905
e435d6e5
ML
2906static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
2907{
2908 struct drm_device *dev = intel_crtc->base.dev;
2909 struct drm_i915_private *dev_priv = dev->dev_private;
2910
2911 I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
2912 I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
2913 I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
e435d6e5
ML
2914}
2915
a1b2278e
CK
2916/*
2917 * This function detaches (aka. unbinds) unused scalers in hardware
2918 */
0583236e 2919static void skl_detach_scalers(struct intel_crtc *intel_crtc)
a1b2278e 2920{
a1b2278e
CK
2921 struct intel_crtc_scaler_state *scaler_state;
2922 int i;
2923
a1b2278e
CK
2924 scaler_state = &intel_crtc->config->scaler_state;
2925
2926 /* loop through and disable scalers that aren't in use */
2927 for (i = 0; i < intel_crtc->num_scalers; i++) {
e435d6e5
ML
2928 if (!scaler_state->scalers[i].in_use)
2929 skl_detach_scaler(intel_crtc, i);
a1b2278e
CK
2930 }
2931}
2932
6156a456 2933u32 skl_plane_ctl_format(uint32_t pixel_format)
70d21f0e 2934{
6156a456 2935 switch (pixel_format) {
d161cf7a 2936 case DRM_FORMAT_C8:
c34ce3d1 2937 return PLANE_CTL_FORMAT_INDEXED;
70d21f0e 2938 case DRM_FORMAT_RGB565:
c34ce3d1 2939 return PLANE_CTL_FORMAT_RGB_565;
70d21f0e 2940 case DRM_FORMAT_XBGR8888:
c34ce3d1 2941 return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
6156a456 2942 case DRM_FORMAT_XRGB8888:
c34ce3d1 2943 return PLANE_CTL_FORMAT_XRGB_8888;
6156a456
CK
2944 /*
2945 * XXX: For ARBG/ABGR formats we default to expecting scanout buffers
2946 * to be already pre-multiplied. We need to add a knob (or a different
2947 * DRM_FORMAT) for user-space to configure that.
2948 */
f75fb42a 2949 case DRM_FORMAT_ABGR8888:
c34ce3d1 2950 return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX |
6156a456 2951 PLANE_CTL_ALPHA_SW_PREMULTIPLY;
6156a456 2952 case DRM_FORMAT_ARGB8888:
c34ce3d1 2953 return PLANE_CTL_FORMAT_XRGB_8888 |
6156a456 2954 PLANE_CTL_ALPHA_SW_PREMULTIPLY;
70d21f0e 2955 case DRM_FORMAT_XRGB2101010:
c34ce3d1 2956 return PLANE_CTL_FORMAT_XRGB_2101010;
70d21f0e 2957 case DRM_FORMAT_XBGR2101010:
c34ce3d1 2958 return PLANE_CTL_ORDER_RGBX | PLANE_CTL_FORMAT_XRGB_2101010;
6156a456 2959 case DRM_FORMAT_YUYV:
c34ce3d1 2960 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
6156a456 2961 case DRM_FORMAT_YVYU:
c34ce3d1 2962 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
6156a456 2963 case DRM_FORMAT_UYVY:
c34ce3d1 2964 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
6156a456 2965 case DRM_FORMAT_VYUY:
c34ce3d1 2966 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
70d21f0e 2967 default:
4249eeef 2968 MISSING_CASE(pixel_format);
70d21f0e 2969 }
8cfcba41 2970
c34ce3d1 2971 return 0;
6156a456 2972}
70d21f0e 2973
6156a456
CK
2974u32 skl_plane_ctl_tiling(uint64_t fb_modifier)
2975{
6156a456 2976 switch (fb_modifier) {
30af77c4 2977 case DRM_FORMAT_MOD_NONE:
70d21f0e 2978 break;
30af77c4 2979 case I915_FORMAT_MOD_X_TILED:
c34ce3d1 2980 return PLANE_CTL_TILED_X;
b321803d 2981 case I915_FORMAT_MOD_Y_TILED:
c34ce3d1 2982 return PLANE_CTL_TILED_Y;
b321803d 2983 case I915_FORMAT_MOD_Yf_TILED:
c34ce3d1 2984 return PLANE_CTL_TILED_YF;
70d21f0e 2985 default:
6156a456 2986 MISSING_CASE(fb_modifier);
70d21f0e 2987 }
8cfcba41 2988
c34ce3d1 2989 return 0;
6156a456 2990}
70d21f0e 2991
6156a456
CK
2992u32 skl_plane_ctl_rotation(unsigned int rotation)
2993{
3b7a5119 2994 switch (rotation) {
6156a456
CK
2995 case BIT(DRM_ROTATE_0):
2996 break;
1e8df167
SJ
2997 /*
2998 * DRM_ROTATE_ is counter clockwise to stay compatible with Xrandr
2999 * while i915 HW rotation is clockwise, thats why this swapping.
3000 */
3b7a5119 3001 case BIT(DRM_ROTATE_90):
1e8df167 3002 return PLANE_CTL_ROTATE_270;
3b7a5119 3003 case BIT(DRM_ROTATE_180):
c34ce3d1 3004 return PLANE_CTL_ROTATE_180;
3b7a5119 3005 case BIT(DRM_ROTATE_270):
1e8df167 3006 return PLANE_CTL_ROTATE_90;
6156a456
CK
3007 default:
3008 MISSING_CASE(rotation);
3009 }
3010
c34ce3d1 3011 return 0;
6156a456
CK
3012}
3013
a8d201af
ML
/*
 * Program the SKL+ universal plane 0 (primary) registers for the framebuffer
 * described by @plane_state, including the pipe scaler when
 * @plane_state->scaler_id is assigned.  The PLANE_SURF write at the end arms
 * the double-buffered update.
 */
static void skylake_update_primary_plane(struct drm_plane *plane,
					 const struct intel_crtc_state *crtc_state,
					 const struct intel_plane_state *plane_state)
{
	struct drm_device *dev = plane->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_framebuffer *fb = plane_state->base.fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	int pipe = intel_crtc->pipe;
	u32 plane_ctl, stride_div, stride;
	u32 tile_height, plane_offset, plane_size;
	unsigned int rotation = plane_state->base.rotation;
	int x_offset, y_offset;
	u32 surf_addr;
	int scaler_id = plane_state->scaler_id;
	/* src is 16.16 fixed point, dst is integer pixels */
	int src_x = plane_state->src.x1 >> 16;
	int src_y = plane_state->src.y1 >> 16;
	int src_w = drm_rect_width(&plane_state->src) >> 16;
	int src_h = drm_rect_height(&plane_state->src) >> 16;
	int dst_x = plane_state->dst.x1;
	int dst_y = plane_state->dst.y1;
	int dst_w = drm_rect_width(&plane_state->dst);
	int dst_h = drm_rect_height(&plane_state->dst);

	plane_ctl = PLANE_CTL_ENABLE |
		    PLANE_CTL_PIPE_GAMMA_ENABLE |
		    PLANE_CTL_PIPE_CSC_ENABLE;

	plane_ctl |= skl_plane_ctl_format(fb->pixel_format);
	plane_ctl |= skl_plane_ctl_tiling(fb->modifier[0]);
	plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
	plane_ctl |= skl_plane_ctl_rotation(rotation);

	stride_div = intel_fb_stride_alignment(dev_priv, fb->modifier[0],
					       fb->pixel_format);
	surf_addr = intel_plane_obj_offset(to_intel_plane(plane), obj, 0);

	WARN_ON(drm_rect_width(&plane_state->src) == 0);

	if (intel_rotation_90_or_270(rotation)) {
		int cpp = drm_format_plane_cpp(fb->pixel_format, 0);

		/* stride = Surface height in tiles */
		tile_height = intel_tile_height(dev_priv, fb->modifier[0], cpp);
		stride = DIV_ROUND_UP(fb->height, tile_height);
		/* in the rotated view x/y swap and x counts from the bottom */
		x_offset = stride * tile_height - src_y - src_h;
		y_offset = src_x;
		plane_size = (src_w - 1) << 16 | (src_h - 1);
	} else {
		stride = fb->pitches[0] / stride_div;
		x_offset = src_x;
		y_offset = src_y;
		plane_size = (src_h - 1) << 16 | (src_w - 1);
	}
	plane_offset = y_offset << 16 | x_offset;

	intel_crtc->adjusted_x = x_offset;
	intel_crtc->adjusted_y = y_offset;

	I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl);
	I915_WRITE(PLANE_OFFSET(pipe, 0), plane_offset);
	I915_WRITE(PLANE_SIZE(pipe, 0), plane_size);
	I915_WRITE(PLANE_STRIDE(pipe, 0), stride);

	if (scaler_id >= 0) {
		uint32_t ps_ctrl = 0;

		WARN_ON(!dst_w || !dst_h);
		ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(0) |
			crtc_state->scaler_state.scalers[scaler_id].mode;
		I915_WRITE(SKL_PS_CTRL(pipe, scaler_id), ps_ctrl);
		I915_WRITE(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
		I915_WRITE(SKL_PS_WIN_POS(pipe, scaler_id), (dst_x << 16) | dst_y);
		I915_WRITE(SKL_PS_WIN_SZ(pipe, scaler_id), (dst_w << 16) | dst_h);
		/* when scaling, the scaler window positions the output */
		I915_WRITE(PLANE_POS(pipe, 0), 0);
	} else {
		I915_WRITE(PLANE_POS(pipe, 0), (dst_y << 16) | dst_x);
	}

	I915_WRITE(PLANE_SURF(pipe, 0), surf_addr);

	POSTING_READ(PLANE_SURF(pipe, 0));
}
3098
a8d201af
ML
3099static void skylake_disable_primary_plane(struct drm_plane *primary,
3100 struct drm_crtc *crtc)
17638cd6
JB
3101{
3102 struct drm_device *dev = crtc->dev;
3103 struct drm_i915_private *dev_priv = dev->dev_private;
a8d201af 3104 int pipe = to_intel_crtc(crtc)->pipe;
17638cd6 3105
a8d201af
ML
3106 I915_WRITE(PLANE_CTL(pipe, 0), 0);
3107 I915_WRITE(PLANE_SURF(pipe, 0), 0);
3108 POSTING_READ(PLANE_SURF(pipe, 0));
3109}
29b9bde6 3110
a8d201af
ML
/* Assume fb object is pinned & idle & fenced and just update base pointers */
/*
 * Stub for the legacy kgdb/panic atomic flip hook: always fails with
 * -ENODEV since the old implementation was removed.
 */
static int
intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
			   int x, int y, enum mode_set_atomic state)
{
	/* Support for kgdboc is disabled, this needs a major rework. */
	DRM_ERROR("legacy panic handler not supported any more.\n");

	return -ENODEV;
}
3121
5a21b665
DV
/*
 * Finish any pending CS-based page flip on every CRTC so that waiters get
 * their completion events (used after a GPU reset nukes in-flight flips).
 */
static void intel_complete_page_flips(struct drm_i915_private *dev_priv)
{
	struct intel_crtc *crtc;

	for_each_intel_crtc(dev_priv->dev, crtc)
		intel_finish_page_flip_cs(dev_priv, crtc->pipe);
}
3129
7514747d
VS
/*
 * Re-program every visible primary plane from its current plane state.
 * Used after a GPU reset to restore the correct scanout addresses; each
 * CRTC is locked around its plane update.
 */
static void intel_update_primary_planes(struct drm_device *dev)
{
	struct drm_crtc *crtc;

	for_each_crtc(dev, crtc) {
		struct intel_plane *plane = to_intel_plane(crtc->primary);
		struct intel_plane_state *plane_state;

		drm_modeset_lock_crtc(crtc, &plane->base);
		plane_state = to_intel_plane_state(plane->base.state);

		if (plane_state->visible)
			plane->update_plane(&plane->base,
					    to_intel_crtc_state(crtc->state),
					    plane_state);

		drm_modeset_unlock_crtc(crtc);
	}
}
3149
/*
 * Quiesce the display before a GPU reset on platforms where the reset
 * clobbers display state (gen3/gen4 except G4X).  Takes all modeset locks
 * and leaves them held; intel_finish_reset() drops them afterwards.
 */
void intel_prepare_reset(struct drm_i915_private *dev_priv)
{
	/* no reset support for gen2 */
	if (IS_GEN2(dev_priv))
		return;

	/* reset doesn't touch the display */
	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
		return;

	drm_modeset_lock_all(dev_priv->dev);
	/*
	 * Disabling the crtcs gracefully seems nicer. Also the
	 * g33 docs say we should at least disable all the planes.
	 */
	intel_display_suspend(dev_priv->dev);
}
3167
/*
 * Undo intel_prepare_reset() after the GPU reset has completed: complete
 * stale flips, restore plane state (or fully re-init the display on
 * platforms where the reset clobbered it) and drop the modeset locks taken
 * by intel_prepare_reset().
 */
void intel_finish_reset(struct drm_i915_private *dev_priv)
{
	/*
	 * Flips in the rings will be nuked by the reset,
	 * so complete all pending flips so that user space
	 * will get its events and not get stuck.
	 */
	intel_complete_page_flips(dev_priv);

	/* no reset support for gen2 */
	if (IS_GEN2(dev_priv))
		return;

	/* reset doesn't touch the display */
	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) {
		/*
		 * Flips in the rings have been nuked by the reset,
		 * so update the base address of all primary
		 * planes to the last fb to make sure we're
		 * showing the correct fb after a reset.
		 *
		 * FIXME: Atomic will make this obsolete since we won't schedule
		 * CS-based flips (which might get lost in gpu resets) any more.
		 */
		intel_update_primary_planes(dev_priv->dev);
		return;
	}

	/*
	 * The display has been reset as well,
	 * so need a full re-initialization.
	 */
	intel_runtime_pm_disable_interrupts(dev_priv);
	intel_runtime_pm_enable_interrupts(dev_priv);

	intel_modeset_init_hw(dev_priv->dev);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	intel_display_resume(dev_priv->dev);

	intel_hpd_init(dev_priv);

	/* pairs with drm_modeset_lock_all() in intel_prepare_reset() */
	drm_modeset_unlock_all(dev_priv->dev);
}
3216
7d5e3799
CW
3217static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
3218{
5a21b665
DV
3219 struct drm_device *dev = crtc->dev;
3220 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3221 unsigned reset_counter;
3222 bool pending;
3223
3224 reset_counter = i915_reset_counter(&to_i915(dev)->gpu_error);
3225 if (intel_crtc->reset_counter != reset_counter)
3226 return false;
3227
3228 spin_lock_irq(&dev->event_lock);
3229 pending = to_intel_crtc(crtc)->flip_work != NULL;
3230 spin_unlock_irq(&dev->event_lock);
3231
3232 return pending;
7d5e3799
CW
3233}
3234
bfd16b2a
ML
/*
 * Apply a fastboot/fastset pipe source size change: update PIPESRC and
 * enable/disable the panel fitter to match the new state, without a full
 * modeset.
 */
static void intel_update_pipe_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *old_crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc_state *pipe_config =
		to_intel_crtc_state(crtc->base.state);

	/* drm_atomic_helper_update_legacy_modeset_state might not be called. */
	crtc->base.mode = crtc->base.state->mode;

	DRM_DEBUG_KMS("Updating pipe size %ix%i -> %ix%i\n",
		      old_crtc_state->pipe_src_w, old_crtc_state->pipe_src_h,
		      pipe_config->pipe_src_w, pipe_config->pipe_src_h);

	/*
	 * Update pipe size and adjust fitter if needed: the reason for this is
	 * that in compute_mode_changes we check the native mode (not the pfit
	 * mode) to see if we can flip rather than do a full mode set. In the
	 * fastboot case, we'll flip, but if we don't update the pipesrc and
	 * pfit state, we'll end up with a big fb scanned out into the wrong
	 * sized surface.
	 */

	I915_WRITE(PIPESRC(crtc->pipe),
		   ((pipe_config->pipe_src_w - 1) << 16) |
		   (pipe_config->pipe_src_h - 1));

	/* on skylake this is done by detaching scalers */
	if (INTEL_INFO(dev)->gen >= 9) {
		skl_detach_scalers(crtc);

		if (pipe_config->pch_pfit.enabled)
			skylake_pfit_enable(crtc);
	} else if (HAS_PCH_SPLIT(dev)) {
		if (pipe_config->pch_pfit.enabled)
			ironlake_pfit_enable(crtc);
		else if (old_crtc_state->pch_pfit.enabled)
			ironlake_pfit_disable(crtc, true);
	}
}
3276
5e84e1a4
ZW
/*
 * Switch the FDI TX/RX link on @crtc's pipe from the training patterns to
 * the normal pixel-data pattern, after link training has completed.
 */
static void intel_fdi_normal_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable normal train */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if (IS_IVYBRIDGE(dev)) {
		/* IVB uses a separate train-select field */
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;
	}
	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

	/* wait one idle pattern time */
	POSTING_READ(reg);
	udelay(1000);

	/* IVB wants error correction enabled */
	if (IS_IVYBRIDGE(dev))
		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
			   FDI_FE_ERRC_ENABLE);
}
3318
8db9d77b
ZW
/* The FDI link training functions for ILK/Ibexpeak. */
/*
 * Run the two-phase FDI link training sequence (pattern 1 for bit lock,
 * pattern 2 for symbol lock), polling FDI_RX_IIR for each lock and logging
 * an error if either phase fails after 5 reads.
 */
static void ironlake_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp, tries;

	/* FDI needs bits from pipe first */
	assert_pipe_enabled(dev_priv, pipe);

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);
	I915_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Ironlake workaround, enable clock pointer after FDI enable*/
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
		   FDI_RX_PHASE_SYNC_POINTER_EN);

	/* poll for bit lock (train 1 result) */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if ((temp & FDI_RX_BIT_LOCK)) {
			DRM_DEBUG_KMS("FDI train 1 done.\n");
			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* poll for symbol lock (train 2 result) */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
			DRM_DEBUG_KMS("FDI train 2 done.\n");
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done\n");

}
3412
/*
 * TX voltage-swing / pre-emphasis settings tried in order during SNB-B
 * (and IVB) FDI link training until the receiver reports lock.
 */
static const int snb_b_fdi_train_param[] = {
	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};
3419
/* The FDI link training functions for SNB/Cougarpoint. */
/*
 * Two-phase FDI link training for SNB: for each phase, step through the
 * snb_b_fdi_train_param vswing/pre-emphasis table, polling FDI_RX_IIR up to
 * 5 times per setting for bit lock (train 1) then symbol lock (train 2).
 */
static void gen6_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, retry;

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
	/* SNB-B */
	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	I915_WRITE(FDI_RX_MISC(pipe),
		   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* train 1: try each vswing/pre-emphasis setting until bit lock */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_BIT_LOCK) {
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	if (IS_GEN6(dev)) {
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* SNB-B */
		temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_2;
	}
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* train 2: same vswing sweep, now waiting for symbol lock */
	for (i = 0; i < 4; i++) {
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[i];
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(500);

		for (retry = 0; retry < 5; retry++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
			if (temp & FDI_RX_SYMBOL_LOCK) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done.\n");
				break;
			}
			udelay(50);
		}
		if (retry < 5)
			break;
	}
	if (i == 4)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done.\n");
}
3552
/* Manual link training for Ivy Bridge A0 parts */
static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, j;

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
		      I915_READ(FDI_RX_IIR(pipe)));

	/* Try each vswing and preemphasis setting twice before moving on
	 * (snb_b_fdi_train_param[j/2] selects the setting; j parity gives
	 * the two attempts per setting). */
	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
		/* disable first in case we need to retry */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
		temp &= ~FDI_TX_ENABLE;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_AUTO;
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp &= ~FDI_RX_ENABLE;
		I915_WRITE(reg, temp);

		/* enable CPU FDI TX and PCH FDI RX */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_DP_PORT_WIDTH_MASK;
		temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
		temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		temp |= snb_b_fdi_train_param[j/2];
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_TX_ENABLE);

		I915_WRITE(FDI_RX_MISC(pipe),
			   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_RX_ENABLE);

		POSTING_READ(reg);
		udelay(1); /* should be 0.5us */

		/* Poll up to 4 times for bit lock; a second IIR read
		 * catches a lock that lands between the read and the test. */
		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_BIT_LOCK ||
			    (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
					      i);
				break;
			}
			udelay(1); /* should be 0.5us */
		}
		if (i == 4) {
			/* No bit lock at this vswing level - try the next. */
			DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
			continue;
		}

		/* Train 2 */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(2); /* should be 1.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_SYMBOL_LOCK ||
			    (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
					      i);
				goto train_done;
			}
			udelay(2); /* should be 1.5us */
		}
		if (i == 4)
			DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
	}

train_done:
	DRM_DEBUG_KMS("FDI train done.\n");
}
3672
/* Enable the FDI RX and TX PLLs for the given pipe's FDI link. */
static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
	/* Mirror the pipe's BPC setting into FDI RX (bits 18:16). */
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(200);

	/* Switch from Rawclk to PCDclk */
	temp = I915_READ(reg);
	I915_WRITE(reg, temp | FDI_PCDCLK);

	POSTING_READ(reg);
	udelay(200);

	/* Enable CPU FDI TX PLL, always on for Ironlake */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
		I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);

		POSTING_READ(reg);
		udelay(100);
	}
}
3709
/* Disable the FDI PLLs: drop back to Rawclk, then turn off TX and RX PLLs. */
static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* Switch from PCDclk to Rawclk */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_PCDCLK);

	/* Disable CPU FDI TX PLL */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);

	/* Wait for the clocks to turn off. */
	POSTING_READ(reg);
	udelay(100);
}
3739
/* Disable the FDI link for a pipe and park both ends in train pattern 1. */
static void ironlake_fdi_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* disable CPU FDI tx and PCH FDI rx */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
	POSTING_READ(reg);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(0x7 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	/* Ironlake workaround, disable clock pointer after downing FDI */
	if (HAS_PCH_IBX(dev))
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);

	/* still set train pattern 1 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	/* BPC in FDI rx is consistent with that in PIPECONF */
	temp &= ~(0x07 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(100);
}
3792
/*
 * Report whether any CRTC still has framebuffer unpin work outstanding.
 *
 * Returns true on the FIRST crtc found with a non-zero unpin_work_count
 * (the return sits inside the loop on purpose); if that crtc also has a
 * flip_work pending, wait one vblank first to give the flip a chance to
 * retire before reporting.
 */
bool intel_has_pending_fb_unpin(struct drm_device *dev)
{
	struct intel_crtc *crtc;

	/* Note that we don't need to be called with mode_config.lock here
	 * as our list of CRTC objects is static for the lifetime of the
	 * device and so cannot disappear as we iterate. Similarly, we can
	 * happily treat the predicates as racy, atomic checks as userspace
	 * cannot claim and pin a new fb without at least acquring the
	 * struct_mutex and so serialising with us.
	 */
	for_each_intel_crtc(dev, crtc) {
		if (atomic_read(&crtc->unpin_work_count) == 0)
			continue;

		if (crtc->flip_work)
			intel_wait_for_vblank(dev, crtc->pipe);

		return true;
	}

	return false;
}
3816
/*
 * Finish a page flip: detach the crtc's flip_work, deliver the userspace
 * completion event (if requested), release the vblank reference, wake any
 * waiters, and queue the buffer unpin to the driver workqueue.
 *
 * NOTE(review): callers in this file invoke this under dev->event_lock
 * (see intel_crtc_wait_for_pending_flips) - confirm all call sites do.
 */
static void page_flip_completed(struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	struct intel_flip_work *work = intel_crtc->flip_work;

	intel_crtc->flip_work = NULL;

	if (work->event)
		drm_crtc_send_vblank_event(&intel_crtc->base, work->event);

	drm_crtc_vblank_put(&intel_crtc->base);

	/* Unblock anyone sleeping in intel_crtc_wait_for_pending_flips(). */
	wake_up_all(&dev_priv->pending_flip_queue);
	queue_work(dev_priv->wq, &work->unpin_work);

	trace_i915_flip_complete(intel_crtc->plane,
				 work->pending_flip_obj);
}
3835
/*
 * Wait (interruptibly, up to 60 seconds) for any pending page flip on the
 * crtc to complete.
 *
 * Returns 0 on success or timeout handling, or a negative error if the
 * wait was interrupted. On timeout, a stuck non-mmio flip is forcibly
 * completed under dev->event_lock so the display pipeline can make
 * progress again.
 */
static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	long ret;

	WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));

	ret = wait_event_interruptible_timeout(
					dev_priv->pending_flip_queue,
					!intel_crtc_has_pending_flip(crtc),
					60*HZ);

	if (ret < 0)
		return ret;

	if (ret == 0) {
		/* Timed out: forcibly retire a stuck (non-mmio) flip. */
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
		struct intel_flip_work *work;

		spin_lock_irq(&dev->event_lock);
		work = intel_crtc->flip_work;
		if (work && !is_mmio_work(work)) {
			WARN_ONCE(1, "Removing stuck page flip\n");
			page_flip_completed(intel_crtc);
		}
		spin_unlock_irq(&dev->event_lock);
	}

	return 0;
}
3867
/* Gate the pixel clock and disable the iCLKIP SSC modulator via sideband. */
static void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
{
	u32 temp;

	/* Gate PIXCLK before touching the modulator. */
	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);

	mutex_lock(&dev_priv->sb_lock);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp |= SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}
3882
/* Program iCLKIP clock to the desired frequency */
static void lpt_program_iclkip(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	int clock = to_intel_crtc(crtc)->config->base.adjusted_mode.crtc_clock;
	u32 divsel, phaseinc, auxdiv, phasedir = 0;
	u32 temp;

	/* Modulator must be off (and PIXCLK gated) while we reprogram it. */
	lpt_disable_iclkip(dev_priv);

	/* The iCLK virtual clock root frequency is in MHz,
	 * but the adjusted_mode->crtc_clock is in KHz. To get the
	 * divisors, it is necessary to divide one by another, so we
	 * convert the virtual clock precision to KHz here for higher
	 * precision.
	 */
	for (auxdiv = 0; auxdiv < 2; auxdiv++) {
		u32 iclk_virtual_root_freq = 172800 * 1000;
		u32 iclk_pi_range = 64;
		u32 desired_divisor;

		desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
						    clock << auxdiv);
		divsel = (desired_divisor / iclk_pi_range) - 2;
		phaseinc = desired_divisor % iclk_pi_range;

		/*
		 * Near 20MHz is a corner case which is
		 * out of range for the 7-bit divisor
		 */
		if (divsel <= 0x7f)
			break;
	}

	/* This should not happen with any sane values */
	WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
		~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
	WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
		~SBI_SSCDIVINTPHASE_INCVAL_MASK);

	DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
			clock,
			auxdiv,
			divsel,
			phasedir,
			phaseinc);

	mutex_lock(&dev_priv->sb_lock);

	/* Program SSCDIVINTPHASE6 */
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);

	/* Program SSCAUXDIV */
	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);

	/* Enable modulator and associated divider */
	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp &= ~SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);

	/* Wait for initialization time */
	udelay(24);

	/* Ungate PIXCLK only after the modulator has settled. */
	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
}
3960
/*
 * Read back the currently programmed iCLKIP frequency in KHz.
 *
 * Returns 0 if PIXCLK is gated or the SSC modulator is disabled;
 * otherwise the inverse of the computation in lpt_program_iclkip().
 */
int lpt_get_iclkip(struct drm_i915_private *dev_priv)
{
	u32 divsel, phaseinc, auxdiv;
	u32 iclk_virtual_root_freq = 172800 * 1000;
	u32 iclk_pi_range = 64;
	u32 desired_divisor;
	u32 temp;

	if ((I915_READ(PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
		return 0;

	mutex_lock(&dev_priv->sb_lock);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	if (temp & SBI_SSCCTL_DISABLE) {
		mutex_unlock(&dev_priv->sb_lock);
		return 0;
	}

	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
		SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
	phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
		SBI_SSCDIVINTPHASE_INCVAL_SHIFT;

	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
		SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;

	mutex_unlock(&dev_priv->sb_lock);

	/* Invert the divsel/phaseinc encoding used when programming. */
	desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;

	return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
				 desired_divisor << auxdiv);
}
3997
/* Copy the CPU transcoder's mode timings into the PCH transcoder registers. */
static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
						enum pipe pch_transcoder)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;

	/* Horizontal timings. */
	I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
		   I915_READ(HTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
		   I915_READ(HBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
		   I915_READ(HSYNC(cpu_transcoder)));

	/* Vertical timings. */
	I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
		   I915_READ(VTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
		   I915_READ(VBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
		   I915_READ(VSYNC(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
		   I915_READ(VSYNCSHIFT(cpu_transcoder)));
}
4021
/*
 * Toggle FDI B/C lane bifurcation in SOUTH_CHICKEN1.
 *
 * No-op if the bit already matches @enable. Must only be called while
 * FDI RX on pipes B and C is disabled (asserted below).
 */
static void cpt_set_fdi_bc_bifurcation(struct drm_device *dev, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t temp;

	temp = I915_READ(SOUTH_CHICKEN1);
	/* !! normalizes the bit to 0/1 for comparison against @enable. */
	if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
		return;

	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);

	temp &= ~FDI_BC_BIFURCATION_SELECT;
	if (enable)
		temp |= FDI_BC_BIFURCATION_SELECT;

	DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis");
	I915_WRITE(SOUTH_CHICKEN1, temp);
	POSTING_READ(SOUTH_CHICKEN1);
}
4042
4043static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
4044{
4045 struct drm_device *dev = intel_crtc->base.dev;
1fbc0d78
DV
4046
4047 switch (intel_crtc->pipe) {
4048 case PIPE_A:
4049 break;
4050 case PIPE_B:
6e3c9717 4051 if (intel_crtc->config->fdi_lanes > 2)
003632d9 4052 cpt_set_fdi_bc_bifurcation(dev, false);
1fbc0d78 4053 else
003632d9 4054 cpt_set_fdi_bc_bifurcation(dev, true);
1fbc0d78
DV
4055
4056 break;
4057 case PIPE_C:
003632d9 4058 cpt_set_fdi_bc_bifurcation(dev, true);
1fbc0d78
DV
4059
4060 break;
4061 default:
4062 BUG();
4063 }
4064}
4065
/* Return which DP Port should be selected for Transcoder DP control */
/*
 * NOTE(review): on failure this returns -1 coerced into enum port; the
 * only caller in this file (ironlake_pch_enable) BUG()s on any value
 * outside PORT_B..PORT_D, so the sentinel never propagates further.
 */
static enum port
intel_trans_dp_port_sel(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct intel_encoder *encoder;

	/* Pick the first DP or eDP encoder attached to this crtc. */
	for_each_encoder_on_crtc(dev, crtc, encoder) {
		if (encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
		    encoder->type == INTEL_OUTPUT_EDP)
			return enc_to_dig_port(&encoder->base)->port;
	}

	return -1;
}
4081
/*
 * Enable PCH resources required for PCH ports:
 * - PCH PLLs
 * - FDI training & RX/TX
 * - update transcoder timings
 * - DP transcoding bits
 * - transcoder
 */
static void ironlake_pch_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 temp;

	assert_pch_transcoder_disabled(dev_priv, pipe);

	if (IS_IVYBRIDGE(dev))
		ivybridge_update_fdi_bc_bifurcation(intel_crtc);

	/* Write the TU size bits before fdi link training, so that error
	 * detection works. */
	I915_WRITE(FDI_RX_TUSIZE1(pipe),
		   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

	/* For PCH output, training FDI link */
	dev_priv->display.fdi_link_train(crtc);

	/* We need to program the right clock selection before writing the pixel
	 * mutliplier into the DPLL. */
	if (HAS_PCH_CPT(dev)) {
		u32 sel;

		/* Route DPLL A or B to this pipe's PCH transcoder. */
		temp = I915_READ(PCH_DPLL_SEL);
		temp |= TRANS_DPLL_ENABLE(pipe);
		sel = TRANS_DPLLB_SEL(pipe);
		if (intel_crtc->config->shared_dpll ==
		    intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
			temp |= sel;
		else
			temp &= ~sel;
		I915_WRITE(PCH_DPLL_SEL, temp);
	}

	/* XXX: pch pll's can be enabled any time before we enable the PCH
	 * transcoder, and we actually should do this to not upset any PCH
	 * transcoder that already use the clock when we share it.
	 *
	 * Note that enable_shared_dpll tries to do the right thing, but
	 * get_shared_dpll unconditionally resets the pll - we need that to have
	 * the right LVDS enable sequence. */
	intel_enable_shared_dpll(intel_crtc);

	/* set transcoder timing, panel must allow it */
	assert_panel_unlocked(dev_priv, pipe);
	ironlake_pch_transcoder_set_timings(intel_crtc, pipe);

	intel_fdi_normal_train(crtc);

	/* For PCH DP, enable TRANS_DP_CTL */
	if (HAS_PCH_CPT(dev) && intel_crtc->config->has_dp_encoder) {
		const struct drm_display_mode *adjusted_mode =
			&intel_crtc->config->base.adjusted_mode;
		u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
		i915_reg_t reg = TRANS_DP_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(TRANS_DP_PORT_SEL_MASK |
			  TRANS_DP_SYNC_MASK |
			  TRANS_DP_BPC_MASK);
		temp |= TRANS_DP_OUTPUT_ENABLE;
		temp |= bpc << 9; /* same format but at 11:9 */

		/* Mirror the sync polarities from the adjusted mode. */
		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;

		switch (intel_trans_dp_port_sel(crtc)) {
		case PORT_B:
			temp |= TRANS_DP_PORT_SEL_B;
			break;
		case PORT_C:
			temp |= TRANS_DP_PORT_SEL_C;
			break;
		case PORT_D:
			temp |= TRANS_DP_PORT_SEL_D;
			break;
		default:
			BUG();
		}

		I915_WRITE(reg, temp);
	}

	ironlake_enable_pch_transcoder(dev_priv, pipe);
}
4179
/*
 * LPT variant of PCH enable: program iCLKIP, copy the timings into the
 * (single, transcoder A) PCH transcoder, and enable it.
 */
static void lpt_pch_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;

	/* LPT has only one PCH transcoder, hard-wired as TRANSCODER_A. */
	assert_pch_transcoder_disabled(dev_priv, TRANSCODER_A);

	lpt_program_iclkip(crtc);

	/* Set transcoder timing. */
	ironlake_pch_transcoder_set_timings(intel_crtc, PIPE_A);

	lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
}
4196
a1520318 4197static void cpt_verify_modeset(struct drm_device *dev, int pipe)
d4270e57
JB
4198{
4199 struct drm_i915_private *dev_priv = dev->dev_private;
f0f59a00 4200 i915_reg_t dslreg = PIPEDSL(pipe);
d4270e57
JB
4201 u32 temp;
4202
4203 temp = I915_READ(dslreg);
4204 udelay(500);
4205 if (wait_for(I915_READ(dslreg) != temp, 5)) {
d4270e57 4206 if (wait_for(I915_READ(dslreg) != temp, 5))
84f44ce7 4207 DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
d4270e57
JB
4208 }
4209}
4210
/*
 * Stage a scaler allocation or release in @crtc_state for one user
 * (the crtc itself or a plane, identified by @scaler_user).
 *
 * Returns 0 on success, -EINVAL if the requested src/dst sizes fall
 * outside the hardware scaler limits. Only crtc_state bookkeeping is
 * touched here; the actual scaler registers are programmed later.
 */
static int
skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
		  unsigned scaler_user, int *scaler_id, unsigned int rotation,
		  int src_w, int src_h, int dst_w, int dst_h)
{
	struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(crtc_state->base.crtc);
	int need_scaling;

	/* With 90/270 rotation the source axes are swapped before compare. */
	need_scaling = intel_rotation_90_or_270(rotation) ?
		(src_h != dst_w || src_w != dst_h):
		(src_w != dst_w || src_h != dst_h);

	/*
	 * if plane is being disabled or scaler is no more required or force detach
	 *  - free scaler binded to this plane/crtc
	 *  - in order to do this, update crtc->scaler_usage
	 *
	 * Here scaler state in crtc_state is set free so that
	 * scaler can be assigned to other user. Actual register
	 * update to free the scaler is done in plane/panel-fit programming.
	 * For this purpose crtc/plane_state->scaler_id isn't reset here.
	 */
	if (force_detach || !need_scaling) {
		if (*scaler_id >= 0) {
			scaler_state->scaler_users &= ~(1 << scaler_user);
			scaler_state->scalers[*scaler_id].in_use = 0;

			DRM_DEBUG_KMS("scaler_user index %u.%u: "
				"Staged freeing scaler id %d scaler_users = 0x%x\n",
				intel_crtc->pipe, scaler_user, *scaler_id,
				scaler_state->scaler_users);
			*scaler_id = -1;
		}
		return 0;
	}

	/* range checks */
	if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
		dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||

		src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
		dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H) {
		DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
			"size is out of scaler range\n",
			intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h);
		return -EINVAL;
	}

	/* mark this plane as a scaler user in crtc_state */
	scaler_state->scaler_users |= (1 << scaler_user);
	DRM_DEBUG_KMS("scaler_user index %u.%u: "
		"staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
		intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
		scaler_state->scaler_users);

	return 0;
}
4271
/**
 * skl_update_scaler_crtc - Stages update to scaler state for a given crtc.
 *
 * @state: crtc's atomic state (the staged scaler bookkeeping lives in
 *         state->scaler_state)
 *
 * Return
 *     0 - scaler_usage updated successfully
 *     error - requested scaling cannot be supported or other error condition
 */
int skl_update_scaler_crtc(struct intel_crtc_state *state)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(state->base.crtc);
	const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;

	DRM_DEBUG_KMS("Updating scaler for [CRTC:%d:%s] scaler_user index %u.%u\n",
		      intel_crtc->base.base.id, intel_crtc->base.name,
		      intel_crtc->pipe, SKL_CRTC_INDEX);

	/* Detach when the crtc is inactive; crtc never uses rotation. */
	return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
		&state->scaler_state.scaler_id, BIT(DRM_ROTATE_0),
		state->pipe_src_w, state->pipe_src_h,
		adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_vdisplay);
}
4295
/**
 * skl_update_scaler_plane - Stages update to scaler state for a given plane.
 *
 * @crtc_state: crtc's atomic state holding the staged scaler bookkeeping
 * @plane_state: atomic plane state to update
 *
 * Return
 *     0 - scaler_usage updated successfully
 *     error - requested scaling cannot be supported or other error condition
 */
static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
				   struct intel_plane_state *plane_state)
{

	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
	struct intel_plane *intel_plane =
		to_intel_plane(plane_state->base.plane);
	struct drm_framebuffer *fb = plane_state->base.fb;
	int ret;

	/* A plane with no fb or not visible releases its scaler. */
	bool force_detach = !fb || !plane_state->visible;

	DRM_DEBUG_KMS("Updating scaler for [PLANE:%d:%s] scaler_user index %u.%u\n",
		      intel_plane->base.base.id, intel_plane->base.name,
		      intel_crtc->pipe, drm_plane_index(&intel_plane->base));

	/* src rect is in 16.16 fixed point; shift down to integer pixels. */
	ret = skl_update_scaler(crtc_state, force_detach,
				drm_plane_index(&intel_plane->base),
				&plane_state->scaler_id,
				plane_state->base.rotation,
				drm_rect_width(&plane_state->src) >> 16,
				drm_rect_height(&plane_state->src) >> 16,
				drm_rect_width(&plane_state->dst),
				drm_rect_height(&plane_state->dst));

	if (ret || plane_state->scaler_id < 0)
		return ret;

	/* check colorkey */
	if (plane_state->ckey.flags != I915_SET_COLORKEY_NONE) {
		DRM_DEBUG_KMS("[PLANE:%d:%s] scaling with color key not allowed",
			      intel_plane->base.base.id,
			      intel_plane->base.name);
		return -EINVAL;
	}

	/* Check src format */
	switch (fb->pixel_format) {
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ABGR8888:
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_YUYV:
	case DRM_FORMAT_YVYU:
	case DRM_FORMAT_UYVY:
	case DRM_FORMAT_VYUY:
		break;
	default:
		DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
			      intel_plane->base.base.id, intel_plane->base.name,
			      fb->base.id, fb->pixel_format);
		return -EINVAL;
	}

	return 0;
}
4365
e435d6e5
ML
4366static void skylake_scaler_disable(struct intel_crtc *crtc)
4367{
4368 int i;
4369
4370 for (i = 0; i < crtc->num_scalers; i++)
4371 skl_detach_scaler(crtc, i);
4372}
4373
/*
 * Program the SKL panel fitter (pipe scaler) using the scaler staged
 * in crtc->config->scaler_state. No-op unless pch_pfit is enabled.
 */
static void skylake_pfit_enable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;
	struct intel_crtc_scaler_state *scaler_state =
		&crtc->config->scaler_state;

	DRM_DEBUG_KMS("for crtc_state = %p\n", crtc->config);

	if (crtc->config->pch_pfit.enabled) {
		int id;

		/* A scaler must have been reserved by the atomic check. */
		if (WARN_ON(crtc->config->scaler_state.scaler_id < 0)) {
			DRM_ERROR("Requesting pfit without getting a scaler first\n");
			return;
		}

		id = scaler_state->scaler_id;
		I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
			PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
		I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc->config->pch_pfit.pos);
		I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc->config->pch_pfit.size);

		DRM_DEBUG_KMS("for crtc_state = %p scaler_id = %d\n", crtc->config, id);
	}
}
4401
/* Program the ILK-style panel fitter if enabled in the crtc config. */
static void ironlake_pfit_enable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;

	if (crtc->config->pch_pfit.enabled) {
		/* Force use of hard-coded filter coefficients
		 * as some pre-programmed values are broken,
		 * e.g. x201.
		 */
		if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
						 PF_PIPE_SEL_IVB(pipe));
		else
			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
		I915_WRITE(PF_WIN_POS(pipe), crtc->config->pch_pfit.pos);
		I915_WRITE(PF_WIN_SZ(pipe), crtc->config->pch_pfit.size);
	}
}
4422
/*
 * Enable Intermediate Pixel Storage (IPS) if the crtc config asks for it.
 * On Broadwell this goes through the pcode mailbox; on Haswell it's a
 * direct IPS_CTL write followed by a wait for the enable bit.
 */
void hsw_enable_ips(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!crtc->config->ips_enabled)
		return;

	/*
	 * We can only enable IPS after we enable a plane and wait for a vblank
	 * This function is called from post_plane_update, which is run after
	 * a vblank wait.
	 */

	assert_plane_enabled(dev_priv, crtc->plane);
	if (IS_BROADWELL(dev)) {
		mutex_lock(&dev_priv->rps.hw_lock);
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0xc0000000));
		mutex_unlock(&dev_priv->rps.hw_lock);
		/* Quoting Art Runyan: "its not safe to expect any particular
		 * value in IPS_CTL bit 31 after enabling IPS through the
		 * mailbox." Moreover, the mailbox may return a bogus state,
		 * so we need to just enable it and continue on.
		 */
	} else {
		I915_WRITE(IPS_CTL, IPS_ENABLE);
		/* The bit only becomes 1 in the next vblank, so this wait here
		 * is essentially intel_wait_for_vblank. If we don't have this
		 * and don't wait for vblanks until the end of crtc_enable, then
		 * the HW state readout code will complain that the expected
		 * IPS_CTL value is not the one we read. */
		if (wait_for(I915_READ_NOTRACE(IPS_CTL) & IPS_ENABLE, 50))
			DRM_ERROR("Timed out waiting for IPS enable\n");
	}
}
4458
/*
 * hsw_disable_ips - disable Intermediate Pixel Storage for @crtc.
 *
 * Mirror of hsw_enable_ips(): BDW disables via the pcode mailbox and
 * polls IPS_CTL for completion; HSW clears IPS_CTL directly. Ends with
 * a vblank wait because the plane may only be disabled afterwards.
 */
20bc8673 4459void hsw_disable_ips(struct intel_crtc *crtc)
d77e4531
PZ
4460{
4461 struct drm_device *dev = crtc->base.dev;
4462 struct drm_i915_private *dev_priv = dev->dev_private;
4463
6e3c9717 4464 if (!crtc->config->ips_enabled)
d77e4531
PZ
4465 return;
4466
4467 assert_plane_enabled(dev_priv, crtc->plane);
23d0b130 4468 if (IS_BROADWELL(dev)) {
2a114cc1
BW
4469 mutex_lock(&dev_priv->rps.hw_lock);
4470 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
4471 mutex_unlock(&dev_priv->rps.hw_lock);
23d0b130
BW
4472 /* wait for pcode to finish disabling IPS, which may take up to 42ms */
4473 if (wait_for((I915_READ(IPS_CTL) & IPS_ENABLE) == 0, 42))
4474 DRM_ERROR("Timed out waiting for IPS disable\n");
e59150dc 4475 } else {
2a114cc1 4476 I915_WRITE(IPS_CTL, 0);
e59150dc
JB
4477 POSTING_READ(IPS_CTL);
4478 }
d77e4531
PZ
4479
4480 /* We need to wait for a vblank before we can disable the plane. */
4481 intel_wait_for_vblank(dev, crtc->pipe);
4482}
4483
/*
 * intel_crtc_dpms_overlay_disable - turn off the legacy video overlay, if any.
 *
 * Temporarily makes GEM waits non-interruptible around the switch-off so
 * the overlay teardown cannot be aborted by a signal; the return value of
 * intel_overlay_switch_off() is deliberately ignored (best effort).
 */
7cac945f 4484static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
d3eedb1a 4485{
7cac945f 4486 if (intel_crtc->overlay) {
d3eedb1a
VS
4487 struct drm_device *dev = intel_crtc->base.dev;
4488 struct drm_i915_private *dev_priv = dev->dev_private;
4489
4490 mutex_lock(&dev->struct_mutex);
4491 dev_priv->mm.interruptible = false;
4492 (void) intel_overlay_switch_off(intel_crtc->overlay);
4493 dev_priv->mm.interruptible = true;
4494 mutex_unlock(&dev->struct_mutex);
4495 }
4496
4497 /* Let userspace switch the overlay on again. In most cases userspace
4498 * has to recompute where to put it anyway.
4499 */
4500}
4501
87d4300a
ML
4502/**
4503 * intel_post_enable_primary - Perform operations after enabling primary plane
4504 * @crtc: the CRTC whose primary plane was just enabled
4505 *
4506 * Performs potentially sleeping operations that must be done after the primary
4507 * plane is enabled, such as updating FBC and IPS. Note that this may be
4508 * called due to an explicit primary plane update, or due to an implicit
4509 * re-enable that is caused when a sprite plane is updated to no longer
4510 * completely hide the primary plane.
4511 */
4512static void
4513intel_post_enable_primary(struct drm_crtc *crtc)
a5c4d7bc
VS
4514{
4515 struct drm_device *dev = crtc->dev;
87d4300a 4516 struct drm_i915_private *dev_priv = dev->dev_private;
a5c4d7bc
VS
4517 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4518 int pipe = intel_crtc->pipe;
a5c4d7bc 4519
87d4300a
ML
4520 /*
4521 * FIXME IPS should be fine as long as one plane is
4522 * enabled, but in practice it seems to have problems
4523 * when going from primary only to sprite only and vice
4524 * versa.
4525 */
a5c4d7bc
VS
4526 hsw_enable_ips(intel_crtc);
4527
f99d7069 4528 /*
87d4300a
ML
4529 * Gen2 reports pipe underruns whenever all planes are disabled.
4530 * So don't enable underrun reporting before at least some planes
4531 * are enabled.
4532 * FIXME: Need to fix the logic to work when we turn off all planes
4533 * but leave the pipe running.
f99d7069 4534 */
87d4300a
ML
4535 if (IS_GEN2(dev))
4536 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
4537
aca7b684
VS
4538 /* Underruns don't always raise interrupts, so check manually. */
4539 intel_check_cpu_fifo_underruns(dev_priv);
4540 intel_check_pch_fifo_underruns(dev_priv);
a5c4d7bc
VS
4541}
4542
2622a081 4543/* FIXME move all this to pre_plane_update() with proper state tracking */
/*
 * intel_pre_disable_primary - counterpart of intel_post_enable_primary().
 *
 * Runs the potentially sleeping steps required before the primary plane
 * is turned off: suppress gen2 underrun reporting and disable IPS.
 */
87d4300a
ML
4544static void
4545intel_pre_disable_primary(struct drm_crtc *crtc)
a5c4d7bc
VS
4546{
4547 struct drm_device *dev = crtc->dev;
4548 struct drm_i915_private *dev_priv = dev->dev_private;
4549 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4550 int pipe = intel_crtc->pipe;
a5c4d7bc 4551
87d4300a
ML
4552 /*
4553 * Gen2 reports pipe underruns whenever all planes are disabled.
4554 * So disable underrun reporting before all the planes get disabled.
4555 * FIXME: Need to fix the logic to work when we turn off all planes
4556 * but leave the pipe running.
4557 */
4558 if (IS_GEN2(dev))
4559 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
a5c4d7bc 4560
2622a081
VS
4561 /*
4562 * FIXME IPS should be fine as long as one plane is
4563 * enabled, but in practice it seems to have problems
4564 * when going from primary only to sprite only and vice
4565 * versa.
4566 */
4567 hsw_disable_ips(intel_crtc);
4568}
4569
4570/* FIXME get rid of this and use pre_plane_update */
/*
 * intel_pre_disable_primary_noatomic - non-atomic variant of the pre-disable
 * path: runs intel_pre_disable_primary() and additionally drops memory
 * self-refresh (cxsr) on GMCH platforms so the plane disable actually lands.
 */
4571static void
4572intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
4573{
4574 struct drm_device *dev = crtc->dev;
4575 struct drm_i915_private *dev_priv = dev->dev_private;
4576 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4577 int pipe = intel_crtc->pipe;
4578
4579 intel_pre_disable_primary(crtc);
4580
87d4300a
ML
4581 /*
4582 * Vblank time updates from the shadow to live plane control register
4583 * are blocked if the memory self-refresh mode is active at that
4584 * moment. So to make sure the plane gets truly disabled, disable
4585 * first the self-refresh mode. The self-refresh enable bit in turn
4586 * will be checked/applied by the HW only at the next frame start
4587 * event which is after the vblank start event, so we need to have a
4588 * wait-for-vblank between disabling the plane and the pipe.
4589 */
262cd2e1 4590 if (HAS_GMCH_DISPLAY(dev)) {
87d4300a 4591 intel_set_memory_cxsr(dev_priv, false);
262cd2e1
VS
4592 dev_priv->wm.vlv.cxsr = false;
4593 intel_wait_for_vblank(dev, pipe);
4594 }
87d4300a
ML
4595}
4596
5a21b665
DV
/*
 * intel_post_plane_update - post-commit plane bookkeeping for one crtc.
 *
 * Flushes frontbuffer tracking, re-allows cxsr, does the post-vblank
 * watermark update, and runs FBC/primary-plane post-enable work when the
 * primary plane just became visible (or a modeset happened).
 */
4597static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
4598{
4599 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
4600 struct drm_atomic_state *old_state = old_crtc_state->base.state;
4601 struct intel_crtc_state *pipe_config =
4602 to_intel_crtc_state(crtc->base.state);
4603 struct drm_device *dev = crtc->base.dev;
4604 struct drm_plane *primary = crtc->base.primary;
4605 struct drm_plane_state *old_pri_state =
4606 drm_atomic_get_existing_plane_state(old_state, primary);
4607
4608 intel_frontbuffer_flip(dev, pipe_config->fb_bits);
4609
4610 crtc->wm.cxsr_allowed = true;
4611
4612 if (pipe_config->update_wm_post && pipe_config->base.active)
4613 intel_update_watermarks(&crtc->base);
4614
/* Only act on the primary plane if it was part of this atomic state. */
4615 if (old_pri_state) {
4616 struct intel_plane_state *primary_state =
4617 to_intel_plane_state(primary->state);
4618 struct intel_plane_state *old_primary_state =
4619 to_intel_plane_state(old_pri_state);
4620
4621 intel_fbc_post_update(crtc);
4622
4623 if (primary_state->visible &&
4624 (needs_modeset(&pipe_config->base) ||
4625 !old_primary_state->visible))
4626 intel_post_enable_primary(&crtc->base);
4627 }
4628}
4629
/*
 * intel_pre_plane_update - pre-commit plane bookkeeping for one crtc.
 *
 * Runs FBC pre-update and the primary-plane pre-disable path, drops cxsr
 * where required, applies the IVB sprite-scaling LP-watermark workaround,
 * and programs intermediate watermarks for the upcoming flip (skipped
 * entirely on a full modeset).
 */
5c74cd73 4630static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state)
ac21b225 4631{
5c74cd73 4632 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
ac21b225 4633 struct drm_device *dev = crtc->base.dev;
eddfcbcd 4634 struct drm_i915_private *dev_priv = dev->dev_private;
ab1d3a0e
ML
4635 struct intel_crtc_state *pipe_config =
4636 to_intel_crtc_state(crtc->base.state);
5c74cd73
ML
4637 struct drm_atomic_state *old_state = old_crtc_state->base.state;
4638 struct drm_plane *primary = crtc->base.primary;
4639 struct drm_plane_state *old_pri_state =
4640 drm_atomic_get_existing_plane_state(old_state, primary);
4641 bool modeset = needs_modeset(&pipe_config->base);
ac21b225 4642
5c74cd73
ML
4643 if (old_pri_state) {
4644 struct intel_plane_state *primary_state =
4645 to_intel_plane_state(primary->state);
4646 struct intel_plane_state *old_primary_state =
4647 to_intel_plane_state(old_pri_state);
4648
faf68d92 4649 intel_fbc_pre_update(crtc, pipe_config, primary_state);
31ae71fc 4650
5c74cd73
ML
4651 if (old_primary_state->visible &&
4652 (modeset || !primary_state->visible))
4653 intel_pre_disable_primary(&crtc->base);
4654 }
852eb00d 4655
a4015f9a 4656 if (pipe_config->disable_cxsr && HAS_GMCH_DISPLAY(dev)) {
852eb00d 4657 crtc->wm.cxsr_allowed = false;
2dfd178d 4658
2622a081
VS
4659 /*
4660 * Vblank time updates from the shadow to live plane control register
4661 * are blocked if the memory self-refresh mode is active at that
4662 * moment. So to make sure the plane gets truly disabled, disable
4663 * first the self-refresh mode. The self-refresh enable bit in turn
4664 * will be checked/applied by the HW only at the next frame start
4665 * event which is after the vblank start event, so we need to have a
4666 * wait-for-vblank between disabling the plane and the pipe.
4667 */
4668 if (old_crtc_state->base.active) {
2dfd178d 4669 intel_set_memory_cxsr(dev_priv, false);
2622a081
VS
4670 dev_priv->wm.vlv.cxsr = false;
4671 intel_wait_for_vblank(dev, crtc->pipe);
4672 }
852eb00d 4673 }
92826fcd 4674
ed4a6a7c
MR
4675 /*
4676 * IVB workaround: must disable low power watermarks for at least
4677 * one frame before enabling scaling. LP watermarks can be re-enabled
4678 * when scaling is disabled.
4679 *
4680 * WaCxSRDisabledForSpriteScaling:ivb
4681 */
4682 if (pipe_config->disable_lp_wm) {
4683 ilk_disable_lp_wm(dev);
4684 intel_wait_for_vblank(dev, crtc->pipe);
4685 }
4686
4687 /*
4688 * If we're doing a modeset, we're done. No need to do any pre-vblank
4689 * watermark programming here.
4690 */
4691 if (needs_modeset(&pipe_config->base))
4692 return;
4693
4694 /*
4695 * For platforms that support atomic watermarks, program the
4696 * 'intermediate' watermarks immediately. On pre-gen9 platforms, these
4697 * will be the intermediate values that are safe for both pre- and
4698 * post- vblank; when vblank happens, the 'active' values will be set
4699 * to the final 'target' values and we'll do this again to get the
4700 * optimal watermarks. For gen9+ platforms, the values we program here
4701 * will be the final target values which will get automatically latched
4702 * at vblank time; no further programming will be necessary.
4703 *
4704 * If a platform hasn't been transitioned to atomic watermarks yet,
4705 * we'll continue to update watermarks the old way, if flags tell
4706 * us to.
4707 */
4708 if (dev_priv->display.initial_watermarks != NULL)
4709 dev_priv->display.initial_watermarks(pipe_config);
caed361d 4710 else if (pipe_config->update_wm_pre)
92826fcd 4711 intel_update_watermarks(&crtc->base);
ac21b225
ML
4712}
4713
/*
 * intel_crtc_disable_planes - disable the overlay plus every plane in
 * @plane_mask on @crtc, then flush frontbuffer tracking for the pipe.
 */
d032ffa0 4714static void intel_crtc_disable_planes(struct drm_crtc *crtc, unsigned plane_mask)
87d4300a
ML
4715{
4716 struct drm_device *dev = crtc->dev;
4717 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
d032ffa0 4718 struct drm_plane *p;
87d4300a
ML
4719 int pipe = intel_crtc->pipe;
4720
7cac945f 4721 intel_crtc_dpms_overlay_disable(intel_crtc);
27321ae8 4722
d032ffa0
ML
4723 drm_for_each_plane_mask(p, dev, plane_mask)
4724 to_intel_plane(p)->disable_plane(p, crtc);
f98551ae 4725
f99d7069
DV
4726 /*
4727 * FIXME: Once we grow proper nuclear flip support out of this we need
4728 * to compute the mask of flip planes precisely. For the time being
4729 * consider this a flip to a NULL plane.
4730 */
4731 intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe));
a5c4d7bc
VS
4732}
4733
f67a559d
JB
/*
 * ironlake_crtc_enable - full ILK/SNB/IVB crtc enable sequence.
 *
 * Order matters throughout: underrun reporting is suppressed first,
 * DPLL/M-N/timings are programmed before the pipe, encoder pre_enable
 * hooks and the FDI PLL come before pipe enable, the LUT is loaded with
 * clocks running but the pipe still off, and the PCH/encoders/vblank are
 * brought up last.
 */
4734static void ironlake_crtc_enable(struct drm_crtc *crtc)
4735{
4736 struct drm_device *dev = crtc->dev;
4737 struct drm_i915_private *dev_priv = dev->dev_private;
4738 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
ef9c3aee 4739 struct intel_encoder *encoder;
f67a559d 4740 int pipe = intel_crtc->pipe;
b95c5321
ML
4741 struct intel_crtc_state *pipe_config =
4742 to_intel_crtc_state(crtc->state);
f67a559d 4743
53d9f4e9 4744 if (WARN_ON(intel_crtc->active))
f67a559d
JB
4745 return;
4746
b2c0593a
VS
4747 /*
4748 * Sometimes spurious CPU pipe underruns happen during FDI
4749 * training, at least with VGA+HDMI cloning. Suppress them.
4750 *
4751 * On ILK we get an occasional spurious CPU pipe underruns
4752 * between eDP port A enable and vdd enable. Also PCH port
4753 * enable seems to result in the occasional CPU pipe underrun.
4754 *
4755 * Spurious PCH underruns also occur during PCH enabling.
4756 */
4757 if (intel_crtc->config->has_pch_encoder || IS_GEN5(dev_priv))
4758 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
81b088ca
VS
4759 if (intel_crtc->config->has_pch_encoder)
4760 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
4761
6e3c9717 4762 if (intel_crtc->config->has_pch_encoder)
b14b1055
DV
4763 intel_prepare_shared_dpll(intel_crtc);
4764
6e3c9717 4765 if (intel_crtc->config->has_dp_encoder)
fe3cd48d 4766 intel_dp_set_m_n(intel_crtc, M1_N1);
29407aab
DV
4767
4768 intel_set_pipe_timings(intel_crtc);
bc58be60 4769 intel_set_pipe_src_size(intel_crtc);
29407aab 4770
6e3c9717 4771 if (intel_crtc->config->has_pch_encoder) {
29407aab 4772 intel_cpu_transcoder_set_m_n(intel_crtc,
6e3c9717 4773 &intel_crtc->config->fdi_m_n, NULL);
29407aab
DV
4774 }
4775
4776 ironlake_set_pipeconf(crtc);
4777
f67a559d 4778 intel_crtc->active = true;
8664281b 4779
f6736a1a 4780 for_each_encoder_on_crtc(dev, crtc, encoder)
952735ee
DV
4781 if (encoder->pre_enable)
4782 encoder->pre_enable(encoder);
f67a559d 4783
6e3c9717 4784 if (intel_crtc->config->has_pch_encoder) {
fff367c7
DV
4785 /* Note: FDI PLL enabling _must_ be done before we enable the
4786 * cpu pipes, hence this is separate from all the other fdi/pch
4787 * enabling. */
88cefb6c 4788 ironlake_fdi_pll_enable(intel_crtc);
46b6f814
DV
4789 } else {
4790 assert_fdi_tx_disabled(dev_priv, pipe);
4791 assert_fdi_rx_disabled(dev_priv, pipe);
4792 }
f67a559d 4793
b074cec8 4794 ironlake_pfit_enable(intel_crtc);
f67a559d 4795
9c54c0dd
JB
4796 /*
4797 * On ILK+ LUT must be loaded before the pipe is running but with
4798 * clocks enabled
4799 */
b95c5321 4800 intel_color_load_luts(&pipe_config->base);
9c54c0dd 4801
1d5bf5d9
ID
4802 if (dev_priv->display.initial_watermarks != NULL)
4803 dev_priv->display.initial_watermarks(intel_crtc->config);
e1fdc473 4804 intel_enable_pipe(intel_crtc);
f67a559d 4805
6e3c9717 4806 if (intel_crtc->config->has_pch_encoder)
f67a559d 4807 ironlake_pch_enable(crtc);
c98e9dcf 4808
f9b61ff6
DV
4809 assert_vblank_disabled(crtc);
4810 drm_crtc_vblank_on(crtc);
4811
fa5c73b1
DV
4812 for_each_encoder_on_crtc(dev, crtc, encoder)
4813 encoder->enable(encoder);
61b77ddd
DV
4814
4815 if (HAS_PCH_CPT(dev))
a1520318 4816 cpt_verify_modeset(dev, intel_crtc->pipe);
37ca8d4c
VS
4817
4818 /* Must wait for vblank to avoid spurious PCH FIFO underruns */
4819 if (intel_crtc->config->has_pch_encoder)
4820 intel_wait_for_vblank(dev, pipe);
b2c0593a 4821 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
37ca8d4c 4822 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
6be4a607
JB
4823}
4824
42db64ef
PZ
4825/* IPS only exists on ULT machines and is tied to pipe A. */
/* Returns true when this crtc can ever use IPS (HAS_IPS platform + pipe A). */
4826static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
4827{
f5adf94e 4828 return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A;
42db64ef
PZ
4829}
4830
4f771f10
PZ
/*
 * haswell_crtc_enable - full HSW/BDW/SKL+ crtc enable sequence.
 *
 * Like ironlake_crtc_enable() but DDI-based: shared DPLL and encoder
 * pre_pll_enable hooks first, timings/pipemisc/CSC before the pipe,
 * FDI training only for the PCH (VGA) path, DDI transcoder function
 * enabled before the pipe, and the HSW dual-pipe workaround vblank
 * waits at the very end. DSI transcoders skip the pipe-level steps
 * (handled by the DSI encoder instead).
 */
4831static void haswell_crtc_enable(struct drm_crtc *crtc)
4832{
4833 struct drm_device *dev = crtc->dev;
4834 struct drm_i915_private *dev_priv = dev->dev_private;
4835 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4836 struct intel_encoder *encoder;
99d736a2 4837 int pipe = intel_crtc->pipe, hsw_workaround_pipe;
4d1de975 4838 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
99d736a2
ML
4839 struct intel_crtc_state *pipe_config =
4840 to_intel_crtc_state(crtc->state);
4f771f10 4841
53d9f4e9 4842 if (WARN_ON(intel_crtc->active))
4f771f10
PZ
4843 return;
4844
81b088ca
VS
4845 if (intel_crtc->config->has_pch_encoder)
4846 intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
4847 false);
4848
95a7a2ae
ID
4849 for_each_encoder_on_crtc(dev, crtc, encoder)
4850 if (encoder->pre_pll_enable)
4851 encoder->pre_pll_enable(encoder);
4852
8106ddbd 4853 if (intel_crtc->config->shared_dpll)
df8ad70c
DV
4854 intel_enable_shared_dpll(intel_crtc);
4855
6e3c9717 4856 if (intel_crtc->config->has_dp_encoder)
fe3cd48d 4857 intel_dp_set_m_n(intel_crtc, M1_N1);
229fca97 4858
4d1de975
JN
4859 if (!intel_crtc->config->has_dsi_encoder)
4860 intel_set_pipe_timings(intel_crtc);
4861
bc58be60 4862 intel_set_pipe_src_size(intel_crtc);
229fca97 4863
4d1de975
JN
4864 if (cpu_transcoder != TRANSCODER_EDP &&
4865 !transcoder_is_dsi(cpu_transcoder)) {
4866 I915_WRITE(PIPE_MULT(cpu_transcoder),
6e3c9717 4867 intel_crtc->config->pixel_multiplier - 1);
ebb69c95
CT
4868 }
4869
6e3c9717 4870 if (intel_crtc->config->has_pch_encoder) {
229fca97 4871 intel_cpu_transcoder_set_m_n(intel_crtc,
6e3c9717 4872 &intel_crtc->config->fdi_m_n, NULL);
229fca97
DV
4873 }
4874
4d1de975
JN
4875 if (!intel_crtc->config->has_dsi_encoder)
4876 haswell_set_pipeconf(crtc);
4877
391bf048 4878 haswell_set_pipemisc(crtc);
229fca97 4879
b95c5321 4880 intel_color_set_csc(&pipe_config->base);
229fca97 4881
4f771f10 4882 intel_crtc->active = true;
8664281b 4883
6b698516
DV
4884 if (intel_crtc->config->has_pch_encoder)
4885 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
4886 else
4887 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
4888
7d4aefd0 4889 for_each_encoder_on_crtc(dev, crtc, encoder) {
4f771f10
PZ
4890 if (encoder->pre_enable)
4891 encoder->pre_enable(encoder);
7d4aefd0 4892 }
4f771f10 4893
d2d65408 4894 if (intel_crtc->config->has_pch_encoder)
4fe9467d 4895 dev_priv->display.fdi_link_train(crtc);
4fe9467d 4896
a65347ba 4897 if (!intel_crtc->config->has_dsi_encoder)
7d4aefd0 4898 intel_ddi_enable_pipe_clock(intel_crtc);
4f771f10 4899
1c132b44 4900 if (INTEL_INFO(dev)->gen >= 9)
e435d6e5 4901 skylake_pfit_enable(intel_crtc)
ff6d9f55 4902 else
1c132b44 4903 ironlake_pfit_enable(intel_crtc);
4f771f10
PZ
4904
4905 /*
4906 * On ILK+ LUT must be loaded before the pipe is running but with
4907 * clocks enabled
4908 */
b95c5321 4909 intel_color_load_luts(&pipe_config->base);
4f771f10 4910
1f544388 4911 intel_ddi_set_pipe_settings(crtc);
a65347ba 4912 if (!intel_crtc->config->has_dsi_encoder)
7d4aefd0 4913 intel_ddi_enable_transcoder_func(crtc);
4f771f10 4914
1d5bf5d9
ID
4915 if (dev_priv->display.initial_watermarks != NULL)
4916 dev_priv->display.initial_watermarks(pipe_config);
4917 else
4918 intel_update_watermarks(crtc);
4d1de975
JN
4919
4920 /* XXX: Do the pipe assertions at the right place for BXT DSI. */
4921 if (!intel_crtc->config->has_dsi_encoder)
4922 intel_enable_pipe(intel_crtc);
42db64ef 4923
6e3c9717 4924 if (intel_crtc->config->has_pch_encoder)
1507e5bd 4925 lpt_pch_enable(crtc);
4f771f10 4926
a65347ba 4927 if (intel_crtc->config->dp_encoder_is_mst)
0e32b39c
DA
4928 intel_ddi_set_vc_payload_alloc(crtc, true);
4929
f9b61ff6
DV
4930 assert_vblank_disabled(crtc);
4931 drm_crtc_vblank_on(crtc);
4932
8807e55b 4933 for_each_encoder_on_crtc(dev, crtc, encoder) {
4f771f10 4934 encoder->enable(encoder);
8807e55b
JN
4935 intel_opregion_notify_encoder(encoder, true);
4936 }
4f771f10 4937

6b698516
DV
4938 if (intel_crtc->config->has_pch_encoder) {
4939 intel_wait_for_vblank(dev, pipe);
4940 intel_wait_for_vblank(dev, pipe);
4941 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
d2d65408
VS
4942 intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
4943 true);
6b698516 4944 }
d2d65408 4945
e4916946
PZ
4946 /* If we change the relative order between pipe/planes enabling, we need
4947 * to change the workaround. */
99d736a2
ML
4948 hsw_workaround_pipe = pipe_config->hsw_workaround_pipe;
4949 if (IS_HASWELL(dev) && hsw_workaround_pipe != INVALID_PIPE) {
4950 intel_wait_for_vblank(dev, hsw_workaround_pipe);
4951 intel_wait_for_vblank(dev, hsw_workaround_pipe);
4952 }
4f771f10
PZ
4953}
4954
/*
 * ironlake_pfit_disable - clear the ILK-style panel fitter registers.
 * @force: disable even if the crtc state says the pfit is off.
 */
bfd16b2a 4955static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force)
3f8dce3a
DV
4956{
4957 struct drm_device *dev = crtc->base.dev;
4958 struct drm_i915_private *dev_priv = dev->dev_private;
4959 int pipe = crtc->pipe;
4960
4961 /* To avoid upsetting the power well on haswell only disable the pfit if
4962 * it's in use. The hw state code will make sure we get this right. */
bfd16b2a 4963 if (force || crtc->config->pch_pfit.enabled) {
3f8dce3a
DV
4964 I915_WRITE(PF_CTL(pipe), 0);
4965 I915_WRITE(PF_WIN_POS(pipe), 0);
4966 I915_WRITE(PF_WIN_SZ(pipe), 0);
4967 }
4968}
4969
6be4a607
JB
/*
 * ironlake_crtc_disable - full ILK/SNB/IVB crtc disable sequence.
 *
 * Reverse order of ironlake_crtc_enable(): encoders off, vblank off,
 * pipe off, pfit off, FDI down, encoder post_disable hooks, then the PCH
 * transcoder, TRANS_DP_CTL/DPLL_SEL cleanup on CPT, and the FDI PLL.
 * Underrun reporting is suppressed across the sequence and restored last.
 */
4970static void ironlake_crtc_disable(struct drm_crtc *crtc)
4971{
4972 struct drm_device *dev = crtc->dev;
4973 struct drm_i915_private *dev_priv = dev->dev_private;
4974 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
ef9c3aee 4975 struct intel_encoder *encoder;
6be4a607 4976 int pipe = intel_crtc->pipe;
b52eb4dc 4977
b2c0593a
VS
4978 /*
4979 * Sometimes spurious CPU pipe underruns happen when the
4980 * pipe is already disabled, but FDI RX/TX is still enabled.
4981 * Happens at least with VGA+HDMI cloning. Suppress them.
4982 */
4983 if (intel_crtc->config->has_pch_encoder) {
4984 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
37ca8d4c 4985 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
b2c0593a 4986 }
37ca8d4c 4987
ea9d758d
DV
4988 for_each_encoder_on_crtc(dev, crtc, encoder)
4989 encoder->disable(encoder);
4990
f9b61ff6
DV
4991 drm_crtc_vblank_off(crtc);
4992 assert_vblank_disabled(crtc);
4993
575f7ab7 4994 intel_disable_pipe(intel_crtc);
32f9d658 4995
bfd16b2a 4996 ironlake_pfit_disable(intel_crtc, false);
2c07245f 4997

b2c0593a 4998 if (intel_crtc->config->has_pch_encoder)
5a74f70a
VS
4999 ironlake_fdi_disable(crtc);
5000
bf49ec8c
DV
5001 for_each_encoder_on_crtc(dev, crtc, encoder)
5002 if (encoder->post_disable)
5003 encoder->post_disable(encoder);
2c07245f 5004
6e3c9717 5005 if (intel_crtc->config->has_pch_encoder) {
d925c59a 5006 ironlake_disable_pch_transcoder(dev_priv, pipe);
6be4a607 5007
d925c59a 5008 if (HAS_PCH_CPT(dev)) {
f0f59a00
VS
5009 i915_reg_t reg;
5010 u32 temp;
5011
d925c59a
DV
5012 /* disable TRANS_DP_CTL */
5013 reg = TRANS_DP_CTL(pipe);
5014 temp = I915_READ(reg);
5015 temp &= ~(TRANS_DP_OUTPUT_ENABLE |
5016 TRANS_DP_PORT_SEL_MASK);
5017 temp |= TRANS_DP_PORT_SEL_NONE;
5018 I915_WRITE(reg, temp);
5019
5020 /* disable DPLL_SEL */
5021 temp = I915_READ(PCH_DPLL_SEL);
11887397 5022 temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
d925c59a 5023 I915_WRITE(PCH_DPLL_SEL, temp);
9db4a9c7 5024 }
e3421a18 5025

d925c59a
DV
5026 ironlake_fdi_pll_disable(intel_crtc);
5027 }
81b088ca 5028

b2c0593a 5029 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
81b088ca 5030 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
6be4a607 5031}
1b3c7a47 5032
/*
 * haswell_crtc_disable - full HSW/BDW/SKL+ crtc disable sequence.
 *
 * Reverse of haswell_crtc_enable(): encoders (with opregion notify),
 * vblank, pipe, MST payload, DDI transcoder function, scaler/pfit, pipe
 * clock, post_disable hooks, and finally LPT PCH/iCLKIP/FDI teardown.
 * DSI transcoders skip the pipe-level steps (handled by the DSI encoder).
 */
4f771f10 5033static void haswell_crtc_disable(struct drm_crtc *crtc)
ee7b9f93 5034{
4f771f10
PZ
5035 struct drm_device *dev = crtc->dev;
5036 struct drm_i915_private *dev_priv = dev->dev_private;
ee7b9f93 5037 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4f771f10 5038 struct intel_encoder *encoder;
6e3c9717 5039 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
ee7b9f93 5040

d2d65408
VS
5041 if (intel_crtc->config->has_pch_encoder)
5042 intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
5043 false);
5044
8807e55b
JN
5045 for_each_encoder_on_crtc(dev, crtc, encoder) {
5046 intel_opregion_notify_encoder(encoder, false);
4f771f10 5047 encoder->disable(encoder);
8807e55b 5048 }
4f771f10 5049

f9b61ff6
DV
5050 drm_crtc_vblank_off(crtc);
5051 assert_vblank_disabled(crtc);
5052
4d1de975
JN
5053 /* XXX: Do the pipe assertions at the right place for BXT DSI. */
5054 if (!intel_crtc->config->has_dsi_encoder)
5055 intel_disable_pipe(intel_crtc);
4f771f10 5056

6e3c9717 5057 if (intel_crtc->config->dp_encoder_is_mst)
a4bf214f
VS
5058 intel_ddi_set_vc_payload_alloc(crtc, false);
5059
a65347ba 5060 if (!intel_crtc->config->has_dsi_encoder)
7d4aefd0 5061 intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
4f771f10 5062

1c132b44 5063 if (INTEL_INFO(dev)->gen >= 9)
e435d6e5 5064 skylake_scaler_disable(intel_crtc);
ff6d9f55 5065 else
bfd16b2a 5066 ironlake_pfit_disable(intel_crtc, false);
4f771f10 5067

a65347ba 5068 if (!intel_crtc->config->has_dsi_encoder)
7d4aefd0 5069 intel_ddi_disable_pipe_clock(intel_crtc);
4f771f10 5070

97b040aa
ID
5071 for_each_encoder_on_crtc(dev, crtc, encoder)
5072 if (encoder->post_disable)
5073 encoder->post_disable(encoder);
81b088ca 5074

92966a37
VS
5075 if (intel_crtc->config->has_pch_encoder) {
5076 lpt_disable_pch_transcoder(dev_priv);
503a74e9 5077 lpt_disable_iclkip(dev_priv);
92966a37
VS
5078 intel_ddi_fdi_disable(crtc);
5079
81b088ca
VS
5080 intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
5081 true);
92966a37 5082 }
4f771f10
PZ
5083}
5084
2dd24552
JB
/*
 * i9xx_pfit_enable - program the GMCH panel fitter from precomputed state.
 *
 * No-op when the state's gmch_pfit.control is 0. Must run while the pipe
 * is disabled (asserted below); also clears BCLRPAT to a black border.
 */
5085static void i9xx_pfit_enable(struct intel_crtc *crtc)
5086{
5087 struct drm_device *dev = crtc->base.dev;
5088 struct drm_i915_private *dev_priv = dev->dev_private;
6e3c9717 5089 struct intel_crtc_state *pipe_config = crtc->config;
2dd24552 5090

681a8504 5091 if (!pipe_config->gmch_pfit.control)
2dd24552
JB
5092 return;
5093
2dd24552 5094 /*
c0b03411
DV
5095 * The panel fitter should only be adjusted whilst the pipe is disabled,
5096 * according to register description and PRM.
2dd24552 5097 */
c0b03411
DV
5098 WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
5099 assert_pipe_disabled(dev_priv, crtc->pipe);
2dd24552 5100

b074cec8
JB
5101 I915_WRITE(PFIT_PGM_RATIOS, pipe_config->gmch_pfit.pgm_ratios);
5102 I915_WRITE(PFIT_CONTROL, pipe_config->gmch_pfit.control);
5a80c45c
VS
5103
5104 /* Border color in case we don't scale up to the full screen. Black by
5105 * default, change to something else for debugging. */
5106 I915_WRITE(BCLRPAT(crtc->pipe), 0);
2dd24552
JB
5107}
5108
d05410f9
DA
/* Map a DDI port to its lane power domain; warns on unknown ports. */
5109static enum intel_display_power_domain port_to_power_domain(enum port port)
5110{
5111 switch (port) {
5112 case PORT_A:
6331a704 5113 return POWER_DOMAIN_PORT_DDI_A_LANES;
d05410f9 5114 case PORT_B:
6331a704 5115 return POWER_DOMAIN_PORT_DDI_B_LANES;
d05410f9 5116 case PORT_C:
6331a704 5117 return POWER_DOMAIN_PORT_DDI_C_LANES;
d05410f9 5118 case PORT_D:
6331a704 5119 return POWER_DOMAIN_PORT_DDI_D_LANES;
d8e19f99 5120 case PORT_E:
6331a704 5121 return POWER_DOMAIN_PORT_DDI_E_LANES;
d05410f9 5122 default:
b9fec167 5123 MISSING_CASE(port);
d05410f9
DA
5124 return POWER_DOMAIN_PORT_OTHER;
5125 }
5126}
5127
25f78f58
VS
/* Map a DDI port to its AUX channel power domain; warns on unknown ports. */
5128static enum intel_display_power_domain port_to_aux_power_domain(enum port port)
5129{
5130 switch (port) {
5131 case PORT_A:
5132 return POWER_DOMAIN_AUX_A;
5133 case PORT_B:
5134 return POWER_DOMAIN_AUX_B;
5135 case PORT_C:
5136 return POWER_DOMAIN_AUX_C;
5137 case PORT_D:
5138 return POWER_DOMAIN_AUX_D;
5139 case PORT_E:
5140 /* FIXME: Check VBT for actual wiring of PORT E */
5141 return POWER_DOMAIN_AUX_D;
5142 default:
b9fec167 5143 MISSING_CASE(port);
25f78f58
VS
5144 return POWER_DOMAIN_AUX_A;
5145 }
5146}
5147
319be8ae
ID
/*
 * intel_display_port_power_domain - power domain needed to drive the port
 * behind @intel_encoder. Digital outputs resolve through their dig_port.
 */
5148enum intel_display_power_domain
5149intel_display_port_power_domain(struct intel_encoder *intel_encoder)
5150{
5151 struct drm_device *dev = intel_encoder->base.dev;
5152 struct intel_digital_port *intel_dig_port;
5153
5154 switch (intel_encoder->type) {
5155 case INTEL_OUTPUT_UNKNOWN:
5156 /* Only DDI platforms should ever use this output type */
5157 WARN_ON_ONCE(!HAS_DDI(dev));
/* fall through: UNKNOWN is treated like the DP/HDMI/eDP cases */
5158 case INTEL_OUTPUT_DISPLAYPORT:
5159 case INTEL_OUTPUT_HDMI:
5160 case INTEL_OUTPUT_EDP:
5161 intel_dig_port = enc_to_dig_port(&intel_encoder->base);
d05410f9 5162 return port_to_power_domain(intel_dig_port->port);
0e32b39c
DA
5163 case INTEL_OUTPUT_DP_MST:
5164 intel_dig_port = enc_to_mst(&intel_encoder->base)->primary;
5165 return port_to_power_domain(intel_dig_port->port);
319be8ae
ID
5166 case INTEL_OUTPUT_ANALOG:
5167 return POWER_DOMAIN_PORT_CRT;
5168 case INTEL_OUTPUT_DSI:
5169 return POWER_DOMAIN_PORT_DSI;
5170 default:
5171 return POWER_DOMAIN_PORT_OTHER;
5172 }
5173}
5174
25f78f58
VS
/*
 * intel_display_port_aux_power_domain - AUX-channel power domain for the
 * port behind @intel_encoder; warns for encoder types without an AUX ch.
 */
5175enum intel_display_power_domain
5176intel_display_port_aux_power_domain(struct intel_encoder *intel_encoder)
5177{
5178 struct drm_device *dev = intel_encoder->base.dev;
5179 struct intel_digital_port *intel_dig_port;
5180
5181 switch (intel_encoder->type) {
5182 case INTEL_OUTPUT_UNKNOWN:
651174a4
ID
5183 case INTEL_OUTPUT_HDMI:
5184 /*
5185 * Only DDI platforms should ever use these output types.
5186 * We can get here after the HDMI detect code has already set
5187 * the type of the shared encoder. Since we can't be sure
5188 * what's the status of the given connectors, play safe and
5189 * run the DP detection too.
5190 */
25f78f58
VS
5191 WARN_ON_ONCE(!HAS_DDI(dev));
/* fall through: resolve via the digital port like DP/eDP */
5192 case INTEL_OUTPUT_DISPLAYPORT:
5193 case INTEL_OUTPUT_EDP:
5194 intel_dig_port = enc_to_dig_port(&intel_encoder->base);
5195 return port_to_aux_power_domain(intel_dig_port->port);
5196 case INTEL_OUTPUT_DP_MST:
5197 intel_dig_port = enc_to_mst(&intel_encoder->base)->primary;
5198 return port_to_aux_power_domain(intel_dig_port->port);
5199 default:
b9fec167 5200 MISSING_CASE(intel_encoder->type);
25f78f58
VS
5201 return POWER_DOMAIN_AUX_A;
5202 }
5203}
5204
74bff5f9
ML
/*
 * get_crtc_power_domains - build the bitmask of power domains required by
 * @crtc in @crtc_state: pipe, transcoder, panel fitter (when enabled or
 * forced through), each attached encoder's port domain, and the shared
 * DPLL. Returns 0 for an inactive crtc.
 */
5205static unsigned long get_crtc_power_domains(struct drm_crtc *crtc,
5206 struct intel_crtc_state *crtc_state)
77d22dca 5207{
319be8ae 5208 struct drm_device *dev = crtc->dev;
74bff5f9 5209 struct drm_encoder *encoder;
319be8ae
ID
5210 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5211 enum pipe pipe = intel_crtc->pipe;
77d22dca 5212 unsigned long mask;
74bff5f9 5213 enum transcoder transcoder = crtc_state->cpu_transcoder;
77d22dca 5214

74bff5f9 5215 if (!crtc_state->base.active)
292b990e
ML
5216 return 0;
5217
77d22dca
ID
5218 mask = BIT(POWER_DOMAIN_PIPE(pipe));
5219 mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
74bff5f9
ML
5220 if (crtc_state->pch_pfit.enabled ||
5221 crtc_state->pch_pfit.force_thru)
77d22dca
ID
5222 mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
5223
74bff5f9
ML
5224 drm_for_each_encoder_mask(encoder, dev, crtc_state->base.encoder_mask) {
5225 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
5226
319be8ae 5227 mask |= BIT(intel_display_port_power_domain(intel_encoder));
74bff5f9 5228 }
319be8ae 5229

15e7ec29
ML
5230 if (crtc_state->shared_dpll)
5231 mask |= BIT(POWER_DOMAIN_PLLS);
5232
77d22dca
ID
5233 return mask;
5234}
5235
74bff5f9
ML
/*
 * modeset_get_crtc_power_domains - acquire references for the domains the
 * crtc newly needs, record the new set in the crtc, and return the set of
 * domains that are no longer needed (for the caller to put after commit).
 */
5236static unsigned long
5237modeset_get_crtc_power_domains(struct drm_crtc *crtc,
5238 struct intel_crtc_state *crtc_state)
77d22dca 5239{
292b990e
ML
5240 struct drm_i915_private *dev_priv = crtc->dev->dev_private;
5241 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5242 enum intel_display_power_domain domain;
5a21b665 5243 unsigned long domains, new_domains, old_domains;
77d22dca 5244

292b990e 5245 old_domains = intel_crtc->enabled_power_domains;
74bff5f9
ML
5246 intel_crtc->enabled_power_domains = new_domains =
5247 get_crtc_power_domains(crtc, crtc_state);
77d22dca 5248

5a21b665 5249 domains = new_domains & ~old_domains;
292b990e
ML
5250
5251 for_each_power_domain(domain, domains)
5252 intel_display_power_get(dev_priv, domain);
5253
5a21b665 5254 return old_domains & ~new_domains;
292b990e
ML
5255}
5256
/* Drop one power reference for each domain set in @domains. */
5257static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
5258 unsigned long domains)
5259{
5260 enum intel_display_power_domain domain;
5261
5262 for_each_power_domain(domain, domains)
5263 intel_display_power_put(dev_priv, domain);
5264}
77d22dca 5265
adafdc6f
MK
5266static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
5267{
5268 int max_cdclk_freq = dev_priv->max_cdclk_freq;
5269
5270 if (INTEL_INFO(dev_priv)->gen >= 9 ||
5271 IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
5272 return max_cdclk_freq;
5273 else if (IS_CHERRYVIEW(dev_priv))
5274 return max_cdclk_freq*95/100;
5275 else if (INTEL_INFO(dev_priv)->gen < 4)
5276 return 2*max_cdclk_freq*90/100;
5277 else
5278 return max_cdclk_freq*90/100;
5279}
5280
b2045352
VS
5281static int skl_calc_cdclk(int max_pixclk, int vco);
5282
560a7ae4
DL
/*
 * Determine the platform's maximum CD clock (kHz) and cache it in
 * dev_priv->max_cdclk_freq, then recompute the derived max dotclock.
 * On SKL/KBL the limit comes from the SKL_DFSM fuse register combined
 * with the preferred DPLL0 VCO; other platforms use fixed values.
 */
static void intel_update_max_cdclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
		u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;
		int max_cdclk, vco;

		vco = dev_priv->skl_preferred_vco_freq;
		/* Only 8100 and 8640 MHz VCOs are valid for DPLL0. */
		WARN_ON(vco != 8100000 && vco != 8640000);

		/*
		 * Use the lower (vco 8640) cdclk values as a
		 * first guess. skl_calc_cdclk() will correct it
		 * if the preferred vco is 8100 instead.
		 */
		if (limit == SKL_DFSM_CDCLK_LIMIT_675)
			max_cdclk = 617143;
		else if (limit == SKL_DFSM_CDCLK_LIMIT_540)
			max_cdclk = 540000;
		else if (limit == SKL_DFSM_CDCLK_LIMIT_450)
			max_cdclk = 432000;
		else
			max_cdclk = 308571;

		dev_priv->max_cdclk_freq = skl_calc_cdclk(max_cdclk, vco);
	} else if (IS_BROXTON(dev)) {
		dev_priv->max_cdclk_freq = 624000;
	} else if (IS_BROADWELL(dev)) {
		/*
		 * FIXME with extra cooling we can allow
		 * 540 MHz for ULX and 675 Mhz for ULT.
		 * How can we know if extra cooling is
		 * available? PCI ID, VTB, something else?
		 */
		if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
			dev_priv->max_cdclk_freq = 450000;
		else if (IS_BDW_ULX(dev))
			dev_priv->max_cdclk_freq = 450000;
		else if (IS_BDW_ULT(dev))
			dev_priv->max_cdclk_freq = 540000;
		else
			dev_priv->max_cdclk_freq = 675000;
	} else if (IS_CHERRYVIEW(dev)) {
		dev_priv->max_cdclk_freq = 320000;
	} else if (IS_VALLEYVIEW(dev)) {
		dev_priv->max_cdclk_freq = 400000;
	} else {
		/* otherwise assume cdclk is fixed */
		dev_priv->max_cdclk_freq = dev_priv->cdclk_freq;
	}

	dev_priv->max_dotclk_freq = intel_compute_max_dotclk(dev_priv);

	DRM_DEBUG_DRIVER("Max CD clock rate: %d kHz\n",
			 dev_priv->max_cdclk_freq);

	DRM_DEBUG_DRIVER("Max dotclock rate: %d kHz\n",
			 dev_priv->max_dotclk_freq);
}
5343
/*
 * Re-read the current CD clock from hardware into dev_priv->cdclk_freq
 * and, on VLV/CHV, reprogram the GMBus reference divider that depends
 * on it.
 */
static void intel_update_cdclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->cdclk_freq = dev_priv->display.get_display_clock_speed(dev);

	if (INTEL_GEN(dev_priv) >= 9)
		DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz, VCO: %d kHz, ref: %d kHz\n",
				 dev_priv->cdclk_freq, dev_priv->cdclk_pll.vco,
				 dev_priv->cdclk_pll.ref);
	else
		DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz\n",
				 dev_priv->cdclk_freq);

	/*
	 * 9:0 CMBUS [sic] CDCLK frequency (cdfreq):
	 * Programmng [sic] note: bit[9:2] should be programmed to the number
	 * of cdclk that generates 4MHz reference clock freq which is used to
	 * generate GMBus clock. This will vary with the cdclk freq.
	 */
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		I915_WRITE(GMBUSFREQ_VLV, DIV_ROUND_UP(dev_priv->cdclk_freq, 1000));
}
5367
92891e45
VS
/*
 * convert from kHz to .1 fixpoint MHz with -1MHz offset
 * (i.e. half-MHz steps: e.g. 337500 kHz -> (337500-1000)/500 = 673),
 * as expected by the CDCLK_CTL decimal frequency field.
 */
static int skl_cdclk_decimal(int cdclk)
{
	return DIV_ROUND_CLOSEST(cdclk - 1000, 500);
}
5373
5f199dfa
VS
/*
 * Return the DE PLL VCO frequency (kHz) needed to produce @cdclk, or 0
 * when @cdclk equals the PLL reference (PLL bypassed). The VCO is the
 * reference multiplied by a per-cdclk ratio.
 */
static int bxt_de_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
{
	int ratio;

	if (cdclk == dev_priv->cdclk_pll.ref)
		return 0;

	switch (cdclk) {
	default:
		MISSING_CASE(cdclk);
		/* fall through - treat unknown cdclks like the 60x group */
	case 144000:
	case 288000:
	case 384000:
	case 576000:
		ratio = 60;
		break;
	case 624000:
		ratio = 65;
		break;
	}

	return dev_priv->cdclk_pll.ref * ratio;
}
5397
2b73001e
VS
/*
 * Disable the BXT DE PLL and wait for it to report unlocked, then
 * record the PLL as off (vco = 0) in the cached state.
 */
static void bxt_de_pll_disable(struct drm_i915_private *dev_priv)
{
	I915_WRITE(BXT_DE_PLL_ENABLE, 0);

	/* Timeout 200us */
	if (wait_for((I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK) == 0, 1))
		DRM_ERROR("timeout waiting for DE PLL unlock\n");

	dev_priv->cdclk_pll.vco = 0;
}
5408
/*
 * Program the BXT DE PLL ratio for @vco (kHz), enable the PLL and wait
 * for lock, then record the new VCO in the cached state. The ratio is
 * vco / ref, rounded to the nearest integer.
 */
static void bxt_de_pll_enable(struct drm_i915_private *dev_priv, int vco)
{
	int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->cdclk_pll.ref);
	u32 val;

	val = I915_READ(BXT_DE_PLL_CTL);
	val &= ~BXT_DE_PLL_RATIO_MASK;
	val |= BXT_DE_PLL_RATIO(ratio);
	I915_WRITE(BXT_DE_PLL_CTL, val);

	I915_WRITE(BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE);

	/* Timeout 200us */
	if (wait_for((I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK) != 0, 1))
		DRM_ERROR("timeout waiting for DE PLL lock\n");

	dev_priv->cdclk_pll.vco = vco;
}
5427
/*
 * Change the BXT CD clock to @cdclk (kHz).
 *
 * Sequence: notify the power controller (pcode) of the upcoming change,
 * reprogram the DE PLL if the required VCO differs from the current one,
 * write the new divider/decimal value to CDCLK_CTL, and finally tell
 * pcode the new frequency. Aborts (with an error) if either pcode
 * request fails.
 */
static void bxt_set_cdclk(struct drm_i915_private *dev_priv, int cdclk)
{
	u32 val, divider;
	int vco, ret;

	vco = bxt_de_pll_vco(dev_priv, cdclk);

	DRM_DEBUG_DRIVER("Changing CDCLK to %d kHz (VCO %d kHz)\n", cdclk, vco);

	/* cdclk = vco / 2 / div{1,1.5,2,4} */
	switch (DIV_ROUND_CLOSEST(vco, cdclk)) {
	case 8:
		divider = BXT_CDCLK_CD2X_DIV_SEL_4;
		break;
	case 4:
		divider = BXT_CDCLK_CD2X_DIV_SEL_2;
		break;
	case 3:
		divider = BXT_CDCLK_CD2X_DIV_SEL_1_5;
		break;
	case 2:
		divider = BXT_CDCLK_CD2X_DIV_SEL_1;
		break;
	default:
		/* Only valid when bypassing the PLL (cdclk == ref, vco == 0). */
		WARN_ON(cdclk != dev_priv->cdclk_pll.ref);
		WARN_ON(vco != 0);

		divider = BXT_CDCLK_CD2X_DIV_SEL_1;
		break;
	}

	/* Inform power controller of upcoming frequency change */
	mutex_lock(&dev_priv->rps.hw_lock);
	ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
				      0x80000000);
	mutex_unlock(&dev_priv->rps.hw_lock);

	if (ret) {
		DRM_ERROR("PCode CDCLK freq change notify failed (err %d, freq %d)\n",
			  ret, cdclk);
		return;
	}

	/* Full PLL cycle only when the VCO actually has to change. */
	if (dev_priv->cdclk_pll.vco != 0 &&
	    dev_priv->cdclk_pll.vco != vco)
		bxt_de_pll_disable(dev_priv);

	if (dev_priv->cdclk_pll.vco != vco)
		bxt_de_pll_enable(dev_priv, vco);

	val = divider | skl_cdclk_decimal(cdclk);
	/*
	 * FIXME if only the cd2x divider needs changing, it could be done
	 * without shutting off the pipe (if only one pipe is active).
	 */
	val |= BXT_CDCLK_CD2X_PIPE_NONE;
	/*
	 * Disable SSA Precharge when CD clock frequency < 500 MHz,
	 * enable otherwise.
	 */
	if (cdclk >= 500000)
		val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
	I915_WRITE(CDCLK_CTL, val);

	mutex_lock(&dev_priv->rps.hw_lock);
	ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
				      DIV_ROUND_UP(cdclk, 25000));
	mutex_unlock(&dev_priv->rps.hw_lock);

	if (ret) {
		DRM_ERROR("PCode CDCLK freq set failed, (err %d, freq %d)\n",
			  ret, cdclk);
		return;
	}

	intel_update_cdclk(dev_priv->dev);
}
5505
/*
 * Verify the CDCLK state the BIOS left behind on BXT. If the PLL is
 * off, or CDCLK_CTL does not match what we would have programmed for
 * the current frequency, force a full reprogram on the next
 * bxt_set_cdclk() by zeroing the cached freq and invalidating the VCO.
 */
static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
{
	u32 cdctl, expected;

	intel_update_cdclk(dev_priv->dev);

	if (dev_priv->cdclk_pll.vco == 0 ||
	    dev_priv->cdclk_freq == dev_priv->cdclk_pll.ref)
		goto sanitize;

	/* DPLL okay; verify the cdclock
	 *
	 * Some BIOS versions leave an incorrect decimal frequency value and
	 * set reserved MBZ bits in CDCLK_CTL at least during exiting from S4,
	 * so sanitize this register.
	 */
	cdctl = I915_READ(CDCLK_CTL);
	/*
	 * Let's ignore the pipe field, since BIOS could have configured the
	 * dividers both synching to an active pipe, or asynchronously
	 * (PIPE_NONE).
	 */
	cdctl &= ~BXT_CDCLK_CD2X_PIPE_NONE;

	expected = (cdctl & BXT_CDCLK_CD2X_DIV_SEL_MASK) |
		skl_cdclk_decimal(dev_priv->cdclk_freq);
	/*
	 * Disable SSA Precharge when CD clock frequency < 500 MHz,
	 * enable otherwise.
	 */
	if (dev_priv->cdclk_freq >= 500000)
		expected |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;

	if (cdctl == expected)
		/* All well; nothing to sanitize */
		return;

sanitize:
	DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n");

	/* force cdclk programming */
	dev_priv->cdclk_freq = 0;

	/* force full PLL disable + enable */
	dev_priv->cdclk_pll.vco = -1;
}
5552
/*
 * Bring the BXT CD clock to a known-good state at driver init: sanitize
 * whatever the BIOS programmed, and only reprogram if the sanitize pass
 * found it unusable (freq or vco cleared).
 */
void bxt_init_cdclk(struct drm_i915_private *dev_priv)
{
	bxt_sanitize_cdclk(dev_priv);

	if (dev_priv->cdclk_freq != 0 && dev_priv->cdclk_pll.vco != 0)
		return;

	/*
	 * FIXME:
	 * - The initial CDCLK needs to be read from VBT.
	 *   Need to make this change after VBT has changes for BXT.
	 */
	bxt_set_cdclk(dev_priv, bxt_calc_cdclk(0));
}
5567
/* Drop the CD clock back to the PLL reference frequency (PLL bypass). */
void bxt_uninit_cdclk(struct drm_i915_private *dev_priv)
{
	bxt_set_cdclk(dev_priv, dev_priv->cdclk_pll.ref);
}
5572
a8ca4934
VS
/*
 * Pick the smallest supported SKL cdclk (kHz) that can carry
 * @max_pixclk for the given DPLL0 VCO. Each VCO has its own set of
 * four cdclk bins; we walk them from lowest to highest and take the
 * first one that is not exceeded by the pixel clock.
 */
static int skl_calc_cdclk(int max_pixclk, int vco)
{
	if (vco == 8640000) {
		if (max_pixclk <= 308571)
			return 308571;
		if (max_pixclk <= 432000)
			return 432000;
		if (max_pixclk <= 540000)
			return 540000;
		return 617143;
	}

	/* 8100 MHz VCO bins */
	if (max_pixclk <= 337500)
		return 337500;
	if (max_pixclk <= 450000)
		return 450000;
	if (max_pixclk <= 540000)
		return 540000;
	return 675000;
}
5595
ea61791e
VS
/*
 * Read back the DPLL0 state from hardware and cache ref/vco in
 * dev_priv->cdclk_pll. Leaves vco = 0 if the PLL is disabled, not
 * locked, or programmed in an unexpected (non-override) mode; the VCO
 * is otherwise inferred from the DPLL0 link-rate selection.
 */
static void
skl_dpll0_update(struct drm_i915_private *dev_priv)
{
	u32 val;

	dev_priv->cdclk_pll.ref = 24000;
	dev_priv->cdclk_pll.vco = 0;

	val = I915_READ(LCPLL1_CTL);
	if ((val & LCPLL_PLL_ENABLE) == 0)
		return;

	if (WARN_ON((val & LCPLL_PLL_LOCK) == 0))
		return;

	val = I915_READ(DPLL_CTRL1);

	/* We only expect DPLL0 in override mode, no HDMI mode, no SSC. */
	if (WARN_ON((val & (DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) |
			    DPLL_CTRL1_SSC(SKL_DPLL0) |
			    DPLL_CTRL1_OVERRIDE(SKL_DPLL0))) !=
		    DPLL_CTRL1_OVERRIDE(SKL_DPLL0)))
		return;

	switch (val & DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)) {
	case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, SKL_DPLL0):
	case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, SKL_DPLL0):
	case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, SKL_DPLL0):
	case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, SKL_DPLL0):
		dev_priv->cdclk_pll.vco = 8100000;
		break;
	case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, SKL_DPLL0):
	case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, SKL_DPLL0):
		dev_priv->cdclk_pll.vco = 8640000;
		break;
	default:
		MISSING_CASE(val & DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0));
		break;
	}
}
5635
b2045352
VS
/*
 * Record the preferred DPLL0 VCO (kHz) and, if it changed, recompute
 * the maximum CD clock which depends on it.
 */
void skl_set_preferred_cdclk_vco(struct drm_i915_private *dev_priv, int vco)
{
	bool changed = dev_priv->skl_preferred_vco_freq != vco;

	dev_priv->skl_preferred_vco_freq = vco;

	if (changed)
		intel_update_max_cdclk(dev_priv->dev);
}
5645
/*
 * Enable DPLL0 for the requested @vco (8100000 or 8640000 kHz): first
 * select the minimum cdclk for that VCO, then program the DPLL0 link
 * rate that yields the VCO, enable the PLL and wait for lock. Also
 * records @vco as the preferred VCO from now on.
 */
static void
skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco)
{
	int min_cdclk = skl_calc_cdclk(0, vco);
	u32 val;

	WARN_ON(vco != 8100000 && vco != 8640000);

	/* select the minimum CDCLK before enabling DPLL 0 */
	val = CDCLK_FREQ_337_308 | skl_cdclk_decimal(min_cdclk);
	I915_WRITE(CDCLK_CTL, val);
	POSTING_READ(CDCLK_CTL);

	/*
	 * We always enable DPLL0 with the lowest link rate possible, but still
	 * taking into account the VCO required to operate the eDP panel at the
	 * desired frequency. The usual DP link rates operate with a VCO of
	 * 8100 while the eDP 1.4 alternate link rates need a VCO of 8640.
	 * The modeset code is responsible for the selection of the exact link
	 * rate later on, with the constraint of choosing a frequency that
	 * works with vco.
	 */
	val = I915_READ(DPLL_CTRL1);

	val &= ~(DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) | DPLL_CTRL1_SSC(SKL_DPLL0) |
		 DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0));
	val |= DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
	if (vco == 8640000)
		val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
					    SKL_DPLL0);
	else
		val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
					    SKL_DPLL0);

	I915_WRITE(DPLL_CTRL1, val);
	POSTING_READ(DPLL_CTRL1);

	I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) | LCPLL_PLL_ENABLE);

	if (wait_for(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK, 5))
		DRM_ERROR("DPLL0 not locked\n");

	dev_priv->cdclk_pll.vco = vco;

	/* We'll want to keep using the current vco from now on. */
	skl_set_preferred_cdclk_vco(dev_priv, vco);
}
5693
430e05de
VS
/*
 * Disable DPLL0, wait for the lock bit to clear, and mark the cached
 * PLL state as off (vco = 0).
 */
static void
skl_dpll0_disable(struct drm_i915_private *dev_priv)
{
	I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & ~LCPLL_PLL_ENABLE);
	if (wait_for(!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK), 1))
		DRM_ERROR("Couldn't disable DPLL0\n");

	dev_priv->cdclk_pll.vco = 0;
}
5703
5d96d8af
DL
/*
 * Ask the PCU whether it is ready for a CDCLK change. Returns true only
 * when the pcode read succeeded and the ready flag is set in the reply.
 */
static bool skl_cdclk_pcu_ready(struct drm_i915_private *dev_priv)
{
	int ret;
	u32 val;

	/* inform PCU we want to change CDCLK */
	val = SKL_CDCLK_PREPARE_FOR_CHANGE;
	mutex_lock(&dev_priv->rps.hw_lock);
	ret = sandybridge_pcode_read(dev_priv, SKL_PCODE_CDCLK_CONTROL, &val);
	mutex_unlock(&dev_priv->rps.hw_lock);

	return ret == 0 && (val & SKL_CDCLK_READY_FOR_CHANGE);
}
5717
5718static bool skl_cdclk_wait_for_pcu_ready(struct drm_i915_private *dev_priv)
5719{
5720 unsigned int i;
5721
5722 for (i = 0; i < 15; i++) {
5723 if (skl_cdclk_pcu_ready(dev_priv))
5724 return true;
5725 udelay(10);
5726 }
5727
5728 return false;
5729}
5730
/*
 * Change the SKL CD clock to @cdclk using DPLL0 VCO @vco (both kHz).
 *
 * Sequence: wait for the PCU to be ready, map the cdclk to the
 * CDCLK_CTL frequency select + PCU ack code, cycle DPLL0 if the VCO
 * must change, program CDCLK_CTL, and finally ack the change to the
 * PCU. A zero @vco is only valid together with the 24000 kHz
 * reference bypass frequency.
 */
static void skl_set_cdclk(struct drm_i915_private *dev_priv, int cdclk, int vco)
{
	struct drm_device *dev = dev_priv->dev;
	u32 freq_select, pcu_ack;

	WARN_ON((cdclk == 24000) != (vco == 0));

	DRM_DEBUG_DRIVER("Changing CDCLK to %d kHz (VCO %d kHz)\n", cdclk, vco);

	if (!skl_cdclk_wait_for_pcu_ready(dev_priv)) {
		DRM_ERROR("failed to inform PCU about cdclk change\n");
		return;
	}

	/* set CDCLK_CTL */
	switch (cdclk) {
	case 450000:
	case 432000:
		freq_select = CDCLK_FREQ_450_432;
		pcu_ack = 1;
		break;
	case 540000:
		freq_select = CDCLK_FREQ_540;
		pcu_ack = 2;
		break;
	case 308571:
	case 337500:
	default:
		freq_select = CDCLK_FREQ_337_308;
		pcu_ack = 0;
		break;
	case 617143:
	case 675000:
		freq_select = CDCLK_FREQ_675_617;
		pcu_ack = 3;
		break;
	}

	/* Full DPLL0 disable/enable cycle only when the VCO changes. */
	if (dev_priv->cdclk_pll.vco != 0 &&
	    dev_priv->cdclk_pll.vco != vco)
		skl_dpll0_disable(dev_priv);

	if (dev_priv->cdclk_pll.vco != vco)
		skl_dpll0_enable(dev_priv, vco);

	I915_WRITE(CDCLK_CTL, freq_select | skl_cdclk_decimal(cdclk));
	POSTING_READ(CDCLK_CTL);

	/* inform PCU of the change */
	mutex_lock(&dev_priv->rps.hw_lock);
	sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, pcu_ack);
	mutex_unlock(&dev_priv->rps.hw_lock);

	intel_update_cdclk(dev);
}
5786
9f7eb31a
VS
5787static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv);
5788
5d96d8af
DL
/* Drop back to the 24 MHz reference clock with DPLL0 off (vco = 0). */
void skl_uninit_cdclk(struct drm_i915_private *dev_priv)
{
	skl_set_cdclk(dev_priv, dev_priv->cdclk_pll.ref, 0);
}
5793
/*
 * Initialize the SKL CD clock at driver load: sanitize the BIOS state
 * and keep it if usable (adopting its VCO as the preferred one),
 * otherwise program the minimum cdclk for the preferred VCO
 * (defaulting to 8100 MHz).
 */
void skl_init_cdclk(struct drm_i915_private *dev_priv)
{
	int cdclk, vco;

	skl_sanitize_cdclk(dev_priv);

	if (dev_priv->cdclk_freq != 0 && dev_priv->cdclk_pll.vco != 0) {
		/*
		 * Use the current vco as our initial
		 * guess as to what the preferred vco is.
		 */
		if (dev_priv->skl_preferred_vco_freq == 0)
			skl_set_preferred_cdclk_vco(dev_priv,
						    dev_priv->cdclk_pll.vco);
		return;
	}

	vco = dev_priv->skl_preferred_vco_freq;
	if (vco == 0)
		vco = 8100000;
	cdclk = skl_calc_cdclk(0, vco);

	skl_set_cdclk(dev_priv, cdclk, vco);
}
5818
/*
 * Verify the CDCLK state the BIOS left behind on SKL. If the pre-os
 * never initialized the display (SWF18 scratchpad clear), the PLL is
 * off, or CDCLK_CTL disagrees with the current frequency, force a full
 * reprogram by zeroing the cached freq and invalidating the VCO.
 */
static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
{
	uint32_t cdctl, expected;

	/*
	 * check if the pre-os intialized the display
	 * There is SWF18 scratchpad register defined which is set by the
	 * pre-os which can be used by the OS drivers to check the status
	 */
	if ((I915_READ(SWF_ILK(0x18)) & 0x00FFFFFF) == 0)
		goto sanitize;

	intel_update_cdclk(dev_priv->dev);
	/* Is PLL enabled and locked ? */
	if (dev_priv->cdclk_pll.vco == 0 ||
	    dev_priv->cdclk_freq == dev_priv->cdclk_pll.ref)
		goto sanitize;

	/* DPLL okay; verify the cdclock
	 *
	 * Noticed in some instances that the freq selection is correct but
	 * decimal part is programmed wrong from BIOS where pre-os does not
	 * enable display. Verify the same as well.
	 */
	cdctl = I915_READ(CDCLK_CTL);
	expected = (cdctl & CDCLK_FREQ_SEL_MASK) |
		skl_cdclk_decimal(dev_priv->cdclk_freq);
	if (cdctl == expected)
		/* All well; nothing to sanitize */
		return;

sanitize:
	DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n");

	/* force cdclk programming */
	dev_priv->cdclk_freq = 0;
	/* force full PLL disable + enable */
	dev_priv->cdclk_pll.vco = -1;
}
5858
30a970c6
JB
/* Adjust CDclk dividers to allow high res or save power if possible */
static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val, cmd;

	/* The cached cdclk must match what the hardware reports. */
	WARN_ON(dev_priv->display.get_display_clock_speed(dev)
					!= dev_priv->cdclk_freq);

	/* Pick the Punit voltage request for the target frequency. */
	if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */
		cmd = 2;
	else if (cdclk == 266667)
		cmd = 1;
	else
		cmd = 0;

	mutex_lock(&dev_priv->rps.hw_lock);
	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	val &= ~DSPFREQGUAR_MASK;
	val |= (cmd << DSPFREQGUAR_SHIFT);
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
		      DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT),
		     50)) {
		DRM_ERROR("timed out waiting for CDclk change\n");
	}
	mutex_unlock(&dev_priv->rps.hw_lock);

	mutex_lock(&dev_priv->sb_lock);

	/* Only 400 MHz requires reprogramming the CCK divider. */
	if (cdclk == 400000) {
		u32 divider;

		divider = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;

		/* adjust cdclk divider */
		val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
		val &= ~CCK_FREQUENCY_VALUES;
		val |= divider;
		vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val);

		if (wait_for((vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL) &
			      CCK_FREQUENCY_STATUS) == (divider << CCK_FREQUENCY_STATUS_SHIFT),
			     50))
			DRM_ERROR("timed out waiting for CDclk change\n");
	}

	/* adjust self-refresh exit latency value */
	val = vlv_bunit_read(dev_priv, BUNIT_REG_BISOC);
	val &= ~0x7f;

	/*
	 * For high bandwidth configs, we set a higher latency in the bunit
	 * so that the core display fetch happens in time to avoid underruns.
	 */
	if (cdclk == 400000)
		val |= 4500 / 250; /* 4.5 usec */
	else
		val |= 3000 / 250; /* 3.0 usec */
	vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val);

	mutex_unlock(&dev_priv->sb_lock);

	intel_update_cdclk(dev);
}
5924
383c5a6a
VS
/*
 * Change the CHV CD clock to @cdclk (kHz). Only the four listed
 * frequencies are valid; the change is requested through the Punit
 * DSPFREQ register and polled until the status matches.
 */
static void cherryview_set_cdclk(struct drm_device *dev, int cdclk)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val, cmd;

	/* The cached cdclk must match what the hardware reports. */
	WARN_ON(dev_priv->display.get_display_clock_speed(dev)
						!= dev_priv->cdclk_freq);

	switch (cdclk) {
	case 333333:
	case 320000:
	case 266667:
	case 200000:
		break;
	default:
		MISSING_CASE(cdclk);
		return;
	}

	/*
	 * Specs are full of misinformation, but testing on actual
	 * hardware has shown that we just need to write the desired
	 * CCK divider into the Punit register.
	 */
	cmd = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;

	mutex_lock(&dev_priv->rps.hw_lock);
	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	val &= ~DSPFREQGUAR_MASK_CHV;
	val |= (cmd << DSPFREQGUAR_SHIFT_CHV);
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
		      DSPFREQSTAT_MASK_CHV) == (cmd << DSPFREQSTAT_SHIFT_CHV),
		     50)) {
		DRM_ERROR("timed out waiting for CDclk change\n");
	}
	mutex_unlock(&dev_priv->rps.hw_lock);

	intel_update_cdclk(dev);
}
5965
30a970c6
JB
/*
 * Pick the smallest VLV/CHV cdclk (kHz) that can carry @max_pixclk,
 * leaving 10% (VLV) or 5% (CHV) headroom above the lower bin. The
 * 320/333 MHz bin depends on whether the HPLL frequency divides evenly.
 */
static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
				 int max_pixclk)
{
	int freq_320 = (dev_priv->hpll_freq << 1) % 320000 != 0 ? 333333 : 320000;
	int limit = IS_CHERRYVIEW(dev_priv) ? 95 : 90;

	/*
	 * Really only a few cases to deal with, as only 4 CDclks are supported:
	 * 200MHz
	 * 267MHz
	 * 320/333MHz (depends on HPLL freq)
	 * 400MHz (VLV only)
	 * So we check to see whether we're above 90% (VLV) or 95% (CHV)
	 * of the lower bin and adjust if needed.
	 *
	 * We seem to get an unstable or solid color picture at 200MHz.
	 * Not sure what's wrong. For now use 200MHz only when all pipes
	 * are off.
	 */
	if (!IS_CHERRYVIEW(dev_priv) &&
	    max_pixclk > freq_320*limit/100)
		return 400000;
	else if (max_pixclk > 266667*limit/100)
		return freq_320;
	else if (max_pixclk > 0)
		return 266667;
	else
		return 200000;
}
5995
/*
 * Pick the smallest supported BXT cdclk (kHz) that is at least
 * @max_pixclk, walking the five supported bins from lowest to highest.
 */
static int bxt_calc_cdclk(int max_pixclk)
{
	if (max_pixclk <= 144000)
		return 144000;
	if (max_pixclk <= 288000)
		return 288000;
	if (max_pixclk <= 384000)
		return 384000;
	if (max_pixclk <= 576000)
		return 576000;
	return 624000;
}
6009
/* Compute the max pixel clock for new configuration. */
/*
 * Copies the current per-pipe min_pixclk values into the atomic state,
 * overwrites the entries for crtcs touched by @state (0 when disabled),
 * and returns the maximum across all pipes.
 */
static int intel_mode_max_pixclk(struct drm_device *dev,
				 struct drm_atomic_state *state)
{
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	unsigned max_pixclk = 0, i;
	enum pipe pipe;

	/* Start from the current values; crtcs not in @state keep theirs. */
	memcpy(intel_state->min_pixclk, dev_priv->min_pixclk,
	       sizeof(intel_state->min_pixclk));

	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		int pixclk = 0;

		if (crtc_state->enable)
			pixclk = crtc_state->adjusted_mode.crtc_clock;

		intel_state->min_pixclk[i] = pixclk;
	}

	for_each_pipe(dev_priv, pipe)
		max_pixclk = max(intel_state->min_pixclk[pipe], max_pixclk);

	return max_pixclk;
}
6038
/*
 * Compute the cdclk for a VLV/CHV atomic state: the cdclk needed for
 * the max pixel clock, and the actual device cdclk which drops to the
 * minimum when no crtcs are active. Always returns 0 (success).
 */
static int valleyview_modeset_calc_cdclk(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int max_pixclk = intel_mode_max_pixclk(dev, state);
	struct intel_atomic_state *intel_state =
		to_intel_atomic_state(state);

	intel_state->cdclk = intel_state->dev_cdclk =
		valleyview_calc_cdclk(dev_priv, max_pixclk);

	if (!intel_state->active_crtcs)
		intel_state->dev_cdclk = valleyview_calc_cdclk(dev_priv, 0);

	return 0;
}
304603f4 6055
/*
 * Compute the cdclk for a BXT atomic state. Note this uses
 * ilk_max_pixel_rate() (not intel_mode_max_pixclk()) to size the
 * pixel rate. Drops dev_cdclk to the minimum when no crtcs are active.
 * Always returns 0 (success).
 */
static int bxt_modeset_calc_cdclk(struct drm_atomic_state *state)
{
	int max_pixclk = ilk_max_pixel_rate(state);
	struct intel_atomic_state *intel_state =
		to_intel_atomic_state(state);

	intel_state->cdclk = intel_state->dev_cdclk =
		bxt_calc_cdclk(max_pixclk);

	if (!intel_state->active_crtcs)
		intel_state->dev_cdclk = bxt_calc_cdclk(0);

	return 0;
}
6070
1e69cd74
VS
/*
 * Program the PFI credit count in GCI_CONTROL based on whether cdclk
 * runs at least as fast as czclk; higher credits are used in that case.
 * The default credits are written first as a workaround before the
 * final value is written with the resend bit.
 */
static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv)
{
	unsigned int credits, default_credits;

	if (IS_CHERRYVIEW(dev_priv))
		default_credits = PFI_CREDIT(12);
	else
		default_credits = PFI_CREDIT(8);

	if (dev_priv->cdclk_freq >= dev_priv->czclk_freq) {
		/* CHV suggested value is 31 or 63 */
		if (IS_CHERRYVIEW(dev_priv))
			credits = PFI_CREDIT_63;
		else
			credits = PFI_CREDIT(15);
	} else {
		credits = default_credits;
	}

	/*
	 * WA - write default credits before re-programming
	 * FIXME: should we also set the resend bit here?
	 */
	I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE |
		   default_credits);

	I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE |
		   credits | PFI_CREDIT_RESEND);

	/*
	 * FIXME is this guaranteed to clear
	 * immediately or should we poll for it?
	 */
	WARN_ON(I915_READ(GCI_CONTROL) & PFI_CREDIT_RESEND);
}
6106
/*
 * Commit the cdclk computed during the atomic check phase
 * (dev_cdclk in the old state) to hardware, dispatching to the CHV or
 * VLV programming routine, and reprogram the PFI credits afterwards.
 */
static void valleyview_modeset_commit_cdclk(struct drm_atomic_state *old_state)
{
	struct drm_device *dev = old_state->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_atomic_state *old_intel_state =
		to_intel_atomic_state(old_state);
	unsigned req_cdclk = old_intel_state->dev_cdclk;

	/*
	 * FIXME: We can end up here with all power domains off, yet
	 * with a CDCLK frequency other than the minimum. To account
	 * for this take the PIPE-A power domain, which covers the HW
	 * blocks needed for the following programming. This can be
	 * removed once it's guaranteed that we get here either with
	 * the minimum CDCLK set, or the required power domains
	 * enabled.
	 */
	intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);

	if (IS_CHERRYVIEW(dev))
		cherryview_set_cdclk(dev, req_cdclk);
	else
		valleyview_set_cdclk(dev, req_cdclk);

	vlv_program_pfi_credits(dev_priv);

	intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
}
6135
89b667f8
JB
6136static void valleyview_crtc_enable(struct drm_crtc *crtc)
6137{
6138 struct drm_device *dev = crtc->dev;
a72e4c9f 6139 struct drm_i915_private *dev_priv = to_i915(dev);
89b667f8
JB
6140 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6141 struct intel_encoder *encoder;
b95c5321
ML
6142 struct intel_crtc_state *pipe_config =
6143 to_intel_crtc_state(crtc->state);
89b667f8 6144 int pipe = intel_crtc->pipe;
89b667f8 6145
53d9f4e9 6146 if (WARN_ON(intel_crtc->active))
89b667f8
JB
6147 return;
6148
6e3c9717 6149 if (intel_crtc->config->has_dp_encoder)
fe3cd48d 6150 intel_dp_set_m_n(intel_crtc, M1_N1);
5b18e57c
DV
6151
6152 intel_set_pipe_timings(intel_crtc);
bc58be60 6153 intel_set_pipe_src_size(intel_crtc);
5b18e57c 6154
c14b0485
VS
6155 if (IS_CHERRYVIEW(dev) && pipe == PIPE_B) {
6156 struct drm_i915_private *dev_priv = dev->dev_private;
6157
6158 I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
6159 I915_WRITE(CHV_CANVAS(pipe), 0);
6160 }
6161
5b18e57c
DV
6162 i9xx_set_pipeconf(intel_crtc);
6163
89b667f8 6164 intel_crtc->active = true;
89b667f8 6165
a72e4c9f 6166 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
4a3436e8 6167
89b667f8
JB
6168 for_each_encoder_on_crtc(dev, crtc, encoder)
6169 if (encoder->pre_pll_enable)
6170 encoder->pre_pll_enable(encoder);
6171
cd2d34d9
VS
6172 if (IS_CHERRYVIEW(dev)) {
6173 chv_prepare_pll(intel_crtc, intel_crtc->config);
6174 chv_enable_pll(intel_crtc, intel_crtc->config);
6175 } else {
6176 vlv_prepare_pll(intel_crtc, intel_crtc->config);
6177 vlv_enable_pll(intel_crtc, intel_crtc->config);
9d556c99 6178 }
89b667f8
JB
6179
6180 for_each_encoder_on_crtc(dev, crtc, encoder)
6181 if (encoder->pre_enable)
6182 encoder->pre_enable(encoder);
6183
2dd24552
JB
6184 i9xx_pfit_enable(intel_crtc);
6185
b95c5321 6186 intel_color_load_luts(&pipe_config->base);
63cbb074 6187
caed361d 6188 intel_update_watermarks(crtc);
e1fdc473 6189 intel_enable_pipe(intel_crtc);
be6a6f8e 6190
4b3a9526
VS
6191 assert_vblank_disabled(crtc);
6192 drm_crtc_vblank_on(crtc);
6193
f9b61ff6
DV
6194 for_each_encoder_on_crtc(dev, crtc, encoder)
6195 encoder->enable(encoder);
89b667f8
JB
6196}
6197
f13c2ef3
DV
/*
 * Write the precomputed FP0/FP1 PLL divider values from the crtc's
 * dpll_hw_state into the hardware registers for this pipe.
 */
static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(FP0(crtc->pipe), crtc->config->dpll_hw_state.fp0);
	I915_WRITE(FP1(crtc->pipe), crtc->config->dpll_hw_state.fp1);
}
6206
0b8765c6 6207static void i9xx_crtc_enable(struct drm_crtc *crtc)
79e53945
JB
6208{
6209 struct drm_device *dev = crtc->dev;
a72e4c9f 6210 struct drm_i915_private *dev_priv = to_i915(dev);
79e53945 6211 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
ef9c3aee 6212 struct intel_encoder *encoder;
b95c5321
ML
6213 struct intel_crtc_state *pipe_config =
6214 to_intel_crtc_state(crtc->state);
cd2d34d9 6215 enum pipe pipe = intel_crtc->pipe;
79e53945 6216
53d9f4e9 6217 if (WARN_ON(intel_crtc->active))
f7abfe8b
CW
6218 return;
6219
f13c2ef3
DV
6220 i9xx_set_pll_dividers(intel_crtc);
6221
6e3c9717 6222 if (intel_crtc->config->has_dp_encoder)
fe3cd48d 6223 intel_dp_set_m_n(intel_crtc, M1_N1);
5b18e57c
DV
6224
6225 intel_set_pipe_timings(intel_crtc);
bc58be60 6226 intel_set_pipe_src_size(intel_crtc);
5b18e57c 6227
5b18e57c
DV
6228 i9xx_set_pipeconf(intel_crtc);
6229
f7abfe8b 6230 intel_crtc->active = true;
6b383a7f 6231
4a3436e8 6232 if (!IS_GEN2(dev))
a72e4c9f 6233 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
4a3436e8 6234
9d6d9f19
MK
6235 for_each_encoder_on_crtc(dev, crtc, encoder)
6236 if (encoder->pre_enable)
6237 encoder->pre_enable(encoder);
6238
f6736a1a
DV
6239 i9xx_enable_pll(intel_crtc);
6240
2dd24552
JB
6241 i9xx_pfit_enable(intel_crtc);
6242
b95c5321 6243 intel_color_load_luts(&pipe_config->base);
63cbb074 6244
f37fcc2a 6245 intel_update_watermarks(crtc);
e1fdc473 6246 intel_enable_pipe(intel_crtc);
be6a6f8e 6247
4b3a9526
VS
6248 assert_vblank_disabled(crtc);
6249 drm_crtc_vblank_on(crtc);
6250
f9b61ff6
DV
6251 for_each_encoder_on_crtc(dev, crtc, encoder)
6252 encoder->enable(encoder);
0b8765c6 6253}
79e53945 6254
87476d63
DV
6255static void i9xx_pfit_disable(struct intel_crtc *crtc)
6256{
6257 struct drm_device *dev = crtc->base.dev;
6258 struct drm_i915_private *dev_priv = dev->dev_private;
87476d63 6259
6e3c9717 6260 if (!crtc->config->gmch_pfit.control)
328d8e82 6261 return;
87476d63 6262
328d8e82 6263 assert_pipe_disabled(dev_priv, crtc->pipe);
87476d63 6264
328d8e82
DV
6265 DRM_DEBUG_DRIVER("disabling pfit, current: 0x%08x\n",
6266 I915_READ(PFIT_CONTROL));
6267 I915_WRITE(PFIT_CONTROL, 0);
87476d63
DV
6268}
6269
0b8765c6
JB
6270static void i9xx_crtc_disable(struct drm_crtc *crtc)
6271{
6272 struct drm_device *dev = crtc->dev;
6273 struct drm_i915_private *dev_priv = dev->dev_private;
6274 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
ef9c3aee 6275 struct intel_encoder *encoder;
0b8765c6 6276 int pipe = intel_crtc->pipe;
ef9c3aee 6277
6304cd91
VS
6278 /*
6279 * On gen2 planes are double buffered but the pipe isn't, so we must
6280 * wait for planes to fully turn off before disabling the pipe.
6281 */
90e83e53
ACO
6282 if (IS_GEN2(dev))
6283 intel_wait_for_vblank(dev, pipe);
6304cd91 6284
4b3a9526
VS
6285 for_each_encoder_on_crtc(dev, crtc, encoder)
6286 encoder->disable(encoder);
6287
f9b61ff6
DV
6288 drm_crtc_vblank_off(crtc);
6289 assert_vblank_disabled(crtc);
6290
575f7ab7 6291 intel_disable_pipe(intel_crtc);
24a1f16d 6292
87476d63 6293 i9xx_pfit_disable(intel_crtc);
24a1f16d 6294
89b667f8
JB
6295 for_each_encoder_on_crtc(dev, crtc, encoder)
6296 if (encoder->post_disable)
6297 encoder->post_disable(encoder);
6298
a65347ba 6299 if (!intel_crtc->config->has_dsi_encoder) {
076ed3b2
CML
6300 if (IS_CHERRYVIEW(dev))
6301 chv_disable_pll(dev_priv, pipe);
6302 else if (IS_VALLEYVIEW(dev))
6303 vlv_disable_pll(dev_priv, pipe);
6304 else
1c4e0274 6305 i9xx_disable_pll(intel_crtc);
076ed3b2 6306 }
0b8765c6 6307
d6db995f
VS
6308 for_each_encoder_on_crtc(dev, crtc, encoder)
6309 if (encoder->post_pll_disable)
6310 encoder->post_pll_disable(encoder);
6311
4a3436e8 6312 if (!IS_GEN2(dev))
a72e4c9f 6313 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
0b8765c6
JB
6314}
6315
b17d48e2
ML
6316static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
6317{
842e0307 6318 struct intel_encoder *encoder;
b17d48e2
ML
6319 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6320 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
6321 enum intel_display_power_domain domain;
6322 unsigned long domains;
6323
6324 if (!intel_crtc->active)
6325 return;
6326
a539205a 6327 if (to_intel_plane_state(crtc->primary->state)->visible) {
5a21b665 6328 WARN_ON(intel_crtc->flip_work);
fc32b1fd 6329
2622a081 6330 intel_pre_disable_primary_noatomic(crtc);
54a41961
ML
6331
6332 intel_crtc_disable_planes(crtc, 1 << drm_plane_index(crtc->primary));
6333 to_intel_plane_state(crtc->primary->state)->visible = false;
a539205a
ML
6334 }
6335
b17d48e2 6336 dev_priv->display.crtc_disable(crtc);
842e0307 6337
78108b7c
VS
6338 DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
6339 crtc->base.id, crtc->name);
842e0307
ML
6340
6341 WARN_ON(drm_atomic_set_mode_for_crtc(crtc->state, NULL) < 0);
6342 crtc->state->active = false;
37d9078b 6343 intel_crtc->active = false;
842e0307
ML
6344 crtc->enabled = false;
6345 crtc->state->connector_mask = 0;
6346 crtc->state->encoder_mask = 0;
6347
6348 for_each_encoder_on_crtc(crtc->dev, crtc, encoder)
6349 encoder->base.crtc = NULL;
6350
58f9c0bc 6351 intel_fbc_disable(intel_crtc);
37d9078b 6352 intel_update_watermarks(crtc);
1f7457b1 6353 intel_disable_shared_dpll(intel_crtc);
b17d48e2
ML
6354
6355 domains = intel_crtc->enabled_power_domains;
6356 for_each_power_domain(domain, domains)
6357 intel_display_power_put(dev_priv, domain);
6358 intel_crtc->enabled_power_domains = 0;
565602d7
ML
6359
6360 dev_priv->active_crtcs &= ~(1 << intel_crtc->pipe);
6361 dev_priv->min_pixclk[intel_crtc->pipe] = 0;
b17d48e2
ML
6362}
6363
6b72d486
ML
6364/*
6365 * turn all crtc's off, but do not adjust state
6366 * This has to be paired with a call to intel_modeset_setup_hw_state.
6367 */
70e0bd74 6368int intel_display_suspend(struct drm_device *dev)
ee7b9f93 6369{
e2c8b870 6370 struct drm_i915_private *dev_priv = to_i915(dev);
70e0bd74 6371 struct drm_atomic_state *state;
e2c8b870 6372 int ret;
70e0bd74 6373
e2c8b870
ML
6374 state = drm_atomic_helper_suspend(dev);
6375 ret = PTR_ERR_OR_ZERO(state);
70e0bd74
ML
6376 if (ret)
6377 DRM_ERROR("Suspending crtc's failed with %i\n", ret);
e2c8b870
ML
6378 else
6379 dev_priv->modeset_restore_state = state;
70e0bd74 6380 return ret;
ee7b9f93
JB
6381}
6382
/* Common drm_encoder .destroy hook: tear down the core encoder and free the
 * enclosing intel_encoder (which owns the drm_encoder as its first member). */
void intel_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

	drm_encoder_cleanup(encoder);
	kfree(intel_encoder);
}
6390
0a91ca29
DV
6391/* Cross check the actual hw state with our own modeset state tracking (and it's
6392 * internal consistency). */
5a21b665 6393static void intel_connector_verify_state(struct intel_connector *connector)
79e53945 6394{
5a21b665 6395 struct drm_crtc *crtc = connector->base.state->crtc;
35dd3c64
ML
6396
6397 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
6398 connector->base.base.id,
6399 connector->base.name);
6400
0a91ca29 6401 if (connector->get_hw_state(connector)) {
e85376cb 6402 struct intel_encoder *encoder = connector->encoder;
5a21b665 6403 struct drm_connector_state *conn_state = connector->base.state;
0a91ca29 6404
35dd3c64
ML
6405 I915_STATE_WARN(!crtc,
6406 "connector enabled without attached crtc\n");
0a91ca29 6407
35dd3c64
ML
6408 if (!crtc)
6409 return;
6410
6411 I915_STATE_WARN(!crtc->state->active,
6412 "connector is active, but attached crtc isn't\n");
6413
e85376cb 6414 if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
35dd3c64
ML
6415 return;
6416
e85376cb 6417 I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
35dd3c64
ML
6418 "atomic encoder doesn't match attached encoder\n");
6419
e85376cb 6420 I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
35dd3c64
ML
6421 "attached encoder crtc differs from connector crtc\n");
6422 } else {
4d688a2a
ML
6423 I915_STATE_WARN(crtc && crtc->state->active,
6424 "attached crtc is active, but connector isn't\n");
5a21b665 6425 I915_STATE_WARN(!crtc && connector->base.state->best_encoder,
35dd3c64 6426 "best encoder set without crtc!\n");
0a91ca29 6427 }
79e53945
JB
6428}
6429
08d9bc92
ACO
6430int intel_connector_init(struct intel_connector *connector)
6431{
5350a031 6432 drm_atomic_helper_connector_reset(&connector->base);
08d9bc92 6433
5350a031 6434 if (!connector->base.state)
08d9bc92
ACO
6435 return -ENOMEM;
6436
08d9bc92
ACO
6437 return 0;
6438}
6439
6440struct intel_connector *intel_connector_alloc(void)
6441{
6442 struct intel_connector *connector;
6443
6444 connector = kzalloc(sizeof *connector, GFP_KERNEL);
6445 if (!connector)
6446 return NULL;
6447
6448 if (intel_connector_init(connector) < 0) {
6449 kfree(connector);
6450 return NULL;
6451 }
6452
6453 return connector;
6454}
6455
f0947c37
DV
6456/* Simple connector->get_hw_state implementation for encoders that support only
6457 * one connector and no cloning and hence the encoder state determines the state
6458 * of the connector. */
6459bool intel_connector_get_hw_state(struct intel_connector *connector)
ea5b213a 6460{
24929352 6461 enum pipe pipe = 0;
f0947c37 6462 struct intel_encoder *encoder = connector->encoder;
ea5b213a 6463
f0947c37 6464 return encoder->get_hw_state(encoder, &pipe);
ea5b213a
CW
6465}
6466
6d293983 6467static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
d272ddfa 6468{
6d293983
ACO
6469 if (crtc_state->base.enable && crtc_state->has_pch_encoder)
6470 return crtc_state->fdi_lanes;
d272ddfa
VS
6471
6472 return 0;
6473}
6474
6d293983 6475static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
5cec258b 6476 struct intel_crtc_state *pipe_config)
1857e1da 6477{
6d293983
ACO
6478 struct drm_atomic_state *state = pipe_config->base.state;
6479 struct intel_crtc *other_crtc;
6480 struct intel_crtc_state *other_crtc_state;
6481
1857e1da
DV
6482 DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
6483 pipe_name(pipe), pipe_config->fdi_lanes);
6484 if (pipe_config->fdi_lanes > 4) {
6485 DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
6486 pipe_name(pipe), pipe_config->fdi_lanes);
6d293983 6487 return -EINVAL;
1857e1da
DV
6488 }
6489
bafb6553 6490 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
1857e1da
DV
6491 if (pipe_config->fdi_lanes > 2) {
6492 DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
6493 pipe_config->fdi_lanes);
6d293983 6494 return -EINVAL;
1857e1da 6495 } else {
6d293983 6496 return 0;
1857e1da
DV
6497 }
6498 }
6499
6500 if (INTEL_INFO(dev)->num_pipes == 2)
6d293983 6501 return 0;
1857e1da
DV
6502
6503 /* Ivybridge 3 pipe is really complicated */
6504 switch (pipe) {
6505 case PIPE_A:
6d293983 6506 return 0;
1857e1da 6507 case PIPE_B:
6d293983
ACO
6508 if (pipe_config->fdi_lanes <= 2)
6509 return 0;
6510
6511 other_crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, PIPE_C));
6512 other_crtc_state =
6513 intel_atomic_get_crtc_state(state, other_crtc);
6514 if (IS_ERR(other_crtc_state))
6515 return PTR_ERR(other_crtc_state);
6516
6517 if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
1857e1da
DV
6518 DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
6519 pipe_name(pipe), pipe_config->fdi_lanes);
6d293983 6520 return -EINVAL;
1857e1da 6521 }
6d293983 6522 return 0;
1857e1da 6523 case PIPE_C:
251cc67c
VS
6524 if (pipe_config->fdi_lanes > 2) {
6525 DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n",
6526 pipe_name(pipe), pipe_config->fdi_lanes);
6d293983 6527 return -EINVAL;
251cc67c 6528 }
6d293983
ACO
6529
6530 other_crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, PIPE_B));
6531 other_crtc_state =
6532 intel_atomic_get_crtc_state(state, other_crtc);
6533 if (IS_ERR(other_crtc_state))
6534 return PTR_ERR(other_crtc_state);
6535
6536 if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
1857e1da 6537 DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
6d293983 6538 return -EINVAL;
1857e1da 6539 }
6d293983 6540 return 0;
1857e1da
DV
6541 default:
6542 BUG();
6543 }
6544}
6545
e29c22c0
DV
6546#define RETRY 1
6547static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
5cec258b 6548 struct intel_crtc_state *pipe_config)
877d48d5 6549{
1857e1da 6550 struct drm_device *dev = intel_crtc->base.dev;
7c5f93b0 6551 const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
6d293983
ACO
6552 int lane, link_bw, fdi_dotclock, ret;
6553 bool needs_recompute = false;
877d48d5 6554
e29c22c0 6555retry:
877d48d5
DV
6556 /* FDI is a binary signal running at ~2.7GHz, encoding
6557 * each output octet as 10 bits. The actual frequency
6558 * is stored as a divider into a 100MHz clock, and the
6559 * mode pixel clock is stored in units of 1KHz.
6560 * Hence the bw of each lane in terms of the mode signal
6561 * is:
6562 */
21a727b3 6563 link_bw = intel_fdi_link_freq(to_i915(dev), pipe_config);
877d48d5 6564
241bfc38 6565 fdi_dotclock = adjusted_mode->crtc_clock;
877d48d5 6566
2bd89a07 6567 lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
877d48d5
DV
6568 pipe_config->pipe_bpp);
6569
6570 pipe_config->fdi_lanes = lane;
6571
2bd89a07 6572 intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
877d48d5 6573 link_bw, &pipe_config->fdi_m_n);
1857e1da 6574
e3b247da 6575 ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
6d293983 6576 if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
e29c22c0
DV
6577 pipe_config->pipe_bpp -= 2*3;
6578 DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
6579 pipe_config->pipe_bpp);
6580 needs_recompute = true;
6581 pipe_config->bw_constrained = true;
6582
6583 goto retry;
6584 }
6585
6586 if (needs_recompute)
6587 return RETRY;
6588
6d293983 6589 return ret;
877d48d5
DV
6590}
6591
8cfb3407
VS
6592static bool pipe_config_supports_ips(struct drm_i915_private *dev_priv,
6593 struct intel_crtc_state *pipe_config)
6594{
6595 if (pipe_config->pipe_bpp > 24)
6596 return false;
6597
6598 /* HSW can handle pixel rate up to cdclk? */
2d1fe073 6599 if (IS_HASWELL(dev_priv))
8cfb3407
VS
6600 return true;
6601
6602 /*
b432e5cf
VS
6603 * We compare against max which means we must take
6604 * the increased cdclk requirement into account when
6605 * calculating the new cdclk.
6606 *
6607 * Should measure whether using a lower cdclk w/o IPS
8cfb3407
VS
6608 */
6609 return ilk_pipe_pixel_rate(pipe_config) <=
6610 dev_priv->max_cdclk_freq * 95 / 100;
6611}
6612
42db64ef 6613static void hsw_compute_ips_config(struct intel_crtc *crtc,
5cec258b 6614 struct intel_crtc_state *pipe_config)
42db64ef 6615{
8cfb3407
VS
6616 struct drm_device *dev = crtc->base.dev;
6617 struct drm_i915_private *dev_priv = dev->dev_private;
6618
d330a953 6619 pipe_config->ips_enabled = i915.enable_ips &&
8cfb3407
VS
6620 hsw_crtc_supports_ips(crtc) &&
6621 pipe_config_supports_ips(dev_priv, pipe_config);
42db64ef
PZ
6622}
6623
39acb4aa
VS
6624static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
6625{
6626 const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6627
6628 /* GDG double wide on either pipe, otherwise pipe A only */
6629 return INTEL_INFO(dev_priv)->gen < 4 &&
6630 (crtc->pipe == PIPE_A || IS_I915G(dev_priv));
6631}
6632
a43f6e0f 6633static int intel_crtc_compute_config(struct intel_crtc *crtc,
5cec258b 6634 struct intel_crtc_state *pipe_config)
79e53945 6635{
a43f6e0f 6636 struct drm_device *dev = crtc->base.dev;
8bd31e67 6637 struct drm_i915_private *dev_priv = dev->dev_private;
7c5f93b0 6638 const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
f3261156 6639 int clock_limit = dev_priv->max_dotclk_freq;
89749350 6640
cf532bb2 6641 if (INTEL_INFO(dev)->gen < 4) {
f3261156 6642 clock_limit = dev_priv->max_cdclk_freq * 9 / 10;
cf532bb2
VS
6643
6644 /*
39acb4aa 6645 * Enable double wide mode when the dot clock
cf532bb2 6646 * is > 90% of the (display) core speed.
cf532bb2 6647 */
39acb4aa
VS
6648 if (intel_crtc_supports_double_wide(crtc) &&
6649 adjusted_mode->crtc_clock > clock_limit) {
f3261156 6650 clock_limit = dev_priv->max_dotclk_freq;
cf532bb2 6651 pipe_config->double_wide = true;
ad3a4479 6652 }
f3261156 6653 }
ad3a4479 6654
f3261156
VS
6655 if (adjusted_mode->crtc_clock > clock_limit) {
6656 DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
6657 adjusted_mode->crtc_clock, clock_limit,
6658 yesno(pipe_config->double_wide));
6659 return -EINVAL;
2c07245f 6660 }
89749350 6661
1d1d0e27
VS
6662 /*
6663 * Pipe horizontal size must be even in:
6664 * - DVO ganged mode
6665 * - LVDS dual channel mode
6666 * - Double wide pipe
6667 */
a93e255f 6668 if ((intel_pipe_will_have_type(pipe_config, INTEL_OUTPUT_LVDS) &&
1d1d0e27
VS
6669 intel_is_dual_link_lvds(dev)) || pipe_config->double_wide)
6670 pipe_config->pipe_src_w &= ~1;
6671
8693a824
DL
6672 /* Cantiga+ cannot handle modes with a hsync front porch of 0.
6673 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
44f46b42
CW
6674 */
6675 if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) &&
aad941d5 6676 adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay)
e29c22c0 6677 return -EINVAL;
44f46b42 6678
f5adf94e 6679 if (HAS_IPS(dev))
a43f6e0f
DV
6680 hsw_compute_ips_config(crtc, pipe_config);
6681
877d48d5 6682 if (pipe_config->has_pch_encoder)
a43f6e0f 6683 return ironlake_fdi_compute_config(crtc, pipe_config);
877d48d5 6684
cf5a15be 6685 return 0;
79e53945
JB
6686}
6687
1652d19e
VS
6688static int skylake_get_display_clock_speed(struct drm_device *dev)
6689{
6690 struct drm_i915_private *dev_priv = to_i915(dev);
ea61791e 6691 uint32_t cdctl;
1652d19e 6692
ea61791e 6693 skl_dpll0_update(dev_priv);
1652d19e 6694
63911d72 6695 if (dev_priv->cdclk_pll.vco == 0)
709e05c3 6696 return dev_priv->cdclk_pll.ref;
1652d19e 6697
ea61791e 6698 cdctl = I915_READ(CDCLK_CTL);
1652d19e 6699
63911d72 6700 if (dev_priv->cdclk_pll.vco == 8640000) {
1652d19e
VS
6701 switch (cdctl & CDCLK_FREQ_SEL_MASK) {
6702 case CDCLK_FREQ_450_432:
6703 return 432000;
6704 case CDCLK_FREQ_337_308:
487ed2e4 6705 return 308571;
ea61791e
VS
6706 case CDCLK_FREQ_540:
6707 return 540000;
1652d19e 6708 case CDCLK_FREQ_675_617:
487ed2e4 6709 return 617143;
1652d19e 6710 default:
ea61791e 6711 MISSING_CASE(cdctl & CDCLK_FREQ_SEL_MASK);
1652d19e
VS
6712 }
6713 } else {
1652d19e
VS
6714 switch (cdctl & CDCLK_FREQ_SEL_MASK) {
6715 case CDCLK_FREQ_450_432:
6716 return 450000;
6717 case CDCLK_FREQ_337_308:
6718 return 337500;
ea61791e
VS
6719 case CDCLK_FREQ_540:
6720 return 540000;
1652d19e
VS
6721 case CDCLK_FREQ_675_617:
6722 return 675000;
6723 default:
ea61791e 6724 MISSING_CASE(cdctl & CDCLK_FREQ_SEL_MASK);
1652d19e
VS
6725 }
6726 }
6727
709e05c3 6728 return dev_priv->cdclk_pll.ref;
1652d19e
VS
6729}
6730
83d7c81f
VS
6731static void bxt_de_pll_update(struct drm_i915_private *dev_priv)
6732{
6733 u32 val;
6734
6735 dev_priv->cdclk_pll.ref = 19200;
1c3f7700 6736 dev_priv->cdclk_pll.vco = 0;
83d7c81f
VS
6737
6738 val = I915_READ(BXT_DE_PLL_ENABLE);
1c3f7700 6739 if ((val & BXT_DE_PLL_PLL_ENABLE) == 0)
83d7c81f 6740 return;
83d7c81f 6741
1c3f7700
ID
6742 if (WARN_ON((val & BXT_DE_PLL_LOCK) == 0))
6743 return;
83d7c81f
VS
6744
6745 val = I915_READ(BXT_DE_PLL_CTL);
6746 dev_priv->cdclk_pll.vco = (val & BXT_DE_PLL_RATIO_MASK) *
6747 dev_priv->cdclk_pll.ref;
6748}
6749
acd3f3d3
BP
6750static int broxton_get_display_clock_speed(struct drm_device *dev)
6751{
6752 struct drm_i915_private *dev_priv = to_i915(dev);
f5986242
VS
6753 u32 divider;
6754 int div, vco;
acd3f3d3 6755
83d7c81f
VS
6756 bxt_de_pll_update(dev_priv);
6757
f5986242
VS
6758 vco = dev_priv->cdclk_pll.vco;
6759 if (vco == 0)
6760 return dev_priv->cdclk_pll.ref;
acd3f3d3 6761
f5986242 6762 divider = I915_READ(CDCLK_CTL) & BXT_CDCLK_CD2X_DIV_SEL_MASK;
acd3f3d3 6763
f5986242 6764 switch (divider) {
acd3f3d3 6765 case BXT_CDCLK_CD2X_DIV_SEL_1:
f5986242
VS
6766 div = 2;
6767 break;
acd3f3d3 6768 case BXT_CDCLK_CD2X_DIV_SEL_1_5:
f5986242
VS
6769 div = 3;
6770 break;
acd3f3d3 6771 case BXT_CDCLK_CD2X_DIV_SEL_2:
f5986242
VS
6772 div = 4;
6773 break;
acd3f3d3 6774 case BXT_CDCLK_CD2X_DIV_SEL_4:
f5986242
VS
6775 div = 8;
6776 break;
6777 default:
6778 MISSING_CASE(divider);
6779 return dev_priv->cdclk_pll.ref;
acd3f3d3
BP
6780 }
6781
f5986242 6782 return DIV_ROUND_CLOSEST(vco, div);
acd3f3d3
BP
6783}
6784
1652d19e
VS
6785static int broadwell_get_display_clock_speed(struct drm_device *dev)
6786{
6787 struct drm_i915_private *dev_priv = dev->dev_private;
6788 uint32_t lcpll = I915_READ(LCPLL_CTL);
6789 uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
6790
6791 if (lcpll & LCPLL_CD_SOURCE_FCLK)
6792 return 800000;
6793 else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
6794 return 450000;
6795 else if (freq == LCPLL_CLK_FREQ_450)
6796 return 450000;
6797 else if (freq == LCPLL_CLK_FREQ_54O_BDW)
6798 return 540000;
6799 else if (freq == LCPLL_CLK_FREQ_337_5_BDW)
6800 return 337500;
6801 else
6802 return 675000;
6803}
6804
6805static int haswell_get_display_clock_speed(struct drm_device *dev)
6806{
6807 struct drm_i915_private *dev_priv = dev->dev_private;
6808 uint32_t lcpll = I915_READ(LCPLL_CTL);
6809 uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
6810
6811 if (lcpll & LCPLL_CD_SOURCE_FCLK)
6812 return 800000;
6813 else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
6814 return 450000;
6815 else if (freq == LCPLL_CLK_FREQ_450)
6816 return 450000;
6817 else if (IS_HSW_ULT(dev))
6818 return 337500;
6819 else
6820 return 540000;
79e53945
JB
6821}
6822
25eb05fc
JB
6823static int valleyview_get_display_clock_speed(struct drm_device *dev)
6824{
bfa7df01
VS
6825 return vlv_get_cck_clock_hpll(to_i915(dev), "cdclk",
6826 CCK_DISPLAY_CLOCK_CONTROL);
25eb05fc
JB
6827}
6828
b37a6434
VS
/* ILK has a fixed 450 MHz display clock. */
static int ilk_get_display_clock_speed(struct drm_device *dev)
{
	return 450000; /* kHz */
}
6833
e70236a8
JB
/* i945 has a fixed 400 MHz display clock. */
static int i945_get_display_clock_speed(struct drm_device *dev)
{
	return 400000; /* kHz */
}
79e53945 6838
/* i915 has a fixed 333.33 MHz display clock. */
static int i915_get_display_clock_speed(struct drm_device *dev)
{
	return 333333; /* kHz */
}
79e53945 6843
e70236a8
JB
/* Remaining i9xx variants run a fixed 200 MHz display clock. */
static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
{
	return 200000; /* kHz */
}
79e53945 6848
257a7ffc
DV
6849static int pnv_get_display_clock_speed(struct drm_device *dev)
6850{
6851 u16 gcfgc = 0;
6852
6853 pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
6854
6855 switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
6856 case GC_DISPLAY_CLOCK_267_MHZ_PNV:
e907f170 6857 return 266667;
257a7ffc 6858 case GC_DISPLAY_CLOCK_333_MHZ_PNV:
e907f170 6859 return 333333;
257a7ffc 6860 case GC_DISPLAY_CLOCK_444_MHZ_PNV:
e907f170 6861 return 444444;
257a7ffc
DV
6862 case GC_DISPLAY_CLOCK_200_MHZ_PNV:
6863 return 200000;
6864 default:
6865 DRM_ERROR("Unknown pnv display core clock 0x%04x\n", gcfgc);
6866 case GC_DISPLAY_CLOCK_133_MHZ_PNV:
e907f170 6867 return 133333;
257a7ffc 6868 case GC_DISPLAY_CLOCK_167_MHZ_PNV:
e907f170 6869 return 166667;
257a7ffc
DV
6870 }
6871}
6872
e70236a8
JB
6873static int i915gm_get_display_clock_speed(struct drm_device *dev)
6874{
6875 u16 gcfgc = 0;
79e53945 6876
e70236a8
JB
6877 pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
6878
6879 if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
e907f170 6880 return 133333;
e70236a8
JB
6881 else {
6882 switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
6883 case GC_DISPLAY_CLOCK_333_MHZ:
e907f170 6884 return 333333;
e70236a8
JB
6885 default:
6886 case GC_DISPLAY_CLOCK_190_200_MHZ:
6887 return 190000;
79e53945 6888 }
e70236a8
JB
6889 }
6890}
6891
/* i865 has a fixed 266.67 MHz display clock. */
static int i865_get_display_clock_speed(struct drm_device *dev)
{
	return 266667; /* kHz */
}
6896
1b1d2716 6897static int i85x_get_display_clock_speed(struct drm_device *dev)
e70236a8
JB
6898{
6899 u16 hpllcc = 0;
1b1d2716 6900
65cd2b3f
VS
6901 /*
6902 * 852GM/852GMV only supports 133 MHz and the HPLLCC
6903 * encoding is different :(
6904 * FIXME is this the right way to detect 852GM/852GMV?
6905 */
6906 if (dev->pdev->revision == 0x1)
6907 return 133333;
6908
1b1d2716
VS
6909 pci_bus_read_config_word(dev->pdev->bus,
6910 PCI_DEVFN(0, 3), HPLLCC, &hpllcc);
6911
e70236a8
JB
6912 /* Assume that the hardware is in the high speed state. This
6913 * should be the default.
6914 */
6915 switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
6916 case GC_CLOCK_133_200:
1b1d2716 6917 case GC_CLOCK_133_200_2:
e70236a8
JB
6918 case GC_CLOCK_100_200:
6919 return 200000;
6920 case GC_CLOCK_166_250:
6921 return 250000;
6922 case GC_CLOCK_100_133:
e907f170 6923 return 133333;
1b1d2716
VS
6924 case GC_CLOCK_133_266:
6925 case GC_CLOCK_133_266_2:
6926 case GC_CLOCK_166_266:
6927 return 266667;
e70236a8 6928 }
79e53945 6929
e70236a8
JB
6930 /* Shouldn't happen */
6931 return 0;
6932}
79e53945 6933
e70236a8
JB
/* i830 has a fixed 133.33 MHz display clock. */
static int i830_get_display_clock_speed(struct drm_device *dev)
{
	return 133333; /* kHz */
}
6938
34edce2f
VS
6939static unsigned int intel_hpll_vco(struct drm_device *dev)
6940{
6941 struct drm_i915_private *dev_priv = dev->dev_private;
6942 static const unsigned int blb_vco[8] = {
6943 [0] = 3200000,
6944 [1] = 4000000,
6945 [2] = 5333333,
6946 [3] = 4800000,
6947 [4] = 6400000,
6948 };
6949 static const unsigned int pnv_vco[8] = {
6950 [0] = 3200000,
6951 [1] = 4000000,
6952 [2] = 5333333,
6953 [3] = 4800000,
6954 [4] = 2666667,
6955 };
6956 static const unsigned int cl_vco[8] = {
6957 [0] = 3200000,
6958 [1] = 4000000,
6959 [2] = 5333333,
6960 [3] = 6400000,
6961 [4] = 3333333,
6962 [5] = 3566667,
6963 [6] = 4266667,
6964 };
6965 static const unsigned int elk_vco[8] = {
6966 [0] = 3200000,
6967 [1] = 4000000,
6968 [2] = 5333333,
6969 [3] = 4800000,
6970 };
6971 static const unsigned int ctg_vco[8] = {
6972 [0] = 3200000,
6973 [1] = 4000000,
6974 [2] = 5333333,
6975 [3] = 6400000,
6976 [4] = 2666667,
6977 [5] = 4266667,
6978 };
6979 const unsigned int *vco_table;
6980 unsigned int vco;
6981 uint8_t tmp = 0;
6982
6983 /* FIXME other chipsets? */
6984 if (IS_GM45(dev))
6985 vco_table = ctg_vco;
6986 else if (IS_G4X(dev))
6987 vco_table = elk_vco;
6988 else if (IS_CRESTLINE(dev))
6989 vco_table = cl_vco;
6990 else if (IS_PINEVIEW(dev))
6991 vco_table = pnv_vco;
6992 else if (IS_G33(dev))
6993 vco_table = blb_vco;
6994 else
6995 return 0;
6996
6997 tmp = I915_READ(IS_MOBILE(dev) ? HPLLVCO_MOBILE : HPLLVCO);
6998
6999 vco = vco_table[tmp & 0x7];
7000 if (vco == 0)
7001 DRM_ERROR("Bad HPLL VCO (HPLLVCO=0x%02x)\n", tmp);
7002 else
7003 DRM_DEBUG_KMS("HPLL VCO %u kHz\n", vco);
7004
7005 return vco;
7006}
7007
7008static int gm45_get_display_clock_speed(struct drm_device *dev)
7009{
7010 unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
7011 uint16_t tmp = 0;
7012
7013 pci_read_config_word(dev->pdev, GCFGC, &tmp);
7014
7015 cdclk_sel = (tmp >> 12) & 0x1;
7016
7017 switch (vco) {
7018 case 2666667:
7019 case 4000000:
7020 case 5333333:
7021 return cdclk_sel ? 333333 : 222222;
7022 case 3200000:
7023 return cdclk_sel ? 320000 : 228571;
7024 default:
7025 DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u, CFGC=0x%04x\n", vco, tmp);
7026 return 222222;
7027 }
7028}
7029
7030static int i965gm_get_display_clock_speed(struct drm_device *dev)
7031{
7032 static const uint8_t div_3200[] = { 16, 10, 8 };
7033 static const uint8_t div_4000[] = { 20, 12, 10 };
7034 static const uint8_t div_5333[] = { 24, 16, 14 };
7035 const uint8_t *div_table;
7036 unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
7037 uint16_t tmp = 0;
7038
7039 pci_read_config_word(dev->pdev, GCFGC, &tmp);
7040
7041 cdclk_sel = ((tmp >> 8) & 0x1f) - 1;
7042
7043 if (cdclk_sel >= ARRAY_SIZE(div_3200))
7044 goto fail;
7045
7046 switch (vco) {
7047 case 3200000:
7048 div_table = div_3200;
7049 break;
7050 case 4000000:
7051 div_table = div_4000;
7052 break;
7053 case 5333333:
7054 div_table = div_5333;
7055 break;
7056 default:
7057 goto fail;
7058 }
7059
7060 return DIV_ROUND_CLOSEST(vco, div_table[cdclk_sel]);
7061
caf4e252 7062fail:
34edce2f
VS
7063 DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%04x\n", vco, tmp);
7064 return 200000;
7065}
7066
7067static int g33_get_display_clock_speed(struct drm_device *dev)
7068{
7069 static const uint8_t div_3200[] = { 12, 10, 8, 7, 5, 16 };
7070 static const uint8_t div_4000[] = { 14, 12, 10, 8, 6, 20 };
7071 static const uint8_t div_4800[] = { 20, 14, 12, 10, 8, 24 };
7072 static const uint8_t div_5333[] = { 20, 16, 12, 12, 8, 28 };
7073 const uint8_t *div_table;
7074 unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
7075 uint16_t tmp = 0;
7076
7077 pci_read_config_word(dev->pdev, GCFGC, &tmp);
7078
7079 cdclk_sel = (tmp >> 4) & 0x7;
7080
7081 if (cdclk_sel >= ARRAY_SIZE(div_3200))
7082 goto fail;
7083
7084 switch (vco) {
7085 case 3200000:
7086 div_table = div_3200;
7087 break;
7088 case 4000000:
7089 div_table = div_4000;
7090 break;
7091 case 4800000:
7092 div_table = div_4800;
7093 break;
7094 case 5333333:
7095 div_table = div_5333;
7096 break;
7097 default:
7098 goto fail;
7099 }
7100
7101 return DIV_ROUND_CLOSEST(vco, div_table[cdclk_sel]);
7102
caf4e252 7103fail:
34edce2f
VS
7104 DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%08x\n", vco, tmp);
7105 return 190476;
7106}
7107
2c07245f 7108static void
a65851af 7109intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
2c07245f 7110{
a65851af
VS
7111 while (*num > DATA_LINK_M_N_MASK ||
7112 *den > DATA_LINK_M_N_MASK) {
2c07245f
ZW
7113 *num >>= 1;
7114 *den >>= 1;
7115 }
7116}
7117
a65851af
VS
7118static void compute_m_n(unsigned int m, unsigned int n,
7119 uint32_t *ret_m, uint32_t *ret_n)
7120{
7121 *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
7122 *ret_m = div_u64((uint64_t) m * *ret_n, n);
7123 intel_reduce_m_n_ratio(ret_m, ret_n);
7124}
7125
e69d0bc1
DV
7126void
7127intel_link_compute_m_n(int bits_per_pixel, int nlanes,
7128 int pixel_clock, int link_clock,
7129 struct intel_link_m_n *m_n)
2c07245f 7130{
e69d0bc1 7131 m_n->tu = 64;
a65851af
VS
7132
7133 compute_m_n(bits_per_pixel * pixel_clock,
7134 link_clock * nlanes * 8,
7135 &m_n->gmch_m, &m_n->gmch_n);
7136
7137 compute_m_n(pixel_clock, link_clock,
7138 &m_n->link_m, &m_n->link_n);
2c07245f
ZW
7139}
7140
a7615030
CW
7141static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
7142{
d330a953
JN
7143 if (i915.panel_use_ssc >= 0)
7144 return i915.panel_use_ssc != 0;
41aa3448 7145 return dev_priv->vbt.lvds_use_ssc
435793df 7146 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
a7615030
CW
7147}
7148
7429e9d4 7149static uint32_t pnv_dpll_compute_fp(struct dpll *dpll)
c65d77d8 7150{
7df00d7a 7151 return (1 << dpll->n) << 16 | dpll->m2;
7429e9d4 7152}
f47709a9 7153
7429e9d4
DV
7154static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll)
7155{
7156 return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
c65d77d8
JB
7157}
7158
f47709a9 7159static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
190f68c5 7160 struct intel_crtc_state *crtc_state,
9e2c8475 7161 struct dpll *reduced_clock)
a7516a05 7162{
f47709a9 7163 struct drm_device *dev = crtc->base.dev;
a7516a05
JB
7164 u32 fp, fp2 = 0;
7165
7166 if (IS_PINEVIEW(dev)) {
190f68c5 7167 fp = pnv_dpll_compute_fp(&crtc_state->dpll);
a7516a05 7168 if (reduced_clock)
7429e9d4 7169 fp2 = pnv_dpll_compute_fp(reduced_clock);
a7516a05 7170 } else {
190f68c5 7171 fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
a7516a05 7172 if (reduced_clock)
7429e9d4 7173 fp2 = i9xx_dpll_compute_fp(reduced_clock);
a7516a05
JB
7174 }
7175
190f68c5 7176 crtc_state->dpll_hw_state.fp0 = fp;
a7516a05 7177
f47709a9 7178 crtc->lowfreq_avail = false;
a93e255f 7179 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
ab585dea 7180 reduced_clock) {
190f68c5 7181 crtc_state->dpll_hw_state.fp1 = fp2;
f47709a9 7182 crtc->lowfreq_avail = true;
a7516a05 7183 } else {
190f68c5 7184 crtc_state->dpll_hw_state.fp1 = fp;
a7516a05
JB
7185 }
7186}
7187
5e69f97f
CML
7188static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
7189 pipe)
89b667f8
JB
7190{
7191 u32 reg_val;
7192
7193 /*
7194 * PLLB opamp always calibrates to max value of 0x3f, force enable it
7195 * and set it to a reasonable value instead.
7196 */
ab3c759a 7197 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
89b667f8
JB
7198 reg_val &= 0xffffff00;
7199 reg_val |= 0x00000030;
ab3c759a 7200 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
89b667f8 7201
ab3c759a 7202 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
89b667f8
JB
7203 reg_val &= 0x8cffffff;
7204 reg_val = 0x8c000000;
ab3c759a 7205 vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
89b667f8 7206
ab3c759a 7207 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
89b667f8 7208 reg_val &= 0xffffff00;
ab3c759a 7209 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
89b667f8 7210
ab3c759a 7211 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
89b667f8
JB
7212 reg_val &= 0x00ffffff;
7213 reg_val |= 0xb0000000;
ab3c759a 7214 vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
89b667f8
JB
7215}
7216
b551842d
DV
7217static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
7218 struct intel_link_m_n *m_n)
7219{
7220 struct drm_device *dev = crtc->base.dev;
7221 struct drm_i915_private *dev_priv = dev->dev_private;
7222 int pipe = crtc->pipe;
7223
e3b95f1e
DV
7224 I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
7225 I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
7226 I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
7227 I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
b551842d
DV
7228}
7229
7230static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
f769cd24
VK
7231 struct intel_link_m_n *m_n,
7232 struct intel_link_m_n *m2_n2)
b551842d
DV
7233{
7234 struct drm_device *dev = crtc->base.dev;
7235 struct drm_i915_private *dev_priv = dev->dev_private;
7236 int pipe = crtc->pipe;
6e3c9717 7237 enum transcoder transcoder = crtc->config->cpu_transcoder;
b551842d
DV
7238
7239 if (INTEL_INFO(dev)->gen >= 5) {
7240 I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
7241 I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
7242 I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
7243 I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
f769cd24
VK
7244 /* M2_N2 registers to be set only for gen < 8 (M2_N2 available
7245 * for gen < 8) and if DRRS is supported (to make sure the
7246 * registers are not unnecessarily accessed).
7247 */
44395bfe 7248 if (m2_n2 && (IS_CHERRYVIEW(dev) || INTEL_INFO(dev)->gen < 8) &&
6e3c9717 7249 crtc->config->has_drrs) {
f769cd24
VK
7250 I915_WRITE(PIPE_DATA_M2(transcoder),
7251 TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
7252 I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
7253 I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m);
7254 I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n);
7255 }
b551842d 7256 } else {
e3b95f1e
DV
7257 I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
7258 I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
7259 I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
7260 I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
b551842d
DV
7261 }
7262}
7263
fe3cd48d 7264void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n)
03afc4a2 7265{
fe3cd48d
R
7266 struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
7267
7268 if (m_n == M1_N1) {
7269 dp_m_n = &crtc->config->dp_m_n;
7270 dp_m2_n2 = &crtc->config->dp_m2_n2;
7271 } else if (m_n == M2_N2) {
7272
7273 /*
7274 * M2_N2 registers are not supported. Hence m2_n2 divider value
7275 * needs to be programmed into M1_N1.
7276 */
7277 dp_m_n = &crtc->config->dp_m2_n2;
7278 } else {
7279 DRM_ERROR("Unsupported divider value\n");
7280 return;
7281 }
7282
6e3c9717
ACO
7283 if (crtc->config->has_pch_encoder)
7284 intel_pch_transcoder_set_m_n(crtc, &crtc->config->dp_m_n);
03afc4a2 7285 else
fe3cd48d 7286 intel_cpu_transcoder_set_m_n(crtc, dp_m_n, dp_m2_n2);
03afc4a2
DV
7287}
7288
251ac862
DV
7289static void vlv_compute_dpll(struct intel_crtc *crtc,
7290 struct intel_crtc_state *pipe_config)
bdd4b6a6 7291{
03ed5cbf 7292 pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
cd2d34d9 7293 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
03ed5cbf
VS
7294 if (crtc->pipe != PIPE_A)
7295 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
bdd4b6a6 7296
cd2d34d9 7297 /* DPLL not used with DSI, but still need the rest set up */
187a1c07 7298 if (!pipe_config->has_dsi_encoder)
cd2d34d9
VS
7299 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
7300 DPLL_EXT_BUFFER_ENABLE_VLV;
7301
03ed5cbf
VS
7302 pipe_config->dpll_hw_state.dpll_md =
7303 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
7304}
bdd4b6a6 7305
03ed5cbf
VS
7306static void chv_compute_dpll(struct intel_crtc *crtc,
7307 struct intel_crtc_state *pipe_config)
7308{
7309 pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
cd2d34d9 7310 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
03ed5cbf
VS
7311 if (crtc->pipe != PIPE_A)
7312 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
7313
cd2d34d9 7314 /* DPLL not used with DSI, but still need the rest set up */
187a1c07 7315 if (!pipe_config->has_dsi_encoder)
cd2d34d9
VS
7316 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;
7317
03ed5cbf
VS
7318 pipe_config->dpll_hw_state.dpll_md =
7319 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
bdd4b6a6
DV
7320}
7321
d288f65f 7322static void vlv_prepare_pll(struct intel_crtc *crtc,
5cec258b 7323 const struct intel_crtc_state *pipe_config)
a0c4da24 7324{
f47709a9 7325 struct drm_device *dev = crtc->base.dev;
a0c4da24 7326 struct drm_i915_private *dev_priv = dev->dev_private;
cd2d34d9 7327 enum pipe pipe = crtc->pipe;
bdd4b6a6 7328 u32 mdiv;
a0c4da24 7329 u32 bestn, bestm1, bestm2, bestp1, bestp2;
bdd4b6a6 7330 u32 coreclk, reg_val;
a0c4da24 7331
cd2d34d9
VS
7332 /* Enable Refclk */
7333 I915_WRITE(DPLL(pipe),
7334 pipe_config->dpll_hw_state.dpll &
7335 ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));
7336
7337 /* No need to actually set up the DPLL with DSI */
7338 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
7339 return;
7340
a580516d 7341 mutex_lock(&dev_priv->sb_lock);
09153000 7342
d288f65f
VS
7343 bestn = pipe_config->dpll.n;
7344 bestm1 = pipe_config->dpll.m1;
7345 bestm2 = pipe_config->dpll.m2;
7346 bestp1 = pipe_config->dpll.p1;
7347 bestp2 = pipe_config->dpll.p2;
a0c4da24 7348
89b667f8
JB
7349 /* See eDP HDMI DPIO driver vbios notes doc */
7350
7351 /* PLL B needs special handling */
bdd4b6a6 7352 if (pipe == PIPE_B)
5e69f97f 7353 vlv_pllb_recal_opamp(dev_priv, pipe);
89b667f8
JB
7354
7355 /* Set up Tx target for periodic Rcomp update */
ab3c759a 7356 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);
89b667f8
JB
7357
7358 /* Disable target IRef on PLL */
ab3c759a 7359 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
89b667f8 7360 reg_val &= 0x00ffffff;
ab3c759a 7361 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);
89b667f8
JB
7362
7363 /* Disable fast lock */
ab3c759a 7364 vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);
89b667f8
JB
7365
7366 /* Set idtafcrecal before PLL is enabled */
a0c4da24
JB
7367 mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
7368 mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
7369 mdiv |= ((bestn << DPIO_N_SHIFT));
a0c4da24 7370 mdiv |= (1 << DPIO_K_SHIFT);
7df5080b
JB
7371
7372 /*
7373 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
7374 * but we don't support that).
7375 * Note: don't use the DAC post divider as it seems unstable.
7376 */
7377 mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
ab3c759a 7378 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
a0c4da24 7379
a0c4da24 7380 mdiv |= DPIO_ENABLE_CALIBRATION;
ab3c759a 7381 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
a0c4da24 7382
89b667f8 7383 /* Set HBR and RBR LPF coefficients */
d288f65f 7384 if (pipe_config->port_clock == 162000 ||
409ee761
ACO
7385 intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG) ||
7386 intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
ab3c759a 7387 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
885b0120 7388 0x009f0003);
89b667f8 7389 else
ab3c759a 7390 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
89b667f8
JB
7391 0x00d0000f);
7392
681a8504 7393 if (pipe_config->has_dp_encoder) {
89b667f8 7394 /* Use SSC source */
bdd4b6a6 7395 if (pipe == PIPE_A)
ab3c759a 7396 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
89b667f8
JB
7397 0x0df40000);
7398 else
ab3c759a 7399 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
89b667f8
JB
7400 0x0df70000);
7401 } else { /* HDMI or VGA */
7402 /* Use bend source */
bdd4b6a6 7403 if (pipe == PIPE_A)
ab3c759a 7404 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
89b667f8
JB
7405 0x0df70000);
7406 else
ab3c759a 7407 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
89b667f8
JB
7408 0x0df40000);
7409 }
a0c4da24 7410
ab3c759a 7411 coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
89b667f8 7412 coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
409ee761
ACO
7413 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
7414 intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
89b667f8 7415 coreclk |= 0x01000000;
ab3c759a 7416 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
a0c4da24 7417
ab3c759a 7418 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
a580516d 7419 mutex_unlock(&dev_priv->sb_lock);
a0c4da24
JB
7420}
7421
d288f65f 7422static void chv_prepare_pll(struct intel_crtc *crtc,
5cec258b 7423 const struct intel_crtc_state *pipe_config)
9d556c99
CML
7424{
7425 struct drm_device *dev = crtc->base.dev;
7426 struct drm_i915_private *dev_priv = dev->dev_private;
cd2d34d9 7427 enum pipe pipe = crtc->pipe;
9d556c99 7428 enum dpio_channel port = vlv_pipe_to_channel(pipe);
9cbe40c1 7429 u32 loopfilter, tribuf_calcntr;
9d556c99 7430 u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
a945ce7e 7431 u32 dpio_val;
9cbe40c1 7432 int vco;
9d556c99 7433
cd2d34d9
VS
7434 /* Enable Refclk and SSC */
7435 I915_WRITE(DPLL(pipe),
7436 pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
7437
7438 /* No need to actually set up the DPLL with DSI */
7439 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
7440 return;
7441
d288f65f
VS
7442 bestn = pipe_config->dpll.n;
7443 bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
7444 bestm1 = pipe_config->dpll.m1;
7445 bestm2 = pipe_config->dpll.m2 >> 22;
7446 bestp1 = pipe_config->dpll.p1;
7447 bestp2 = pipe_config->dpll.p2;
9cbe40c1 7448 vco = pipe_config->dpll.vco;
a945ce7e 7449 dpio_val = 0;
9cbe40c1 7450 loopfilter = 0;
9d556c99 7451
a580516d 7452 mutex_lock(&dev_priv->sb_lock);
9d556c99 7453
9d556c99
CML
7454 /* p1 and p2 divider */
7455 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
7456 5 << DPIO_CHV_S1_DIV_SHIFT |
7457 bestp1 << DPIO_CHV_P1_DIV_SHIFT |
7458 bestp2 << DPIO_CHV_P2_DIV_SHIFT |
7459 1 << DPIO_CHV_K_DIV_SHIFT);
7460
7461 /* Feedback post-divider - m2 */
7462 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);
7463
7464 /* Feedback refclk divider - n and m1 */
7465 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
7466 DPIO_CHV_M1_DIV_BY_2 |
7467 1 << DPIO_CHV_N_DIV_SHIFT);
7468
7469 /* M2 fraction division */
25a25dfc 7470 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);
9d556c99
CML
7471
7472 /* M2 fraction division enable */
a945ce7e
VP
7473 dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
7474 dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
7475 dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
7476 if (bestm2_frac)
7477 dpio_val |= DPIO_CHV_FRAC_DIV_EN;
7478 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);
9d556c99 7479
de3a0fde
VP
7480 /* Program digital lock detect threshold */
7481 dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
7482 dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
7483 DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
7484 dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
7485 if (!bestm2_frac)
7486 dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
7487 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);
7488
9d556c99 7489 /* Loop filter */
9cbe40c1
VP
7490 if (vco == 5400000) {
7491 loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
7492 loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
7493 loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
7494 tribuf_calcntr = 0x9;
7495 } else if (vco <= 6200000) {
7496 loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
7497 loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
7498 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
7499 tribuf_calcntr = 0x9;
7500 } else if (vco <= 6480000) {
7501 loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
7502 loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
7503 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
7504 tribuf_calcntr = 0x8;
7505 } else {
7506 /* Not supported. Apply the same limits as in the max case */
7507 loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
7508 loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
7509 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
7510 tribuf_calcntr = 0;
7511 }
9d556c99
CML
7512 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);
7513
968040b2 7514 dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
9cbe40c1
VP
7515 dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
7516 dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
7517 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);
7518
9d556c99
CML
7519 /* AFC Recal */
7520 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
7521 vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
7522 DPIO_AFC_RECAL);
7523
a580516d 7524 mutex_unlock(&dev_priv->sb_lock);
9d556c99
CML
7525}
7526
d288f65f
VS
7527/**
7528 * vlv_force_pll_on - forcibly enable just the PLL
7529 * @dev_priv: i915 private structure
7530 * @pipe: pipe PLL to enable
7531 * @dpll: PLL configuration
7532 *
7533 * Enable the PLL for @pipe using the supplied @dpll config. To be used
7534 * in cases where we need the PLL enabled even when @pipe is not going to
7535 * be enabled.
7536 */
3f36b937
TU
7537int vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
7538 const struct dpll *dpll)
d288f65f
VS
7539{
7540 struct intel_crtc *crtc =
7541 to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
3f36b937
TU
7542 struct intel_crtc_state *pipe_config;
7543
7544 pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
7545 if (!pipe_config)
7546 return -ENOMEM;
7547
7548 pipe_config->base.crtc = &crtc->base;
7549 pipe_config->pixel_multiplier = 1;
7550 pipe_config->dpll = *dpll;
d288f65f
VS
7551
7552 if (IS_CHERRYVIEW(dev)) {
3f36b937
TU
7553 chv_compute_dpll(crtc, pipe_config);
7554 chv_prepare_pll(crtc, pipe_config);
7555 chv_enable_pll(crtc, pipe_config);
d288f65f 7556 } else {
3f36b937
TU
7557 vlv_compute_dpll(crtc, pipe_config);
7558 vlv_prepare_pll(crtc, pipe_config);
7559 vlv_enable_pll(crtc, pipe_config);
d288f65f 7560 }
3f36b937
TU
7561
7562 kfree(pipe_config);
7563
7564 return 0;
d288f65f
VS
7565}
7566
7567/**
7568 * vlv_force_pll_off - forcibly disable just the PLL
7569 * @dev_priv: i915 private structure
7570 * @pipe: pipe PLL to disable
7571 *
7572 * Disable the PLL for @pipe. To be used in cases where we need
7573 * the PLL enabled even when @pipe is not going to be enabled.
7574 */
7575void vlv_force_pll_off(struct drm_device *dev, enum pipe pipe)
7576{
7577 if (IS_CHERRYVIEW(dev))
7578 chv_disable_pll(to_i915(dev), pipe);
7579 else
7580 vlv_disable_pll(to_i915(dev), pipe);
7581}
7582
251ac862
DV
7583static void i9xx_compute_dpll(struct intel_crtc *crtc,
7584 struct intel_crtc_state *crtc_state,
9e2c8475 7585 struct dpll *reduced_clock)
eb1cbe48 7586{
f47709a9 7587 struct drm_device *dev = crtc->base.dev;
eb1cbe48 7588 struct drm_i915_private *dev_priv = dev->dev_private;
eb1cbe48
DV
7589 u32 dpll;
7590 bool is_sdvo;
190f68c5 7591 struct dpll *clock = &crtc_state->dpll;
eb1cbe48 7592
190f68c5 7593 i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
2a8f64ca 7594
a93e255f
ACO
7595 is_sdvo = intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO) ||
7596 intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI);
eb1cbe48
DV
7597
7598 dpll = DPLL_VGA_MODE_DIS;
7599
a93e255f 7600 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
eb1cbe48
DV
7601 dpll |= DPLLB_MODE_LVDS;
7602 else
7603 dpll |= DPLLB_MODE_DAC_SERIAL;
6cc5f341 7604
ef1b460d 7605 if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
190f68c5 7606 dpll |= (crtc_state->pixel_multiplier - 1)
198a037f 7607 << SDVO_MULTIPLIER_SHIFT_HIRES;
eb1cbe48 7608 }
198a037f
DV
7609
7610 if (is_sdvo)
4a33e48d 7611 dpll |= DPLL_SDVO_HIGH_SPEED;
198a037f 7612
190f68c5 7613 if (crtc_state->has_dp_encoder)
4a33e48d 7614 dpll |= DPLL_SDVO_HIGH_SPEED;
eb1cbe48
DV
7615
7616 /* compute bitmask from p1 value */
7617 if (IS_PINEVIEW(dev))
7618 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
7619 else {
7620 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7621 if (IS_G4X(dev) && reduced_clock)
7622 dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
7623 }
7624 switch (clock->p2) {
7625 case 5:
7626 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
7627 break;
7628 case 7:
7629 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
7630 break;
7631 case 10:
7632 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
7633 break;
7634 case 14:
7635 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
7636 break;
7637 }
7638 if (INTEL_INFO(dev)->gen >= 4)
7639 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
7640
190f68c5 7641 if (crtc_state->sdvo_tv_clock)
eb1cbe48 7642 dpll |= PLL_REF_INPUT_TVCLKINBC;
a93e255f 7643 else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
ceb41007 7644 intel_panel_use_ssc(dev_priv))
eb1cbe48
DV
7645 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
7646 else
7647 dpll |= PLL_REF_INPUT_DREFCLK;
7648
7649 dpll |= DPLL_VCO_ENABLE;
190f68c5 7650 crtc_state->dpll_hw_state.dpll = dpll;
8bcc2795 7651
eb1cbe48 7652 if (INTEL_INFO(dev)->gen >= 4) {
190f68c5 7653 u32 dpll_md = (crtc_state->pixel_multiplier - 1)
ef1b460d 7654 << DPLL_MD_UDI_MULTIPLIER_SHIFT;
190f68c5 7655 crtc_state->dpll_hw_state.dpll_md = dpll_md;
eb1cbe48
DV
7656 }
7657}
7658
251ac862
DV
7659static void i8xx_compute_dpll(struct intel_crtc *crtc,
7660 struct intel_crtc_state *crtc_state,
9e2c8475 7661 struct dpll *reduced_clock)
eb1cbe48 7662{
f47709a9 7663 struct drm_device *dev = crtc->base.dev;
eb1cbe48 7664 struct drm_i915_private *dev_priv = dev->dev_private;
eb1cbe48 7665 u32 dpll;
190f68c5 7666 struct dpll *clock = &crtc_state->dpll;
eb1cbe48 7667
190f68c5 7668 i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
2a8f64ca 7669
eb1cbe48
DV
7670 dpll = DPLL_VGA_MODE_DIS;
7671
a93e255f 7672 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
eb1cbe48
DV
7673 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7674 } else {
7675 if (clock->p1 == 2)
7676 dpll |= PLL_P1_DIVIDE_BY_TWO;
7677 else
7678 dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7679 if (clock->p2 == 4)
7680 dpll |= PLL_P2_DIVIDE_BY_4;
7681 }
7682
a93e255f 7683 if (!IS_I830(dev) && intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO))
4a33e48d
DV
7684 dpll |= DPLL_DVO_2X_MODE;
7685
a93e255f 7686 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
ceb41007 7687 intel_panel_use_ssc(dev_priv))
eb1cbe48
DV
7688 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
7689 else
7690 dpll |= PLL_REF_INPUT_DREFCLK;
7691
7692 dpll |= DPLL_VCO_ENABLE;
190f68c5 7693 crtc_state->dpll_hw_state.dpll = dpll;
eb1cbe48
DV
7694}
7695
8a654f3b 7696static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
b0e77b9c
PZ
7697{
7698 struct drm_device *dev = intel_crtc->base.dev;
7699 struct drm_i915_private *dev_priv = dev->dev_private;
7700 enum pipe pipe = intel_crtc->pipe;
6e3c9717 7701 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
7c5f93b0 7702 const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
1caea6e9
VS
7703 uint32_t crtc_vtotal, crtc_vblank_end;
7704 int vsyncshift = 0;
4d8a62ea
DV
7705
7706 /* We need to be careful not to changed the adjusted mode, for otherwise
7707 * the hw state checker will get angry at the mismatch. */
7708 crtc_vtotal = adjusted_mode->crtc_vtotal;
7709 crtc_vblank_end = adjusted_mode->crtc_vblank_end;
b0e77b9c 7710
609aeaca 7711 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
b0e77b9c 7712 /* the chip adds 2 halflines automatically */
4d8a62ea
DV
7713 crtc_vtotal -= 1;
7714 crtc_vblank_end -= 1;
609aeaca 7715
409ee761 7716 if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
609aeaca
VS
7717 vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
7718 else
7719 vsyncshift = adjusted_mode->crtc_hsync_start -
7720 adjusted_mode->crtc_htotal / 2;
1caea6e9
VS
7721 if (vsyncshift < 0)
7722 vsyncshift += adjusted_mode->crtc_htotal;
b0e77b9c
PZ
7723 }
7724
7725 if (INTEL_INFO(dev)->gen > 3)
fe2b8f9d 7726 I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);
b0e77b9c 7727
fe2b8f9d 7728 I915_WRITE(HTOTAL(cpu_transcoder),
b0e77b9c
PZ
7729 (adjusted_mode->crtc_hdisplay - 1) |
7730 ((adjusted_mode->crtc_htotal - 1) << 16));
fe2b8f9d 7731 I915_WRITE(HBLANK(cpu_transcoder),
b0e77b9c
PZ
7732 (adjusted_mode->crtc_hblank_start - 1) |
7733 ((adjusted_mode->crtc_hblank_end - 1) << 16));
fe2b8f9d 7734 I915_WRITE(HSYNC(cpu_transcoder),
b0e77b9c
PZ
7735 (adjusted_mode->crtc_hsync_start - 1) |
7736 ((adjusted_mode->crtc_hsync_end - 1) << 16));
7737
fe2b8f9d 7738 I915_WRITE(VTOTAL(cpu_transcoder),
b0e77b9c 7739 (adjusted_mode->crtc_vdisplay - 1) |
4d8a62ea 7740 ((crtc_vtotal - 1) << 16));
fe2b8f9d 7741 I915_WRITE(VBLANK(cpu_transcoder),
b0e77b9c 7742 (adjusted_mode->crtc_vblank_start - 1) |
4d8a62ea 7743 ((crtc_vblank_end - 1) << 16));
fe2b8f9d 7744 I915_WRITE(VSYNC(cpu_transcoder),
b0e77b9c
PZ
7745 (adjusted_mode->crtc_vsync_start - 1) |
7746 ((adjusted_mode->crtc_vsync_end - 1) << 16));
7747
b5e508d4
PZ
7748 /* Workaround: when the EDP input selection is B, the VTOTAL_B must be
7749 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
7750 * documented on the DDI_FUNC_CTL register description, EDP Input Select
7751 * bits. */
7752 if (IS_HASWELL(dev) && cpu_transcoder == TRANSCODER_EDP &&
7753 (pipe == PIPE_B || pipe == PIPE_C))
7754 I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));
7755
bc58be60
JN
7756}
7757
7758static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc)
7759{
7760 struct drm_device *dev = intel_crtc->base.dev;
7761 struct drm_i915_private *dev_priv = dev->dev_private;
7762 enum pipe pipe = intel_crtc->pipe;
7763
b0e77b9c
PZ
7764 /* pipesrc controls the size that is scaled from, which should
7765 * always be the user's requested size.
7766 */
7767 I915_WRITE(PIPESRC(pipe),
6e3c9717
ACO
7768 ((intel_crtc->config->pipe_src_w - 1) << 16) |
7769 (intel_crtc->config->pipe_src_h - 1));
b0e77b9c
PZ
7770}
7771
1bd1bd80 7772static void intel_get_pipe_timings(struct intel_crtc *crtc,
5cec258b 7773 struct intel_crtc_state *pipe_config)
1bd1bd80
DV
7774{
7775 struct drm_device *dev = crtc->base.dev;
7776 struct drm_i915_private *dev_priv = dev->dev_private;
7777 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
7778 uint32_t tmp;
7779
7780 tmp = I915_READ(HTOTAL(cpu_transcoder));
2d112de7
ACO
7781 pipe_config->base.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
7782 pipe_config->base.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
1bd1bd80 7783 tmp = I915_READ(HBLANK(cpu_transcoder));
2d112de7
ACO
7784 pipe_config->base.adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1;
7785 pipe_config->base.adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1;
1bd1bd80 7786 tmp = I915_READ(HSYNC(cpu_transcoder));
2d112de7
ACO
7787 pipe_config->base.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
7788 pipe_config->base.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
1bd1bd80
DV
7789
7790 tmp = I915_READ(VTOTAL(cpu_transcoder));
2d112de7
ACO
7791 pipe_config->base.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
7792 pipe_config->base.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
1bd1bd80 7793 tmp = I915_READ(VBLANK(cpu_transcoder));
2d112de7
ACO
7794 pipe_config->base.adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1;
7795 pipe_config->base.adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 1;
1bd1bd80 7796 tmp = I915_READ(VSYNC(cpu_transcoder));
2d112de7
ACO
7797 pipe_config->base.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
7798 pipe_config->base.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
1bd1bd80
DV
7799
7800 if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) {
2d112de7
ACO
7801 pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
7802 pipe_config->base.adjusted_mode.crtc_vtotal += 1;
7803 pipe_config->base.adjusted_mode.crtc_vblank_end += 1;
1bd1bd80 7804 }
bc58be60
JN
7805}
7806
7807static void intel_get_pipe_src_size(struct intel_crtc *crtc,
7808 struct intel_crtc_state *pipe_config)
7809{
7810 struct drm_device *dev = crtc->base.dev;
7811 struct drm_i915_private *dev_priv = dev->dev_private;
7812 u32 tmp;
1bd1bd80
DV
7813
7814 tmp = I915_READ(PIPESRC(crtc->pipe));
37327abd
VS
7815 pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
7816 pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
7817
2d112de7
ACO
7818 pipe_config->base.mode.vdisplay = pipe_config->pipe_src_h;
7819 pipe_config->base.mode.hdisplay = pipe_config->pipe_src_w;
1bd1bd80
DV
7820}
7821
f6a83288 7822void intel_mode_from_pipe_config(struct drm_display_mode *mode,
5cec258b 7823 struct intel_crtc_state *pipe_config)
babea61d 7824{
2d112de7
ACO
7825 mode->hdisplay = pipe_config->base.adjusted_mode.crtc_hdisplay;
7826 mode->htotal = pipe_config->base.adjusted_mode.crtc_htotal;
7827 mode->hsync_start = pipe_config->base.adjusted_mode.crtc_hsync_start;
7828 mode->hsync_end = pipe_config->base.adjusted_mode.crtc_hsync_end;
babea61d 7829
2d112de7
ACO
7830 mode->vdisplay = pipe_config->base.adjusted_mode.crtc_vdisplay;
7831 mode->vtotal = pipe_config->base.adjusted_mode.crtc_vtotal;
7832 mode->vsync_start = pipe_config->base.adjusted_mode.crtc_vsync_start;
7833 mode->vsync_end = pipe_config->base.adjusted_mode.crtc_vsync_end;
babea61d 7834
2d112de7 7835 mode->flags = pipe_config->base.adjusted_mode.flags;
cd13f5ab 7836 mode->type = DRM_MODE_TYPE_DRIVER;
babea61d 7837
2d112de7
ACO
7838 mode->clock = pipe_config->base.adjusted_mode.crtc_clock;
7839 mode->flags |= pipe_config->base.adjusted_mode.flags;
cd13f5ab
ML
7840
7841 mode->hsync = drm_mode_hsync(mode);
7842 mode->vrefresh = drm_mode_vrefresh(mode);
7843 drm_mode_set_name(mode);
babea61d
JB
7844}
7845
84b046f3
DV
7846static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
7847{
7848 struct drm_device *dev = intel_crtc->base.dev;
7849 struct drm_i915_private *dev_priv = dev->dev_private;
7850 uint32_t pipeconf;
7851
9f11a9e4 7852 pipeconf = 0;
84b046f3 7853
b6b5d049
VS
7854 if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
7855 (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
7856 pipeconf |= I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE;
67c72a12 7857
6e3c9717 7858 if (intel_crtc->config->double_wide)
cf532bb2 7859 pipeconf |= PIPECONF_DOUBLE_WIDE;
84b046f3 7860
ff9ce46e 7861 /* only g4x and later have fancy bpc/dither controls */
666a4537 7862 if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
ff9ce46e 7863 /* Bspec claims that we can't use dithering for 30bpp pipes. */
6e3c9717 7864 if (intel_crtc->config->dither && intel_crtc->config->pipe_bpp != 30)
ff9ce46e 7865 pipeconf |= PIPECONF_DITHER_EN |
84b046f3 7866 PIPECONF_DITHER_TYPE_SP;
84b046f3 7867
6e3c9717 7868 switch (intel_crtc->config->pipe_bpp) {
ff9ce46e
DV
7869 case 18:
7870 pipeconf |= PIPECONF_6BPC;
7871 break;
7872 case 24:
7873 pipeconf |= PIPECONF_8BPC;
7874 break;
7875 case 30:
7876 pipeconf |= PIPECONF_10BPC;
7877 break;
7878 default:
7879 /* Case prevented by intel_choose_pipe_bpp_dither. */
7880 BUG();
84b046f3
DV
7881 }
7882 }
7883
7884 if (HAS_PIPE_CXSR(dev)) {
7885 if (intel_crtc->lowfreq_avail) {
7886 DRM_DEBUG_KMS("enabling CxSR downclocking\n");
7887 pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
7888 } else {
7889 DRM_DEBUG_KMS("disabling CxSR downclocking\n");
84b046f3
DV
7890 }
7891 }
7892
6e3c9717 7893 if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
efc2cfff 7894 if (INTEL_INFO(dev)->gen < 4 ||
409ee761 7895 intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
efc2cfff
VS
7896 pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
7897 else
7898 pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
7899 } else
84b046f3
DV
7900 pipeconf |= PIPECONF_PROGRESSIVE;
7901
666a4537
WB
7902 if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
7903 intel_crtc->config->limited_color_range)
9f11a9e4 7904 pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
9c8e09b7 7905
84b046f3
DV
7906 I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf);
7907 POSTING_READ(PIPECONF(intel_crtc->pipe));
7908}
7909
81c97f52
ACO
7910static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
7911 struct intel_crtc_state *crtc_state)
7912{
7913 struct drm_device *dev = crtc->base.dev;
7914 struct drm_i915_private *dev_priv = dev->dev_private;
1b6f4958 7915 const struct intel_limit *limit;
81c97f52
ACO
7916 int refclk = 48000;
7917
7918 memset(&crtc_state->dpll_hw_state, 0,
7919 sizeof(crtc_state->dpll_hw_state));
7920
7921 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7922 if (intel_panel_use_ssc(dev_priv)) {
7923 refclk = dev_priv->vbt.lvds_ssc_freq;
7924 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7925 }
7926
7927 limit = &intel_limits_i8xx_lvds;
7928 } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO)) {
7929 limit = &intel_limits_i8xx_dvo;
7930 } else {
7931 limit = &intel_limits_i8xx_dac;
7932 }
7933
7934 if (!crtc_state->clock_set &&
7935 !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7936 refclk, NULL, &crtc_state->dpll)) {
7937 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7938 return -EINVAL;
7939 }
7940
7941 i8xx_compute_dpll(crtc, crtc_state, NULL);
7942
7943 return 0;
7944}
7945
19ec6693
ACO
7946static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
7947 struct intel_crtc_state *crtc_state)
7948{
7949 struct drm_device *dev = crtc->base.dev;
7950 struct drm_i915_private *dev_priv = dev->dev_private;
1b6f4958 7951 const struct intel_limit *limit;
19ec6693
ACO
7952 int refclk = 96000;
7953
7954 memset(&crtc_state->dpll_hw_state, 0,
7955 sizeof(crtc_state->dpll_hw_state));
7956
7957 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7958 if (intel_panel_use_ssc(dev_priv)) {
7959 refclk = dev_priv->vbt.lvds_ssc_freq;
7960 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7961 }
7962
7963 if (intel_is_dual_link_lvds(dev))
7964 limit = &intel_limits_g4x_dual_channel_lvds;
7965 else
7966 limit = &intel_limits_g4x_single_channel_lvds;
7967 } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI) ||
7968 intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
7969 limit = &intel_limits_g4x_hdmi;
7970 } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO)) {
7971 limit = &intel_limits_g4x_sdvo;
7972 } else {
7973 /* The option is for other outputs */
7974 limit = &intel_limits_i9xx_sdvo;
7975 }
7976
7977 if (!crtc_state->clock_set &&
7978 !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7979 refclk, NULL, &crtc_state->dpll)) {
7980 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7981 return -EINVAL;
7982 }
7983
7984 i9xx_compute_dpll(crtc, crtc_state, NULL);
7985
7986 return 0;
7987}
7988
70e8aa21
ACO
7989static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
7990 struct intel_crtc_state *crtc_state)
7991{
7992 struct drm_device *dev = crtc->base.dev;
7993 struct drm_i915_private *dev_priv = dev->dev_private;
1b6f4958 7994 const struct intel_limit *limit;
70e8aa21
ACO
7995 int refclk = 96000;
7996
7997 memset(&crtc_state->dpll_hw_state, 0,
7998 sizeof(crtc_state->dpll_hw_state));
7999
8000 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8001 if (intel_panel_use_ssc(dev_priv)) {
8002 refclk = dev_priv->vbt.lvds_ssc_freq;
8003 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
8004 }
8005
8006 limit = &intel_limits_pineview_lvds;
8007 } else {
8008 limit = &intel_limits_pineview_sdvo;
8009 }
8010
8011 if (!crtc_state->clock_set &&
8012 !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8013 refclk, NULL, &crtc_state->dpll)) {
8014 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8015 return -EINVAL;
8016 }
8017
8018 i9xx_compute_dpll(crtc, crtc_state, NULL);
8019
8020 return 0;
8021}
8022
190f68c5
ACO
8023static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
8024 struct intel_crtc_state *crtc_state)
79e53945 8025{
c7653199 8026 struct drm_device *dev = crtc->base.dev;
79e53945 8027 struct drm_i915_private *dev_priv = dev->dev_private;
1b6f4958 8028 const struct intel_limit *limit;
81c97f52 8029 int refclk = 96000;
79e53945 8030
dd3cd74a
ACO
8031 memset(&crtc_state->dpll_hw_state, 0,
8032 sizeof(crtc_state->dpll_hw_state));
8033
70e8aa21
ACO
8034 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8035 if (intel_panel_use_ssc(dev_priv)) {
8036 refclk = dev_priv->vbt.lvds_ssc_freq;
8037 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
8038 }
43565a06 8039
70e8aa21
ACO
8040 limit = &intel_limits_i9xx_lvds;
8041 } else {
8042 limit = &intel_limits_i9xx_sdvo;
81c97f52 8043 }
79e53945 8044
70e8aa21
ACO
8045 if (!crtc_state->clock_set &&
8046 !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8047 refclk, NULL, &crtc_state->dpll)) {
8048 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8049 return -EINVAL;
f47709a9 8050 }
7026d4ac 8051
81c97f52 8052 i9xx_compute_dpll(crtc, crtc_state, NULL);
79e53945 8053
c8f7a0db 8054 return 0;
f564048e
EA
8055}
8056
65b3d6a9
ACO
8057static int chv_crtc_compute_clock(struct intel_crtc *crtc,
8058 struct intel_crtc_state *crtc_state)
8059{
8060 int refclk = 100000;
1b6f4958 8061 const struct intel_limit *limit = &intel_limits_chv;
65b3d6a9
ACO
8062
8063 memset(&crtc_state->dpll_hw_state, 0,
8064 sizeof(crtc_state->dpll_hw_state));
8065
65b3d6a9
ACO
8066 if (!crtc_state->clock_set &&
8067 !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8068 refclk, NULL, &crtc_state->dpll)) {
8069 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8070 return -EINVAL;
8071 }
8072
8073 chv_compute_dpll(crtc, crtc_state);
8074
8075 return 0;
8076}
8077
8078static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
8079 struct intel_crtc_state *crtc_state)
8080{
8081 int refclk = 100000;
1b6f4958 8082 const struct intel_limit *limit = &intel_limits_vlv;
65b3d6a9
ACO
8083
8084 memset(&crtc_state->dpll_hw_state, 0,
8085 sizeof(crtc_state->dpll_hw_state));
8086
65b3d6a9
ACO
8087 if (!crtc_state->clock_set &&
8088 !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8089 refclk, NULL, &crtc_state->dpll)) {
8090 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8091 return -EINVAL;
8092 }
8093
8094 vlv_compute_dpll(crtc, crtc_state);
8095
8096 return 0;
8097}
8098
2fa2fe9a 8099static void i9xx_get_pfit_config(struct intel_crtc *crtc,
5cec258b 8100 struct intel_crtc_state *pipe_config)
2fa2fe9a
DV
8101{
8102 struct drm_device *dev = crtc->base.dev;
8103 struct drm_i915_private *dev_priv = dev->dev_private;
8104 uint32_t tmp;
8105
dc9e7dec
VS
8106 if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev)))
8107 return;
8108
2fa2fe9a 8109 tmp = I915_READ(PFIT_CONTROL);
06922821
DV
8110 if (!(tmp & PFIT_ENABLE))
8111 return;
2fa2fe9a 8112
06922821 8113 /* Check whether the pfit is attached to our pipe. */
2fa2fe9a
DV
8114 if (INTEL_INFO(dev)->gen < 4) {
8115 if (crtc->pipe != PIPE_B)
8116 return;
2fa2fe9a
DV
8117 } else {
8118 if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
8119 return;
8120 }
8121
06922821 8122 pipe_config->gmch_pfit.control = tmp;
2fa2fe9a 8123 pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
2fa2fe9a
DV
8124}
8125
acbec814 8126static void vlv_crtc_clock_get(struct intel_crtc *crtc,
5cec258b 8127 struct intel_crtc_state *pipe_config)
acbec814
JB
8128{
8129 struct drm_device *dev = crtc->base.dev;
8130 struct drm_i915_private *dev_priv = dev->dev_private;
8131 int pipe = pipe_config->cpu_transcoder;
9e2c8475 8132 struct dpll clock;
acbec814 8133 u32 mdiv;
662c6ecb 8134 int refclk = 100000;
acbec814 8135
b521973b
VS
8136 /* In case of DSI, DPLL will not be used */
8137 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
f573de5a
SK
8138 return;
8139
a580516d 8140 mutex_lock(&dev_priv->sb_lock);
ab3c759a 8141 mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
a580516d 8142 mutex_unlock(&dev_priv->sb_lock);
acbec814
JB
8143
8144 clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
8145 clock.m2 = mdiv & DPIO_M2DIV_MASK;
8146 clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
8147 clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
8148 clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
8149
dccbea3b 8150 pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
acbec814
JB
8151}
8152
5724dbd1
DL
8153static void
8154i9xx_get_initial_plane_config(struct intel_crtc *crtc,
8155 struct intel_initial_plane_config *plane_config)
1ad292b5
JB
8156{
8157 struct drm_device *dev = crtc->base.dev;
8158 struct drm_i915_private *dev_priv = dev->dev_private;
8159 u32 val, base, offset;
8160 int pipe = crtc->pipe, plane = crtc->plane;
8161 int fourcc, pixel_format;
6761dd31 8162 unsigned int aligned_height;
b113d5ee 8163 struct drm_framebuffer *fb;
1b842c89 8164 struct intel_framebuffer *intel_fb;
1ad292b5 8165
42a7b088
DL
8166 val = I915_READ(DSPCNTR(plane));
8167 if (!(val & DISPLAY_PLANE_ENABLE))
8168 return;
8169
d9806c9f 8170 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
1b842c89 8171 if (!intel_fb) {
1ad292b5
JB
8172 DRM_DEBUG_KMS("failed to alloc fb\n");
8173 return;
8174 }
8175
1b842c89
DL
8176 fb = &intel_fb->base;
8177
18c5247e
DV
8178 if (INTEL_INFO(dev)->gen >= 4) {
8179 if (val & DISPPLANE_TILED) {
49af449b 8180 plane_config->tiling = I915_TILING_X;
18c5247e
DV
8181 fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
8182 }
8183 }
1ad292b5
JB
8184
8185 pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
b35d63fa 8186 fourcc = i9xx_format_to_fourcc(pixel_format);
b113d5ee
DL
8187 fb->pixel_format = fourcc;
8188 fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;
1ad292b5
JB
8189
8190 if (INTEL_INFO(dev)->gen >= 4) {
49af449b 8191 if (plane_config->tiling)
1ad292b5
JB
8192 offset = I915_READ(DSPTILEOFF(plane));
8193 else
8194 offset = I915_READ(DSPLINOFF(plane));
8195 base = I915_READ(DSPSURF(plane)) & 0xfffff000;
8196 } else {
8197 base = I915_READ(DSPADDR(plane));
8198 }
8199 plane_config->base = base;
8200
8201 val = I915_READ(PIPESRC(pipe));
b113d5ee
DL
8202 fb->width = ((val >> 16) & 0xfff) + 1;
8203 fb->height = ((val >> 0) & 0xfff) + 1;
1ad292b5
JB
8204
8205 val = I915_READ(DSPSTRIDE(pipe));
b113d5ee 8206 fb->pitches[0] = val & 0xffffffc0;
1ad292b5 8207
b113d5ee 8208 aligned_height = intel_fb_align_height(dev, fb->height,
091df6cb
DV
8209 fb->pixel_format,
8210 fb->modifier[0]);
1ad292b5 8211
f37b5c2b 8212 plane_config->size = fb->pitches[0] * aligned_height;
1ad292b5 8213
2844a921
DL
8214 DRM_DEBUG_KMS("pipe/plane %c/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
8215 pipe_name(pipe), plane, fb->width, fb->height,
8216 fb->bits_per_pixel, base, fb->pitches[0],
8217 plane_config->size);
1ad292b5 8218
2d14030b 8219 plane_config->fb = intel_fb;
1ad292b5
JB
8220}
8221
70b23a98 8222static void chv_crtc_clock_get(struct intel_crtc *crtc,
5cec258b 8223 struct intel_crtc_state *pipe_config)
70b23a98
VS
8224{
8225 struct drm_device *dev = crtc->base.dev;
8226 struct drm_i915_private *dev_priv = dev->dev_private;
8227 int pipe = pipe_config->cpu_transcoder;
8228 enum dpio_channel port = vlv_pipe_to_channel(pipe);
9e2c8475 8229 struct dpll clock;
0d7b6b11 8230 u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
70b23a98
VS
8231 int refclk = 100000;
8232
b521973b
VS
8233 /* In case of DSI, DPLL will not be used */
8234 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
8235 return;
8236
a580516d 8237 mutex_lock(&dev_priv->sb_lock);
70b23a98
VS
8238 cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
8239 pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
8240 pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
8241 pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
0d7b6b11 8242 pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
a580516d 8243 mutex_unlock(&dev_priv->sb_lock);
70b23a98
VS
8244
8245 clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
0d7b6b11
ID
8246 clock.m2 = (pll_dw0 & 0xff) << 22;
8247 if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
8248 clock.m2 |= pll_dw2 & 0x3fffff;
70b23a98
VS
8249 clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
8250 clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
8251 clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;
8252
dccbea3b 8253 pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
70b23a98
VS
8254}
8255
0e8ffe1b 8256static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
5cec258b 8257 struct intel_crtc_state *pipe_config)
0e8ffe1b
DV
8258{
8259 struct drm_device *dev = crtc->base.dev;
8260 struct drm_i915_private *dev_priv = dev->dev_private;
1729050e 8261 enum intel_display_power_domain power_domain;
0e8ffe1b 8262 uint32_t tmp;
1729050e 8263 bool ret;
0e8ffe1b 8264
1729050e
ID
8265 power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
8266 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
b5482bd0
ID
8267 return false;
8268
e143a21c 8269 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
8106ddbd 8270 pipe_config->shared_dpll = NULL;
eccb140b 8271
1729050e
ID
8272 ret = false;
8273
0e8ffe1b
DV
8274 tmp = I915_READ(PIPECONF(crtc->pipe));
8275 if (!(tmp & PIPECONF_ENABLE))
1729050e 8276 goto out;
0e8ffe1b 8277
666a4537 8278 if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
42571aef
VS
8279 switch (tmp & PIPECONF_BPC_MASK) {
8280 case PIPECONF_6BPC:
8281 pipe_config->pipe_bpp = 18;
8282 break;
8283 case PIPECONF_8BPC:
8284 pipe_config->pipe_bpp = 24;
8285 break;
8286 case PIPECONF_10BPC:
8287 pipe_config->pipe_bpp = 30;
8288 break;
8289 default:
8290 break;
8291 }
8292 }
8293
666a4537
WB
8294 if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
8295 (tmp & PIPECONF_COLOR_RANGE_SELECT))
b5a9fa09
DV
8296 pipe_config->limited_color_range = true;
8297
282740f7
VS
8298 if (INTEL_INFO(dev)->gen < 4)
8299 pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
8300
1bd1bd80 8301 intel_get_pipe_timings(crtc, pipe_config);
bc58be60 8302 intel_get_pipe_src_size(crtc, pipe_config);
1bd1bd80 8303
2fa2fe9a
DV
8304 i9xx_get_pfit_config(crtc, pipe_config);
8305
6c49f241 8306 if (INTEL_INFO(dev)->gen >= 4) {
c231775c
VS
8307 /* No way to read it out on pipes B and C */
8308 if (IS_CHERRYVIEW(dev) && crtc->pipe != PIPE_A)
8309 tmp = dev_priv->chv_dpll_md[crtc->pipe];
8310 else
8311 tmp = I915_READ(DPLL_MD(crtc->pipe));
6c49f241
DV
8312 pipe_config->pixel_multiplier =
8313 ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
8314 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
8bcc2795 8315 pipe_config->dpll_hw_state.dpll_md = tmp;
6c49f241
DV
8316 } else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
8317 tmp = I915_READ(DPLL(crtc->pipe));
8318 pipe_config->pixel_multiplier =
8319 ((tmp & SDVO_MULTIPLIER_MASK)
8320 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
8321 } else {
8322 /* Note that on i915G/GM the pixel multiplier is in the sdvo
8323 * port and will be fixed up in the encoder->get_config
8324 * function. */
8325 pipe_config->pixel_multiplier = 1;
8326 }
8bcc2795 8327 pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
666a4537 8328 if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
1c4e0274
VS
8329 /*
8330 * DPLL_DVO_2X_MODE must be enabled for both DPLLs
8331 * on 830. Filter it out here so that we don't
8332 * report errors due to that.
8333 */
8334 if (IS_I830(dev))
8335 pipe_config->dpll_hw_state.dpll &= ~DPLL_DVO_2X_MODE;
8336
8bcc2795
DV
8337 pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
8338 pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
165e901c
VS
8339 } else {
8340 /* Mask out read-only status bits. */
8341 pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
8342 DPLL_PORTC_READY_MASK |
8343 DPLL_PORTB_READY_MASK);
8bcc2795 8344 }
6c49f241 8345
70b23a98
VS
8346 if (IS_CHERRYVIEW(dev))
8347 chv_crtc_clock_get(crtc, pipe_config);
8348 else if (IS_VALLEYVIEW(dev))
acbec814
JB
8349 vlv_crtc_clock_get(crtc, pipe_config);
8350 else
8351 i9xx_crtc_clock_get(crtc, pipe_config);
18442d08 8352
0f64614d
VS
8353 /*
8354 * Normally the dotclock is filled in by the encoder .get_config()
8355 * but in case the pipe is enabled w/o any ports we need a sane
8356 * default.
8357 */
8358 pipe_config->base.adjusted_mode.crtc_clock =
8359 pipe_config->port_clock / pipe_config->pixel_multiplier;
8360
1729050e
ID
8361 ret = true;
8362
8363out:
8364 intel_display_power_put(dev_priv, power_domain);
8365
8366 return ret;
0e8ffe1b
DV
8367}
8368
dde86e2d 8369static void ironlake_init_pch_refclk(struct drm_device *dev)
13d83a67
JB
8370{
8371 struct drm_i915_private *dev_priv = dev->dev_private;
13d83a67 8372 struct intel_encoder *encoder;
1c1a24d2 8373 int i;
74cfd7ac 8374 u32 val, final;
13d83a67 8375 bool has_lvds = false;
199e5d79 8376 bool has_cpu_edp = false;
199e5d79 8377 bool has_panel = false;
99eb6a01
KP
8378 bool has_ck505 = false;
8379 bool can_ssc = false;
1c1a24d2 8380 bool using_ssc_source = false;
13d83a67
JB
8381
8382 /* We need to take the global config into account */
b2784e15 8383 for_each_intel_encoder(dev, encoder) {
199e5d79
KP
8384 switch (encoder->type) {
8385 case INTEL_OUTPUT_LVDS:
8386 has_panel = true;
8387 has_lvds = true;
8388 break;
8389 case INTEL_OUTPUT_EDP:
8390 has_panel = true;
2de6905f 8391 if (enc_to_dig_port(&encoder->base)->port == PORT_A)
199e5d79
KP
8392 has_cpu_edp = true;
8393 break;
6847d71b
PZ
8394 default:
8395 break;
13d83a67
JB
8396 }
8397 }
8398
99eb6a01 8399 if (HAS_PCH_IBX(dev)) {
41aa3448 8400 has_ck505 = dev_priv->vbt.display_clock_mode;
99eb6a01
KP
8401 can_ssc = has_ck505;
8402 } else {
8403 has_ck505 = false;
8404 can_ssc = true;
8405 }
8406
1c1a24d2
L
8407 /* Check if any DPLLs are using the SSC source */
8408 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
8409 u32 temp = I915_READ(PCH_DPLL(i));
8410
8411 if (!(temp & DPLL_VCO_ENABLE))
8412 continue;
8413
8414 if ((temp & PLL_REF_INPUT_MASK) ==
8415 PLLB_REF_INPUT_SPREADSPECTRUMIN) {
8416 using_ssc_source = true;
8417 break;
8418 }
8419 }
8420
8421 DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
8422 has_panel, has_lvds, has_ck505, using_ssc_source);
13d83a67
JB
8423
8424 /* Ironlake: try to setup display ref clock before DPLL
8425 * enabling. This is only under driver's control after
8426 * PCH B stepping, previous chipset stepping should be
8427 * ignoring this setting.
8428 */
74cfd7ac
CW
8429 val = I915_READ(PCH_DREF_CONTROL);
8430
8431 /* As we must carefully and slowly disable/enable each source in turn,
8432 * compute the final state we want first and check if we need to
8433 * make any changes at all.
8434 */
8435 final = val;
8436 final &= ~DREF_NONSPREAD_SOURCE_MASK;
8437 if (has_ck505)
8438 final |= DREF_NONSPREAD_CK505_ENABLE;
8439 else
8440 final |= DREF_NONSPREAD_SOURCE_ENABLE;
8441
8c07eb68 8442 final &= ~DREF_SSC_SOURCE_MASK;
74cfd7ac 8443 final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
8c07eb68 8444 final &= ~DREF_SSC1_ENABLE;
74cfd7ac
CW
8445
8446 if (has_panel) {
8447 final |= DREF_SSC_SOURCE_ENABLE;
8448
8449 if (intel_panel_use_ssc(dev_priv) && can_ssc)
8450 final |= DREF_SSC1_ENABLE;
8451
8452 if (has_cpu_edp) {
8453 if (intel_panel_use_ssc(dev_priv) && can_ssc)
8454 final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
8455 else
8456 final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
8457 } else
8458 final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
1c1a24d2
L
8459 } else if (using_ssc_source) {
8460 final |= DREF_SSC_SOURCE_ENABLE;
8461 final |= DREF_SSC1_ENABLE;
74cfd7ac
CW
8462 }
8463
8464 if (final == val)
8465 return;
8466
13d83a67 8467 /* Always enable nonspread source */
74cfd7ac 8468 val &= ~DREF_NONSPREAD_SOURCE_MASK;
13d83a67 8469
99eb6a01 8470 if (has_ck505)
74cfd7ac 8471 val |= DREF_NONSPREAD_CK505_ENABLE;
99eb6a01 8472 else
74cfd7ac 8473 val |= DREF_NONSPREAD_SOURCE_ENABLE;
13d83a67 8474
199e5d79 8475 if (has_panel) {
74cfd7ac
CW
8476 val &= ~DREF_SSC_SOURCE_MASK;
8477 val |= DREF_SSC_SOURCE_ENABLE;
13d83a67 8478
199e5d79 8479 /* SSC must be turned on before enabling the CPU output */
99eb6a01 8480 if (intel_panel_use_ssc(dev_priv) && can_ssc) {
199e5d79 8481 DRM_DEBUG_KMS("Using SSC on panel\n");
74cfd7ac 8482 val |= DREF_SSC1_ENABLE;
e77166b5 8483 } else
74cfd7ac 8484 val &= ~DREF_SSC1_ENABLE;
199e5d79
KP
8485
8486 /* Get SSC going before enabling the outputs */
74cfd7ac 8487 I915_WRITE(PCH_DREF_CONTROL, val);
199e5d79
KP
8488 POSTING_READ(PCH_DREF_CONTROL);
8489 udelay(200);
8490
74cfd7ac 8491 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
13d83a67
JB
8492
8493 /* Enable CPU source on CPU attached eDP */
199e5d79 8494 if (has_cpu_edp) {
99eb6a01 8495 if (intel_panel_use_ssc(dev_priv) && can_ssc) {
199e5d79 8496 DRM_DEBUG_KMS("Using SSC on eDP\n");
74cfd7ac 8497 val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
eba905b2 8498 } else
74cfd7ac 8499 val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
199e5d79 8500 } else
74cfd7ac 8501 val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
199e5d79 8502
74cfd7ac 8503 I915_WRITE(PCH_DREF_CONTROL, val);
199e5d79
KP
8504 POSTING_READ(PCH_DREF_CONTROL);
8505 udelay(200);
8506 } else {
1c1a24d2 8507 DRM_DEBUG_KMS("Disabling CPU source output\n");
199e5d79 8508
74cfd7ac 8509 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
199e5d79
KP
8510
8511 /* Turn off CPU output */
74cfd7ac 8512 val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
199e5d79 8513
74cfd7ac 8514 I915_WRITE(PCH_DREF_CONTROL, val);
199e5d79
KP
8515 POSTING_READ(PCH_DREF_CONTROL);
8516 udelay(200);
8517
1c1a24d2
L
8518 if (!using_ssc_source) {
8519 DRM_DEBUG_KMS("Disabling SSC source\n");
199e5d79 8520
1c1a24d2
L
8521 /* Turn off the SSC source */
8522 val &= ~DREF_SSC_SOURCE_MASK;
8523 val |= DREF_SSC_SOURCE_DISABLE;
f165d283 8524
1c1a24d2
L
8525 /* Turn off SSC1 */
8526 val &= ~DREF_SSC1_ENABLE;
8527
8528 I915_WRITE(PCH_DREF_CONTROL, val);
8529 POSTING_READ(PCH_DREF_CONTROL);
8530 udelay(200);
8531 }
13d83a67 8532 }
74cfd7ac
CW
8533
8534 BUG_ON(val != final);
13d83a67
JB
8535}
8536
f31f2d55 8537static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
dde86e2d 8538{
f31f2d55 8539 uint32_t tmp;
dde86e2d 8540
0ff066a9
PZ
8541 tmp = I915_READ(SOUTH_CHICKEN2);
8542 tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
8543 I915_WRITE(SOUTH_CHICKEN2, tmp);
dde86e2d 8544
cf3598c2
ID
8545 if (wait_for_us(I915_READ(SOUTH_CHICKEN2) &
8546 FDI_MPHY_IOSFSB_RESET_STATUS, 100))
0ff066a9 8547 DRM_ERROR("FDI mPHY reset assert timeout\n");
dde86e2d 8548
0ff066a9
PZ
8549 tmp = I915_READ(SOUTH_CHICKEN2);
8550 tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
8551 I915_WRITE(SOUTH_CHICKEN2, tmp);
dde86e2d 8552
cf3598c2
ID
8553 if (wait_for_us((I915_READ(SOUTH_CHICKEN2) &
8554 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
0ff066a9 8555 DRM_ERROR("FDI mPHY reset de-assert timeout\n");
f31f2d55
PZ
8556}
8557
8558/* WaMPhyProgramming:hsw */
8559static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
8560{
8561 uint32_t tmp;
dde86e2d
PZ
8562
8563 tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
8564 tmp &= ~(0xFF << 24);
8565 tmp |= (0x12 << 24);
8566 intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
8567
dde86e2d
PZ
8568 tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
8569 tmp |= (1 << 11);
8570 intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
8571
8572 tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
8573 tmp |= (1 << 11);
8574 intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
8575
dde86e2d
PZ
8576 tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
8577 tmp |= (1 << 24) | (1 << 21) | (1 << 18);
8578 intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
8579
8580 tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
8581 tmp |= (1 << 24) | (1 << 21) | (1 << 18);
8582 intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
8583
0ff066a9
PZ
8584 tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
8585 tmp &= ~(7 << 13);
8586 tmp |= (5 << 13);
8587 intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
dde86e2d 8588
0ff066a9
PZ
8589 tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
8590 tmp &= ~(7 << 13);
8591 tmp |= (5 << 13);
8592 intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
dde86e2d
PZ
8593
8594 tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
8595 tmp &= ~0xFF;
8596 tmp |= 0x1C;
8597 intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
8598
8599 tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
8600 tmp &= ~0xFF;
8601 tmp |= 0x1C;
8602 intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
8603
8604 tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
8605 tmp &= ~(0xFF << 16);
8606 tmp |= (0x1C << 16);
8607 intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
8608
8609 tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
8610 tmp &= ~(0xFF << 16);
8611 tmp |= (0x1C << 16);
8612 intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
8613
0ff066a9
PZ
8614 tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
8615 tmp |= (1 << 27);
8616 intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
dde86e2d 8617
0ff066a9
PZ
8618 tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
8619 tmp |= (1 << 27);
8620 intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
dde86e2d 8621
0ff066a9
PZ
8622 tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
8623 tmp &= ~(0xF << 28);
8624 tmp |= (4 << 28);
8625 intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
dde86e2d 8626
0ff066a9
PZ
8627 tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
8628 tmp &= ~(0xF << 28);
8629 tmp |= (4 << 28);
8630 intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
f31f2d55
PZ
8631}
8632
2fa86a1f
PZ
8633/* Implements 3 different sequences from BSpec chapter "Display iCLK
8634 * Programming" based on the parameters passed:
8635 * - Sequence to enable CLKOUT_DP
8636 * - Sequence to enable CLKOUT_DP without spread
8637 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
8638 */
8639static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread,
8640 bool with_fdi)
f31f2d55
PZ
8641{
8642 struct drm_i915_private *dev_priv = dev->dev_private;
2fa86a1f
PZ
8643 uint32_t reg, tmp;
8644
8645 if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
8646 with_spread = true;
c2699524 8647 if (WARN(HAS_PCH_LPT_LP(dev) && with_fdi, "LP PCH doesn't have FDI\n"))
2fa86a1f 8648 with_fdi = false;
f31f2d55 8649
a580516d 8650 mutex_lock(&dev_priv->sb_lock);
f31f2d55
PZ
8651
8652 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
8653 tmp &= ~SBI_SSCCTL_DISABLE;
8654 tmp |= SBI_SSCCTL_PATHALT;
8655 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
8656
8657 udelay(24);
8658
2fa86a1f
PZ
8659 if (with_spread) {
8660 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
8661 tmp &= ~SBI_SSCCTL_PATHALT;
8662 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
f31f2d55 8663
2fa86a1f
PZ
8664 if (with_fdi) {
8665 lpt_reset_fdi_mphy(dev_priv);
8666 lpt_program_fdi_mphy(dev_priv);
8667 }
8668 }
dde86e2d 8669
c2699524 8670 reg = HAS_PCH_LPT_LP(dev) ? SBI_GEN0 : SBI_DBUFF0;
2fa86a1f
PZ
8671 tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
8672 tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
8673 intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
c00db246 8674
a580516d 8675 mutex_unlock(&dev_priv->sb_lock);
dde86e2d
PZ
8676}
8677
47701c3b
PZ
8678/* Sequence to disable CLKOUT_DP */
8679static void lpt_disable_clkout_dp(struct drm_device *dev)
8680{
8681 struct drm_i915_private *dev_priv = dev->dev_private;
8682 uint32_t reg, tmp;
8683
a580516d 8684 mutex_lock(&dev_priv->sb_lock);
47701c3b 8685
c2699524 8686 reg = HAS_PCH_LPT_LP(dev) ? SBI_GEN0 : SBI_DBUFF0;
47701c3b
PZ
8687 tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
8688 tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
8689 intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
8690
8691 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
8692 if (!(tmp & SBI_SSCCTL_DISABLE)) {
8693 if (!(tmp & SBI_SSCCTL_PATHALT)) {
8694 tmp |= SBI_SSCCTL_PATHALT;
8695 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
8696 udelay(32);
8697 }
8698 tmp |= SBI_SSCCTL_DISABLE;
8699 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
8700 }
8701
a580516d 8702 mutex_unlock(&dev_priv->sb_lock);
47701c3b
PZ
8703}
8704
f7be2c21
VS
8705#define BEND_IDX(steps) ((50 + (steps)) / 5)
8706
8707static const uint16_t sscdivintphase[] = {
8708 [BEND_IDX( 50)] = 0x3B23,
8709 [BEND_IDX( 45)] = 0x3B23,
8710 [BEND_IDX( 40)] = 0x3C23,
8711 [BEND_IDX( 35)] = 0x3C23,
8712 [BEND_IDX( 30)] = 0x3D23,
8713 [BEND_IDX( 25)] = 0x3D23,
8714 [BEND_IDX( 20)] = 0x3E23,
8715 [BEND_IDX( 15)] = 0x3E23,
8716 [BEND_IDX( 10)] = 0x3F23,
8717 [BEND_IDX( 5)] = 0x3F23,
8718 [BEND_IDX( 0)] = 0x0025,
8719 [BEND_IDX( -5)] = 0x0025,
8720 [BEND_IDX(-10)] = 0x0125,
8721 [BEND_IDX(-15)] = 0x0125,
8722 [BEND_IDX(-20)] = 0x0225,
8723 [BEND_IDX(-25)] = 0x0225,
8724 [BEND_IDX(-30)] = 0x0325,
8725 [BEND_IDX(-35)] = 0x0325,
8726 [BEND_IDX(-40)] = 0x0425,
8727 [BEND_IDX(-45)] = 0x0425,
8728 [BEND_IDX(-50)] = 0x0525,
8729};
8730
8731/*
8732 * Bend CLKOUT_DP
8733 * steps -50 to 50 inclusive, in steps of 5
8734 * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
8735 * change in clock period = -(steps / 10) * 5.787 ps
8736 */
8737static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
8738{
8739 uint32_t tmp;
8740 int idx = BEND_IDX(steps);
8741
8742 if (WARN_ON(steps % 5 != 0))
8743 return;
8744
8745 if (WARN_ON(idx >= ARRAY_SIZE(sscdivintphase)))
8746 return;
8747
8748 mutex_lock(&dev_priv->sb_lock);
8749
8750 if (steps % 10 != 0)
8751 tmp = 0xAAAAAAAB;
8752 else
8753 tmp = 0x00000000;
8754 intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);
8755
8756 tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
8757 tmp &= 0xffff0000;
8758 tmp |= sscdivintphase[idx];
8759 intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);
8760
8761 mutex_unlock(&dev_priv->sb_lock);
8762}
8763
8764#undef BEND_IDX
8765
bf8fa3d3
PZ
8766static void lpt_init_pch_refclk(struct drm_device *dev)
8767{
bf8fa3d3
PZ
8768 struct intel_encoder *encoder;
8769 bool has_vga = false;
8770
b2784e15 8771 for_each_intel_encoder(dev, encoder) {
bf8fa3d3
PZ
8772 switch (encoder->type) {
8773 case INTEL_OUTPUT_ANALOG:
8774 has_vga = true;
8775 break;
6847d71b
PZ
8776 default:
8777 break;
bf8fa3d3
PZ
8778 }
8779 }
8780
f7be2c21
VS
8781 if (has_vga) {
8782 lpt_bend_clkout_dp(to_i915(dev), 0);
47701c3b 8783 lpt_enable_clkout_dp(dev, true, true);
f7be2c21 8784 } else {
47701c3b 8785 lpt_disable_clkout_dp(dev);
f7be2c21 8786 }
bf8fa3d3
PZ
8787}
8788
dde86e2d
PZ
8789/*
8790 * Initialize reference clocks when the driver loads
8791 */
8792void intel_init_pch_refclk(struct drm_device *dev)
8793{
8794 if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
8795 ironlake_init_pch_refclk(dev);
8796 else if (HAS_PCH_LPT(dev))
8797 lpt_init_pch_refclk(dev);
8798}
8799
6ff93609 8800static void ironlake_set_pipeconf(struct drm_crtc *crtc)
79e53945 8801{
c8203565 8802 struct drm_i915_private *dev_priv = crtc->dev->dev_private;
79e53945
JB
8803 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8804 int pipe = intel_crtc->pipe;
c8203565
PZ
8805 uint32_t val;
8806
78114071 8807 val = 0;
c8203565 8808
6e3c9717 8809 switch (intel_crtc->config->pipe_bpp) {
c8203565 8810 case 18:
dfd07d72 8811 val |= PIPECONF_6BPC;
c8203565
PZ
8812 break;
8813 case 24:
dfd07d72 8814 val |= PIPECONF_8BPC;
c8203565
PZ
8815 break;
8816 case 30:
dfd07d72 8817 val |= PIPECONF_10BPC;
c8203565
PZ
8818 break;
8819 case 36:
dfd07d72 8820 val |= PIPECONF_12BPC;
c8203565
PZ
8821 break;
8822 default:
cc769b62
PZ
8823 /* Case prevented by intel_choose_pipe_bpp_dither. */
8824 BUG();
c8203565
PZ
8825 }
8826
6e3c9717 8827 if (intel_crtc->config->dither)
c8203565
PZ
8828 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
8829
6e3c9717 8830 if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
c8203565
PZ
8831 val |= PIPECONF_INTERLACED_ILK;
8832 else
8833 val |= PIPECONF_PROGRESSIVE;
8834
6e3c9717 8835 if (intel_crtc->config->limited_color_range)
3685a8f3 8836 val |= PIPECONF_COLOR_RANGE_SELECT;
3685a8f3 8837
c8203565
PZ
8838 I915_WRITE(PIPECONF(pipe), val);
8839 POSTING_READ(PIPECONF(pipe));
8840}
8841
6ff93609 8842static void haswell_set_pipeconf(struct drm_crtc *crtc)
ee2b0b38 8843{
391bf048 8844 struct drm_i915_private *dev_priv = crtc->dev->dev_private;
ee2b0b38 8845 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6e3c9717 8846 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
391bf048 8847 u32 val = 0;
ee2b0b38 8848
391bf048 8849 if (IS_HASWELL(dev_priv) && intel_crtc->config->dither)
ee2b0b38
PZ
8850 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
8851
6e3c9717 8852 if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
ee2b0b38
PZ
8853 val |= PIPECONF_INTERLACED_ILK;
8854 else
8855 val |= PIPECONF_PROGRESSIVE;
8856
702e7a56
PZ
8857 I915_WRITE(PIPECONF(cpu_transcoder), val);
8858 POSTING_READ(PIPECONF(cpu_transcoder));
391bf048
JN
8859}
8860
391bf048
JN
8861static void haswell_set_pipemisc(struct drm_crtc *crtc)
8862{
8863 struct drm_i915_private *dev_priv = crtc->dev->dev_private;
8864 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
756f85cf 8865
391bf048
JN
8866 if (IS_BROADWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 9) {
8867 u32 val = 0;
756f85cf 8868
6e3c9717 8869 switch (intel_crtc->config->pipe_bpp) {
756f85cf
PZ
8870 case 18:
8871 val |= PIPEMISC_DITHER_6_BPC;
8872 break;
8873 case 24:
8874 val |= PIPEMISC_DITHER_8_BPC;
8875 break;
8876 case 30:
8877 val |= PIPEMISC_DITHER_10_BPC;
8878 break;
8879 case 36:
8880 val |= PIPEMISC_DITHER_12_BPC;
8881 break;
8882 default:
8883 /* Case prevented by pipe_config_set_bpp. */
8884 BUG();
8885 }
8886
6e3c9717 8887 if (intel_crtc->config->dither)
756f85cf
PZ
8888 val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
8889
391bf048 8890 I915_WRITE(PIPEMISC(intel_crtc->pipe), val);
756f85cf 8891 }
ee2b0b38
PZ
8892}
8893
d4b1931c
PZ
8894int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
8895{
8896 /*
8897 * Account for spread spectrum to avoid
8898 * oversubscribing the link. Max center spread
8899 * is 2.5%; use 5% for safety's sake.
8900 */
8901 u32 bps = target_clock * bpp * 21 / 20;
619d4d04 8902 return DIV_ROUND_UP(bps, link_bw * 8);
d4b1931c
PZ
8903}
8904
7429e9d4 8905static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
6cf86a5e 8906{
7429e9d4 8907 return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
f48d8f23
PZ
8908}
8909
b75ca6f6
ACO
/*
 * Assemble the ILK DPLL, FP0 and FP1 register values for @crtc_state and
 * store them in crtc_state->dpll_hw_state. Nothing is written to hardware
 * here; the shared-DPLL code programs the registers later.
 *
 * @reduced_clock, if non-NULL, supplies the divider set used for the
 * downclocked (LVDS low-refresh) FP1 value; otherwise FP1 mirrors FP0.
 */
static void ironlake_compute_dpll(struct intel_crtc *intel_crtc,
				  struct intel_crtc_state *crtc_state,
				  struct dpll *reduced_clock)
{
	struct drm_crtc *crtc = &intel_crtc->base;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_atomic_state *state = crtc_state->base.state;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	struct intel_encoder *encoder;
	u32 dpll, fp, fp2;
	int factor, i;
	bool is_lvds = false, is_sdvo = false;

	/* Scan the atomic state for encoders driving this CRTC. */
	for_each_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != crtc_state->base.crtc)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		switch (encoder->type) {
		case INTEL_OUTPUT_LVDS:
			is_lvds = true;
			break;
		case INTEL_OUTPUT_SDVO:
		case INTEL_OUTPUT_HDMI:
			is_sdvo = true;
			break;
		default:
			break;
		}
	}

	/* Enable autotuning of the PLL clock (if permissible) */
	factor = 21;
	if (is_lvds) {
		/* 100 MHz SSC LVDS or IBX dual-link LVDS needs a looser bound. */
		if ((intel_panel_use_ssc(dev_priv) &&
		     dev_priv->vbt.lvds_ssc_freq == 100000) ||
		    (HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev)))
			factor = 25;
	} else if (crtc_state->sdvo_tv_clock)
		factor = 20;

	fp = i9xx_dpll_compute_fp(&crtc_state->dpll);

	if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor))
		fp |= FP_CB_TUNE;

	if (reduced_clock) {
		fp2 = i9xx_dpll_compute_fp(reduced_clock);

		/* Same M < factor * N test as above, for the reduced clock. */
		if (reduced_clock->m < factor * reduced_clock->n)
			fp2 |= FP_CB_TUNE;
	} else {
		fp2 = fp;
	}

	dpll = 0;

	if (is_lvds)
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	/* Pixel multiplier is stored biased by one in the register field. */
	dpll |= (crtc_state->pixel_multiplier - 1)
		<< PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;

	if (is_sdvo)
		dpll |= DPLL_SDVO_HIGH_SPEED;
	if (crtc_state->has_dp_encoder)
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
	/* also FPA1 */
	dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;

	/* P2 post divider: only 5/7/10/14 are representable here. */
	switch (crtc_state->dpll.p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}

	/* Reference selection: spread-spectrum only for SSC-using LVDS. */
	if (is_lvds && intel_panel_use_ssc(dev_priv))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;

	crtc_state->dpll_hw_state.dpll = dpll;
	crtc_state->dpll_hw_state.fp0 = fp;
	crtc_state->dpll_hw_state.fp1 = fp2;
}
9014
190f68c5
ACO
/*
 * Compute clock/PLL state for an ILK-class CRTC: pick the divider limits,
 * find a divider set, fill dpll_hw_state, and reserve a shared PCH PLL.
 *
 * Returns 0 on success, -EINVAL if no divider set or no free PLL is found.
 */
static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
				       struct intel_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct dpll reduced_clock;
	/*
	 * NOTE(review): has_reduced_clock is never set true in this function,
	 * so the reduced_clock/lowfreq_avail paths below look dead — confirm
	 * against history before relying on them.
	 */
	bool has_reduced_clock = false;
	struct intel_shared_dpll *pll;
	const struct intel_limit *limit;
	int refclk = 120000;

	memset(&crtc_state->dpll_hw_state, 0,
	       sizeof(crtc_state->dpll_hw_state));

	crtc->lowfreq_avail = false;

	/* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
	if (!crtc_state->has_pch_encoder)
		return 0;

	/* Select the divider limit table based on output type and refclk. */
	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
		if (intel_panel_use_ssc(dev_priv)) {
			DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
				      dev_priv->vbt.lvds_ssc_freq);
			refclk = dev_priv->vbt.lvds_ssc_freq;
		}

		if (intel_is_dual_link_lvds(dev)) {
			if (refclk == 100000)
				limit = &intel_limits_ironlake_dual_lvds_100m;
			else
				limit = &intel_limits_ironlake_dual_lvds;
		} else {
			if (refclk == 100000)
				limit = &intel_limits_ironlake_single_lvds_100m;
			else
				limit = &intel_limits_ironlake_single_lvds;
		}
	} else {
		limit = &intel_limits_ironlake_dac;
	}

	/* Respect dividers already fixed by the caller (clock_set). */
	if (!crtc_state->clock_set &&
	    !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
				refclk, NULL, &crtc_state->dpll)) {
		DRM_ERROR("Couldn't find PLL settings for mode!\n");
		return -EINVAL;
	}

	ironlake_compute_dpll(crtc, crtc_state,
			      has_reduced_clock ? &reduced_clock : NULL);

	pll = intel_get_shared_dpll(crtc, crtc_state, NULL);
	if (pll == NULL) {
		DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
				 pipe_name(crtc->pipe));
		return -EINVAL;
	}

	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
	    has_reduced_clock)
		crtc->lowfreq_avail = true;

	return 0;
}
9080
eb14cb74
VS
/*
 * Read back the PCH transcoder link M1/N1 and data M1/N1 values for
 * @crtc's pipe into @m_n. The TU size is stored biased by one in the
 * high bits of the data M register.
 */
static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
					 struct intel_link_m_n *m_n)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe = crtc->pipe;

	m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
	m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
	m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
		& ~TU_SIZE_MASK;
	m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
	m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
		    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
}
9096
/*
 * Read back CPU transcoder link/data M/N values into @m_n, and optionally
 * the second (DRRS) set into @m2_n2. Gen5+ uses per-transcoder registers;
 * older hardware uses the per-pipe G4X register layout.
 */
static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
					 enum transcoder transcoder,
					 struct intel_link_m_n *m_n,
					 struct intel_link_m_n *m2_n2)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe = crtc->pipe;

	if (INTEL_INFO(dev)->gen >= 5) {
		m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
		m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
		m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
		/* TU size is stored biased by one in the data M register. */
		m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
		/* Read M2_N2 registers only for gen < 8 (M2_N2 available for
		 * gen < 8) and if DRRS is supported (to make sure the
		 * registers are not unnecessarily read).
		 */
		if (m2_n2 && INTEL_INFO(dev)->gen < 8 &&
		    crtc->config->has_drrs) {
			m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
			m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder));
			m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder))
				& ~TU_SIZE_MASK;
			m2_n2->gmch_n = I915_READ(PIPE_DATA_N2(transcoder));
			m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder))
				      & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
		}
	} else {
		m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
		m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
		m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
			& ~TU_SIZE_MASK;
		m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
		m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
			    & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
	}
}
9138
9139void intel_dp_get_m_n(struct intel_crtc *crtc,
5cec258b 9140 struct intel_crtc_state *pipe_config)
eb14cb74 9141{
681a8504 9142 if (pipe_config->has_pch_encoder)
eb14cb74
VS
9143 intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
9144 else
9145 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
b95af8be
VK
9146 &pipe_config->dp_m_n,
9147 &pipe_config->dp_m2_n2);
eb14cb74 9148}
72419203 9149
/*
 * Read back the FDI link M/N configuration from the CPU transcoder.
 * No second M/N set exists for FDI, hence the NULL m2_n2 argument.
 */
static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
					struct intel_crtc_state *pipe_config)
{
	intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
				     &pipe_config->fdi_m_n, NULL);
}
9156
/*
 * Read back the SKL panel fitter (pipe scaler) state. Scans this pipe's
 * scalers for one that is enabled and bound to the pipe (not a plane),
 * records its window, and updates the scaler bookkeeping accordingly.
 */
static void skylake_get_pfit_config(struct intel_crtc *crtc,
				    struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state;
	uint32_t ps_ctrl = 0;
	int id = -1;
	int i;

	/* find scaler attached to this pipe */
	for (i = 0; i < crtc->num_scalers; i++) {
		ps_ctrl = I915_READ(SKL_PS_CTRL(crtc->pipe, i));
		/* Pipe scaling: enabled and plane-select field clear. */
		if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) {
			id = i;
			pipe_config->pch_pfit.enabled = true;
			pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i));
			pipe_config->pch_pfit.size = I915_READ(SKL_PS_WIN_SZ(crtc->pipe, i));
			break;
		}
	}

	/* id stays -1 when no pipe scaler is active. */
	scaler_state->scaler_id = id;
	if (id >= 0) {
		scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
	} else {
		scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
	}
}
9186
5724dbd1
DL
/*
 * Reconstruct the firmware-programmed (BIOS) framebuffer configuration
 * for @crtc's primary plane on SKL+ by reading the plane registers.
 * On success an intel_framebuffer describing it is stored in
 * @plane_config->fb; on any failure the allocation is freed and the
 * function returns silently with plane_config->fb untouched.
 */
static void
skylake_get_initial_plane_config(struct intel_crtc *crtc,
				 struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val, base, offset, stride_mult, tiling;
	int pipe = crtc->pipe;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		DRM_DEBUG_KMS("failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	val = I915_READ(PLANE_CTL(pipe, 0));
	if (!(val & PLANE_CTL_ENABLE))
		goto error;

	pixel_format = val & PLANE_CTL_FORMAT_MASK;
	fourcc = skl_format_to_fourcc(pixel_format,
				      val & PLANE_CTL_ORDER_RGBX,
				      val & PLANE_CTL_ALPHA_MASK);
	fb->pixel_format = fourcc;
	fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;

	/* Translate the hardware tiling field into a DRM format modifier. */
	tiling = val & PLANE_CTL_TILED_MASK;
	switch (tiling) {
	case PLANE_CTL_TILED_LINEAR:
		fb->modifier[0] = DRM_FORMAT_MOD_NONE;
		break;
	case PLANE_CTL_TILED_X:
		plane_config->tiling = I915_TILING_X;
		fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
		break;
	case PLANE_CTL_TILED_Y:
		fb->modifier[0] = I915_FORMAT_MOD_Y_TILED;
		break;
	case PLANE_CTL_TILED_YF:
		fb->modifier[0] = I915_FORMAT_MOD_Yf_TILED;
		break;
	default:
		MISSING_CASE(tiling);
		goto error;
	}

	/* Surface address is 4K-aligned; low bits are flags/reserved. */
	base = I915_READ(PLANE_SURF(pipe, 0)) & 0xfffff000;
	plane_config->base = base;

	offset = I915_READ(PLANE_OFFSET(pipe, 0));

	/* PLANE_SIZE stores width/height biased by one. */
	val = I915_READ(PLANE_SIZE(pipe, 0));
	fb->height = ((val >> 16) & 0xfff) + 1;
	fb->width = ((val >> 0) & 0x1fff) + 1;

	/* Stride register is in units that depend on the tiling/format. */
	val = I915_READ(PLANE_STRIDE(pipe, 0));
	stride_mult = intel_fb_stride_alignment(dev_priv, fb->modifier[0],
						fb->pixel_format);
	fb->pitches[0] = (val & 0x3ff) * stride_mult;

	aligned_height = intel_fb_align_height(dev, fb->height,
					       fb->pixel_format,
					       fb->modifier[0]);

	plane_config->size = fb->pitches[0] * aligned_height;

	DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		      pipe_name(pipe), fb->width, fb->height,
		      fb->bits_per_pixel, base, fb->pitches[0],
		      plane_config->size);

	plane_config->fb = intel_fb;
	return;

error:
	/* fb is the first member of intel_fb, so this frees the allocation. */
	kfree(fb);
}
9270
/*
 * Read back the ILK-style panel fitter state for @crtc's pipe into
 * @pipe_config (enabled flag, window position and size).
 */
static void ironlake_get_pfit_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t tmp;

	tmp = I915_READ(PF_CTL(crtc->pipe));

	if (tmp & PF_ENABLE) {
		pipe_config->pch_pfit.enabled = true;
		pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
		pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));

		/* We currently do not free assignements of panel fitters on
		 * ivb/hsw (since we don't use the higher upscaling modes which
		 * differentiates them) so just WARN about this case for now. */
		if (IS_GEN7(dev)) {
			WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
				PF_PIPE_SEL_IVB(crtc->pipe));
		}
	}
}
9294
5724dbd1
DL
/*
 * Reconstruct the firmware-programmed (BIOS) framebuffer configuration
 * for @crtc's primary plane on ILK-class hardware from the display plane
 * registers. On success the resulting intel_framebuffer is stored in
 * @plane_config->fb; if the plane is disabled or allocation fails the
 * function returns silently.
 */
static void
ironlake_get_initial_plane_config(struct intel_crtc *crtc,
				  struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val, base, offset;
	int pipe = crtc->pipe;
	int fourcc, pixel_format;
	unsigned int aligned_height;
	struct drm_framebuffer *fb;
	struct intel_framebuffer *intel_fb;

	val = I915_READ(DSPCNTR(pipe));
	if (!(val & DISPLAY_PLANE_ENABLE))
		return;

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb) {
		DRM_DEBUG_KMS("failed to alloc fb\n");
		return;
	}

	fb = &intel_fb->base;

	if (INTEL_INFO(dev)->gen >= 4) {
		if (val & DISPPLANE_TILED) {
			plane_config->tiling = I915_TILING_X;
			fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
		}
	}

	pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
	fourcc = i9xx_format_to_fourcc(pixel_format);
	fb->pixel_format = fourcc;
	fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;

	/* Surface base is 4K-aligned; offset register varies by platform. */
	base = I915_READ(DSPSURF(pipe)) & 0xfffff000;
	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		offset = I915_READ(DSPOFFSET(pipe));
	} else {
		if (plane_config->tiling)
			offset = I915_READ(DSPTILEOFF(pipe));
		else
			offset = I915_READ(DSPLINOFF(pipe));
	}
	plane_config->base = base;

	/* PIPESRC stores width/height biased by one. */
	val = I915_READ(PIPESRC(pipe));
	fb->width = ((val >> 16) & 0xfff) + 1;
	fb->height = ((val >> 0) & 0xfff) + 1;

	val = I915_READ(DSPSTRIDE(pipe));
	fb->pitches[0] = val & 0xffffffc0;

	aligned_height = intel_fb_align_height(dev, fb->height,
					       fb->pixel_format,
					       fb->modifier[0]);

	plane_config->size = fb->pitches[0] * aligned_height;

	DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
		      pipe_name(pipe), fb->width, fb->height,
		      fb->bits_per_pixel, base, fb->pitches[0],
		      plane_config->size);

	plane_config->fb = intel_fb;
}
9363
/*
 * Read back the full hardware state of an ILK-class pipe into
 * @pipe_config: bpp, color range, PCH/FDI configuration, shared DPLL,
 * timings and panel fitter.
 *
 * Returns true if the pipe is powered and enabled, false otherwise.
 * Takes (and releases) the pipe's power domain reference for the
 * duration of the readout.
 */
static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	uint32_t tmp;
	bool ret;

	power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return false;

	/* On ILK the CPU transcoder is hard-wired 1:1 to the pipe. */
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
	pipe_config->shared_dpll = NULL;

	ret = false;
	tmp = I915_READ(PIPECONF(crtc->pipe));
	if (!(tmp & PIPECONF_ENABLE))
		goto out;

	switch (tmp & PIPECONF_BPC_MASK) {
	case PIPECONF_6BPC:
		pipe_config->pipe_bpp = 18;
		break;
	case PIPECONF_8BPC:
		pipe_config->pipe_bpp = 24;
		break;
	case PIPECONF_10BPC:
		pipe_config->pipe_bpp = 30;
		break;
	case PIPECONF_12BPC:
		pipe_config->pipe_bpp = 36;
		break;
	default:
		break;
	}

	if (tmp & PIPECONF_COLOR_RANGE_SELECT)
		pipe_config->limited_color_range = true;

	if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
		struct intel_shared_dpll *pll;
		enum intel_dpll_id pll_id;

		pipe_config->has_pch_encoder = true;

		/* FDI lane count is stored biased by one. */
		tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;

		ironlake_get_fdi_m_n_config(crtc, pipe_config);

		if (HAS_PCH_IBX(dev_priv)) {
			/*
			 * The pipe->pch transcoder and pch transcoder->pll
			 * mapping is fixed.
			 */
			pll_id = (enum intel_dpll_id) crtc->pipe;
		} else {
			/* CPT: the PLL selection is programmable. */
			tmp = I915_READ(PCH_DPLL_SEL);
			if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
				pll_id = DPLL_ID_PCH_PLL_B;
			else
				pll_id = DPLL_ID_PCH_PLL_A;
		}

		pipe_config->shared_dpll =
			intel_get_shared_dpll_by_id(dev_priv, pll_id);
		pll = pipe_config->shared_dpll;

		WARN_ON(!pll->funcs.get_hw_state(dev_priv, pll,
						 &pipe_config->dpll_hw_state));

		/* Pixel multiplier is stored biased by one in the DPLL reg. */
		tmp = pipe_config->dpll_hw_state.dpll;
		pipe_config->pixel_multiplier =
			((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
			 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;

		ironlake_pch_clock_get(crtc, pipe_config);
	} else {
		pipe_config->pixel_multiplier = 1;
	}

	intel_get_pipe_timings(crtc, pipe_config);
	intel_get_pipe_src_size(crtc, pipe_config);

	ironlake_get_pfit_config(crtc, pipe_config);

	ret = true;

out:
	intel_display_power_put(dev_priv, power_domain);

	return ret;
}
9460
be256dc7
PZ
/*
 * Sanity-check that the display engine is fully quiesced before LCPLL is
 * disabled: no active CRTCs, power well off, all PLLs, panel power,
 * backlight PWMs, the utility pin, GTC and IRQs disabled. Each violation
 * only emits an I915_STATE_WARN; nothing is changed here.
 */
static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_crtc *crtc;

	for_each_intel_crtc(dev, crtc)
		I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
				pipe_name(crtc->pipe));

	I915_STATE_WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
	I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n");
	I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
	I915_STATE_WARN(I915_READ(WRPLL_CTL(1)) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n");
	I915_STATE_WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n");
	I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
			"CPU PWM1 enabled\n");
	/* The second CPU backlight PWM only exists on Haswell. */
	if (IS_HASWELL(dev))
		I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
				"CPU PWM2 enabled\n");
	I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
			"PCH PWM1 enabled\n");
	I915_STATE_WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
			"Utility pin enabled\n");
	I915_STATE_WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");

	/*
	 * In theory we can still leave IRQs enabled, as long as only the HPD
	 * interrupts remain enabled. We used to check for that, but since it's
	 * gen-specific and since we only disable LCPLL after we fully disable
	 * the interrupts, the check below should be enough.
	 */
	I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
}
9494
9ccd5aeb
PZ
9495static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
9496{
9497 struct drm_device *dev = dev_priv->dev;
9498
9499 if (IS_HASWELL(dev))
9500 return I915_READ(D_COMP_HSW);
9501 else
9502 return I915_READ(D_COMP_BDW);
9503}
9504
3c4c9b81
PZ
/*
 * Write the D_COMP register. On Haswell the write must go through the
 * pcode mailbox (under the rps hw_lock); on Broadwell it is a plain MMIO
 * write followed by a posting read.
 */
static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
{
	struct drm_device *dev = dev_priv->dev;

	if (IS_HASWELL(dev)) {
		mutex_lock(&dev_priv->rps.hw_lock);
		if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP,
					    val))
			DRM_ERROR("Failed to write to D_COMP\n");
		mutex_unlock(&dev_priv->rps.hw_lock);
	} else {
		I915_WRITE(D_COMP_BDW, val);
		POSTING_READ(D_COMP_BDW);
	}
}
9520
/*
 * This function implements pieces of two sequences from BSpec:
 * - Sequence for display software to disable LCPLL
 * - Sequence for display software to allow package C8+
 * The steps implemented here are just the steps that actually touch the LCPLL
 * register. Callers should take care of disabling all the display engine
 * functions, doing the mode unset, fixing interrupts, etc.
 */
static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
			      bool switch_to_fclk, bool allow_power_down)
{
	uint32_t val;

	assert_can_disable_lcpll(dev_priv);

	val = I915_READ(LCPLL_CTL);

	/* Optionally move the CD clock source to FCLK before disabling. */
	if (switch_to_fclk) {
		val |= LCPLL_CD_SOURCE_FCLK;
		I915_WRITE(LCPLL_CTL, val);

		if (wait_for_us(I915_READ(LCPLL_CTL) &
				LCPLL_CD_SOURCE_FCLK_DONE, 1))
			DRM_ERROR("Switching to FCLK failed\n");

		val = I915_READ(LCPLL_CTL);
	}

	val |= LCPLL_PLL_DISABLE;
	I915_WRITE(LCPLL_CTL, val);
	POSTING_READ(LCPLL_CTL);

	if (wait_for((I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK) == 0, 1))
		DRM_ERROR("LCPLL still locked\n");

	/* Disable the D_COMP compensation and wait for RCOMP to finish. */
	val = hsw_read_dcomp(dev_priv);
	val |= D_COMP_COMP_DISABLE;
	hsw_write_dcomp(dev_priv, val);
	ndelay(100);

	if (wait_for((hsw_read_dcomp(dev_priv) & D_COMP_RCOMP_IN_PROGRESS) == 0,
		     1))
		DRM_ERROR("D_COMP RCOMP still in progress\n");

	if (allow_power_down) {
		val = I915_READ(LCPLL_CTL);
		val |= LCPLL_POWER_DOWN_ALLOW;
		I915_WRITE(LCPLL_CTL, val);
		POSTING_READ(LCPLL_CTL);
	}
}
9572
/*
 * Fully restores LCPLL, disallowing power down and switching back to LCPLL
 * source. Inverse of hsw_disable_lcpll(); a no-op if LCPLL is already
 * locked with power-down disallowed and the CD clock on LCPLL.
 */
static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
{
	uint32_t val;

	val = I915_READ(LCPLL_CTL);

	/* Already fully restored? Then there is nothing to do. */
	if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
		    LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
		return;

	/*
	 * Make sure we're not on PC8 state before disabling PC8, otherwise
	 * we'll hang the machine. To prevent PC8 state, just enable force_wake.
	 */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);

	if (val & LCPLL_POWER_DOWN_ALLOW) {
		val &= ~LCPLL_POWER_DOWN_ALLOW;
		I915_WRITE(LCPLL_CTL, val);
		POSTING_READ(LCPLL_CTL);
	}

	/* Re-enable D_COMP compensation (force it on, clear disable). */
	val = hsw_read_dcomp(dev_priv);
	val |= D_COMP_COMP_FORCE;
	val &= ~D_COMP_COMP_DISABLE;
	hsw_write_dcomp(dev_priv, val);

	val = I915_READ(LCPLL_CTL);
	val &= ~LCPLL_PLL_DISABLE;
	I915_WRITE(LCPLL_CTL, val);

	if (wait_for(I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK, 5))
		DRM_ERROR("LCPLL not locked yet\n");

	/* Move the CD clock source back from FCLK to LCPLL if needed. */
	if (val & LCPLL_CD_SOURCE_FCLK) {
		val = I915_READ(LCPLL_CTL);
		val &= ~LCPLL_CD_SOURCE_FCLK;
		I915_WRITE(LCPLL_CTL, val);

		if (wait_for_us((I915_READ(LCPLL_CTL) &
				 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
			DRM_ERROR("Switching back to LCPLL failed\n");
	}

	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
	intel_update_cdclk(dev_priv->dev);
}
9624
765dab67
PZ
/*
 * Package states C8 and deeper are really deep PC states that can only be
 * reached when all the devices on the system allow it, so even if the graphics
 * device allows PC8+, it doesn't mean the system will actually get to these
 * states. Our driver only allows PC8+ when going into runtime PM.
 *
 * The requirements for PC8+ are that all the outputs are disabled, the power
 * well is disabled and most interrupts are disabled, and these are also
 * requirements for runtime PM. When these conditions are met, we manually do
 * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
 * to Fclk. If we're in PC8+ and we get an non-hotplug interrupt, we can hard
 * hang the machine.
 *
 * When we really reach PC8 or deeper states (not just when we allow it) we lose
 * the state of some registers, so when we come back from PC8+ we need to
 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
 * need to take care of the registers kept by RC6. Notice that this happens even
 * if we don't put the device in PCI D3 state (which is what currently happens
 * because of the runtime PM support).
 *
 * For more, read "Display Sequences for Package C8" on the hardware
 * documentation.
 */
void hsw_enable_pc8(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	uint32_t val;

	DRM_DEBUG_KMS("Enabling package C8+\n");

	/* LPT-LP: allow the PCH clock partition to power down. */
	if (HAS_PCH_LPT_LP(dev)) {
		val = I915_READ(SOUTH_DSPCLK_GATE_D);
		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
	}

	lpt_disable_clkout_dp(dev);
	hsw_disable_lcpll(dev_priv, true, true);
}
9664
/*
 * Undo hsw_enable_pc8(): restore LCPLL, re-init the PCH reference clock
 * and disallow the PCH clock partition power-down on LPT-LP.
 */
void hsw_disable_pc8(struct drm_i915_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	uint32_t val;

	DRM_DEBUG_KMS("Disabling package C8+\n");

	hsw_restore_lcpll(dev_priv);
	lpt_init_pch_refclk(dev);

	if (HAS_PCH_LPT_LP(dev)) {
		val = I915_READ(SOUTH_DSPCLK_GATE_D);
		val |= PCH_LP_PARTITION_LEVEL_DISABLE;
		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
	}
}
9681
324513c0 9682static void bxt_modeset_commit_cdclk(struct drm_atomic_state *old_state)
f8437dd1 9683{
a821fc46 9684 struct drm_device *dev = old_state->dev;
1a617b77
ML
9685 struct intel_atomic_state *old_intel_state =
9686 to_intel_atomic_state(old_state);
9687 unsigned int req_cdclk = old_intel_state->dev_cdclk;
f8437dd1 9688
324513c0 9689 bxt_set_cdclk(to_i915(dev), req_cdclk);
f8437dd1
VK
9690}
9691
/*
 * Compute the max pixel rate for the new configuration.
 *
 * Updates intel_state->min_pixclk for every CRTC touched by @state
 * (starting from the current device-wide values) and returns the maximum
 * per-pipe pixel clock across all pipes.
 */
static int ilk_max_pixel_rate(struct drm_atomic_state *state)
{
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_i915_private *dev_priv = state->dev->dev_private;
	struct drm_crtc *crtc;
	struct drm_crtc_state *cstate;
	struct intel_crtc_state *crtc_state;
	unsigned max_pixel_rate = 0, i;
	enum pipe pipe;

	/* Start from the current per-pipe values; only touched CRTCs change. */
	memcpy(intel_state->min_pixclk, dev_priv->min_pixclk,
	       sizeof(intel_state->min_pixclk));

	for_each_crtc_in_state(state, crtc, cstate, i) {
		int pixel_rate;

		crtc_state = to_intel_crtc_state(cstate);
		if (!crtc_state->base.enable) {
			intel_state->min_pixclk[i] = 0;
			continue;
		}

		pixel_rate = ilk_pipe_pixel_rate(crtc_state);

		/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
		if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
			pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95);

		intel_state->min_pixclk[i] = pixel_rate;
	}

	for_each_pipe(dev_priv, pipe)
		max_pixel_rate = max(intel_state->min_pixclk[pipe], max_pixel_rate);

	return max_pixel_rate;
}
9729
/*
 * Reprogram the BDW CD clock to @cdclk (kHz): notify pcode, park the CD
 * clock on FCLK, select the new LCPLL frequency, switch back, and tell
 * pcode the resulting frequency. Only 450000/540000/337500/675000 kHz
 * are valid. Bails out with an error if LCPLL is not in the expected
 * enabled state or pcode refuses the change.
 */
static void broadwell_set_cdclk(struct drm_device *dev, int cdclk)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t val, data;
	int ret;

	if (WARN((I915_READ(LCPLL_CTL) &
		  (LCPLL_PLL_DISABLE | LCPLL_PLL_LOCK |
		   LCPLL_CD_CLOCK_DISABLE | LCPLL_ROOT_CD_CLOCK_DISABLE |
		   LCPLL_CD2X_CLOCK_DISABLE | LCPLL_POWER_DOWN_ALLOW |
		   LCPLL_CD_SOURCE_FCLK)) != LCPLL_PLL_LOCK,
		 "trying to change cdclk frequency with cdclk not enabled\n"))
		return;

	/* Ask pcode for permission before touching the display frequency. */
	mutex_lock(&dev_priv->rps.hw_lock);
	ret = sandybridge_pcode_write(dev_priv,
				      BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0);
	mutex_unlock(&dev_priv->rps.hw_lock);
	if (ret) {
		DRM_ERROR("failed to inform pcode about cdclk change\n");
		return;
	}

	/* Temporarily source the CD clock from FCLK while LCPLL changes. */
	val = I915_READ(LCPLL_CTL);
	val |= LCPLL_CD_SOURCE_FCLK;
	I915_WRITE(LCPLL_CTL, val);

	if (wait_for_us(I915_READ(LCPLL_CTL) &
			LCPLL_CD_SOURCE_FCLK_DONE, 1))
		DRM_ERROR("Switching to FCLK failed\n");

	val = I915_READ(LCPLL_CTL);
	val &= ~LCPLL_CLK_FREQ_MASK;

	/* data is the frequency index later reported to pcode. */
	switch (cdclk) {
	case 450000:
		val |= LCPLL_CLK_FREQ_450;
		data = 0;
		break;
	case 540000:
		val |= LCPLL_CLK_FREQ_54O_BDW;
		data = 1;
		break;
	case 337500:
		val |= LCPLL_CLK_FREQ_337_5_BDW;
		data = 2;
		break;
	case 675000:
		val |= LCPLL_CLK_FREQ_675_BDW;
		data = 3;
		break;
	default:
		WARN(1, "invalid cdclk frequency\n");
		return;
	}

	I915_WRITE(LCPLL_CTL, val);

	/* Switch the CD clock back from FCLK to LCPLL. */
	val = I915_READ(LCPLL_CTL);
	val &= ~LCPLL_CD_SOURCE_FCLK;
	I915_WRITE(LCPLL_CTL, val);

	if (wait_for_us((I915_READ(LCPLL_CTL) &
			 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
		DRM_ERROR("Switching back to LCPLL failed\n");

	mutex_lock(&dev_priv->rps.hw_lock);
	sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, data);
	mutex_unlock(&dev_priv->rps.hw_lock);

	/* CDCLK_FREQ wants the frequency in MHz, minus one. */
	I915_WRITE(CDCLK_FREQ, DIV_ROUND_CLOSEST(cdclk, 1000) - 1);

	intel_update_cdclk(dev);

	WARN(cdclk != dev_priv->cdclk_freq,
	     "cdclk requested %d kHz but got %d kHz\n",
	     cdclk, dev_priv->cdclk_freq);
}
9808
587c7914
VS
/*
 * Pick the lowest legal Broadwell cdclk frequency (kHz) that can carry
 * @max_pixclk; pixel rates above 540 MHz all map to the 675 MHz step.
 */
static int broadwell_calc_cdclk(int max_pixclk)
{
	static const int bdw_cdclk_freqs[] = { 337500, 450000, 540000 };
	int i;

	for (i = 0; i < (int)(sizeof(bdw_cdclk_freqs) / sizeof(bdw_cdclk_freqs[0])); i++) {
		if (max_pixclk <= bdw_cdclk_freqs[i])
			return bdw_cdclk_freqs[i];
	}

	return 675000;
}
9820
/*
 * Atomic-check hook: compute the cdclk required by @state on BDW and
 * store it in the intel_atomic_state. Fails with -EINVAL if the needed
 * cdclk exceeds the platform maximum. When no CRTCs remain active the
 * device cdclk drops to the minimum step.
 */
static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(state->dev);
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	int max_pixclk = ilk_max_pixel_rate(state);
	int cdclk;

	/*
	 * FIXME should also account for plane ratio
	 * once 64bpp pixel formats are supported.
	 */
	cdclk = broadwell_calc_cdclk(max_pixclk);

	if (cdclk > dev_priv->max_cdclk_freq) {
		DRM_DEBUG_KMS("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
			      cdclk, dev_priv->max_cdclk_freq);
		return -EINVAL;
	}

	intel_state->cdclk = intel_state->dev_cdclk = cdclk;
	if (!intel_state->active_crtcs)
		intel_state->dev_cdclk = broadwell_calc_cdclk(0);

	return 0;
}
9846
27c329ed 9847static void broadwell_modeset_commit_cdclk(struct drm_atomic_state *old_state)
b432e5cf 9848{
27c329ed 9849 struct drm_device *dev = old_state->dev;
1a617b77
ML
9850 struct intel_atomic_state *old_intel_state =
9851 to_intel_atomic_state(old_state);
9852 unsigned req_cdclk = old_intel_state->dev_cdclk;
b432e5cf 9853
27c329ed 9854 broadwell_set_cdclk(dev, req_cdclk);
b432e5cf
VS
9855}
9856
c89e39f3
CT
/*
 * Compute the cdclk needed by an atomic state on SKL, using the
 * currently selected CDCLK PLL VCO. Unlike the BDW path this clamps to
 * the platform maximum instead of failing.
 */
static int skl_modeset_calc_cdclk(struct drm_atomic_state *state)
{
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_i915_private *dev_priv = to_i915(state->dev);
	const int max_pixclk = ilk_max_pixel_rate(state);
	int vco = intel_state->cdclk_pll_vco;
	int cdclk;

	/*
	 * FIXME should also account for plane ratio
	 * once 64bpp pixel formats are supported.
	 */
	cdclk = skl_calc_cdclk(max_pixclk, vco);

	/*
	 * FIXME move the cdclk calculation to
	 * compute_config() so we can fail gracefully.
	 */
	if (cdclk > dev_priv->max_cdclk_freq) {
		DRM_ERROR("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
			  cdclk, dev_priv->max_cdclk_freq);
		cdclk = dev_priv->max_cdclk_freq;
	}

	intel_state->cdclk = intel_state->dev_cdclk = cdclk;
	/* With no active pipes, drop the device cdclk to the minimum for this VCO. */
	if (!intel_state->active_crtcs)
		intel_state->dev_cdclk = skl_calc_cdclk(0, vco);

	return 0;
}
9887
9888static void skl_modeset_commit_cdclk(struct drm_atomic_state *old_state)
9889{
1cd593e0
VS
9890 struct drm_i915_private *dev_priv = to_i915(old_state->dev);
9891 struct intel_atomic_state *intel_state = to_intel_atomic_state(old_state);
9892 unsigned int req_cdclk = intel_state->dev_cdclk;
9893 unsigned int req_vco = intel_state->cdclk_pll_vco;
c89e39f3 9894
1cd593e0 9895 skl_set_cdclk(dev_priv, req_cdclk, req_vco);
c89e39f3
CT
9896}
9897
190f68c5
ACO
/*
 * Select the clock/PLL configuration for a HSW+ pipe.
 *
 * DSI outputs get their clock from the DSI PLL and skip DDI PLL
 * selection entirely. Returns 0 on success, -EINVAL if no suitable
 * DDI PLL could be selected.
 */
static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
				      struct intel_crtc_state *crtc_state)
{
	struct intel_encoder *intel_encoder =
		intel_ddi_get_crtc_new_encoder(crtc_state);

	if (intel_encoder->type != INTEL_OUTPUT_DSI) {
		if (!intel_ddi_pll_select(crtc, crtc_state))
			return -EINVAL;
	}

	/* No low-frequency (LVDS downclock) support on this path. */
	crtc->lowfreq_avail = false;

	return 0;
}
9913
3760b59c
S
/*
 * Read out the shared DPLL driving @port on BXT. The port->PLL mapping
 * is fixed on this platform, so no register read is needed to determine
 * the selection.
 */
static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
			    enum port port,
			    struct intel_crtc_state *pipe_config)
{
	enum intel_dpll_id id;

	switch (port) {
	case PORT_A:
		pipe_config->ddi_pll_sel = SKL_DPLL0;
		id = DPLL_ID_SKL_DPLL0;
		break;
	case PORT_B:
		pipe_config->ddi_pll_sel = SKL_DPLL1;
		id = DPLL_ID_SKL_DPLL1;
		break;
	case PORT_C:
		pipe_config->ddi_pll_sel = SKL_DPLL2;
		id = DPLL_ID_SKL_DPLL2;
		break;
	default:
		DRM_ERROR("Incorrect port type\n");
		return;
	}

	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
}
9940
96b7dfb7
S
/*
 * Read out which shared DPLL drives @port on SKL/KBL from DPLL_CTRL2
 * and record it in the pipe config. Bails via MISSING_CASE() on an
 * unexpected selection value.
 */
static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
				enum port port,
				struct intel_crtc_state *pipe_config)
{
	enum intel_dpll_id id;
	u32 temp;

	/* Each port has a 3-bit clock-select field in DPLL_CTRL2. */
	temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
	pipe_config->ddi_pll_sel = temp >> (port * 3 + 1);

	switch (pipe_config->ddi_pll_sel) {
	case SKL_DPLL0:
		id = DPLL_ID_SKL_DPLL0;
		break;
	case SKL_DPLL1:
		id = DPLL_ID_SKL_DPLL1;
		break;
	case SKL_DPLL2:
		id = DPLL_ID_SKL_DPLL2;
		break;
	case SKL_DPLL3:
		id = DPLL_ID_SKL_DPLL3;
		break;
	default:
		MISSING_CASE(pipe_config->ddi_pll_sel);
		return;
	}

	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
}
9971
7d2c8175
DL
/*
 * Read out which PLL drives @port on HSW/BDW from PORT_CLK_SEL and
 * record it in the pipe config. PORT_CLK_SEL_NONE leaves
 * pipe_config->shared_dpll untouched.
 */
static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
				enum port port,
				struct intel_crtc_state *pipe_config)
{
	enum intel_dpll_id id;

	pipe_config->ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));

	switch (pipe_config->ddi_pll_sel) {
	case PORT_CLK_SEL_WRPLL1:
		id = DPLL_ID_WRPLL1;
		break;
	case PORT_CLK_SEL_WRPLL2:
		id = DPLL_ID_WRPLL2;
		break;
	case PORT_CLK_SEL_SPLL:
		id = DPLL_ID_SPLL;
		break;
	case PORT_CLK_SEL_LCPLL_810:
		id = DPLL_ID_LCPLL_810;
		break;
	case PORT_CLK_SEL_LCPLL_1350:
		id = DPLL_ID_LCPLL_1350;
		break;
	case PORT_CLK_SEL_LCPLL_2700:
		id = DPLL_ID_LCPLL_2700;
		break;
	default:
		MISSING_CASE(pipe_config->ddi_pll_sel);
		/* fall through */
	case PORT_CLK_SEL_NONE:
		return;
	}

	pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
}
10008
cf30429e
JN
/*
 * Read out which CPU transcoder feeds @crtc and whether it is enabled.
 *
 * On success the transcoder's power domain is added to
 * *power_domain_mask (with a reference held) so the caller can release
 * it later. Returns true if the transcoder's PIPECONF is enabled.
 */
static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
				     struct intel_crtc_state *pipe_config,
				     unsigned long *power_domain_mask)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	u32 tmp;

	/*
	 * The pipe->transcoder mapping is fixed with the exception of the eDP
	 * transcoder handled below.
	 */
	pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;

	/*
	 * XXX: Do intel_display_power_get_if_enabled before reading this (for
	 * consistency and less surprising code; it's in always on power).
	 */
	tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
	if (tmp & TRANS_DDI_FUNC_ENABLE) {
		enum pipe trans_edp_pipe;
		switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
		default:
			WARN(1, "unknown pipe linked to edp transcoder\n");
			/* fall through - treat unknown values as pipe A */
		case TRANS_DDI_EDP_INPUT_A_ONOFF:
		case TRANS_DDI_EDP_INPUT_A_ON:
			trans_edp_pipe = PIPE_A;
			break;
		case TRANS_DDI_EDP_INPUT_B_ONOFF:
			trans_edp_pipe = PIPE_B;
			break;
		case TRANS_DDI_EDP_INPUT_C_ONOFF:
			trans_edp_pipe = PIPE_C;
			break;
		}

		if (trans_edp_pipe == crtc->pipe)
			pipe_config->cpu_transcoder = TRANSCODER_EDP;
	}

	power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
	if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
		return false;
	*power_domain_mask |= BIT(power_domain);

	tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));

	return tmp & PIPECONF_ENABLE;
}
10059
4d1de975
JN
/*
 * Read out whether a BXT DSI transcoder is driving @crtc.
 *
 * Adds any enabled DSI transcoder power domain to *power_domain_mask
 * (reference held for the caller to release). Returns true and fills
 * in cpu_transcoder/has_dsi_encoder when a DSI transcoder is found
 * feeding this pipe.
 */
static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
					 struct intel_crtc_state *pipe_config,
					 unsigned long *power_domain_mask)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	enum port port;
	enum transcoder cpu_transcoder;
	u32 tmp;

	pipe_config->has_dsi_encoder = false;

	/* DSI is only available on ports A and C on this platform. */
	for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
		if (port == PORT_A)
			cpu_transcoder = TRANSCODER_DSI_A;
		else
			cpu_transcoder = TRANSCODER_DSI_C;

		power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
		if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
			continue;
		*power_domain_mask |= BIT(power_domain);

		/*
		 * The PLL needs to be enabled with a valid divider
		 * configuration, otherwise accessing DSI registers will hang
		 * the machine. See BSpec North Display Engine
		 * registers/MIPI[BXT]. We can break out here early, since we
		 * need the same DSI PLL to be enabled for both DSI ports.
		 */
		if (!intel_dsi_pll_is_enabled(dev_priv))
			break;

		/* XXX: this works for video mode only */
		tmp = I915_READ(BXT_MIPI_PORT_CTRL(port));
		if (!(tmp & DPI_ENABLE))
			continue;

		tmp = I915_READ(MIPI_CTRL(port));
		if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
			continue;

		pipe_config->cpu_transcoder = cpu_transcoder;
		pipe_config->has_dsi_encoder = true;
		break;
	}

	return pipe_config->has_dsi_encoder;
}
10110
/*
 * Read out the DDI port state for @crtc: which port the transcoder is
 * attached to, which shared DPLL drives it (with its hardware state),
 * and whether the PCH/FDI transcoder is involved.
 */
static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
				       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_shared_dpll *pll;
	enum port port;
	uint32_t tmp;

	tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));

	port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;

	/* The PLL readout scheme differs per platform generation. */
	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
		skylake_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_BROXTON(dev))
		bxt_get_ddi_pll(dev_priv, port, pipe_config);
	else
		haswell_get_ddi_pll(dev_priv, port, pipe_config);

	pll = pipe_config->shared_dpll;
	if (pll) {
		WARN_ON(!pll->funcs.get_hw_state(dev_priv, pll,
						 &pipe_config->dpll_hw_state));
	}

	/*
	 * Haswell has only one FDI/PCH transcoder (A), which is connected to
	 * DDI E. So just check whether this pipe is wired to DDI E and whether
	 * the PCH transcoder is on.
	 */
	if (INTEL_INFO(dev)->gen < 9 &&
	    (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
		pipe_config->has_pch_encoder = true;

		tmp = I915_READ(FDI_RX_CTL(PIPE_A));
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;

		ironlake_get_fdi_m_n_config(crtc, pipe_config);
	}
}
10153
0e8ffe1b 10154static bool haswell_get_pipe_config(struct intel_crtc *crtc,
5cec258b 10155 struct intel_crtc_state *pipe_config)
0e8ffe1b
DV
10156{
10157 struct drm_device *dev = crtc->base.dev;
10158 struct drm_i915_private *dev_priv = dev->dev_private;
1729050e
ID
10159 enum intel_display_power_domain power_domain;
10160 unsigned long power_domain_mask;
cf30429e 10161 bool active;
0e8ffe1b 10162
1729050e
ID
10163 power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
10164 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
b5482bd0 10165 return false;
1729050e
ID
10166 power_domain_mask = BIT(power_domain);
10167
8106ddbd 10168 pipe_config->shared_dpll = NULL;
c0d43d62 10169
cf30429e 10170 active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_mask);
eccb140b 10171
4d1de975
JN
10172 if (IS_BROXTON(dev_priv)) {
10173 bxt_get_dsi_transcoder_state(crtc, pipe_config,
10174 &power_domain_mask);
10175 WARN_ON(active && pipe_config->has_dsi_encoder);
10176 if (pipe_config->has_dsi_encoder)
10177 active = true;
10178 }
10179
cf30429e 10180 if (!active)
1729050e 10181 goto out;
0e8ffe1b 10182
4d1de975
JN
10183 if (!pipe_config->has_dsi_encoder) {
10184 haswell_get_ddi_port_state(crtc, pipe_config);
10185 intel_get_pipe_timings(crtc, pipe_config);
10186 }
627eb5a3 10187
bc58be60 10188 intel_get_pipe_src_size(crtc, pipe_config);
1bd1bd80 10189
05dc698c
LL
10190 pipe_config->gamma_mode =
10191 I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK;
10192
a1b2278e
CK
10193 if (INTEL_INFO(dev)->gen >= 9) {
10194 skl_init_scalers(dev, crtc, pipe_config);
10195 }
10196
af99ceda
CK
10197 if (INTEL_INFO(dev)->gen >= 9) {
10198 pipe_config->scaler_state.scaler_id = -1;
10199 pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX);
10200 }
10201
1729050e
ID
10202 power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
10203 if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
10204 power_domain_mask |= BIT(power_domain);
1c132b44 10205 if (INTEL_INFO(dev)->gen >= 9)
bd2e244f 10206 skylake_get_pfit_config(crtc, pipe_config);
ff6d9f55 10207 else
1c132b44 10208 ironlake_get_pfit_config(crtc, pipe_config);
bd2e244f 10209 }
88adfff1 10210
e59150dc
JB
10211 if (IS_HASWELL(dev))
10212 pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
10213 (I915_READ(IPS_CTL) & IPS_ENABLE);
42db64ef 10214
4d1de975
JN
10215 if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
10216 !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
ebb69c95
CT
10217 pipe_config->pixel_multiplier =
10218 I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
10219 } else {
10220 pipe_config->pixel_multiplier = 1;
10221 }
6c49f241 10222
1729050e
ID
10223out:
10224 for_each_power_domain(power_domain, power_domain_mask)
10225 intel_display_power_put(dev_priv, power_domain);
10226
cf30429e 10227 return active;
0e8ffe1b
DV
10228}
10229
55a08b3f
ML
/*
 * Program the 845G/865G cursor registers.
 *
 * The hardware here only allows base/size/stride changes while the
 * cursor is disabled, hence the disable-then-reprogram dance below.
 * Software-cached register values in intel_crtc avoid redundant MMIO.
 */
static void i845_update_cursor(struct drm_crtc *crtc, u32 base,
			       const struct intel_plane_state *plane_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t cntl = 0, size = 0;

	if (plane_state && plane_state->visible) {
		unsigned int width = plane_state->base.crtc_w;
		unsigned int height = plane_state->base.crtc_h;
		/* 4 bytes per pixel, stride rounded to a power of two. */
		unsigned int stride = roundup_pow_of_two(width) * 4;

		switch (stride) {
		default:
			WARN_ONCE(1, "Invalid cursor width/stride, width=%u, stride=%u\n",
				  width, stride);
			stride = 256;
			/* fallthrough */
		case 256:
		case 512:
		case 1024:
		case 2048:
			break;
		}

		cntl |= CURSOR_ENABLE |
			CURSOR_GAMMA_ENABLE |
			CURSOR_FORMAT_ARGB |
			CURSOR_STRIDE(stride);

		size = (height << 12) | width;
	}

	if (intel_crtc->cursor_cntl != 0 &&
	    (intel_crtc->cursor_base != base ||
	     intel_crtc->cursor_size != size ||
	     intel_crtc->cursor_cntl != cntl)) {
		/* On these chipsets we can only modify the base/size/stride
		 * whilst the cursor is disabled.
		 */
		I915_WRITE(CURCNTR(PIPE_A), 0);
		POSTING_READ(CURCNTR(PIPE_A));
		intel_crtc->cursor_cntl = 0;
	}

	if (intel_crtc->cursor_base != base) {
		I915_WRITE(CURBASE(PIPE_A), base);
		intel_crtc->cursor_base = base;
	}

	if (intel_crtc->cursor_size != size) {
		I915_WRITE(CURSIZE, size);
		intel_crtc->cursor_size = size;
	}

	if (intel_crtc->cursor_cntl != cntl) {
		I915_WRITE(CURCNTR(PIPE_A), cntl);
		POSTING_READ(CURCNTR(PIPE_A));
		intel_crtc->cursor_cntl = cntl;
	}
}
10292
55a08b3f
ML
/*
 * Program the i9xx+ cursor registers for @crtc.
 *
 * Only square ARGB cursors of 64/128/256 pixels are supported; any
 * other width trips MISSING_CASE() and leaves the registers untouched.
 * The CURBASE write is what latches the update on the next vblank.
 */
static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base,
			       const struct intel_plane_state *plane_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	uint32_t cntl = 0;

	if (plane_state && plane_state->visible) {
		cntl = MCURSOR_GAMMA_ENABLE;
		switch (plane_state->base.crtc_w) {
		case 64:
			cntl |= CURSOR_MODE_64_ARGB_AX;
			break;
		case 128:
			cntl |= CURSOR_MODE_128_ARGB_AX;
			break;
		case 256:
			cntl |= CURSOR_MODE_256_ARGB_AX;
			break;
		default:
			MISSING_CASE(plane_state->base.crtc_w);
			return;
		}
		cntl |= pipe << 28; /* Connect to correct pipe */

		if (HAS_DDI(dev))
			cntl |= CURSOR_PIPE_CSC_ENABLE;

		if (plane_state->base.rotation == BIT(DRM_ROTATE_180))
			cntl |= CURSOR_ROTATE_180;
	}

	if (intel_crtc->cursor_cntl != cntl) {
		I915_WRITE(CURCNTR(pipe), cntl);
		POSTING_READ(CURCNTR(pipe));
		intel_crtc->cursor_cntl = cntl;
	}

	/* and commit changes on next vblank */
	I915_WRITE(CURBASE(pipe), base);
	POSTING_READ(CURBASE(pipe));

	intel_crtc->cursor_base = base;
}
10339
/* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... */
/*
 * Update cursor position and contents for @crtc, dispatching to the
 * platform-specific register programming (845G/865G vs. i9xx+).
 * A NULL @plane_state means the cursor is being disabled/hidden.
 */
static void intel_crtc_update_cursor(struct drm_crtc *crtc,
				     const struct intel_plane_state *plane_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 base = intel_crtc->cursor_addr;
	u32 pos = 0;

	if (plane_state) {
		int x = plane_state->base.crtc_x;
		int y = plane_state->base.crtc_y;

		/* Negative coordinates are encoded as sign bit + magnitude. */
		if (x < 0) {
			pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
			x = -x;
		}
		pos |= x << CURSOR_X_SHIFT;

		if (y < 0) {
			pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
			y = -y;
		}
		pos |= y << CURSOR_Y_SHIFT;

		/* ILK+ do this automagically */
		if (HAS_GMCH_DISPLAY(dev) &&
		    plane_state->base.rotation == BIT(DRM_ROTATE_180)) {
			/* Point base at the last pixel for 180° rotation. */
			base += (plane_state->base.crtc_h *
				 plane_state->base.crtc_w - 1) * 4;
		}
	}

	I915_WRITE(CURPOS(pipe), pos);

	if (IS_845G(dev) || IS_I865G(dev))
		i845_update_cursor(crtc, base, plane_state);
	else
		i9xx_update_cursor(crtc, base, plane_state);
}
10382
dc41c154
VS
10383static bool cursor_size_ok(struct drm_device *dev,
10384 uint32_t width, uint32_t height)
10385{
10386 if (width == 0 || height == 0)
10387 return false;
10388
10389 /*
10390 * 845g/865g are special in that they are only limited by
10391 * the width of their cursors, the height is arbitrary up to
10392 * the precision of the register. Everything else requires
10393 * square cursors, limited to a few power-of-two sizes.
10394 */
10395 if (IS_845G(dev) || IS_I865G(dev)) {
10396 if ((width & 63) != 0)
10397 return false;
10398
10399 if (width > (IS_845G(dev) ? 64 : 512))
10400 return false;
10401
10402 if (height > 1023)
10403 return false;
10404 } else {
10405 switch (width | height) {
10406 case 256:
10407 case 128:
10408 if (IS_GEN2(dev))
10409 return false;
10410 case 64:
10411 break;
10412 default:
10413 return false;
10414 }
10415 }
10416
10417 return true;
10418}
10419
79e53945
JB
/* VESA 640x480x72Hz mode to set on the pipe */
/* Fallback mode used by intel_get_load_detect_pipe() when no mode is given. */
static struct drm_display_mode load_detect_mode = {
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};
10425
a8bb6818
DV
/*
 * Allocate and initialize an intel_framebuffer wrapping @obj.
 *
 * Caller must hold the locking required by intel_framebuffer_init()
 * (see intel_framebuffer_create() for the locked wrapper). Returns the
 * new framebuffer or an ERR_PTR; @obj ownership is not consumed on
 * failure.
 */
struct drm_framebuffer *
__intel_framebuffer_create(struct drm_device *dev,
			   struct drm_mode_fb_cmd2 *mode_cmd,
			   struct drm_i915_gem_object *obj)
{
	struct intel_framebuffer *intel_fb;
	int ret;

	intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
	if (!intel_fb)
		return ERR_PTR(-ENOMEM);

	ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
	if (ret)
		goto err;

	return &intel_fb->base;

err:
	kfree(intel_fb);
	return ERR_PTR(ret);
}
10448
b5ea642a 10449static struct drm_framebuffer *
a8bb6818
DV
10450intel_framebuffer_create(struct drm_device *dev,
10451 struct drm_mode_fb_cmd2 *mode_cmd,
10452 struct drm_i915_gem_object *obj)
10453{
10454 struct drm_framebuffer *fb;
10455 int ret;
10456
10457 ret = i915_mutex_lock_interruptible(dev);
10458 if (ret)
10459 return ERR_PTR(ret);
10460 fb = __intel_framebuffer_create(dev, mode_cmd, obj);
10461 mutex_unlock(&dev->struct_mutex);
10462
10463 return fb;
10464}
10465
d2dff872
CW
/*
 * Compute a framebuffer pitch in bytes for @width pixels at @bpp bits
 * per pixel, rounded up to a 64-byte-aligned stride.
 */
static u32
intel_framebuffer_pitch_for_width(int width, int bpp)
{
	u32 pitch = DIV_ROUND_UP(width * bpp, 8);
	return ALIGN(pitch, 64);
}
10472
/*
 * Compute the page-aligned buffer size needed to back a framebuffer
 * for @mode at @bpp bits per pixel.
 */
static u32
intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
{
	u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
	return PAGE_ALIGN(pitch * mode->vdisplay);
}
10479
/*
 * Allocate a GEM object sized for @mode and wrap it in a framebuffer.
 *
 * On framebuffer-creation failure the freshly allocated object is
 * released again. Returns the framebuffer or an ERR_PTR.
 */
static struct drm_framebuffer *
intel_framebuffer_create_for_mode(struct drm_device *dev,
				  struct drm_display_mode *mode,
				  int depth, int bpp)
{
	struct drm_framebuffer *fb;
	struct drm_i915_gem_object *obj;
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };

	obj = i915_gem_object_create(dev,
				     intel_framebuffer_size_for_mode(mode, bpp));
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	mode_cmd.width = mode->hdisplay;
	mode_cmd.height = mode->vdisplay;
	mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width,
								bpp);
	mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);

	fb = intel_framebuffer_create(dev, &mode_cmd, obj);
	if (IS_ERR(fb))
		drm_gem_object_unreference_unlocked(&obj->base);

	return fb;
}
10506
/*
 * Check whether the fbdev framebuffer is large enough to display
 * @mode. Returns a new reference to the fbdev framebuffer if it fits,
 * or NULL otherwise (and always NULL without fbdev emulation).
 */
static struct drm_framebuffer *
mode_fits_in_fbdev(struct drm_device *dev,
		   struct drm_display_mode *mode)
{
#ifdef CONFIG_DRM_FBDEV_EMULATION
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	struct drm_framebuffer *fb;

	if (!dev_priv->fbdev)
		return NULL;

	if (!dev_priv->fbdev->fb)
		return NULL;

	obj = dev_priv->fbdev->fb->obj;
	BUG_ON(!obj);

	fb = &dev_priv->fbdev->fb->base;
	/* Mode must fit both the pitch and the overall object size. */
	if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
							       fb->bits_per_pixel))
		return NULL;

	if (obj->base.size < mode->vdisplay * fb->pitches[0])
		return NULL;

	drm_framebuffer_reference(fb);
	return fb;
#else
	return NULL;
#endif
}
10539
d3a40d1b
ACO
/*
 * Set up the primary plane state of @crtc in @state to scan out @fb
 * full-screen for @mode, starting at source offset (@x, @y). Passing a
 * NULL @fb detaches the plane. Returns 0 or a negative error code.
 */
static int intel_modeset_setup_plane_state(struct drm_atomic_state *state,
					   struct drm_crtc *crtc,
					   struct drm_display_mode *mode,
					   struct drm_framebuffer *fb,
					   int x, int y)
{
	struct drm_plane_state *plane_state;
	int hdisplay, vdisplay;
	int ret;

	plane_state = drm_atomic_get_plane_state(state, crtc->primary);
	if (IS_ERR(plane_state))
		return PTR_ERR(plane_state);

	if (mode)
		drm_crtc_get_hv_timing(mode, &hdisplay, &vdisplay);
	else
		hdisplay = vdisplay = 0;

	ret = drm_atomic_set_crtc_for_plane(plane_state, fb ? crtc : NULL);
	if (ret)
		return ret;
	drm_atomic_set_fb_for_plane(plane_state, fb);
	plane_state->crtc_x = 0;
	plane_state->crtc_y = 0;
	plane_state->crtc_w = hdisplay;
	plane_state->crtc_h = vdisplay;
	/* src_* are 16.16 fixed point. */
	plane_state->src_x = x << 16;
	plane_state->src_y = y << 16;
	plane_state->src_w = hdisplay << 16;
	plane_state->src_h = vdisplay << 16;

	return 0;
}
10574
/*
 * Light up a pipe on @connector so a load-detection cycle can run.
 *
 * Picks the connector's current CRTC or the first unused one, builds an
 * atomic state enabling it with @mode (or the 640x480 fallback) on a
 * suitable framebuffer, and commits it. A second atomic state capturing
 * the pre-detection configuration is stashed in old->restore_state for
 * intel_release_load_detect_pipe() to commit later.
 *
 * Uses @ctx for all modeset locking; on -EDEADLK it backs off and
 * retries. Returns true if the pipe was successfully enabled.
 */
bool intel_get_load_detect_pipe(struct drm_connector *connector,
				struct drm_display_mode *mode,
				struct intel_load_detect_pipe *old,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_crtc *intel_crtc;
	struct intel_encoder *intel_encoder =
		intel_attached_encoder(connector);
	struct drm_crtc *possible_crtc;
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_crtc *crtc = NULL;
	struct drm_device *dev = encoder->dev;
	struct drm_framebuffer *fb;
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_atomic_state *state = NULL, *restore_state = NULL;
	struct drm_connector_state *connector_state;
	struct intel_crtc_state *crtc_state;
	int ret, i = -1;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, connector->name,
		      encoder->base.id, encoder->name);

	old->restore_state = NULL;

retry:
	ret = drm_modeset_lock(&config->connection_mutex, ctx);
	if (ret)
		goto fail;

	/*
	 * Algorithm gets a little messy:
	 *
	 * - if the connector already has an assigned crtc, use it (but make
	 *   sure it's on first)
	 *
	 * - try to find the first unused crtc that can drive this connector,
	 *   and use that if we find one
	 */

	/* See if we already have a CRTC for this connector */
	if (connector->state->crtc) {
		crtc = connector->state->crtc;

		ret = drm_modeset_lock(&crtc->mutex, ctx);
		if (ret)
			goto fail;

		/* Make sure the crtc and connector are running */
		goto found;
	}

	/* Find an unused one (if possible) */
	for_each_crtc(dev, possible_crtc) {
		i++;
		if (!(encoder->possible_crtcs & (1 << i)))
			continue;

		ret = drm_modeset_lock(&possible_crtc->mutex, ctx);
		if (ret)
			goto fail;

		if (possible_crtc->state->enable) {
			drm_modeset_unlock(&possible_crtc->mutex);
			continue;
		}

		crtc = possible_crtc;
		break;
	}

	/*
	 * If we didn't find an unused CRTC, don't use any.
	 */
	if (!crtc) {
		DRM_DEBUG_KMS("no pipe available for load-detect\n");
		goto fail;
	}

found:
	intel_crtc = to_intel_crtc(crtc);

	ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
	if (ret)
		goto fail;

	state = drm_atomic_state_alloc(dev);
	restore_state = drm_atomic_state_alloc(dev);
	if (!state || !restore_state) {
		ret = -ENOMEM;
		goto fail;
	}

	state->acquire_ctx = ctx;
	restore_state->acquire_ctx = ctx;

	connector_state = drm_atomic_get_connector_state(state, connector);
	if (IS_ERR(connector_state)) {
		ret = PTR_ERR(connector_state);
		goto fail;
	}

	ret = drm_atomic_set_crtc_for_connector(connector_state, crtc);
	if (ret)
		goto fail;

	crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
	if (IS_ERR(crtc_state)) {
		ret = PTR_ERR(crtc_state);
		goto fail;
	}

	crtc_state->base.active = crtc_state->base.enable = true;

	if (!mode)
		mode = &load_detect_mode;

	/* We need a framebuffer large enough to accommodate all accesses
	 * that the plane may generate whilst we perform load detection.
	 * We can not rely on the fbcon either being present (we get called
	 * during its initialisation to detect all boot displays, or it may
	 * not even exist) or that it is large enough to satisfy the
	 * requested mode.
	 */
	fb = mode_fits_in_fbdev(dev, mode);
	if (fb == NULL) {
		DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
		fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
	} else
		DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
	if (IS_ERR(fb)) {
		DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
		goto fail;
	}

	ret = intel_modeset_setup_plane_state(state, crtc, mode, fb, 0, 0);
	if (ret)
		goto fail;

	/* The plane state now holds its own reference to fb. */
	drm_framebuffer_unreference(fb);

	ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode);
	if (ret)
		goto fail;

	/* Duplicate the current connector/crtc/plane state for later restore. */
	ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
	if (!ret)
		ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
	if (!ret)
		ret = PTR_ERR_OR_ZERO(drm_atomic_get_plane_state(restore_state, crtc->primary));
	if (ret) {
		DRM_DEBUG_KMS("Failed to create a copy of old state to restore: %i\n", ret);
		goto fail;
	}

	ret = drm_atomic_commit(state);
	if (ret) {
		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
		goto fail;
	}

	old->restore_state = restore_state;

	/* let the connector get through one full cycle before testing */
	intel_wait_for_vblank(dev, intel_crtc->pipe);
	return true;

fail:
	drm_atomic_state_free(state);
	drm_atomic_state_free(restore_state);
	restore_state = state = NULL;

	if (ret == -EDEADLK) {
		drm_modeset_backoff(ctx);
		goto retry;
	}

	return false;
}
10754
/*
 * Undo intel_get_load_detect_pipe(): commit the saved restore_state to
 * bring the display back to its pre-detection configuration. A NULL
 * restore_state (load-detect never succeeded) is a no-op. The state is
 * consumed either by the commit or freed on commit failure.
 */
void intel_release_load_detect_pipe(struct drm_connector *connector,
				    struct intel_load_detect_pipe *old,
				    struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_encoder *intel_encoder =
		intel_attached_encoder(connector);
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_atomic_state *state = old->restore_state;
	int ret;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, connector->name,
		      encoder->base.id, encoder->name);

	if (!state)
		return;

	ret = drm_atomic_commit(state);
	if (ret) {
		DRM_DEBUG_KMS("Couldn't release load detect pipe: %i\n", ret);
		drm_atomic_state_free(state);
	}
}
10778
da4a1efa 10779static int i9xx_pll_refclk(struct drm_device *dev,
5cec258b 10780 const struct intel_crtc_state *pipe_config)
da4a1efa
VS
10781{
10782 struct drm_i915_private *dev_priv = dev->dev_private;
10783 u32 dpll = pipe_config->dpll_hw_state.dpll;
10784
10785 if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
e91e941b 10786 return dev_priv->vbt.lvds_ssc_freq;
da4a1efa
VS
10787 else if (HAS_PCH_SPLIT(dev))
10788 return 120000;
10789 else if (!IS_GEN2(dev))
10790 return 96000;
10791 else
10792 return 48000;
10793}
10794
79e53945 10795/* Returns the clock of the currently programmed mode of the given pipe. */
f1f644dc 10796static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
5cec258b 10797 struct intel_crtc_state *pipe_config)
79e53945 10798{
f1f644dc 10799 struct drm_device *dev = crtc->base.dev;
79e53945 10800 struct drm_i915_private *dev_priv = dev->dev_private;
f1f644dc 10801 int pipe = pipe_config->cpu_transcoder;
293623f7 10802 u32 dpll = pipe_config->dpll_hw_state.dpll;
79e53945 10803 u32 fp;
9e2c8475 10804 struct dpll clock;
dccbea3b 10805 int port_clock;
da4a1efa 10806 int refclk = i9xx_pll_refclk(dev, pipe_config);
79e53945
JB
10807
10808 if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
293623f7 10809 fp = pipe_config->dpll_hw_state.fp0;
79e53945 10810 else
293623f7 10811 fp = pipe_config->dpll_hw_state.fp1;
79e53945
JB
10812
10813 clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
f2b115e6
AJ
10814 if (IS_PINEVIEW(dev)) {
10815 clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
10816 clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
2177832f
SL
10817 } else {
10818 clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
10819 clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
10820 }
10821
a6c45cf0 10822 if (!IS_GEN2(dev)) {
f2b115e6
AJ
10823 if (IS_PINEVIEW(dev))
10824 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
10825 DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
2177832f
SL
10826 else
10827 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
79e53945
JB
10828 DPLL_FPA01_P1_POST_DIV_SHIFT);
10829
10830 switch (dpll & DPLL_MODE_MASK) {
10831 case DPLLB_MODE_DAC_SERIAL:
10832 clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
10833 5 : 10;
10834 break;
10835 case DPLLB_MODE_LVDS:
10836 clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
10837 7 : 14;
10838 break;
10839 default:
28c97730 10840 DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
79e53945 10841 "mode\n", (int)(dpll & DPLL_MODE_MASK));
f1f644dc 10842 return;
79e53945
JB
10843 }
10844
ac58c3f0 10845 if (IS_PINEVIEW(dev))
dccbea3b 10846 port_clock = pnv_calc_dpll_params(refclk, &clock);
ac58c3f0 10847 else
dccbea3b 10848 port_clock = i9xx_calc_dpll_params(refclk, &clock);
79e53945 10849 } else {
0fb58223 10850 u32 lvds = IS_I830(dev) ? 0 : I915_READ(LVDS);
b1c560d1 10851 bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);
79e53945
JB
10852
10853 if (is_lvds) {
10854 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
10855 DPLL_FPA01_P1_POST_DIV_SHIFT);
b1c560d1
VS
10856
10857 if (lvds & LVDS_CLKB_POWER_UP)
10858 clock.p2 = 7;
10859 else
10860 clock.p2 = 14;
79e53945
JB
10861 } else {
10862 if (dpll & PLL_P1_DIVIDE_BY_TWO)
10863 clock.p1 = 2;
10864 else {
10865 clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
10866 DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
10867 }
10868 if (dpll & PLL_P2_DIVIDE_BY_4)
10869 clock.p2 = 4;
10870 else
10871 clock.p2 = 2;
79e53945 10872 }
da4a1efa 10873
dccbea3b 10874 port_clock = i9xx_calc_dpll_params(refclk, &clock);
79e53945
JB
10875 }
10876
18442d08
VS
10877 /*
10878 * This value includes pixel_multiplier. We will use
241bfc38 10879 * port_clock to compute adjusted_mode.crtc_clock in the
18442d08
VS
10880 * encoder's get_config() function.
10881 */
dccbea3b 10882 pipe_config->port_clock = port_clock;
f1f644dc
JB
10883}
10884
6878da05
VS
10885int intel_dotclock_calculate(int link_freq,
10886 const struct intel_link_m_n *m_n)
f1f644dc 10887{
f1f644dc
JB
10888 /*
10889 * The calculation for the data clock is:
1041a02f 10890 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
f1f644dc 10891 * But we want to avoid losing precison if possible, so:
1041a02f 10892 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
f1f644dc
JB
10893 *
10894 * and the link clock is simpler:
1041a02f 10895 * link_clock = (m * link_clock) / n
f1f644dc
JB
10896 */
10897
6878da05
VS
10898 if (!m_n->link_n)
10899 return 0;
f1f644dc 10900
6878da05
VS
10901 return div_u64((u64)m_n->link_m * link_freq, m_n->link_n);
10902}
f1f644dc 10903
18442d08 10904static void ironlake_pch_clock_get(struct intel_crtc *crtc,
5cec258b 10905 struct intel_crtc_state *pipe_config)
6878da05 10906{
e3b247da 10907 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
79e53945 10908
18442d08
VS
10909 /* read out port_clock from the DPLL */
10910 i9xx_crtc_clock_get(crtc, pipe_config);
f1f644dc 10911
f1f644dc 10912 /*
e3b247da
VS
10913 * In case there is an active pipe without active ports,
10914 * we may need some idea for the dotclock anyway.
10915 * Calculate one based on the FDI configuration.
79e53945 10916 */
2d112de7 10917 pipe_config->base.adjusted_mode.crtc_clock =
21a727b3 10918 intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
18442d08 10919 &pipe_config->fdi_m_n);
79e53945
JB
10920}
10921
10922/** Returns the currently programmed mode of the given pipe. */
10923struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
10924 struct drm_crtc *crtc)
10925{
548f245b 10926 struct drm_i915_private *dev_priv = dev->dev_private;
79e53945 10927 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6e3c9717 10928 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
79e53945 10929 struct drm_display_mode *mode;
3f36b937 10930 struct intel_crtc_state *pipe_config;
fe2b8f9d
PZ
10931 int htot = I915_READ(HTOTAL(cpu_transcoder));
10932 int hsync = I915_READ(HSYNC(cpu_transcoder));
10933 int vtot = I915_READ(VTOTAL(cpu_transcoder));
10934 int vsync = I915_READ(VSYNC(cpu_transcoder));
293623f7 10935 enum pipe pipe = intel_crtc->pipe;
79e53945
JB
10936
10937 mode = kzalloc(sizeof(*mode), GFP_KERNEL);
10938 if (!mode)
10939 return NULL;
10940
3f36b937
TU
10941 pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
10942 if (!pipe_config) {
10943 kfree(mode);
10944 return NULL;
10945 }
10946
f1f644dc
JB
10947 /*
10948 * Construct a pipe_config sufficient for getting the clock info
10949 * back out of crtc_clock_get.
10950 *
10951 * Note, if LVDS ever uses a non-1 pixel multiplier, we'll need
10952 * to use a real value here instead.
10953 */
3f36b937
TU
10954 pipe_config->cpu_transcoder = (enum transcoder) pipe;
10955 pipe_config->pixel_multiplier = 1;
10956 pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(pipe));
10957 pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(pipe));
10958 pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(pipe));
10959 i9xx_crtc_clock_get(intel_crtc, pipe_config);
10960
10961 mode->clock = pipe_config->port_clock / pipe_config->pixel_multiplier;
79e53945
JB
10962 mode->hdisplay = (htot & 0xffff) + 1;
10963 mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
10964 mode->hsync_start = (hsync & 0xffff) + 1;
10965 mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
10966 mode->vdisplay = (vtot & 0xffff) + 1;
10967 mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
10968 mode->vsync_start = (vsync & 0xffff) + 1;
10969 mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;
10970
10971 drm_mode_set_name(mode);
79e53945 10972
3f36b937
TU
10973 kfree(pipe_config);
10974
79e53945
JB
10975 return mode;
10976}
10977
7d993739 10978void intel_mark_busy(struct drm_i915_private *dev_priv)
f047e395 10979{
f62a0076
CW
10980 if (dev_priv->mm.busy)
10981 return;
10982
43694d69 10983 intel_runtime_pm_get(dev_priv);
c67a470b 10984 i915_update_gfx_val(dev_priv);
7d993739 10985 if (INTEL_GEN(dev_priv) >= 6)
43cf3bf0 10986 gen6_rps_busy(dev_priv);
f62a0076 10987 dev_priv->mm.busy = true;
f047e395
CW
10988}
10989
7d993739 10990void intel_mark_idle(struct drm_i915_private *dev_priv)
652c393a 10991{
f62a0076
CW
10992 if (!dev_priv->mm.busy)
10993 return;
10994
10995 dev_priv->mm.busy = false;
10996
7d993739
TU
10997 if (INTEL_GEN(dev_priv) >= 6)
10998 gen6_rps_idle(dev_priv);
bb4cdd53 10999
43694d69 11000 intel_runtime_pm_put(dev_priv);
652c393a
JB
11001}
11002
79e53945
JB
11003static void intel_crtc_destroy(struct drm_crtc *crtc)
11004{
11005 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
67e77c5a 11006 struct drm_device *dev = crtc->dev;
51cbaf01 11007 struct intel_flip_work *work;
67e77c5a 11008
5e2d7afc 11009 spin_lock_irq(&dev->event_lock);
5a21b665
DV
11010 work = intel_crtc->flip_work;
11011 intel_crtc->flip_work = NULL;
11012 spin_unlock_irq(&dev->event_lock);
67e77c5a 11013
5a21b665 11014 if (work) {
51cbaf01
ML
11015 cancel_work_sync(&work->mmio_work);
11016 cancel_work_sync(&work->unpin_work);
5a21b665 11017 kfree(work);
67e77c5a 11018 }
79e53945
JB
11019
11020 drm_crtc_cleanup(crtc);
67e77c5a 11021
79e53945
JB
11022 kfree(intel_crtc);
11023}
11024
6b95a207
KH
11025static void intel_unpin_work_fn(struct work_struct *__work)
11026{
51cbaf01
ML
11027 struct intel_flip_work *work =
11028 container_of(__work, struct intel_flip_work, unpin_work);
5a21b665
DV
11029 struct intel_crtc *crtc = to_intel_crtc(work->crtc);
11030 struct drm_device *dev = crtc->base.dev;
11031 struct drm_plane *primary = crtc->base.primary;
03f476e1 11032
5a21b665
DV
11033 if (is_mmio_work(work))
11034 flush_work(&work->mmio_work);
03f476e1 11035
5a21b665
DV
11036 mutex_lock(&dev->struct_mutex);
11037 intel_unpin_fb_obj(work->old_fb, primary->state->rotation);
11038 drm_gem_object_unreference(&work->pending_flip_obj->base);
143f73b3 11039
5a21b665
DV
11040 if (work->flip_queued_req)
11041 i915_gem_request_assign(&work->flip_queued_req, NULL);
11042 mutex_unlock(&dev->struct_mutex);
143f73b3 11043
5a21b665
DV
11044 intel_frontbuffer_flip_complete(dev, to_intel_plane(primary)->frontbuffer_bit);
11045 intel_fbc_post_update(crtc);
11046 drm_framebuffer_unreference(work->old_fb);
143f73b3 11047
5a21b665
DV
11048 BUG_ON(atomic_read(&crtc->unpin_work_count) == 0);
11049 atomic_dec(&crtc->unpin_work_count);
a6747b73 11050
5a21b665
DV
11051 kfree(work);
11052}
d9e86c0e 11053
5a21b665
DV
11054/* Is 'a' after or equal to 'b'? */
11055static bool g4x_flip_count_after_eq(u32 a, u32 b)
11056{
11057 return !((a - b) & 0x80000000);
11058}
143f73b3 11059
5a21b665
DV
11060static bool __pageflip_finished_cs(struct intel_crtc *crtc,
11061 struct intel_flip_work *work)
11062{
11063 struct drm_device *dev = crtc->base.dev;
11064 struct drm_i915_private *dev_priv = dev->dev_private;
11065 unsigned reset_counter;
143f73b3 11066
5a21b665
DV
11067 reset_counter = i915_reset_counter(&dev_priv->gpu_error);
11068 if (crtc->reset_counter != reset_counter)
11069 return true;
143f73b3 11070
5a21b665
DV
11071 /*
11072 * The relevant registers doen't exist on pre-ctg.
11073 * As the flip done interrupt doesn't trigger for mmio
11074 * flips on gmch platforms, a flip count check isn't
11075 * really needed there. But since ctg has the registers,
11076 * include it in the check anyway.
11077 */
11078 if (INTEL_INFO(dev)->gen < 5 && !IS_G4X(dev))
11079 return true;
b4a98e57 11080
5a21b665
DV
11081 /*
11082 * BDW signals flip done immediately if the plane
11083 * is disabled, even if the plane enable is already
11084 * armed to occur at the next vblank :(
11085 */
f99d7069 11086
5a21b665
DV
11087 /*
11088 * A DSPSURFLIVE check isn't enough in case the mmio and CS flips
11089 * used the same base address. In that case the mmio flip might
11090 * have completed, but the CS hasn't even executed the flip yet.
11091 *
11092 * A flip count check isn't enough as the CS might have updated
11093 * the base address just after start of vblank, but before we
11094 * managed to process the interrupt. This means we'd complete the
11095 * CS flip too soon.
11096 *
11097 * Combining both checks should get us a good enough result. It may
11098 * still happen that the CS flip has been executed, but has not
11099 * yet actually completed. But in case the base address is the same
11100 * anyway, we don't really care.
11101 */
11102 return (I915_READ(DSPSURFLIVE(crtc->plane)) & ~0xfff) ==
11103 crtc->flip_work->gtt_offset &&
11104 g4x_flip_count_after_eq(I915_READ(PIPE_FLIPCOUNT_G4X(crtc->pipe)),
11105 crtc->flip_work->flip_count);
11106}
b4a98e57 11107
5a21b665
DV
11108static bool
11109__pageflip_finished_mmio(struct intel_crtc *crtc,
11110 struct intel_flip_work *work)
11111{
11112 /*
11113 * MMIO work completes when vblank is different from
11114 * flip_queued_vblank.
11115 *
11116 * Reset counter value doesn't matter, this is handled by
11117 * i915_wait_request finishing early, so no need to handle
11118 * reset here.
11119 */
11120 return intel_crtc_get_vblank_counter(crtc) != work->flip_queued_vblank;
6b95a207
KH
11121}
11122
51cbaf01
ML
11123
11124static bool pageflip_finished(struct intel_crtc *crtc,
11125 struct intel_flip_work *work)
11126{
11127 if (!atomic_read(&work->pending))
11128 return false;
11129
11130 smp_rmb();
11131
5a21b665
DV
11132 if (is_mmio_work(work))
11133 return __pageflip_finished_mmio(crtc, work);
11134 else
11135 return __pageflip_finished_cs(crtc, work);
11136}
11137
11138void intel_finish_page_flip_cs(struct drm_i915_private *dev_priv, int pipe)
11139{
11140 struct drm_device *dev = dev_priv->dev;
11141 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
11142 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11143 struct intel_flip_work *work;
11144 unsigned long flags;
11145
11146 /* Ignore early vblank irqs */
11147 if (!crtc)
11148 return;
11149
51cbaf01 11150 /*
5a21b665
DV
11151 * This is called both by irq handlers and the reset code (to complete
11152 * lost pageflips) so needs the full irqsave spinlocks.
51cbaf01 11153 */
5a21b665
DV
11154 spin_lock_irqsave(&dev->event_lock, flags);
11155 work = intel_crtc->flip_work;
11156
11157 if (work != NULL &&
11158 !is_mmio_work(work) &&
11159 pageflip_finished(intel_crtc, work))
11160 page_flip_completed(intel_crtc);
11161
11162 spin_unlock_irqrestore(&dev->event_lock, flags);
75f7f3ec
VS
11163}
11164
51cbaf01 11165void intel_finish_page_flip_mmio(struct drm_i915_private *dev_priv, int pipe)
6b95a207 11166{
91d14251 11167 struct drm_device *dev = dev_priv->dev;
5251f04e
ML
11168 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
11169 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
51cbaf01 11170 struct intel_flip_work *work;
6b95a207
KH
11171 unsigned long flags;
11172
5251f04e
ML
11173 /* Ignore early vblank irqs */
11174 if (!crtc)
11175 return;
f326038a
DV
11176
11177 /*
11178 * This is called both by irq handlers and the reset code (to complete
11179 * lost pageflips) so needs the full irqsave spinlocks.
e7d841ca 11180 */
6b95a207 11181 spin_lock_irqsave(&dev->event_lock, flags);
5a21b665 11182 work = intel_crtc->flip_work;
5251f04e 11183
5a21b665
DV
11184 if (work != NULL &&
11185 is_mmio_work(work) &&
11186 pageflip_finished(intel_crtc, work))
11187 page_flip_completed(intel_crtc);
5251f04e 11188
6b95a207
KH
11189 spin_unlock_irqrestore(&dev->event_lock, flags);
11190}
11191
5a21b665
DV
11192static inline void intel_mark_page_flip_active(struct intel_crtc *crtc,
11193 struct intel_flip_work *work)
84c33a64 11194{
5a21b665 11195 work->flip_queued_vblank = intel_crtc_get_vblank_counter(crtc);
84c33a64 11196
5a21b665
DV
11197 /* Ensure that the work item is consistent when activating it ... */
11198 smp_mb__before_atomic();
11199 atomic_set(&work->pending, 1);
11200}
a6747b73 11201
5a21b665
DV
11202static int intel_gen2_queue_flip(struct drm_device *dev,
11203 struct drm_crtc *crtc,
11204 struct drm_framebuffer *fb,
11205 struct drm_i915_gem_object *obj,
11206 struct drm_i915_gem_request *req,
11207 uint32_t flags)
11208{
11209 struct intel_engine_cs *engine = req->engine;
11210 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11211 u32 flip_mask;
11212 int ret;
143f73b3 11213
5a21b665
DV
11214 ret = intel_ring_begin(req, 6);
11215 if (ret)
11216 return ret;
143f73b3 11217
5a21b665
DV
11218 /* Can't queue multiple flips, so wait for the previous
11219 * one to finish before executing the next.
11220 */
11221 if (intel_crtc->plane)
11222 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
11223 else
11224 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
11225 intel_ring_emit(engine, MI_WAIT_FOR_EVENT | flip_mask);
11226 intel_ring_emit(engine, MI_NOOP);
11227 intel_ring_emit(engine, MI_DISPLAY_FLIP |
11228 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
11229 intel_ring_emit(engine, fb->pitches[0]);
11230 intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset);
11231 intel_ring_emit(engine, 0); /* aux display base address, unused */
143f73b3 11232
5a21b665
DV
11233 return 0;
11234}
84c33a64 11235
5a21b665
DV
11236static int intel_gen3_queue_flip(struct drm_device *dev,
11237 struct drm_crtc *crtc,
11238 struct drm_framebuffer *fb,
11239 struct drm_i915_gem_object *obj,
11240 struct drm_i915_gem_request *req,
11241 uint32_t flags)
11242{
11243 struct intel_engine_cs *engine = req->engine;
11244 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11245 u32 flip_mask;
11246 int ret;
d55dbd06 11247
5a21b665
DV
11248 ret = intel_ring_begin(req, 6);
11249 if (ret)
11250 return ret;
d55dbd06 11251
5a21b665
DV
11252 if (intel_crtc->plane)
11253 flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
11254 else
11255 flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
11256 intel_ring_emit(engine, MI_WAIT_FOR_EVENT | flip_mask);
11257 intel_ring_emit(engine, MI_NOOP);
11258 intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 |
11259 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
11260 intel_ring_emit(engine, fb->pitches[0]);
11261 intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset);
11262 intel_ring_emit(engine, MI_NOOP);
fd8e058a 11263
5a21b665
DV
11264 return 0;
11265}
84c33a64 11266
5a21b665
DV
11267static int intel_gen4_queue_flip(struct drm_device *dev,
11268 struct drm_crtc *crtc,
11269 struct drm_framebuffer *fb,
11270 struct drm_i915_gem_object *obj,
11271 struct drm_i915_gem_request *req,
11272 uint32_t flags)
11273{
11274 struct intel_engine_cs *engine = req->engine;
11275 struct drm_i915_private *dev_priv = dev->dev_private;
11276 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11277 uint32_t pf, pipesrc;
11278 int ret;
143f73b3 11279
5a21b665
DV
11280 ret = intel_ring_begin(req, 4);
11281 if (ret)
11282 return ret;
143f73b3 11283
5a21b665
DV
11284 /* i965+ uses the linear or tiled offsets from the
11285 * Display Registers (which do not change across a page-flip)
11286 * so we need only reprogram the base address.
11287 */
11288 intel_ring_emit(engine, MI_DISPLAY_FLIP |
11289 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
11290 intel_ring_emit(engine, fb->pitches[0]);
11291 intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset |
11292 obj->tiling_mode);
11293
11294 /* XXX Enabling the panel-fitter across page-flip is so far
11295 * untested on non-native modes, so ignore it for now.
11296 * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
11297 */
11298 pf = 0;
11299 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
11300 intel_ring_emit(engine, pf | pipesrc);
143f73b3 11301
5a21b665 11302 return 0;
8c9f3aaf
JB
11303}
11304
5a21b665
DV
11305static int intel_gen6_queue_flip(struct drm_device *dev,
11306 struct drm_crtc *crtc,
11307 struct drm_framebuffer *fb,
11308 struct drm_i915_gem_object *obj,
11309 struct drm_i915_gem_request *req,
11310 uint32_t flags)
da20eabd 11311{
5a21b665
DV
11312 struct intel_engine_cs *engine = req->engine;
11313 struct drm_i915_private *dev_priv = dev->dev_private;
11314 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11315 uint32_t pf, pipesrc;
11316 int ret;
d21fbe87 11317
5a21b665
DV
11318 ret = intel_ring_begin(req, 4);
11319 if (ret)
11320 return ret;
92826fcd 11321
5a21b665
DV
11322 intel_ring_emit(engine, MI_DISPLAY_FLIP |
11323 MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
11324 intel_ring_emit(engine, fb->pitches[0] | obj->tiling_mode);
11325 intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset);
92826fcd 11326
5a21b665
DV
11327 /* Contrary to the suggestions in the documentation,
11328 * "Enable Panel Fitter" does not seem to be required when page
11329 * flipping with a non-native mode, and worse causes a normal
11330 * modeset to fail.
11331 * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
11332 */
11333 pf = 0;
11334 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
11335 intel_ring_emit(engine, pf | pipesrc);
7809e5ae 11336
5a21b665 11337 return 0;
7809e5ae
MR
11338}
11339
5a21b665
DV
11340static int intel_gen7_queue_flip(struct drm_device *dev,
11341 struct drm_crtc *crtc,
11342 struct drm_framebuffer *fb,
11343 struct drm_i915_gem_object *obj,
11344 struct drm_i915_gem_request *req,
11345 uint32_t flags)
d21fbe87 11346{
5a21b665
DV
11347 struct intel_engine_cs *engine = req->engine;
11348 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11349 uint32_t plane_bit = 0;
11350 int len, ret;
d21fbe87 11351
5a21b665
DV
11352 switch (intel_crtc->plane) {
11353 case PLANE_A:
11354 plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A;
11355 break;
11356 case PLANE_B:
11357 plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B;
11358 break;
11359 case PLANE_C:
11360 plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C;
11361 break;
11362 default:
11363 WARN_ONCE(1, "unknown plane in flip command\n");
11364 return -ENODEV;
11365 }
11366
11367 len = 4;
11368 if (engine->id == RCS) {
11369 len += 6;
11370 /*
11371 * On Gen 8, SRM is now taking an extra dword to accommodate
11372 * 48bits addresses, and we need a NOOP for the batch size to
11373 * stay even.
11374 */
11375 if (IS_GEN8(dev))
11376 len += 2;
11377 }
11378
11379 /*
11380 * BSpec MI_DISPLAY_FLIP for IVB:
11381 * "The full packet must be contained within the same cache line."
11382 *
11383 * Currently the LRI+SRM+MI_DISPLAY_FLIP all fit within the same
11384 * cacheline, if we ever start emitting more commands before
11385 * the MI_DISPLAY_FLIP we may need to first emit everything else,
11386 * then do the cacheline alignment, and finally emit the
11387 * MI_DISPLAY_FLIP.
11388 */
11389 ret = intel_ring_cacheline_align(req);
11390 if (ret)
11391 return ret;
11392
11393 ret = intel_ring_begin(req, len);
11394 if (ret)
11395 return ret;
11396
11397 /* Unmask the flip-done completion message. Note that the bspec says that
11398 * we should do this for both the BCS and RCS, and that we must not unmask
11399 * more than one flip event at any time (or ensure that one flip message
11400 * can be sent by waiting for flip-done prior to queueing new flips).
11401 * Experimentation says that BCS works despite DERRMR masking all
11402 * flip-done completion events and that unmasking all planes at once
11403 * for the RCS also doesn't appear to drop events. Setting the DERRMR
11404 * to zero does lead to lockups within MI_DISPLAY_FLIP.
11405 */
11406 if (engine->id == RCS) {
11407 intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
11408 intel_ring_emit_reg(engine, DERRMR);
11409 intel_ring_emit(engine, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
11410 DERRMR_PIPEB_PRI_FLIP_DONE |
11411 DERRMR_PIPEC_PRI_FLIP_DONE));
11412 if (IS_GEN8(dev))
11413 intel_ring_emit(engine, MI_STORE_REGISTER_MEM_GEN8 |
11414 MI_SRM_LRM_GLOBAL_GTT);
11415 else
11416 intel_ring_emit(engine, MI_STORE_REGISTER_MEM |
11417 MI_SRM_LRM_GLOBAL_GTT);
11418 intel_ring_emit_reg(engine, DERRMR);
11419 intel_ring_emit(engine, engine->scratch.gtt_offset + 256);
11420 if (IS_GEN8(dev)) {
11421 intel_ring_emit(engine, 0);
11422 intel_ring_emit(engine, MI_NOOP);
11423 }
11424 }
11425
11426 intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 | plane_bit);
11427 intel_ring_emit(engine, (fb->pitches[0] | obj->tiling_mode));
11428 intel_ring_emit(engine, intel_crtc->flip_work->gtt_offset);
11429 intel_ring_emit(engine, (MI_NOOP));
11430
11431 return 0;
11432}
11433
11434static bool use_mmio_flip(struct intel_engine_cs *engine,
11435 struct drm_i915_gem_object *obj)
11436{
c37efb99
CW
11437 struct reservation_object *resv;
11438
5a21b665
DV
11439 /*
11440 * This is not being used for older platforms, because
11441 * non-availability of flip done interrupt forces us to use
11442 * CS flips. Older platforms derive flip done using some clever
11443 * tricks involving the flip_pending status bits and vblank irqs.
11444 * So using MMIO flips there would disrupt this mechanism.
11445 */
11446
11447 if (engine == NULL)
11448 return true;
11449
11450 if (INTEL_GEN(engine->i915) < 5)
11451 return false;
11452
11453 if (i915.use_mmio_flip < 0)
11454 return false;
11455 else if (i915.use_mmio_flip > 0)
11456 return true;
11457 else if (i915.enable_execlists)
11458 return true;
c37efb99
CW
11459
11460 resv = i915_gem_object_get_dmabuf_resv(obj);
11461 if (resv && !reservation_object_test_signaled_rcu(resv, false))
5a21b665 11462 return true;
c37efb99
CW
11463
11464 return engine != i915_gem_request_get_engine(obj->last_write_req);
5a21b665
DV
11465}
11466
11467static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
11468 unsigned int rotation,
11469 struct intel_flip_work *work)
11470{
11471 struct drm_device *dev = intel_crtc->base.dev;
11472 struct drm_i915_private *dev_priv = dev->dev_private;
11473 struct drm_framebuffer *fb = intel_crtc->base.primary->fb;
11474 const enum pipe pipe = intel_crtc->pipe;
11475 u32 ctl, stride, tile_height;
11476
11477 ctl = I915_READ(PLANE_CTL(pipe, 0));
11478 ctl &= ~PLANE_CTL_TILED_MASK;
11479 switch (fb->modifier[0]) {
11480 case DRM_FORMAT_MOD_NONE:
11481 break;
11482 case I915_FORMAT_MOD_X_TILED:
11483 ctl |= PLANE_CTL_TILED_X;
11484 break;
11485 case I915_FORMAT_MOD_Y_TILED:
11486 ctl |= PLANE_CTL_TILED_Y;
11487 break;
11488 case I915_FORMAT_MOD_Yf_TILED:
11489 ctl |= PLANE_CTL_TILED_YF;
11490 break;
11491 default:
11492 MISSING_CASE(fb->modifier[0]);
11493 }
11494
11495 /*
11496 * The stride is either expressed as a multiple of 64 bytes chunks for
11497 * linear buffers or in number of tiles for tiled buffers.
11498 */
11499 if (intel_rotation_90_or_270(rotation)) {
11500 /* stride = Surface height in tiles */
11501 tile_height = intel_tile_height(dev_priv, fb->modifier[0], 0);
11502 stride = DIV_ROUND_UP(fb->height, tile_height);
11503 } else {
11504 stride = fb->pitches[0] /
11505 intel_fb_stride_alignment(dev_priv, fb->modifier[0],
11506 fb->pixel_format);
11507 }
11508
11509 /*
11510 * Both PLANE_CTL and PLANE_STRIDE are not updated on vblank but on
11511 * PLANE_SURF updates, the update is then guaranteed to be atomic.
11512 */
11513 I915_WRITE(PLANE_CTL(pipe, 0), ctl);
11514 I915_WRITE(PLANE_STRIDE(pipe, 0), stride);
11515
11516 I915_WRITE(PLANE_SURF(pipe, 0), work->gtt_offset);
11517 POSTING_READ(PLANE_SURF(pipe, 0));
11518}
11519
11520static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc,
11521 struct intel_flip_work *work)
11522{
11523 struct drm_device *dev = intel_crtc->base.dev;
11524 struct drm_i915_private *dev_priv = dev->dev_private;
11525 struct intel_framebuffer *intel_fb =
11526 to_intel_framebuffer(intel_crtc->base.primary->fb);
11527 struct drm_i915_gem_object *obj = intel_fb->obj;
11528 i915_reg_t reg = DSPCNTR(intel_crtc->plane);
11529 u32 dspcntr;
11530
11531 dspcntr = I915_READ(reg);
11532
11533 if (obj->tiling_mode != I915_TILING_NONE)
11534 dspcntr |= DISPPLANE_TILED;
11535 else
11536 dspcntr &= ~DISPPLANE_TILED;
11537
11538 I915_WRITE(reg, dspcntr);
11539
11540 I915_WRITE(DSPSURF(intel_crtc->plane), work->gtt_offset);
11541 POSTING_READ(DSPSURF(intel_crtc->plane));
11542}
11543
11544static void intel_mmio_flip_work_func(struct work_struct *w)
11545{
11546 struct intel_flip_work *work =
11547 container_of(w, struct intel_flip_work, mmio_work);
11548 struct intel_crtc *crtc = to_intel_crtc(work->crtc);
11549 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
11550 struct intel_framebuffer *intel_fb =
11551 to_intel_framebuffer(crtc->base.primary->fb);
11552 struct drm_i915_gem_object *obj = intel_fb->obj;
c37efb99 11553 struct reservation_object *resv;
5a21b665
DV
11554
11555 if (work->flip_queued_req)
11556 WARN_ON(__i915_wait_request(work->flip_queued_req,
11557 false, NULL,
11558 &dev_priv->rps.mmioflips));
11559
11560 /* For framebuffer backed by dmabuf, wait for fence */
c37efb99
CW
11561 resv = i915_gem_object_get_dmabuf_resv(obj);
11562 if (resv)
11563 WARN_ON(reservation_object_wait_timeout_rcu(resv, false, false,
5a21b665
DV
11564 MAX_SCHEDULE_TIMEOUT) < 0);
11565
11566 intel_pipe_update_start(crtc);
11567
11568 if (INTEL_GEN(dev_priv) >= 9)
11569 skl_do_mmio_flip(crtc, work->rotation, work);
11570 else
11571 /* use_mmio_flip() retricts MMIO flips to ilk+ */
11572 ilk_do_mmio_flip(crtc, work);
11573
11574 intel_pipe_update_end(crtc, work);
11575}
11576
11577static int intel_default_queue_flip(struct drm_device *dev,
11578 struct drm_crtc *crtc,
11579 struct drm_framebuffer *fb,
11580 struct drm_i915_gem_object *obj,
11581 struct drm_i915_gem_request *req,
11582 uint32_t flags)
11583{
11584 return -ENODEV;
11585}
11586
11587static bool __pageflip_stall_check_cs(struct drm_i915_private *dev_priv,
11588 struct intel_crtc *intel_crtc,
11589 struct intel_flip_work *work)
11590{
11591 u32 addr, vblank;
11592
11593 if (!atomic_read(&work->pending))
11594 return false;
11595
11596 smp_rmb();
11597
11598 vblank = intel_crtc_get_vblank_counter(intel_crtc);
11599 if (work->flip_ready_vblank == 0) {
11600 if (work->flip_queued_req &&
11601 !i915_gem_request_completed(work->flip_queued_req, true))
11602 return false;
11603
11604 work->flip_ready_vblank = vblank;
11605 }
11606
11607 if (vblank - work->flip_ready_vblank < 3)
11608 return false;
11609
11610 /* Potential stall - if we see that the flip has happened,
11611 * assume a missed interrupt. */
11612 if (INTEL_GEN(dev_priv) >= 4)
11613 addr = I915_HI_DISPBASE(I915_READ(DSPSURF(intel_crtc->plane)));
11614 else
11615 addr = I915_READ(DSPADDR(intel_crtc->plane));
11616
11617 /* There is a potential issue here with a false positive after a flip
11618 * to the same address. We could address this by checking for a
11619 * non-incrementing frame counter.
11620 */
11621 return addr == work->gtt_offset;
11622}
11623
11624void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe)
11625{
11626 struct drm_device *dev = dev_priv->dev;
11627 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
11628 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11629 struct intel_flip_work *work;
11630
11631 WARN_ON(!in_interrupt());
11632
11633 if (crtc == NULL)
11634 return;
11635
11636 spin_lock(&dev->event_lock);
11637 work = intel_crtc->flip_work;
11638
11639 if (work != NULL && !is_mmio_work(work) &&
11640 __pageflip_stall_check_cs(dev_priv, intel_crtc, work)) {
11641 WARN_ONCE(1,
11642 "Kicking stuck page flip: queued at %d, now %d\n",
11643 work->flip_queued_vblank, intel_crtc_get_vblank_counter(intel_crtc));
11644 page_flip_completed(intel_crtc);
11645 work = NULL;
11646 }
11647
11648 if (work != NULL && !is_mmio_work(work) &&
11649 intel_crtc_get_vblank_counter(intel_crtc) - work->flip_queued_vblank > 1)
11650 intel_queue_rps_boost_for_request(work->flip_queued_req);
11651 spin_unlock(&dev->event_lock);
11652}
11653
/*
 * Legacy (non-atomic) page-flip entry point for the primary plane.
 *
 * Queues either a command-streamer flip (DISPLAY_FLIP via the chosen ring)
 * or an MMIO flip (worker writes the surface register), depending on
 * use_mmio_flip().  On failure every resource acquired so far is unwound
 * via the goto ladder below; a terminally wedged GPU falls back to a full
 * atomic commit (out_hang) instead of a flip.
 *
 * Returns 0 on success or a negative errno.
 */
static int intel_crtc_page_flip(struct drm_crtc *crtc,
				struct drm_framebuffer *fb,
				struct drm_pending_vblank_event *event,
				uint32_t page_flip_flags)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *old_fb = crtc->primary->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_plane *primary = crtc->primary;
	enum pipe pipe = intel_crtc->pipe;
	struct intel_flip_work *work;
	struct intel_engine_cs *engine;
	bool mmio_flip;
	struct drm_i915_gem_request *request = NULL;
	int ret;

	/*
	 * drm_mode_page_flip_ioctl() should already catch this, but double
	 * check to be safe.  In the future we may enable pageflipping from
	 * a disabled primary plane.
	 */
	if (WARN_ON(intel_fb_obj(old_fb) == NULL))
		return -EBUSY;

	/* Can't change pixel format via MI display flips. */
	if (fb->pixel_format != crtc->primary->fb->pixel_format)
		return -EINVAL;

	/*
	 * TILEOFF/LINOFF registers can't be changed via MI display flips.
	 * Note that pitch changes could also affect these register.
	 */
	if (INTEL_INFO(dev)->gen > 3 &&
	    (fb->offsets[0] != crtc->primary->fb->offsets[0] ||
	     fb->pitches[0] != crtc->primary->fb->pitches[0]))
		return -EINVAL;

	/* Wedged GPU: skip the flip machinery, do an atomic commit instead. */
	if (i915_terminally_wedged(&dev_priv->gpu_error))
		goto out_hang;

	work = kzalloc(sizeof(*work), GFP_KERNEL);
	if (work == NULL)
		return -ENOMEM;

	work->event = event;
	work->crtc = crtc;
	work->old_fb = old_fb;
	INIT_WORK(&work->unpin_work, intel_unpin_work_fn);

	ret = drm_crtc_vblank_get(crtc);
	if (ret)
		goto free_work;

	/* We borrow the event spin lock for protecting flip_work */
	spin_lock_irq(&dev->event_lock);
	if (intel_crtc->flip_work) {
		/* Before declaring the flip queue wedged, check if
		 * the hardware completed the operation behind our backs.
		 */
		if (pageflip_finished(intel_crtc, intel_crtc->flip_work)) {
			DRM_DEBUG_DRIVER("flip queue: previous flip completed, continuing\n");
			page_flip_completed(intel_crtc);
		} else {
			DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
			spin_unlock_irq(&dev->event_lock);

			drm_crtc_vblank_put(crtc);
			kfree(work);
			return -EBUSY;
		}
	}
	intel_crtc->flip_work = work;
	spin_unlock_irq(&dev->event_lock);

	/* Throttle: let pending unpin work drain before queueing more. */
	if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
		flush_workqueue(dev_priv->wq);

	/* Reference the objects for the scheduled work. */
	drm_framebuffer_reference(work->old_fb);
	drm_gem_object_reference(&obj->base);

	crtc->primary->fb = fb;
	update_state_fb(crtc->primary);

	intel_fbc_pre_update(intel_crtc, intel_crtc->config,
			     to_intel_plane_state(primary->state));

	work->pending_flip_obj = obj;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		goto cleanup;

	/* Bail if a GPU reset raced in; the flip would never complete. */
	intel_crtc->reset_counter = i915_reset_counter(&dev_priv->gpu_error);
	if (__i915_reset_in_progress_or_wedged(intel_crtc->reset_counter)) {
		ret = -EIO;
		goto cleanup;
	}

	atomic_inc(&intel_crtc->unpin_work_count);

	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
		work->flip_count = I915_READ(PIPE_FLIPCOUNT_G4X(pipe)) + 1;

	/* Pick the engine for a CS flip; NULL forces an MMIO flip. */
	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		engine = &dev_priv->engine[BCS];
		if (obj->tiling_mode != intel_fb_obj(work->old_fb)->tiling_mode)
			/* vlv: DISPLAY_FLIP fails to change tiling */
			engine = NULL;
	} else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
		engine = &dev_priv->engine[BCS];
	} else if (INTEL_INFO(dev)->gen >= 7) {
		engine = i915_gem_request_get_engine(obj->last_write_req);
		if (engine == NULL || engine->id != RCS)
			engine = &dev_priv->engine[BCS];
	} else {
		engine = &dev_priv->engine[RCS];
	}

	mmio_flip = use_mmio_flip(engine, obj);

	/* When using CS flips, we want to emit semaphores between rings.
	 * However, when using mmio flips we will create a task to do the
	 * synchronisation, so all we want here is to pin the framebuffer
	 * into the display plane and skip any waits.
	 */
	if (!mmio_flip) {
		ret = i915_gem_object_sync(obj, engine, &request);
		if (!ret && !request) {
			request = i915_gem_request_alloc(engine, NULL);
			ret = PTR_ERR_OR_ZERO(request);
		}

		if (ret)
			goto cleanup_pending;
	}

	ret = intel_pin_and_fence_fb_obj(fb, primary->state->rotation);
	if (ret)
		goto cleanup_pending;

	work->gtt_offset = intel_plane_obj_offset(to_intel_plane(primary),
						  obj, 0);
	work->gtt_offset += intel_crtc->dspaddr_offset;
	work->rotation = crtc->primary->state->rotation;

	if (mmio_flip) {
		INIT_WORK(&work->mmio_work, intel_mmio_flip_work_func);

		i915_gem_request_assign(&work->flip_queued_req,
					obj->last_write_req);

		schedule_work(&work->mmio_work);
	} else {
		i915_gem_request_assign(&work->flip_queued_req, request);
		ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, request,
						   page_flip_flags);
		if (ret)
			goto cleanup_unpin;

		intel_mark_page_flip_active(intel_crtc, work);

		i915_add_request_no_flush(request);
	}

	i915_gem_track_fb(intel_fb_obj(old_fb), obj,
			  to_intel_plane(primary)->frontbuffer_bit);
	mutex_unlock(&dev->struct_mutex);

	intel_frontbuffer_flip_prepare(dev,
				       to_intel_plane(primary)->frontbuffer_bit);

	trace_i915_flip_request(intel_crtc->plane, obj);

	return 0;

/* Error unwind: each label undoes everything acquired above it. */
cleanup_unpin:
	intel_unpin_fb_obj(fb, crtc->primary->state->rotation);
cleanup_pending:
	if (!IS_ERR_OR_NULL(request))
		i915_add_request_no_flush(request);
	atomic_dec(&intel_crtc->unpin_work_count);
	mutex_unlock(&dev->struct_mutex);
cleanup:
	crtc->primary->fb = old_fb;
	update_state_fb(crtc->primary);

	drm_gem_object_unreference_unlocked(&obj->base);
	drm_framebuffer_unreference(work->old_fb);

	spin_lock_irq(&dev->event_lock);
	intel_crtc->flip_work = NULL;
	spin_unlock_irq(&dev->event_lock);

	drm_crtc_vblank_put(crtc);
free_work:
	kfree(work);

	if (ret == -EIO) {
		struct drm_atomic_state *state;
		struct drm_plane_state *plane_state;

		/* NOTE: out_hang jumps into this if-body from the wedged
		 * check near the top; 'ret' is then set by the code below. */
out_hang:
		state = drm_atomic_state_alloc(dev);
		if (!state)
			return -ENOMEM;
		state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);

retry:
		plane_state = drm_atomic_get_plane_state(state, primary);
		ret = PTR_ERR_OR_ZERO(plane_state);
		if (!ret) {
			drm_atomic_set_fb_for_plane(plane_state, fb);

			ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
			if (!ret)
				ret = drm_atomic_commit(state);
		}

		if (ret == -EDEADLK) {
			drm_modeset_backoff(state->acquire_ctx);
			drm_atomic_state_clear(state);
			goto retry;
		}

		if (ret)
			drm_atomic_state_free(state);

		if (ret == 0 && event) {
			spin_lock_irq(&dev->event_lock);
			drm_crtc_send_vblank_event(crtc, event);
			spin_unlock_irq(&dev->event_lock);
		}
	}
	return ret;
}
11892
11893
11894/**
11895 * intel_wm_need_update - Check whether watermarks need updating
11896 * @plane: drm plane
11897 * @state: new plane state
11898 *
11899 * Check current plane state versus the new one to determine whether
11900 * watermarks need to be recalculated.
11901 *
11902 * Returns true or false.
11903 */
11904static bool intel_wm_need_update(struct drm_plane *plane,
11905 struct drm_plane_state *state)
11906{
11907 struct intel_plane_state *new = to_intel_plane_state(state);
11908 struct intel_plane_state *cur = to_intel_plane_state(plane->state);
11909
11910 /* Update watermarks on tiling or size changes. */
11911 if (new->visible != cur->visible)
11912 return true;
11913
11914 if (!cur->base.fb || !new->base.fb)
11915 return false;
11916
11917 if (cur->base.fb->modifier[0] != new->base.fb->modifier[0] ||
11918 cur->base.rotation != new->base.rotation ||
11919 drm_rect_width(&new->src) != drm_rect_width(&cur->src) ||
11920 drm_rect_height(&new->src) != drm_rect_height(&cur->src) ||
11921 drm_rect_width(&new->dst) != drm_rect_width(&cur->dst) ||
11922 drm_rect_height(&new->dst) != drm_rect_height(&cur->dst))
11923 return true;
11924
11925 return false;
11926}
11927
11928static bool needs_scaling(struct intel_plane_state *state)
11929{
11930 int src_w = drm_rect_width(&state->src) >> 16;
11931 int src_h = drm_rect_height(&state->src) >> 16;
11932 int dst_w = drm_rect_width(&state->dst);
11933 int dst_h = drm_rect_height(&state->dst);
11934
11935 return (src_w != dst_w || src_h != dst_h);
11936}
d21fbe87 11937
da20eabd
ML
/*
 * intel_plane_atomic_calc_changes - compute derived crtc state for a plane
 * @crtc_state: new crtc state the plane belongs to
 * @plane_state: new plane state
 *
 * Derives per-crtc flags (watermark pre/post updates, cxsr disable,
 * frontbuffer bits, LP watermark disable) from the difference between the
 * committed plane state and @plane_state, and runs the gen9+ plane scaler
 * check.
 *
 * Returns 0 on success or a negative errno from the scaler setup.
 */
int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
				    struct drm_plane_state *plane_state)
{
	struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc_state);
	struct drm_crtc *crtc = crtc_state->crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_plane *plane = plane_state->plane;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_plane_state *old_plane_state =
		to_intel_plane_state(plane->state);
	bool mode_changed = needs_modeset(crtc_state);
	bool was_crtc_enabled = crtc->state->active;
	bool is_crtc_enabled = crtc_state->active;
	bool turn_off, turn_on, visible, was_visible;
	struct drm_framebuffer *fb = plane_state->fb;
	int ret;

	/* gen9+: (re)allocate a pipe scaler for every non-cursor plane. */
	if (crtc_state && INTEL_INFO(dev)->gen >= 9 &&
	    plane->type != DRM_PLANE_TYPE_CURSOR) {
		ret = skl_update_scaler_plane(
			to_intel_crtc_state(crtc_state),
			to_intel_plane_state(plane_state));
		if (ret)
			return ret;
	}

	was_visible = old_plane_state->visible;
	visible = to_intel_plane_state(plane_state)->visible;

	/* A plane can't have been visible on a disabled crtc. */
	if (!was_crtc_enabled && WARN_ON(was_visible))
		was_visible = false;

	/*
	 * Visibility is calculated as if the crtc was on, but
	 * after scaler setup everything depends on it being off
	 * when the crtc isn't active.
	 *
	 * FIXME this is wrong for watermarks. Watermarks should also
	 * be computed as if the pipe would be active. Perhaps move
	 * per-plane wm computation to the .check_plane() hook, and
	 * only combine the results from all planes in the current place?
	 */
	if (!is_crtc_enabled)
		to_intel_plane_state(plane_state)->visible = visible = false;

	/* Invisible before and after: nothing to derive. */
	if (!was_visible && !visible)
		return 0;

	if (fb != old_plane_state->base.fb)
		pipe_config->fb_changed = true;

	turn_off = was_visible && (!visible || mode_changed);
	turn_on = visible && (!was_visible || mode_changed);

	DRM_DEBUG_ATOMIC("[CRTC:%d:%s] has [PLANE:%d:%s] with fb %i\n",
			 intel_crtc->base.base.id,
			 intel_crtc->base.name,
			 plane->base.id, plane->name,
			 fb ? fb->base.id : -1);

	DRM_DEBUG_ATOMIC("[PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
			 plane->base.id, plane->name,
			 was_visible, visible,
			 turn_off, turn_on, mode_changed);

	if (turn_on) {
		pipe_config->update_wm_pre = true;

		/* must disable cxsr around plane enable/disable */
		if (plane->type != DRM_PLANE_TYPE_CURSOR)
			pipe_config->disable_cxsr = true;
	} else if (turn_off) {
		pipe_config->update_wm_post = true;

		/* must disable cxsr around plane enable/disable */
		if (plane->type != DRM_PLANE_TYPE_CURSOR)
			pipe_config->disable_cxsr = true;
	} else if (intel_wm_need_update(plane, plane_state)) {
		/* FIXME bollocks */
		pipe_config->update_wm_pre = true;
		pipe_config->update_wm_post = true;
	}

	/* Pre-gen9 platforms need two-step watermark updates */
	if ((pipe_config->update_wm_pre || pipe_config->update_wm_post) &&
	    INTEL_INFO(dev)->gen < 9 && dev_priv->display.optimize_watermarks)
		to_intel_crtc_state(crtc_state)->wm.need_postvbl_update = true;

	if (visible || was_visible)
		pipe_config->fb_bits |= to_intel_plane(plane)->frontbuffer_bit;

	/*
	 * WaCxSRDisabledForSpriteScaling:ivb
	 *
	 * cstate->update_wm was already set above, so this flag will
	 * take effect when we commit and program watermarks.
	 */
	if (plane->type == DRM_PLANE_TYPE_OVERLAY && IS_IVYBRIDGE(dev) &&
	    needs_scaling(to_intel_plane_state(plane_state)) &&
	    !needs_scaling(old_plane_state))
		pipe_config->disable_lp_wm = true;

	return 0;
}
12043
6d3a1ce7
ML
12044static bool encoders_cloneable(const struct intel_encoder *a,
12045 const struct intel_encoder *b)
12046{
12047 /* masks could be asymmetric, so check both ways */
12048 return a == b || (a->cloneable & (1 << b->type) &&
12049 b->cloneable & (1 << a->type));
12050}
12051
12052static bool check_single_encoder_cloning(struct drm_atomic_state *state,
12053 struct intel_crtc *crtc,
12054 struct intel_encoder *encoder)
12055{
12056 struct intel_encoder *source_encoder;
12057 struct drm_connector *connector;
12058 struct drm_connector_state *connector_state;
12059 int i;
12060
12061 for_each_connector_in_state(state, connector, connector_state, i) {
12062 if (connector_state->crtc != &crtc->base)
12063 continue;
12064
12065 source_encoder =
12066 to_intel_encoder(connector_state->best_encoder);
12067 if (!encoders_cloneable(encoder, source_encoder))
12068 return false;
12069 }
12070
12071 return true;
12072}
12073
12074static bool check_encoder_cloning(struct drm_atomic_state *state,
12075 struct intel_crtc *crtc)
12076{
12077 struct intel_encoder *encoder;
12078 struct drm_connector *connector;
12079 struct drm_connector_state *connector_state;
12080 int i;
12081
12082 for_each_connector_in_state(state, connector, connector_state, i) {
12083 if (connector_state->crtc != &crtc->base)
12084 continue;
12085
12086 encoder = to_intel_encoder(connector_state->best_encoder);
12087 if (!check_single_encoder_cloning(state, crtc, encoder))
12088 return false;
12089 }
12090
12091 return true;
12092}
12093
/*
 * Atomic .atomic_check hook for intel CRTCs: validates encoder cloning,
 * computes clocks, color management, watermarks and (gen9+) scaler state
 * for the proposed @crtc_state.
 *
 * Returns 0 on success or a negative errno to reject the state.
 */
static int intel_crtc_atomic_check(struct drm_crtc *crtc,
				   struct drm_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *pipe_config =
		to_intel_crtc_state(crtc_state);
	struct drm_atomic_state *state = crtc_state->state;
	int ret;
	bool mode_changed = needs_modeset(crtc_state);

	if (mode_changed && !check_encoder_cloning(state, intel_crtc)) {
		DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
		return -EINVAL;
	}

	/* A modeset that disables the pipe still needs a wm update after. */
	if (mode_changed && !crtc_state->active)
		pipe_config->update_wm_post = true;

	if (mode_changed && crtc_state->enable &&
	    dev_priv->display.crtc_compute_clock &&
	    !WARN_ON(pipe_config->shared_dpll)) {
		ret = dev_priv->display.crtc_compute_clock(intel_crtc,
							   pipe_config);
		if (ret)
			return ret;
	}

	if (crtc_state->color_mgmt_changed) {
		ret = intel_color_check(crtc, crtc_state);
		if (ret)
			return ret;
	}

	ret = 0;
	if (dev_priv->display.compute_pipe_wm) {
		ret = dev_priv->display.compute_pipe_wm(pipe_config);
		if (ret) {
			DRM_DEBUG_KMS("Target pipe watermarks are invalid\n");
			return ret;
		}
	}

	if (dev_priv->display.compute_intermediate_wm &&
	    !to_intel_atomic_state(state)->skip_intermediate_wm) {
		if (WARN_ON(!dev_priv->display.compute_pipe_wm))
			return 0;

		/*
		 * Calculate 'intermediate' watermarks that satisfy both the
		 * old state and the new state.  We can program these
		 * immediately.
		 */
		ret = dev_priv->display.compute_intermediate_wm(crtc->dev,
								intel_crtc,
								pipe_config);
		if (ret) {
			DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n");
			return ret;
		}
	} else if (dev_priv->display.compute_intermediate_wm) {
		/* skip_intermediate_wm: reuse the optimal wm on ILK-style hw. */
		if (HAS_PCH_SPLIT(dev_priv) && INTEL_GEN(dev_priv) < 9)
			pipe_config->wm.ilk.intermediate = pipe_config->wm.ilk.optimal;
	}

	if (INTEL_INFO(dev)->gen >= 9) {
		if (mode_changed)
			ret = skl_update_scaler_crtc(pipe_config);

		if (!ret)
			ret = intel_atomic_setup_scalers(dev, intel_crtc,
							 pipe_config);
	}

	return ret;
}
12171
/* CRTC helper vtable: legacy base-update hook plus the atomic
 * check/begin/flush callbacks used by the atomic commit machinery. */
static const struct drm_crtc_helper_funcs intel_helper_funcs = {
	.mode_set_base_atomic = intel_pipe_set_base_atomic,
	.atomic_begin = intel_begin_crtc_commit,
	.atomic_flush = intel_finish_crtc_commit,
	.atomic_check = intel_crtc_atomic_check,
};
12178
d29b2f9d
ACO
/*
 * Sync each connector's atomic state (best_encoder/crtc) with its current
 * legacy encoder binding, dropping the old state's connector reference and
 * taking a new one when the connector stays bound.
 */
static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
{
	struct intel_connector *connector;

	for_each_intel_connector(dev, connector) {
		/* Drop the reference held for the previous crtc binding. */
		if (connector->base.state->crtc)
			drm_connector_unreference(&connector->base);

		if (connector->base.encoder) {
			connector->base.state->best_encoder =
				connector->base.encoder;
			connector->base.state->crtc =
				connector->base.encoder->crtc;

			/* Bound state holds a reference on the connector. */
			drm_connector_reference(&connector->base);
		} else {
			connector->base.state->best_encoder = NULL;
			connector->base.state->crtc = NULL;
		}
	}
}
12200
050f7aeb 12201static void
eba905b2 12202connected_sink_compute_bpp(struct intel_connector *connector,
5cec258b 12203 struct intel_crtc_state *pipe_config)
050f7aeb
DV
12204{
12205 int bpp = pipe_config->pipe_bpp;
12206
12207 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] checking for sink bpp constrains\n",
12208 connector->base.base.id,
c23cc417 12209 connector->base.name);
050f7aeb
DV
12210
12211 /* Don't use an invalid EDID bpc value */
12212 if (connector->base.display_info.bpc &&
12213 connector->base.display_info.bpc * 3 < bpp) {
12214 DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n",
12215 bpp, connector->base.display_info.bpc*3);
12216 pipe_config->pipe_bpp = connector->base.display_info.bpc*3;
12217 }
12218
013dd9e0
JN
12219 /* Clamp bpp to default limit on screens without EDID 1.4 */
12220 if (connector->base.display_info.bpc == 0) {
12221 int type = connector->base.connector_type;
12222 int clamp_bpp = 24;
12223
12224 /* Fall back to 18 bpp when DP sink capability is unknown. */
12225 if (type == DRM_MODE_CONNECTOR_DisplayPort ||
12226 type == DRM_MODE_CONNECTOR_eDP)
12227 clamp_bpp = 18;
12228
12229 if (bpp > clamp_bpp) {
12230 DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of %d\n",
12231 bpp, clamp_bpp);
12232 pipe_config->pipe_bpp = clamp_bpp;
12233 }
050f7aeb
DV
12234 }
12235}
12236
4e53c2e0 12237static int
050f7aeb 12238compute_baseline_pipe_bpp(struct intel_crtc *crtc,
5cec258b 12239 struct intel_crtc_state *pipe_config)
4e53c2e0 12240{
050f7aeb 12241 struct drm_device *dev = crtc->base.dev;
1486017f 12242 struct drm_atomic_state *state;
da3ced29
ACO
12243 struct drm_connector *connector;
12244 struct drm_connector_state *connector_state;
1486017f 12245 int bpp, i;
4e53c2e0 12246
666a4537 12247 if ((IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)))
4e53c2e0 12248 bpp = 10*3;
d328c9d7
DV
12249 else if (INTEL_INFO(dev)->gen >= 5)
12250 bpp = 12*3;
12251 else
12252 bpp = 8*3;
12253
4e53c2e0 12254
4e53c2e0
DV
12255 pipe_config->pipe_bpp = bpp;
12256
1486017f
ACO
12257 state = pipe_config->base.state;
12258
4e53c2e0 12259 /* Clamp display bpp to EDID value */
da3ced29
ACO
12260 for_each_connector_in_state(state, connector, connector_state, i) {
12261 if (connector_state->crtc != &crtc->base)
4e53c2e0
DV
12262 continue;
12263
da3ced29
ACO
12264 connected_sink_compute_bpp(to_intel_connector(connector),
12265 pipe_config);
4e53c2e0
DV
12266 }
12267
12268 return bpp;
12269}
12270
644db711
DV
/* Dump the hardware-facing (crtc_*) timing fields of @mode to the
 * kernel debug log, in htotal-then-vtotal order. */
static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
{
	DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
		      "type: 0x%x flags: 0x%x\n",
		      mode->crtc_clock,
		      mode->crtc_hdisplay, mode->crtc_hsync_start,
		      mode->crtc_hsync_end, mode->crtc_htotal,
		      mode->crtc_vdisplay, mode->crtc_vsync_start,
		      mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags);
}
12281
/*
 * Dump a full intel_crtc_state to the kernel debug log: link m/n values,
 * modes/timings, pfit and scaler state, the platform-specific DPLL
 * hardware state, and the state of every plane on this crtc's pipe.
 * @context is a short caller-supplied tag included in the header line.
 */
static void intel_dump_pipe_config(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config,
				   const char *context)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_plane *plane;
	struct intel_plane *intel_plane;
	struct intel_plane_state *state;
	struct drm_framebuffer *fb;

	DRM_DEBUG_KMS("[CRTC:%d:%s]%s config %p for pipe %c\n",
		      crtc->base.base.id, crtc->base.name,
		      context, pipe_config, pipe_name(crtc->pipe));

	DRM_DEBUG_KMS("cpu_transcoder: %s\n", transcoder_name(pipe_config->cpu_transcoder));
	DRM_DEBUG_KMS("pipe bpp: %i, dithering: %i\n",
		      pipe_config->pipe_bpp, pipe_config->dither);
	DRM_DEBUG_KMS("fdi/pch: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
		      pipe_config->has_pch_encoder,
		      pipe_config->fdi_lanes,
		      pipe_config->fdi_m_n.gmch_m, pipe_config->fdi_m_n.gmch_n,
		      pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n,
		      pipe_config->fdi_m_n.tu);
	DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
		      pipe_config->has_dp_encoder,
		      pipe_config->lane_count,
		      pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n,
		      pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n,
		      pipe_config->dp_m_n.tu);

	/* Second set of m/n values (m2_n2), used e.g. for DRRS. */
	DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m2: %u, gmch_n2: %u, link_m2: %u, link_n2: %u, tu2: %u\n",
		      pipe_config->has_dp_encoder,
		      pipe_config->lane_count,
		      pipe_config->dp_m2_n2.gmch_m,
		      pipe_config->dp_m2_n2.gmch_n,
		      pipe_config->dp_m2_n2.link_m,
		      pipe_config->dp_m2_n2.link_n,
		      pipe_config->dp_m2_n2.tu);

	DRM_DEBUG_KMS("audio: %i, infoframes: %i\n",
		      pipe_config->has_audio,
		      pipe_config->has_infoframe);

	DRM_DEBUG_KMS("requested mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->base.mode);
	DRM_DEBUG_KMS("adjusted mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->base.adjusted_mode);
	intel_dump_crtc_timings(&pipe_config->base.adjusted_mode);
	DRM_DEBUG_KMS("port clock: %d\n", pipe_config->port_clock);
	DRM_DEBUG_KMS("pipe src size: %dx%d\n",
		      pipe_config->pipe_src_w, pipe_config->pipe_src_h);
	DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
		      crtc->num_scalers,
		      pipe_config->scaler_state.scaler_users,
		      pipe_config->scaler_state.scaler_id);
	DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
		      pipe_config->gmch_pfit.control,
		      pipe_config->gmch_pfit.pgm_ratios,
		      pipe_config->gmch_pfit.lvds_border_bits);
	DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
		      pipe_config->pch_pfit.pos,
		      pipe_config->pch_pfit.size,
		      pipe_config->pch_pfit.enabled ? "enabled" : "disabled");
	DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled);
	DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide);

	/* DPLL hardware state layout differs per platform generation. */
	if (IS_BROXTON(dev)) {
		DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
			      "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
			      "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
			      pipe_config->ddi_pll_sel,
			      pipe_config->dpll_hw_state.ebb0,
			      pipe_config->dpll_hw_state.ebb4,
			      pipe_config->dpll_hw_state.pll0,
			      pipe_config->dpll_hw_state.pll1,
			      pipe_config->dpll_hw_state.pll2,
			      pipe_config->dpll_hw_state.pll3,
			      pipe_config->dpll_hw_state.pll6,
			      pipe_config->dpll_hw_state.pll8,
			      pipe_config->dpll_hw_state.pll9,
			      pipe_config->dpll_hw_state.pll10,
			      pipe_config->dpll_hw_state.pcsdw12);
	} else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
		DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: "
			      "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
			      pipe_config->ddi_pll_sel,
			      pipe_config->dpll_hw_state.ctrl1,
			      pipe_config->dpll_hw_state.cfgcr1,
			      pipe_config->dpll_hw_state.cfgcr2);
	} else if (HAS_DDI(dev)) {
		DRM_DEBUG_KMS("ddi_pll_sel: 0x%x; dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
			      pipe_config->ddi_pll_sel,
			      pipe_config->dpll_hw_state.wrpll,
			      pipe_config->dpll_hw_state.spll);
	} else {
		DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
			      "fp0: 0x%x, fp1: 0x%x\n",
			      pipe_config->dpll_hw_state.dpll,
			      pipe_config->dpll_hw_state.dpll_md,
			      pipe_config->dpll_hw_state.fp0,
			      pipe_config->dpll_hw_state.fp1);
	}

	DRM_DEBUG_KMS("planes on this crtc\n");
	list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
		intel_plane = to_intel_plane(plane);
		if (intel_plane->pipe != crtc->pipe)
			continue;

		state = to_intel_plane_state(plane->state);
		fb = state->base.fb;
		if (!fb) {
			DRM_DEBUG_KMS("[PLANE:%d:%s] disabled, scaler_id = %d\n",
				      plane->base.id, plane->name, state->scaler_id);
			continue;
		}

		DRM_DEBUG_KMS("[PLANE:%d:%s] enabled",
			      plane->base.id, plane->name);
		DRM_DEBUG_KMS("\tFB:%d, fb = %ux%u format = %s",
			      fb->base.id, fb->width, fb->height,
			      drm_get_format_name(fb->pixel_format));
		DRM_DEBUG_KMS("\tscaler:%d src %dx%d+%d+%d dst %dx%d+%d+%d\n",
			      state->scaler_id,
			      state->src.x1 >> 16, state->src.y1 >> 16,
			      drm_rect_width(&state->src) >> 16,
			      drm_rect_height(&state->src) >> 16,
			      state->dst.x1, state->dst.y1,
			      drm_rect_width(&state->dst),
			      drm_rect_height(&state->dst));
	}
}
12414
/*
 * Reject atomic states that route more than one digital output through the
 * same physical port.  Returns true when the configuration is valid.
 */
static bool check_digital_port_conflicts(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_connector *connector;
	unsigned int used_ports = 0;

	/*
	 * Walk the connector list instead of the encoder
	 * list to detect the problem on ddi platforms
	 * where there's just one encoder per digital port.
	 */
	drm_for_each_connector(connector, dev) {
		struct drm_connector_state *connector_state;
		struct intel_encoder *encoder;

		/* Prefer the new state from @state, fall back to current. */
		connector_state = drm_atomic_get_existing_connector_state(state, connector);
		if (!connector_state)
			connector_state = connector->state;

		if (!connector_state->best_encoder)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		WARN_ON(!connector_state->crtc);

		switch (encoder->type) {
		unsigned int port_mask;
		case INTEL_OUTPUT_UNKNOWN:
			if (WARN_ON(!HAS_DDI(dev)))
				break;
			/* fall through - DDI "unknown" outputs still occupy a port */
		case INTEL_OUTPUT_DISPLAYPORT:
		case INTEL_OUTPUT_HDMI:
		case INTEL_OUTPUT_EDP:
			port_mask = 1 << enc_to_dig_port(&encoder->base)->port;

			/* the same port mustn't appear more than once */
			if (used_ports & port_mask)
				return false;

			used_ports |= port_mask;
			/* fall through */
		default:
			break;
		}
	}

	return true;
}
12463
83a57153
ACO
/*
 * Zero @crtc_state while preserving the fields that must survive a
 * recompute: the base drm state, scaler state, shared DPLL selection and
 * its hardware state, the DDI PLL select, and the pch pfit force_thru
 * flag.  Anything not saved below is wiped by the memset.
 */
static void
clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
{
	struct drm_crtc_state tmp_state;
	struct intel_crtc_scaler_state scaler_state;
	struct intel_dpll_hw_state dpll_hw_state;
	struct intel_shared_dpll *shared_dpll;
	uint32_t ddi_pll_sel;
	bool force_thru;

	/* FIXME: before the switch to atomic started, a new pipe_config was
	 * kzalloc'd. Code that depends on any field being zero should be
	 * fixed, so that the crtc_state can be safely duplicated. For now,
	 * only fields that are know to not cause problems are preserved. */

	/* Save the fields that must survive... */
	tmp_state = crtc_state->base;
	scaler_state = crtc_state->scaler_state;
	shared_dpll = crtc_state->shared_dpll;
	dpll_hw_state = crtc_state->dpll_hw_state;
	ddi_pll_sel = crtc_state->ddi_pll_sel;
	force_thru = crtc_state->pch_pfit.force_thru;

	/* ...wipe everything... */
	memset(crtc_state, 0, sizeof *crtc_state);

	/* ...and restore them. */
	crtc_state->base = tmp_state;
	crtc_state->scaler_state = scaler_state;
	crtc_state->shared_dpll = shared_dpll;
	crtc_state->dpll_hw_state = dpll_hw_state;
	crtc_state->ddi_pll_sel = ddi_pll_sel;
	crtc_state->pch_pfit.force_thru = force_thru;
}
12495
/*
 * intel_modeset_pipe_config - compute the full software pipe configuration
 * @crtc: the CRTC being configured
 * @pipe_config: state to fill; its base.state links back to the atomic state
 *
 * Computes @pipe_config for a modeset: sanitizes sync flags, derives the
 * baseline pipe bpp, lets every connected encoder adjust the mode, and runs
 * the CRTC fixup.  Encoders may request one retry (RETRY) when bandwidth
 * constraints force a different config; a second request is treated as a bug.
 *
 * Returns 0 on success, a negative errno on failure.
 */
static int
intel_modeset_pipe_config(struct drm_crtc *crtc,
			  struct intel_crtc_state *pipe_config)
{
	struct drm_atomic_state *state = pipe_config->base.state;
	struct intel_encoder *encoder;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int base_bpp, ret = -EINVAL;
	int i;
	bool retry = true;

	/* Wipe everything except the fields that survive a modeset. */
	clear_intel_crtc_state(pipe_config);

	pipe_config->cpu_transcoder =
		(enum transcoder) to_intel_crtc(crtc)->pipe;

	/*
	 * Sanitize sync polarity flags based on requested ones. If neither
	 * positive or negative polarity is requested, treat this as meaning
	 * negative polarity.
	 */
	if (!(pipe_config->base.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;

	if (!(pipe_config->base.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;

	base_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
					     pipe_config);
	if (base_bpp < 0)
		goto fail;

	/*
	 * Determine the real pipe dimensions. Note that stereo modes can
	 * increase the actual pipe size due to the frame doubling and
	 * insertion of additional space for blanks between the frame. This
	 * is stored in the crtc timings. We use the requested mode to do this
	 * computation to clearly distinguish it from the adjusted mode, which
	 * can be changed by the connectors in the below retry loop.
	 */
	drm_crtc_get_hv_timing(&pipe_config->base.mode,
			       &pipe_config->pipe_src_w,
			       &pipe_config->pipe_src_h);

encoder_retry:
	/* Ensure the port clock defaults are reset when retrying. */
	pipe_config->port_clock = 0;
	pipe_config->pixel_multiplier = 1;

	/* Fill in default crtc timings, allow encoders to overwrite them. */
	drm_mode_set_crtcinfo(&pipe_config->base.adjusted_mode,
			      CRTC_STEREO_DOUBLE);

	/* Pass our mode to the connectors and the CRTC to give them a chance to
	 * adjust it according to limitations or connector properties, and also
	 * a chance to reject the mode entirely.
	 */
	for_each_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != crtc)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		if (!(encoder->compute_config(encoder, pipe_config))) {
			DRM_DEBUG_KMS("Encoder config failure\n");
			goto fail;
		}
	}

	/* Set default port clock if not overwritten by the encoder. Needs to be
	 * done afterwards in case the encoder adjusts the mode. */
	if (!pipe_config->port_clock)
		pipe_config->port_clock = pipe_config->base.adjusted_mode.crtc_clock
			* pipe_config->pixel_multiplier;

	ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
	if (ret < 0) {
		DRM_DEBUG_KMS("CRTC fixup failed\n");
		goto fail;
	}

	if (ret == RETRY) {
		/* Only one retry is allowed; a second RETRY means the
		 * computation cannot converge. */
		if (WARN(!retry, "loop in pipe configuration computation\n")) {
			ret = -EINVAL;
			goto fail;
		}

		DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
		retry = false;
		goto encoder_retry;
	}

	/* Dithering seems to not pass-through bits correctly when it should, so
	 * only enable it on 6bpc panels. */
	pipe_config->dither = pipe_config->pipe_bpp == 6*3;
	DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
		      base_bpp, pipe_config->pipe_bpp, pipe_config->dither);

fail:
	return ret;
}
47f1c6c9 12600
/*
 * intel_modeset_update_crtc_state - sync legacy CRTC fields with atomic state
 * @state: the atomic state being committed
 *
 * For every CRTC touched by @state, repoints the intel_crtc config at the
 * committed state, refreshes crtc->hwmode (used by the vblank code), and
 * mirrors the primary plane's fb/position into the legacy crtc fields.
 */
static void
intel_modeset_update_crtc_state(struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int i;

	/* Double check state. */
	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		to_intel_crtc(crtc)->config = to_intel_crtc_state(crtc->state);

		/* Update hwmode for vblank functions */
		if (crtc->state->active)
			crtc->hwmode = crtc->state->adjusted_mode;
		else
			crtc->hwmode.crtc_clock = 0;

		/*
		 * Update legacy state to satisfy fbc code. This can
		 * be removed when fbc uses the atomic state.
		 */
		if (drm_atomic_get_existing_plane_state(state, crtc->primary)) {
			struct drm_plane_state *plane_state = crtc->primary->state;

			crtc->primary->fb = plane_state->fb;
			/* src_x/src_y are 16.16 fixed point; keep the integer part. */
			crtc->x = plane_state->src_x >> 16;
			crtc->y = plane_state->src_y >> 16;
		}
	}
}
12631
/*
 * intel_fuzzy_clock_check - compare two clocks with a small tolerance
 * @clock1: first clock (same unit as @clock2)
 * @clock2: second clock
 *
 * Returns true when the clocks are equal, or when their difference is
 * below 5% of their sum (integer arithmetic, rounding down).  A zero
 * clock never fuzzily matches a non-zero one.
 */
static bool intel_fuzzy_clock_check(int clock1, int clock2)
{
	int sum, delta;

	if (clock1 == clock2)
		return true;
	if (clock1 == 0 || clock2 == 0)
		return false;

	sum = clock1 + clock2;
	delta = abs(clock1 - clock2);

	/* Equivalent to delta/sum < 5%, evaluated as 100 + delta*100/sum. */
	return (delta + sum) * 100 / sum < 105;
}
12649
/*
 * for_each_intel_crtc_masked - iterate over the intel_crtcs whose pipe
 * bit is set in @mask.
 * @dev: drm device whose mode_config.crtc_list is walked
 * @mask: bitmask indexed by (1 << pipe)
 * @intel_crtc: loop cursor
 */
#define for_each_intel_crtc_masked(dev, mask, intel_crtc) \
	list_for_each_entry((intel_crtc), \
			    &(dev)->mode_config.crtc_list, \
			    base.head) \
		for_each_if (mask & (1 <<(intel_crtc)->pipe))
cfb23ed6
ML
12656static bool
12657intel_compare_m_n(unsigned int m, unsigned int n,
12658 unsigned int m2, unsigned int n2,
12659 bool exact)
12660{
12661 if (m == m2 && n == n2)
12662 return true;
12663
12664 if (exact || !m || !n || !m2 || !n2)
12665 return false;
12666
12667 BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
12668
31d10b57
ML
12669 if (n > n2) {
12670 while (n > n2) {
cfb23ed6
ML
12671 m2 <<= 1;
12672 n2 <<= 1;
12673 }
31d10b57
ML
12674 } else if (n < n2) {
12675 while (n < n2) {
cfb23ed6
ML
12676 m <<= 1;
12677 n <<= 1;
12678 }
12679 }
12680
31d10b57
ML
12681 if (n != n2)
12682 return false;
12683
12684 return intel_fuzzy_clock_check(m, m2);
cfb23ed6
ML
12685}
12686
12687static bool
12688intel_compare_link_m_n(const struct intel_link_m_n *m_n,
12689 struct intel_link_m_n *m2_n2,
12690 bool adjust)
12691{
12692 if (m_n->tu == m2_n2->tu &&
12693 intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
12694 m2_n2->gmch_m, m2_n2->gmch_n, !adjust) &&
12695 intel_compare_m_n(m_n->link_m, m_n->link_n,
12696 m2_n2->link_m, m2_n2->link_n, !adjust)) {
12697 if (adjust)
12698 *m2_n2 = *m_n;
12699
12700 return true;
12701 }
12702
12703 return false;
12704}
12705
/*
 * intel_pipe_config_compare - cross-check two pipe configurations
 * @dev: drm device (used for platform/gen checks)
 * @current_config: the reference (sw) configuration
 * @pipe_config: the configuration under test (hw readout)
 * @adjust: when true, run in fastset mode: mismatches are only logged at
 *          debug level and the M/N comparisons may fix up @pipe_config
 *          (via intel_compare_link_m_n) instead of failing
 *
 * Returns true when the configurations match.  All checks run even after
 * the first mismatch so every difference gets logged.
 */
static bool
intel_pipe_config_compare(struct drm_device *dev,
			  struct intel_crtc_state *current_config,
			  struct intel_crtc_state *pipe_config,
			  bool adjust)
{
	bool ret = true;

/* Error level for a real verification failure, debug level for fastset. */
#define INTEL_ERR_OR_DBG_KMS(fmt, ...) \
	do { \
		if (!adjust) \
			DRM_ERROR(fmt, ##__VA_ARGS__); \
		else \
			DRM_DEBUG_KMS(fmt, ##__VA_ARGS__); \
	} while (0)

/* Compare a field, reporting it in hex. */
#define PIPE_CONF_CHECK_X(name)	\
	if (current_config->name != pipe_config->name) { \
		INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
			  "(expected 0x%08x, found 0x%08x)\n", \
			  current_config->name, \
			  pipe_config->name); \
		ret = false; \
	}

/* Compare a field, reporting it as a decimal integer. */
#define PIPE_CONF_CHECK_I(name)	\
	if (current_config->name != pipe_config->name) { \
		INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
			  "(expected %i, found %i)\n", \
			  current_config->name, \
			  pipe_config->name); \
		ret = false; \
	}

/* Compare a pointer-valued field. */
#define PIPE_CONF_CHECK_P(name)	\
	if (current_config->name != pipe_config->name) { \
		INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
			  "(expected %p, found %p)\n", \
			  current_config->name, \
			  pipe_config->name); \
		ret = false; \
	}

/* Compare an intel_link_m_n tuple (fuzzy + fix-up in adjust mode). */
#define PIPE_CONF_CHECK_M_N(name) \
	if (!intel_compare_link_m_n(&current_config->name, \
				    &pipe_config->name,\
				    adjust)) { \
		INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
			  "(expected tu %i gmch %i/%i link %i/%i, " \
			  "found tu %i, gmch %i/%i link %i/%i)\n", \
			  current_config->name.tu, \
			  current_config->name.gmch_m, \
			  current_config->name.gmch_n, \
			  current_config->name.link_m, \
			  current_config->name.link_n, \
			  pipe_config->name.tu, \
			  pipe_config->name.gmch_m, \
			  pipe_config->name.gmch_n, \
			  pipe_config->name.link_m, \
			  pipe_config->name.link_n); \
		ret = false; \
	}

/* This is required for BDW+ where there is only one set of registers for
 * switching between high and low RR.
 * This macro can be used whenever a comparison has to be made between one
 * hw state and multiple sw state variables.
 */
#define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) \
	if (!intel_compare_link_m_n(&current_config->name, \
				    &pipe_config->name, adjust) && \
	    !intel_compare_link_m_n(&current_config->alt_name, \
				    &pipe_config->name, adjust)) { \
		INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
			  "(expected tu %i gmch %i/%i link %i/%i, " \
			  "or tu %i gmch %i/%i link %i/%i, " \
			  "found tu %i, gmch %i/%i link %i/%i)\n", \
			  current_config->name.tu, \
			  current_config->name.gmch_m, \
			  current_config->name.gmch_n, \
			  current_config->name.link_m, \
			  current_config->name.link_n, \
			  current_config->alt_name.tu, \
			  current_config->alt_name.gmch_m, \
			  current_config->alt_name.gmch_n, \
			  current_config->alt_name.link_m, \
			  current_config->alt_name.link_n, \
			  pipe_config->name.tu, \
			  pipe_config->name.gmch_m, \
			  pipe_config->name.gmch_n, \
			  pipe_config->name.link_m, \
			  pipe_config->name.link_n); \
		ret = false; \
	}

/* Compare only the bits of a flags field selected by @mask. */
#define PIPE_CONF_CHECK_FLAGS(name, mask)	\
	if ((current_config->name ^ pipe_config->name) & (mask)) { \
		INTEL_ERR_OR_DBG_KMS("mismatch in " #name "(" #mask ") " \
			  "(expected %i, found %i)\n", \
			  current_config->name & (mask), \
			  pipe_config->name & (mask)); \
		ret = false; \
	}

/* Compare a clock with the fuzzy tolerance of intel_fuzzy_clock_check(). */
#define PIPE_CONF_CHECK_CLOCK_FUZZY(name) \
	if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
		INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
			  "(expected %i, found %i)\n", \
			  current_config->name, \
			  pipe_config->name); \
		ret = false; \
	}

/* True when either config carries the given quirk. */
#define PIPE_CONF_QUIRK(quirk)	\
	((current_config->quirks | pipe_config->quirks) & (quirk))

	PIPE_CONF_CHECK_I(cpu_transcoder);

	PIPE_CONF_CHECK_I(has_pch_encoder);
	PIPE_CONF_CHECK_I(fdi_lanes);
	PIPE_CONF_CHECK_M_N(fdi_m_n);

	PIPE_CONF_CHECK_I(has_dp_encoder);
	PIPE_CONF_CHECK_I(lane_count);
	PIPE_CONF_CHECK_X(lane_lat_optim_mask);

	if (INTEL_INFO(dev)->gen < 8) {
		PIPE_CONF_CHECK_M_N(dp_m_n);

		if (current_config->has_drrs)
			PIPE_CONF_CHECK_M_N(dp_m2_n2);
	} else
		PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);

	PIPE_CONF_CHECK_I(has_dsi_encoder);

	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_start);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_end);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_start);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_end);

	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vdisplay);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vtotal);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_start);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_end);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_start);
	PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end);

	PIPE_CONF_CHECK_I(pixel_multiplier);
	PIPE_CONF_CHECK_I(has_hdmi_sink);
	if ((INTEL_INFO(dev)->gen < 8 && !IS_HASWELL(dev)) ||
	    IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
		PIPE_CONF_CHECK_I(limited_color_range);
	PIPE_CONF_CHECK_I(has_infoframe);

	PIPE_CONF_CHECK_I(has_audio);

	PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
			      DRM_MODE_FLAG_INTERLACE);

	if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
				      DRM_MODE_FLAG_PHSYNC);
		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
				      DRM_MODE_FLAG_NHSYNC);
		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
				      DRM_MODE_FLAG_PVSYNC);
		PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
				      DRM_MODE_FLAG_NVSYNC);
	}

	PIPE_CONF_CHECK_X(gmch_pfit.control);
	/* pfit ratios are autocomputed by the hw on gen4+ */
	if (INTEL_INFO(dev)->gen < 4)
		PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
	PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);

	/* These are only compared on a full verification pass, not a fastset. */
	if (!adjust) {
		PIPE_CONF_CHECK_I(pipe_src_w);
		PIPE_CONF_CHECK_I(pipe_src_h);

		PIPE_CONF_CHECK_I(pch_pfit.enabled);
		if (current_config->pch_pfit.enabled) {
			PIPE_CONF_CHECK_X(pch_pfit.pos);
			PIPE_CONF_CHECK_X(pch_pfit.size);
		}

		PIPE_CONF_CHECK_I(scaler_state.scaler_id);
	}

	/* BDW+ don't expose a synchronous way to read the state */
	if (IS_HASWELL(dev))
		PIPE_CONF_CHECK_I(ips_enabled);

	PIPE_CONF_CHECK_I(double_wide);

	PIPE_CONF_CHECK_X(ddi_pll_sel);

	PIPE_CONF_CHECK_P(shared_dpll);
	PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
	PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
	PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
	PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
	PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
	PIPE_CONF_CHECK_X(dpll_hw_state.spll);
	PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
	PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
	PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);

	PIPE_CONF_CHECK_X(dsi_pll.ctrl);
	PIPE_CONF_CHECK_X(dsi_pll.div);

	if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
		PIPE_CONF_CHECK_I(pipe_bpp);

	PIPE_CONF_CHECK_CLOCK_FUZZY(base.adjusted_mode.crtc_clock);
	PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);

#undef PIPE_CONF_CHECK_X
#undef PIPE_CONF_CHECK_I
#undef PIPE_CONF_CHECK_P
#undef PIPE_CONF_CHECK_FLAGS
#undef PIPE_CONF_CHECK_CLOCK_FUZZY
#undef PIPE_CONF_QUIRK
#undef INTEL_ERR_OR_DBG_KMS

	return ret;
}
12936
/*
 * intel_pipe_config_sanity_check - sanity-check a read-out pipe config
 * @dev_priv: i915 device
 * @pipe_config: the configuration to check
 *
 * For PCH-encoder configs, recomputes the dotclock from the FDI link
 * parameters and warns when it disagrees (beyond the fuzzy tolerance)
 * with the dotclock the encoder reported.
 */
static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
					   const struct intel_crtc_state *pipe_config)
{
	if (pipe_config->has_pch_encoder) {
		int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
							    &pipe_config->fdi_m_n);
		int dotclock = pipe_config->base.adjusted_mode.crtc_clock;

		/*
		 * FDI already provided one idea for the dotclock.
		 * Yell if the encoder disagrees.
		 */
		WARN(!intel_fuzzy_clock_check(fdi_dotclock, dotclock),
		     "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
		     fdi_dotclock, dotclock);
	}
}
12954
/*
 * verify_wm_state - compare SKL+ DDB allocations in hw against sw tracking
 * @crtc: the CRTC whose pipe is checked
 * @new_state: the just-committed CRTC state
 *
 * Gen9+ only and only for active pipes.  Reads the hardware DDB allocation
 * and complains about every plane (and the cursor) whose entry differs
 * from the software copy in dev_priv->wm.skl_hw.
 */
static void verify_wm_state(struct drm_crtc *crtc,
			    struct drm_crtc_state *new_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct skl_ddb_allocation hw_ddb, *sw_ddb;
	struct skl_ddb_entry *hw_entry, *sw_entry;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	const enum pipe pipe = intel_crtc->pipe;
	int plane;

	if (INTEL_INFO(dev)->gen < 9 || !new_state->active)
		return;

	skl_ddb_get_hw_state(dev_priv, &hw_ddb);
	sw_ddb = &dev_priv->wm.skl_hw.ddb;

	/* planes */
	for_each_plane(dev_priv, pipe, plane) {
		hw_entry = &hw_ddb.plane[pipe][plane];
		sw_entry = &sw_ddb->plane[pipe][plane];

		if (skl_ddb_entry_equal(hw_entry, sw_entry))
			continue;

		DRM_ERROR("mismatch in DDB state pipe %c plane %d "
			  "(expected (%u,%u), found (%u,%u))\n",
			  pipe_name(pipe), plane + 1,
			  sw_entry->start, sw_entry->end,
			  hw_entry->start, hw_entry->end);
	}

	/* cursor */
	hw_entry = &hw_ddb.plane[pipe][PLANE_CURSOR];
	sw_entry = &sw_ddb->plane[pipe][PLANE_CURSOR];

	if (!skl_ddb_entry_equal(hw_entry, sw_entry)) {
		DRM_ERROR("mismatch in DDB state pipe %c cursor "
			  "(expected (%u,%u), found (%u,%u))\n",
			  pipe_name(pipe),
			  sw_entry->start, sw_entry->end,
			  hw_entry->start, hw_entry->end);
	}
}
12999
/*
 * verify_connector_state - cross-check connector state for one CRTC
 * @dev: drm device
 * @crtc: only connectors whose state points at this CRTC are checked
 *        (NULL checks the disconnected connectors)
 *
 * Verifies each matching connector via intel_connector_verify_state()
 * and warns when the atomic best_encoder disagrees with the legacy
 * connector->encoder pointer.
 */
static void
verify_connector_state(struct drm_device *dev, struct drm_crtc *crtc)
{
	struct drm_connector *connector;

	drm_for_each_connector(connector, dev) {
		struct drm_encoder *encoder = connector->encoder;
		struct drm_connector_state *state = connector->state;

		if (state->crtc != crtc)
			continue;

		intel_connector_verify_state(to_intel_connector(connector));

		I915_STATE_WARN(state->best_encoder != encoder,
		     "connector's atomic encoder doesn't match legacy encoder\n");
	}
}
13018
/*
 * verify_encoder_state - cross-check all encoders against their connectors
 * @dev: drm device
 *
 * For every encoder: confirms that each connector bound to it points at
 * the encoder's CRTC, that the encoder's enabled state matches whether
 * any connector uses it, and that a detached encoder is actually off in
 * hardware (via get_hw_state).
 */
static void
verify_encoder_state(struct drm_device *dev)
{
	struct intel_encoder *encoder;
	struct intel_connector *connector;

	for_each_intel_encoder(dev, encoder) {
		bool enabled = false;
		enum pipe pipe;

		DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
			      encoder->base.base.id,
			      encoder->base.name);

		for_each_intel_connector(dev, connector) {
			if (connector->base.state->best_encoder != &encoder->base)
				continue;
			enabled = true;

			I915_STATE_WARN(connector->base.state->crtc !=
					encoder->base.crtc,
			     "connector's crtc doesn't match encoder crtc\n");
		}

		I915_STATE_WARN(!!encoder->base.crtc != enabled,
		     "encoder's enabled state mismatch "
		     "(expected %i, found %i)\n",
		     !!encoder->base.crtc, enabled);

		if (!encoder->base.crtc) {
			bool active;

			active = encoder->get_hw_state(encoder, &pipe);
			I915_STATE_WARN(active,
			     "encoder detached but still enabled on pipe %c.\n",
			     pipe_name(pipe));
		}
	}
}
13058
/*
 * verify_crtc_state - compare a CRTC's hardware state against sw state
 * @crtc: the CRTC to verify
 * @old_crtc_state: state that is being discarded; its memory is REUSED
 *                  below as scratch space for the hw readout
 * @new_crtc_state: the just-committed state, treated as the reference
 *
 * Reads the pipe and encoder configuration back from hardware into the
 * recycled @old_crtc_state and warns about any disagreement with
 * @new_crtc_state, including a full intel_pipe_config_compare() for
 * active pipes.
 */
static void
verify_crtc_state(struct drm_crtc *crtc,
		  struct drm_crtc_state *old_crtc_state,
		  struct drm_crtc_state *new_crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *pipe_config, *sw_config;
	struct drm_atomic_state *old_state;
	bool active;

	/* Tear down the old state's references, then recycle its memory
	 * as the destination for the hardware readout. */
	old_state = old_crtc_state->state;
	__drm_atomic_helper_crtc_destroy_state(old_crtc_state);
	pipe_config = to_intel_crtc_state(old_crtc_state);
	memset(pipe_config, 0, sizeof(*pipe_config));
	pipe_config->base.crtc = crtc;
	pipe_config->base.state = old_state;

	DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);

	active = dev_priv->display.get_pipe_config(intel_crtc, pipe_config);

	/* hw state is inconsistent with the pipe quirk */
	if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
	    (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
		active = new_crtc_state->active;

	I915_STATE_WARN(new_crtc_state->active != active,
	     "crtc active state doesn't match with hw state "
	     "(expected %i, found %i)\n", new_crtc_state->active, active);

	I915_STATE_WARN(intel_crtc->active != new_crtc_state->active,
	     "transitional active state does not match atomic hw state "
	     "(expected %i, found %i)\n", new_crtc_state->active, intel_crtc->active);

	for_each_encoder_on_crtc(dev, crtc, encoder) {
		enum pipe pipe;

		active = encoder->get_hw_state(encoder, &pipe);
		I915_STATE_WARN(active != new_crtc_state->active,
			"[ENCODER:%i] active %i with crtc active %i\n",
			encoder->base.base.id, active, new_crtc_state->active);

		I915_STATE_WARN(active && intel_crtc->pipe != pipe,
				"Encoder connected to wrong pipe %c\n",
				pipe_name(pipe));

		if (active)
			encoder->get_config(encoder, pipe_config);
	}

	/* Inactive pipes have nothing further to compare. */
	if (!new_crtc_state->active)
		return;

	intel_pipe_config_sanity_check(dev_priv, pipe_config);

	sw_config = to_intel_crtc_state(crtc->state);
	if (!intel_pipe_config_compare(dev, sw_config,
				       pipe_config, false)) {
		I915_STATE_WARN(1, "pipe state doesn't match!\n");
		intel_dump_pipe_config(intel_crtc, pipe_config,
				       "[hw state]");
		intel_dump_pipe_config(intel_crtc, sw_config,
				       "[sw state]");
	}
}
13127
/*
 * verify_single_dpll_state - cross-check one shared DPLL against hardware
 * @dev_priv: i915 device
 * @pll: the shared DPLL to verify
 * @crtc: CRTC whose membership in the pll masks is checked, or NULL to
 *        only check the pll's global bookkeeping
 * @new_state: the CRTC's committed state (ignored when @crtc is NULL)
 *
 * Warns when the pll's on/active tracking disagrees with the hardware
 * readout, when @crtc's bit is missing from (or unexpectedly present in)
 * the active/enabled masks, and when the cached hw state differs from
 * what get_hw_state() reports.
 */
static void
verify_single_dpll_state(struct drm_i915_private *dev_priv,
			 struct intel_shared_dpll *pll,
			 struct drm_crtc *crtc,
			 struct drm_crtc_state *new_state)
{
	struct intel_dpll_hw_state dpll_hw_state;
	unsigned crtc_mask;
	bool active;

	memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));

	DRM_DEBUG_KMS("%s\n", pll->name);

	active = pll->funcs.get_hw_state(dev_priv, pll, &dpll_hw_state);

	/* Always-on plls are exempt from on/off bookkeeping checks. */
	if (!(pll->flags & INTEL_DPLL_ALWAYS_ON)) {
		I915_STATE_WARN(!pll->on && pll->active_mask,
				"pll in active use but not on in sw tracking\n");
		I915_STATE_WARN(pll->on && !pll->active_mask,
				"pll is on but not used by any active crtc\n");
		I915_STATE_WARN(pll->on != active,
		     "pll on state mismatch (expected %i, found %i)\n",
		     pll->on, active);
	}

	if (!crtc) {
		I915_STATE_WARN(pll->active_mask & ~pll->config.crtc_mask,
				"more active pll users than references: %x vs %x\n",
				pll->active_mask, pll->config.crtc_mask);

		return;
	}

	crtc_mask = 1 << drm_crtc_index(crtc);

	if (new_state->active)
		I915_STATE_WARN(!(pll->active_mask & crtc_mask),
				"pll active mismatch (expected pipe %c in active mask 0x%02x)\n",
				pipe_name(drm_crtc_index(crtc)), pll->active_mask);
	else
		I915_STATE_WARN(pll->active_mask & crtc_mask,
				"pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n",
				pipe_name(drm_crtc_index(crtc)), pll->active_mask);

	I915_STATE_WARN(!(pll->config.crtc_mask & crtc_mask),
			"pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n",
			crtc_mask, pll->config.crtc_mask);

	I915_STATE_WARN(pll->on && memcmp(&pll->config.hw_state,
					  &dpll_hw_state,
					  sizeof(dpll_hw_state)),
			"pll hw state mismatch\n");
}
13182
13183static void
c0ead703
ML
13184verify_shared_dpll_state(struct drm_device *dev, struct drm_crtc *crtc,
13185 struct drm_crtc_state *old_crtc_state,
13186 struct drm_crtc_state *new_crtc_state)
e7c84544
ML
13187{
13188 struct drm_i915_private *dev_priv = dev->dev_private;
13189 struct intel_crtc_state *old_state = to_intel_crtc_state(old_crtc_state);
13190 struct intel_crtc_state *new_state = to_intel_crtc_state(new_crtc_state);
13191
13192 if (new_state->shared_dpll)
c0ead703 13193 verify_single_dpll_state(dev_priv, new_state->shared_dpll, crtc, new_crtc_state);
e7c84544
ML
13194
13195 if (old_state->shared_dpll &&
13196 old_state->shared_dpll != new_state->shared_dpll) {
13197 unsigned crtc_mask = 1 << drm_crtc_index(crtc);
13198 struct intel_shared_dpll *pll = old_state->shared_dpll;
13199
13200 I915_STATE_WARN(pll->active_mask & crtc_mask,
13201 "pll active mismatch (didn't expect pipe %c in active mask)\n",
13202 pipe_name(drm_crtc_index(crtc)));
13203 I915_STATE_WARN(pll->config.crtc_mask & crtc_mask,
13204 "pll enabled crtcs mismatch (found %x in enabled mask)\n",
13205 pipe_name(drm_crtc_index(crtc)));
5358901f 13206 }
8af6cf88
DV
13207}
13208
e7c84544 13209static void
c0ead703 13210intel_modeset_verify_crtc(struct drm_crtc *crtc,
e7c84544
ML
13211 struct drm_crtc_state *old_state,
13212 struct drm_crtc_state *new_state)
13213{
5a21b665
DV
13214 if (!needs_modeset(new_state) &&
13215 !to_intel_crtc_state(new_state)->update_pipe)
13216 return;
13217
c0ead703 13218 verify_wm_state(crtc, new_state);
5a21b665 13219 verify_connector_state(crtc->dev, crtc);
c0ead703
ML
13220 verify_crtc_state(crtc, old_state, new_state);
13221 verify_shared_dpll_state(crtc->dev, crtc, old_state, new_state);
e7c84544
ML
13222}
13223
13224static void
c0ead703 13225verify_disabled_dpll_state(struct drm_device *dev)
e7c84544
ML
13226{
13227 struct drm_i915_private *dev_priv = dev->dev_private;
13228 int i;
13229
13230 for (i = 0; i < dev_priv->num_shared_dpll; i++)
c0ead703 13231 verify_single_dpll_state(dev_priv, &dev_priv->shared_dplls[i], NULL, NULL);
e7c84544
ML
13232}
13233
/*
 * intel_modeset_verify_disabled - verify state not bound to any CRTC
 * @dev: drm device
 *
 * Cross-checks all encoders, the connectors with no CRTC (crtc == NULL),
 * and the shared DPLLs' global bookkeeping.
 */
static void
intel_modeset_verify_disabled(struct drm_device *dev)
{
	verify_encoder_state(dev);
	verify_connector_state(dev, NULL);
	verify_disabled_dpll_state(dev);
}
13241
/*
 * update_scanline_offset - cache the platform's scanline counter offset
 * @crtc: the CRTC whose scanline_offset is recomputed
 *
 * Stores into crtc->scanline_offset the correction that must be added to
 * the hardware scanline counter to get the true scanline, per the rules
 * described below.
 */
static void update_scanline_offset(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;

	/*
	 * The scanline counter increments at the leading edge of hsync.
	 *
	 * On most platforms it starts counting from vtotal-1 on the
	 * first active line. That means the scanline counter value is
	 * always one less than what we would expect. Ie. just after
	 * start of vblank, which also occurs at start of hsync (on the
	 * last active line), the scanline counter will read vblank_start-1.
	 *
	 * On gen2 the scanline counter starts counting from 1 instead
	 * of vtotal-1, so we have to subtract one (or rather add vtotal-1
	 * to keep the value positive), instead of adding one.
	 *
	 * On HSW+ the behaviour of the scanline counter depends on the output
	 * type. For DP ports it behaves like most other platforms, but on HDMI
	 * there's an extra 1 line difference. So we need to add two instead of
	 * one to the value.
	 */
	if (IS_GEN2(dev)) {
		const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
		int vtotal;

		vtotal = adjusted_mode->crtc_vtotal;
		/* Interlaced modes count half the frame's lines. */
		if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
			vtotal /= 2;

		crtc->scanline_offset = vtotal - 1;
	} else if (HAS_DDI(dev) &&
		   intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) {
		crtc->scanline_offset = 2;
	} else
		crtc->scanline_offset = 1;
}
13279
/*
 * intel_modeset_clear_plls - release shared DPLLs from modeset CRTCs
 * @state: the atomic state being checked
 *
 * For every CRTC doing a modeset, drops its shared DPLL reference (the
 * new pll, if any, is picked later by crtc_compute_clock).  No-op on
 * platforms without a crtc_compute_clock hook.
 */
static void intel_modeset_clear_plls(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_shared_dpll_config *shared_dpll = NULL;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int i;

	if (!dev_priv->display.crtc_compute_clock)
		return;

	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
		struct intel_shared_dpll *old_dpll =
			to_intel_crtc_state(crtc->state)->shared_dpll;

		if (!needs_modeset(crtc_state))
			continue;

		to_intel_crtc_state(crtc_state)->shared_dpll = NULL;

		if (!old_dpll)
			continue;

		/* Lazily fetch the dpll state the first time it's needed. */
		if (!shared_dpll)
			shared_dpll = intel_atomic_get_shared_dpll_state(state);

		intel_shared_dpll_config_put(shared_dpll, old_dpll, intel_crtc);
	}
}
13311
/*
 * This implements the workaround described in the "notes" section of the mode
 * set sequence documentation. When going from no pipes or single pipe to
 * multiple pipes, and planes are enabled after the pipe, we need to wait at
 * least 2 vblanks on the first pipe before enabling planes on the second pipe.
 *
 * Returns 0 on success or a negative errno from
 * intel_atomic_get_crtc_state().
 */
static int haswell_mode_set_planes_workaround(struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state;
	struct intel_crtc *intel_crtc;
	struct drm_crtc *crtc;
	struct intel_crtc_state *first_crtc_state = NULL;
	struct intel_crtc_state *other_crtc_state = NULL;
	enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
	int i;

	/* look at all crtc's that are going to be enabled in during modeset */
	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		intel_crtc = to_intel_crtc(crtc);

		if (!crtc_state->active || !needs_modeset(crtc_state))
			continue;

		if (first_crtc_state) {
			other_crtc_state = to_intel_crtc_state(crtc_state);
			break;
		} else {
			first_crtc_state = to_intel_crtc_state(crtc_state);
			first_pipe = intel_crtc->pipe;
		}
	}

	/* No workaround needed? */
	if (!first_crtc_state)
		return 0;

	/* w/a possibly needed, check how many crtc's are already enabled. */
	for_each_intel_crtc(state->dev, intel_crtc) {
		struct intel_crtc_state *pipe_config;

		pipe_config = intel_atomic_get_crtc_state(state, intel_crtc);
		if (IS_ERR(pipe_config))
			return PTR_ERR(pipe_config);

		pipe_config->hsw_workaround_pipe = INVALID_PIPE;

		if (!pipe_config->base.active ||
		    needs_modeset(&pipe_config->base))
			continue;

		/* 2 or more enabled crtcs means no need for w/a */
		if (enabled_pipe != INVALID_PIPE)
			return 0;

		enabled_pipe = intel_crtc->pipe;
	}

	/* Record which pipe must wait for which: either the first enabling
	 * pipe waits on the already-enabled one, or the second enabling
	 * pipe waits on the first. */
	if (enabled_pipe != INVALID_PIPE)
		first_crtc_state->hsw_workaround_pipe = enabled_pipe;
	else if (other_crtc_state)
		other_crtc_state->hsw_workaround_pipe = first_pipe;

	return 0;
}
13376
27c329ed
ML
13377static int intel_modeset_all_pipes(struct drm_atomic_state *state)
13378{
13379 struct drm_crtc *crtc;
13380 struct drm_crtc_state *crtc_state;
13381 int ret = 0;
13382
13383 /* add all active pipes to the state */
13384 for_each_crtc(state->dev, crtc) {
13385 crtc_state = drm_atomic_get_crtc_state(state, crtc);
13386 if (IS_ERR(crtc_state))
13387 return PTR_ERR(crtc_state);
13388
13389 if (!crtc_state->active || needs_modeset(crtc_state))
13390 continue;
13391
13392 crtc_state->mode_changed = true;
13393
13394 ret = drm_atomic_add_affected_connectors(state, crtc);
13395 if (ret)
13396 break;
13397
13398 ret = drm_atomic_add_affected_planes(state, crtc);
13399 if (ret)
13400 break;
13401 }
13402
13403 return ret;
13404}
13405
/*
 * Extra global checks and bookkeeping done when at least one crtc in
 * @state undergoes a full modeset: reject conflicting digital port
 * setups, recompute the active-crtc bitmask, recalculate cdclk (forcing
 * a modeset on all pipes when it changes), release stale PLLs and apply
 * the Haswell planes workaround.
 */
static int intel_modeset_checks(struct drm_atomic_state *state)
{
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_i915_private *dev_priv = state->dev->dev_private;
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int ret = 0, i;

	if (!check_digital_port_conflicts(state)) {
		DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
		return -EINVAL;
	}

	intel_state->modeset = true;
	intel_state->active_crtcs = dev_priv->active_crtcs;

	/* Update the active-crtc mask from the crtcs carried in this
	 * state; crtcs not in the state keep their current bit. */
	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		if (crtc_state->active)
			intel_state->active_crtcs |= 1 << i;
		else
			intel_state->active_crtcs &= ~(1 << i);

		if (crtc_state->active != crtc->state->active)
			intel_state->active_pipe_changes |= drm_crtc_mask(crtc);
	}

	/*
	 * See if the config requires any additional preparation, e.g.
	 * to adjust global state with pipes off. We need to do this
	 * here so we can get the modeset_pipe updated config for the new
	 * mode set on this crtc. For other crtcs we need to use the
	 * adjusted_mode bits in the crtc directly.
	 */
	if (dev_priv->display.modeset_calc_cdclk) {
		/* Fall back to the current, then the preferred, PLL VCO
		 * when the state doesn't carry one yet. */
		if (!intel_state->cdclk_pll_vco)
			intel_state->cdclk_pll_vco = dev_priv->cdclk_pll.vco;
		if (!intel_state->cdclk_pll_vco)
			intel_state->cdclk_pll_vco = dev_priv->skl_preferred_vco_freq;

		ret = dev_priv->display.modeset_calc_cdclk(state);
		if (ret < 0)
			return ret;

		/* A cdclk (or cdclk PLL VCO) change affects every pipe,
		 * so pull all active pipes into this modeset. */
		if (intel_state->dev_cdclk != dev_priv->cdclk_freq ||
		    intel_state->cdclk_pll_vco != dev_priv->cdclk_pll.vco)
			ret = intel_modeset_all_pipes(state);

		if (ret < 0)
			return ret;

		DRM_DEBUG_KMS("New cdclk calculated to be atomic %u, actual %u\n",
			      intel_state->cdclk, intel_state->dev_cdclk);
	} else
		to_intel_atomic_state(state)->cdclk = dev_priv->atomic_cdclk_freq;

	intel_modeset_clear_plls(state);

	if (IS_HASWELL(dev_priv))
		return haswell_mode_set_planes_workaround(state);

	return 0;
}
13468
aa363136
MR
13469/*
13470 * Handle calculation of various watermark data at the end of the atomic check
13471 * phase. The code here should be run after the per-crtc and per-plane 'check'
13472 * handlers to ensure that all derived state has been updated.
13473 */
55994c2c 13474static int calc_watermark_data(struct drm_atomic_state *state)
aa363136
MR
13475{
13476 struct drm_device *dev = state->dev;
98d39494 13477 struct drm_i915_private *dev_priv = to_i915(dev);
98d39494
MR
13478
13479 /* Is there platform-specific watermark information to calculate? */
13480 if (dev_priv->display.compute_global_watermarks)
55994c2c
MR
13481 return dev_priv->display.compute_global_watermarks(state);
13482
13483 return 0;
aa363136
MR
13484}
13485
74c090b1
ML
/**
 * intel_atomic_check - validate state object
 * @dev: drm device
 * @state: state to validate
 *
 * Computes the full pipe config for every crtc that needs a modeset,
 * downgrades modesets to fastsets where the fastboot comparison allows,
 * then runs the global modeset checks, plane checks, FBC crtc selection
 * and watermark calculation.
 *
 * Returns 0 on success, negative error code on failure.
 */
static int intel_atomic_check(struct drm_device *dev,
			      struct drm_atomic_state *state)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int ret, i;
	bool any_ms = false;	/* does any crtc need a full modeset? */

	ret = drm_atomic_helper_check_modeset(dev, state);
	if (ret)
		return ret;

	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		struct intel_crtc_state *pipe_config =
			to_intel_crtc_state(crtc_state);

		/* Catch I915_MODE_FLAG_INHERITED */
		if (crtc_state->mode.private_flags != crtc->state->mode.private_flags)
			crtc_state->mode_changed = true;

		if (!needs_modeset(crtc_state))
			continue;

		/* A crtc being disabled needs no config recomputation. */
		if (!crtc_state->enable) {
			any_ms = true;
			continue;
		}

		/* FIXME: For only active_changed we shouldn't need to do any
		 * state recomputation at all. */

		ret = drm_atomic_add_affected_connectors(state, crtc);
		if (ret)
			return ret;

		ret = intel_modeset_pipe_config(crtc, pipe_config);
		if (ret) {
			intel_dump_pipe_config(to_intel_crtc(crtc),
					       pipe_config, "[failed]");
			return ret;
		}

		/* If the newly computed config matches the current one,
		 * downgrade the modeset to a fastset pipe update. */
		if (i915.fastboot &&
		    intel_pipe_config_compare(dev,
					to_intel_crtc_state(crtc->state),
					pipe_config, true)) {
			crtc_state->mode_changed = false;
			to_intel_crtc_state(crtc_state)->update_pipe = true;
		}

		if (needs_modeset(crtc_state))
			any_ms = true;

		ret = drm_atomic_add_affected_planes(state, crtc);
		if (ret)
			return ret;

		intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
				       needs_modeset(crtc_state) ?
				       "[modeset]" : "[fastset]");
	}

	if (any_ms) {
		ret = intel_modeset_checks(state);

		if (ret)
			return ret;
	} else
		intel_state->cdclk = dev_priv->cdclk_freq;

	ret = drm_atomic_helper_check_planes(dev, state);
	if (ret)
		return ret;

	intel_fbc_choose_crtc(dev_priv, state);
	return calc_watermark_data(state);
}
13570
5008e874
ML
/*
 * Pre-commit preparation: wait for pending legacy page flips, prepare
 * (pin) the new plane framebuffers, and for blocking commits wait for
 * outstanding rendering on them up front. Returns 0 on success or a
 * negative error code; on failure the prepared planes are cleaned up.
 */
static int intel_atomic_prepare_commit(struct drm_device *dev,
				       struct drm_atomic_state *state,
				       bool nonblock)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_plane_state *plane_state;
	struct drm_crtc_state *crtc_state;
	struct drm_plane *plane;
	struct drm_crtc *crtc;
	int i, ret;

	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		/* Cursor-only updates don't wait on flips at all. */
		if (state->legacy_cursor_update)
			continue;

		ret = intel_crtc_wait_for_pending_flips(crtc);
		if (ret)
			return ret;

		/* NOTE(review): presumably this bounds the unpin-work
		 * backlog before queueing more work — confirm. */
		if (atomic_read(&to_intel_crtc(crtc)->unpin_work_count) >= 2)
			flush_workqueue(dev_priv->wq);
	}

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	ret = drm_atomic_helper_prepare_planes(dev, state);
	mutex_unlock(&dev->struct_mutex);

	/* Blocking commits wait for rendering here, so the commit tail
	 * itself never has to block on the GPU. */
	if (!ret && !nonblock) {
		for_each_plane_in_state(state, plane, plane_state, i) {
			struct intel_plane_state *intel_plane_state =
				to_intel_plane_state(plane_state);

			if (!intel_plane_state->wait_req)
				continue;

			ret = __i915_wait_request(intel_plane_state->wait_req,
						  true, NULL, NULL);
			if (ret) {
				/* Any hang should be swallowed by the wait */
				WARN_ON(ret == -EIO);
				mutex_lock(&dev->struct_mutex);
				drm_atomic_helper_cleanup_planes(dev, state);
				mutex_unlock(&dev->struct_mutex);
				break;
			}
		}
	}

	return ret;
}
13624
a2991414
ML
13625u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
13626{
13627 struct drm_device *dev = crtc->base.dev;
13628
13629 if (!dev->max_vblank_count)
13630 return drm_accurate_vblank_count(&crtc->base);
13631
13632 return dev->driver->get_vblank_counter(dev, crtc->pipe);
13633}
13634
5a21b665
DV
/*
 * Wait (up to 50ms per pipe) until every pipe in @crtc_mask has passed at
 * least one vblank, by snapshotting each pipe's vblank counter first and
 * then waiting for it to move. Pipes whose vblank reference cannot be
 * taken are dropped from the mask and skipped.
 */
static void intel_atomic_wait_for_vblanks(struct drm_device *dev,
					  struct drm_i915_private *dev_priv,
					  unsigned crtc_mask)
{
	unsigned last_vblank_count[I915_MAX_PIPES];
	enum pipe pipe;
	int ret;

	if (!crtc_mask)
		return;

	/* First pass: grab vblank references and snapshot the counters. */
	for_each_pipe(dev_priv, pipe) {
		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];

		if (!((1 << pipe) & crtc_mask))
			continue;

		ret = drm_crtc_vblank_get(crtc);
		if (WARN_ON(ret != 0)) {
			crtc_mask &= ~(1 << pipe);
			continue;
		}

		last_vblank_count[pipe] = drm_crtc_vblank_count(crtc);
	}

	/* Second pass: wait for each counter to advance, then drop the
	 * reference taken above. */
	for_each_pipe(dev_priv, pipe) {
		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
		long lret;

		if (!((1 << pipe) & crtc_mask))
			continue;

		lret = wait_event_timeout(dev->vblank[pipe].queue,
				last_vblank_count[pipe] !=
					drm_crtc_vblank_count(crtc),
				msecs_to_jiffies(50));

		WARN(!lret, "pipe %c vblank wait timed out\n", pipe_name(pipe));

		drm_crtc_vblank_put(crtc);
	}
}
13678
5a21b665 13679static bool needs_vblank_wait(struct intel_crtc_state *crtc_state)
a6747b73 13680{
5a21b665
DV
13681 /* fb updated, need to unpin old fb */
13682 if (crtc_state->fb_changed)
13683 return true;
a6747b73 13684
5a21b665
DV
13685 /* wm changes, need vblank before final wm's */
13686 if (crtc_state->update_wm_post)
13687 return true;
a6747b73 13688
5a21b665
DV
13689 /*
13690 * cxsr is re-enabled after vblank.
13691 * This is already handled by crtc_state->update_wm_post,
13692 * but added for clarity.
13693 */
13694 if (crtc_state->disable_cxsr)
13695 return true;
a6747b73 13696
5a21b665 13697 return false;
e8861675
ML
13698}
13699
/*
 * The tail of an atomic commit, run after validation and state swap --
 * either directly for blocking commits or from the commit_work worker
 * for nonblocking ones. Waits for outstanding rendering, disables the
 * outgoing crtcs, commits global state (cdclk), enables/updates the
 * remaining crtcs, waits a vblank where required, performs post-update
 * cleanup and finally frees @state.
 */
static void intel_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc_state *old_crtc_state;
	struct drm_crtc *crtc;
	struct intel_crtc_state *intel_cstate;
	struct drm_plane *plane;
	struct drm_plane_state *plane_state;
	bool hw_check = intel_state->modeset;
	unsigned long put_domains[I915_MAX_PIPES] = {};
	unsigned crtc_vblank_mask = 0;
	int i, ret;

	for_each_plane_in_state(state, plane, plane_state, i) {
		struct intel_plane_state *intel_plane_state =
			to_intel_plane_state(plane_state);

		if (!intel_plane_state->wait_req)
			continue;

		ret = __i915_wait_request(intel_plane_state->wait_req,
					  true, NULL, NULL);
		/* EIO should be eaten, and we can't get interrupted in the
		 * worker, and blocking commits have waited already. */
		WARN_ON(ret);
	}

	drm_atomic_helper_wait_for_dependencies(state);

	if (intel_state->modeset) {
		/* Publish the new global state before touching hardware. */
		memcpy(dev_priv->min_pixclk, intel_state->min_pixclk,
		       sizeof(intel_state->min_pixclk));
		dev_priv->active_crtcs = intel_state->active_crtcs;
		dev_priv->atomic_cdclk_freq = intel_state->cdclk;

		intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
	}

	/* Disable pass: shut down every crtc that needs a full modeset. */
	for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		if (needs_modeset(crtc->state) ||
		    to_intel_crtc_state(crtc->state)->update_pipe) {
			hw_check = true;

			put_domains[to_intel_crtc(crtc)->pipe] =
				modeset_get_crtc_power_domains(crtc,
					to_intel_crtc_state(crtc->state));
		}

		if (!needs_modeset(crtc->state))
			continue;

		intel_pre_plane_update(to_intel_crtc_state(old_crtc_state));

		if (old_crtc_state->active) {
			intel_crtc_disable_planes(crtc, old_crtc_state->plane_mask);
			dev_priv->display.crtc_disable(crtc);
			intel_crtc->active = false;
			intel_fbc_disable(intel_crtc);
			intel_disable_shared_dpll(intel_crtc);

			/*
			 * Underruns don't always raise
			 * interrupts, so check manually.
			 */
			intel_check_cpu_fifo_underruns(dev_priv);
			intel_check_pch_fifo_underruns(dev_priv);

			if (!crtc->state->active)
				intel_update_watermarks(crtc);
		}
	}

	/* Only after disabling all output pipelines that will be changed can we
	 * update the output configuration. */
	intel_modeset_update_crtc_state(state);

	if (intel_state->modeset) {
		drm_atomic_helper_update_legacy_modeset_state(state->dev, state);

		/* Reprogram cdclk while all changed pipes are still off. */
		if (dev_priv->display.modeset_commit_cdclk &&
		    (intel_state->dev_cdclk != dev_priv->cdclk_freq ||
		     intel_state->cdclk_pll_vco != dev_priv->cdclk_pll.vco))
			dev_priv->display.modeset_commit_cdclk(state);

		intel_modeset_verify_disabled(dev);
	}

	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
	for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
		bool modeset = needs_modeset(crtc->state);
		struct intel_crtc_state *pipe_config =
			to_intel_crtc_state(crtc->state);

		if (modeset && crtc->state->active) {
			update_scanline_offset(to_intel_crtc(crtc));
			dev_priv->display.crtc_enable(crtc);
		}

		/* Complete events for now disabled pipes here. */
		if (modeset && !crtc->state->active && crtc->state->event) {
			spin_lock_irq(&dev->event_lock);
			drm_crtc_send_vblank_event(crtc, crtc->state->event);
			spin_unlock_irq(&dev->event_lock);

			crtc->state->event = NULL;
		}

		if (!modeset)
			intel_pre_plane_update(to_intel_crtc_state(old_crtc_state));

		if (crtc->state->active &&
		    drm_atomic_get_existing_plane_state(state, crtc->primary))
			intel_fbc_enable(intel_crtc, pipe_config, to_intel_plane_state(crtc->primary->state));

		if (crtc->state->active)
			drm_atomic_helper_commit_planes_on_crtc(old_crtc_state);

		/* Remember which pipes still need a vblank to pass before
		 * post-update work (unpin, final wm) may run. */
		if (pipe_config->base.active && needs_vblank_wait(pipe_config))
			crtc_vblank_mask |= 1 << i;
	}

	/* FIXME: We should call drm_atomic_helper_commit_hw_done() here
	 * already, but still need the state for the delayed optimization. To
	 * fix this:
	 * - wrap the optimization/post_plane_update stuff into a per-crtc work.
	 * - schedule that vblank worker _before_ calling hw_done
	 * - at the start of commit_tail, cancel it _synchrously
	 * - switch over to the vblank wait helper in the core after that since
	 *   we don't need out special handling any more.
	 */
	if (!state->legacy_cursor_update)
		intel_atomic_wait_for_vblanks(dev, dev_priv, crtc_vblank_mask);

	/*
	 * Now that the vblank has passed, we can go ahead and program the
	 * optimal watermarks on platforms that need two-step watermark
	 * programming.
	 *
	 * TODO: Move this (and other cleanup) to an async worker eventually.
	 */
	for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
		intel_cstate = to_intel_crtc_state(crtc->state);

		if (dev_priv->display.optimize_watermarks)
			dev_priv->display.optimize_watermarks(intel_cstate);
	}

	for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
		intel_post_plane_update(to_intel_crtc_state(old_crtc_state));

		if (put_domains[i])
			modeset_put_power_domains(dev_priv, put_domains[i]);

		intel_modeset_verify_crtc(crtc, old_crtc_state, crtc->state);
	}

	drm_atomic_helper_commit_hw_done(state);

	if (intel_state->modeset)
		intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET);

	mutex_lock(&dev->struct_mutex);
	drm_atomic_helper_cleanup_planes(dev, state);
	mutex_unlock(&dev->struct_mutex);

	drm_atomic_helper_commit_cleanup_done(state);

	drm_atomic_state_free(state);

	/* As one of the primary mmio accessors, KMS has a high likelihood
	 * of triggering bugs in unclaimed access. After we finish
	 * modesetting, see if an error has been flagged, and if so
	 * enable debugging for the next modeset - and hope we catch
	 * the culprit.
	 *
	 * XXX note that we assume display power is on at this point.
	 * This might hold true now but we need to add pm helper to check
	 * unclaimed only when the hardware is on, as atomic commits
	 * can happen also when the device is completely off.
	 */
	intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
}
13887
13888static void intel_atomic_commit_work(struct work_struct *work)
13889{
13890 struct drm_atomic_state *state = container_of(work,
13891 struct drm_atomic_state,
13892 commit_work);
13893 intel_atomic_commit_tail(state);
13894}
13895
6c9c1b38
DV
13896static void intel_atomic_track_fbs(struct drm_atomic_state *state)
13897{
13898 struct drm_plane_state *old_plane_state;
13899 struct drm_plane *plane;
13900 struct drm_i915_gem_object *obj, *old_obj;
13901 struct intel_plane *intel_plane;
13902 int i;
13903
13904 mutex_lock(&state->dev->struct_mutex);
13905 for_each_plane_in_state(state, plane, old_plane_state, i) {
13906 obj = intel_fb_obj(plane->state->fb);
13907 old_obj = intel_fb_obj(old_plane_state->fb);
13908 intel_plane = to_intel_plane(plane);
13909
13910 i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit);
13911 }
13912 mutex_unlock(&state->dev->struct_mutex);
13913}
13914
94f05024
DV
/**
 * intel_atomic_commit - commit validated state object
 * @dev: DRM device
 * @state: the top-level driver state object
 * @nonblock: nonblocking commit
 *
 * This function commits a top-level state object that has been validated
 * with drm_atomic_helper_check().
 *
 * FIXME: Atomic modeset support for i915 is not yet complete. At the moment
 * nonblocking commits are only safe for pure plane updates. Everything else
 * should work though.
 *
 * RETURNS
 * Zero for success or -errno.
 */
static int intel_atomic_commit(struct drm_device *dev,
			       struct drm_atomic_state *state,
			       bool nonblock)
{
	struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = 0;

	/* See the FIXME above: nonblocking modesets are not supported yet. */
	if (intel_state->modeset && nonblock) {
		DRM_DEBUG_KMS("nonblocking commit for modeset not yet implemented.\n");
		return -EINVAL;
	}

	ret = drm_atomic_helper_setup_commit(state, nonblock);
	if (ret)
		return ret;

	INIT_WORK(&state->commit_work, intel_atomic_commit_work);

	ret = intel_atomic_prepare_commit(dev, state, nonblock);
	if (ret) {
		DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
		return ret;
	}

	/* Point of no return: after the swap the new state is live, and
	 * the derived global state below must be published before the
	 * commit tail runs. */
	drm_atomic_helper_swap_state(state, true);
	dev_priv->wm.distrust_bios_wm = false;
	dev_priv->wm.skl_results = intel_state->wm_results;
	intel_shared_dpll_commit(state);
	intel_atomic_track_fbs(state);

	if (nonblock)
		queue_work(system_unbound_wq, &state->commit_work);
	else
		intel_atomic_commit_tail(state);

	return 0;
}
13969
c0c36b94
CW
/*
 * Force a modeset on @crtc to restore its current mode, by committing an
 * atomic state with mode_changed set. Used to reprogram a crtc without
 * changing its configuration. Failures are logged, not returned.
 */
void intel_crtc_restore_mode(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_atomic_state *state;
	struct drm_crtc_state *crtc_state;
	int ret;

	state = drm_atomic_state_alloc(dev);
	if (!state) {
		DRM_DEBUG_KMS("[CRTC:%d:%s] crtc restore failed, out of memory",
			      crtc->base.id, crtc->name);
		return;
	}

	state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);

retry:
	crtc_state = drm_atomic_get_crtc_state(state, crtc);
	ret = PTR_ERR_OR_ZERO(crtc_state);
	if (!ret) {
		/* Nothing to restore on an inactive crtc; just free the
		 * state (ret == 0, so we take the goto, not the if). */
		if (!crtc_state->active)
			goto out;

		crtc_state->mode_changed = true;
		ret = drm_atomic_commit(state);
	}

	/* Lock contention with another acquire context: drop everything
	 * and start over. */
	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		drm_modeset_backoff(state->acquire_ctx);
		goto retry;
	}

	/* On success drm_atomic_commit() consumed the state, so only free
	 * it on failure. Note the label deliberately sits inside the if:
	 * the goto above frees unconditionally, while the fall-through
	 * path frees only when ret != 0. */
	if (ret)
out:
		drm_atomic_state_free(state);
}
14007
25c5b266
DV
14008#undef for_each_intel_crtc_masked
14009
/* Crtc vfuncs: legacy entry points (set_config, gamma, properties) are
 * routed through the atomic helpers; page flips keep an i915-specific
 * implementation. */
static const struct drm_crtc_funcs intel_crtc_funcs = {
	.gamma_set = drm_atomic_helper_legacy_gamma_set,
	.set_config = drm_atomic_helper_set_config,
	.set_property = drm_atomic_helper_crtc_set_property,
	.destroy = intel_crtc_destroy,
	.page_flip = intel_crtc_page_flip,
	.atomic_duplicate_state = intel_crtc_duplicate_state,
	.atomic_destroy_state = intel_crtc_destroy_state,
};
14019
6beb8c23
MR
/**
 * intel_prepare_plane_fb - Prepare fb for usage on plane
 * @plane: drm plane to prepare for
 * @new_state: the plane state being prepared
 *
 * Prepares a framebuffer for usage on a display plane. Generally this
 * involves pinning the underlying object and updating the frontbuffer tracking
 * bits. Some older platforms need special physical address handling for
 * cursor planes.
 *
 * Must be called with struct_mutex held.
 *
 * Returns 0 on success, negative error code on failure.
 */
int
intel_prepare_plane_fb(struct drm_plane *plane,
		       const struct drm_plane_state *new_state)
{
	struct drm_device *dev = plane->dev;
	struct drm_framebuffer *fb = new_state->fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
	struct reservation_object *resv;
	int ret = 0;

	/* Neither an old nor a new fb: nothing to prepare. */
	if (!obj && !old_obj)
		return 0;

	if (old_obj) {
		struct drm_crtc_state *crtc_state =
			drm_atomic_get_existing_crtc_state(new_state->state, plane->state->crtc);

		/* Big Hammer, we also need to ensure that any pending
		 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
		 * current scanout is retired before unpinning the old
		 * framebuffer. Note that we rely on userspace rendering
		 * into the buffer attached to the pipe they are waiting
		 * on. If not, userspace generates a GPU hang with IPEHR
		 * point to the MI_WAIT_FOR_EVENT.
		 *
		 * This should only fail upon a hung GPU, in which case we
		 * can safely continue.
		 */
		if (needs_modeset(crtc_state))
			ret = i915_gem_object_wait_rendering(old_obj, true);
		if (ret) {
			/* GPU hangs should have been swallowed by the wait */
			WARN_ON(ret == -EIO);
			return ret;
		}
	}

	/* Disabling the plane: nothing new to pin. */
	if (!obj)
		return 0;

	/* For framebuffer backed by dmabuf, wait for fence */
	resv = i915_gem_object_get_dmabuf_resv(obj);
	if (resv) {
		long lret;

		lret = reservation_object_wait_timeout_rcu(resv, false, true,
							   MAX_SCHEDULE_TIMEOUT);
		if (lret == -ERESTARTSYS)
			return lret;

		WARN(lret < 0, "waiting returns %li\n", lret);
	}

	/* Old platforms address the cursor physically; everything else is
	 * pinned (and fenced) into the GTT. */
	if (plane->type == DRM_PLANE_TYPE_CURSOR &&
	    INTEL_INFO(dev)->cursor_needs_physical) {
		int align = IS_I830(dev) ? 16 * 1024 : 256;
		ret = i915_gem_object_attach_phys(obj, align);
		if (ret)
			DRM_DEBUG_KMS("failed to attach phys object\n");
	} else {
		ret = intel_pin_and_fence_fb_obj(fb, new_state->rotation);
	}

	/* Remember the last write request so the commit can wait on it. */
	if (ret == 0) {
		struct intel_plane_state *plane_state =
			to_intel_plane_state(new_state);

		i915_gem_request_assign(&plane_state->wait_req,
					obj->last_write_req);
	}

	return ret;
}
14108
38f3ce3a
MR
14109/**
14110 * intel_cleanup_plane_fb - Cleans up an fb after plane use
14111 * @plane: drm plane to clean up for
14112 * @fb: old framebuffer that was on plane
14113 *
14114 * Cleans up a framebuffer that has just been removed from a plane.
f935675f
ML
14115 *
14116 * Must be called with struct_mutex held.
38f3ce3a
MR
14117 */
14118void
14119intel_cleanup_plane_fb(struct drm_plane *plane,
d136dfee 14120 const struct drm_plane_state *old_state)
38f3ce3a
MR
14121{
14122 struct drm_device *dev = plane->dev;
7580d774 14123 struct intel_plane_state *old_intel_state;
1ee49399
ML
14124 struct drm_i915_gem_object *old_obj = intel_fb_obj(old_state->fb);
14125 struct drm_i915_gem_object *obj = intel_fb_obj(plane->state->fb);
38f3ce3a 14126
7580d774
ML
14127 old_intel_state = to_intel_plane_state(old_state);
14128
1ee49399 14129 if (!obj && !old_obj)
38f3ce3a
MR
14130 return;
14131
1ee49399
ML
14132 if (old_obj && (plane->type != DRM_PLANE_TYPE_CURSOR ||
14133 !INTEL_INFO(dev)->cursor_needs_physical))
3465c580 14134 intel_unpin_fb_obj(old_state->fb, old_state->rotation);
1ee49399 14135
7580d774 14136 i915_gem_request_assign(&old_intel_state->wait_req, NULL);
465c120c
MR
14137}
14138
6156a456
CK
14139int
14140skl_max_scale(struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state)
14141{
14142 int max_scale;
14143 struct drm_device *dev;
14144 struct drm_i915_private *dev_priv;
14145 int crtc_clock, cdclk;
14146
bf8a0af0 14147 if (!intel_crtc || !crtc_state->base.enable)
6156a456
CK
14148 return DRM_PLANE_HELPER_NO_SCALING;
14149
14150 dev = intel_crtc->base.dev;
14151 dev_priv = dev->dev_private;
14152 crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
27c329ed 14153 cdclk = to_intel_atomic_state(crtc_state->base.state)->cdclk;
6156a456 14154
54bf1ce6 14155 if (WARN_ON_ONCE(!crtc_clock || cdclk < crtc_clock))
6156a456
CK
14156 return DRM_PLANE_HELPER_NO_SCALING;
14157
14158 /*
14159 * skl max scale is lower of:
14160 * close to 3 but not 3, -1 is for that purpose
14161 * or
14162 * cdclk/crtc_clock
14163 */
14164 max_scale = min((1 << 16) * 3 - 1, (1 << 8) * ((cdclk << 8) / crtc_clock));
14165
14166 return max_scale;
14167}
14168
465c120c 14169static int
3c692a41 14170intel_check_primary_plane(struct drm_plane *plane,
061e4b8d 14171 struct intel_crtc_state *crtc_state,
3c692a41
GP
14172 struct intel_plane_state *state)
14173{
2b875c22
MR
14174 struct drm_crtc *crtc = state->base.crtc;
14175 struct drm_framebuffer *fb = state->base.fb;
6156a456 14176 int min_scale = DRM_PLANE_HELPER_NO_SCALING;
061e4b8d
ML
14177 int max_scale = DRM_PLANE_HELPER_NO_SCALING;
14178 bool can_position = false;
465c120c 14179
693bdc28
VS
14180 if (INTEL_INFO(plane->dev)->gen >= 9) {
14181 /* use scaler when colorkey is not required */
14182 if (state->ckey.flags == I915_SET_COLORKEY_NONE) {
14183 min_scale = 1;
14184 max_scale = skl_max_scale(to_intel_crtc(crtc), crtc_state);
14185 }
d8106366 14186 can_position = true;
6156a456 14187 }
d8106366 14188
061e4b8d
ML
14189 return drm_plane_helper_check_update(plane, crtc, fb, &state->src,
14190 &state->dst, &state->clip,
9b8b013d 14191 state->base.rotation,
da20eabd
ML
14192 min_scale, max_scale,
14193 can_position, true,
14194 &state->visible);
14af293f
GP
14195}
14196
5a21b665
DV
/*
 * Begin the per-crtc plane commit: open the vblank-evasion critical
 * section and, for fastsets, program color management, pipe config
 * updates and scaler detach before the planes are written.
 */
static void intel_begin_crtc_commit(struct drm_crtc *crtc,
				    struct drm_crtc_state *old_crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *old_intel_state =
		to_intel_crtc_state(old_crtc_state);
	bool modeset = needs_modeset(crtc->state);

	/* Perform vblank evasion around commit operation */
	intel_pipe_update_start(intel_crtc);

	/* Full modesets program the pipe elsewhere; the fastset-only
	 * updates below don't apply. */
	if (modeset)
		return;

	if (crtc->state->color_mgmt_changed || to_intel_crtc_state(crtc->state)->update_pipe) {
		intel_color_set_csc(crtc->state);
		intel_color_load_luts(crtc->state);
	}

	if (to_intel_crtc_state(crtc->state)->update_pipe)
		intel_update_pipe_config(intel_crtc, old_intel_state);
	else if (INTEL_INFO(dev)->gen >= 9)
		skl_detach_scalers(intel_crtc);
}
14222
14223static void intel_finish_crtc_commit(struct drm_crtc *crtc,
14224 struct drm_crtc_state *old_crtc_state)
14225{
14226 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
14227
14228 intel_pipe_update_end(intel_crtc, NULL);
14229}
14230
/**
 * intel_plane_destroy - destroy a plane
 * @plane: plane to destroy
 *
 * Common destruction function for all types of planes (primary, cursor,
 * sprite). A NULL @plane is tolerated so callers can destroy planes that
 * may never have been created.
 */
void intel_plane_destroy(struct drm_plane *plane)
{
	if (plane) {
		drm_plane_cleanup(plane);
		kfree(to_intel_plane(plane));
	}
}
14246
/* Plane vfuncs: legacy update/disable/set_property entry points go
 * through the atomic helpers; state and property handling is
 * i915-specific. */
const struct drm_plane_funcs intel_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = intel_plane_destroy,
	.set_property = drm_atomic_helper_plane_set_property,
	.atomic_get_property = intel_plane_atomic_get_property,
	.atomic_set_property = intel_plane_atomic_set_property,
	.atomic_duplicate_state = intel_plane_duplicate_state,
	.atomic_destroy_state = intel_plane_destroy_state,

};
14258
/*
 * Allocate and register the primary plane for @pipe, selecting the
 * per-generation format list and update/disable vfuncs.
 * Returns the new plane or NULL on allocation/registration failure.
 */
static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
						    int pipe)
{
	struct intel_plane *primary = NULL;
	struct intel_plane_state *state = NULL;
	const uint32_t *intel_primary_formats;
	unsigned int num_formats;
	int ret;

	primary = kzalloc(sizeof(*primary), GFP_KERNEL);
	if (!primary)
		goto fail;

	state = intel_create_plane_state(&primary->base);
	if (!state)
		goto fail;
	primary->base.state = &state->base;

	/* Only gen9+ primary planes can scale (via the shared scalers). */
	primary->can_scale = false;
	primary->max_downscale = 1;
	if (INTEL_INFO(dev)->gen >= 9) {
		primary->can_scale = true;
		state->scaler_id = -1;	/* no scaler assigned yet */
	}
	primary->pipe = pipe;
	primary->plane = pipe;
	primary->frontbuffer_bit = INTEL_FRONTBUFFER_PRIMARY(pipe);
	primary->check_plane = intel_check_primary_plane;
	/* gen2/3 FBC: plane A feeds pipe B, so swap (see intel_crtc_init). */
	if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4)
		primary->plane = !pipe;

	/* Pick format table and update/disable hooks per platform. */
	if (INTEL_INFO(dev)->gen >= 9) {
		intel_primary_formats = skl_primary_formats;
		num_formats = ARRAY_SIZE(skl_primary_formats);

		primary->update_plane = skylake_update_primary_plane;
		primary->disable_plane = skylake_disable_primary_plane;
	} else if (HAS_PCH_SPLIT(dev)) {
		intel_primary_formats = i965_primary_formats;
		num_formats = ARRAY_SIZE(i965_primary_formats);

		primary->update_plane = ironlake_update_primary_plane;
		primary->disable_plane = i9xx_disable_primary_plane;
	} else if (INTEL_INFO(dev)->gen >= 4) {
		intel_primary_formats = i965_primary_formats;
		num_formats = ARRAY_SIZE(i965_primary_formats);

		primary->update_plane = i9xx_update_primary_plane;
		primary->disable_plane = i9xx_disable_primary_plane;
	} else {
		intel_primary_formats = i8xx_primary_formats;
		num_formats = ARRAY_SIZE(i8xx_primary_formats);

		primary->update_plane = i9xx_update_primary_plane;
		primary->disable_plane = i9xx_disable_primary_plane;
	}

	/* The debug name format differs per generation for historical reasons. */
	if (INTEL_INFO(dev)->gen >= 9)
		ret = drm_universal_plane_init(dev, &primary->base, 0,
					       &intel_plane_funcs,
					       intel_primary_formats, num_formats,
					       DRM_PLANE_TYPE_PRIMARY,
					       "plane 1%c", pipe_name(pipe));
	else if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
		ret = drm_universal_plane_init(dev, &primary->base, 0,
					       &intel_plane_funcs,
					       intel_primary_formats, num_formats,
					       DRM_PLANE_TYPE_PRIMARY,
					       "primary %c", pipe_name(pipe));
	else
		ret = drm_universal_plane_init(dev, &primary->base, 0,
					       &intel_plane_funcs,
					       intel_primary_formats, num_formats,
					       DRM_PLANE_TYPE_PRIMARY,
					       "plane %c", plane_name(primary->plane));
	if (ret)
		goto fail;

	/* Rotation is only supported on gen4+ primary planes. */
	if (INTEL_INFO(dev)->gen >= 4)
		intel_create_rotation_property(dev, primary);

	drm_plane_helper_add(&primary->base, &intel_plane_helper_funcs);

	return &primary->base;

fail:
	/* kfree(NULL) is a no-op, so partial failures unwind cleanly. */
	kfree(state);
	kfree(primary);

	return NULL;
}
14350
3b7a5119
SJ
14351void intel_create_rotation_property(struct drm_device *dev, struct intel_plane *plane)
14352{
14353 if (!dev->mode_config.rotation_property) {
14354 unsigned long flags = BIT(DRM_ROTATE_0) |
14355 BIT(DRM_ROTATE_180);
14356
14357 if (INTEL_INFO(dev)->gen >= 9)
14358 flags |= BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270);
14359
14360 dev->mode_config.rotation_property =
14361 drm_mode_create_rotation_property(dev, flags);
14362 }
14363 if (dev->mode_config.rotation_property)
14364 drm_object_attach_property(&plane->base.base,
14365 dev->mode_config.rotation_property,
14366 plane->base.state->rotation);
14367}
14368
/*
 * Atomic check for the cursor plane: validates position/clipping,
 * cursor dimensions, backing-object size, tiling and the CHV pipe C
 * quirk. Returns 0 on success or a negative errno.
 */
static int
intel_check_cursor_plane(struct drm_plane *plane,
			 struct intel_crtc_state *crtc_state,
			 struct intel_plane_state *state)
{
	struct drm_crtc *crtc = crtc_state->base.crtc;
	struct drm_framebuffer *fb = state->base.fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	enum pipe pipe = to_intel_plane(plane)->pipe;
	unsigned stride;
	int ret;

	/* Cursors never scale; clip against the crtc and compute visibility. */
	ret = drm_plane_helper_check_update(plane, crtc, fb, &state->src,
					    &state->dst, &state->clip,
					    state->base.rotation,
					    DRM_PLANE_HELPER_NO_SCALING,
					    DRM_PLANE_HELPER_NO_SCALING,
					    true, true, &state->visible);
	if (ret)
		return ret;

	/* if we want to turn off the cursor ignore width and height */
	if (!obj)
		return 0;

	/* Check for which cursor types we support */
	if (!cursor_size_ok(plane->dev, state->base.crtc_w, state->base.crtc_h)) {
		DRM_DEBUG("Cursor dimension %dx%d not supported\n",
			  state->base.crtc_w, state->base.crtc_h);
		return -EINVAL;
	}

	/* Hardware strides are power-of-two, 4 bytes per pixel. */
	stride = roundup_pow_of_two(state->base.crtc_w) * 4;
	if (obj->base.size < stride * state->base.crtc_h) {
		DRM_DEBUG_KMS("buffer is too small\n");
		return -ENOMEM;
	}

	if (fb->modifier[0] != DRM_FORMAT_MOD_NONE) {
		DRM_DEBUG_KMS("cursor cannot be tiled\n");
		return -EINVAL;
	}

	/*
	 * There's something wrong with the cursor on CHV pipe C.
	 * If it straddles the left edge of the screen then
	 * moving it away from the edge or disabling it often
	 * results in a pipe underrun, and often that can lead to
	 * dead pipe (constant underrun reported, and it scans
	 * out just a solid color). To recover from that, the
	 * display power well must be turned off and on again.
	 * Refuse the put the cursor into that compromised position.
	 */
	if (IS_CHERRYVIEW(plane->dev) && pipe == PIPE_C &&
	    state->visible && state->base.crtc_x < 0) {
		DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
		return -EINVAL;
	}

	return 0;
}
3d7d6510 14430
a8ad0d8e
ML
14431static void
14432intel_disable_cursor_plane(struct drm_plane *plane,
7fabf5ef 14433 struct drm_crtc *crtc)
a8ad0d8e 14434{
f2858021
ML
14435 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
14436
14437 intel_crtc->cursor_addr = 0;
55a08b3f 14438 intel_crtc_update_cursor(crtc, NULL);
a8ad0d8e
ML
14439}
14440
f4a2cf29 14441static void
55a08b3f
ML
14442intel_update_cursor_plane(struct drm_plane *plane,
14443 const struct intel_crtc_state *crtc_state,
14444 const struct intel_plane_state *state)
852e787c 14445{
55a08b3f
ML
14446 struct drm_crtc *crtc = crtc_state->base.crtc;
14447 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
ea2c67bb 14448 struct drm_device *dev = plane->dev;
2b875c22 14449 struct drm_i915_gem_object *obj = intel_fb_obj(state->base.fb);
a912f12f 14450 uint32_t addr;
852e787c 14451
f4a2cf29 14452 if (!obj)
a912f12f 14453 addr = 0;
f4a2cf29 14454 else if (!INTEL_INFO(dev)->cursor_needs_physical)
a912f12f 14455 addr = i915_gem_obj_ggtt_offset(obj);
f4a2cf29 14456 else
a912f12f 14457 addr = obj->phys_handle->busaddr;
852e787c 14458
a912f12f 14459 intel_crtc->cursor_addr = addr;
55a08b3f 14460 intel_crtc_update_cursor(crtc, state);
852e787c
GP
14461}
14462
/*
 * Allocate and register the cursor plane for @pipe.
 * Returns the new plane or NULL on allocation/registration failure.
 */
static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
						   int pipe)
{
	struct intel_plane *cursor = NULL;
	struct intel_plane_state *state = NULL;
	int ret;

	cursor = kzalloc(sizeof(*cursor), GFP_KERNEL);
	if (!cursor)
		goto fail;

	state = intel_create_plane_state(&cursor->base);
	if (!state)
		goto fail;
	cursor->base.state = &state->base;

	/* Cursors never scale. */
	cursor->can_scale = false;
	cursor->max_downscale = 1;
	cursor->pipe = pipe;
	cursor->plane = pipe;
	cursor->frontbuffer_bit = INTEL_FRONTBUFFER_CURSOR(pipe);
	cursor->check_plane = intel_check_cursor_plane;
	cursor->update_plane = intel_update_cursor_plane;
	cursor->disable_plane = intel_disable_cursor_plane;

	ret = drm_universal_plane_init(dev, &cursor->base, 0,
				       &intel_plane_funcs,
				       intel_cursor_formats,
				       ARRAY_SIZE(intel_cursor_formats),
				       DRM_PLANE_TYPE_CURSOR,
				       "cursor %c", pipe_name(pipe));
	if (ret)
		goto fail;

	/*
	 * Deliberately open-coded instead of calling
	 * intel_create_rotation_property(): cursors only support 0/180
	 * even on gen9+, where the helper would also advertise 90/270.
	 */
	if (INTEL_INFO(dev)->gen >= 4) {
		if (!dev->mode_config.rotation_property)
			dev->mode_config.rotation_property =
				drm_mode_create_rotation_property(dev,
							BIT(DRM_ROTATE_0) |
							BIT(DRM_ROTATE_180));
		if (dev->mode_config.rotation_property)
			drm_object_attach_property(&cursor->base.base,
				dev->mode_config.rotation_property,
				state->base.rotation);
	}

	/* gen9+: mark no shared scaler assigned. */
	if (INTEL_INFO(dev)->gen >=9)
		state->scaler_id = -1;

	drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);

	return &cursor->base;

fail:
	/* kfree(NULL) is a no-op, so partial failures unwind cleanly. */
	kfree(state);
	kfree(cursor);

	return NULL;
}
14522
549e2bfb
CK
14523static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc,
14524 struct intel_crtc_state *crtc_state)
14525{
14526 int i;
14527 struct intel_scaler *intel_scaler;
14528 struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;
14529
14530 for (i = 0; i < intel_crtc->num_scalers; i++) {
14531 intel_scaler = &scaler_state->scalers[i];
14532 intel_scaler->in_use = 0;
549e2bfb
CK
14533 intel_scaler->mode = PS_SCALER_MODE_DYN;
14534 }
14535
14536 scaler_state->scaler_id = -1;
14537}
14538
/*
 * Create and register the crtc for @pipe together with its primary and
 * cursor planes, initialize scalers (gen9+), color management and the
 * plane/pipe lookup tables. Failures free everything and return silently.
 */
static void intel_crtc_init(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc;
	struct intel_crtc_state *crtc_state = NULL;
	struct drm_plane *primary = NULL;
	struct drm_plane *cursor = NULL;
	int ret;

	intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
	if (intel_crtc == NULL)
		return;

	crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
	if (!crtc_state)
		goto fail;
	intel_crtc->config = crtc_state;
	intel_crtc->base.state = &crtc_state->base;
	crtc_state->base.crtc = &intel_crtc->base;

	/* initialize shared scalers; pipe C only has one on SKL-class hw */
	if (INTEL_INFO(dev)->gen >= 9) {
		if (pipe == PIPE_C)
			intel_crtc->num_scalers = 1;
		else
			intel_crtc->num_scalers = SKL_NUM_SCALERS;

		skl_init_scalers(dev, intel_crtc, crtc_state);
	}

	primary = intel_primary_plane_create(dev, pipe);
	if (!primary)
		goto fail;

	cursor = intel_cursor_plane_create(dev, pipe);
	if (!cursor)
		goto fail;

	ret = drm_crtc_init_with_planes(dev, &intel_crtc->base, primary,
					cursor, &intel_crtc_funcs,
					"pipe %c", pipe_name(pipe));
	if (ret)
		goto fail;

	/*
	 * On gen2/3 only plane A can do fbc, but the panel fitter and lvds port
	 * is hooked to pipe B. Hence we want plane A feeding pipe B.
	 */
	intel_crtc->pipe = pipe;
	intel_crtc->plane = pipe;
	if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4) {
		DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
		intel_crtc->plane = !pipe;
	}

	/* ~0 = "never programmed", forces the first cursor update through. */
	intel_crtc->cursor_base = ~0;
	intel_crtc->cursor_cntl = ~0;
	intel_crtc->cursor_size = ~0;

	intel_crtc->wm.cxsr_allowed = true;

	/* Each plane/pipe slot must be claimed exactly once. */
	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
	       dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
	dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
	dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;

	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);

	intel_color_init(&intel_crtc->base);

	WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
	return;

fail:
	/* intel_plane_destroy() and kfree() both tolerate NULL. */
	intel_plane_destroy(primary);
	intel_plane_destroy(cursor);
	kfree(crtc_state);
	kfree(intel_crtc);
}
14618
752aa88a
JB
14619enum pipe intel_get_pipe_from_connector(struct intel_connector *connector)
14620{
14621 struct drm_encoder *encoder = connector->base.encoder;
6e9f798d 14622 struct drm_device *dev = connector->base.dev;
752aa88a 14623
51fd371b 14624 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
752aa88a 14625
d3babd3f 14626 if (!encoder || WARN_ON(!encoder->crtc))
752aa88a
JB
14627 return INVALID_PIPE;
14628
14629 return to_intel_crtc(encoder->crtc)->pipe;
14630}
14631
08d7b3d1 14632int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
05394f39 14633 struct drm_file *file)
08d7b3d1 14634{
08d7b3d1 14635 struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
7707e653 14636 struct drm_crtc *drmmode_crtc;
c05422d5 14637 struct intel_crtc *crtc;
08d7b3d1 14638
7707e653 14639 drmmode_crtc = drm_crtc_find(dev, pipe_from_crtc_id->crtc_id);
71240ed2 14640 if (!drmmode_crtc)
3f2c2057 14641 return -ENOENT;
08d7b3d1 14642
7707e653 14643 crtc = to_intel_crtc(drmmode_crtc);
c05422d5 14644 pipe_from_crtc_id->pipe = crtc->pipe;
08d7b3d1 14645
c05422d5 14646 return 0;
08d7b3d1
CW
14647}
14648
66a9278e 14649static int intel_encoder_clones(struct intel_encoder *encoder)
79e53945 14650{
66a9278e
DV
14651 struct drm_device *dev = encoder->base.dev;
14652 struct intel_encoder *source_encoder;
79e53945 14653 int index_mask = 0;
79e53945
JB
14654 int entry = 0;
14655
b2784e15 14656 for_each_intel_encoder(dev, source_encoder) {
bc079e8b 14657 if (encoders_cloneable(encoder, source_encoder))
66a9278e
DV
14658 index_mask |= (1 << entry);
14659
79e53945
JB
14660 entry++;
14661 }
4ef69c7a 14662
79e53945
JB
14663 return index_mask;
14664}
14665
4d302442
CW
14666static bool has_edp_a(struct drm_device *dev)
14667{
14668 struct drm_i915_private *dev_priv = dev->dev_private;
14669
14670 if (!IS_MOBILE(dev))
14671 return false;
14672
14673 if ((I915_READ(DP_A) & DP_DETECTED) == 0)
14674 return false;
14675
e3589908 14676 if (IS_GEN5(dev) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
4d302442
CW
14677 return false;
14678
14679 return true;
14680}
14681
/*
 * Report whether a VGA/CRT connector should be registered on this
 * device, ruling out platforms that lack it and strap/VBT overrides.
 */
static bool intel_crt_present(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* No CRT on gen9+, HSW/BDW ULT or CHV. */
	if (INTEL_INFO(dev)->gen >= 9)
		return false;

	if (IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
		return false;

	if (IS_CHERRYVIEW(dev))
		return false;

	/* LPT-H can fuse off CRT via SFUSE_STRAP. */
	if (HAS_PCH_LPT_H(dev) && I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
		return false;

	/* DDI E can't be used if DDI A requires 4 lanes */
	if (HAS_DDI(dev) && I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
		return false;

	/* Finally, the VBT may declare there is no integrated CRT. */
	if (!dev_priv->vbt.int_crt_support)
		return false;

	return true;
}
14707
/*
 * Probe and register all display outputs (encoders/connectors) for the
 * device. Probe order matters: LVDS/eDP before CRT, and within each
 * platform branch ports are probed in their strap-documented order.
 * Finishes by filling in possible_crtcs/possible_clones for every
 * encoder and initializing the PCH reference clock.
 */
static void intel_setup_outputs(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	bool dpd_is_edp = false;

	/*
	 * intel_edp_init_connector() depends on this completing first, to
	 * prevent the registration of both eDP and LVDS and the incorrect
	 * sharing of the PPS.
	 */
	intel_lvds_init(dev);

	if (intel_crt_present(dev))
		intel_crt_init(dev);

	if (IS_BROXTON(dev)) {
		/*
		 * FIXME: Broxton doesn't support port detection via the
		 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
		 * detect the ports.
		 */
		intel_ddi_init(dev, PORT_A);
		intel_ddi_init(dev, PORT_B);
		intel_ddi_init(dev, PORT_C);

		intel_dsi_init(dev);
	} else if (HAS_DDI(dev)) {
		int found;

		/*
		 * Haswell uses DDI functions to detect digital outputs.
		 * On SKL pre-D0 the strap isn't connected, so we assume
		 * it's there.
		 */
		found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
		/* WaIgnoreDDIAStrap: skl */
		if (found || IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
			intel_ddi_init(dev, PORT_A);

		/* DDI B, C and D detection is indicated by the SFUSE_STRAP
		 * register */
		found = I915_READ(SFUSE_STRAP);

		if (found & SFUSE_STRAP_DDIB_DETECTED)
			intel_ddi_init(dev, PORT_B);
		if (found & SFUSE_STRAP_DDIC_DETECTED)
			intel_ddi_init(dev, PORT_C);
		if (found & SFUSE_STRAP_DDID_DETECTED)
			intel_ddi_init(dev, PORT_D);
		/*
		 * On SKL we don't have a way to detect DDI-E so we rely on VBT.
		 */
		if ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) &&
		    (dev_priv->vbt.ddi_port_info[PORT_E].supports_dp ||
		     dev_priv->vbt.ddi_port_info[PORT_E].supports_dvi ||
		     dev_priv->vbt.ddi_port_info[PORT_E].supports_hdmi))
			intel_ddi_init(dev, PORT_E);

	} else if (HAS_PCH_SPLIT(dev)) {
		int found;
		dpd_is_edp = intel_dp_is_edp(dev, PORT_D);

		if (has_edp_a(dev))
			intel_dp_init(dev, DP_A, PORT_A);

		if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
			/* PCH SDVOB multiplex with HDMIB */
			found = intel_sdvo_init(dev, PCH_SDVOB, PORT_B);
			if (!found)
				intel_hdmi_init(dev, PCH_HDMIB, PORT_B);
			if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
				intel_dp_init(dev, PCH_DP_B, PORT_B);
		}

		if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
			intel_hdmi_init(dev, PCH_HDMIC, PORT_C);

		if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
			intel_hdmi_init(dev, PCH_HDMID, PORT_D);

		if (I915_READ(PCH_DP_C) & DP_DETECTED)
			intel_dp_init(dev, PCH_DP_C, PORT_C);

		if (I915_READ(PCH_DP_D) & DP_DETECTED)
			intel_dp_init(dev, PCH_DP_D, PORT_D);
	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		bool has_edp, has_port;

		/*
		 * The DP_DETECTED bit is the latched state of the DDC
		 * SDA pin at boot. However since eDP doesn't require DDC
		 * (no way to plug in a DP->HDMI dongle) the DDC pins for
		 * eDP ports may have been muxed to an alternate function.
		 * Thus we can't rely on the DP_DETECTED bit alone to detect
		 * eDP ports. Consult the VBT as well as DP_DETECTED to
		 * detect eDP ports.
		 *
		 * Sadly the straps seem to be missing sometimes even for HDMI
		 * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
		 * and VBT for the presence of the port. Additionally we can't
		 * trust the port type the VBT declares as we've seen at least
		 * HDMI ports that the VBT claim are DP or eDP.
		 */
		has_edp = intel_dp_is_edp(dev, PORT_B);
		has_port = intel_bios_is_port_present(dev_priv, PORT_B);
		if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
			has_edp &= intel_dp_init(dev, VLV_DP_B, PORT_B);
		if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
			intel_hdmi_init(dev, VLV_HDMIB, PORT_B);

		has_edp = intel_dp_is_edp(dev, PORT_C);
		has_port = intel_bios_is_port_present(dev_priv, PORT_C);
		if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port)
			has_edp &= intel_dp_init(dev, VLV_DP_C, PORT_C);
		if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
			intel_hdmi_init(dev, VLV_HDMIC, PORT_C);

		if (IS_CHERRYVIEW(dev)) {
			/*
			 * eDP not supported on port D,
			 * so no need to worry about it
			 */
			has_port = intel_bios_is_port_present(dev_priv, PORT_D);
			if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port)
				intel_dp_init(dev, CHV_DP_D, PORT_D);
			if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port)
				intel_hdmi_init(dev, CHV_HDMID, PORT_D);
		}

		intel_dsi_init(dev);
	} else if (!IS_GEN2(dev) && !IS_PINEVIEW(dev)) {
		bool found = false;

		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOB\n");
			found = intel_sdvo_init(dev, GEN3_SDVOB, PORT_B);
			if (!found && IS_G4X(dev)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
				intel_hdmi_init(dev, GEN4_HDMIB, PORT_B);
			}

			if (!found && IS_G4X(dev))
				intel_dp_init(dev, DP_B, PORT_B);
		}

		/* Before G4X SDVOC doesn't have its own detect register */

		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOC\n");
			found = intel_sdvo_init(dev, GEN3_SDVOC, PORT_C);
		}

		if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {

			if (IS_G4X(dev)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
				intel_hdmi_init(dev, GEN4_HDMIC, PORT_C);
			}
			if (IS_G4X(dev))
				intel_dp_init(dev, DP_C, PORT_C);
		}

		if (IS_G4X(dev) &&
		    (I915_READ(DP_D) & DP_DETECTED))
			intel_dp_init(dev, DP_D, PORT_D);
	} else if (IS_GEN2(dev))
		intel_dvo_init(dev);

	if (SUPPORTS_TV(dev))
		intel_tv_init(dev);

	intel_psr_init(dev);

	/* Now that all encoders exist, compute the cloning masks. */
	for_each_intel_encoder(dev, encoder) {
		encoder->base.possible_crtcs = encoder->crtc_mask;
		encoder->base.possible_clones =
			intel_encoder_clones(encoder);
	}

	intel_init_pch_refclk(dev);

	drm_helper_move_panel_connectors_to_head(dev);
}
14892
/*
 * Framebuffer destructor: unregister the fb, then drop the fb's
 * reference on the backing GEM object under struct_mutex (which also
 * guards the framebuffer_references bookkeeping).
 */
static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
	struct drm_device *dev = fb->dev;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);

	drm_framebuffer_cleanup(fb);
	mutex_lock(&dev->struct_mutex);
	/* Underflow here would mean an unbalanced init/destroy. */
	WARN_ON(!intel_fb->obj->framebuffer_references--);
	drm_gem_object_unreference(&intel_fb->obj->base);
	mutex_unlock(&dev->struct_mutex);
	kfree(intel_fb);
}
14905
14906static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
05394f39 14907 struct drm_file *file,
79e53945
JB
14908 unsigned int *handle)
14909{
14910 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
05394f39 14911 struct drm_i915_gem_object *obj = intel_fb->obj;
79e53945 14912
cc917ab4
CW
14913 if (obj->userptr.mm) {
14914 DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n");
14915 return -EINVAL;
14916 }
14917
05394f39 14918 return drm_gem_handle_create(file, &obj->base, handle);
79e53945
JB
14919}
14920
/*
 * DIRTYFB ioctl hook: flush frontbuffer rendering on the fb's backing
 * object so manually-updated (non-pageflip) content reaches the screen.
 * The clip rects are currently ignored; the whole object is flushed.
 */
static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
					struct drm_file *file,
					unsigned flags, unsigned color,
					struct drm_clip_rect *clips,
					unsigned num_clips)
{
	struct drm_device *dev = fb->dev;
	struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;

	mutex_lock(&dev->struct_mutex);
	intel_fb_obj_flush(obj, false, ORIGIN_DIRTYFB);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
14937
/* Framebuffer vfuncs for user-created (addfb) framebuffers. */
static const struct drm_framebuffer_funcs intel_fb_funcs = {
	.destroy = intel_user_framebuffer_destroy,
	.create_handle = intel_user_framebuffer_create_handle,
	.dirty = intel_user_framebuffer_dirty,
};
14943
/*
 * Return the maximum framebuffer pitch in bytes for the given tiling
 * modifier and pixel format on this device generation.
 */
static
u32 intel_fb_pitch_limit(struct drm_device *dev, uint64_t fb_modifier,
			 uint32_t pixel_format)
{
	u32 gen = INTEL_INFO(dev)->gen;

	if (gen >= 9) {
		int cpp = drm_format_plane_cpp(pixel_format, 0);

		/* "The stride in bytes must not exceed the size of 8K
		 * pixels and 32K bytes."
		 */
		return min(8192 * cpp, 32768);
	} else if (gen >= 5 && !IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
		return 32*1024;
	} else if (gen >= 4) {
		if (fb_modifier == I915_FORMAT_MOD_X_TILED)
			return 16*1024;
		else
			return 32*1024;
	} else if (gen >= 3) {
		if (fb_modifier == I915_FORMAT_MOD_X_TILED)
			return 8*1024;
		else
			return 16*1024;
	} else {
		/* XXX DSPC is limited to 4k tiled */
		return 8*1024;
	}
}
14974
/*
 * Validate a user-supplied fb description against @obj and the hardware
 * (tiling/modifier match, stride alignment and limits, per-gen pixel
 * format support, size), then fill in and register @intel_fb.
 * Caller must hold struct_mutex. Returns 0 or a negative errno.
 */
static int intel_framebuffer_init(struct drm_device *dev,
				  struct intel_framebuffer *intel_fb,
				  struct drm_mode_fb_cmd2 *mode_cmd,
				  struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	unsigned int aligned_height;
	int ret;
	u32 pitch_limit, stride_alignment;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
		/* Enforce that fb modifier and tiling mode match, but only for
		 * X-tiled. This is needed for FBC. */
		if (!!(obj->tiling_mode == I915_TILING_X) !=
		    !!(mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED)) {
			DRM_DEBUG("tiling_mode doesn't match fb modifier\n");
			return -EINVAL;
		}
	} else {
		/* Legacy addfb: derive the modifier from the object's tiling. */
		if (obj->tiling_mode == I915_TILING_X)
			mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
		else if (obj->tiling_mode == I915_TILING_Y) {
			DRM_DEBUG("No Y tiling for legacy addfb\n");
			return -EINVAL;
		}
	}

	/* Passed in modifier sanity checking. */
	switch (mode_cmd->modifier[0]) {
	case I915_FORMAT_MOD_Y_TILED:
	case I915_FORMAT_MOD_Yf_TILED:
		if (INTEL_INFO(dev)->gen < 9) {
			DRM_DEBUG("Unsupported tiling 0x%llx!\n",
				  mode_cmd->modifier[0]);
			return -EINVAL;
		}
		/* fall through - Y/Yf tiling is accepted on gen9+ */
	case DRM_FORMAT_MOD_NONE:
	case I915_FORMAT_MOD_X_TILED:
		break;
	default:
		DRM_DEBUG("Unsupported fb modifier 0x%llx!\n",
			  mode_cmd->modifier[0]);
		return -EINVAL;
	}

	/* Stride alignment requirement depends on modifier and format. */
	stride_alignment = intel_fb_stride_alignment(dev_priv,
						     mode_cmd->modifier[0],
						     mode_cmd->pixel_format);
	if (mode_cmd->pitches[0] & (stride_alignment - 1)) {
		DRM_DEBUG("pitch (%d) must be at least %u byte aligned\n",
			  mode_cmd->pitches[0], stride_alignment);
		return -EINVAL;
	}

	pitch_limit = intel_fb_pitch_limit(dev, mode_cmd->modifier[0],
					   mode_cmd->pixel_format);
	if (mode_cmd->pitches[0] > pitch_limit) {
		DRM_DEBUG("%s pitch (%u) must be at less than %d\n",
			  mode_cmd->modifier[0] != DRM_FORMAT_MOD_NONE ?
			  "tiled" : "linear",
			  mode_cmd->pitches[0], pitch_limit);
		return -EINVAL;
	}

	/* X-tiled scanout must use the fence stride programmed on the BO. */
	if (mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED &&
	    mode_cmd->pitches[0] != obj->stride) {
		DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n",
			  mode_cmd->pitches[0], obj->stride);
		return -EINVAL;
	}

	/* Reject formats not supported by any plane early. */
	switch (mode_cmd->pixel_format) {
	case DRM_FORMAT_C8:
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ARGB8888:
		break;
	case DRM_FORMAT_XRGB1555:
		if (INTEL_INFO(dev)->gen > 3) {
			DRM_DEBUG("unsupported pixel format: %s\n",
				  drm_get_format_name(mode_cmd->pixel_format));
			return -EINVAL;
		}
		break;
	case DRM_FORMAT_ABGR8888:
		if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) &&
		    INTEL_INFO(dev)->gen < 9) {
			DRM_DEBUG("unsupported pixel format: %s\n",
				  drm_get_format_name(mode_cmd->pixel_format));
			return -EINVAL;
		}
		break;
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_XBGR2101010:
		if (INTEL_INFO(dev)->gen < 4) {
			DRM_DEBUG("unsupported pixel format: %s\n",
				  drm_get_format_name(mode_cmd->pixel_format));
			return -EINVAL;
		}
		break;
	case DRM_FORMAT_ABGR2101010:
		if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
			DRM_DEBUG("unsupported pixel format: %s\n",
				  drm_get_format_name(mode_cmd->pixel_format));
			return -EINVAL;
		}
		break;
	case DRM_FORMAT_YUYV:
	case DRM_FORMAT_UYVY:
	case DRM_FORMAT_YVYU:
	case DRM_FORMAT_VYUY:
		if (INTEL_INFO(dev)->gen < 5) {
			DRM_DEBUG("unsupported pixel format: %s\n",
				  drm_get_format_name(mode_cmd->pixel_format));
			return -EINVAL;
		}
		break;
	default:
		DRM_DEBUG("unsupported pixel format: %s\n",
			  drm_get_format_name(mode_cmd->pixel_format));
		return -EINVAL;
	}

	/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
	if (mode_cmd->offsets[0] != 0)
		return -EINVAL;

	aligned_height = intel_fb_align_height(dev, mode_cmd->height,
					       mode_cmd->pixel_format,
					       mode_cmd->modifier[0]);
	/* FIXME drm helper for size checks (especially planar formats)? */
	if (obj->base.size < aligned_height * mode_cmd->pitches[0])
		return -EINVAL;

	drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
	intel_fb->obj = obj;

	intel_fill_fb_info(dev_priv, &intel_fb->base);

	ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
	if (ret) {
		DRM_ERROR("framebuffer init failed %d\n", ret);
		return ret;
	}

	/* Balanced by the decrement in intel_user_framebuffer_destroy(). */
	intel_fb->obj->framebuffer_references++;

	return 0;
}
15128
79e53945
JB
/*
 * .fb_create hook (struct drm_mode_config_funcs): create a framebuffer
 * from a userspace GEM handle.
 *
 * On success the returned framebuffer keeps the reference taken by the
 * object lookup; on failure that reference is dropped again and an
 * ERR_PTR is returned.
 */
static struct drm_framebuffer *
intel_user_framebuffer_create(struct drm_device *dev,
			      struct drm_file *filp,
			      const struct drm_mode_fb_cmd2 *user_mode_cmd)
{
	struct drm_framebuffer *fb;
	struct drm_i915_gem_object *obj;
	/* Writable local copy of the const userspace mode_cmd. */
	struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;

	obj = to_intel_bo(drm_gem_object_lookup(filp, mode_cmd.handles[0]));
	/*
	 * NOTE(review): this NULL test only works if ->base is the first
	 * member of struct drm_i915_gem_object; "if (!obj)" would be the
	 * clearer spelling -- confirm the struct layout before changing.
	 */
	if (&obj->base == NULL)
		return ERR_PTR(-ENOENT);

	fb = intel_framebuffer_create(dev, &mode_cmd, obj);
	/* Creation failed: drop the lookup reference we still own. */
	if (IS_ERR(fb))
		drm_gem_object_unreference_unlocked(&obj->base);

	return fb;
}
15148
#ifndef CONFIG_DRM_FBDEV_EMULATION
/*
 * Stub for kernels built without fbdev emulation, so intel_mode_funcs
 * can unconditionally reference this symbol.
 */
static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
{
}
#endif
15154
/*
 * Mode config callbacks: userspace framebuffer creation, fbdev poll
 * notification, and the atomic modeset check/commit/state entry points.
 */
static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,
	.output_poll_changed = intel_fbdev_output_poll_changed,
	.atomic_check = intel_atomic_check,
	.atomic_commit = intel_atomic_commit,
	.atomic_state_alloc = intel_atomic_state_alloc,
	.atomic_state_clear = intel_atomic_state_clear,
};
15163
88212941
ID
/**
 * intel_init_display_hooks - initialize the display modesetting hooks
 * @dev_priv: device private
 *
 * Fills in dev_priv->display with per-platform function pointers:
 * CRTC enable/disable and clock computation, pipe config readout,
 * display/CD clock queries, FDI link training and page-flip queueing.
 * The if/else chains are ordered most-specific first, so the order of
 * the checks matters (e.g. gen9+ must be matched before HAS_DDI).
 */
void intel_init_display_hooks(struct drm_i915_private *dev_priv)
{
	/* CRTC modeset hooks, most specific platform check first. */
	if (INTEL_INFO(dev_priv)->gen >= 9) {
		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			skylake_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock =
			haswell_crtc_compute_clock;
		dev_priv->display.crtc_enable = haswell_crtc_enable;
		dev_priv->display.crtc_disable = haswell_crtc_disable;
	} else if (HAS_DDI(dev_priv)) {
		/* HSW/BDW: same hooks as gen9 except plane config readout. */
		dev_priv->display.get_pipe_config = haswell_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			ironlake_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock =
			haswell_crtc_compute_clock;
		dev_priv->display.crtc_enable = haswell_crtc_enable;
		dev_priv->display.crtc_disable = haswell_crtc_disable;
	} else if (HAS_PCH_SPLIT(dev_priv)) {
		dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			ironlake_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock =
			ironlake_crtc_compute_clock;
		dev_priv->display.crtc_enable = ironlake_crtc_enable;
		dev_priv->display.crtc_disable = ironlake_crtc_disable;
	} else if (IS_CHERRYVIEW(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock;
		dev_priv->display.crtc_enable = valleyview_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (IS_VALLEYVIEW(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock;
		dev_priv->display.crtc_enable = valleyview_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (IS_G4X(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (IS_PINEVIEW(dev_priv)) {
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else if (!IS_GEN2(dev_priv)) {
		/* Remaining gen3/gen4 platforms. */
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	} else {
		/* gen2: only the clock computation differs. */
		dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
		dev_priv->display.get_initial_plane_config =
			i9xx_get_initial_plane_config;
		dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock;
		dev_priv->display.crtc_enable = i9xx_crtc_enable;
		dev_priv->display.crtc_disable = i9xx_crtc_disable;
	}

	/* Returns the core display clock speed */
	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
		dev_priv->display.get_display_clock_speed =
			skylake_get_display_clock_speed;
	else if (IS_BROXTON(dev_priv))
		dev_priv->display.get_display_clock_speed =
			broxton_get_display_clock_speed;
	else if (IS_BROADWELL(dev_priv))
		dev_priv->display.get_display_clock_speed =
			broadwell_get_display_clock_speed;
	else if (IS_HASWELL(dev_priv))
		dev_priv->display.get_display_clock_speed =
			haswell_get_display_clock_speed;
	else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		dev_priv->display.get_display_clock_speed =
			valleyview_get_display_clock_speed;
	else if (IS_GEN5(dev_priv))
		dev_priv->display.get_display_clock_speed =
			ilk_get_display_clock_speed;
	else if (IS_I945G(dev_priv) || IS_BROADWATER(dev_priv) ||
		 IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv))
		dev_priv->display.get_display_clock_speed =
			i945_get_display_clock_speed;
	else if (IS_GM45(dev_priv))
		dev_priv->display.get_display_clock_speed =
			gm45_get_display_clock_speed;
	else if (IS_CRESTLINE(dev_priv))
		dev_priv->display.get_display_clock_speed =
			i965gm_get_display_clock_speed;
	else if (IS_PINEVIEW(dev_priv))
		dev_priv->display.get_display_clock_speed =
			pnv_get_display_clock_speed;
	else if (IS_G33(dev_priv) || IS_G4X(dev_priv))
		dev_priv->display.get_display_clock_speed =
			g33_get_display_clock_speed;
	else if (IS_I915G(dev_priv))
		dev_priv->display.get_display_clock_speed =
			i915_get_display_clock_speed;
	else if (IS_I945GM(dev_priv) || IS_845G(dev_priv))
		dev_priv->display.get_display_clock_speed =
			i9xx_misc_get_display_clock_speed;
	else if (IS_I915GM(dev_priv))
		dev_priv->display.get_display_clock_speed =
			i915gm_get_display_clock_speed;
	else if (IS_I865G(dev_priv))
		dev_priv->display.get_display_clock_speed =
			i865_get_display_clock_speed;
	else if (IS_I85X(dev_priv))
		dev_priv->display.get_display_clock_speed =
			i85x_get_display_clock_speed;
	else { /* 830 */
		WARN(!IS_I830(dev_priv), "Unknown platform. Assuming 133 MHz CDCLK\n");
		dev_priv->display.get_display_clock_speed =
			i830_get_display_clock_speed;
	}

	/* FDI link training: only set on ILK..BDW; others leave it NULL. */
	if (IS_GEN5(dev_priv)) {
		dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
	} else if (IS_GEN6(dev_priv)) {
		dev_priv->display.fdi_link_train = gen6_fdi_link_train;
	} else if (IS_IVYBRIDGE(dev_priv)) {
		/* FIXME: detect B0+ stepping and use auto training */
		dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
	} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
		dev_priv->display.fdi_link_train = hsw_fdi_link_train;
	}

	/* cdclk reprogramming hooks for platforms with a changeable cdclk. */
	if (IS_BROADWELL(dev_priv)) {
		dev_priv->display.modeset_commit_cdclk =
			broadwell_modeset_commit_cdclk;
		dev_priv->display.modeset_calc_cdclk =
			broadwell_modeset_calc_cdclk;
	} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
		dev_priv->display.modeset_commit_cdclk =
			valleyview_modeset_commit_cdclk;
		dev_priv->display.modeset_calc_cdclk =
			valleyview_modeset_calc_cdclk;
	} else if (IS_BROXTON(dev_priv)) {
		dev_priv->display.modeset_commit_cdclk =
			bxt_modeset_commit_cdclk;
		dev_priv->display.modeset_calc_cdclk =
			bxt_modeset_calc_cdclk;
	} else if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
		dev_priv->display.modeset_commit_cdclk =
			skl_modeset_commit_cdclk;
		dev_priv->display.modeset_calc_cdclk =
			skl_modeset_calc_cdclk;
	}

	/* CS-based page flip implementation, selected per gen. */
	switch (INTEL_INFO(dev_priv)->gen) {
	case 2:
		dev_priv->display.queue_flip = intel_gen2_queue_flip;
		break;

	case 3:
		dev_priv->display.queue_flip = intel_gen3_queue_flip;
		break;

	case 4:
	case 5:
		dev_priv->display.queue_flip = intel_gen4_queue_flip;
		break;

	case 6:
		dev_priv->display.queue_flip = intel_gen6_queue_flip;
		break;
	case 7:
	case 8: /* FIXME(BDW): Check that the gen8 RCS flip works. */
		dev_priv->display.queue_flip = intel_gen7_queue_flip;
		break;
	case 9:
		/* Drop through - unsupported since execlist only. */
	default:
		/* Default just returns -ENODEV to indicate unsupported */
		dev_priv->display.queue_flip = intel_default_queue_flip;
	}
}
15355
b690e96c
JB
15356/*
15357 * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
15358 * resume, or other times. This quirk makes sure that's the case for
15359 * affected systems.
15360 */
0206e353 15361static void quirk_pipea_force(struct drm_device *dev)
b690e96c
JB
15362{
15363 struct drm_i915_private *dev_priv = dev->dev_private;
15364
15365 dev_priv->quirks |= QUIRK_PIPEA_FORCE;
bc0daf48 15366 DRM_INFO("applying pipe a force quirk\n");
b690e96c
JB
15367}
15368
b6b5d049
VS
15369static void quirk_pipeb_force(struct drm_device *dev)
15370{
15371 struct drm_i915_private *dev_priv = dev->dev_private;
15372
15373 dev_priv->quirks |= QUIRK_PIPEB_FORCE;
15374 DRM_INFO("applying pipe b force quirk\n");
15375}
15376
435793df
KP
/*
 * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
 */
static void quirk_ssc_force_disable(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	/* Flag checked wherever the driver decides on LVDS SSC usage. */
	dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
	DRM_INFO("applying lvds SSC disable quirk\n");
}
15386
/*
 * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
 * brightness value
 */
static void quirk_invert_brightness(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	/* Flag consumed by the backlight code when scaling brightness. */
	dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
	DRM_INFO("applying inverted panel brightness quirk\n");
}
15397
9c72cc6f
SD
/* Some VBT's incorrectly indicate no backlight is present */
static void quirk_backlight_present(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	/* Overrides the (broken) VBT backlight-presence information. */
	dev_priv->quirks |= QUIRK_BACKLIGHT_PRESENT;
	DRM_INFO("applying backlight present quirk\n");
}
15405
b690e96c
JB
/*
 * One PCI-ID keyed quirk: matched against the device's PCI IDs in
 * intel_init_quirks(); PCI_ANY_ID wildcards a subsystem field.
 */
struct intel_quirk {
	int device;			/* PCI device ID */
	int subsystem_vendor;		/* PCI subsystem vendor, or PCI_ANY_ID */
	int subsystem_device;		/* PCI subsystem device, or PCI_ANY_ID */
	void (*hook)(struct drm_device *dev);	/* quirk to apply on match */
};
15412
5f85f176
EE
/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
struct intel_dmi_quirk {
	void (*hook)(struct drm_device *dev);	/* quirk to apply on DMI match */
	const struct dmi_system_id (*dmi_id_list)[];	/* NULL-terminated match list */
};

/* DMI callback: log which machine matched; return 1 so dmi_check_system()
 * counts the match. */
static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
{
	DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
	return 1;
}

/* DMI-keyed quirk table, applied by intel_init_quirks(). */
static const struct intel_dmi_quirk intel_dmi_quirks[] = {
	{
		.dmi_id_list = &(const struct dmi_system_id[]) {
			{
				.callback = intel_dmi_reverse_brightness,
				.ident = "NCR Corporation",
				.matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
					    DMI_MATCH(DMI_PRODUCT_NAME, ""),
				},
			},
			{ }  /* terminating entry */
		},
		.hook = quirk_invert_brightness,
	},
};
15440
/*
 * PCI-ID keyed quirk table, walked by intel_init_quirks().  Entries are
 * { device, subsystem_vendor, subsystem_device, hook }; PCI_ANY_ID
 * wildcards a subsystem field.
 */
static struct intel_quirk intel_quirks[] = {
	/* Toshiba Protege R-205, S-209 needs pipe A force quirk */
	{ 0x2592, 0x1179, 0x0001, quirk_pipea_force },

	/* ThinkPad T60 needs pipe A force quirk (bug #16494) */
	{ 0x2782, 0x17aa, 0x201a, quirk_pipea_force },

	/* 830 needs to leave pipe A & dpll A up */
	{ 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },

	/* 830 needs to leave pipe B & dpll B up */
	{ 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipeb_force },

	/* Lenovo U160 cannot use SSC on LVDS */
	{ 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },

	/* Sony Vaio Y cannot use SSC on LVDS */
	{ 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },

	/* Acer Aspire 5734Z must invert backlight brightness */
	{ 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },

	/* Acer/eMachines G725 */
	{ 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },

	/* Acer/eMachines e725 */
	{ 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },

	/* Acer/Packard Bell NCL20 */
	{ 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },

	/* Acer Aspire 4736Z */
	{ 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },

	/* Acer Aspire 5336 */
	{ 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },

	/* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */
	{ 0x0a06, 0x1025, 0x0a11, quirk_backlight_present },

	/* Acer C720 Chromebook (Core i3 4005U) */
	{ 0x0a16, 0x1025, 0x0a11, quirk_backlight_present },

	/* Apple Macbook 2,1 (Core 2 T7400) */
	{ 0x27a2, 0x8086, 0x7270, quirk_backlight_present },

	/* Apple Macbook 4,1 */
	{ 0x2a02, 0x106b, 0x00a1, quirk_backlight_present },

	/* Toshiba CB35 Chromebook (Celeron 2955U) */
	{ 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },

	/* HP Chromebook 14 (Celeron 2955U) */
	{ 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },

	/* Dell Chromebook 11 */
	{ 0x0a06, 0x1028, 0x0a35, quirk_backlight_present },

	/* Dell Chromebook 11 (2015 version) */
	{ 0x0a16, 0x1028, 0x0a35, quirk_backlight_present },
};
15502
15503static void intel_init_quirks(struct drm_device *dev)
15504{
15505 struct pci_dev *d = dev->pdev;
15506 int i;
15507
15508 for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
15509 struct intel_quirk *q = &intel_quirks[i];
15510
15511 if (d->device == q->device &&
15512 (d->subsystem_vendor == q->subsystem_vendor ||
15513 q->subsystem_vendor == PCI_ANY_ID) &&
15514 (d->subsystem_device == q->subsystem_device ||
15515 q->subsystem_device == PCI_ANY_ID))
15516 q->hook(dev);
15517 }
5f85f176
EE
15518 for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
15519 if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
15520 intel_dmi_quirks[i].hook(dev);
15521 }
b690e96c
JB
15522}
15523
9cce37f4
JB
/* Disable the VGA plane that we never use */
static void i915_disable_vga(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u8 sr1;
	i915_reg_t vga_reg = i915_vgacntrl_reg(dev);

	/* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
	/* Set SR01 bit 5 via the legacy VGA sequencer index/data ports. */
	outb(SR01, VGA_SR_INDEX);
	sr1 = inb(VGA_SR_DATA);
	outb(sr1 | 1<<5, VGA_SR_DATA);
	vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
	udelay(300);

	I915_WRITE(vga_reg, VGA_DISP_DISABLE);
	/* Posting read to flush the disable before returning. */
	POSTING_READ(vga_reg);
}
15542
f817586c
DV
/*
 * Hardware (re)initialization for modeset: refresh the cached cdclk,
 * seed the atomic copy of it, and bring up clock gating and GT
 * power saving.  Called at init and on resume paths.
 */
void intel_modeset_init_hw(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* Re-read the current cdclk before copying it below. */
	intel_update_cdclk(dev);

	dev_priv->atomic_cdclk_freq = dev_priv->cdclk_freq;

	intel_init_clock_gating(dev);
	intel_enable_gt_powersave(dev_priv);
}
15554
d93c0372
MR
15555/*
15556 * Calculate what we think the watermarks should be for the state we've read
15557 * out of the hardware and then immediately program those watermarks so that
15558 * we ensure the hardware settings match our internal state.
15559 *
15560 * We can calculate what we think WM's should be by creating a duplicate of the
15561 * current state (which was constructed during hardware readout) and running it
15562 * through the atomic check code to calculate new watermark values in the
15563 * state object.
15564 */
15565static void sanitize_watermarks(struct drm_device *dev)
15566{
15567 struct drm_i915_private *dev_priv = to_i915(dev);
15568 struct drm_atomic_state *state;
15569 struct drm_crtc *crtc;
15570 struct drm_crtc_state *cstate;
15571 struct drm_modeset_acquire_ctx ctx;
15572 int ret;
15573 int i;
15574
15575 /* Only supported on platforms that use atomic watermark design */
ed4a6a7c 15576 if (!dev_priv->display.optimize_watermarks)
d93c0372
MR
15577 return;
15578
15579 /*
15580 * We need to hold connection_mutex before calling duplicate_state so
15581 * that the connector loop is protected.
15582 */
15583 drm_modeset_acquire_init(&ctx, 0);
15584retry:
0cd1262d 15585 ret = drm_modeset_lock_all_ctx(dev, &ctx);
d93c0372
MR
15586 if (ret == -EDEADLK) {
15587 drm_modeset_backoff(&ctx);
15588 goto retry;
15589 } else if (WARN_ON(ret)) {
0cd1262d 15590 goto fail;
d93c0372
MR
15591 }
15592
15593 state = drm_atomic_helper_duplicate_state(dev, &ctx);
15594 if (WARN_ON(IS_ERR(state)))
0cd1262d 15595 goto fail;
d93c0372 15596
ed4a6a7c
MR
15597 /*
15598 * Hardware readout is the only time we don't want to calculate
15599 * intermediate watermarks (since we don't trust the current
15600 * watermarks).
15601 */
15602 to_intel_atomic_state(state)->skip_intermediate_wm = true;
15603
d93c0372
MR
15604 ret = intel_atomic_check(dev, state);
15605 if (ret) {
15606 /*
15607 * If we fail here, it means that the hardware appears to be
15608 * programmed in a way that shouldn't be possible, given our
15609 * understanding of watermark requirements. This might mean a
15610 * mistake in the hardware readout code or a mistake in the
15611 * watermark calculations for a given platform. Raise a WARN
15612 * so that this is noticeable.
15613 *
15614 * If this actually happens, we'll have to just leave the
15615 * BIOS-programmed watermarks untouched and hope for the best.
15616 */
15617 WARN(true, "Could not determine valid watermarks for inherited state\n");
0cd1262d 15618 goto fail;
d93c0372
MR
15619 }
15620
15621 /* Write calculated watermark values back */
d93c0372
MR
15622 for_each_crtc_in_state(state, crtc, cstate, i) {
15623 struct intel_crtc_state *cs = to_intel_crtc_state(cstate);
15624
ed4a6a7c
MR
15625 cs->wm.need_postvbl_update = true;
15626 dev_priv->display.optimize_watermarks(cs);
d93c0372
MR
15627 }
15628
15629 drm_atomic_state_free(state);
0cd1262d 15630fail:
d93c0372
MR
15631 drm_modeset_drop_locks(&ctx);
15632 drm_modeset_acquire_fini(&ctx);
15633}
15634
79e53945
JB
/*
 * One-time modeset initialization: set up the DRM mode config limits and
 * hooks, apply quirks, create CRTCs/planes, take over the BIOS display
 * state and sanitize watermarks.  Bails out early on devices with no
 * display pipes.
 */
void intel_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	int sprite, ret;
	enum pipe pipe;
	struct intel_crtc *crtc;

	drm_mode_config_init(dev);

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;

	dev->mode_config.preferred_depth = 24;
	dev->mode_config.prefer_shadow = 1;

	dev->mode_config.allow_fb_modifiers = true;

	dev->mode_config.funcs = &intel_mode_funcs;

	intel_init_quirks(dev);

	intel_init_pm(dev);

	/* Display-less device: nothing further to set up. */
	if (INTEL_INFO(dev)->num_pipes == 0)
		return;

	/*
	 * There may be no VBT; and if the BIOS enabled SSC we can
	 * just keep using it to avoid unnecessary flicker. Whereas if the
	 * BIOS isn't using it, don't assume it will work even if the VBT
	 * indicates as much.
	 */
	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
		bool bios_lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
					    DREF_SSC1_ENABLE);

		if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
			DRM_DEBUG_KMS("SSC %sabled by BIOS, overriding VBT which says %sabled\n",
				      bios_lvds_use_ssc ? "en" : "dis",
				      dev_priv->vbt.lvds_use_ssc ? "en" : "dis");
			dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
		}
	}

	/* Maximum framebuffer dimensions by hardware generation. */
	if (IS_GEN2(dev)) {
		dev->mode_config.max_width = 2048;
		dev->mode_config.max_height = 2048;
	} else if (IS_GEN3(dev)) {
		dev->mode_config.max_width = 4096;
		dev->mode_config.max_height = 4096;
	} else {
		dev->mode_config.max_width = 8192;
		dev->mode_config.max_height = 8192;
	}

	/* Cursor size limits, also per platform. */
	if (IS_845G(dev) || IS_I865G(dev)) {
		dev->mode_config.cursor_width = IS_845G(dev) ? 64 : 512;
		dev->mode_config.cursor_height = 1023;
	} else if (IS_GEN2(dev)) {
		dev->mode_config.cursor_width = GEN2_CURSOR_WIDTH;
		dev->mode_config.cursor_height = GEN2_CURSOR_HEIGHT;
	} else {
		dev->mode_config.cursor_width = MAX_CURSOR_WIDTH;
		dev->mode_config.cursor_height = MAX_CURSOR_HEIGHT;
	}

	dev->mode_config.fb_base = ggtt->mappable_base;

	DRM_DEBUG_KMS("%d display pipe%s available.\n",
		      INTEL_INFO(dev)->num_pipes,
		      INTEL_INFO(dev)->num_pipes > 1 ? "s" : "");

	/* Create a CRTC per pipe and all of its sprite planes. */
	for_each_pipe(dev_priv, pipe) {
		intel_crtc_init(dev, pipe);
		for_each_sprite(dev_priv, pipe, sprite) {
			ret = intel_plane_init(dev, pipe, sprite);
			/* Sprite init failure is non-fatal; just log it. */
			if (ret)
				DRM_DEBUG_KMS("pipe %c sprite %c init failed: %d\n",
					      pipe_name(pipe), sprite_name(pipe, sprite), ret);
		}
	}

	intel_update_czclk(dev_priv);
	intel_update_cdclk(dev);

	intel_shared_dpll_init(dev);

	if (dev_priv->max_cdclk_freq == 0)
		intel_update_max_cdclk(dev);

	/* Just disable it once at startup */
	i915_disable_vga(dev);
	intel_setup_outputs(dev);

	/* Read out and sanitize the state the BIOS left behind. */
	drm_modeset_lock_all(dev);
	intel_modeset_setup_hw_state(dev);
	drm_modeset_unlock_all(dev);

	for_each_intel_crtc(dev, crtc) {
		struct intel_initial_plane_config plane_config = {};

		if (!crtc->active)
			continue;

		/*
		 * Note that reserving the BIOS fb up front prevents us
		 * from stuffing other stolen allocations like the ring
		 * on top. This prevents some ugliness at boot time, and
		 * can even allow for smooth boot transitions if the BIOS
		 * fb is large enough for the active pipe configuration.
		 */
		dev_priv->display.get_initial_plane_config(crtc,
							   &plane_config);

		/*
		 * If the fb is shared between multiple heads, we'll
		 * just get the first one.
		 */
		intel_find_initial_plane_obj(crtc, &plane_config);
	}

	/*
	 * Make sure hardware watermarks really match the state we read out.
	 * Note that we need to do this after reconstructing the BIOS fb's
	 * since the watermark calculation done here will use pstate->fb.
	 */
	sanitize_watermarks(dev);
}
15764
7fad798e
DV
/*
 * Force pipe A on for QUIRK_PIPEA_FORCE machines by doing one load-detect
 * cycle on the first analog (CRT) connector found.  Silently does nothing
 * if no analog output exists.
 */
static void intel_enable_pipe_a(struct drm_device *dev)
{
	struct intel_connector *connector;
	struct drm_connector *crt = NULL;
	struct intel_load_detect_pipe load_detect_temp;
	struct drm_modeset_acquire_ctx *ctx = dev->mode_config.acquire_ctx;

	/* We can't just switch on the pipe A, we need to set things up with a
	 * proper mode and output configuration. As a gross hack, enable pipe A
	 * by enabling the load detect pipe once. */
	for_each_intel_connector(dev, connector) {
		if (connector->encoder->type == INTEL_OUTPUT_ANALOG) {
			crt = &connector->base;
			break;
		}
	}

	if (!crt)
		return;

	/* One acquire/release pair leaves the pipe bits we need enabled. */
	if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, ctx))
		intel_release_load_detect_pipe(crt, &load_detect_temp, ctx);
}
15788
fa555837
DV
15789static bool
15790intel_check_plane_mapping(struct intel_crtc *crtc)
15791{
7eb552ae
BW
15792 struct drm_device *dev = crtc->base.dev;
15793 struct drm_i915_private *dev_priv = dev->dev_private;
649636ef 15794 u32 val;
fa555837 15795
7eb552ae 15796 if (INTEL_INFO(dev)->num_pipes == 1)
fa555837
DV
15797 return true;
15798
649636ef 15799 val = I915_READ(DSPCNTR(!crtc->plane));
fa555837
DV
15800
15801 if ((val & DISPLAY_PLANE_ENABLE) &&
15802 (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
15803 return false;
15804
15805 return true;
15806}
15807
02e93c35
VS
/* Return true if at least one encoder is currently attached to @crtc. */
static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct intel_encoder *encoder;

	/* Returning from the first iteration: only existence matters. */
	for_each_encoder_on_crtc(dev, &crtc->base, encoder)
		return true;

	return false;
}
15818
dd756198
VS
/* Return true if at least one connector is attached to @encoder. */
static bool intel_encoder_has_connectors(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct intel_connector *connector;

	/* Returning from the first iteration: only existence matters. */
	for_each_connector_on_encoder(dev, &encoder->base, connector)
		return true;

	return false;
}
15829
24929352
DV
/*
 * Fix up whatever state the BIOS (or a previous driver instance) left on
 * this CRTC so it is consistent with what the driver expects: clear debug
 * frame-start delays, reset vblank bookkeeping, kill stray planes, repair
 * a wrong plane->pipe mapping, and disable the pipe if nothing drives it.
 */
static void intel_sanitize_crtc(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;

	/* Clear any frame start delays used for debugging left by the BIOS */
	if (!transcoder_is_dsi(cpu_transcoder)) {
		i915_reg_t reg = PIPECONF(cpu_transcoder);

		I915_WRITE(reg,
			   I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
	}

	/* restore vblank interrupts to correct state */
	drm_crtc_vblank_reset(&crtc->base);
	if (crtc->active) {
		struct intel_plane *plane;

		drm_crtc_vblank_on(&crtc->base);

		/* Disable everything but the primary plane */
		for_each_intel_plane_on_crtc(dev, crtc, plane) {
			if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
				continue;

			plane->disable_plane(&plane->base, &crtc->base);
		}
	}

	/* We need to sanitize the plane -> pipe mapping first because this will
	 * disable the crtc (and hence change the state) if it is wrong. Note
	 * that gen4+ has a fixed plane -> pipe mapping. */
	if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) {
		/*
		 * NOTE(review): "bool" here stores the plane enum; this only
		 * works because pre-gen4 has at most planes A/B -- confirm
		 * before touching.
		 */
		bool plane;

		DRM_DEBUG_KMS("[CRTC:%d:%s] wrong plane connection detected!\n",
			      crtc->base.base.id, crtc->base.name);

		/* Pipe has the wrong plane attached and the plane is active.
		 * Temporarily change the plane mapping and disable everything
		 * ... */
		plane = crtc->plane;
		to_intel_plane_state(crtc->base.primary->state)->visible = true;
		crtc->plane = !plane;
		intel_crtc_disable_noatomic(&crtc->base);
		crtc->plane = plane;
	}

	if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
	    crtc->pipe == PIPE_A && !crtc->active) {
		/* BIOS forgot to enable pipe A, this mostly happens after
		 * resume. Force-enable the pipe to fix this, the update_dpms
		 * call below we restore the pipe to the right state, but leave
		 * the required bits on. */
		intel_enable_pipe_a(dev);
	}

	/* Adjust the state of the output pipe according to whether we
	 * have active connectors/encoders. */
	if (crtc->active && !intel_crtc_has_encoders(crtc))
		intel_crtc_disable_noatomic(&crtc->base);

	if (crtc->active || HAS_GMCH_DISPLAY(dev)) {
		/*
		 * We start out with underrun reporting disabled to avoid races.
		 * For correct bookkeeping mark this on active crtcs.
		 *
		 * Also on gmch platforms we dont have any hardware bits to
		 * disable the underrun reporting. Which means we need to start
		 * out with underrun reporting disabled also on inactive pipes,
		 * since otherwise we'll complain about the garbage we read when
		 * e.g. coming up after runtime pm.
		 *
		 * No protection against concurrent access is required - at
		 * worst a fifo underrun happens which also sets this to false.
		 */
		crtc->cpu_fifo_underrun_disabled = true;
		crtc->pch_fifo_underrun_disabled = true;
	}
}
15911
/*
 * Fix up an encoder whose hardware-readout state is inconsistent: an
 * encoder with connectors but no active pipe is manually disabled and
 * all of its connectors are clamped to DPMS off.
 */
static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
	struct intel_connector *connector;
	struct drm_device *dev = encoder->base.dev;

	/* We need to check both for a crtc link (meaning that the
	 * encoder is active and trying to read from a pipe) and the
	 * pipe itself being active. */
	bool has_active_crtc = encoder->base.crtc &&
		to_intel_crtc(encoder->base.crtc)->active;

	if (intel_encoder_has_connectors(encoder) && !has_active_crtc) {
		DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
			      encoder->base.base.id,
			      encoder->base.name);

		/* Connector is active, but has no active pipe. This is
		 * fallout from our resume register restoring. Disable
		 * the encoder manually again. */
		if (encoder->base.crtc) {
			DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
				      encoder->base.base.id,
				      encoder->base.name);
			encoder->disable(encoder);
			/* post_disable is optional; call it only if provided. */
			if (encoder->post_disable)
				encoder->post_disable(encoder);
		}
		encoder->base.crtc = NULL;

		/* Inconsistent output/port/pipe state happens presumably due to
		 * a bug in one of the get_hw_state functions. Or someplace else
		 * in our code, like the register restore mess on resume. Clamp
		 * things to off as a safer default. */
		for_each_intel_connector(dev, connector) {
			if (connector->encoder != encoder)
				continue;
			connector->base.dpms = DRM_MODE_DPMS_OFF;
			connector->base.encoder = NULL;
		}
	}
	/* Enabled encoders without active connectors will be fixed in
	 * the crtc fixup. */
}
15955
04098753 15956void i915_redisable_vga_power_on(struct drm_device *dev)
0fde901f
KM
15957{
15958 struct drm_i915_private *dev_priv = dev->dev_private;
f0f59a00 15959 i915_reg_t vga_reg = i915_vgacntrl_reg(dev);
0fde901f 15960
04098753
ID
15961 if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
15962 DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
15963 i915_disable_vga(dev);
15964 }
15965}
15966
/*
 * Paranoid wrapper around i915_redisable_vga_power_on(): only touches the
 * VGA register when the VGA power well is already up, taking a temporary
 * power reference for the duration of the access.
 */
void i915_redisable_vga(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* This function can be called both from intel_modeset_setup_hw_state or
	 * at a very early point in our resume sequence, where the power well
	 * structures are not yet restored. Since this function is at a very
	 * paranoid "someone might have enabled VGA while we were not looking"
	 * level, just check if the power well is enabled instead of trying to
	 * follow the "don't touch the power well if we don't need it" policy
	 * the rest of the driver uses. */
	if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_VGA))
		return;

	i915_redisable_vga_power_on(dev);

	/* Drop the reference taken by _get_if_enabled() above. */
	intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);
}
15985
f9cd7b88 15986static bool primary_get_hw_state(struct intel_plane *plane)
98ec7739 15987{
f9cd7b88 15988 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
98ec7739 15989
f9cd7b88 15990 return I915_READ(DSPCNTR(plane->plane)) & DISPLAY_PLANE_ENABLE;
d032ffa0
ML
15991}
15992
f9cd7b88
VS
15993/* FIXME read out full plane state for all planes */
15994static void readout_plane_state(struct intel_crtc *crtc)
d032ffa0 15995{
b26d3ea3 15996 struct drm_plane *primary = crtc->base.primary;
f9cd7b88 15997 struct intel_plane_state *plane_state =
b26d3ea3 15998 to_intel_plane_state(primary->state);
d032ffa0 15999
19b8d387 16000 plane_state->visible = crtc->active &&
b26d3ea3
ML
16001 primary_get_hw_state(to_intel_plane(primary));
16002
16003 if (plane_state->visible)
16004 crtc->base.state->plane_mask |= 1 << drm_plane_index(primary);
98ec7739
VS
16005}
16006
/*
 * Read the current display hardware state into the software state
 * structures: per-crtc pipe config, primary plane visibility, shared DPLL
 * state, encoder and connector routing, and finally the crtc modes needed
 * to keep the atomic core consistent. No hardware is modified here;
 * sanitizing happens afterwards in intel_modeset_setup_hw_state().
 */
static void intel_modeset_readout_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	struct intel_connector *connector;
	int i;

	dev_priv->active_crtcs = 0;

	/* Pipe/crtc state: wipe the stale software state and rebuild it from
	 * the hardware via get_pipe_config(). */
	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state = crtc->config;
		int pixclk = 0;

		__drm_atomic_helper_crtc_destroy_state(&crtc_state->base);
		memset(crtc_state, 0, sizeof(*crtc_state));
		crtc_state->base.crtc = &crtc->base;

		crtc_state->base.active = crtc_state->base.enable =
			dev_priv->display.get_pipe_config(crtc, crtc_state);

		crtc->base.enabled = crtc_state->base.enable;
		crtc->active = crtc_state->base.active;

		if (crtc_state->base.active) {
			dev_priv->active_crtcs |= 1 << crtc->pipe;

			/* Track the pixel rate for cdclk bookkeeping; only
			 * platforms with modeset_calc_cdclk need it. */
			if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
				pixclk = ilk_pipe_pixel_rate(crtc_state);
			else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
				pixclk = crtc_state->base.adjusted_mode.crtc_clock;
			else
				WARN_ON(dev_priv->display.modeset_calc_cdclk);

			/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
			if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
				pixclk = DIV_ROUND_UP(pixclk * 100, 95);
		}

		dev_priv->min_pixclk[crtc->pipe] = pixclk;

		readout_plane_state(crtc);

		DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n",
			      crtc->base.base.id, crtc->base.name,
			      crtc->active ? "enabled" : "disabled");
	}

	/* Shared DPLLs: read on/off state and recompute which crtcs
	 * reference each PLL. */
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		pll->on = pll->funcs.get_hw_state(dev_priv, pll,
						  &pll->config.hw_state);
		pll->config.crtc_mask = 0;
		for_each_intel_crtc(dev, crtc) {
			if (crtc->active && crtc->config->shared_dpll == pll)
				pll->config.crtc_mask |= 1 << crtc->pipe;
		}
		pll->active_mask = pll->config.crtc_mask;

		DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
			      pll->name, pll->config.crtc_mask, pll->on);
	}

	/* Encoders: link each active encoder to the crtc of the pipe the
	 * hardware says it is driving. */
	for_each_intel_encoder(dev, encoder) {
		pipe = 0;

		if (encoder->get_hw_state(encoder, &pipe)) {
			crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
			encoder->base.crtc = &crtc->base;
			encoder->get_config(encoder, crtc->config);
		} else {
			encoder->base.crtc = NULL;
		}

		DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
			      encoder->base.base.id,
			      encoder->base.name,
			      encoder->base.crtc ? "enabled" : "disabled",
			      pipe_name(pipe));
	}

	/* Connectors: set DPMS and the connector/encoder masks on the crtc
	 * state to match the read-out routing. */
	for_each_intel_connector(dev, connector) {
		if (connector->get_hw_state(connector)) {
			connector->base.dpms = DRM_MODE_DPMS_ON;

			encoder = connector->encoder;
			connector->base.encoder = &encoder->base;

			if (encoder->base.crtc &&
			    encoder->base.crtc->state->active) {
				/*
				 * This has to be done during hardware readout
				 * because anything calling .crtc_disable may
				 * rely on the connector_mask being accurate.
				 */
				encoder->base.crtc->state->connector_mask |=
					1 << drm_connector_index(&connector->base);
				encoder->base.crtc->state->encoder_mask |=
					1 << drm_encoder_index(&encoder->base);
			}

		} else {
			connector->base.dpms = DRM_MODE_DPMS_OFF;
			connector->base.encoder = NULL;
		}
		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
			      connector->base.base.id,
			      connector->base.name,
			      connector->base.encoder ? "enabled" : "disabled");
	}

	/* Finally derive the crtc modes from the read-out pipe configs. */
	for_each_intel_crtc(dev, crtc) {
		crtc->base.hwmode = crtc->config->base.adjusted_mode;

		memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
		if (crtc->base.state->active) {
			intel_mode_from_pipe_config(&crtc->base.mode, crtc->config);
			intel_mode_from_pipe_config(&crtc->base.state->adjusted_mode, crtc->config);
			WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode));

			/*
			 * The initial mode needs to be set in order to keep
			 * the atomic core happy. It wants a valid mode if the
			 * crtc's enabled, so we do the above call.
			 *
			 * At this point some state updated by the connectors
			 * in their ->detect() callback has not run yet, so
			 * no recalculation can be done yet.
			 *
			 * Even if we could do a recalculation and modeset
			 * right now it would cause a double modeset if
			 * fbdev or userspace chooses a different initial mode.
			 *
			 * If that happens, someone indicated they wanted a
			 * mode change, which means it's safe to do a full
			 * recalculation.
			 */
			crtc->base.state->mode.private_flags = I915_MODE_FLAG_INHERITED;

			drm_calc_timestamping_constants(&crtc->base, &crtc->base.hwmode);
			update_scanline_offset(crtc);
		}

		intel_pipe_config_sanity_check(dev_priv, crtc->config);
	}
}
16155
/* Scan out the current hw modeset state,
 * and sanitizes it to the current state
 */
static void
intel_modeset_setup_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	int i;

	/* First read everything out of the hardware... */
	intel_modeset_readout_hw_state(dev);

	/* HW state is read out, now we need to sanitize this mess. */
	for_each_intel_encoder(dev, encoder) {
		intel_sanitize_encoder(encoder);
	}

	for_each_pipe(dev_priv, pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
		intel_sanitize_crtc(crtc);
		intel_dump_pipe_config(crtc, crtc->config,
				       "[setup_hw_state]");
	}

	intel_modeset_update_connector_atomic_state(dev);

	/* Shut down any shared DPLL that is on but not referenced by any
	 * active crtc after sanitizing. */
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		if (!pll->on || pll->active_mask)
			continue;

		DRM_DEBUG_KMS("%s enabled but not in use, disabling\n", pll->name);

		pll->funcs.disable(dev_priv, pll);
		pll->on = false;
	}

	/* Watermark readout is platform specific. */
	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
		vlv_wm_get_hw_state(dev);
	else if (IS_GEN9(dev))
		skl_wm_get_hw_state(dev);
	else if (HAS_PCH_SPLIT(dev))
		ilk_wm_get_hw_state(dev);

	for_each_intel_crtc(dev, crtc) {
		unsigned long put_domains;

		/* NOTE(review): a non-zero return here means stale power
		 * domain references were found; warn and drop them. */
		put_domains = modeset_get_crtc_power_domains(&crtc->base, crtc->config);
		if (WARN_ON(put_domains))
			modeset_put_power_domains(dev_priv, put_domains);
	}
	/* Drop the boot-time "everything powered" reference. */
	intel_display_set_init_power(dev_priv, false);

	intel_fbc_init_pipe_state(dev_priv);
}
7d0bc1ea 16214
/*
 * Restore the display state on resume: re-read and sanitize the hardware
 * state once, then replay the atomic state saved at suspend time
 * (dev_priv->modeset_restore_state). Lock acquisition uses the standard
 * drm_modeset EDEADLK backoff-and-retry loop; the setup step only runs on
 * the first successful pass.
 */
void intel_display_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state = dev_priv->modeset_restore_state;
	struct drm_modeset_acquire_ctx ctx;
	int ret;
	bool setup = false;

	/* Claim the saved state; it is consumed (or freed) below. */
	dev_priv->modeset_restore_state = NULL;

	/*
	 * This is a cludge because with real atomic modeset mode_config.mutex
	 * won't be taken. Unfortunately some probed state like
	 * audio_codec_enable is still protected by mode_config.mutex, so lock
	 * it here for now.
	 */
	mutex_lock(&dev->mode_config.mutex);
	drm_modeset_acquire_init(&ctx, 0);

retry:
	ret = drm_modeset_lock_all_ctx(dev, &ctx);

	/* Only run HW readout/sanitize once, even across retries. */
	if (ret == 0 && !setup) {
		setup = true;

		intel_modeset_setup_hw_state(dev);
		i915_redisable_vga(dev);
	}

	if (ret == 0 && state) {
		struct drm_crtc_state *crtc_state;
		struct drm_crtc *crtc;
		int i;

		state->acquire_ctx = &ctx;

		/* ignore any reset values/BIOS leftovers in the WM registers */
		to_intel_atomic_state(state)->skip_intermediate_wm = true;

		for_each_crtc_in_state(state, crtc, crtc_state, i) {
			/*
			 * Force recalculation even if we restore
			 * current state. With fast modeset this may not result
			 * in a modeset when the state is compatible.
			 */
			crtc_state->mode_changed = true;
		}

		ret = drm_atomic_commit(state);
	}

	if (ret == -EDEADLK) {
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	mutex_unlock(&dev->mode_config.mutex);

	if (ret) {
		DRM_ERROR("Restoring old state failed with %i\n", ret);
		/* Commit failed, so we still own the state and must free it. */
		drm_atomic_state_free(state);
	}
}
16280
/*
 * GEM-dependent modeset init: bring up power/clock state, the overlay, and
 * pin+fence any framebuffers that were allocated during early init (it was
 * too early to pin them at allocation time). On pin failure the boot fb is
 * detached from the crtc rather than leaving a half-configured plane.
 */
void intel_modeset_gem_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *c;
	struct drm_i915_gem_object *obj;
	int ret;

	intel_init_gt_powersave(dev_priv);

	intel_modeset_init_hw(dev);

	intel_setup_overlay(dev_priv);

	/*
	 * Make sure any fbs we allocated at startup are properly
	 * pinned & fenced. When we do the allocation it's too early
	 * for this.
	 */
	for_each_crtc(dev, c) {
		obj = intel_fb_obj(c->primary->fb);
		if (obj == NULL)
			continue;

		mutex_lock(&dev->struct_mutex);
		ret = intel_pin_and_fence_fb_obj(c->primary->fb,
						 c->primary->state->rotation);
		mutex_unlock(&dev->struct_mutex);
		if (ret) {
			/* Drop the boot fb from this crtc entirely so we do
			 * not scan out an unpinned buffer. */
			DRM_ERROR("failed to pin boot fb on pipe %d\n",
				  to_intel_crtc(c)->pipe);
			drm_framebuffer_unreference(c->primary->fb);
			c->primary->fb = NULL;
			c->primary->crtc = c->primary->state->crtc = NULL;
			update_state_fb(c->primary);
			c->state->plane_mask &= ~(1 << drm_plane_index(c->primary));
		}
	}
}
16319
/*
 * Late connector registration hook: registers the backlight device for
 * this connector. Returns 0 on success or a negative error code.
 */
int intel_connector_register(struct drm_connector *connector)
{
	struct intel_connector *intel_connector = to_intel_connector(connector);

	/* Propagate the backlight registration result directly; there is
	 * no other cleanup to perform on failure. */
	return intel_backlight_device_register(intel_connector);
}
16334
/*
 * Connector unregistration hook: tears down the backlight device and the
 * panel backlight state, mirroring intel_connector_register().
 */
void intel_connector_unregister(struct drm_connector *connector)
{
	intel_backlight_device_unregister(to_intel_connector(connector));
	intel_panel_destroy_backlight(connector);
}
16342
79e53945
JB
16343void intel_modeset_cleanup(struct drm_device *dev)
16344{
652c393a 16345 struct drm_i915_private *dev_priv = dev->dev_private;
652c393a 16346
dc97997a 16347 intel_disable_gt_powersave(dev_priv);
2eb5252e 16348
fd0c0642
DV
16349 /*
16350 * Interrupts and polling as the first thing to avoid creating havoc.
2eb5252e 16351 * Too much stuff here (turning of connectors, ...) would
fd0c0642
DV
16352 * experience fancy races otherwise.
16353 */
2aeb7d3a 16354 intel_irq_uninstall(dev_priv);
eb21b92b 16355
fd0c0642
DV
16356 /*
16357 * Due to the hpd irq storm handling the hotplug work can re-arm the
16358 * poll handlers. Hence disable polling after hpd handling is shut down.
16359 */
f87ea761 16360 drm_kms_helper_poll_fini(dev);
fd0c0642 16361
723bfd70
JB
16362 intel_unregister_dsm_handler();
16363
c937ab3e 16364 intel_fbc_global_disable(dev_priv);
69341a5e 16365
1630fe75
CW
16366 /* flush any delayed tasks or pending work */
16367 flush_scheduled_work();
16368
79e53945 16369 drm_mode_config_cleanup(dev);
4d7bb011 16370
1ee8da6d 16371 intel_cleanup_overlay(dev_priv);
ae48434c 16372
dc97997a 16373 intel_cleanup_gt_powersave(dev_priv);
f5949141
DV
16374
16375 intel_teardown_gmbus(dev);
79e53945
JB
16376}
16377
df0e9248
CW
16378void intel_connector_attach_encoder(struct intel_connector *connector,
16379 struct intel_encoder *encoder)
16380{
16381 connector->encoder = encoder;
16382 drm_mode_connector_attach_encoder(&connector->base,
16383 &encoder->base);
79e53945 16384}
28d52043
DA
16385
16386/*
16387 * set vga decode state - true == enable VGA decode
16388 */
16389int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
16390{
16391 struct drm_i915_private *dev_priv = dev->dev_private;
a885b3cc 16392 unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
28d52043
DA
16393 u16 gmch_ctrl;
16394
75fa041d
CW
16395 if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) {
16396 DRM_ERROR("failed to read control word\n");
16397 return -EIO;
16398 }
16399
c0cc8a55
CW
16400 if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !state)
16401 return 0;
16402
28d52043
DA
16403 if (state)
16404 gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
16405 else
16406 gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
75fa041d
CW
16407
16408 if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) {
16409 DRM_ERROR("failed to write control word\n");
16410 return -EIO;
16411 }
16412
28d52043
DA
16413 return 0;
16414}
/*
 * Snapshot of display-related registers captured at GPU error time by
 * intel_display_capture_error_state() and printed by
 * intel_display_print_error_state().
 */
struct intel_display_error_state {

	/* HSW_PWR_WELL_DRIVER value (HSW/BDW only). */
	u32 power_well_driver;

	/* Number of valid entries in transcoder[]; num_pipes plus one for
	 * eDP on DDI platforms. */
	int num_transcoders;

	/* Per-pipe cursor registers. */
	struct intel_cursor_error_state {
		u32 control;
		u32 position;
		u32 base;
		u32 size;
	} cursor[I915_MAX_PIPES];

	/* Per-pipe state; registers are only read when the pipe's power
	 * domain was on at capture time. */
	struct intel_pipe_error_state {
		bool power_domain_on;
		u32 source;
		u32 stat;
	} pipe[I915_MAX_PIPES];

	/* Per-pipe primary plane registers; not all fields are captured on
	 * every generation. */
	struct intel_plane_error_state {
		u32 control;
		u32 stride;
		u32 size;
		u32 pos;
		u32 addr;
		u32 surface;
		u32 tile_offset;
	} plane[I915_MAX_PIPES];

	/* Per-transcoder timing registers (A, B, C, eDP). */
	struct intel_transcoder_error_state {
		bool power_domain_on;
		enum transcoder cpu_transcoder;

		u32 conf;

		u32 htotal;
		u32 hblank;
		u32 hsync;
		u32 vtotal;
		u32 vblank;
		u32 vsync;
	} transcoder[4];
};
16459
/*
 * Capture the display register state for error-state dumps. Allocated with
 * GFP_ATOMIC since this can run from the error-handling path; returns NULL
 * when there are no pipes or allocation fails. Power-gated registers are
 * skipped (power_domain_on records whether each block was readable).
 * Caller owns the returned allocation.
 */
struct intel_display_error_state *
intel_display_capture_error_state(struct drm_i915_private *dev_priv)
{
	struct intel_display_error_state *error;
	int transcoders[] = {
		TRANSCODER_A,
		TRANSCODER_B,
		TRANSCODER_C,
		TRANSCODER_EDP,
	};
	int i;

	if (INTEL_INFO(dev_priv)->num_pipes == 0)
		return NULL;

	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (error == NULL)
		return NULL;

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);

	for_each_pipe(dev_priv, i) {
		/* Don't touch registers whose power domain is off — reading
		 * them would be garbage (or worse). */
		error->pipe[i].power_domain_on =
			__intel_display_power_is_enabled(dev_priv,
							 POWER_DOMAIN_PIPE(i));
		if (!error->pipe[i].power_domain_on)
			continue;

		error->cursor[i].control = I915_READ(CURCNTR(i));
		error->cursor[i].position = I915_READ(CURPOS(i));
		error->cursor[i].base = I915_READ(CURBASE(i));

		error->plane[i].control = I915_READ(DSPCNTR(i));
		error->plane[i].stride = I915_READ(DSPSTRIDE(i));
		/* DSPSIZE/DSPPOS only exist on gen <= 3. */
		if (INTEL_GEN(dev_priv) <= 3) {
			error->plane[i].size = I915_READ(DSPSIZE(i));
			error->plane[i].pos = I915_READ(DSPPOS(i));
		}
		if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
			error->plane[i].addr = I915_READ(DSPADDR(i));
		if (INTEL_GEN(dev_priv) >= 4) {
			error->plane[i].surface = I915_READ(DSPSURF(i));
			error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
		}

		error->pipe[i].source = I915_READ(PIPESRC(i));

		if (HAS_GMCH_DISPLAY(dev_priv))
			error->pipe[i].stat = I915_READ(PIPESTAT(i));
	}

	/* Note: this does not include DSI transcoders. */
	error->num_transcoders = INTEL_INFO(dev_priv)->num_pipes;
	if (HAS_DDI(dev_priv))
		error->num_transcoders++; /* Account for eDP. */

	for (i = 0; i < error->num_transcoders; i++) {
		enum transcoder cpu_transcoder = transcoders[i];

		error->transcoder[i].power_domain_on =
			__intel_display_power_is_enabled(dev_priv,
				POWER_DOMAIN_TRANSCODER(cpu_transcoder));
		if (!error->transcoder[i].power_domain_on)
			continue;

		error->transcoder[i].cpu_transcoder = cpu_transcoder;

		error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
		error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
		error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
		error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
		error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
		error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
		error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
	}

	return error;
}
16539
#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)

/*
 * Pretty-print a display error state previously captured by
 * intel_display_capture_error_state() into the error-state buffer @m.
 * Safe to call with @error == NULL (prints nothing).
 */
void
intel_display_print_error_state(struct drm_i915_error_state_buf *m,
				struct drm_device *dev,
				struct intel_display_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	if (!error)
		return;

	err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes);
	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		err_printf(m, "PWR_WELL_CTL2: %08x\n",
			   error->power_well_driver);
	for_each_pipe(dev_priv, i) {
		err_printf(m, "Pipe [%d]:\n", i);
		err_printf(m, "  Power: %s\n",
			   onoff(error->pipe[i].power_domain_on));
		err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
		err_printf(m, "  STAT: %08x\n", error->pipe[i].stat);

		err_printf(m, "Plane [%d]:\n", i);
		err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
		err_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
		/* Mirror the generation gates used at capture time so we
		 * only print fields that were actually read. */
		if (INTEL_INFO(dev)->gen <= 3) {
			err_printf(m, "  SIZE: %08x\n", error->plane[i].size);
			err_printf(m, "  POS: %08x\n", error->plane[i].pos);
		}
		if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
			err_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
		if (INTEL_INFO(dev)->gen >= 4) {
			err_printf(m, "  SURF: %08x\n", error->plane[i].surface);
			err_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
		}

		err_printf(m, "Cursor [%d]:\n", i);
		err_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
		err_printf(m, "  POS: %08x\n", error->cursor[i].position);
		err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
	}

	for (i = 0; i < error->num_transcoders; i++) {
		err_printf(m, "CPU transcoder: %s\n",
			   transcoder_name(error->transcoder[i].cpu_transcoder));
		err_printf(m, "  Power: %s\n",
			   onoff(error->transcoder[i].power_domain_on));
		err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
		err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
		err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
		err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
		err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
		err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
		err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
	}
}