]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - drivers/gpu/drm/i915/intel_display.c
drm/i915: Remove intel_finish_page_flip_plane.
[mirror_ubuntu-bionic-kernel.git] / drivers / gpu / drm / i915 / intel_display.c
CommitLineData
79e53945
JB
1/*
2 * Copyright © 2006-2007 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 */
26
618563e3 27#include <linux/dmi.h>
c1c7af60
JB
28#include <linux/module.h>
29#include <linux/input.h>
79e53945 30#include <linux/i2c.h>
7662c8bd 31#include <linux/kernel.h>
5a0e3ad6 32#include <linux/slab.h>
9cce37f4 33#include <linux/vgaarb.h>
e0dac65e 34#include <drm/drm_edid.h>
760285e7 35#include <drm/drmP.h>
79e53945 36#include "intel_drv.h"
760285e7 37#include <drm/i915_drm.h>
79e53945 38#include "i915_drv.h"
db18b6a6 39#include "intel_dsi.h"
e5510fac 40#include "i915_trace.h"
319c1d42 41#include <drm/drm_atomic.h>
c196e1d6 42#include <drm/drm_atomic_helper.h>
760285e7
DH
43#include <drm/drm_dp_helper.h>
44#include <drm/drm_crtc_helper.h>
465c120c
MR
45#include <drm/drm_plane_helper.h>
46#include <drm/drm_rect.h>
c0f372b3 47#include <linux/dma_remapping.h>
fd8e058a
AG
48#include <linux/reservation.h>
49#include <linux/dma-buf.h>
79e53945 50
465c120c 51/* Primary plane formats for gen <= 3 */
568db4f2 52static const uint32_t i8xx_primary_formats[] = {
67fe7dc5
DL
53 DRM_FORMAT_C8,
54 DRM_FORMAT_RGB565,
465c120c 55 DRM_FORMAT_XRGB1555,
67fe7dc5 56 DRM_FORMAT_XRGB8888,
465c120c
MR
57};
58
59/* Primary plane formats for gen >= 4 */
568db4f2 60static const uint32_t i965_primary_formats[] = {
6c0fd451
DL
61 DRM_FORMAT_C8,
62 DRM_FORMAT_RGB565,
63 DRM_FORMAT_XRGB8888,
64 DRM_FORMAT_XBGR8888,
65 DRM_FORMAT_XRGB2101010,
66 DRM_FORMAT_XBGR2101010,
67};
68
69static const uint32_t skl_primary_formats[] = {
67fe7dc5
DL
70 DRM_FORMAT_C8,
71 DRM_FORMAT_RGB565,
72 DRM_FORMAT_XRGB8888,
465c120c 73 DRM_FORMAT_XBGR8888,
67fe7dc5 74 DRM_FORMAT_ARGB8888,
465c120c
MR
75 DRM_FORMAT_ABGR8888,
76 DRM_FORMAT_XRGB2101010,
465c120c 77 DRM_FORMAT_XBGR2101010,
ea916ea0
KM
78 DRM_FORMAT_YUYV,
79 DRM_FORMAT_YVYU,
80 DRM_FORMAT_UYVY,
81 DRM_FORMAT_VYUY,
465c120c
MR
82};
83
3d7d6510
MR
84/* Cursor formats */
85static const uint32_t intel_cursor_formats[] = {
86 DRM_FORMAT_ARGB8888,
87};
88
f1f644dc 89static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
5cec258b 90 struct intel_crtc_state *pipe_config);
18442d08 91static void ironlake_pch_clock_get(struct intel_crtc *crtc,
5cec258b 92 struct intel_crtc_state *pipe_config);
f1f644dc 93
eb1bfe80
JB
94static int intel_framebuffer_init(struct drm_device *dev,
95 struct intel_framebuffer *ifb,
96 struct drm_mode_fb_cmd2 *mode_cmd,
97 struct drm_i915_gem_object *obj);
5b18e57c
DV
98static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc);
99static void intel_set_pipe_timings(struct intel_crtc *intel_crtc);
bc58be60 100static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc);
29407aab 101static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
f769cd24
VK
102 struct intel_link_m_n *m_n,
103 struct intel_link_m_n *m2_n2);
29407aab 104static void ironlake_set_pipeconf(struct drm_crtc *crtc);
229fca97 105static void haswell_set_pipeconf(struct drm_crtc *crtc);
391bf048 106static void haswell_set_pipemisc(struct drm_crtc *crtc);
d288f65f 107static void vlv_prepare_pll(struct intel_crtc *crtc,
5cec258b 108 const struct intel_crtc_state *pipe_config);
d288f65f 109static void chv_prepare_pll(struct intel_crtc *crtc,
5cec258b 110 const struct intel_crtc_state *pipe_config);
613d2b27
ML
111static void intel_begin_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
112static void intel_finish_crtc_commit(struct drm_crtc *, struct drm_crtc_state *);
549e2bfb
CK
113static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc,
114 struct intel_crtc_state *crtc_state);
bfd16b2a
ML
115static void skylake_pfit_enable(struct intel_crtc *crtc);
116static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force);
117static void ironlake_pfit_enable(struct intel_crtc *crtc);
043e9bda 118static void intel_modeset_setup_hw_state(struct drm_device *dev);
2622a081 119static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);
4e5ca60f 120static int ilk_max_pixel_rate(struct drm_atomic_state *state);
e7457a9a 121
d4906093 122struct intel_limit {
4c5def93
ACO
123 struct {
124 int min, max;
125 } dot, vco, n, m, m1, m2, p, p1;
126
127 struct {
128 int dot_limit;
129 int p2_slow, p2_fast;
130 } p2;
d4906093 131};
79e53945 132
bfa7df01
VS
133/* returns HPLL frequency in kHz */
134static int valleyview_get_vco(struct drm_i915_private *dev_priv)
135{
136 int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
137
138 /* Obtain SKU information */
139 mutex_lock(&dev_priv->sb_lock);
140 hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
141 CCK_FUSE_HPLL_FREQ_MASK;
142 mutex_unlock(&dev_priv->sb_lock);
143
144 return vco_freq[hpll_freq] * 1000;
145}
146
c30fec65
VS
147int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
148 const char *name, u32 reg, int ref_freq)
bfa7df01
VS
149{
150 u32 val;
151 int divider;
152
bfa7df01
VS
153 mutex_lock(&dev_priv->sb_lock);
154 val = vlv_cck_read(dev_priv, reg);
155 mutex_unlock(&dev_priv->sb_lock);
156
157 divider = val & CCK_FREQUENCY_VALUES;
158
159 WARN((val & CCK_FREQUENCY_STATUS) !=
160 (divider << CCK_FREQUENCY_STATUS_SHIFT),
161 "%s change in progress\n", name);
162
c30fec65
VS
163 return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
164}
165
166static int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
167 const char *name, u32 reg)
168{
169 if (dev_priv->hpll_freq == 0)
170 dev_priv->hpll_freq = valleyview_get_vco(dev_priv);
171
172 return vlv_get_cck_clock(dev_priv, name, reg,
173 dev_priv->hpll_freq);
bfa7df01
VS
174}
175
e7dc33f3
VS
176static int
177intel_pch_rawclk(struct drm_i915_private *dev_priv)
d2acd215 178{
e7dc33f3
VS
179 return (I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK) * 1000;
180}
d2acd215 181
e7dc33f3
VS
182static int
183intel_vlv_hrawclk(struct drm_i915_private *dev_priv)
184{
19ab4ed3 185 /* RAWCLK_FREQ_VLV register updated from power well code */
35d38d1f
VS
186 return vlv_get_cck_clock_hpll(dev_priv, "hrawclk",
187 CCK_DISPLAY_REF_CLOCK_CONTROL);
d2acd215
DV
188}
189
e7dc33f3
VS
190static int
191intel_g4x_hrawclk(struct drm_i915_private *dev_priv)
79e50a4f 192{
79e50a4f
JN
193 uint32_t clkcfg;
194
e7dc33f3 195 /* hrawclock is 1/4 the FSB frequency */
79e50a4f
JN
196 clkcfg = I915_READ(CLKCFG);
197 switch (clkcfg & CLKCFG_FSB_MASK) {
198 case CLKCFG_FSB_400:
e7dc33f3 199 return 100000;
79e50a4f 200 case CLKCFG_FSB_533:
e7dc33f3 201 return 133333;
79e50a4f 202 case CLKCFG_FSB_667:
e7dc33f3 203 return 166667;
79e50a4f 204 case CLKCFG_FSB_800:
e7dc33f3 205 return 200000;
79e50a4f 206 case CLKCFG_FSB_1067:
e7dc33f3 207 return 266667;
79e50a4f 208 case CLKCFG_FSB_1333:
e7dc33f3 209 return 333333;
79e50a4f
JN
210 /* these two are just a guess; one of them might be right */
211 case CLKCFG_FSB_1600:
212 case CLKCFG_FSB_1600_ALT:
e7dc33f3 213 return 400000;
79e50a4f 214 default:
e7dc33f3 215 return 133333;
79e50a4f
JN
216 }
217}
218
19ab4ed3 219void intel_update_rawclk(struct drm_i915_private *dev_priv)
e7dc33f3
VS
220{
221 if (HAS_PCH_SPLIT(dev_priv))
222 dev_priv->rawclk_freq = intel_pch_rawclk(dev_priv);
223 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
224 dev_priv->rawclk_freq = intel_vlv_hrawclk(dev_priv);
225 else if (IS_G4X(dev_priv) || IS_PINEVIEW(dev_priv))
226 dev_priv->rawclk_freq = intel_g4x_hrawclk(dev_priv);
227 else
228 return; /* no rawclk on other platforms, or no need to know it */
229
230 DRM_DEBUG_DRIVER("rawclk rate: %d kHz\n", dev_priv->rawclk_freq);
231}
232
bfa7df01
VS
233static void intel_update_czclk(struct drm_i915_private *dev_priv)
234{
666a4537 235 if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
bfa7df01
VS
236 return;
237
238 dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
239 CCK_CZ_CLOCK_CONTROL);
240
241 DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
242}
243
021357ac 244static inline u32 /* units of 100MHz */
21a727b3
VS
245intel_fdi_link_freq(struct drm_i915_private *dev_priv,
246 const struct intel_crtc_state *pipe_config)
021357ac 247{
21a727b3
VS
248 if (HAS_DDI(dev_priv))
249 return pipe_config->port_clock; /* SPLL */
250 else if (IS_GEN5(dev_priv))
251 return ((I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2) * 10000;
e3b247da 252 else
21a727b3 253 return 270000;
021357ac
CW
254}
255
1b6f4958 256static const struct intel_limit intel_limits_i8xx_dac = {
0206e353 257 .dot = { .min = 25000, .max = 350000 },
9c333719 258 .vco = { .min = 908000, .max = 1512000 },
91dbe5fb 259 .n = { .min = 2, .max = 16 },
0206e353
AJ
260 .m = { .min = 96, .max = 140 },
261 .m1 = { .min = 18, .max = 26 },
262 .m2 = { .min = 6, .max = 16 },
263 .p = { .min = 4, .max = 128 },
264 .p1 = { .min = 2, .max = 33 },
273e27ca
EA
265 .p2 = { .dot_limit = 165000,
266 .p2_slow = 4, .p2_fast = 2 },
e4b36699
KP
267};
268
1b6f4958 269static const struct intel_limit intel_limits_i8xx_dvo = {
5d536e28 270 .dot = { .min = 25000, .max = 350000 },
9c333719 271 .vco = { .min = 908000, .max = 1512000 },
91dbe5fb 272 .n = { .min = 2, .max = 16 },
5d536e28
DV
273 .m = { .min = 96, .max = 140 },
274 .m1 = { .min = 18, .max = 26 },
275 .m2 = { .min = 6, .max = 16 },
276 .p = { .min = 4, .max = 128 },
277 .p1 = { .min = 2, .max = 33 },
278 .p2 = { .dot_limit = 165000,
279 .p2_slow = 4, .p2_fast = 4 },
280};
281
1b6f4958 282static const struct intel_limit intel_limits_i8xx_lvds = {
0206e353 283 .dot = { .min = 25000, .max = 350000 },
9c333719 284 .vco = { .min = 908000, .max = 1512000 },
91dbe5fb 285 .n = { .min = 2, .max = 16 },
0206e353
AJ
286 .m = { .min = 96, .max = 140 },
287 .m1 = { .min = 18, .max = 26 },
288 .m2 = { .min = 6, .max = 16 },
289 .p = { .min = 4, .max = 128 },
290 .p1 = { .min = 1, .max = 6 },
273e27ca
EA
291 .p2 = { .dot_limit = 165000,
292 .p2_slow = 14, .p2_fast = 7 },
e4b36699 293};
273e27ca 294
1b6f4958 295static const struct intel_limit intel_limits_i9xx_sdvo = {
0206e353
AJ
296 .dot = { .min = 20000, .max = 400000 },
297 .vco = { .min = 1400000, .max = 2800000 },
298 .n = { .min = 1, .max = 6 },
299 .m = { .min = 70, .max = 120 },
4f7dfb67
PJ
300 .m1 = { .min = 8, .max = 18 },
301 .m2 = { .min = 3, .max = 7 },
0206e353
AJ
302 .p = { .min = 5, .max = 80 },
303 .p1 = { .min = 1, .max = 8 },
273e27ca
EA
304 .p2 = { .dot_limit = 200000,
305 .p2_slow = 10, .p2_fast = 5 },
e4b36699
KP
306};
307
1b6f4958 308static const struct intel_limit intel_limits_i9xx_lvds = {
0206e353
AJ
309 .dot = { .min = 20000, .max = 400000 },
310 .vco = { .min = 1400000, .max = 2800000 },
311 .n = { .min = 1, .max = 6 },
312 .m = { .min = 70, .max = 120 },
53a7d2d1
PJ
313 .m1 = { .min = 8, .max = 18 },
314 .m2 = { .min = 3, .max = 7 },
0206e353
AJ
315 .p = { .min = 7, .max = 98 },
316 .p1 = { .min = 1, .max = 8 },
273e27ca
EA
317 .p2 = { .dot_limit = 112000,
318 .p2_slow = 14, .p2_fast = 7 },
e4b36699
KP
319};
320
273e27ca 321
1b6f4958 322static const struct intel_limit intel_limits_g4x_sdvo = {
273e27ca
EA
323 .dot = { .min = 25000, .max = 270000 },
324 .vco = { .min = 1750000, .max = 3500000},
325 .n = { .min = 1, .max = 4 },
326 .m = { .min = 104, .max = 138 },
327 .m1 = { .min = 17, .max = 23 },
328 .m2 = { .min = 5, .max = 11 },
329 .p = { .min = 10, .max = 30 },
330 .p1 = { .min = 1, .max = 3},
331 .p2 = { .dot_limit = 270000,
332 .p2_slow = 10,
333 .p2_fast = 10
044c7c41 334 },
e4b36699
KP
335};
336
1b6f4958 337static const struct intel_limit intel_limits_g4x_hdmi = {
273e27ca
EA
338 .dot = { .min = 22000, .max = 400000 },
339 .vco = { .min = 1750000, .max = 3500000},
340 .n = { .min = 1, .max = 4 },
341 .m = { .min = 104, .max = 138 },
342 .m1 = { .min = 16, .max = 23 },
343 .m2 = { .min = 5, .max = 11 },
344 .p = { .min = 5, .max = 80 },
345 .p1 = { .min = 1, .max = 8},
346 .p2 = { .dot_limit = 165000,
347 .p2_slow = 10, .p2_fast = 5 },
e4b36699
KP
348};
349
1b6f4958 350static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
273e27ca
EA
351 .dot = { .min = 20000, .max = 115000 },
352 .vco = { .min = 1750000, .max = 3500000 },
353 .n = { .min = 1, .max = 3 },
354 .m = { .min = 104, .max = 138 },
355 .m1 = { .min = 17, .max = 23 },
356 .m2 = { .min = 5, .max = 11 },
357 .p = { .min = 28, .max = 112 },
358 .p1 = { .min = 2, .max = 8 },
359 .p2 = { .dot_limit = 0,
360 .p2_slow = 14, .p2_fast = 14
044c7c41 361 },
e4b36699
KP
362};
363
1b6f4958 364static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
273e27ca
EA
365 .dot = { .min = 80000, .max = 224000 },
366 .vco = { .min = 1750000, .max = 3500000 },
367 .n = { .min = 1, .max = 3 },
368 .m = { .min = 104, .max = 138 },
369 .m1 = { .min = 17, .max = 23 },
370 .m2 = { .min = 5, .max = 11 },
371 .p = { .min = 14, .max = 42 },
372 .p1 = { .min = 2, .max = 6 },
373 .p2 = { .dot_limit = 0,
374 .p2_slow = 7, .p2_fast = 7
044c7c41 375 },
e4b36699
KP
376};
377
1b6f4958 378static const struct intel_limit intel_limits_pineview_sdvo = {
0206e353
AJ
379 .dot = { .min = 20000, .max = 400000},
380 .vco = { .min = 1700000, .max = 3500000 },
273e27ca 381 /* Pineview's Ncounter is a ring counter */
0206e353
AJ
382 .n = { .min = 3, .max = 6 },
383 .m = { .min = 2, .max = 256 },
273e27ca 384 /* Pineview only has one combined m divider, which we treat as m2. */
0206e353
AJ
385 .m1 = { .min = 0, .max = 0 },
386 .m2 = { .min = 0, .max = 254 },
387 .p = { .min = 5, .max = 80 },
388 .p1 = { .min = 1, .max = 8 },
273e27ca
EA
389 .p2 = { .dot_limit = 200000,
390 .p2_slow = 10, .p2_fast = 5 },
e4b36699
KP
391};
392
1b6f4958 393static const struct intel_limit intel_limits_pineview_lvds = {
0206e353
AJ
394 .dot = { .min = 20000, .max = 400000 },
395 .vco = { .min = 1700000, .max = 3500000 },
396 .n = { .min = 3, .max = 6 },
397 .m = { .min = 2, .max = 256 },
398 .m1 = { .min = 0, .max = 0 },
399 .m2 = { .min = 0, .max = 254 },
400 .p = { .min = 7, .max = 112 },
401 .p1 = { .min = 1, .max = 8 },
273e27ca
EA
402 .p2 = { .dot_limit = 112000,
403 .p2_slow = 14, .p2_fast = 14 },
e4b36699
KP
404};
405
273e27ca
EA
406/* Ironlake / Sandybridge
407 *
408 * We calculate clock using (register_value + 2) for N/M1/M2, so here
409 * the range value for them is (actual_value - 2).
410 */
1b6f4958 411static const struct intel_limit intel_limits_ironlake_dac = {
273e27ca
EA
412 .dot = { .min = 25000, .max = 350000 },
413 .vco = { .min = 1760000, .max = 3510000 },
414 .n = { .min = 1, .max = 5 },
415 .m = { .min = 79, .max = 127 },
416 .m1 = { .min = 12, .max = 22 },
417 .m2 = { .min = 5, .max = 9 },
418 .p = { .min = 5, .max = 80 },
419 .p1 = { .min = 1, .max = 8 },
420 .p2 = { .dot_limit = 225000,
421 .p2_slow = 10, .p2_fast = 5 },
e4b36699
KP
422};
423
1b6f4958 424static const struct intel_limit intel_limits_ironlake_single_lvds = {
273e27ca
EA
425 .dot = { .min = 25000, .max = 350000 },
426 .vco = { .min = 1760000, .max = 3510000 },
427 .n = { .min = 1, .max = 3 },
428 .m = { .min = 79, .max = 118 },
429 .m1 = { .min = 12, .max = 22 },
430 .m2 = { .min = 5, .max = 9 },
431 .p = { .min = 28, .max = 112 },
432 .p1 = { .min = 2, .max = 8 },
433 .p2 = { .dot_limit = 225000,
434 .p2_slow = 14, .p2_fast = 14 },
b91ad0ec
ZW
435};
436
1b6f4958 437static const struct intel_limit intel_limits_ironlake_dual_lvds = {
273e27ca
EA
438 .dot = { .min = 25000, .max = 350000 },
439 .vco = { .min = 1760000, .max = 3510000 },
440 .n = { .min = 1, .max = 3 },
441 .m = { .min = 79, .max = 127 },
442 .m1 = { .min = 12, .max = 22 },
443 .m2 = { .min = 5, .max = 9 },
444 .p = { .min = 14, .max = 56 },
445 .p1 = { .min = 2, .max = 8 },
446 .p2 = { .dot_limit = 225000,
447 .p2_slow = 7, .p2_fast = 7 },
b91ad0ec
ZW
448};
449
273e27ca 450/* LVDS 100mhz refclk limits. */
1b6f4958 451static const struct intel_limit intel_limits_ironlake_single_lvds_100m = {
273e27ca
EA
452 .dot = { .min = 25000, .max = 350000 },
453 .vco = { .min = 1760000, .max = 3510000 },
454 .n = { .min = 1, .max = 2 },
455 .m = { .min = 79, .max = 126 },
456 .m1 = { .min = 12, .max = 22 },
457 .m2 = { .min = 5, .max = 9 },
458 .p = { .min = 28, .max = 112 },
0206e353 459 .p1 = { .min = 2, .max = 8 },
273e27ca
EA
460 .p2 = { .dot_limit = 225000,
461 .p2_slow = 14, .p2_fast = 14 },
b91ad0ec
ZW
462};
463
1b6f4958 464static const struct intel_limit intel_limits_ironlake_dual_lvds_100m = {
273e27ca
EA
465 .dot = { .min = 25000, .max = 350000 },
466 .vco = { .min = 1760000, .max = 3510000 },
467 .n = { .min = 1, .max = 3 },
468 .m = { .min = 79, .max = 126 },
469 .m1 = { .min = 12, .max = 22 },
470 .m2 = { .min = 5, .max = 9 },
471 .p = { .min = 14, .max = 42 },
0206e353 472 .p1 = { .min = 2, .max = 6 },
273e27ca
EA
473 .p2 = { .dot_limit = 225000,
474 .p2_slow = 7, .p2_fast = 7 },
4547668a
ZY
475};
476
1b6f4958 477static const struct intel_limit intel_limits_vlv = {
f01b7962
VS
478 /*
479 * These are the data rate limits (measured in fast clocks)
480 * since those are the strictest limits we have. The fast
481 * clock and actual rate limits are more relaxed, so checking
482 * them would make no difference.
483 */
484 .dot = { .min = 25000 * 5, .max = 270000 * 5 },
75e53986 485 .vco = { .min = 4000000, .max = 6000000 },
a0c4da24 486 .n = { .min = 1, .max = 7 },
a0c4da24
JB
487 .m1 = { .min = 2, .max = 3 },
488 .m2 = { .min = 11, .max = 156 },
b99ab663 489 .p1 = { .min = 2, .max = 3 },
5fdc9c49 490 .p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
a0c4da24
JB
491};
492
1b6f4958 493static const struct intel_limit intel_limits_chv = {
ef9348c8
CML
494 /*
495 * These are the data rate limits (measured in fast clocks)
496 * since those are the strictest limits we have. The fast
497 * clock and actual rate limits are more relaxed, so checking
498 * them would make no difference.
499 */
500 .dot = { .min = 25000 * 5, .max = 540000 * 5},
17fe1021 501 .vco = { .min = 4800000, .max = 6480000 },
ef9348c8
CML
502 .n = { .min = 1, .max = 1 },
503 .m1 = { .min = 2, .max = 2 },
504 .m2 = { .min = 24 << 22, .max = 175 << 22 },
505 .p1 = { .min = 2, .max = 4 },
506 .p2 = { .p2_slow = 1, .p2_fast = 14 },
507};
508
1b6f4958 509static const struct intel_limit intel_limits_bxt = {
5ab7b0b7
ID
510 /* FIXME: find real dot limits */
511 .dot = { .min = 0, .max = INT_MAX },
e6292556 512 .vco = { .min = 4800000, .max = 6700000 },
5ab7b0b7
ID
513 .n = { .min = 1, .max = 1 },
514 .m1 = { .min = 2, .max = 2 },
515 /* FIXME: find real m2 limits */
516 .m2 = { .min = 2 << 22, .max = 255 << 22 },
517 .p1 = { .min = 2, .max = 4 },
518 .p2 = { .p2_slow = 1, .p2_fast = 20 },
519};
520
cdba954e
ACO
521static bool
522needs_modeset(struct drm_crtc_state *state)
523{
fc596660 524 return drm_atomic_crtc_needs_modeset(state);
cdba954e
ACO
525}
526
e0638cdf
PZ
527/**
528 * Returns whether any output on the specified pipe is of the specified type
529 */
4093561b 530bool intel_pipe_has_type(struct intel_crtc *crtc, enum intel_output_type type)
e0638cdf 531{
409ee761 532 struct drm_device *dev = crtc->base.dev;
e0638cdf
PZ
533 struct intel_encoder *encoder;
534
409ee761 535 for_each_encoder_on_crtc(dev, &crtc->base, encoder)
e0638cdf
PZ
536 if (encoder->type == type)
537 return true;
538
539 return false;
540}
541
d0737e1d
ACO
542/**
543 * Returns whether any output on the specified pipe will have the specified
544 * type after a staged modeset is complete, i.e., the same as
545 * intel_pipe_has_type() but looking at encoder->new_crtc instead of
546 * encoder->crtc.
547 */
a93e255f
ACO
548static bool intel_pipe_will_have_type(const struct intel_crtc_state *crtc_state,
549 int type)
d0737e1d 550{
a93e255f 551 struct drm_atomic_state *state = crtc_state->base.state;
da3ced29 552 struct drm_connector *connector;
a93e255f 553 struct drm_connector_state *connector_state;
d0737e1d 554 struct intel_encoder *encoder;
a93e255f
ACO
555 int i, num_connectors = 0;
556
da3ced29 557 for_each_connector_in_state(state, connector, connector_state, i) {
a93e255f
ACO
558 if (connector_state->crtc != crtc_state->base.crtc)
559 continue;
560
561 num_connectors++;
d0737e1d 562
a93e255f
ACO
563 encoder = to_intel_encoder(connector_state->best_encoder);
564 if (encoder->type == type)
d0737e1d 565 return true;
a93e255f
ACO
566 }
567
568 WARN_ON(num_connectors == 0);
d0737e1d
ACO
569
570 return false;
571}
572
dccbea3b
ID
573/*
574 * Platform specific helpers to calculate the port PLL loopback- (clock.m),
575 * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
576 * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
577 * The helpers' return value is the rate of the clock that is fed to the
578 * display engine's pipe which can be the above fast dot clock rate or a
579 * divided-down version of it.
580 */
f2b115e6 581/* m1 is reserved as 0 in Pineview, n is a ring counter */
9e2c8475 582static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
79e53945 583{
2177832f
SL
584 clock->m = clock->m2 + 2;
585 clock->p = clock->p1 * clock->p2;
ed5ca77e 586 if (WARN_ON(clock->n == 0 || clock->p == 0))
dccbea3b 587 return 0;
fb03ac01
VS
588 clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
589 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
dccbea3b
ID
590
591 return clock->dot;
2177832f
SL
592}
593
7429e9d4
DV
594static uint32_t i9xx_dpll_compute_m(struct dpll *dpll)
595{
596 return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
597}
598
9e2c8475 599static int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
2177832f 600{
7429e9d4 601 clock->m = i9xx_dpll_compute_m(clock);
79e53945 602 clock->p = clock->p1 * clock->p2;
ed5ca77e 603 if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
dccbea3b 604 return 0;
fb03ac01
VS
605 clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
606 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
dccbea3b
ID
607
608 return clock->dot;
79e53945
JB
609}
610
9e2c8475 611static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
589eca67
ID
612{
613 clock->m = clock->m1 * clock->m2;
614 clock->p = clock->p1 * clock->p2;
615 if (WARN_ON(clock->n == 0 || clock->p == 0))
dccbea3b 616 return 0;
589eca67
ID
617 clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
618 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
dccbea3b
ID
619
620 return clock->dot / 5;
589eca67
ID
621}
622
9e2c8475 623int chv_calc_dpll_params(int refclk, struct dpll *clock)
ef9348c8
CML
624{
625 clock->m = clock->m1 * clock->m2;
626 clock->p = clock->p1 * clock->p2;
627 if (WARN_ON(clock->n == 0 || clock->p == 0))
dccbea3b 628 return 0;
ef9348c8
CML
629 clock->vco = DIV_ROUND_CLOSEST_ULL((uint64_t)refclk * clock->m,
630 clock->n << 22);
631 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
dccbea3b
ID
632
633 return clock->dot / 5;
ef9348c8
CML
634}
635
7c04d1d9 636#define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0)
79e53945
JB
637/**
638 * Returns whether the given set of divisors are valid for a given refclk with
639 * the given connectors.
640 */
641
1b894b59 642static bool intel_PLL_is_valid(struct drm_device *dev,
1b6f4958 643 const struct intel_limit *limit,
9e2c8475 644 const struct dpll *clock)
79e53945 645{
f01b7962
VS
646 if (clock->n < limit->n.min || limit->n.max < clock->n)
647 INTELPllInvalid("n out of range\n");
79e53945 648 if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
0206e353 649 INTELPllInvalid("p1 out of range\n");
79e53945 650 if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
0206e353 651 INTELPllInvalid("m2 out of range\n");
79e53945 652 if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
0206e353 653 INTELPllInvalid("m1 out of range\n");
f01b7962 654
666a4537
WB
655 if (!IS_PINEVIEW(dev) && !IS_VALLEYVIEW(dev) &&
656 !IS_CHERRYVIEW(dev) && !IS_BROXTON(dev))
f01b7962
VS
657 if (clock->m1 <= clock->m2)
658 INTELPllInvalid("m1 <= m2\n");
659
666a4537 660 if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) && !IS_BROXTON(dev)) {
f01b7962
VS
661 if (clock->p < limit->p.min || limit->p.max < clock->p)
662 INTELPllInvalid("p out of range\n");
663 if (clock->m < limit->m.min || limit->m.max < clock->m)
664 INTELPllInvalid("m out of range\n");
665 }
666
79e53945 667 if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
0206e353 668 INTELPllInvalid("vco out of range\n");
79e53945
JB
669 /* XXX: We may need to be checking "Dot clock" depending on the multiplier,
670 * connector, etc., rather than just a single range.
671 */
672 if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
0206e353 673 INTELPllInvalid("dot out of range\n");
79e53945
JB
674
675 return true;
676}
677
3b1429d9 678static int
1b6f4958 679i9xx_select_p2_div(const struct intel_limit *limit,
3b1429d9
VS
680 const struct intel_crtc_state *crtc_state,
681 int target)
79e53945 682{
3b1429d9 683 struct drm_device *dev = crtc_state->base.crtc->dev;
79e53945 684
a93e255f 685 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
79e53945 686 /*
a210b028
DV
687 * For LVDS just rely on its current settings for dual-channel.
688 * We haven't figured out how to reliably set up different
689 * single/dual channel state, if we even can.
79e53945 690 */
1974cad0 691 if (intel_is_dual_link_lvds(dev))
3b1429d9 692 return limit->p2.p2_fast;
79e53945 693 else
3b1429d9 694 return limit->p2.p2_slow;
79e53945
JB
695 } else {
696 if (target < limit->p2.dot_limit)
3b1429d9 697 return limit->p2.p2_slow;
79e53945 698 else
3b1429d9 699 return limit->p2.p2_fast;
79e53945 700 }
3b1429d9
VS
701}
702
70e8aa21
ACO
703/*
704 * Returns a set of divisors for the desired target clock with the given
705 * refclk, or FALSE. The returned values represent the clock equation:
706 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
707 *
708 * Target and reference clocks are specified in kHz.
709 *
710 * If match_clock is provided, then best_clock P divider must match the P
711 * divider from @match_clock used for LVDS downclocking.
712 */
3b1429d9 713static bool
1b6f4958 714i9xx_find_best_dpll(const struct intel_limit *limit,
3b1429d9 715 struct intel_crtc_state *crtc_state,
9e2c8475
ACO
716 int target, int refclk, struct dpll *match_clock,
717 struct dpll *best_clock)
3b1429d9
VS
718{
719 struct drm_device *dev = crtc_state->base.crtc->dev;
9e2c8475 720 struct dpll clock;
3b1429d9 721 int err = target;
79e53945 722
0206e353 723 memset(best_clock, 0, sizeof(*best_clock));
79e53945 724
3b1429d9
VS
725 clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
726
42158660
ZY
727 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
728 clock.m1++) {
729 for (clock.m2 = limit->m2.min;
730 clock.m2 <= limit->m2.max; clock.m2++) {
c0efc387 731 if (clock.m2 >= clock.m1)
42158660
ZY
732 break;
733 for (clock.n = limit->n.min;
734 clock.n <= limit->n.max; clock.n++) {
735 for (clock.p1 = limit->p1.min;
736 clock.p1 <= limit->p1.max; clock.p1++) {
79e53945
JB
737 int this_err;
738
dccbea3b 739 i9xx_calc_dpll_params(refclk, &clock);
ac58c3f0
DV
740 if (!intel_PLL_is_valid(dev, limit,
741 &clock))
742 continue;
743 if (match_clock &&
744 clock.p != match_clock->p)
745 continue;
746
747 this_err = abs(clock.dot - target);
748 if (this_err < err) {
749 *best_clock = clock;
750 err = this_err;
751 }
752 }
753 }
754 }
755 }
756
757 return (err != target);
758}
759
70e8aa21
ACO
760/*
761 * Returns a set of divisors for the desired target clock with the given
762 * refclk, or FALSE. The returned values represent the clock equation:
763 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
764 *
765 * Target and reference clocks are specified in kHz.
766 *
767 * If match_clock is provided, then best_clock P divider must match the P
768 * divider from @match_clock used for LVDS downclocking.
769 */
ac58c3f0 770static bool
1b6f4958 771pnv_find_best_dpll(const struct intel_limit *limit,
a93e255f 772 struct intel_crtc_state *crtc_state,
9e2c8475
ACO
773 int target, int refclk, struct dpll *match_clock,
774 struct dpll *best_clock)
79e53945 775{
3b1429d9 776 struct drm_device *dev = crtc_state->base.crtc->dev;
9e2c8475 777 struct dpll clock;
79e53945
JB
778 int err = target;
779
0206e353 780 memset(best_clock, 0, sizeof(*best_clock));
79e53945 781
3b1429d9
VS
782 clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
783
42158660
ZY
784 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
785 clock.m1++) {
786 for (clock.m2 = limit->m2.min;
787 clock.m2 <= limit->m2.max; clock.m2++) {
42158660
ZY
788 for (clock.n = limit->n.min;
789 clock.n <= limit->n.max; clock.n++) {
790 for (clock.p1 = limit->p1.min;
791 clock.p1 <= limit->p1.max; clock.p1++) {
79e53945
JB
792 int this_err;
793
dccbea3b 794 pnv_calc_dpll_params(refclk, &clock);
1b894b59
CW
795 if (!intel_PLL_is_valid(dev, limit,
796 &clock))
79e53945 797 continue;
cec2f356
SP
798 if (match_clock &&
799 clock.p != match_clock->p)
800 continue;
79e53945
JB
801
802 this_err = abs(clock.dot - target);
803 if (this_err < err) {
804 *best_clock = clock;
805 err = this_err;
806 }
807 }
808 }
809 }
810 }
811
812 return (err != target);
813}
814
997c030c
ACO
815/*
816 * Returns a set of divisors for the desired target clock with the given
817 * refclk, or FALSE. The returned values represent the clock equation:
818 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
70e8aa21
ACO
819 *
820 * Target and reference clocks are specified in kHz.
821 *
822 * If match_clock is provided, then best_clock P divider must match the P
823 * divider from @match_clock used for LVDS downclocking.
997c030c 824 */
d4906093 825static bool
1b6f4958 826g4x_find_best_dpll(const struct intel_limit *limit,
a93e255f 827 struct intel_crtc_state *crtc_state,
9e2c8475
ACO
828 int target, int refclk, struct dpll *match_clock,
829 struct dpll *best_clock)
d4906093 830{
3b1429d9 831 struct drm_device *dev = crtc_state->base.crtc->dev;
9e2c8475 832 struct dpll clock;
d4906093 833 int max_n;
3b1429d9 834 bool found = false;
6ba770dc
AJ
835 /* approximately equals target * 0.00585 */
836 int err_most = (target >> 8) + (target >> 9);
d4906093
ML
837
838 memset(best_clock, 0, sizeof(*best_clock));
3b1429d9
VS
839
840 clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
841
d4906093 842 max_n = limit->n.max;
f77f13e2 843 /* based on hardware requirement, prefer smaller n to precision */
d4906093 844 for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
f77f13e2 845 /* based on hardware requirement, prefere larger m1,m2 */
d4906093
ML
846 for (clock.m1 = limit->m1.max;
847 clock.m1 >= limit->m1.min; clock.m1--) {
848 for (clock.m2 = limit->m2.max;
849 clock.m2 >= limit->m2.min; clock.m2--) {
850 for (clock.p1 = limit->p1.max;
851 clock.p1 >= limit->p1.min; clock.p1--) {
852 int this_err;
853
dccbea3b 854 i9xx_calc_dpll_params(refclk, &clock);
1b894b59
CW
855 if (!intel_PLL_is_valid(dev, limit,
856 &clock))
d4906093 857 continue;
1b894b59
CW
858
859 this_err = abs(clock.dot - target);
d4906093
ML
860 if (this_err < err_most) {
861 *best_clock = clock;
862 err_most = this_err;
863 max_n = clock.n;
864 found = true;
865 }
866 }
867 }
868 }
869 }
2c07245f
ZW
870 return found;
871}
872
d5dd62bd
ID
873/*
874 * Check if the calculated PLL configuration is more optimal compared to the
875 * best configuration and error found so far. Return the calculated error.
876 */
877static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
9e2c8475
ACO
878 const struct dpll *calculated_clock,
879 const struct dpll *best_clock,
d5dd62bd
ID
880 unsigned int best_error_ppm,
881 unsigned int *error_ppm)
882{
9ca3ba01
ID
883 /*
884 * For CHV ignore the error and consider only the P value.
885 * Prefer a bigger P value based on HW requirements.
886 */
887 if (IS_CHERRYVIEW(dev)) {
888 *error_ppm = 0;
889
890 return calculated_clock->p > best_clock->p;
891 }
892
24be4e46
ID
893 if (WARN_ON_ONCE(!target_freq))
894 return false;
895
d5dd62bd
ID
896 *error_ppm = div_u64(1000000ULL *
897 abs(target_freq - calculated_clock->dot),
898 target_freq);
899 /*
900 * Prefer a better P value over a better (smaller) error if the error
901 * is small. Ensure this preference for future configurations too by
902 * setting the error to 0.
903 */
904 if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
905 *error_ppm = 0;
906
907 return true;
908 }
909
910 return *error_ppm + 10 < best_error_ppm;
911}
912
65b3d6a9
ACO
913/*
914 * Returns a set of divisors for the desired target clock with the given
915 * refclk, or FALSE. The returned values represent the clock equation:
916 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
917 */
a0c4da24 918static bool
1b6f4958 919vlv_find_best_dpll(const struct intel_limit *limit,
a93e255f 920 struct intel_crtc_state *crtc_state,
9e2c8475
ACO
921 int target, int refclk, struct dpll *match_clock,
922 struct dpll *best_clock)
a0c4da24 923{
a93e255f 924 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
a919ff14 925 struct drm_device *dev = crtc->base.dev;
9e2c8475 926 struct dpll clock;
69e4f900 927 unsigned int bestppm = 1000000;
27e639bf
VS
928 /* min update 19.2 MHz */
929 int max_n = min(limit->n.max, refclk / 19200);
49e497ef 930 bool found = false;
a0c4da24 931
6b4bf1c4
VS
932 target *= 5; /* fast clock */
933
934 memset(best_clock, 0, sizeof(*best_clock));
a0c4da24
JB
935
936 /* based on hardware requirement, prefer smaller n to precision */
27e639bf 937 for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
811bbf05 938 for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
889059d8 939 for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
c1a9ae43 940 clock.p2 -= clock.p2 > 10 ? 2 : 1) {
6b4bf1c4 941 clock.p = clock.p1 * clock.p2;
a0c4da24 942 /* based on hardware requirement, prefer bigger m1,m2 values */
6b4bf1c4 943 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
d5dd62bd 944 unsigned int ppm;
69e4f900 945
6b4bf1c4
VS
946 clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
947 refclk * clock.m1);
948
dccbea3b 949 vlv_calc_dpll_params(refclk, &clock);
43b0ac53 950
f01b7962
VS
951 if (!intel_PLL_is_valid(dev, limit,
952 &clock))
43b0ac53
VS
953 continue;
954
d5dd62bd
ID
955 if (!vlv_PLL_is_optimal(dev, target,
956 &clock,
957 best_clock,
958 bestppm, &ppm))
959 continue;
6b4bf1c4 960
d5dd62bd
ID
961 *best_clock = clock;
962 bestppm = ppm;
963 found = true;
a0c4da24
JB
964 }
965 }
966 }
967 }
a0c4da24 968
49e497ef 969 return found;
a0c4da24 970}
a4fc5ed6 971
65b3d6a9
ACO
972/*
973 * Returns a set of divisors for the desired target clock with the given
974 * refclk, or FALSE. The returned values represent the clock equation:
975 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
976 */
ef9348c8 977static bool
1b6f4958 978chv_find_best_dpll(const struct intel_limit *limit,
a93e255f 979 struct intel_crtc_state *crtc_state,
9e2c8475
ACO
980 int target, int refclk, struct dpll *match_clock,
981 struct dpll *best_clock)
ef9348c8 982{
a93e255f 983 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
a919ff14 984 struct drm_device *dev = crtc->base.dev;
9ca3ba01 985 unsigned int best_error_ppm;
9e2c8475 986 struct dpll clock;
ef9348c8
CML
987 uint64_t m2;
988 int found = false;
989
990 memset(best_clock, 0, sizeof(*best_clock));
9ca3ba01 991 best_error_ppm = 1000000;
ef9348c8
CML
992
993 /*
994 * Based on hardware doc, the n always set to 1, and m1 always
995 * set to 2. If requires to support 200Mhz refclk, we need to
996 * revisit this because n may not 1 anymore.
997 */
998 clock.n = 1, clock.m1 = 2;
999 target *= 5; /* fast clock */
1000
1001 for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
1002 for (clock.p2 = limit->p2.p2_fast;
1003 clock.p2 >= limit->p2.p2_slow;
1004 clock.p2 -= clock.p2 > 10 ? 2 : 1) {
9ca3ba01 1005 unsigned int error_ppm;
ef9348c8
CML
1006
1007 clock.p = clock.p1 * clock.p2;
1008
1009 m2 = DIV_ROUND_CLOSEST_ULL(((uint64_t)target * clock.p *
1010 clock.n) << 22, refclk * clock.m1);
1011
1012 if (m2 > INT_MAX/clock.m1)
1013 continue;
1014
1015 clock.m2 = m2;
1016
dccbea3b 1017 chv_calc_dpll_params(refclk, &clock);
ef9348c8
CML
1018
1019 if (!intel_PLL_is_valid(dev, limit, &clock))
1020 continue;
1021
9ca3ba01
ID
1022 if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
1023 best_error_ppm, &error_ppm))
1024 continue;
1025
1026 *best_clock = clock;
1027 best_error_ppm = error_ppm;
1028 found = true;
ef9348c8
CML
1029 }
1030 }
1031
1032 return found;
1033}
1034
5ab7b0b7 1035bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state, int target_clock,
9e2c8475 1036 struct dpll *best_clock)
5ab7b0b7 1037{
65b3d6a9 1038 int refclk = 100000;
1b6f4958 1039 const struct intel_limit *limit = &intel_limits_bxt;
5ab7b0b7 1040
65b3d6a9 1041 return chv_find_best_dpll(limit, crtc_state,
5ab7b0b7
ID
1042 target_clock, refclk, NULL, best_clock);
1043}
1044
20ddf665
VS
1045bool intel_crtc_active(struct drm_crtc *crtc)
1046{
1047 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1048
1049 /* Be paranoid as we can arrive here with only partial
1050 * state retrieved from the hardware during setup.
1051 *
241bfc38 1052 * We can ditch the adjusted_mode.crtc_clock check as soon
20ddf665
VS
1053 * as Haswell has gained clock readout/fastboot support.
1054 *
66e514c1 1055 * We can ditch the crtc->primary->fb check as soon as we can
20ddf665 1056 * properly reconstruct framebuffers.
c3d1f436
MR
1057 *
1058 * FIXME: The intel_crtc->active here should be switched to
1059 * crtc->state->active once we have proper CRTC states wired up
1060 * for atomic.
20ddf665 1061 */
c3d1f436 1062 return intel_crtc->active && crtc->primary->state->fb &&
6e3c9717 1063 intel_crtc->config->base.adjusted_mode.crtc_clock;
20ddf665
VS
1064}
1065
a5c961d1
PZ
1066enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
1067 enum pipe pipe)
1068{
1069 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
1070 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1071
6e3c9717 1072 return intel_crtc->config->cpu_transcoder;
a5c961d1
PZ
1073}
1074
fbf49ea2
VS
1075static bool pipe_dsl_stopped(struct drm_device *dev, enum pipe pipe)
1076{
1077 struct drm_i915_private *dev_priv = dev->dev_private;
f0f59a00 1078 i915_reg_t reg = PIPEDSL(pipe);
fbf49ea2
VS
1079 u32 line1, line2;
1080 u32 line_mask;
1081
1082 if (IS_GEN2(dev))
1083 line_mask = DSL_LINEMASK_GEN2;
1084 else
1085 line_mask = DSL_LINEMASK_GEN3;
1086
1087 line1 = I915_READ(reg) & line_mask;
6adfb1ef 1088 msleep(5);
fbf49ea2
VS
1089 line2 = I915_READ(reg) & line_mask;
1090
1091 return line1 == line2;
1092}
1093
ab7ad7f6
KP
1094/*
1095 * intel_wait_for_pipe_off - wait for pipe to turn off
575f7ab7 1096 * @crtc: crtc whose pipe to wait for
9d0498a2
JB
1097 *
1098 * After disabling a pipe, we can't wait for vblank in the usual way,
1099 * spinning on the vblank interrupt status bit, since we won't actually
1100 * see an interrupt when the pipe is disabled.
1101 *
ab7ad7f6
KP
1102 * On Gen4 and above:
1103 * wait for the pipe register state bit to turn off
1104 *
1105 * Otherwise:
1106 * wait for the display line value to settle (it usually
1107 * ends up stopping at the start of the next frame).
58e10eb9 1108 *
9d0498a2 1109 */
575f7ab7 1110static void intel_wait_for_pipe_off(struct intel_crtc *crtc)
9d0498a2 1111{
575f7ab7 1112 struct drm_device *dev = crtc->base.dev;
9d0498a2 1113 struct drm_i915_private *dev_priv = dev->dev_private;
6e3c9717 1114 enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
575f7ab7 1115 enum pipe pipe = crtc->pipe;
ab7ad7f6
KP
1116
1117 if (INTEL_INFO(dev)->gen >= 4) {
f0f59a00 1118 i915_reg_t reg = PIPECONF(cpu_transcoder);
ab7ad7f6
KP
1119
1120 /* Wait for the Pipe State to go off */
58e10eb9
CW
1121 if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0,
1122 100))
284637d9 1123 WARN(1, "pipe_off wait timed out\n");
ab7ad7f6 1124 } else {
ab7ad7f6 1125 /* Wait for the display line to settle */
fbf49ea2 1126 if (wait_for(pipe_dsl_stopped(dev, pipe), 100))
284637d9 1127 WARN(1, "pipe_off wait timed out\n");
ab7ad7f6 1128 }
79e53945
JB
1129}
1130
b24e7179 1131/* Only for pre-ILK configs */
55607e8a
DV
1132void assert_pll(struct drm_i915_private *dev_priv,
1133 enum pipe pipe, bool state)
b24e7179 1134{
b24e7179
JB
1135 u32 val;
1136 bool cur_state;
1137
649636ef 1138 val = I915_READ(DPLL(pipe));
b24e7179 1139 cur_state = !!(val & DPLL_VCO_ENABLE);
e2c719b7 1140 I915_STATE_WARN(cur_state != state,
b24e7179 1141 "PLL state assertion failure (expected %s, current %s)\n",
87ad3212 1142 onoff(state), onoff(cur_state));
b24e7179 1143}
b24e7179 1144
23538ef1 1145/* XXX: the dsi pll is shared between MIPI DSI ports */
8563b1e8 1146void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
23538ef1
JN
1147{
1148 u32 val;
1149 bool cur_state;
1150
a580516d 1151 mutex_lock(&dev_priv->sb_lock);
23538ef1 1152 val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
a580516d 1153 mutex_unlock(&dev_priv->sb_lock);
23538ef1
JN
1154
1155 cur_state = val & DSI_PLL_VCO_EN;
e2c719b7 1156 I915_STATE_WARN(cur_state != state,
23538ef1 1157 "DSI PLL state assertion failure (expected %s, current %s)\n",
87ad3212 1158 onoff(state), onoff(cur_state));
23538ef1 1159}
23538ef1 1160
040484af
JB
1161static void assert_fdi_tx(struct drm_i915_private *dev_priv,
1162 enum pipe pipe, bool state)
1163{
040484af 1164 bool cur_state;
ad80a810
PZ
1165 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1166 pipe);
040484af 1167
2d1fe073 1168 if (HAS_DDI(dev_priv)) {
affa9354 1169 /* DDI does not have a specific FDI_TX register */
649636ef 1170 u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
ad80a810 1171 cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
bf507ef7 1172 } else {
649636ef 1173 u32 val = I915_READ(FDI_TX_CTL(pipe));
bf507ef7
ED
1174 cur_state = !!(val & FDI_TX_ENABLE);
1175 }
e2c719b7 1176 I915_STATE_WARN(cur_state != state,
040484af 1177 "FDI TX state assertion failure (expected %s, current %s)\n",
87ad3212 1178 onoff(state), onoff(cur_state));
040484af
JB
1179}
1180#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
1181#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
1182
1183static void assert_fdi_rx(struct drm_i915_private *dev_priv,
1184 enum pipe pipe, bool state)
1185{
040484af
JB
1186 u32 val;
1187 bool cur_state;
1188
649636ef 1189 val = I915_READ(FDI_RX_CTL(pipe));
d63fa0dc 1190 cur_state = !!(val & FDI_RX_ENABLE);
e2c719b7 1191 I915_STATE_WARN(cur_state != state,
040484af 1192 "FDI RX state assertion failure (expected %s, current %s)\n",
87ad3212 1193 onoff(state), onoff(cur_state));
040484af
JB
1194}
1195#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
1196#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
1197
1198static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
1199 enum pipe pipe)
1200{
040484af
JB
1201 u32 val;
1202
1203 /* ILK FDI PLL is always enabled */
7e22dbbb 1204 if (IS_GEN5(dev_priv))
040484af
JB
1205 return;
1206
bf507ef7 1207 /* On Haswell, DDI ports are responsible for the FDI PLL setup */
2d1fe073 1208 if (HAS_DDI(dev_priv))
bf507ef7
ED
1209 return;
1210
649636ef 1211 val = I915_READ(FDI_TX_CTL(pipe));
e2c719b7 1212 I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
040484af
JB
1213}
1214
55607e8a
DV
1215void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
1216 enum pipe pipe, bool state)
040484af 1217{
040484af 1218 u32 val;
55607e8a 1219 bool cur_state;
040484af 1220
649636ef 1221 val = I915_READ(FDI_RX_CTL(pipe));
55607e8a 1222 cur_state = !!(val & FDI_RX_PLL_ENABLE);
e2c719b7 1223 I915_STATE_WARN(cur_state != state,
55607e8a 1224 "FDI RX PLL assertion failure (expected %s, current %s)\n",
87ad3212 1225 onoff(state), onoff(cur_state));
040484af
JB
1226}
1227
b680c37a
DV
1228void assert_panel_unlocked(struct drm_i915_private *dev_priv,
1229 enum pipe pipe)
ea0760cf 1230{
bedd4dba 1231 struct drm_device *dev = dev_priv->dev;
f0f59a00 1232 i915_reg_t pp_reg;
ea0760cf
JB
1233 u32 val;
1234 enum pipe panel_pipe = PIPE_A;
0de3b485 1235 bool locked = true;
ea0760cf 1236
bedd4dba
JN
1237 if (WARN_ON(HAS_DDI(dev)))
1238 return;
1239
1240 if (HAS_PCH_SPLIT(dev)) {
1241 u32 port_sel;
1242
ea0760cf 1243 pp_reg = PCH_PP_CONTROL;
bedd4dba
JN
1244 port_sel = I915_READ(PCH_PP_ON_DELAYS) & PANEL_PORT_SELECT_MASK;
1245
1246 if (port_sel == PANEL_PORT_SELECT_LVDS &&
1247 I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT)
1248 panel_pipe = PIPE_B;
1249 /* XXX: else fix for eDP */
666a4537 1250 } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
bedd4dba
JN
1251 /* presumably write lock depends on pipe, not port select */
1252 pp_reg = VLV_PIPE_PP_CONTROL(pipe);
1253 panel_pipe = pipe;
ea0760cf
JB
1254 } else {
1255 pp_reg = PP_CONTROL;
bedd4dba
JN
1256 if (I915_READ(LVDS) & LVDS_PIPEB_SELECT)
1257 panel_pipe = PIPE_B;
ea0760cf
JB
1258 }
1259
1260 val = I915_READ(pp_reg);
1261 if (!(val & PANEL_POWER_ON) ||
ec49ba2d 1262 ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
ea0760cf
JB
1263 locked = false;
1264
e2c719b7 1265 I915_STATE_WARN(panel_pipe == pipe && locked,
ea0760cf 1266 "panel assertion failure, pipe %c regs locked\n",
9db4a9c7 1267 pipe_name(pipe));
ea0760cf
JB
1268}
1269
93ce0ba6
JN
1270static void assert_cursor(struct drm_i915_private *dev_priv,
1271 enum pipe pipe, bool state)
1272{
1273 struct drm_device *dev = dev_priv->dev;
1274 bool cur_state;
1275
d9d82081 1276 if (IS_845G(dev) || IS_I865G(dev))
0b87c24e 1277 cur_state = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
d9d82081 1278 else
5efb3e28 1279 cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
93ce0ba6 1280
e2c719b7 1281 I915_STATE_WARN(cur_state != state,
93ce0ba6 1282 "cursor on pipe %c assertion failure (expected %s, current %s)\n",
87ad3212 1283 pipe_name(pipe), onoff(state), onoff(cur_state));
93ce0ba6
JN
1284}
1285#define assert_cursor_enabled(d, p) assert_cursor(d, p, true)
1286#define assert_cursor_disabled(d, p) assert_cursor(d, p, false)
1287
b840d907
JB
1288void assert_pipe(struct drm_i915_private *dev_priv,
1289 enum pipe pipe, bool state)
b24e7179 1290{
63d7bbe9 1291 bool cur_state;
702e7a56
PZ
1292 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1293 pipe);
4feed0eb 1294 enum intel_display_power_domain power_domain;
b24e7179 1295
b6b5d049
VS
1296 /* if we need the pipe quirk it must be always on */
1297 if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
1298 (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
8e636784
DV
1299 state = true;
1300
4feed0eb
ID
1301 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
1302 if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
649636ef 1303 u32 val = I915_READ(PIPECONF(cpu_transcoder));
69310161 1304 cur_state = !!(val & PIPECONF_ENABLE);
4feed0eb
ID
1305
1306 intel_display_power_put(dev_priv, power_domain);
1307 } else {
1308 cur_state = false;
69310161
PZ
1309 }
1310
e2c719b7 1311 I915_STATE_WARN(cur_state != state,
63d7bbe9 1312 "pipe %c assertion failure (expected %s, current %s)\n",
87ad3212 1313 pipe_name(pipe), onoff(state), onoff(cur_state));
b24e7179
JB
1314}
1315
931872fc
CW
1316static void assert_plane(struct drm_i915_private *dev_priv,
1317 enum plane plane, bool state)
b24e7179 1318{
b24e7179 1319 u32 val;
931872fc 1320 bool cur_state;
b24e7179 1321
649636ef 1322 val = I915_READ(DSPCNTR(plane));
931872fc 1323 cur_state = !!(val & DISPLAY_PLANE_ENABLE);
e2c719b7 1324 I915_STATE_WARN(cur_state != state,
931872fc 1325 "plane %c assertion failure (expected %s, current %s)\n",
87ad3212 1326 plane_name(plane), onoff(state), onoff(cur_state));
b24e7179
JB
1327}
1328
931872fc
CW
1329#define assert_plane_enabled(d, p) assert_plane(d, p, true)
1330#define assert_plane_disabled(d, p) assert_plane(d, p, false)
1331
b24e7179
JB
1332static void assert_planes_disabled(struct drm_i915_private *dev_priv,
1333 enum pipe pipe)
1334{
653e1026 1335 struct drm_device *dev = dev_priv->dev;
649636ef 1336 int i;
b24e7179 1337
653e1026
VS
1338 /* Primary planes are fixed to pipes on gen4+ */
1339 if (INTEL_INFO(dev)->gen >= 4) {
649636ef 1340 u32 val = I915_READ(DSPCNTR(pipe));
e2c719b7 1341 I915_STATE_WARN(val & DISPLAY_PLANE_ENABLE,
28c05794
AJ
1342 "plane %c assertion failure, should be disabled but not\n",
1343 plane_name(pipe));
19ec1358 1344 return;
28c05794 1345 }
19ec1358 1346
b24e7179 1347 /* Need to check both planes against the pipe */
055e393f 1348 for_each_pipe(dev_priv, i) {
649636ef
VS
1349 u32 val = I915_READ(DSPCNTR(i));
1350 enum pipe cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
b24e7179 1351 DISPPLANE_SEL_PIPE_SHIFT;
e2c719b7 1352 I915_STATE_WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe,
9db4a9c7
JB
1353 "plane %c assertion failure, should be off on pipe %c but is still active\n",
1354 plane_name(i), pipe_name(pipe));
b24e7179
JB
1355 }
1356}
1357
19332d7a
JB
1358static void assert_sprites_disabled(struct drm_i915_private *dev_priv,
1359 enum pipe pipe)
1360{
20674eef 1361 struct drm_device *dev = dev_priv->dev;
649636ef 1362 int sprite;
19332d7a 1363
7feb8b88 1364 if (INTEL_INFO(dev)->gen >= 9) {
3bdcfc0c 1365 for_each_sprite(dev_priv, pipe, sprite) {
649636ef 1366 u32 val = I915_READ(PLANE_CTL(pipe, sprite));
e2c719b7 1367 I915_STATE_WARN(val & PLANE_CTL_ENABLE,
7feb8b88
DL
1368 "plane %d assertion failure, should be off on pipe %c but is still active\n",
1369 sprite, pipe_name(pipe));
1370 }
666a4537 1371 } else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
3bdcfc0c 1372 for_each_sprite(dev_priv, pipe, sprite) {
649636ef 1373 u32 val = I915_READ(SPCNTR(pipe, sprite));
e2c719b7 1374 I915_STATE_WARN(val & SP_ENABLE,
20674eef 1375 "sprite %c assertion failure, should be off on pipe %c but is still active\n",
1fe47785 1376 sprite_name(pipe, sprite), pipe_name(pipe));
20674eef
VS
1377 }
1378 } else if (INTEL_INFO(dev)->gen >= 7) {
649636ef 1379 u32 val = I915_READ(SPRCTL(pipe));
e2c719b7 1380 I915_STATE_WARN(val & SPRITE_ENABLE,
06da8da2 1381 "sprite %c assertion failure, should be off on pipe %c but is still active\n",
20674eef
VS
1382 plane_name(pipe), pipe_name(pipe));
1383 } else if (INTEL_INFO(dev)->gen >= 5) {
649636ef 1384 u32 val = I915_READ(DVSCNTR(pipe));
e2c719b7 1385 I915_STATE_WARN(val & DVS_ENABLE,
06da8da2 1386 "sprite %c assertion failure, should be off on pipe %c but is still active\n",
20674eef 1387 plane_name(pipe), pipe_name(pipe));
19332d7a
JB
1388 }
1389}
1390
08c71e5e
VS
/*
 * Warn if vblank interrupts are still enabled on @crtc: a successful
 * drm_crtc_vblank_get() (returns 0) means they were, so drop the reference.
 */
static void assert_vblank_disabled(struct drm_crtc *crtc)
{
	if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
		drm_crtc_vblank_put(crtc);
}
1396
7abd4b35
ACO
1397void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
1398 enum pipe pipe)
92f2584a 1399{
92f2584a
JB
1400 u32 val;
1401 bool enabled;
1402
649636ef 1403 val = I915_READ(PCH_TRANSCONF(pipe));
92f2584a 1404 enabled = !!(val & TRANS_ENABLE);
e2c719b7 1405 I915_STATE_WARN(enabled,
9db4a9c7
JB
1406 "transcoder assertion failed, should be off on pipe %c but is still active\n",
1407 pipe_name(pipe));
92f2584a
JB
1408}
1409
4e634389
KP
1410static bool dp_pipe_enabled(struct drm_i915_private *dev_priv,
1411 enum pipe pipe, u32 port_sel, u32 val)
f0575e92
KP
1412{
1413 if ((val & DP_PORT_EN) == 0)
1414 return false;
1415
2d1fe073 1416 if (HAS_PCH_CPT(dev_priv)) {
f0f59a00 1417 u32 trans_dp_ctl = I915_READ(TRANS_DP_CTL(pipe));
f0575e92
KP
1418 if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel)
1419 return false;
2d1fe073 1420 } else if (IS_CHERRYVIEW(dev_priv)) {
44f37d1f
CML
1421 if ((val & DP_PIPE_MASK_CHV) != DP_PIPE_SELECT_CHV(pipe))
1422 return false;
f0575e92
KP
1423 } else {
1424 if ((val & DP_PIPE_MASK) != (pipe << 30))
1425 return false;
1426 }
1427 return true;
1428}
1429
1519b995
KP
1430static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv,
1431 enum pipe pipe, u32 val)
1432{
dc0fa718 1433 if ((val & SDVO_ENABLE) == 0)
1519b995
KP
1434 return false;
1435
2d1fe073 1436 if (HAS_PCH_CPT(dev_priv)) {
dc0fa718 1437 if ((val & SDVO_PIPE_SEL_MASK_CPT) != SDVO_PIPE_SEL_CPT(pipe))
1519b995 1438 return false;
2d1fe073 1439 } else if (IS_CHERRYVIEW(dev_priv)) {
44f37d1f
CML
1440 if ((val & SDVO_PIPE_SEL_MASK_CHV) != SDVO_PIPE_SEL_CHV(pipe))
1441 return false;
1519b995 1442 } else {
dc0fa718 1443 if ((val & SDVO_PIPE_SEL_MASK) != SDVO_PIPE_SEL(pipe))
1519b995
KP
1444 return false;
1445 }
1446 return true;
1447}
1448
1449static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv,
1450 enum pipe pipe, u32 val)
1451{
1452 if ((val & LVDS_PORT_EN) == 0)
1453 return false;
1454
2d1fe073 1455 if (HAS_PCH_CPT(dev_priv)) {
1519b995
KP
1456 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1457 return false;
1458 } else {
1459 if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe))
1460 return false;
1461 }
1462 return true;
1463}
1464
1465static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv,
1466 enum pipe pipe, u32 val)
1467{
1468 if ((val & ADPA_DAC_ENABLE) == 0)
1469 return false;
2d1fe073 1470 if (HAS_PCH_CPT(dev_priv)) {
1519b995
KP
1471 if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe))
1472 return false;
1473 } else {
1474 if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe))
1475 return false;
1476 }
1477 return true;
1478}
1479
291906f1 1480static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
f0f59a00
VS
1481 enum pipe pipe, i915_reg_t reg,
1482 u32 port_sel)
291906f1 1483{
47a05eca 1484 u32 val = I915_READ(reg);
e2c719b7 1485 I915_STATE_WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val),
291906f1 1486 "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n",
f0f59a00 1487 i915_mmio_reg_offset(reg), pipe_name(pipe));
de9a35ab 1488
2d1fe073 1489 I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && (val & DP_PORT_EN) == 0
75c5da27 1490 && (val & DP_PIPEB_SELECT),
de9a35ab 1491 "IBX PCH dp port still using transcoder B\n");
291906f1
JB
1492}
1493
1494static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
f0f59a00 1495 enum pipe pipe, i915_reg_t reg)
291906f1 1496{
47a05eca 1497 u32 val = I915_READ(reg);
e2c719b7 1498 I915_STATE_WARN(hdmi_pipe_enabled(dev_priv, pipe, val),
23c99e77 1499 "PCH HDMI (0x%08x) enabled on transcoder %c, should be disabled\n",
f0f59a00 1500 i915_mmio_reg_offset(reg), pipe_name(pipe));
de9a35ab 1501
2d1fe073 1502 I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && (val & SDVO_ENABLE) == 0
75c5da27 1503 && (val & SDVO_PIPE_B_SELECT),
de9a35ab 1504 "IBX PCH hdmi port still using transcoder B\n");
291906f1
JB
1505}
1506
1507static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
1508 enum pipe pipe)
1509{
291906f1 1510 u32 val;
291906f1 1511
f0575e92
KP
1512 assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B);
1513 assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C);
1514 assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D);
291906f1 1515
649636ef 1516 val = I915_READ(PCH_ADPA);
e2c719b7 1517 I915_STATE_WARN(adpa_pipe_enabled(dev_priv, pipe, val),
291906f1 1518 "PCH VGA enabled on transcoder %c, should be disabled\n",
9db4a9c7 1519 pipe_name(pipe));
291906f1 1520
649636ef 1521 val = I915_READ(PCH_LVDS);
e2c719b7 1522 I915_STATE_WARN(lvds_pipe_enabled(dev_priv, pipe, val),
291906f1 1523 "PCH LVDS enabled on transcoder %c, should be disabled\n",
9db4a9c7 1524 pipe_name(pipe));
291906f1 1525
e2debe91
PZ
1526 assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIB);
1527 assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMIC);
1528 assert_pch_hdmi_disabled(dev_priv, pipe, PCH_HDMID);
291906f1
JB
1529}
1530
cd2d34d9
VS
1531static void _vlv_enable_pll(struct intel_crtc *crtc,
1532 const struct intel_crtc_state *pipe_config)
1533{
1534 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1535 enum pipe pipe = crtc->pipe;
1536
1537 I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
1538 POSTING_READ(DPLL(pipe));
1539 udelay(150);
1540
1541 if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
1542 DRM_ERROR("DPLL %d failed to lock\n", pipe);
1543}
1544
d288f65f 1545static void vlv_enable_pll(struct intel_crtc *crtc,
5cec258b 1546 const struct intel_crtc_state *pipe_config)
87442f73 1547{
cd2d34d9 1548 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8bd3f301 1549 enum pipe pipe = crtc->pipe;
87442f73 1550
8bd3f301 1551 assert_pipe_disabled(dev_priv, pipe);
87442f73 1552
87442f73 1553 /* PLL is protected by panel, make sure we can write it */
7d1a83cb 1554 assert_panel_unlocked(dev_priv, pipe);
87442f73 1555
cd2d34d9
VS
1556 if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
1557 _vlv_enable_pll(crtc, pipe_config);
426115cf 1558
8bd3f301
VS
1559 I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
1560 POSTING_READ(DPLL_MD(pipe));
87442f73
DV
1561}
1562
cd2d34d9
VS
1563
1564static void _chv_enable_pll(struct intel_crtc *crtc,
1565 const struct intel_crtc_state *pipe_config)
9d556c99 1566{
cd2d34d9 1567 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8bd3f301 1568 enum pipe pipe = crtc->pipe;
9d556c99 1569 enum dpio_channel port = vlv_pipe_to_channel(pipe);
9d556c99
CML
1570 u32 tmp;
1571
a580516d 1572 mutex_lock(&dev_priv->sb_lock);
9d556c99
CML
1573
1574 /* Enable back the 10bit clock to display controller */
1575 tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
1576 tmp |= DPIO_DCLKP_EN;
1577 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);
1578
54433e91
VS
1579 mutex_unlock(&dev_priv->sb_lock);
1580
9d556c99
CML
1581 /*
1582 * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
1583 */
1584 udelay(1);
1585
1586 /* Enable PLL */
d288f65f 1587 I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
9d556c99
CML
1588
1589 /* Check PLL is locked */
a11b0703 1590 if (wait_for(((I915_READ(DPLL(pipe)) & DPLL_LOCK_VLV) == DPLL_LOCK_VLV), 1))
9d556c99 1591 DRM_ERROR("PLL %d failed to lock\n", pipe);
cd2d34d9
VS
1592}
1593
1594static void chv_enable_pll(struct intel_crtc *crtc,
1595 const struct intel_crtc_state *pipe_config)
1596{
1597 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1598 enum pipe pipe = crtc->pipe;
1599
1600 assert_pipe_disabled(dev_priv, pipe);
1601
1602 /* PLL is protected by panel, make sure we can write it */
1603 assert_panel_unlocked(dev_priv, pipe);
1604
1605 if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
1606 _chv_enable_pll(crtc, pipe_config);
9d556c99 1607
c231775c
VS
1608 if (pipe != PIPE_A) {
1609 /*
1610 * WaPixelRepeatModeFixForC0:chv
1611 *
1612 * DPLLCMD is AWOL. Use chicken bits to propagate
1613 * the value from DPLLBMD to either pipe B or C.
1614 */
1615 I915_WRITE(CBR4_VLV, pipe == PIPE_B ? CBR_DPLLBMD_PIPE_B : CBR_DPLLBMD_PIPE_C);
1616 I915_WRITE(DPLL_MD(PIPE_B), pipe_config->dpll_hw_state.dpll_md);
1617 I915_WRITE(CBR4_VLV, 0);
1618 dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;
1619
1620 /*
1621 * DPLLB VGA mode also seems to cause problems.
1622 * We should always have it disabled.
1623 */
1624 WARN_ON((I915_READ(DPLL(PIPE_B)) & DPLL_VGA_MODE_DIS) == 0);
1625 } else {
1626 I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
1627 POSTING_READ(DPLL_MD(pipe));
1628 }
9d556c99
CML
1629}
1630
1c4e0274
VS
1631static int intel_num_dvo_pipes(struct drm_device *dev)
1632{
1633 struct intel_crtc *crtc;
1634 int count = 0;
1635
1636 for_each_intel_crtc(dev, crtc)
3538b9df 1637 count += crtc->base.state->active &&
409ee761 1638 intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO);
1c4e0274
VS
1639
1640 return count;
1641}
1642
66e3d5c0 1643static void i9xx_enable_pll(struct intel_crtc *crtc)
63d7bbe9 1644{
66e3d5c0
DV
1645 struct drm_device *dev = crtc->base.dev;
1646 struct drm_i915_private *dev_priv = dev->dev_private;
f0f59a00 1647 i915_reg_t reg = DPLL(crtc->pipe);
6e3c9717 1648 u32 dpll = crtc->config->dpll_hw_state.dpll;
63d7bbe9 1649
66e3d5c0 1650 assert_pipe_disabled(dev_priv, crtc->pipe);
58c6eaa2 1651
63d7bbe9 1652 /* PLL is protected by panel, make sure we can write it */
66e3d5c0
DV
1653 if (IS_MOBILE(dev) && !IS_I830(dev))
1654 assert_panel_unlocked(dev_priv, crtc->pipe);
63d7bbe9 1655
1c4e0274
VS
1656 /* Enable DVO 2x clock on both PLLs if necessary */
1657 if (IS_I830(dev) && intel_num_dvo_pipes(dev) > 0) {
1658 /*
1659 * It appears to be important that we don't enable this
1660 * for the current pipe before otherwise configuring the
1661 * PLL. No idea how this should be handled if multiple
1662 * DVO outputs are enabled simultaneosly.
1663 */
1664 dpll |= DPLL_DVO_2X_MODE;
1665 I915_WRITE(DPLL(!crtc->pipe),
1666 I915_READ(DPLL(!crtc->pipe)) | DPLL_DVO_2X_MODE);
1667 }
66e3d5c0 1668
c2b63374
VS
1669 /*
1670 * Apparently we need to have VGA mode enabled prior to changing
1671 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
1672 * dividers, even though the register value does change.
1673 */
1674 I915_WRITE(reg, 0);
1675
8e7a65aa
VS
1676 I915_WRITE(reg, dpll);
1677
66e3d5c0
DV
1678 /* Wait for the clocks to stabilize. */
1679 POSTING_READ(reg);
1680 udelay(150);
1681
1682 if (INTEL_INFO(dev)->gen >= 4) {
1683 I915_WRITE(DPLL_MD(crtc->pipe),
6e3c9717 1684 crtc->config->dpll_hw_state.dpll_md);
66e3d5c0
DV
1685 } else {
1686 /* The pixel multiplier can only be updated once the
1687 * DPLL is enabled and the clocks are stable.
1688 *
1689 * So write it again.
1690 */
1691 I915_WRITE(reg, dpll);
1692 }
63d7bbe9
JB
1693
1694 /* We do this three times for luck */
66e3d5c0 1695 I915_WRITE(reg, dpll);
63d7bbe9
JB
1696 POSTING_READ(reg);
1697 udelay(150); /* wait for warmup */
66e3d5c0 1698 I915_WRITE(reg, dpll);
63d7bbe9
JB
1699 POSTING_READ(reg);
1700 udelay(150); /* wait for warmup */
66e3d5c0 1701 I915_WRITE(reg, dpll);
63d7bbe9
JB
1702 POSTING_READ(reg);
1703 udelay(150); /* wait for warmup */
1704}
1705
/**
 * i9xx_disable_pll - disable a PLL
 * @crtc: CRTC whose pipe PLL should be disabled
 *
 * Disable the PLL for @crtc's pipe, making sure the pipe is off first.
 *
 * Note! This is for pre-ILK only.
 */
static void i9xx_disable_pll(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe = crtc->pipe;

	/*
	 * Disable DVO 2x clock on both PLLs if necessary: on i830 the DVO 2x
	 * mode bit is set on both PLLs, so only clear it once the last DVO
	 * pipe is going away.
	 */
	if (IS_I830(dev) &&
	    intel_pipe_has_type(crtc, INTEL_OUTPUT_DVO) &&
	    !intel_num_dvo_pipes(dev)) {
		I915_WRITE(DPLL(PIPE_B),
			   I915_READ(DPLL(PIPE_B)) & ~DPLL_DVO_2X_MODE);
		I915_WRITE(DPLL(PIPE_A),
			   I915_READ(DPLL(PIPE_A)) & ~DPLL_DVO_2X_MODE);
	}

	/* Don't disable pipe or pipe PLLs if needed (quirked platforms) */
	if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
	    (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
		return;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	/* Keep VGA mode disable set even while the PLL is off */
	I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
	POSTING_READ(DPLL(pipe));
}
1742
f6071166
JB
/*
 * Disable the pipe PLL on Valleyview, leaving the reference clock and
 * (for pipes other than A) the CRI clock running, since other blocks
 * still depend on them.
 */
static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	val = DPLL_INTEGRATED_REF_CLK_VLV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	/* CRI clock is needed by the PHY for pipes other than A */
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));
}
1758
/*
 * Disable the pipe PLL on Cherryview: turn off the DPLL itself, then
 * gate the 10-bit display clock in the PHY via the sideband interface.
 */
static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
{
	enum dpio_channel port = vlv_pipe_to_channel(pipe);
	u32 val;

	/* Make sure the pipe isn't still relying on us */
	assert_pipe_disabled(dev_priv, pipe);

	val = DPLL_SSC_REF_CLK_CHV |
		DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
	/* CRI clock is needed by the PHY for pipes other than A */
	if (pipe != PIPE_A)
		val |= DPLL_INTEGRATED_CRI_CLK_VLV;

	I915_WRITE(DPLL(pipe), val);
	POSTING_READ(DPLL(pipe));

	/* Sideband (DPIO) accesses must hold sb_lock */
	mutex_lock(&dev_priv->sb_lock);

	/* Disable 10bit clock to display controller */
	val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
	val &= ~DPIO_DCLKP_EN;
	vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);

	mutex_unlock(&dev_priv->sb_lock);
}
1784
/*
 * Wait for the PHY "port ready" status bits of @dport to reach
 * @expected_mask. The ready bits live in DPLL(0) for ports B/C and in
 * DPIO_PHY_STATUS for port D; port C's lane bits are shifted by 4.
 * Warns (does not fail hard) after a 1 s timeout.
 */
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
			 struct intel_digital_port *dport,
			 unsigned int expected_mask)
{
	u32 port_mask;
	i915_reg_t dpll_reg;

	switch (dport->port) {
	case PORT_B:
		port_mask = DPLL_PORTB_READY_MASK;
		dpll_reg = DPLL(0);
		break;
	case PORT_C:
		port_mask = DPLL_PORTC_READY_MASK;
		dpll_reg = DPLL(0);
		expected_mask <<= 4;
		break;
	case PORT_D:
		port_mask = DPLL_PORTD_READY_MASK;
		dpll_reg = DPIO_PHY_STATUS;
		break;
	default:
		BUG();
	}

	if (wait_for((I915_READ(dpll_reg) & port_mask) == expected_mask, 1000))
		WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n",
		     port_name(dport->port), I915_READ(dpll_reg) & port_mask, expected_mask);
}
1814
b8a4f404
PZ
/*
 * Enable the PCH transcoder for @pipe on Ironlake-class PCHs, after
 * asserting that its clock (shared DPLL) and FDI link are already up.
 * The transcoder's BPC and interlace settings are derived from the
 * corresponding CPU PIPECONF register.
 */
static void ironlake_enable_pch_transcoder(struct drm_i915_private *dev_priv,
					   enum pipe pipe)
{
	struct drm_device *dev = dev_priv->dev;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	i915_reg_t reg;
	uint32_t val, pipeconf_val;

	/* Make sure PCH DPLL is enabled */
	assert_shared_dpll_enabled(dev_priv, intel_crtc->config->shared_dpll);

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, pipe);
	assert_fdi_rx_enabled(dev_priv, pipe);

	if (HAS_PCH_CPT(dev)) {
		/* Workaround: Set the timing override bit before enabling the
		 * pch transcoder. */
		reg = TRANS_CHICKEN2(pipe);
		val = I915_READ(reg);
		val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
		I915_WRITE(reg, val);
	}

	reg = PCH_TRANSCONF(pipe);
	val = I915_READ(reg);
	pipeconf_val = I915_READ(PIPECONF(pipe));

	if (HAS_PCH_IBX(dev_priv)) {
		/*
		 * Make the BPC in transcoder be consistent with
		 * that in pipeconf reg. For HDMI we must use 8bpc
		 * here for both 8bpc and 12bpc.
		 */
		val &= ~PIPECONF_BPC_MASK;
		if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_HDMI))
			val |= PIPECONF_8BPC;
		else
			val |= pipeconf_val & PIPECONF_BPC_MASK;
	}

	val &= ~TRANS_INTERLACE_MASK;
	if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK)
		/* IBX + SDVO needs the legacy interlace mode */
		if (HAS_PCH_IBX(dev_priv) &&
		    intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
			val |= TRANS_LEGACY_INTERLACED_ILK;
		else
			val |= TRANS_INTERLACED;
	else
		val |= TRANS_PROGRESSIVE;

	I915_WRITE(reg, val | TRANS_ENABLE);
	if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100))
		DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
}
1871
/*
 * Enable the (single) LPT PCH transcoder, fed by @cpu_transcoder.
 * On LPT the PCH-side FDI RX is hardwired to transcoder A.
 */
static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
				      enum transcoder cpu_transcoder)
{
	u32 val, pipeconf_val;

	/* FDI must be feeding us bits for PCH ports */
	assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
	assert_fdi_rx_enabled(dev_priv, TRANSCODER_A);

	/* Workaround: set timing override bit. */
	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
	val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);

	val = TRANS_ENABLE;
	/* Mirror the CPU transcoder's interlace mode */
	pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));

	if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
	    PIPECONF_INTERLACED_ILK)
		val |= TRANS_INTERLACED;
	else
		val |= TRANS_PROGRESSIVE;

	I915_WRITE(LPT_TRANSCONF, val);
	if (wait_for(I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE, 100))
		DRM_ERROR("Failed to enable PCH transcoder\n");
}
1899
b8a4f404
PZ
/*
 * Disable the PCH transcoder for @pipe, after asserting that FDI and
 * all PCH ports feeding from it are already off, and wait for the
 * hardware to report the transcoder as disabled.
 */
static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
					    enum pipe pipe)
{
	struct drm_device *dev = dev_priv->dev;
	i915_reg_t reg;
	uint32_t val;

	/* FDI relies on the transcoder */
	assert_fdi_tx_disabled(dev_priv, pipe);
	assert_fdi_rx_disabled(dev_priv, pipe);

	/* Ports must be off as well */
	assert_pch_ports_disabled(dev_priv, pipe);

	reg = PCH_TRANSCONF(pipe);
	val = I915_READ(reg);
	val &= ~TRANS_ENABLE;
	I915_WRITE(reg, val);
	/* wait for PCH transcoder off, transcoder state */
	if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50))
		DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));

	if (HAS_PCH_CPT(dev)) {
		/* Workaround: Clear the timing override chicken bit again. */
		reg = TRANS_CHICKEN2(pipe);
		val = I915_READ(reg);
		val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
		I915_WRITE(reg, val);
	}
}
1930
/*
 * Disable the (single) LPT PCH transcoder and clear the timing
 * override workaround bit set at enable time.
 */
static void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
{
	u32 val;

	val = I915_READ(LPT_TRANSCONF);
	val &= ~TRANS_ENABLE;
	I915_WRITE(LPT_TRANSCONF, val);
	/* wait for PCH transcoder off, transcoder state */
	if (wait_for((I915_READ(LPT_TRANSCONF) & TRANS_STATE_ENABLE) == 0, 50))
		DRM_ERROR("Failed to disable PCH transcoder\n");

	/* Workaround: clear timing override bit. */
	val = I915_READ(TRANS_CHICKEN2(PIPE_A));
	val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
	I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
}
1947
/**
 * intel_enable_pipe - enable a pipe, asserting requirements
 * @crtc: crtc responsible for the pipe
 *
 * Enable @crtc's pipe, making sure that various hardware specific requirements
 * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc.
 */
static void intel_enable_pipe(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe = crtc->pipe;
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
	enum pipe pch_transcoder;
	i915_reg_t reg;
	u32 val;

	DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));

	/* Planes/cursor/sprites must come up after the pipe */
	assert_planes_disabled(dev_priv, pipe);
	assert_cursor_disabled(dev_priv, pipe);
	assert_sprites_disabled(dev_priv, pipe);

	/* LPT has a single PCH transcoder, hardwired to A */
	if (HAS_PCH_LPT(dev_priv))
		pch_transcoder = TRANSCODER_A;
	else
		pch_transcoder = pipe;

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (HAS_GMCH_DISPLAY(dev_priv))
		if (crtc->config->has_dsi_encoder)
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	else {
		if (crtc->config->has_pch_encoder) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv, pch_transcoder);
			assert_fdi_tx_pll_enabled(dev_priv,
						  (enum pipe) cpu_transcoder);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	reg = PIPECONF(cpu_transcoder);
	val = I915_READ(reg);
	if (val & PIPECONF_ENABLE) {
		/* Already on: only legal on force-quirked pipes */
		WARN_ON(!((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
			  (pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE)));
		return;
	}

	I915_WRITE(reg, val | PIPECONF_ENABLE);
	POSTING_READ(reg);

	/*
	 * Until the pipe starts DSL will read as 0, which would cause
	 * an apparent vblank timestamp jump, which messes up also the
	 * frame count when it's derived from the timestamps. So let's
	 * wait for the pipe to start properly before we call
	 * drm_crtc_vblank_on()
	 */
	if (dev->max_vblank_count == 0 &&
	    wait_for(intel_get_crtc_scanline(crtc) != crtc->scanline_offset, 50))
		DRM_ERROR("pipe %c didn't start\n", pipe_name(pipe));
}
2018
/**
 * intel_disable_pipe - disable a pipe, asserting requirements
 * @crtc: crtc whose pipe is to be disabled
 *
 * Disable the pipe of @crtc, making sure that various hardware
 * specific requirements are met, if applicable, e.g. plane
 * disabled, panel fitter off, etc.
 *
 * Will wait until the pipe has shut down before returning.
 */
static void intel_disable_pipe(struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = crtc->base.dev->dev_private;
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	i915_reg_t reg;
	u32 val;

	DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe));

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(dev_priv, pipe);
	assert_cursor_disabled(dev_priv, pipe);
	assert_sprites_disabled(dev_priv, pipe);

	reg = PIPECONF(cpu_transcoder);
	val = I915_READ(reg);
	if ((val & PIPECONF_ENABLE) == 0)
		return;

	/*
	 * Double wide has implications for planes
	 * so best keep it disabled when not needed.
	 */
	if (crtc->config->double_wide)
		val &= ~PIPECONF_DOUBLE_WIDE;

	/* Don't disable pipe or pipe PLLs if needed (quirked pipes) */
	if (!(pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) &&
	    !(pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
		val &= ~PIPECONF_ENABLE;

	I915_WRITE(reg, val);
	/* Only wait if we actually turned the pipe off */
	if ((val & PIPECONF_ENABLE) == 0)
		intel_wait_for_pipe_off(crtc);
}
2068
693db184
CW
2069static bool need_vtd_wa(struct drm_device *dev)
2070{
2071#ifdef CONFIG_INTEL_IOMMU
2072 if (INTEL_INFO(dev)->gen >= 6 && intel_iommu_gfx_mapped)
2073 return true;
2074#endif
2075 return false;
2076}
2077
832be82f
VS
/* Size of one GTT tile in bytes: 2 KiB on gen2, 4 KiB on everything else. */
static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
{
	if (IS_GEN2(dev_priv))
		return 2048;

	return 4096;
}
2082
27ba3910
VS
/*
 * Return the width of one tile row in bytes for the given tiling
 * modifier and bytes-per-pixel. Linear surfaces are treated as
 * "tiles" that are one pixel wide.
 */
static unsigned int intel_tile_width_bytes(const struct drm_i915_private *dev_priv,
					   uint64_t fb_modifier, unsigned int cpp)
{
	switch (fb_modifier) {
	case DRM_FORMAT_MOD_NONE:
		return cpp;
	case I915_FORMAT_MOD_X_TILED:
		if (IS_GEN2(dev_priv))
			return 128;
		else
			return 512;
	case I915_FORMAT_MOD_Y_TILED:
		if (IS_GEN2(dev_priv) || HAS_128_BYTE_Y_TILING(dev_priv))
			return 128;
		else
			return 512;
	case I915_FORMAT_MOD_Yf_TILED:
		/* Yf tile width depends on the pixel size */
		switch (cpp) {
		case 1:
			return 64;
		case 2:
		case 4:
			return 128;
		case 8:
		case 16:
			return 256;
		default:
			MISSING_CASE(cpp);
			return cpp;
		}
		break;
	default:
		MISSING_CASE(fb_modifier);
		return cpp;
	}
}
2119
832be82f
VS
2120unsigned int intel_tile_height(const struct drm_i915_private *dev_priv,
2121 uint64_t fb_modifier, unsigned int cpp)
a57ce0b2 2122{
832be82f
VS
2123 if (fb_modifier == DRM_FORMAT_MOD_NONE)
2124 return 1;
2125 else
2126 return intel_tile_size(dev_priv) /
27ba3910 2127 intel_tile_width_bytes(dev_priv, fb_modifier, cpp);
6761dd31
TU
2128}
2129
8d0deca8
VS
2130/* Return the tile dimensions in pixel units */
2131static void intel_tile_dims(const struct drm_i915_private *dev_priv,
2132 unsigned int *tile_width,
2133 unsigned int *tile_height,
2134 uint64_t fb_modifier,
2135 unsigned int cpp)
2136{
2137 unsigned int tile_width_bytes =
2138 intel_tile_width_bytes(dev_priv, fb_modifier, cpp);
2139
2140 *tile_width = tile_width_bytes / cpp;
2141 *tile_height = intel_tile_size(dev_priv) / tile_width_bytes;
2142}
2143
6761dd31
TU
2144unsigned int
2145intel_fb_align_height(struct drm_device *dev, unsigned int height,
832be82f 2146 uint32_t pixel_format, uint64_t fb_modifier)
6761dd31 2147{
832be82f
VS
2148 unsigned int cpp = drm_format_plane_cpp(pixel_format, 0);
2149 unsigned int tile_height = intel_tile_height(to_i915(dev), fb_modifier, cpp);
2150
2151 return ALIGN(height, tile_height);
a57ce0b2
JB
2152}
2153
1663b9d6
VS
2154unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
2155{
2156 unsigned int size = 0;
2157 int i;
2158
2159 for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
2160 size += rot_info->plane[i].width * rot_info->plane[i].height;
2161
2162 return size;
2163}
2164
/*
 * Select the GGTT view used to map @fb for scanout: the rotated view
 * (carrying the fb's precomputed rotation info) for 90/270 degree
 * rotation, the normal view otherwise.
 */
static void
intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
			const struct drm_framebuffer *fb,
			unsigned int rotation)
{
	if (intel_rotation_90_or_270(rotation)) {
		*view = i915_ggtt_view_rotated;
		view->params.rotated = to_intel_framebuffer(fb)->rot_info;
	} else {
		*view = i915_ggtt_view_normal;
	}
}
50470bb0 2177
2d7a215f
VS
2178static void
2179intel_fill_fb_info(struct drm_i915_private *dev_priv,
2180 struct drm_framebuffer *fb)
2181{
2182 struct intel_rotation_info *info = &to_intel_framebuffer(fb)->rot_info;
2183 unsigned int tile_size, tile_width, tile_height, cpp;
50470bb0 2184
d9b3288e
VS
2185 tile_size = intel_tile_size(dev_priv);
2186
2187 cpp = drm_format_plane_cpp(fb->pixel_format, 0);
8d0deca8
VS
2188 intel_tile_dims(dev_priv, &tile_width, &tile_height,
2189 fb->modifier[0], cpp);
d9b3288e 2190
1663b9d6
VS
2191 info->plane[0].width = DIV_ROUND_UP(fb->pitches[0], tile_width * cpp);
2192 info->plane[0].height = DIV_ROUND_UP(fb->height, tile_height);
84fe03f7 2193
89e3e142 2194 if (info->pixel_format == DRM_FORMAT_NV12) {
832be82f 2195 cpp = drm_format_plane_cpp(fb->pixel_format, 1);
8d0deca8
VS
2196 intel_tile_dims(dev_priv, &tile_width, &tile_height,
2197 fb->modifier[1], cpp);
d9b3288e 2198
2d7a215f 2199 info->uv_offset = fb->offsets[1];
1663b9d6
VS
2200 info->plane[1].width = DIV_ROUND_UP(fb->pitches[1], tile_width * cpp);
2201 info->plane[1].height = DIV_ROUND_UP(fb->height / 2, tile_height);
89e3e142 2202 }
f64b98cd
TU
2203}
2204
/* GGTT alignment required for a linear (untiled) scanout surface. */
static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
{
	if (INTEL_INFO(dev_priv)->gen >= 9)
		return 256 * 1024;
	else if (IS_BROADWATER(dev_priv) || IS_CRESTLINE(dev_priv) ||
		 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		return 128 * 1024;
	else if (INTEL_INFO(dev_priv)->gen >= 4)
		return 4 * 1024;
	else
		/* pre-gen4 has no linear alignment requirement */
		return 0;
}
2217
603525d7
VS
/*
 * GGTT alignment required to pin a scanout surface with the given
 * tiling modifier.
 */
static unsigned int intel_surf_alignment(const struct drm_i915_private *dev_priv,
					 uint64_t fb_modifier)
{
	switch (fb_modifier) {
	case DRM_FORMAT_MOD_NONE:
		return intel_linear_alignment(dev_priv);
	case I915_FORMAT_MOD_X_TILED:
		if (INTEL_INFO(dev_priv)->gen >= 9)
			return 256 * 1024;
		return 0;
	case I915_FORMAT_MOD_Y_TILED:
	case I915_FORMAT_MOD_Yf_TILED:
		return 1 * 1024 * 1024;
	default:
		MISSING_CASE(fb_modifier);
		return 0;
	}
}
2236
/*
 * Pin @fb's backing object into the display-capable part of the GGTT
 * (in the view matching @rotation) and, for the normal view, install a
 * fence register for tiled scan-out.
 *
 * Caller must hold struct_mutex. Returns 0 on success or a negative
 * error code; -EDEADLK from the fence path is translated to -EBUSY.
 */
int
intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
			   unsigned int rotation)
{
	struct drm_device *dev = fb->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct i915_ggtt_view view;
	u32 alignment;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->struct_mutex));

	alignment = intel_surf_alignment(dev_priv, fb->modifier[0]);

	intel_fill_fb_ggtt_view(&view, fb, rotation);

	/* Note that the w/a also requires 64 PTE of padding following the
	 * bo. We currently fill all unused PTE with the shadow page and so
	 * we should always have valid PTE following the scanout preventing
	 * the VT-d warning.
	 */
	if (need_vtd_wa(dev) && alignment < 256 * 1024)
		alignment = 256 * 1024;

	/*
	 * Global gtt pte registers are special registers which actually forward
	 * writes to a chunk of system memory. Which means that there is no risk
	 * that the register values disappear as soon as we call
	 * intel_runtime_pm_put(), so it is correct to wrap only the
	 * pin/unpin/fence and not more.
	 */
	intel_runtime_pm_get(dev_priv);

	ret = i915_gem_object_pin_to_display_plane(obj, alignment,
						   &view);
	if (ret)
		goto err_pm;

	/* Install a fence for tiled scan-out. Pre-i965 always needs a
	 * fence, whereas 965+ only requires a fence if using
	 * framebuffer compression. For simplicity, we always install
	 * a fence as the cost is not that onerous.
	 */
	if (view.type == I915_GGTT_VIEW_NORMAL) {
		ret = i915_gem_object_get_fence(obj);
		if (ret == -EDEADLK) {
			/*
			 * -EDEADLK means there are no free fences
			 * no pending flips.
			 *
			 * This is propagated to atomic, but it uses
			 * -EDEADLK to force a locking recovery, so
			 * change the returned error to -EBUSY.
			 */
			ret = -EBUSY;
			goto err_unpin;
		} else if (ret)
			goto err_unpin;

		i915_gem_object_pin_fence(obj);
	}

	intel_runtime_pm_put(dev_priv);
	return 0;

err_unpin:
	i915_gem_object_unpin_from_display_plane(obj, &view);
err_pm:
	intel_runtime_pm_put(dev_priv);
	return ret;
}
2309
/*
 * Undo intel_pin_and_fence_fb_obj(): release the fence (normal view
 * only) and unpin the object from the display plane GGTT view.
 * Caller must hold struct_mutex.
 */
void intel_unpin_fb_obj(struct drm_framebuffer *fb, unsigned int rotation)
{
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	struct i915_ggtt_view view;

	WARN_ON(!mutex_is_locked(&obj->base.dev->struct_mutex));

	/* Must match the view used when pinning */
	intel_fill_fb_ggtt_view(&view, fb, rotation);

	if (view.type == I915_GGTT_VIEW_NORMAL)
		i915_gem_object_unpin_fence(obj);

	i915_gem_object_unpin_from_display_plane(obj, &view);
}
2324
/*
 * Adjust the tile offset by moving the difference into
 * the x/y offsets.
 *
 * Input tile dimensions and pitch must already be
 * rotated to match x and y, and in pixel units.
 *
 * @old_offset and @new_offset are tile-aligned byte offsets
 * (new_offset <= old_offset); the whole-tile difference between them
 * is folded back into *@x/*@y. Returns @new_offset.
 */
static u32 intel_adjust_tile_offset(int *x, int *y,
				    unsigned int tile_width,
				    unsigned int tile_height,
				    unsigned int tile_size,
				    unsigned int pitch_tiles,
				    u32 old_offset,
				    u32 new_offset)
{
	unsigned int tiles;

	WARN_ON(old_offset & (tile_size - 1));
	WARN_ON(new_offset & (tile_size - 1));
	WARN_ON(new_offset > old_offset);

	tiles = (old_offset - new_offset) / tile_size;

	/* Convert whole tiles back into row/column pixel offsets */
	*y += tiles / pitch_tiles * tile_height;
	*x += tiles % pitch_tiles * tile_width;

	return new_offset;
}
2353
/*
 * Computes the linear offset to the base tile and adjusts
 * x, y. bytes per pixel is assumed to be a power-of-two.
 *
 * In the 90/270 rotated case, x and y are assumed
 * to be already rotated to match the rotated GTT view, and
 * pitch is the tile_height aligned framebuffer height.
 *
 * Returns the surface-alignment-rounded byte offset; the remainder is
 * folded into *@x/*@y so (offset, x, y) still addresses the same pixel.
 */
u32 intel_compute_tile_offset(int *x, int *y,
			      const struct drm_framebuffer *fb, int plane,
			      unsigned int pitch,
			      unsigned int rotation)
{
	const struct drm_i915_private *dev_priv = to_i915(fb->dev);
	uint64_t fb_modifier = fb->modifier[plane];
	unsigned int cpp = drm_format_plane_cpp(fb->pixel_format, plane);
	u32 offset, offset_aligned, alignment;

	/* Turn the alignment into a mask (alignment is a power of two) */
	alignment = intel_surf_alignment(dev_priv, fb_modifier);
	if (alignment)
		alignment--;

	if (fb_modifier != DRM_FORMAT_MOD_NONE) {
		unsigned int tile_size, tile_width, tile_height;
		unsigned int tile_rows, tiles, pitch_tiles;

		tile_size = intel_tile_size(dev_priv);
		intel_tile_dims(dev_priv, &tile_width, &tile_height,
				fb_modifier, cpp);

		if (intel_rotation_90_or_270(rotation)) {
			/* In the rotated view pitch is in tile_height units */
			pitch_tiles = pitch / tile_height;
			swap(tile_width, tile_height);
		} else {
			pitch_tiles = pitch / (tile_width * cpp);
		}

		/* Split x/y into whole tiles and an intra-tile remainder */
		tile_rows = *y / tile_height;
		*y %= tile_height;

		tiles = *x / tile_width;
		*x %= tile_width;

		offset = (tile_rows * pitch_tiles + tiles) * tile_size;
		offset_aligned = offset & ~alignment;

		intel_adjust_tile_offset(x, y, tile_width, tile_height,
					 tile_size, pitch_tiles,
					 offset, offset_aligned);
	} else {
		offset = *y * pitch + *x * cpp;
		offset_aligned = offset & ~alignment;

		/* Fold the sub-alignment remainder back into x/y */
		*y = (offset & alignment) / pitch;
		*x = ((offset & alignment) - *y * pitch) / cpp;
	}

	return offset_aligned;
}
2413
b35d63fa 2414static int i9xx_format_to_fourcc(int format)
46f297fb
JB
2415{
2416 switch (format) {
2417 case DISPPLANE_8BPP:
2418 return DRM_FORMAT_C8;
2419 case DISPPLANE_BGRX555:
2420 return DRM_FORMAT_XRGB1555;
2421 case DISPPLANE_BGRX565:
2422 return DRM_FORMAT_RGB565;
2423 default:
2424 case DISPPLANE_BGRX888:
2425 return DRM_FORMAT_XRGB8888;
2426 case DISPPLANE_RGBX888:
2427 return DRM_FORMAT_XBGR8888;
2428 case DISPPLANE_BGRX101010:
2429 return DRM_FORMAT_XRGB2101010;
2430 case DISPPLANE_RGBX101010:
2431 return DRM_FORMAT_XBGR2101010;
2432 }
2433}
2434
bc8d7dff
DL
2435static int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
2436{
2437 switch (format) {
2438 case PLANE_CTL_FORMAT_RGB_565:
2439 return DRM_FORMAT_RGB565;
2440 default:
2441 case PLANE_CTL_FORMAT_XRGB_8888:
2442 if (rgb_order) {
2443 if (alpha)
2444 return DRM_FORMAT_ABGR8888;
2445 else
2446 return DRM_FORMAT_XBGR8888;
2447 } else {
2448 if (alpha)
2449 return DRM_FORMAT_ARGB8888;
2450 else
2451 return DRM_FORMAT_XRGB8888;
2452 }
2453 case PLANE_CTL_FORMAT_XRGB_2101010:
2454 if (rgb_order)
2455 return DRM_FORMAT_XBGR2101010;
2456 else
2457 return DRM_FORMAT_XRGB2101010;
2458 }
2459}
2460
/*
 * Try to wrap the BIOS-programmed framebuffer (described by
 * @plane_config) in a GEM object backed by the preallocated stolen
 * memory range, and initialize an intel framebuffer around it.
 * Returns true on success, false if the fb can't or shouldn't be used.
 */
static bool
intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
			      struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	struct drm_i915_gem_object *obj = NULL;
	struct drm_mode_fb_cmd2 mode_cmd = { 0 };
	struct drm_framebuffer *fb = &plane_config->fb->base;
	u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
	u32 size_aligned = round_up(plane_config->base + plane_config->size,
				    PAGE_SIZE);

	size_aligned -= base_aligned;

	if (plane_config->size == 0)
		return false;

	/* If the FB is too big, just don't use it since fbdev is not very
	 * important and we should probably use that space with FBC or other
	 * features. */
	if (size_aligned * 2 > ggtt->stolen_usable_size)
		return false;

	mutex_lock(&dev->struct_mutex);

	obj = i915_gem_object_create_stolen_for_preallocated(dev,
							     base_aligned,
							     base_aligned,
							     size_aligned);
	if (!obj) {
		mutex_unlock(&dev->struct_mutex);
		return false;
	}

	/* Mirror the BIOS tiling setup onto the new object */
	obj->tiling_mode = plane_config->tiling;
	if (obj->tiling_mode == I915_TILING_X)
		obj->stride = fb->pitches[0];

	mode_cmd.pixel_format = fb->pixel_format;
	mode_cmd.width = fb->width;
	mode_cmd.height = fb->height;
	mode_cmd.pitches[0] = fb->pitches[0];
	mode_cmd.modifier[0] = fb->modifier[0];
	mode_cmd.flags = DRM_MODE_FB_MODIFIERS;

	if (intel_framebuffer_init(dev, to_intel_framebuffer(fb),
				   &mode_cmd, obj)) {
		DRM_DEBUG_KMS("intel fb init failed\n");
		goto out_unref_obj;
	}

	mutex_unlock(&dev->struct_mutex);

	DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
	return true;

out_unref_obj:
	/* Drop our reference; the stolen object is freed with it */
	drm_gem_object_unreference(&obj->base);
	mutex_unlock(&dev->struct_mutex);
	return false;
}
2524
afd65eb4
MR
2525/* Update plane->state->fb to match plane->fb after driver-internal updates */
2526static void
2527update_state_fb(struct drm_plane *plane)
2528{
2529 if (plane->fb == plane->state->fb)
2530 return;
2531
2532 if (plane->state->fb)
2533 drm_framebuffer_unreference(plane->state->fb);
2534 plane->state->fb = plane->fb;
2535 if (plane->state->fb)
2536 drm_framebuffer_reference(plane->state->fb);
2537}
2538
/*
 * Take over the BIOS framebuffer for @intel_crtc's primary plane:
 * either wrap the preallocated stolen memory in a new fb, or share an
 * fb already reconstructed for another CRTC at the same GGTT base.
 * If neither works, disable the primary plane so later state doesn't
 * see a visible plane with a NULL fb.
 */
static void
intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
			     struct intel_initial_plane_config *plane_config)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *c;
	struct intel_crtc *i;
	struct drm_i915_gem_object *obj;
	struct drm_plane *primary = intel_crtc->base.primary;
	struct drm_plane_state *plane_state = primary->state;
	struct drm_crtc_state *crtc_state = intel_crtc->base.state;
	struct intel_plane *intel_plane = to_intel_plane(primary);
	struct intel_plane_state *intel_state =
		to_intel_plane_state(plane_state);
	struct drm_framebuffer *fb;

	if (!plane_config->fb)
		return;

	if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
		fb = &plane_config->fb->base;
		goto valid_fb;
	}

	kfree(plane_config->fb);

	/*
	 * Failed to alloc the obj, check to see if we should share
	 * an fb with another CRTC instead
	 */
	for_each_crtc(dev, c) {
		i = to_intel_crtc(c);

		if (c == &intel_crtc->base)
			continue;

		if (!i->active)
			continue;

		fb = c->primary->fb;
		if (!fb)
			continue;

		obj = intel_fb_obj(fb);
		/* Same GGTT base => same BIOS surface, share it */
		if (i915_gem_obj_ggtt_offset(obj) == plane_config->base) {
			drm_framebuffer_reference(fb);
			goto valid_fb;
		}
	}

	/*
	 * We've failed to reconstruct the BIOS FB.  Current display state
	 * indicates that the primary plane is visible, but has a NULL FB,
	 * which will lead to problems later if we don't fix it up.  The
	 * simplest solution is to just disable the primary plane now and
	 * pretend the BIOS never had it enabled.
	 */
	to_intel_plane_state(plane_state)->visible = false;
	crtc_state->plane_mask &= ~(1 << drm_plane_index(primary));
	intel_pre_disable_primary_noatomic(&intel_crtc->base);
	intel_plane->disable_plane(primary, &intel_crtc->base);

	return;

valid_fb:
	/* Full-plane scanout: src is in 16.16 fixed point, dst in pixels */
	plane_state->src_x = 0;
	plane_state->src_y = 0;
	plane_state->src_w = fb->width << 16;
	plane_state->src_h = fb->height << 16;

	plane_state->crtc_x = 0;
	plane_state->crtc_y = 0;
	plane_state->crtc_w = fb->width;
	plane_state->crtc_h = fb->height;

	intel_state->src.x1 = plane_state->src_x;
	intel_state->src.y1 = plane_state->src_y;
	intel_state->src.x2 = plane_state->src_x + plane_state->src_w;
	intel_state->src.y2 = plane_state->src_y + plane_state->src_h;
	intel_state->dst.x1 = plane_state->crtc_x;
	intel_state->dst.y1 = plane_state->crtc_y;
	intel_state->dst.x2 = plane_state->crtc_x + plane_state->crtc_w;
	intel_state->dst.y2 = plane_state->crtc_y + plane_state->crtc_h;

	obj = intel_fb_obj(fb);
	/* Don't let GEM re-tune swizzling while the BIOS fb is live */
	if (obj->tiling_mode != I915_TILING_NONE)
		dev_priv->preserve_bios_swizzle = true;

	drm_framebuffer_reference(fb);
	primary->fb = primary->state->fb = fb;
	primary->crtc = primary->state->crtc = &intel_crtc->base;
	intel_crtc->base.state->plane_mask |= (1 << drm_plane_index(primary));
	obj->frontbuffer_bits |= to_intel_plane(primary)->frontbuffer_bit;
}
2634
/*
 * Program the primary display plane on gen2-4 / VLV / CHV style hardware.
 *
 * Configures pixel format, tiling, rotation and base address from the
 * already-checked atomic plane state, then arms the update by writing the
 * surface/base register last. Callers guarantee fb/obj are pinned.
 */
static void i9xx_update_primary_plane(struct drm_plane *primary,
				      const struct intel_crtc_state *crtc_state,
				      const struct intel_plane_state *plane_state)
{
	struct drm_device *dev = primary->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_framebuffer *fb = plane_state->base.fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	int plane = intel_crtc->plane;
	u32 linear_offset;
	u32 dspcntr;
	i915_reg_t reg = DSPCNTR(plane);
	unsigned int rotation = plane_state->base.rotation;
	int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
	/* src coordinates are 16.16 fixed point; integer part only */
	int x = plane_state->src.x1 >> 16;
	int y = plane_state->src.y1 >> 16;

	dspcntr = DISPPLANE_GAMMA_ENABLE;

	dspcntr |= DISPLAY_PLANE_ENABLE;

	if (INTEL_INFO(dev)->gen < 4) {
		if (intel_crtc->pipe == PIPE_B)
			dspcntr |= DISPPLANE_SEL_PIPE_B;

		/* pipesrc and dspsize control the size that is scaled from,
		 * which should always be the user's requested size.
		 */
		I915_WRITE(DSPSIZE(plane),
			   ((crtc_state->pipe_src_h - 1) << 16) |
			   (crtc_state->pipe_src_w - 1));
		I915_WRITE(DSPPOS(plane), 0);
	} else if (IS_CHERRYVIEW(dev) && plane == PLANE_B) {
		/* CHV plane B has dedicated size/pos registers */
		I915_WRITE(PRIMSIZE(plane),
			   ((crtc_state->pipe_src_h - 1) << 16) |
			   (crtc_state->pipe_src_w - 1));
		I915_WRITE(PRIMPOS(plane), 0);
		I915_WRITE(PRIMCNSTALPHA(plane), 0);
	}

	/* Translate the DRM fourcc into the hardware format field. */
	switch (fb->pixel_format) {
	case DRM_FORMAT_C8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case DRM_FORMAT_XRGB1555:
		dspcntr |= DISPPLANE_BGRX555;
		break;
	case DRM_FORMAT_RGB565:
		dspcntr |= DISPPLANE_BGRX565;
		break;
	case DRM_FORMAT_XRGB8888:
		dspcntr |= DISPPLANE_BGRX888;
		break;
	case DRM_FORMAT_XBGR8888:
		dspcntr |= DISPPLANE_RGBX888;
		break;
	case DRM_FORMAT_XRGB2101010:
		dspcntr |= DISPPLANE_BGRX101010;
		break;
	case DRM_FORMAT_XBGR2101010:
		dspcntr |= DISPPLANE_RGBX101010;
		break;
	default:
		/* atomic check is expected to have rejected anything else */
		BUG();
	}

	if (INTEL_INFO(dev)->gen >= 4 &&
	    obj->tiling_mode != I915_TILING_NONE)
		dspcntr |= DISPPLANE_TILED;

	if (IS_G4X(dev))
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	linear_offset = y * fb->pitches[0] + x * cpp;

	if (INTEL_INFO(dev)->gen >= 4) {
		/* gen4+ scans out from a page-aligned surface base plus a
		 * tile offset; x/y are adjusted to the remainder. */
		intel_crtc->dspaddr_offset =
			intel_compute_tile_offset(&x, &y, fb, 0,
						  fb->pitches[0], rotation);
		linear_offset -= intel_crtc->dspaddr_offset;
	} else {
		intel_crtc->dspaddr_offset = linear_offset;
	}

	if (rotation == BIT(DRM_ROTATE_180)) {
		dspcntr |= DISPPLANE_ROTATE_180;

		x += (crtc_state->pipe_src_w - 1);
		y += (crtc_state->pipe_src_h - 1);

		/* Finding the last pixel of the last line of the display
		   data and adding to linear_offset*/
		linear_offset +=
			(crtc_state->pipe_src_h - 1) * fb->pitches[0] +
			(crtc_state->pipe_src_w - 1) * cpp;
	}

	intel_crtc->adjusted_x = x;
	intel_crtc->adjusted_y = y;

	I915_WRITE(reg, dspcntr);

	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
	if (INTEL_INFO(dev)->gen >= 4) {
		/* DSPSURF write latches the other plane registers */
		I915_WRITE(DSPSURF(plane),
			   i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
		I915_WRITE(DSPLINOFF(plane), linear_offset);
	} else
		I915_WRITE(DSPADDR(plane), i915_gem_obj_ggtt_offset(obj) + linear_offset);
	POSTING_READ(reg);
}
2748
a8d201af
ML
2749static void i9xx_disable_primary_plane(struct drm_plane *primary,
2750 struct drm_crtc *crtc)
17638cd6
JB
2751{
2752 struct drm_device *dev = crtc->dev;
2753 struct drm_i915_private *dev_priv = dev->dev_private;
2754 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
17638cd6 2755 int plane = intel_crtc->plane;
f45651ba 2756
a8d201af
ML
2757 I915_WRITE(DSPCNTR(plane), 0);
2758 if (INTEL_INFO(dev_priv)->gen >= 4)
fdd508a6 2759 I915_WRITE(DSPSURF(plane), 0);
a8d201af
ML
2760 else
2761 I915_WRITE(DSPADDR(plane), 0);
2762 POSTING_READ(DSPCNTR(plane));
2763}
c9ba6fad 2764
/*
 * Program the primary plane on ILK/SNB/IVB/HSW/BDW.
 *
 * Like i9xx_update_primary_plane() but these platforms always use the
 * surface + tile-offset addressing scheme, and HSW/BDW have the pipe CSC
 * and a combined DSPOFFSET register instead of TILEOFF/LINOFF.
 */
static void ironlake_update_primary_plane(struct drm_plane *primary,
					  const struct intel_crtc_state *crtc_state,
					  const struct intel_plane_state *plane_state)
{
	struct drm_device *dev = primary->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_framebuffer *fb = plane_state->base.fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	int plane = intel_crtc->plane;
	u32 linear_offset;
	u32 dspcntr;
	i915_reg_t reg = DSPCNTR(plane);
	unsigned int rotation = plane_state->base.rotation;
	int cpp = drm_format_plane_cpp(fb->pixel_format, 0);
	/* src coordinates are 16.16 fixed point; integer part only */
	int x = plane_state->src.x1 >> 16;
	int y = plane_state->src.y1 >> 16;

	dspcntr = DISPPLANE_GAMMA_ENABLE;
	dspcntr |= DISPLAY_PLANE_ENABLE;

	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;

	/* Translate the DRM fourcc into the hardware format field. */
	switch (fb->pixel_format) {
	case DRM_FORMAT_C8:
		dspcntr |= DISPPLANE_8BPP;
		break;
	case DRM_FORMAT_RGB565:
		dspcntr |= DISPPLANE_BGRX565;
		break;
	case DRM_FORMAT_XRGB8888:
		dspcntr |= DISPPLANE_BGRX888;
		break;
	case DRM_FORMAT_XBGR8888:
		dspcntr |= DISPPLANE_RGBX888;
		break;
	case DRM_FORMAT_XRGB2101010:
		dspcntr |= DISPPLANE_BGRX101010;
		break;
	case DRM_FORMAT_XBGR2101010:
		dspcntr |= DISPPLANE_RGBX101010;
		break;
	default:
		/* atomic check is expected to have rejected anything else */
		BUG();
	}

	if (obj->tiling_mode != I915_TILING_NONE)
		dspcntr |= DISPPLANE_TILED;

	if (!IS_HASWELL(dev) && !IS_BROADWELL(dev))
		dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;

	linear_offset = y * fb->pitches[0] + x * cpp;
	/* Surface base is page aligned; x/y are adjusted to the remainder. */
	intel_crtc->dspaddr_offset =
		intel_compute_tile_offset(&x, &y, fb, 0,
					  fb->pitches[0], rotation);
	linear_offset -= intel_crtc->dspaddr_offset;
	if (rotation == BIT(DRM_ROTATE_180)) {
		dspcntr |= DISPPLANE_ROTATE_180;

		/* HSW/BDW handle the offset in hardware for 180 degrees */
		if (!IS_HASWELL(dev) && !IS_BROADWELL(dev)) {
			x += (crtc_state->pipe_src_w - 1);
			y += (crtc_state->pipe_src_h - 1);

			/* Finding the last pixel of the last line of the display
			   data and adding to linear_offset*/
			linear_offset +=
				(crtc_state->pipe_src_h - 1) * fb->pitches[0] +
				(crtc_state->pipe_src_w - 1) * cpp;
		}
	}

	intel_crtc->adjusted_x = x;
	intel_crtc->adjusted_y = y;

	I915_WRITE(reg, dspcntr);

	I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]);
	I915_WRITE(DSPSURF(plane),
		   i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset);
	if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
		I915_WRITE(DSPOFFSET(plane), (y << 16) | x);
	} else {
		I915_WRITE(DSPTILEOFF(plane), (y << 16) | x);
		I915_WRITE(DSPLINOFF(plane), linear_offset);
	}
	POSTING_READ(reg);
}
2854
7b49f948
VS
2855u32 intel_fb_stride_alignment(const struct drm_i915_private *dev_priv,
2856 uint64_t fb_modifier, uint32_t pixel_format)
b321803d 2857{
7b49f948 2858 if (fb_modifier == DRM_FORMAT_MOD_NONE) {
b321803d 2859 return 64;
7b49f948
VS
2860 } else {
2861 int cpp = drm_format_plane_cpp(pixel_format, 0);
2862
27ba3910 2863 return intel_tile_width_bytes(dev_priv, fb_modifier, cpp);
b321803d
DL
2864 }
2865}
2866
/*
 * Look up the GGTT offset at which @obj is mapped for scanout by
 * @intel_plane, taking the plane's rotation-dependent GGTT view into
 * account. For the second (UV) plane of a rotated NV12-style fb
 * (@plane == 1) the offset is advanced to the UV start page.
 *
 * Returns the low 32 bits of the offset; returns -1 (0xffffffff, an
 * invalid offset sentinel) if no GGTT vma exists for the view.
 */
u32 intel_plane_obj_offset(struct intel_plane *intel_plane,
			   struct drm_i915_gem_object *obj,
			   unsigned int plane)
{
	struct i915_ggtt_view view;
	struct i915_vma *vma;
	u64 offset;

	intel_fill_fb_ggtt_view(&view, intel_plane->base.state->fb,
				intel_plane->base.state->rotation);

	vma = i915_gem_obj_to_ggtt_view(obj, &view);
	if (WARN(!vma, "ggtt vma for display object not found! (view=%u)\n",
		view.type))
		return -1;

	offset = vma->node.start;

	if (plane == 1) {
		offset += vma->ggtt_view.params.rotated.uv_start_page *
			  PAGE_SIZE;
	}

	/* Display engine takes a 32 bit offset; anything above is a bug. */
	WARN_ON(upper_32_bits(offset));

	return lower_32_bits(offset);
}
2894
e435d6e5
ML
2895static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
2896{
2897 struct drm_device *dev = intel_crtc->base.dev;
2898 struct drm_i915_private *dev_priv = dev->dev_private;
2899
2900 I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
2901 I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
2902 I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
e435d6e5
ML
2903}
2904
a1b2278e
CK
2905/*
2906 * This function detaches (aka. unbinds) unused scalers in hardware
2907 */
0583236e 2908static void skl_detach_scalers(struct intel_crtc *intel_crtc)
a1b2278e 2909{
a1b2278e
CK
2910 struct intel_crtc_scaler_state *scaler_state;
2911 int i;
2912
a1b2278e
CK
2913 scaler_state = &intel_crtc->config->scaler_state;
2914
2915 /* loop through and disable scalers that aren't in use */
2916 for (i = 0; i < intel_crtc->num_scalers; i++) {
e435d6e5
ML
2917 if (!scaler_state->scalers[i].in_use)
2918 skl_detach_scaler(intel_crtc, i);
a1b2278e
CK
2919 }
2920}
2921
6156a456 2922u32 skl_plane_ctl_format(uint32_t pixel_format)
70d21f0e 2923{
6156a456 2924 switch (pixel_format) {
d161cf7a 2925 case DRM_FORMAT_C8:
c34ce3d1 2926 return PLANE_CTL_FORMAT_INDEXED;
70d21f0e 2927 case DRM_FORMAT_RGB565:
c34ce3d1 2928 return PLANE_CTL_FORMAT_RGB_565;
70d21f0e 2929 case DRM_FORMAT_XBGR8888:
c34ce3d1 2930 return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
6156a456 2931 case DRM_FORMAT_XRGB8888:
c34ce3d1 2932 return PLANE_CTL_FORMAT_XRGB_8888;
6156a456
CK
2933 /*
2934 * XXX: For ARBG/ABGR formats we default to expecting scanout buffers
2935 * to be already pre-multiplied. We need to add a knob (or a different
2936 * DRM_FORMAT) for user-space to configure that.
2937 */
f75fb42a 2938 case DRM_FORMAT_ABGR8888:
c34ce3d1 2939 return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX |
6156a456 2940 PLANE_CTL_ALPHA_SW_PREMULTIPLY;
6156a456 2941 case DRM_FORMAT_ARGB8888:
c34ce3d1 2942 return PLANE_CTL_FORMAT_XRGB_8888 |
6156a456 2943 PLANE_CTL_ALPHA_SW_PREMULTIPLY;
70d21f0e 2944 case DRM_FORMAT_XRGB2101010:
c34ce3d1 2945 return PLANE_CTL_FORMAT_XRGB_2101010;
70d21f0e 2946 case DRM_FORMAT_XBGR2101010:
c34ce3d1 2947 return PLANE_CTL_ORDER_RGBX | PLANE_CTL_FORMAT_XRGB_2101010;
6156a456 2948 case DRM_FORMAT_YUYV:
c34ce3d1 2949 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
6156a456 2950 case DRM_FORMAT_YVYU:
c34ce3d1 2951 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
6156a456 2952 case DRM_FORMAT_UYVY:
c34ce3d1 2953 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
6156a456 2954 case DRM_FORMAT_VYUY:
c34ce3d1 2955 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
70d21f0e 2956 default:
4249eeef 2957 MISSING_CASE(pixel_format);
70d21f0e 2958 }
8cfcba41 2959
c34ce3d1 2960 return 0;
6156a456 2961}
70d21f0e 2962
6156a456
CK
2963u32 skl_plane_ctl_tiling(uint64_t fb_modifier)
2964{
6156a456 2965 switch (fb_modifier) {
30af77c4 2966 case DRM_FORMAT_MOD_NONE:
70d21f0e 2967 break;
30af77c4 2968 case I915_FORMAT_MOD_X_TILED:
c34ce3d1 2969 return PLANE_CTL_TILED_X;
b321803d 2970 case I915_FORMAT_MOD_Y_TILED:
c34ce3d1 2971 return PLANE_CTL_TILED_Y;
b321803d 2972 case I915_FORMAT_MOD_Yf_TILED:
c34ce3d1 2973 return PLANE_CTL_TILED_YF;
70d21f0e 2974 default:
6156a456 2975 MISSING_CASE(fb_modifier);
70d21f0e 2976 }
8cfcba41 2977
c34ce3d1 2978 return 0;
6156a456 2979}
70d21f0e 2980
6156a456
CK
2981u32 skl_plane_ctl_rotation(unsigned int rotation)
2982{
3b7a5119 2983 switch (rotation) {
6156a456
CK
2984 case BIT(DRM_ROTATE_0):
2985 break;
1e8df167
SJ
2986 /*
2987 * DRM_ROTATE_ is counter clockwise to stay compatible with Xrandr
2988 * while i915 HW rotation is clockwise, thats why this swapping.
2989 */
3b7a5119 2990 case BIT(DRM_ROTATE_90):
1e8df167 2991 return PLANE_CTL_ROTATE_270;
3b7a5119 2992 case BIT(DRM_ROTATE_180):
c34ce3d1 2993 return PLANE_CTL_ROTATE_180;
3b7a5119 2994 case BIT(DRM_ROTATE_270):
1e8df167 2995 return PLANE_CTL_ROTATE_90;
6156a456
CK
2996 default:
2997 MISSING_CASE(rotation);
2998 }
2999
c34ce3d1 3000 return 0;
6156a456
CK
3001}
3002
/*
 * Program the SKL+ universal primary plane from the checked atomic state:
 * plane control (format/tiling/rotation), offset, size, stride, optional
 * pipe scaler, and finally the surface address which arms the update.
 */
static void skylake_update_primary_plane(struct drm_plane *plane,
					 const struct intel_crtc_state *crtc_state,
					 const struct intel_plane_state *plane_state)
{
	struct drm_device *dev = plane->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
	struct drm_framebuffer *fb = plane_state->base.fb;
	struct drm_i915_gem_object *obj = intel_fb_obj(fb);
	int pipe = intel_crtc->pipe;
	u32 plane_ctl, stride_div, stride;
	u32 tile_height, plane_offset, plane_size;
	unsigned int rotation = plane_state->base.rotation;
	int x_offset, y_offset;
	u32 surf_addr;
	int scaler_id = plane_state->scaler_id;
	/* src rect is 16.16 fixed point; dst rect is integer pixels */
	int src_x = plane_state->src.x1 >> 16;
	int src_y = plane_state->src.y1 >> 16;
	int src_w = drm_rect_width(&plane_state->src) >> 16;
	int src_h = drm_rect_height(&plane_state->src) >> 16;
	int dst_x = plane_state->dst.x1;
	int dst_y = plane_state->dst.y1;
	int dst_w = drm_rect_width(&plane_state->dst);
	int dst_h = drm_rect_height(&plane_state->dst);

	plane_ctl = PLANE_CTL_ENABLE |
		    PLANE_CTL_PIPE_GAMMA_ENABLE |
		    PLANE_CTL_PIPE_CSC_ENABLE;

	plane_ctl |= skl_plane_ctl_format(fb->pixel_format);
	plane_ctl |= skl_plane_ctl_tiling(fb->modifier[0]);
	plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
	plane_ctl |= skl_plane_ctl_rotation(rotation);

	stride_div = intel_fb_stride_alignment(dev_priv, fb->modifier[0],
					       fb->pixel_format);
	surf_addr = intel_plane_obj_offset(to_intel_plane(plane), obj, 0);

	WARN_ON(drm_rect_width(&plane_state->src) == 0);

	if (intel_rotation_90_or_270(rotation)) {
		int cpp = drm_format_plane_cpp(fb->pixel_format, 0);

		/* stride = Surface height in tiles */
		tile_height = intel_tile_height(dev_priv, fb->modifier[0], cpp);
		stride = DIV_ROUND_UP(fb->height, tile_height);
		/* for 90/270 the hw scans the rotated view, so x/y and
		 * width/height swap roles in the offset/size registers */
		x_offset = stride * tile_height - src_y - src_h;
		y_offset = src_x;
		plane_size = (src_w - 1) << 16 | (src_h - 1);
	} else {
		stride = fb->pitches[0] / stride_div;
		x_offset = src_x;
		y_offset = src_y;
		plane_size = (src_h - 1) << 16 | (src_w - 1);
	}
	plane_offset = y_offset << 16 | x_offset;

	intel_crtc->adjusted_x = x_offset;
	intel_crtc->adjusted_y = y_offset;

	I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl);
	I915_WRITE(PLANE_OFFSET(pipe, 0), plane_offset);
	I915_WRITE(PLANE_SIZE(pipe, 0), plane_size);
	I915_WRITE(PLANE_STRIDE(pipe, 0), stride);

	if (scaler_id >= 0) {
		/* Route the plane through its assigned pipe scaler; the
		 * plane position is then controlled by the scaler window. */
		uint32_t ps_ctrl = 0;

		WARN_ON(!dst_w || !dst_h);
		ps_ctrl = PS_SCALER_EN | PS_PLANE_SEL(0) |
			crtc_state->scaler_state.scalers[scaler_id].mode;
		I915_WRITE(SKL_PS_CTRL(pipe, scaler_id), ps_ctrl);
		I915_WRITE(SKL_PS_PWR_GATE(pipe, scaler_id), 0);
		I915_WRITE(SKL_PS_WIN_POS(pipe, scaler_id), (dst_x << 16) | dst_y);
		I915_WRITE(SKL_PS_WIN_SZ(pipe, scaler_id), (dst_w << 16) | dst_h);
		I915_WRITE(PLANE_POS(pipe, 0), 0);
	} else {
		I915_WRITE(PLANE_POS(pipe, 0), (dst_y << 16) | dst_x);
	}

	/* Writing PLANE_SURF latches all the plane registers above. */
	I915_WRITE(PLANE_SURF(pipe, 0), surf_addr);

	POSTING_READ(PLANE_SURF(pipe, 0));
}
3087
a8d201af
ML
3088static void skylake_disable_primary_plane(struct drm_plane *primary,
3089 struct drm_crtc *crtc)
17638cd6
JB
3090{
3091 struct drm_device *dev = crtc->dev;
3092 struct drm_i915_private *dev_priv = dev->dev_private;
a8d201af 3093 int pipe = to_intel_crtc(crtc)->pipe;
17638cd6 3094
a8d201af
ML
3095 I915_WRITE(PLANE_CTL(pipe, 0), 0);
3096 I915_WRITE(PLANE_SURF(pipe, 0), 0);
3097 POSTING_READ(PLANE_SURF(pipe, 0));
3098}
29b9bde6 3099
/* Assume fb object is pinned & idle & fenced and just update base pointers */
/*
 * Legacy mode_set_base_atomic hook, historically used by the kgdboc
 * kernel-debugger panic path. That support was removed; this stub only
 * reports the fact and fails with -ENODEV.
 */
static int
intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
			   int x, int y, enum mode_set_atomic state)
{
	/* Support for kgdboc is disabled, this needs a major rework. */
	DRM_ERROR("legacy panic handler not supported any more.\n");

	return -ENODEV;
}
3110
/*
 * Force-complete any outstanding page flip on every CRTC so that
 * userspace receives its flip-completion events. Used on the GPU reset
 * path, where ring-based flips would otherwise never signal.
 */
static void intel_complete_page_flips(struct drm_i915_private *dev_priv)
{
	struct intel_crtc *crtc;

	for_each_intel_crtc(dev_priv->dev, crtc) {
		intel_prepare_page_flip(dev_priv, crtc->plane);
		intel_finish_page_flip(dev_priv, crtc->pipe);
	}
}
3120
3121static void intel_update_primary_planes(struct drm_device *dev)
3122{
7514747d 3123 struct drm_crtc *crtc;
96a02917 3124
70e1e0ec 3125 for_each_crtc(dev, crtc) {
11c22da6
ML
3126 struct intel_plane *plane = to_intel_plane(crtc->primary);
3127 struct intel_plane_state *plane_state;
96a02917 3128
11c22da6 3129 drm_modeset_lock_crtc(crtc, &plane->base);
11c22da6
ML
3130 plane_state = to_intel_plane_state(plane->base.state);
3131
a8d201af
ML
3132 if (plane_state->visible)
3133 plane->update_plane(&plane->base,
3134 to_intel_crtc_state(crtc->state),
3135 plane_state);
11c22da6
ML
3136
3137 drm_modeset_unlock_crtc(crtc);
96a02917
VS
3138 }
3139}
3140
/*
 * Prepare the display side for an impending GPU reset.
 *
 * On platforms where the reset clobbers the display (gen3/gen4 except
 * G4X), takes all modeset locks and suspends the display. The locks are
 * deliberately left held; intel_finish_reset() releases them.
 */
void intel_prepare_reset(struct drm_i915_private *dev_priv)
{
	/* no reset support for gen2 */
	if (IS_GEN2(dev_priv))
		return;

	/* reset doesn't touch the display */
	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
		return;

	drm_modeset_lock_all(dev_priv->dev);
	/*
	 * Disabling the crtcs gracefully seems nicer. Also the
	 * g33 docs say we should at least disable all the planes.
	 */
	intel_display_suspend(dev_priv->dev);
}
3158
/*
 * Restore the display after a GPU reset; counterpart of
 * intel_prepare_reset(), and releases the modeset locks taken there on
 * platforms where a full display re-init is needed.
 */
void intel_finish_reset(struct drm_i915_private *dev_priv)
{
	/*
	 * Flips in the rings will be nuked by the reset,
	 * so complete all pending flips so that user space
	 * will get its events and not get stuck.
	 */
	intel_complete_page_flips(dev_priv);

	/* no reset support for gen2 */
	if (IS_GEN2(dev_priv))
		return;

	/* reset doesn't touch the display */
	if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv)) {
		/*
		 * Flips in the rings have been nuked by the reset,
		 * so update the base address of all primary
		 * planes to the last fb to make sure we're
		 * showing the correct fb after a reset.
		 *
		 * FIXME: Atomic will make this obsolete since we won't schedule
		 * CS-based flips (which might get lost in gpu resets) any more.
		 */
		intel_update_primary_planes(dev_priv->dev);
		return;
	}

	/*
	 * The display has been reset as well,
	 * so need a full re-initialization.
	 */
	intel_runtime_pm_disable_interrupts(dev_priv);
	intel_runtime_pm_enable_interrupts(dev_priv);

	intel_modeset_init_hw(dev_priv->dev);

	spin_lock_irq(&dev_priv->irq_lock);
	if (dev_priv->display.hpd_irq_setup)
		dev_priv->display.hpd_irq_setup(dev_priv);
	spin_unlock_irq(&dev_priv->irq_lock);

	intel_display_resume(dev_priv->dev);

	intel_hpd_init(dev_priv);

	/* drops the locks acquired in intel_prepare_reset() */
	drm_modeset_unlock_all(dev_priv->dev);
}
3207
7d5e3799
CW
3208static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
3209{
3210 struct drm_device *dev = crtc->dev;
7d5e3799 3211 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
c19ae989 3212 unsigned reset_counter;
7d5e3799
CW
3213 bool pending;
3214
7f1847eb
CW
3215 reset_counter = i915_reset_counter(&to_i915(dev)->gpu_error);
3216 if (intel_crtc->reset_counter != reset_counter)
7d5e3799
CW
3217 return false;
3218
5e2d7afc 3219 spin_lock_irq(&dev->event_lock);
7d5e3799 3220 pending = to_intel_crtc(crtc)->unpin_work != NULL;
5e2d7afc 3221 spin_unlock_irq(&dev->event_lock);
7d5e3799
CW
3222
3223 return pending;
3224}
3225
/*
 * Apply a fastset pipe-size change: update PIPESRC and the panel fitter
 * state without a full modeset.
 */
static void intel_update_pipe_config(struct intel_crtc *crtc,
				     struct intel_crtc_state *old_crtc_state)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc_state *pipe_config =
		to_intel_crtc_state(crtc->base.state);

	/* drm_atomic_helper_update_legacy_modeset_state might not be called. */
	crtc->base.mode = crtc->base.state->mode;

	DRM_DEBUG_KMS("Updating pipe size %ix%i -> %ix%i\n",
		      old_crtc_state->pipe_src_w, old_crtc_state->pipe_src_h,
		      pipe_config->pipe_src_w, pipe_config->pipe_src_h);

	/*
	 * Update pipe size and adjust fitter if needed: the reason for this is
	 * that in compute_mode_changes we check the native mode (not the pfit
	 * mode) to see if we can flip rather than do a full mode set. In the
	 * fastboot case, we'll flip, but if we don't update the pipesrc and
	 * pfit state, we'll end up with a big fb scanned out into the wrong
	 * sized surface.
	 */

	I915_WRITE(PIPESRC(crtc->pipe),
		   ((pipe_config->pipe_src_w - 1) << 16) |
		   (pipe_config->pipe_src_h - 1));

	/* on skylake this is done by detaching scalers */
	if (INTEL_INFO(dev)->gen >= 9) {
		skl_detach_scalers(crtc);

		if (pipe_config->pch_pfit.enabled)
			skylake_pfit_enable(crtc);
	} else if (HAS_PCH_SPLIT(dev)) {
		if (pipe_config->pch_pfit.enabled)
			ironlake_pfit_enable(crtc);
		else if (old_crtc_state->pch_pfit.enabled)
			ironlake_pfit_disable(crtc, true);
	}
}
3267
/*
 * Switch the FDI link from training patterns to the normal pixel-data
 * pattern once link training has completed, on both the CPU TX side and
 * the PCH RX side.
 */
static void intel_fdi_normal_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable normal train */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if (IS_IVYBRIDGE(dev)) {
		/* IVB uses a different train-pattern field layout */
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
	}
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_NORMAL_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_NONE;
	}
	I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);

	/* wait one idle pattern time */
	POSTING_READ(reg);
	udelay(1000);

	/* IVB wants error correction enabled */
	if (IS_IVYBRIDGE(dev))
		I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
			   FDI_FE_ERRC_ENABLE);
}
3309
/* The FDI link training functions for ILK/Ibexpeak. */
/*
 * Train the CPU-to-PCH FDI link: pattern 1 until bit lock, then
 * pattern 2 until symbol lock, polling FDI_RX_IIR up to 5 times per
 * phase. Failures are logged but not fatal.
 */
static void ironlake_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp, tries;

	/* FDI needs bits from pipe first */
	assert_pipe_enabled(dev_priv, pipe);

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);
	I915_READ(reg);
	udelay(150);

	/* enable CPU FDI TX and PCH FDI RX */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_DP_PORT_WIDTH_MASK;
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_TX_ENABLE);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp | FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(150);

	/* Ironlake workaround, enable clock pointer after FDI enable*/
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
	I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
		   FDI_RX_PHASE_SYNC_POINTER_EN);

	/* poll for bit lock (training phase 1 complete) */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if ((temp & FDI_RX_BIT_LOCK)) {
			DRM_DEBUG_KMS("FDI train 1 done.\n");
			/* write back to ack/clear the status bit */
			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 1 fail!\n");

	/* Train 2 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_2;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	/* poll for symbol lock (training phase 2 complete) */
	reg = FDI_RX_IIR(pipe);
	for (tries = 0; tries < 5; tries++) {
		temp = I915_READ(reg);
		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

		if (temp & FDI_RX_SYMBOL_LOCK) {
			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
			DRM_DEBUG_KMS("FDI train 2 done.\n");
			break;
		}
	}
	if (tries == 5)
		DRM_ERROR("FDI train 2 fail!\n");

	DRM_DEBUG_KMS("FDI train done\n");

}
3403
/* Voltage-swing / pre-emphasis settings tried in order during SNB/IVB
 * FDI link training (written into the FDI_LINK_TRAIN_VOL_EMP_MASK
 * field of FDI_TX_CTL). */
static const int snb_b_fdi_train_param[] = {
	FDI_LINK_TRAIN_400MV_0DB_SNB_B,
	FDI_LINK_TRAIN_400MV_6DB_SNB_B,
	FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
	FDI_LINK_TRAIN_800MV_0DB_SNB_B,
};
3410
3411/* The FDI link training functions for SNB/Cougarpoint. */
3412static void gen6_fdi_link_train(struct drm_crtc *crtc)
3413{
3414 struct drm_device *dev = crtc->dev;
3415 struct drm_i915_private *dev_priv = dev->dev_private;
3416 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
3417 int pipe = intel_crtc->pipe;
f0f59a00
VS
3418 i915_reg_t reg;
3419 u32 temp, i, retry;
8db9d77b 3420
e1a44743
AJ
3421 /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
3422 for train result */
5eddb70b
CW
3423 reg = FDI_RX_IMR(pipe);
3424 temp = I915_READ(reg);
e1a44743
AJ
3425 temp &= ~FDI_RX_SYMBOL_LOCK;
3426 temp &= ~FDI_RX_BIT_LOCK;
5eddb70b
CW
3427 I915_WRITE(reg, temp);
3428
3429 POSTING_READ(reg);
e1a44743
AJ
3430 udelay(150);
3431
8db9d77b 3432 /* enable CPU FDI TX and PCH FDI RX */
5eddb70b
CW
3433 reg = FDI_TX_CTL(pipe);
3434 temp = I915_READ(reg);
627eb5a3 3435 temp &= ~FDI_DP_PORT_WIDTH_MASK;
6e3c9717 3436 temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
8db9d77b
ZW
3437 temp &= ~FDI_LINK_TRAIN_NONE;
3438 temp |= FDI_LINK_TRAIN_PATTERN_1;
3439 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3440 /* SNB-B */
3441 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
5eddb70b 3442 I915_WRITE(reg, temp | FDI_TX_ENABLE);
8db9d77b 3443
d74cf324
DV
3444 I915_WRITE(FDI_RX_MISC(pipe),
3445 FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
3446
5eddb70b
CW
3447 reg = FDI_RX_CTL(pipe);
3448 temp = I915_READ(reg);
8db9d77b
ZW
3449 if (HAS_PCH_CPT(dev)) {
3450 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3451 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
3452 } else {
3453 temp &= ~FDI_LINK_TRAIN_NONE;
3454 temp |= FDI_LINK_TRAIN_PATTERN_1;
3455 }
5eddb70b
CW
3456 I915_WRITE(reg, temp | FDI_RX_ENABLE);
3457
3458 POSTING_READ(reg);
8db9d77b
ZW
3459 udelay(150);
3460
0206e353 3461 for (i = 0; i < 4; i++) {
5eddb70b
CW
3462 reg = FDI_TX_CTL(pipe);
3463 temp = I915_READ(reg);
8db9d77b
ZW
3464 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3465 temp |= snb_b_fdi_train_param[i];
5eddb70b
CW
3466 I915_WRITE(reg, temp);
3467
3468 POSTING_READ(reg);
8db9d77b
ZW
3469 udelay(500);
3470
fa37d39e
SP
3471 for (retry = 0; retry < 5; retry++) {
3472 reg = FDI_RX_IIR(pipe);
3473 temp = I915_READ(reg);
3474 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3475 if (temp & FDI_RX_BIT_LOCK) {
3476 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
3477 DRM_DEBUG_KMS("FDI train 1 done.\n");
3478 break;
3479 }
3480 udelay(50);
8db9d77b 3481 }
fa37d39e
SP
3482 if (retry < 5)
3483 break;
8db9d77b
ZW
3484 }
3485 if (i == 4)
5eddb70b 3486 DRM_ERROR("FDI train 1 fail!\n");
8db9d77b
ZW
3487
3488 /* Train 2 */
5eddb70b
CW
3489 reg = FDI_TX_CTL(pipe);
3490 temp = I915_READ(reg);
8db9d77b
ZW
3491 temp &= ~FDI_LINK_TRAIN_NONE;
3492 temp |= FDI_LINK_TRAIN_PATTERN_2;
3493 if (IS_GEN6(dev)) {
3494 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3495 /* SNB-B */
3496 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
3497 }
5eddb70b 3498 I915_WRITE(reg, temp);
8db9d77b 3499
5eddb70b
CW
3500 reg = FDI_RX_CTL(pipe);
3501 temp = I915_READ(reg);
8db9d77b
ZW
3502 if (HAS_PCH_CPT(dev)) {
3503 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
3504 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
3505 } else {
3506 temp &= ~FDI_LINK_TRAIN_NONE;
3507 temp |= FDI_LINK_TRAIN_PATTERN_2;
3508 }
5eddb70b
CW
3509 I915_WRITE(reg, temp);
3510
3511 POSTING_READ(reg);
8db9d77b
ZW
3512 udelay(150);
3513
0206e353 3514 for (i = 0; i < 4; i++) {
5eddb70b
CW
3515 reg = FDI_TX_CTL(pipe);
3516 temp = I915_READ(reg);
8db9d77b
ZW
3517 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
3518 temp |= snb_b_fdi_train_param[i];
5eddb70b
CW
3519 I915_WRITE(reg, temp);
3520
3521 POSTING_READ(reg);
8db9d77b
ZW
3522 udelay(500);
3523
fa37d39e
SP
3524 for (retry = 0; retry < 5; retry++) {
3525 reg = FDI_RX_IIR(pipe);
3526 temp = I915_READ(reg);
3527 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
3528 if (temp & FDI_RX_SYMBOL_LOCK) {
3529 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
3530 DRM_DEBUG_KMS("FDI train 2 done.\n");
3531 break;
3532 }
3533 udelay(50);
8db9d77b 3534 }
fa37d39e
SP
3535 if (retry < 5)
3536 break;
8db9d77b
ZW
3537 }
3538 if (i == 4)
5eddb70b 3539 DRM_ERROR("FDI train 2 fail!\n");
8db9d77b
ZW
3540
3541 DRM_DEBUG_KMS("FDI train done.\n");
3542}
3543
/* Manual link training for Ivy Bridge A0 parts */
static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp, i, j;

	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
	   for train result */
	reg = FDI_RX_IMR(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_RX_SYMBOL_LOCK;
	temp &= ~FDI_RX_BIT_LOCK;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(150);

	DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
		      I915_READ(FDI_RX_IIR(pipe)));

	/* Try each vswing and preemphasis setting twice before moving on */
	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
		/* disable first in case we need to retry */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
		temp &= ~FDI_TX_ENABLE;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_AUTO;
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp &= ~FDI_RX_ENABLE;
		I915_WRITE(reg, temp);

		/* enable CPU FDI TX and PCH FDI RX */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_DP_PORT_WIDTH_MASK;
		temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
		temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
		/* j/2: each vswing/preemphasis entry is attempted twice */
		temp |= snb_b_fdi_train_param[j/2];
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_TX_ENABLE);

		I915_WRITE(FDI_RX_MISC(pipe),
			   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
		temp |= FDI_COMPOSITE_SYNC;
		I915_WRITE(reg, temp | FDI_RX_ENABLE);

		POSTING_READ(reg);
		udelay(1); /* should be 0.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			/* Re-read IIR in case the lock bit latched just
			 * after the first read. */
			if (temp & FDI_RX_BIT_LOCK ||
			    (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
				DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
					      i);
				break;
			}
			udelay(1); /* should be 0.5us */
		}
		if (i == 4) {
			/* no bit lock at this vswing level; retry with next */
			DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
			continue;
		}

		/* Train 2 */
		reg = FDI_TX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
		temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
		I915_WRITE(reg, temp);

		reg = FDI_RX_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
		I915_WRITE(reg, temp);

		POSTING_READ(reg);
		udelay(2); /* should be 1.5us */

		for (i = 0; i < 4; i++) {
			reg = FDI_RX_IIR(pipe);
			temp = I915_READ(reg);
			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);

			if (temp & FDI_RX_SYMBOL_LOCK ||
			    (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
				DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
					      i);
				goto train_done;
			}
			udelay(2); /* should be 1.5us */
		}
		if (i == 4)
			DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
	}

train_done:
	DRM_DEBUG_KMS("FDI train done.\n");
}
3663
/* Bring up the FDI PLLs for @intel_crtc's pipe: first the PCH-side RX PLL,
 * then switch the RX clock source, then the CPU-side TX PLL. The order and
 * the udelay()s between steps are part of the hardware enable sequence. */
static void ironlake_fdi_pll_enable(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config->fdi_lanes);
	/* mirror the pipe's BPC setting into the FDI RX bpc field (bits 18:16) */
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(200);

	/* Switch from Rawclk to PCDclk */
	temp = I915_READ(reg);
	I915_WRITE(reg, temp | FDI_PCDCLK);

	POSTING_READ(reg);
	udelay(200);

	/* Enable CPU FDI TX PLL, always on for Ironlake */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	if ((temp & FDI_TX_PLL_ENABLE) == 0) {
		I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);

		POSTING_READ(reg);
		udelay(100);
	}
}
3700
/* Tear down the FDI PLLs in the reverse order of ironlake_fdi_pll_enable():
 * switch the RX clock back to Rawclk, disable the CPU TX PLL, then the PCH
 * RX PLL, waiting for the clocks to settle between steps. */
static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* Switch from PCDclk to Rawclk */
	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_PCDCLK);

	/* Disable CPU FDI TX PLL */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);

	/* Wait for the clocks to turn off. */
	POSTING_READ(reg);
	udelay(100);
}
3730
/* Disable the FDI link on @crtc: stop CPU TX and PCH RX, then park both
 * sides back in training pattern 1 so a later enable can retrain cleanly. */
static void ironlake_fdi_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	i915_reg_t reg;
	u32 temp;

	/* disable CPU FDI tx and PCH FDI rx */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
	POSTING_READ(reg);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~(0x7 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp & ~FDI_RX_ENABLE);

	POSTING_READ(reg);
	udelay(100);

	/* Ironlake workaround, disable clock pointer after downing FDI */
	if (HAS_PCH_IBX(dev))
		I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);

	/* still set train pattern 1 */
	reg = FDI_TX_CTL(pipe);
	temp = I915_READ(reg);
	temp &= ~FDI_LINK_TRAIN_NONE;
	temp |= FDI_LINK_TRAIN_PATTERN_1;
	I915_WRITE(reg, temp);

	reg = FDI_RX_CTL(pipe);
	temp = I915_READ(reg);
	if (HAS_PCH_CPT(dev)) {
		/* CPT uses a different train-pattern field layout */
		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
	} else {
		temp &= ~FDI_LINK_TRAIN_NONE;
		temp |= FDI_LINK_TRAIN_PATTERN_1;
	}
	/* BPC in FDI rx is consistent with that in PIPECONF */
	temp &= ~(0x07 << 16);
	temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
	I915_WRITE(reg, temp);

	POSTING_READ(reg);
	udelay(100);
}
3783
5dce5b93
CW
3784bool intel_has_pending_fb_unpin(struct drm_device *dev)
3785{
3786 struct intel_crtc *crtc;
3787
3788 /* Note that we don't need to be called with mode_config.lock here
3789 * as our list of CRTC objects is static for the lifetime of the
3790 * device and so cannot disappear as we iterate. Similarly, we can
3791 * happily treat the predicates as racy, atomic checks as userspace
3792 * cannot claim and pin a new fb without at least acquring the
3793 * struct_mutex and so serialising with us.
3794 */
d3fcc808 3795 for_each_intel_crtc(dev, crtc) {
5dce5b93
CW
3796 if (atomic_read(&crtc->unpin_work_count) == 0)
3797 continue;
3798
3799 if (crtc->unpin_work)
3800 intel_wait_for_vblank(dev, crtc->pipe);
3801
3802 return true;
3803 }
3804
3805 return false;
3806}
3807
/* Finish the pending page flip on @intel_crtc: detach the unpin work from
 * the crtc, deliver the userspace vblank event (if any), drop the vblank
 * reference taken for the flip, wake waiters and queue the unpin work.
 * NOTE(review): callers in this file hold dev->event_lock around this —
 * confirm that is a hard requirement before calling from elsewhere. */
static void page_flip_completed(struct intel_crtc *intel_crtc)
{
	struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
	struct intel_unpin_work *work = intel_crtc->unpin_work;

	/* Clear the pointer first so the flip no longer appears pending. */
	intel_crtc->unpin_work = NULL;

	if (work->event)
		drm_crtc_send_vblank_event(&intel_crtc->base, work->event);

	drm_crtc_vblank_put(&intel_crtc->base);

	wake_up_all(&dev_priv->pending_flip_queue);
	/* Actual unpinning happens asynchronously in work->work. */
	queue_work(dev_priv->wq, &work->work);

	trace_i915_flip_complete(intel_crtc->plane,
				 work->pending_flip_obj);
}
3826
/* Wait (interruptibly, up to 60s) for all pending page flips on @crtc to
 * complete. On timeout, forcibly complete a stuck flip so modeset can make
 * progress. Returns 0 on success or a negative error if interrupted. */
static int intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	long ret;

	WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));

	ret = wait_event_interruptible_timeout(
					dev_priv->pending_flip_queue,
					!intel_crtc_has_pending_flip(crtc),
					60*HZ);

	if (ret < 0)
		return ret;

	if (ret == 0) {
		/* Timed out: the flip never completed. Complete it by hand
		 * under the event lock so we don't leave state stuck. */
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		spin_lock_irq(&dev->event_lock);
		if (intel_crtc->unpin_work) {
			WARN_ONCE(1, "Removing stuck page flip\n");
			page_flip_completed(intel_crtc);
		}
		spin_unlock_irq(&dev->event_lock);
	}

	return 0;
}
3856
/* Gate the iCLKIP pixel clock and disable its SSC modulator via the
 * sideband interface (serialized by sb_lock). */
static void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
{
	u32 temp;

	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);

	mutex_lock(&dev_priv->sb_lock);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp |= SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);
}
3871
/* Program iCLKIP clock to the desired frequency */
static void lpt_program_iclkip(struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->dev);
	int clock = to_intel_crtc(crtc)->config->base.adjusted_mode.crtc_clock;
	u32 divsel, phaseinc, auxdiv, phasedir = 0;
	u32 temp;

	/* Always start from the disabled state before reprogramming. */
	lpt_disable_iclkip(dev_priv);

	/* The iCLK virtual clock root frequency is in MHz,
	 * but the adjusted_mode->crtc_clock is in KHz. To get the
	 * divisors, it is necessary to divide one by another, so we
	 * convert the virtual clock precision to KHz here for higher
	 * precision.
	 */
	for (auxdiv = 0; auxdiv < 2; auxdiv++) {
		u32 iclk_virtual_root_freq = 172800 * 1000;
		u32 iclk_pi_range = 64;
		u32 desired_divisor;

		desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
						    clock << auxdiv);
		divsel = (desired_divisor / iclk_pi_range) - 2;
		phaseinc = desired_divisor % iclk_pi_range;

		/*
		 * Near 20MHz is a corner case which is
		 * out of range for the 7-bit divisor
		 */
		if (divsel <= 0x7f)
			break;
	}

	/* This should not happen with any sane values */
	WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
		~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
	WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
		~SBI_SSCDIVINTPHASE_INCVAL_MASK);

	DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
			clock,
			auxdiv,
			divsel,
			phasedir,
			phaseinc);

	mutex_lock(&dev_priv->sb_lock);

	/* Program SSCDIVINTPHASE6 */
	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
	temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
	temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
	temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
	temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
	temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
	intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);

	/* Program SSCAUXDIV */
	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
	temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
	intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);

	/* Enable modulator and associated divider */
	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	temp &= ~SBI_SSCCTL_DISABLE;
	intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);

	mutex_unlock(&dev_priv->sb_lock);

	/* Wait for initialization time */
	udelay(24);

	I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
}
3949
/* Read back the currently programmed iCLKIP frequency (in KHz, the inverse
 * of the calculation in lpt_program_iclkip()). Returns 0 if the clock is
 * gated or the SSC modulator is disabled. */
int lpt_get_iclkip(struct drm_i915_private *dev_priv)
{
	u32 divsel, phaseinc, auxdiv;
	u32 iclk_virtual_root_freq = 172800 * 1000;
	u32 iclk_pi_range = 64;
	u32 desired_divisor;
	u32 temp;

	if ((I915_READ(PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
		return 0;

	mutex_lock(&dev_priv->sb_lock);

	temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
	if (temp & SBI_SSCCTL_DISABLE) {
		mutex_unlock(&dev_priv->sb_lock);
		return 0;
	}

	temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
	divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
		SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
	phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
		SBI_SSCDIVINTPHASE_INCVAL_SHIFT;

	temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
	auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
		SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;

	mutex_unlock(&dev_priv->sb_lock);

	/* Invert divsel/phaseinc encoding back into the raw divisor. */
	desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;

	return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
				 desired_divisor << auxdiv);
}
3986
/* Copy the CPU transcoder's display timings (H/V total, blank, sync and
 * vsync shift) into the PCH transcoder registers so both sides agree. */
static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc,
						enum pipe pch_transcoder)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;

	I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
		   I915_READ(HTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
		   I915_READ(HBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
		   I915_READ(HSYNC(cpu_transcoder)));

	I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
		   I915_READ(VTOTAL(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
		   I915_READ(VBLANK(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
		   I915_READ(VSYNC(cpu_transcoder)));
	I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
		   I915_READ(VSYNCSHIFT(cpu_transcoder)));
}
4010
/* Set or clear the CPT FDI B/C lane bifurcation. Must only be changed while
 * FDI RX on pipes B and C is disabled (WARNed below). No-op if the bit is
 * already in the requested state. */
static void cpt_set_fdi_bc_bifurcation(struct drm_device *dev, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t temp;

	temp = I915_READ(SOUTH_CHICKEN1);
	if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
		return;

	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
	WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);

	temp &= ~FDI_BC_BIFURCATION_SELECT;
	if (enable)
		temp |= FDI_BC_BIFURCATION_SELECT;

	DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis");
	I915_WRITE(SOUTH_CHICKEN1, temp);
	POSTING_READ(SOUTH_CHICKEN1);
}
4031
4032static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc)
4033{
4034 struct drm_device *dev = intel_crtc->base.dev;
1fbc0d78
DV
4035
4036 switch (intel_crtc->pipe) {
4037 case PIPE_A:
4038 break;
4039 case PIPE_B:
6e3c9717 4040 if (intel_crtc->config->fdi_lanes > 2)
003632d9 4041 cpt_set_fdi_bc_bifurcation(dev, false);
1fbc0d78 4042 else
003632d9 4043 cpt_set_fdi_bc_bifurcation(dev, true);
1fbc0d78
DV
4044
4045 break;
4046 case PIPE_C:
003632d9 4047 cpt_set_fdi_bc_bifurcation(dev, true);
1fbc0d78
DV
4048
4049 break;
4050 default:
4051 BUG();
4052 }
4053}
4054
/* Return which DP Port should be selected for Transcoder DP control */
static enum port
intel_trans_dp_port_sel(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct intel_encoder *encoder;

	for_each_encoder_on_crtc(dev, crtc, encoder) {
		if (encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
		    encoder->type == INTEL_OUTPUT_EDP)
			return enc_to_dig_port(&encoder->base)->port;
	}

	/* NOTE(review): -1 is returned as an 'enum port' sentinel when no
	 * DP/eDP encoder is attached; the only caller here BUG()s on any
	 * unexpected value. Consider a proper PORT_NONE-style constant if
	 * one exists in this tree — confirm before changing. */
	return -1;
}
4070
/*
 * Enable PCH resources required for PCH ports:
 * - PCH PLLs
 * - FDI training & RX/TX
 * - update transcoder timings
 * - DP transcoding bits
 * - transcoder
 */
static void ironlake_pch_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	u32 temp;

	assert_pch_transcoder_disabled(dev_priv, pipe);

	if (IS_IVYBRIDGE(dev))
		ivybridge_update_fdi_bc_bifurcation(intel_crtc);

	/* Write the TU size bits before fdi link training, so that error
	 * detection works. */
	I915_WRITE(FDI_RX_TUSIZE1(pipe),
		   I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);

	/* For PCH output, training FDI link */
	dev_priv->display.fdi_link_train(crtc);

	/* We need to program the right clock selection before writing the pixel
	 * multiplier into the DPLL. */
	if (HAS_PCH_CPT(dev)) {
		u32 sel;

		temp = I915_READ(PCH_DPLL_SEL);
		temp |= TRANS_DPLL_ENABLE(pipe);
		sel = TRANS_DPLLB_SEL(pipe);
		if (intel_crtc->config->shared_dpll ==
		    intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
			temp |= sel;
		else
			temp &= ~sel;
		I915_WRITE(PCH_DPLL_SEL, temp);
	}

	/* XXX: pch pll's can be enabled any time before we enable the PCH
	 * transcoder, and we actually should do this to not upset any PCH
	 * transcoder that already use the clock when we share it.
	 *
	 * Note that enable_shared_dpll tries to do the right thing, but
	 * get_shared_dpll unconditionally resets the pll - we need that to have
	 * the right LVDS enable sequence. */
	intel_enable_shared_dpll(intel_crtc);

	/* set transcoder timing, panel must allow it */
	assert_panel_unlocked(dev_priv, pipe);
	ironlake_pch_transcoder_set_timings(intel_crtc, pipe);

	intel_fdi_normal_train(crtc);

	/* For PCH DP, enable TRANS_DP_CTL */
	if (HAS_PCH_CPT(dev) && intel_crtc->config->has_dp_encoder) {
		const struct drm_display_mode *adjusted_mode =
			&intel_crtc->config->base.adjusted_mode;
		u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
		i915_reg_t reg = TRANS_DP_CTL(pipe);
		temp = I915_READ(reg);
		temp &= ~(TRANS_DP_PORT_SEL_MASK |
			  TRANS_DP_SYNC_MASK |
			  TRANS_DP_BPC_MASK);
		temp |= TRANS_DP_OUTPUT_ENABLE;
		temp |= bpc << 9; /* same format but at 11:9 */

		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
			temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
			temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;

		/* Route the transcoder to whichever DP port drives this crtc. */
		switch (intel_trans_dp_port_sel(crtc)) {
		case PORT_B:
			temp |= TRANS_DP_PORT_SEL_B;
			break;
		case PORT_C:
			temp |= TRANS_DP_PORT_SEL_C;
			break;
		case PORT_D:
			temp |= TRANS_DP_PORT_SEL_D;
			break;
		default:
			BUG();
		}

		I915_WRITE(reg, temp);
	}

	ironlake_enable_pch_transcoder(dev_priv, pipe);
}
4168
/* LPT variant of the PCH enable sequence: program iCLKIP, copy timings to
 * the (single) PCH transcoder, then enable it. LPT always uses transcoder A
 * on the PCH side. */
static void lpt_pch_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;

	assert_pch_transcoder_disabled(dev_priv, TRANSCODER_A);

	lpt_program_iclkip(crtc);

	/* Set transcoder timing. */
	ironlake_pch_transcoder_set_timings(intel_crtc, PIPE_A);

	lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
}
4185
/* Sanity-check that the pipe's scanline counter is advancing after a
 * modeset; retries the 5ms wait once before declaring the pipe stuck. */
static void cpt_verify_modeset(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	i915_reg_t dslreg = PIPEDSL(pipe);
	u32 temp;

	temp = I915_READ(dslreg);
	udelay(500);
	if (wait_for(I915_READ(dslreg) != temp, 5)) {
		/* second chance before concluding the scanline is frozen */
		if (wait_for(I915_READ(dslreg) != temp, 5))
			DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
	}
}
4199
/* Stage a scaler request (or release) for one scaler user in @crtc_state.
 * @force_detach frees any scaler currently bound; otherwise a scaler is
 * claimed only when the src and dst sizes actually differ. Returns 0 on
 * success, -EINVAL when the requested scale is outside the SKL limits. */
static int
skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
		  unsigned scaler_user, int *scaler_id, unsigned int rotation,
		  int src_w, int src_h, int dst_w, int dst_h)
{
	struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(crtc_state->base.crtc);
	int need_scaling;

	/* with 90/270 rotation the plane is scanned out sideways, so the
	 * comparison is done against the swapped destination dimensions */
	need_scaling = intel_rotation_90_or_270(rotation) ?
		(src_h != dst_w || src_w != dst_h):
		(src_w != dst_w || src_h != dst_h);

	/*
	 * if plane is being disabled or scaler is no more required or force detach
	 *  - free scaler binded to this plane/crtc
	 *  - in order to do this, update crtc->scaler_usage
	 *
	 * Here scaler state in crtc_state is set free so that
	 * scaler can be assigned to other user. Actual register
	 * update to free the scaler is done in plane/panel-fit programming.
	 * For this purpose crtc/plane_state->scaler_id isn't reset here.
	 */
	if (force_detach || !need_scaling) {
		if (*scaler_id >= 0) {
			scaler_state->scaler_users &= ~(1 << scaler_user);
			scaler_state->scalers[*scaler_id].in_use = 0;

			DRM_DEBUG_KMS("scaler_user index %u.%u: "
				"Staged freeing scaler id %d scaler_users = 0x%x\n",
				intel_crtc->pipe, scaler_user, *scaler_id,
				scaler_state->scaler_users);
			*scaler_id = -1;
		}
		return 0;
	}

	/* range checks */
	if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
		dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||

		src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
		dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H) {
		DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
			"size is out of scaler range\n",
			intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h);
		return -EINVAL;
	}

	/* mark this plane as a scaler user in crtc_state */
	scaler_state->scaler_users |= (1 << scaler_user);
	DRM_DEBUG_KMS("scaler_user index %u.%u: "
		"staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
		intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
		scaler_state->scaler_users);

	return 0;
}
4260
/**
 * skl_update_scaler_crtc - Stages update to scaler state for a given crtc.
 *
 * @state: crtc's atomic state (the scaler state within it is updated)
 *
 * Return
 *     0 - scaler_usage updated successfully
 *     error - requested scaling cannot be supported or other error condition
 */
int skl_update_scaler_crtc(struct intel_crtc_state *state)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(state->base.crtc);
	const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;

	DRM_DEBUG_KMS("Updating scaler for [CRTC:%i] scaler_user index %u.%u\n",
		      intel_crtc->base.base.id, intel_crtc->pipe, SKL_CRTC_INDEX);

	/* The crtc itself never rotates, hence DRM_ROTATE_0; detach the
	 * scaler whenever the crtc is not active. */
	return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
		&state->scaler_state.scaler_id, BIT(DRM_ROTATE_0),
		state->pipe_src_w, state->pipe_src_h,
		adjusted_mode->crtc_hdisplay, adjusted_mode->crtc_vdisplay);
}
4283
/**
 * skl_update_scaler_plane - Stages update to scaler state for a given plane.
 *
 * @crtc_state: crtc's atomic state containing the scaler state
 * @plane_state: atomic plane state to update
 *
 * Return
 *     0 - scaler_usage updated successfully
 *     error - requested scaling cannot be supported or other error condition
 */
static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
				   struct intel_plane_state *plane_state)
{

	struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
	struct intel_plane *intel_plane =
		to_intel_plane(plane_state->base.plane);
	struct drm_framebuffer *fb = plane_state->base.fb;
	int ret;

	/* a plane without an fb, or an invisible one, releases its scaler */
	bool force_detach = !fb || !plane_state->visible;

	DRM_DEBUG_KMS("Updating scaler for [PLANE:%d] scaler_user index %u.%u\n",
		      intel_plane->base.base.id, intel_crtc->pipe,
		      drm_plane_index(&intel_plane->base));

	/* src rect is in 16.16 fixed point, hence the >> 16 */
	ret = skl_update_scaler(crtc_state, force_detach,
				drm_plane_index(&intel_plane->base),
				&plane_state->scaler_id,
				plane_state->base.rotation,
				drm_rect_width(&plane_state->src) >> 16,
				drm_rect_height(&plane_state->src) >> 16,
				drm_rect_width(&plane_state->dst),
				drm_rect_height(&plane_state->dst));

	if (ret || plane_state->scaler_id < 0)
		return ret;

	/* check colorkey */
	if (plane_state->ckey.flags != I915_SET_COLORKEY_NONE) {
		DRM_DEBUG_KMS("[PLANE:%d] scaling with color key not allowed",
			      intel_plane->base.base.id);
		return -EINVAL;
	}

	/* Check src format */
	switch (fb->pixel_format) {
	case DRM_FORMAT_RGB565:
	case DRM_FORMAT_XBGR8888:
	case DRM_FORMAT_XRGB8888:
	case DRM_FORMAT_ABGR8888:
	case DRM_FORMAT_ARGB8888:
	case DRM_FORMAT_XRGB2101010:
	case DRM_FORMAT_XBGR2101010:
	case DRM_FORMAT_YUYV:
	case DRM_FORMAT_YVYU:
	case DRM_FORMAT_UYVY:
	case DRM_FORMAT_VYUY:
		break;
	default:
		DRM_DEBUG_KMS("[PLANE:%d] FB:%d unsupported scaling format 0x%x\n",
			intel_plane->base.base.id, fb->base.id, fb->pixel_format);
		return -EINVAL;
	}

	return 0;
}
4351
e435d6e5
ML
4352static void skylake_scaler_disable(struct intel_crtc *crtc)
4353{
4354 int i;
4355
4356 for (i = 0; i < crtc->num_scalers; i++)
4357 skl_detach_scaler(crtc, i);
4358}
4359
/* Program the SKL panel fitter using the scaler previously claimed in the
 * crtc state; no-op when pch_pfit is not enabled. */
static void skylake_pfit_enable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;
	struct intel_crtc_scaler_state *scaler_state =
		&crtc->config->scaler_state;

	DRM_DEBUG_KMS("for crtc_state = %p\n", crtc->config);

	if (crtc->config->pch_pfit.enabled) {
		int id;

		/* a scaler must have been staged by skl_update_scaler_crtc() */
		if (WARN_ON(crtc->config->scaler_state.scaler_id < 0)) {
			DRM_ERROR("Requesting pfit without getting a scaler first\n");
			return;
		}

		id = scaler_state->scaler_id;
		I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
			PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
		I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc->config->pch_pfit.pos);
		I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc->config->pch_pfit.size);

		DRM_DEBUG_KMS("for crtc_state = %p scaler_id = %d\n", crtc->config, id);
	}
}
4387
/* Program the ILK-style panel fitter (PF) for @crtc's pipe; no-op when
 * pch_pfit is not enabled in the crtc state. */
static void ironlake_pfit_enable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = crtc->pipe;

	if (crtc->config->pch_pfit.enabled) {
		/* Force use of hard-coded filter coefficients
		 * as some pre-programmed values are broken,
		 * e.g. x201.
		 */
		if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
						 PF_PIPE_SEL_IVB(pipe));
		else
			I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
		I915_WRITE(PF_WIN_POS(pipe), crtc->config->pch_pfit.pos);
		I915_WRITE(PF_WIN_SZ(pipe), crtc->config->pch_pfit.size);
	}
}
4408
/* Enable IPS (Intermediate Pixel Storage) on HSW/BDW. On BDW the enable
 * goes through the pcode mailbox; on HSW it is a direct register write
 * that is polled until the hardware acknowledges it. */
void hsw_enable_ips(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!crtc->config->ips_enabled)
		return;

	/*
	 * We can only enable IPS after we enable a plane and wait for a vblank
	 * This function is called from post_plane_update, which is run after
	 * a vblank wait.
	 */

	assert_plane_enabled(dev_priv, crtc->plane);
	if (IS_BROADWELL(dev)) {
		mutex_lock(&dev_priv->rps.hw_lock);
		WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0xc0000000));
		mutex_unlock(&dev_priv->rps.hw_lock);
		/* Quoting Art Runyan: "its not safe to expect any particular
		 * value in IPS_CTL bit 31 after enabling IPS through the
		 * mailbox." Moreover, the mailbox may return a bogus state,
		 * so we need to just enable it and continue on.
		 */
	} else {
		I915_WRITE(IPS_CTL, IPS_ENABLE);
		/* The bit only becomes 1 in the next vblank, so this wait here
		 * is essentially intel_wait_for_vblank. If we don't have this
		 * and don't wait for vblanks until the end of crtc_enable, then
		 * the HW state readout code will complain that the expected
		 * IPS_CTL value is not the one we read. */
		if (wait_for(I915_READ_NOTRACE(IPS_CTL) & IPS_ENABLE, 50))
			DRM_ERROR("Timed out waiting for IPS enable\n");
	}
}
4444
20bc8673 4445void hsw_disable_ips(struct intel_crtc *crtc)
d77e4531
PZ
4446{
4447 struct drm_device *dev = crtc->base.dev;
4448 struct drm_i915_private *dev_priv = dev->dev_private;
4449
6e3c9717 4450 if (!crtc->config->ips_enabled)
d77e4531
PZ
4451 return;
4452
4453 assert_plane_enabled(dev_priv, crtc->plane);
23d0b130 4454 if (IS_BROADWELL(dev)) {
2a114cc1
BW
4455 mutex_lock(&dev_priv->rps.hw_lock);
4456 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
4457 mutex_unlock(&dev_priv->rps.hw_lock);
23d0b130
BW
4458 /* wait for pcode to finish disabling IPS, which may take up to 42ms */
4459 if (wait_for((I915_READ(IPS_CTL) & IPS_ENABLE) == 0, 42))
4460 DRM_ERROR("Timed out waiting for IPS disable\n");
e59150dc 4461 } else {
2a114cc1 4462 I915_WRITE(IPS_CTL, 0);
e59150dc
JB
4463 POSTING_READ(IPS_CTL);
4464 }
d77e4531
PZ
4465
4466 /* We need to wait for a vblank before we can disable the plane. */
4467 intel_wait_for_vblank(dev, crtc->pipe);
4468}
4469
7cac945f 4470static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
d3eedb1a 4471{
7cac945f 4472 if (intel_crtc->overlay) {
d3eedb1a
VS
4473 struct drm_device *dev = intel_crtc->base.dev;
4474 struct drm_i915_private *dev_priv = dev->dev_private;
4475
4476 mutex_lock(&dev->struct_mutex);
4477 dev_priv->mm.interruptible = false;
4478 (void) intel_overlay_switch_off(intel_crtc->overlay);
4479 dev_priv->mm.interruptible = true;
4480 mutex_unlock(&dev->struct_mutex);
4481 }
4482
4483 /* Let userspace switch the overlay on again. In most cases userspace
4484 * has to recompute where to put it anyway.
4485 */
4486}
4487
87d4300a
ML
4488/**
4489 * intel_post_enable_primary - Perform operations after enabling primary plane
4490 * @crtc: the CRTC whose primary plane was just enabled
4491 *
4492 * Performs potentially sleeping operations that must be done after the primary
4493 * plane is enabled, such as updating FBC and IPS. Note that this may be
4494 * called due to an explicit primary plane update, or due to an implicit
4495 * re-enable that is caused when a sprite plane is updated to no longer
4496 * completely hide the primary plane.
4497 */
4498static void
4499intel_post_enable_primary(struct drm_crtc *crtc)
a5c4d7bc
VS
4500{
4501 struct drm_device *dev = crtc->dev;
87d4300a 4502 struct drm_i915_private *dev_priv = dev->dev_private;
a5c4d7bc
VS
4503 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4504 int pipe = intel_crtc->pipe;
a5c4d7bc 4505
87d4300a
ML
4506 /*
4507 * FIXME IPS should be fine as long as one plane is
4508 * enabled, but in practice it seems to have problems
4509 * when going from primary only to sprite only and vice
4510 * versa.
4511 */
a5c4d7bc
VS
4512 hsw_enable_ips(intel_crtc);
4513
f99d7069 4514 /*
87d4300a
ML
4515 * Gen2 reports pipe underruns whenever all planes are disabled.
4516 * So don't enable underrun reporting before at least some planes
4517 * are enabled.
4518 * FIXME: Need to fix the logic to work when we turn off all planes
4519 * but leave the pipe running.
f99d7069 4520 */
87d4300a
ML
4521 if (IS_GEN2(dev))
4522 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
4523
aca7b684
VS
4524 /* Underruns don't always raise interrupts, so check manually. */
4525 intel_check_cpu_fifo_underruns(dev_priv);
4526 intel_check_pch_fifo_underruns(dev_priv);
a5c4d7bc
VS
4527}
4528
2622a081 4529/* FIXME move all this to pre_plane_update() with proper state tracking */
87d4300a
ML
4530static void
4531intel_pre_disable_primary(struct drm_crtc *crtc)
a5c4d7bc
VS
4532{
4533 struct drm_device *dev = crtc->dev;
4534 struct drm_i915_private *dev_priv = dev->dev_private;
4535 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4536 int pipe = intel_crtc->pipe;
a5c4d7bc 4537
87d4300a
ML
4538 /*
4539 * Gen2 reports pipe underruns whenever all planes are disabled.
4540 * So diasble underrun reporting before all the planes get disabled.
4541 * FIXME: Need to fix the logic to work when we turn off all planes
4542 * but leave the pipe running.
4543 */
4544 if (IS_GEN2(dev))
4545 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
a5c4d7bc 4546
2622a081
VS
4547 /*
4548 * FIXME IPS should be fine as long as one plane is
4549 * enabled, but in practice it seems to have problems
4550 * when going from primary only to sprite only and vice
4551 * versa.
4552 */
4553 hsw_disable_ips(intel_crtc);
4554}
4555
4556/* FIXME get rid of this and use pre_plane_update */
4557static void
4558intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
4559{
4560 struct drm_device *dev = crtc->dev;
4561 struct drm_i915_private *dev_priv = dev->dev_private;
4562 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4563 int pipe = intel_crtc->pipe;
4564
4565 intel_pre_disable_primary(crtc);
4566
87d4300a
ML
4567 /*
4568 * Vblank time updates from the shadow to live plane control register
4569 * are blocked if the memory self-refresh mode is active at that
4570 * moment. So to make sure the plane gets truly disabled, disable
4571 * first the self-refresh mode. The self-refresh enable bit in turn
4572 * will be checked/applied by the HW only at the next frame start
4573 * event which is after the vblank start event, so we need to have a
4574 * wait-for-vblank between disabling the plane and the pipe.
4575 */
262cd2e1 4576 if (HAS_GMCH_DISPLAY(dev)) {
87d4300a 4577 intel_set_memory_cxsr(dev_priv, false);
262cd2e1
VS
4578 dev_priv->wm.vlv.cxsr = false;
4579 intel_wait_for_vblank(dev, pipe);
4580 }
87d4300a
ML
4581}
4582
cd202f69 4583static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
ac21b225 4584{
cd202f69
ML
4585 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
4586 struct drm_atomic_state *old_state = old_crtc_state->base.state;
92826fcd
ML
4587 struct intel_crtc_state *pipe_config =
4588 to_intel_crtc_state(crtc->base.state);
ac21b225 4589 struct drm_device *dev = crtc->base.dev;
cd202f69
ML
4590 struct drm_plane *primary = crtc->base.primary;
4591 struct drm_plane_state *old_pri_state =
4592 drm_atomic_get_existing_plane_state(old_state, primary);
ac21b225 4593
cd202f69 4594 intel_frontbuffer_flip(dev, pipe_config->fb_bits);
ac21b225 4595
ab1d3a0e 4596 crtc->wm.cxsr_allowed = true;
852eb00d 4597
caed361d 4598 if (pipe_config->update_wm_post && pipe_config->base.active)
f015c551
VS
4599 intel_update_watermarks(&crtc->base);
4600
cd202f69
ML
4601 if (old_pri_state) {
4602 struct intel_plane_state *primary_state =
4603 to_intel_plane_state(primary->state);
4604 struct intel_plane_state *old_primary_state =
4605 to_intel_plane_state(old_pri_state);
4606
31ae71fc
ML
4607 intel_fbc_post_update(crtc);
4608
cd202f69
ML
4609 if (primary_state->visible &&
4610 (needs_modeset(&pipe_config->base) ||
4611 !old_primary_state->visible))
4612 intel_post_enable_primary(&crtc->base);
4613 }
ac21b225
ML
4614}
4615
5c74cd73 4616static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state)
ac21b225 4617{
5c74cd73 4618 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
ac21b225 4619 struct drm_device *dev = crtc->base.dev;
eddfcbcd 4620 struct drm_i915_private *dev_priv = dev->dev_private;
ab1d3a0e
ML
4621 struct intel_crtc_state *pipe_config =
4622 to_intel_crtc_state(crtc->base.state);
5c74cd73
ML
4623 struct drm_atomic_state *old_state = old_crtc_state->base.state;
4624 struct drm_plane *primary = crtc->base.primary;
4625 struct drm_plane_state *old_pri_state =
4626 drm_atomic_get_existing_plane_state(old_state, primary);
4627 bool modeset = needs_modeset(&pipe_config->base);
ac21b225 4628
5c74cd73
ML
4629 if (old_pri_state) {
4630 struct intel_plane_state *primary_state =
4631 to_intel_plane_state(primary->state);
4632 struct intel_plane_state *old_primary_state =
4633 to_intel_plane_state(old_pri_state);
4634
31ae71fc
ML
4635 intel_fbc_pre_update(crtc);
4636
5c74cd73
ML
4637 if (old_primary_state->visible &&
4638 (modeset || !primary_state->visible))
4639 intel_pre_disable_primary(&crtc->base);
4640 }
852eb00d 4641
ab1d3a0e 4642 if (pipe_config->disable_cxsr) {
852eb00d 4643 crtc->wm.cxsr_allowed = false;
2dfd178d 4644
2622a081
VS
4645 /*
4646 * Vblank time updates from the shadow to live plane control register
4647 * are blocked if the memory self-refresh mode is active at that
4648 * moment. So to make sure the plane gets truly disabled, disable
4649 * first the self-refresh mode. The self-refresh enable bit in turn
4650 * will be checked/applied by the HW only at the next frame start
4651 * event which is after the vblank start event, so we need to have a
4652 * wait-for-vblank between disabling the plane and the pipe.
4653 */
4654 if (old_crtc_state->base.active) {
2dfd178d 4655 intel_set_memory_cxsr(dev_priv, false);
2622a081
VS
4656 dev_priv->wm.vlv.cxsr = false;
4657 intel_wait_for_vblank(dev, crtc->pipe);
4658 }
852eb00d 4659 }
92826fcd 4660
ed4a6a7c
MR
4661 /*
4662 * IVB workaround: must disable low power watermarks for at least
4663 * one frame before enabling scaling. LP watermarks can be re-enabled
4664 * when scaling is disabled.
4665 *
4666 * WaCxSRDisabledForSpriteScaling:ivb
4667 */
4668 if (pipe_config->disable_lp_wm) {
4669 ilk_disable_lp_wm(dev);
4670 intel_wait_for_vblank(dev, crtc->pipe);
4671 }
4672
4673 /*
4674 * If we're doing a modeset, we're done. No need to do any pre-vblank
4675 * watermark programming here.
4676 */
4677 if (needs_modeset(&pipe_config->base))
4678 return;
4679
4680 /*
4681 * For platforms that support atomic watermarks, program the
4682 * 'intermediate' watermarks immediately. On pre-gen9 platforms, these
4683 * will be the intermediate values that are safe for both pre- and
4684 * post- vblank; when vblank happens, the 'active' values will be set
4685 * to the final 'target' values and we'll do this again to get the
4686 * optimal watermarks. For gen9+ platforms, the values we program here
4687 * will be the final target values which will get automatically latched
4688 * at vblank time; no further programming will be necessary.
4689 *
4690 * If a platform hasn't been transitioned to atomic watermarks yet,
4691 * we'll continue to update watermarks the old way, if flags tell
4692 * us to.
4693 */
4694 if (dev_priv->display.initial_watermarks != NULL)
4695 dev_priv->display.initial_watermarks(pipe_config);
caed361d 4696 else if (pipe_config->update_wm_pre)
92826fcd 4697 intel_update_watermarks(&crtc->base);
ac21b225
ML
4698}
4699
d032ffa0 4700static void intel_crtc_disable_planes(struct drm_crtc *crtc, unsigned plane_mask)
87d4300a
ML
4701{
4702 struct drm_device *dev = crtc->dev;
4703 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
d032ffa0 4704 struct drm_plane *p;
87d4300a
ML
4705 int pipe = intel_crtc->pipe;
4706
7cac945f 4707 intel_crtc_dpms_overlay_disable(intel_crtc);
27321ae8 4708
d032ffa0
ML
4709 drm_for_each_plane_mask(p, dev, plane_mask)
4710 to_intel_plane(p)->disable_plane(p, crtc);
f98551ae 4711
f99d7069
DV
4712 /*
4713 * FIXME: Once we grow proper nuclear flip support out of this we need
4714 * to compute the mask of flip planes precisely. For the time being
4715 * consider this a flip to a NULL plane.
4716 */
4717 intel_frontbuffer_flip(dev, INTEL_FRONTBUFFER_ALL_MASK(pipe));
a5c4d7bc
VS
4718}
4719
f67a559d
JB
4720static void ironlake_crtc_enable(struct drm_crtc *crtc)
4721{
4722 struct drm_device *dev = crtc->dev;
4723 struct drm_i915_private *dev_priv = dev->dev_private;
4724 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
ef9c3aee 4725 struct intel_encoder *encoder;
f67a559d 4726 int pipe = intel_crtc->pipe;
b95c5321
ML
4727 struct intel_crtc_state *pipe_config =
4728 to_intel_crtc_state(crtc->state);
f67a559d 4729
53d9f4e9 4730 if (WARN_ON(intel_crtc->active))
f67a559d
JB
4731 return;
4732
b2c0593a
VS
4733 /*
4734 * Sometimes spurious CPU pipe underruns happen during FDI
4735 * training, at least with VGA+HDMI cloning. Suppress them.
4736 *
4737 * On ILK we get an occasional spurious CPU pipe underruns
4738 * between eDP port A enable and vdd enable. Also PCH port
4739 * enable seems to result in the occasional CPU pipe underrun.
4740 *
4741 * Spurious PCH underruns also occur during PCH enabling.
4742 */
4743 if (intel_crtc->config->has_pch_encoder || IS_GEN5(dev_priv))
4744 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
81b088ca
VS
4745 if (intel_crtc->config->has_pch_encoder)
4746 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
4747
6e3c9717 4748 if (intel_crtc->config->has_pch_encoder)
b14b1055
DV
4749 intel_prepare_shared_dpll(intel_crtc);
4750
6e3c9717 4751 if (intel_crtc->config->has_dp_encoder)
fe3cd48d 4752 intel_dp_set_m_n(intel_crtc, M1_N1);
29407aab
DV
4753
4754 intel_set_pipe_timings(intel_crtc);
bc58be60 4755 intel_set_pipe_src_size(intel_crtc);
29407aab 4756
6e3c9717 4757 if (intel_crtc->config->has_pch_encoder) {
29407aab 4758 intel_cpu_transcoder_set_m_n(intel_crtc,
6e3c9717 4759 &intel_crtc->config->fdi_m_n, NULL);
29407aab
DV
4760 }
4761
4762 ironlake_set_pipeconf(crtc);
4763
f67a559d 4764 intel_crtc->active = true;
8664281b 4765
f6736a1a 4766 for_each_encoder_on_crtc(dev, crtc, encoder)
952735ee
DV
4767 if (encoder->pre_enable)
4768 encoder->pre_enable(encoder);
f67a559d 4769
6e3c9717 4770 if (intel_crtc->config->has_pch_encoder) {
fff367c7
DV
4771 /* Note: FDI PLL enabling _must_ be done before we enable the
4772 * cpu pipes, hence this is separate from all the other fdi/pch
4773 * enabling. */
88cefb6c 4774 ironlake_fdi_pll_enable(intel_crtc);
46b6f814
DV
4775 } else {
4776 assert_fdi_tx_disabled(dev_priv, pipe);
4777 assert_fdi_rx_disabled(dev_priv, pipe);
4778 }
f67a559d 4779
b074cec8 4780 ironlake_pfit_enable(intel_crtc);
f67a559d 4781
9c54c0dd
JB
4782 /*
4783 * On ILK+ LUT must be loaded before the pipe is running but with
4784 * clocks enabled
4785 */
b95c5321 4786 intel_color_load_luts(&pipe_config->base);
9c54c0dd 4787
1d5bf5d9
ID
4788 if (dev_priv->display.initial_watermarks != NULL)
4789 dev_priv->display.initial_watermarks(intel_crtc->config);
e1fdc473 4790 intel_enable_pipe(intel_crtc);
f67a559d 4791
6e3c9717 4792 if (intel_crtc->config->has_pch_encoder)
f67a559d 4793 ironlake_pch_enable(crtc);
c98e9dcf 4794
f9b61ff6
DV
4795 assert_vblank_disabled(crtc);
4796 drm_crtc_vblank_on(crtc);
4797
fa5c73b1
DV
4798 for_each_encoder_on_crtc(dev, crtc, encoder)
4799 encoder->enable(encoder);
61b77ddd
DV
4800
4801 if (HAS_PCH_CPT(dev))
a1520318 4802 cpt_verify_modeset(dev, intel_crtc->pipe);
37ca8d4c
VS
4803
4804 /* Must wait for vblank to avoid spurious PCH FIFO underruns */
4805 if (intel_crtc->config->has_pch_encoder)
4806 intel_wait_for_vblank(dev, pipe);
b2c0593a 4807 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
37ca8d4c 4808 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
6be4a607
JB
4809}
4810
42db64ef
PZ
4811/* IPS only exists on ULT machines and is tied to pipe A. */
4812static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
4813{
f5adf94e 4814 return HAS_IPS(crtc->base.dev) && crtc->pipe == PIPE_A;
42db64ef
PZ
4815}
4816
4f771f10
PZ
4817static void haswell_crtc_enable(struct drm_crtc *crtc)
4818{
4819 struct drm_device *dev = crtc->dev;
4820 struct drm_i915_private *dev_priv = dev->dev_private;
4821 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4822 struct intel_encoder *encoder;
99d736a2 4823 int pipe = intel_crtc->pipe, hsw_workaround_pipe;
4d1de975 4824 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
99d736a2
ML
4825 struct intel_crtc_state *pipe_config =
4826 to_intel_crtc_state(crtc->state);
4f771f10 4827
53d9f4e9 4828 if (WARN_ON(intel_crtc->active))
4f771f10
PZ
4829 return;
4830
81b088ca
VS
4831 if (intel_crtc->config->has_pch_encoder)
4832 intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
4833 false);
4834
8106ddbd 4835 if (intel_crtc->config->shared_dpll)
df8ad70c
DV
4836 intel_enable_shared_dpll(intel_crtc);
4837
6e3c9717 4838 if (intel_crtc->config->has_dp_encoder)
fe3cd48d 4839 intel_dp_set_m_n(intel_crtc, M1_N1);
229fca97 4840
4d1de975
JN
4841 if (!intel_crtc->config->has_dsi_encoder)
4842 intel_set_pipe_timings(intel_crtc);
4843
bc58be60 4844 intel_set_pipe_src_size(intel_crtc);
229fca97 4845
4d1de975
JN
4846 if (cpu_transcoder != TRANSCODER_EDP &&
4847 !transcoder_is_dsi(cpu_transcoder)) {
4848 I915_WRITE(PIPE_MULT(cpu_transcoder),
6e3c9717 4849 intel_crtc->config->pixel_multiplier - 1);
ebb69c95
CT
4850 }
4851
6e3c9717 4852 if (intel_crtc->config->has_pch_encoder) {
229fca97 4853 intel_cpu_transcoder_set_m_n(intel_crtc,
6e3c9717 4854 &intel_crtc->config->fdi_m_n, NULL);
229fca97
DV
4855 }
4856
4d1de975
JN
4857 if (!intel_crtc->config->has_dsi_encoder)
4858 haswell_set_pipeconf(crtc);
4859
391bf048 4860 haswell_set_pipemisc(crtc);
229fca97 4861
b95c5321 4862 intel_color_set_csc(&pipe_config->base);
229fca97 4863
4f771f10 4864 intel_crtc->active = true;
8664281b 4865
6b698516
DV
4866 if (intel_crtc->config->has_pch_encoder)
4867 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
4868 else
4869 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
4870
7d4aefd0 4871 for_each_encoder_on_crtc(dev, crtc, encoder) {
4f771f10
PZ
4872 if (encoder->pre_enable)
4873 encoder->pre_enable(encoder);
7d4aefd0 4874 }
4f771f10 4875
d2d65408 4876 if (intel_crtc->config->has_pch_encoder)
4fe9467d 4877 dev_priv->display.fdi_link_train(crtc);
4fe9467d 4878
a65347ba 4879 if (!intel_crtc->config->has_dsi_encoder)
7d4aefd0 4880 intel_ddi_enable_pipe_clock(intel_crtc);
4f771f10 4881
1c132b44 4882 if (INTEL_INFO(dev)->gen >= 9)
e435d6e5 4883 skylake_pfit_enable(intel_crtc);
ff6d9f55 4884 else
1c132b44 4885 ironlake_pfit_enable(intel_crtc);
4f771f10
PZ
4886
4887 /*
4888 * On ILK+ LUT must be loaded before the pipe is running but with
4889 * clocks enabled
4890 */
b95c5321 4891 intel_color_load_luts(&pipe_config->base);
4f771f10 4892
1f544388 4893 intel_ddi_set_pipe_settings(crtc);
a65347ba 4894 if (!intel_crtc->config->has_dsi_encoder)
7d4aefd0 4895 intel_ddi_enable_transcoder_func(crtc);
4f771f10 4896
1d5bf5d9
ID
4897 if (dev_priv->display.initial_watermarks != NULL)
4898 dev_priv->display.initial_watermarks(pipe_config);
4899 else
4900 intel_update_watermarks(crtc);
4d1de975
JN
4901
4902 /* XXX: Do the pipe assertions at the right place for BXT DSI. */
4903 if (!intel_crtc->config->has_dsi_encoder)
4904 intel_enable_pipe(intel_crtc);
42db64ef 4905
6e3c9717 4906 if (intel_crtc->config->has_pch_encoder)
1507e5bd 4907 lpt_pch_enable(crtc);
4f771f10 4908
a65347ba 4909 if (intel_crtc->config->dp_encoder_is_mst)
0e32b39c
DA
4910 intel_ddi_set_vc_payload_alloc(crtc, true);
4911
f9b61ff6
DV
4912 assert_vblank_disabled(crtc);
4913 drm_crtc_vblank_on(crtc);
4914
8807e55b 4915 for_each_encoder_on_crtc(dev, crtc, encoder) {
4f771f10 4916 encoder->enable(encoder);
8807e55b
JN
4917 intel_opregion_notify_encoder(encoder, true);
4918 }
4f771f10 4919
6b698516
DV
4920 if (intel_crtc->config->has_pch_encoder) {
4921 intel_wait_for_vblank(dev, pipe);
4922 intel_wait_for_vblank(dev, pipe);
4923 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
d2d65408
VS
4924 intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
4925 true);
6b698516 4926 }
d2d65408 4927
e4916946
PZ
4928 /* If we change the relative order between pipe/planes enabling, we need
4929 * to change the workaround. */
99d736a2
ML
4930 hsw_workaround_pipe = pipe_config->hsw_workaround_pipe;
4931 if (IS_HASWELL(dev) && hsw_workaround_pipe != INVALID_PIPE) {
4932 intel_wait_for_vblank(dev, hsw_workaround_pipe);
4933 intel_wait_for_vblank(dev, hsw_workaround_pipe);
4934 }
4f771f10
PZ
4935}
4936
bfd16b2a 4937static void ironlake_pfit_disable(struct intel_crtc *crtc, bool force)
3f8dce3a
DV
4938{
4939 struct drm_device *dev = crtc->base.dev;
4940 struct drm_i915_private *dev_priv = dev->dev_private;
4941 int pipe = crtc->pipe;
4942
4943 /* To avoid upsetting the power well on haswell only disable the pfit if
4944 * it's in use. The hw state code will make sure we get this right. */
bfd16b2a 4945 if (force || crtc->config->pch_pfit.enabled) {
3f8dce3a
DV
4946 I915_WRITE(PF_CTL(pipe), 0);
4947 I915_WRITE(PF_WIN_POS(pipe), 0);
4948 I915_WRITE(PF_WIN_SZ(pipe), 0);
4949 }
4950}
4951
6be4a607
JB
4952static void ironlake_crtc_disable(struct drm_crtc *crtc)
4953{
4954 struct drm_device *dev = crtc->dev;
4955 struct drm_i915_private *dev_priv = dev->dev_private;
4956 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
ef9c3aee 4957 struct intel_encoder *encoder;
6be4a607 4958 int pipe = intel_crtc->pipe;
b52eb4dc 4959
b2c0593a
VS
4960 /*
4961 * Sometimes spurious CPU pipe underruns happen when the
4962 * pipe is already disabled, but FDI RX/TX is still enabled.
4963 * Happens at least with VGA+HDMI cloning. Suppress them.
4964 */
4965 if (intel_crtc->config->has_pch_encoder) {
4966 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
37ca8d4c 4967 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
b2c0593a 4968 }
37ca8d4c 4969
ea9d758d
DV
4970 for_each_encoder_on_crtc(dev, crtc, encoder)
4971 encoder->disable(encoder);
4972
f9b61ff6
DV
4973 drm_crtc_vblank_off(crtc);
4974 assert_vblank_disabled(crtc);
4975
575f7ab7 4976 intel_disable_pipe(intel_crtc);
32f9d658 4977
bfd16b2a 4978 ironlake_pfit_disable(intel_crtc, false);
2c07245f 4979
b2c0593a 4980 if (intel_crtc->config->has_pch_encoder)
5a74f70a
VS
4981 ironlake_fdi_disable(crtc);
4982
bf49ec8c
DV
4983 for_each_encoder_on_crtc(dev, crtc, encoder)
4984 if (encoder->post_disable)
4985 encoder->post_disable(encoder);
2c07245f 4986
6e3c9717 4987 if (intel_crtc->config->has_pch_encoder) {
d925c59a 4988 ironlake_disable_pch_transcoder(dev_priv, pipe);
6be4a607 4989
d925c59a 4990 if (HAS_PCH_CPT(dev)) {
f0f59a00
VS
4991 i915_reg_t reg;
4992 u32 temp;
4993
d925c59a
DV
4994 /* disable TRANS_DP_CTL */
4995 reg = TRANS_DP_CTL(pipe);
4996 temp = I915_READ(reg);
4997 temp &= ~(TRANS_DP_OUTPUT_ENABLE |
4998 TRANS_DP_PORT_SEL_MASK);
4999 temp |= TRANS_DP_PORT_SEL_NONE;
5000 I915_WRITE(reg, temp);
5001
5002 /* disable DPLL_SEL */
5003 temp = I915_READ(PCH_DPLL_SEL);
11887397 5004 temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
d925c59a 5005 I915_WRITE(PCH_DPLL_SEL, temp);
9db4a9c7 5006 }
e3421a18 5007
d925c59a
DV
5008 ironlake_fdi_pll_disable(intel_crtc);
5009 }
81b088ca 5010
b2c0593a 5011 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
81b088ca 5012 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
6be4a607 5013}
1b3c7a47 5014
4f771f10 5015static void haswell_crtc_disable(struct drm_crtc *crtc)
ee7b9f93 5016{
4f771f10
PZ
5017 struct drm_device *dev = crtc->dev;
5018 struct drm_i915_private *dev_priv = dev->dev_private;
ee7b9f93 5019 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4f771f10 5020 struct intel_encoder *encoder;
6e3c9717 5021 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
ee7b9f93 5022
d2d65408
VS
5023 if (intel_crtc->config->has_pch_encoder)
5024 intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
5025 false);
5026
8807e55b
JN
5027 for_each_encoder_on_crtc(dev, crtc, encoder) {
5028 intel_opregion_notify_encoder(encoder, false);
4f771f10 5029 encoder->disable(encoder);
8807e55b 5030 }
4f771f10 5031
f9b61ff6
DV
5032 drm_crtc_vblank_off(crtc);
5033 assert_vblank_disabled(crtc);
5034
4d1de975
JN
5035 /* XXX: Do the pipe assertions at the right place for BXT DSI. */
5036 if (!intel_crtc->config->has_dsi_encoder)
5037 intel_disable_pipe(intel_crtc);
4f771f10 5038
6e3c9717 5039 if (intel_crtc->config->dp_encoder_is_mst)
a4bf214f
VS
5040 intel_ddi_set_vc_payload_alloc(crtc, false);
5041
a65347ba 5042 if (!intel_crtc->config->has_dsi_encoder)
7d4aefd0 5043 intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
4f771f10 5044
1c132b44 5045 if (INTEL_INFO(dev)->gen >= 9)
e435d6e5 5046 skylake_scaler_disable(intel_crtc);
ff6d9f55 5047 else
bfd16b2a 5048 ironlake_pfit_disable(intel_crtc, false);
4f771f10 5049
a65347ba 5050 if (!intel_crtc->config->has_dsi_encoder)
7d4aefd0 5051 intel_ddi_disable_pipe_clock(intel_crtc);
4f771f10 5052
97b040aa
ID
5053 for_each_encoder_on_crtc(dev, crtc, encoder)
5054 if (encoder->post_disable)
5055 encoder->post_disable(encoder);
81b088ca 5056
92966a37
VS
5057 if (intel_crtc->config->has_pch_encoder) {
5058 lpt_disable_pch_transcoder(dev_priv);
503a74e9 5059 lpt_disable_iclkip(dev_priv);
92966a37
VS
5060 intel_ddi_fdi_disable(crtc);
5061
81b088ca
VS
5062 intel_set_pch_fifo_underrun_reporting(dev_priv, TRANSCODER_A,
5063 true);
92966a37 5064 }
4f771f10
PZ
5065}
5066
2dd24552
JB
5067static void i9xx_pfit_enable(struct intel_crtc *crtc)
5068{
5069 struct drm_device *dev = crtc->base.dev;
5070 struct drm_i915_private *dev_priv = dev->dev_private;
6e3c9717 5071 struct intel_crtc_state *pipe_config = crtc->config;
2dd24552 5072
681a8504 5073 if (!pipe_config->gmch_pfit.control)
2dd24552
JB
5074 return;
5075
2dd24552 5076 /*
c0b03411
DV
5077 * The panel fitter should only be adjusted whilst the pipe is disabled,
5078 * according to register description and PRM.
2dd24552 5079 */
c0b03411
DV
5080 WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
5081 assert_pipe_disabled(dev_priv, crtc->pipe);
2dd24552 5082
b074cec8
JB
5083 I915_WRITE(PFIT_PGM_RATIOS, pipe_config->gmch_pfit.pgm_ratios);
5084 I915_WRITE(PFIT_CONTROL, pipe_config->gmch_pfit.control);
5a80c45c
DV
5085
5086 /* Border color in case we don't scale up to the full screen. Black by
5087 * default, change to something else for debugging. */
5088 I915_WRITE(BCLRPAT(crtc->pipe), 0);
2dd24552
JB
5089}
5090
d05410f9
DA
5091static enum intel_display_power_domain port_to_power_domain(enum port port)
5092{
5093 switch (port) {
5094 case PORT_A:
6331a704 5095 return POWER_DOMAIN_PORT_DDI_A_LANES;
d05410f9 5096 case PORT_B:
6331a704 5097 return POWER_DOMAIN_PORT_DDI_B_LANES;
d05410f9 5098 case PORT_C:
6331a704 5099 return POWER_DOMAIN_PORT_DDI_C_LANES;
d05410f9 5100 case PORT_D:
6331a704 5101 return POWER_DOMAIN_PORT_DDI_D_LANES;
d8e19f99 5102 case PORT_E:
6331a704 5103 return POWER_DOMAIN_PORT_DDI_E_LANES;
d05410f9 5104 default:
b9fec167 5105 MISSING_CASE(port);
d05410f9
DA
5106 return POWER_DOMAIN_PORT_OTHER;
5107 }
5108}
5109
25f78f58
VS
5110static enum intel_display_power_domain port_to_aux_power_domain(enum port port)
5111{
5112 switch (port) {
5113 case PORT_A:
5114 return POWER_DOMAIN_AUX_A;
5115 case PORT_B:
5116 return POWER_DOMAIN_AUX_B;
5117 case PORT_C:
5118 return POWER_DOMAIN_AUX_C;
5119 case PORT_D:
5120 return POWER_DOMAIN_AUX_D;
5121 case PORT_E:
5122 /* FIXME: Check VBT for actual wiring of PORT E */
5123 return POWER_DOMAIN_AUX_D;
5124 default:
b9fec167 5125 MISSING_CASE(port);
25f78f58
VS
5126 return POWER_DOMAIN_AUX_A;
5127 }
5128}
5129
319be8ae
ID
5130enum intel_display_power_domain
5131intel_display_port_power_domain(struct intel_encoder *intel_encoder)
5132{
5133 struct drm_device *dev = intel_encoder->base.dev;
5134 struct intel_digital_port *intel_dig_port;
5135
5136 switch (intel_encoder->type) {
5137 case INTEL_OUTPUT_UNKNOWN:
5138 /* Only DDI platforms should ever use this output type */
5139 WARN_ON_ONCE(!HAS_DDI(dev));
5140 case INTEL_OUTPUT_DISPLAYPORT:
5141 case INTEL_OUTPUT_HDMI:
5142 case INTEL_OUTPUT_EDP:
5143 intel_dig_port = enc_to_dig_port(&intel_encoder->base);
d05410f9 5144 return port_to_power_domain(intel_dig_port->port);
0e32b39c
DA
5145 case INTEL_OUTPUT_DP_MST:
5146 intel_dig_port = enc_to_mst(&intel_encoder->base)->primary;
5147 return port_to_power_domain(intel_dig_port->port);
319be8ae
ID
5148 case INTEL_OUTPUT_ANALOG:
5149 return POWER_DOMAIN_PORT_CRT;
5150 case INTEL_OUTPUT_DSI:
5151 return POWER_DOMAIN_PORT_DSI;
5152 default:
5153 return POWER_DOMAIN_PORT_OTHER;
5154 }
5155}
5156
25f78f58
VS
5157enum intel_display_power_domain
5158intel_display_port_aux_power_domain(struct intel_encoder *intel_encoder)
5159{
5160 struct drm_device *dev = intel_encoder->base.dev;
5161 struct intel_digital_port *intel_dig_port;
5162
5163 switch (intel_encoder->type) {
5164 case INTEL_OUTPUT_UNKNOWN:
651174a4
ID
5165 case INTEL_OUTPUT_HDMI:
5166 /*
5167 * Only DDI platforms should ever use these output types.
5168 * We can get here after the HDMI detect code has already set
5169 * the type of the shared encoder. Since we can't be sure
5170 * what's the status of the given connectors, play safe and
5171 * run the DP detection too.
5172 */
25f78f58
VS
5173 WARN_ON_ONCE(!HAS_DDI(dev));
5174 case INTEL_OUTPUT_DISPLAYPORT:
5175 case INTEL_OUTPUT_EDP:
5176 intel_dig_port = enc_to_dig_port(&intel_encoder->base);
5177 return port_to_aux_power_domain(intel_dig_port->port);
5178 case INTEL_OUTPUT_DP_MST:
5179 intel_dig_port = enc_to_mst(&intel_encoder->base)->primary;
5180 return port_to_aux_power_domain(intel_dig_port->port);
5181 default:
b9fec167 5182 MISSING_CASE(intel_encoder->type);
25f78f58
VS
5183 return POWER_DOMAIN_AUX_A;
5184 }
5185}
5186
74bff5f9
ML
5187static unsigned long get_crtc_power_domains(struct drm_crtc *crtc,
5188 struct intel_crtc_state *crtc_state)
77d22dca 5189{
319be8ae 5190 struct drm_device *dev = crtc->dev;
74bff5f9 5191 struct drm_encoder *encoder;
319be8ae
ID
5192 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5193 enum pipe pipe = intel_crtc->pipe;
77d22dca 5194 unsigned long mask;
74bff5f9 5195 enum transcoder transcoder = crtc_state->cpu_transcoder;
77d22dca 5196
74bff5f9 5197 if (!crtc_state->base.active)
292b990e
ML
5198 return 0;
5199
77d22dca
ID
5200 mask = BIT(POWER_DOMAIN_PIPE(pipe));
5201 mask |= BIT(POWER_DOMAIN_TRANSCODER(transcoder));
74bff5f9
ML
5202 if (crtc_state->pch_pfit.enabled ||
5203 crtc_state->pch_pfit.force_thru)
77d22dca
ID
5204 mask |= BIT(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
5205
74bff5f9
ML
5206 drm_for_each_encoder_mask(encoder, dev, crtc_state->base.encoder_mask) {
5207 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
5208
319be8ae 5209 mask |= BIT(intel_display_port_power_domain(intel_encoder));
74bff5f9 5210 }
319be8ae 5211
15e7ec29
ML
5212 if (crtc_state->shared_dpll)
5213 mask |= BIT(POWER_DOMAIN_PLLS);
5214
77d22dca
ID
5215 return mask;
5216}
5217
74bff5f9
ML
5218static unsigned long
5219modeset_get_crtc_power_domains(struct drm_crtc *crtc,
5220 struct intel_crtc_state *crtc_state)
77d22dca 5221{
292b990e
ML
5222 struct drm_i915_private *dev_priv = crtc->dev->dev_private;
5223 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5224 enum intel_display_power_domain domain;
5225 unsigned long domains, new_domains, old_domains;
77d22dca 5226
292b990e 5227 old_domains = intel_crtc->enabled_power_domains;
74bff5f9
ML
5228 intel_crtc->enabled_power_domains = new_domains =
5229 get_crtc_power_domains(crtc, crtc_state);
77d22dca 5230
292b990e
ML
5231 domains = new_domains & ~old_domains;
5232
5233 for_each_power_domain(domain, domains)
5234 intel_display_power_get(dev_priv, domain);
5235
5236 return old_domains & ~new_domains;
5237}
5238
5239static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
5240 unsigned long domains)
5241{
5242 enum intel_display_power_domain domain;
5243
5244 for_each_power_domain(domain, domains)
5245 intel_display_power_put(dev_priv, domain);
5246}
77d22dca 5247
/*
 * Compute the maximum supported dot clock (in kHz) from the maximum
 * cdclk frequency, applying a platform-dependent guardband.
 *
 * NOTE(review): the 90%/95% factors look like pipe-throughput derating
 * and the 2x on gen < 4 presumably accounts for double-wide pipe mode —
 * neither is established by this file alone, confirm against the PRM.
 */
static int intel_compute_max_dotclk(struct drm_i915_private *dev_priv)
{
	int max_cdclk_freq = dev_priv->max_cdclk_freq;

	if (INTEL_INFO(dev_priv)->gen >= 9 ||
	    IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		return max_cdclk_freq;
	else if (IS_CHERRYVIEW(dev_priv))
		return max_cdclk_freq*95/100;
	else if (INTEL_INFO(dev_priv)->gen < 4)
		return 2*max_cdclk_freq*90/100;
	else
		return max_cdclk_freq*90/100;
}
5262
/*
 * Determine the platform's maximum CD clock frequency (kHz) and cache it
 * in dev_priv->max_cdclk_freq, then derive max_dotclk_freq from it.
 *
 * On SKL/KBL the limit is read from the SKL_DFSM fuse register; on BDW it
 * depends on a fuse strap and the SKU (ULX/ULT); other platforms use
 * fixed, known values. Platforms not listed are assumed to have a fixed
 * cdclk equal to the current one.
 */
static void intel_update_max_cdclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
		u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;

		if (limit == SKL_DFSM_CDCLK_LIMIT_675)
			dev_priv->max_cdclk_freq = 675000;
		else if (limit == SKL_DFSM_CDCLK_LIMIT_540)
			dev_priv->max_cdclk_freq = 540000;
		else if (limit == SKL_DFSM_CDCLK_LIMIT_450)
			dev_priv->max_cdclk_freq = 450000;
		else
			dev_priv->max_cdclk_freq = 337500;
	} else if (IS_BROXTON(dev)) {
		dev_priv->max_cdclk_freq = 624000;
	} else if (IS_BROADWELL(dev))  {
		/*
		 * FIXME with extra cooling we can allow
		 * 540 MHz for ULX and 675 Mhz for ULT.
		 * How can we know if extra cooling is
		 * available? PCI ID, VTB, something else?
		 */
		if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
			dev_priv->max_cdclk_freq = 450000;
		else if (IS_BDW_ULX(dev))
			dev_priv->max_cdclk_freq = 450000;
		else if (IS_BDW_ULT(dev))
			dev_priv->max_cdclk_freq = 540000;
		else
			dev_priv->max_cdclk_freq = 675000;
	} else if (IS_CHERRYVIEW(dev)) {
		dev_priv->max_cdclk_freq = 320000;
	} else if (IS_VALLEYVIEW(dev)) {
		dev_priv->max_cdclk_freq = 400000;
	} else {
		/* otherwise assume cdclk is fixed */
		dev_priv->max_cdclk_freq = dev_priv->cdclk_freq;
	}

	dev_priv->max_dotclk_freq = intel_compute_max_dotclk(dev_priv);

	DRM_DEBUG_DRIVER("Max CD clock rate: %d kHz\n",
			 dev_priv->max_cdclk_freq);

	DRM_DEBUG_DRIVER("Max dotclock rate: %d kHz\n",
			 dev_priv->max_dotclk_freq);
}
5312
/*
 * Re-read the current CD clock frequency from hardware into
 * dev_priv->cdclk_freq, reprogram the GMBus reference divider on
 * VLV/CHV (it is derived from cdclk), and lazily initialize the max
 * cdclk bookkeeping on first call.
 */
static void intel_update_cdclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->cdclk_freq = dev_priv->display.get_display_clock_speed(dev);
	DRM_DEBUG_DRIVER("Current CD clock rate: %d kHz\n",
			 dev_priv->cdclk_freq);

	/*
	 * 9:0 CMBUS [sic] CDCLK frequency (cdfreq):
	 * Programmng [sic] note: bit[9:2] should be programmed to the number
	 * of cdclk that generates 4MHz reference clock freq which is used to
	 * generate GMBus clock. This will vary with the cdclk freq.
	 */
	if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
		I915_WRITE(GMBUSFREQ_VLV, DIV_ROUND_UP(dev_priv->cdclk_freq, 1000));

	/* First call: derive the max cdclk/dotclk limits once. */
	if (dev_priv->max_cdclk_freq == 0)
		intel_update_max_cdclk(dev);
}
5333
/*
 * Convert from kHz to the .1 fixpoint MHz with -1MHz offset format used
 * by the CDCLK_CTL "decimal" field, i.e. units of 0.5 MHz offset by
 * -1 MHz: e.g. 337500 kHz -> (337.5 - 1) * 2 = 673.
 */
static int skl_cdclk_decimal(int cdclk)
{
	return DIV_ROUND_CLOSEST(cdclk - 1000, 500);
}
5339
9ef56154 5340static void broxton_set_cdclk(struct drm_i915_private *dev_priv, int cdclk)
f8437dd1 5341{
f8437dd1
VK
5342 uint32_t divider;
5343 uint32_t ratio;
9ef56154 5344 uint32_t current_cdclk;
f8437dd1
VK
5345 int ret;
5346
5347 /* frequency = 19.2MHz * ratio / 2 / div{1,1.5,2,4} */
9ef56154 5348 switch (cdclk) {
f8437dd1
VK
5349 case 144000:
5350 divider = BXT_CDCLK_CD2X_DIV_SEL_4;
5351 ratio = BXT_DE_PLL_RATIO(60);
5352 break;
5353 case 288000:
5354 divider = BXT_CDCLK_CD2X_DIV_SEL_2;
5355 ratio = BXT_DE_PLL_RATIO(60);
5356 break;
5357 case 384000:
5358 divider = BXT_CDCLK_CD2X_DIV_SEL_1_5;
5359 ratio = BXT_DE_PLL_RATIO(60);
5360 break;
5361 case 576000:
5362 divider = BXT_CDCLK_CD2X_DIV_SEL_1;
5363 ratio = BXT_DE_PLL_RATIO(60);
5364 break;
5365 case 624000:
5366 divider = BXT_CDCLK_CD2X_DIV_SEL_1;
5367 ratio = BXT_DE_PLL_RATIO(65);
5368 break;
5369 case 19200:
5370 /*
5371 * Bypass frequency with DE PLL disabled. Init ratio, divider
5372 * to suppress GCC warning.
5373 */
5374 ratio = 0;
5375 divider = 0;
5376 break;
5377 default:
9ef56154 5378 DRM_ERROR("unsupported CDCLK freq %d", cdclk);
f8437dd1
VK
5379
5380 return;
5381 }
5382
5383 mutex_lock(&dev_priv->rps.hw_lock);
5384 /* Inform power controller of upcoming frequency change */
5385 ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
5386 0x80000000);
5387 mutex_unlock(&dev_priv->rps.hw_lock);
5388
5389 if (ret) {
5390 DRM_ERROR("PCode CDCLK freq change notify failed (err %d, freq %d)\n",
9ef56154 5391 ret, cdclk);
f8437dd1
VK
5392 return;
5393 }
5394
9ef56154 5395 current_cdclk = I915_READ(CDCLK_CTL) & CDCLK_FREQ_DECIMAL_MASK;
f8437dd1 5396 /* convert from .1 fixpoint MHz with -1MHz offset to kHz */
9ef56154 5397 current_cdclk = current_cdclk * 500 + 1000;
f8437dd1
VK
5398
5399 /*
5400 * DE PLL has to be disabled when
5401 * - setting to 19.2MHz (bypass, PLL isn't used)
5402 * - before setting to 624MHz (PLL needs toggling)
5403 * - before setting to any frequency from 624MHz (PLL needs toggling)
5404 */
9ef56154
VS
5405 if (cdclk == 19200 || cdclk == 624000 ||
5406 current_cdclk == 624000) {
f8437dd1
VK
5407 I915_WRITE(BXT_DE_PLL_ENABLE, ~BXT_DE_PLL_PLL_ENABLE);
5408 /* Timeout 200us */
5409 if (wait_for(!(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK),
5410 1))
5411 DRM_ERROR("timout waiting for DE PLL unlock\n");
5412 }
5413
9ef56154 5414 if (cdclk != 19200) {
f8437dd1
VK
5415 uint32_t val;
5416
5417 val = I915_READ(BXT_DE_PLL_CTL);
5418 val &= ~BXT_DE_PLL_RATIO_MASK;
5419 val |= ratio;
5420 I915_WRITE(BXT_DE_PLL_CTL, val);
5421
5422 I915_WRITE(BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE);
5423 /* Timeout 200us */
5424 if (wait_for(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK, 1))
5425 DRM_ERROR("timeout waiting for DE PLL lock\n");
5426
b8e75705 5427 val = divider | skl_cdclk_decimal(cdclk);
7fe62757
VS
5428 /*
5429 * FIXME if only the cd2x divider needs changing, it could be done
5430 * without shutting off the pipe (if only one pipe is active).
5431 */
5432 val |= BXT_CDCLK_CD2X_PIPE_NONE;
f8437dd1
VK
5433 /*
5434 * Disable SSA Precharge when CD clock frequency < 500 MHz,
5435 * enable otherwise.
5436 */
9ef56154 5437 if (cdclk >= 500000)
f8437dd1 5438 val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
f8437dd1
VK
5439 I915_WRITE(CDCLK_CTL, val);
5440 }
5441
5442 mutex_lock(&dev_priv->rps.hw_lock);
5443 ret = sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
9ef56154 5444 DIV_ROUND_UP(cdclk, 25000));
f8437dd1
VK
5445 mutex_unlock(&dev_priv->rps.hw_lock);
5446
5447 if (ret) {
5448 DRM_ERROR("PCode CDCLK freq set failed, (err %d, freq %d)\n",
9ef56154 5449 ret, cdclk);
f8437dd1
VK
5450 return;
5451 }
5452
c6c4696f 5453 intel_update_cdclk(dev_priv->dev);
f8437dd1
VK
5454}
5455
/*
 * Check whether the Broxton CD clock is fully up: the DE PLL must be
 * enabled and DBUF power must be both requested and settled.
 * Returns false (with a debug message) on any partially-enabled state.
 */
static bool broxton_cdclk_is_enabled(struct drm_i915_private *dev_priv)
{
	if (!(I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_PLL_ENABLE))
		return false;

	/* TODO: Check for a valid CDCLK rate */

	if (!(I915_READ(DBUF_CTL) & DBUF_POWER_REQUEST)) {
		DRM_DEBUG_DRIVER("CDCLK enabled, but DBUF power not requested\n");

		return false;
	}

	if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE)) {
		DRM_DEBUG_DRIVER("CDCLK enabled, but DBUF power hasn't settled\n");

		return false;
	}

	return true;
}
5477
/*
 * Public wrapper used by state verification: reports whether the BXT
 * CDCLK/DBUF hardware state is fully enabled.
 */
bool broxton_cdclk_verify_state(struct drm_i915_private *dev_priv)
{
	return broxton_cdclk_is_enabled(dev_priv);
}
5482
c6c4696f 5483void broxton_init_cdclk(struct drm_i915_private *dev_priv)
f8437dd1 5484{
f8437dd1 5485 /* check if cd clock is enabled */
c2e001ef
ID
5486 if (broxton_cdclk_is_enabled(dev_priv)) {
5487 DRM_DEBUG_KMS("CDCLK already enabled, won't reprogram it\n");
f8437dd1
VK
5488 return;
5489 }
5490
c2e001ef
ID
5491 DRM_DEBUG_KMS("CDCLK not enabled, enabling it\n");
5492
f8437dd1
VK
5493 /*
5494 * FIXME:
5495 * - The initial CDCLK needs to be read from VBT.
5496 * Need to make this change after VBT has changes for BXT.
5497 * - check if setting the max (or any) cdclk freq is really necessary
5498 * here, it belongs to modeset time
5499 */
c6c4696f 5500 broxton_set_cdclk(dev_priv, 624000);
f8437dd1
VK
5501
5502 I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
22e02c0b
VS
5503 POSTING_READ(DBUF_CTL);
5504
f8437dd1
VK
5505 udelay(10);
5506
5507 if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE))
5508 DRM_ERROR("DBuf power enable timeout!\n");
5509}
5510
c6c4696f 5511void broxton_uninit_cdclk(struct drm_i915_private *dev_priv)
f8437dd1 5512{
f8437dd1 5513 I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
22e02c0b
VS
5514 POSTING_READ(DBUF_CTL);
5515
f8437dd1
VK
5516 udelay(10);
5517
5518 if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
5519 DRM_ERROR("DBuf power disable timeout!\n");
5520
5521 /* Set minimum (bypass) frequency, in effect turning off the DE PLL */
c6c4696f 5522 broxton_set_cdclk(dev_priv, 19200);
f8437dd1
VK
5523}
5524
/*
 * Table of supported SKL cdclk frequencies (kHz) and the DPLL0 VCO
 * (MHz) each one is derived from.
 */
static const struct skl_cdclk_entry {
	unsigned int freq;
	unsigned int vco;
} skl_cdclk_frequencies[] = {
	{ .freq = 308570, .vco = 8640 },
	{ .freq = 337500, .vco = 8100 },
	{ .freq = 432000, .vco = 8640 },
	{ .freq = 450000, .vco = 8100 },
	{ .freq = 540000, .vco = 8100 },
	{ .freq = 617140, .vco = 8640 },
	{ .freq = 675000, .vco = 8100 },
};

/*
 * Look up the DPLL0 VCO (MHz) required to produce @freq kHz.
 * Unknown frequencies fall back to the default 8100 MHz VCO.
 */
static unsigned int skl_cdclk_get_vco(unsigned int freq)
{
	const struct skl_cdclk_entry *entry;
	unsigned int idx;
	unsigned int count = sizeof(skl_cdclk_frequencies) /
			     sizeof(skl_cdclk_frequencies[0]);

	for (idx = 0; idx < count; idx++) {
		entry = &skl_cdclk_frequencies[idx];
		if (entry->freq == freq)
			return entry->vco;
	}

	return 8100;
}
5551
/*
 * Enable DPLL0 at the given VCO (8640 or 8100 MHz).
 *
 * First selects the minimum cdclk for that VCO in CDCLK_CTL, then
 * programs the DPLL0 link rate accordingly and waits for PLL lock.
 */
static void
skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco)
{
	int min_cdclk;
	u32 val;

	/* select the minimum CDCLK before enabling DPLL 0 */
	if (vco == 8640)
		min_cdclk = 308570;
	else
		min_cdclk = 337500;

	val = CDCLK_FREQ_337_308 | skl_cdclk_decimal(min_cdclk);
	I915_WRITE(CDCLK_CTL, val);
	POSTING_READ(CDCLK_CTL);

	/*
	 * We always enable DPLL0 with the lowest link rate possible, but still
	 * taking into account the VCO required to operate the eDP panel at the
	 * desired frequency. The usual DP link rates operate with a VCO of
	 * 8100 while the eDP 1.4 alternate link rates need a VCO of 8640.
	 * The modeset code is responsible for the selection of the exact link
	 * rate later on, with the constraint of choosing a frequency that
	 * works with required_vco.
	 */
	val = I915_READ(DPLL_CTRL1);

	val &= ~(DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) | DPLL_CTRL1_SSC(SKL_DPLL0) |
		 DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0));
	val |= DPLL_CTRL1_OVERRIDE(SKL_DPLL0);
	if (vco == 8640)
		val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080,
					    SKL_DPLL0);
	else
		val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
					    SKL_DPLL0);

	I915_WRITE(DPLL_CTRL1, val);
	POSTING_READ(DPLL_CTRL1);

	I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) | LCPLL_PLL_ENABLE);

	/* 5 ms timeout for the PLL to report lock. */
	if (wait_for(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK, 5))
		DRM_ERROR("DPLL0 not locked\n");
}
5597
/*
 * Disable DPLL0 and wait (1 ms timeout) for the lock bit to clear.
 */
static void
skl_dpll0_disable(struct drm_i915_private *dev_priv)
{
	I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & ~LCPLL_PLL_ENABLE);
	if (wait_for(!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_LOCK), 1))
		DRM_ERROR("Couldn't disable DPLL0\n");
}
5605
/*
 * Ask the PCU whether it is ready for a cdclk change.
 * Returns true only if the mailbox read succeeded and the PCU reports
 * SKL_CDCLK_READY_FOR_CHANGE.
 */
static bool skl_cdclk_pcu_ready(struct drm_i915_private *dev_priv)
{
	int ret;
	u32 val;

	/* inform PCU we want to change CDCLK */
	val = SKL_CDCLK_PREPARE_FOR_CHANGE;
	mutex_lock(&dev_priv->rps.hw_lock);
	ret = sandybridge_pcode_read(dev_priv, SKL_PCODE_CDCLK_CONTROL, &val);
	mutex_unlock(&dev_priv->rps.hw_lock);

	return ret == 0 && (val & SKL_CDCLK_READY_FOR_CHANGE);
}
5619
/*
 * Poll skl_cdclk_pcu_ready() up to 15 times with a 10 us delay between
 * attempts. Returns true as soon as the PCU is ready, false on timeout.
 */
static bool skl_cdclk_wait_for_pcu_ready(struct drm_i915_private *dev_priv)
{
	unsigned int i;

	for (i = 0; i < 15; i++) {
		if (skl_cdclk_pcu_ready(dev_priv))
			return true;
		udelay(10);
	}

	return false;
}
5632
9ef56154 5633static void skl_set_cdclk(struct drm_i915_private *dev_priv, int cdclk)
5d96d8af 5634{
560a7ae4 5635 struct drm_device *dev = dev_priv->dev;
5d96d8af
DL
5636 u32 freq_select, pcu_ack;
5637
9ef56154 5638 DRM_DEBUG_DRIVER("Changing CDCLK to %dKHz\n", cdclk);
5d96d8af
DL
5639
5640 if (!skl_cdclk_wait_for_pcu_ready(dev_priv)) {
5641 DRM_ERROR("failed to inform PCU about cdclk change\n");
5642 return;
5643 }
5644
5645 /* set CDCLK_CTL */
9ef56154 5646 switch (cdclk) {
5d96d8af
DL
5647 case 450000:
5648 case 432000:
5649 freq_select = CDCLK_FREQ_450_432;
5650 pcu_ack = 1;
5651 break;
5652 case 540000:
5653 freq_select = CDCLK_FREQ_540;
5654 pcu_ack = 2;
5655 break;
5656 case 308570:
5657 case 337500:
5658 default:
5659 freq_select = CDCLK_FREQ_337_308;
5660 pcu_ack = 0;
5661 break;
5662 case 617140:
5663 case 675000:
5664 freq_select = CDCLK_FREQ_675_617;
5665 pcu_ack = 3;
5666 break;
5667 }
5668
9ef56154 5669 I915_WRITE(CDCLK_CTL, freq_select | skl_cdclk_decimal(cdclk));
5d96d8af
DL
5670 POSTING_READ(CDCLK_CTL);
5671
5672 /* inform PCU of the change */
5673 mutex_lock(&dev_priv->rps.hw_lock);
5674 sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, pcu_ack);
5675 mutex_unlock(&dev_priv->rps.hw_lock);
560a7ae4
DL
5676
5677 intel_update_cdclk(dev);
5d96d8af
DL
5678}
5679
/*
 * Tear down the SKL CD clock: power down DBUF, then disable DPLL0.
 */
void skl_uninit_cdclk(struct drm_i915_private *dev_priv)
{
	/* disable DBUF power */
	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST);
	POSTING_READ(DBUF_CTL);

	/* Give DBUF power a moment to drop before checking the ack bit. */
	udelay(10);

	if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE)
		DRM_ERROR("DBuf power disable timeout\n");

	skl_dpll0_disable(dev_priv);
}
5693
/*
 * Bring up the SKL CD clock: enable DPLL0 if the BIOS left it off,
 * program CDCLK to the BIOS-chosen boot frequency, and power up DBUF.
 */
void skl_init_cdclk(struct drm_i915_private *dev_priv)
{
	unsigned int vco;

	/* DPLL0 not enabled (happens on early BIOS versions) */
	if (!(I915_READ(LCPLL1_CTL) & LCPLL_PLL_ENABLE)) {
		/* enable DPLL0 */
		vco = skl_cdclk_get_vco(dev_priv->skl_boot_cdclk);
		skl_dpll0_enable(dev_priv, vco);
	}

	/* set CDCLK to the frequency the BIOS chose */
	skl_set_cdclk(dev_priv, dev_priv->skl_boot_cdclk);

	/* enable DBUF power */
	I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST);
	POSTING_READ(DBUF_CTL);

	/* Give DBUF power a moment to come up before checking the ack bit. */
	udelay(10);

	if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE))
		DRM_ERROR("DBuf power enable timeout\n");
}
5717
/*
 * Verify that the BIOS-programmed DPLL0/CDCLK state is usable; if not,
 * reprogram CDCLK from scratch at the maximum supported frequency.
 *
 * Returns true if sanitization (a full reprogram) was needed, false if
 * the BIOS state was left untouched.
 * NOTE(review): declared int but returns true/false — callers treat it
 * as a bool.
 */
int skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
{
	uint32_t lcpll1 = I915_READ(LCPLL1_CTL);
	uint32_t cdctl = I915_READ(CDCLK_CTL);
	int freq = dev_priv->skl_boot_cdclk;

	/*
	 * check if the pre-os initialized the display
	 * There is SWF18 scratchpad register defined which is set by the
	 * pre-os which can be used by the OS drivers to check the status
	 */
	if ((I915_READ(SWF_ILK(0x18)) & 0x00FFFFFF) == 0)
		goto sanitize;

	/* Is PLL enabled and locked ? */
	if (!((lcpll1 & LCPLL_PLL_ENABLE) && (lcpll1 & LCPLL_PLL_LOCK)))
		goto sanitize;

	/* DPLL okay; verify the cdclock
	 *
	 * Noticed in some instances that the freq selection is correct but
	 * decimal part is programmed wrong from BIOS where pre-os does not
	 * enable display. Verify the same as well.
	 */
	if (cdctl == ((cdctl & CDCLK_FREQ_SEL_MASK) | skl_cdclk_decimal(freq)))
		/* All well; nothing to sanitize */
		return false;
sanitize:
	/*
	 * As of now initialize with max cdclk till
	 * we get dynamic cdclk support
	 */
	dev_priv->skl_boot_cdclk = dev_priv->max_cdclk_freq;
	skl_init_cdclk(dev_priv);

	/* we did have to sanitize */
	return true;
}
5756
/* Adjust CDclk dividers to allow high res or save power if possible */
/*
 * VLV cdclk programming: request the target voltage/frequency point via
 * the Punit, program the CCK display clock divider for the 400 MHz case,
 * and adjust the Bunit self-refresh exit latency for the bandwidth.
 * The statement order (Punit first, then sideband) is part of the
 * hardware sequence — do not reorder.
 */
static void valleyview_set_cdclk(struct drm_device *dev, int cdclk)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val, cmd;

	/* The cached frequency must match hardware before we change it. */
	WARN_ON(dev_priv->display.get_display_clock_speed(dev)
					!= dev_priv->cdclk_freq);

	if (cdclk >= 320000) /* jump to highest voltage for 400MHz too */
		cmd = 2;
	else if (cdclk == 266667)
		cmd = 1;
	else
		cmd = 0;

	mutex_lock(&dev_priv->rps.hw_lock);
	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	val &= ~DSPFREQGUAR_MASK;
	val |= (cmd << DSPFREQGUAR_SHIFT);
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
		      DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT),
		     50)) {
		DRM_ERROR("timed out waiting for CDclk change\n");
	}
	mutex_unlock(&dev_priv->rps.hw_lock);

	mutex_lock(&dev_priv->sb_lock);

	if (cdclk == 400000) {
		u32 divider;

		/* Derive the CCK divider from the HPLL frequency. */
		divider = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;

		/* adjust cdclk divider */
		val = vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL);
		val &= ~CCK_FREQUENCY_VALUES;
		val |= divider;
		vlv_cck_write(dev_priv, CCK_DISPLAY_CLOCK_CONTROL, val);

		if (wait_for((vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL) &
			      CCK_FREQUENCY_STATUS) == (divider << CCK_FREQUENCY_STATUS_SHIFT),
			     50))
			DRM_ERROR("timed out waiting for CDclk change\n");
	}

	/* adjust self-refresh exit latency value */
	val = vlv_bunit_read(dev_priv, BUNIT_REG_BISOC);
	val &= ~0x7f;

	/*
	 * For high bandwidth configs, we set a higher latency in the bunit
	 * so that the core display fetch happens in time to avoid underruns.
	 */
	if (cdclk == 400000)
		val |= 4500 / 250; /* 4.5 usec */
	else
		val |= 3000 / 250; /* 3.0 usec */
	vlv_bunit_write(dev_priv, BUNIT_REG_BISOC, val);

	mutex_unlock(&dev_priv->sb_lock);

	intel_update_cdclk(dev);
}
5822
/*
 * CHV cdclk programming: unlike VLV, only the CCK divider needs to be
 * written into the Punit DSPFREQ register; supported frequencies are
 * 200/266.667/320/333.333 MHz.
 */
static void cherryview_set_cdclk(struct drm_device *dev, int cdclk)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val, cmd;

	/* The cached frequency must match hardware before we change it. */
	WARN_ON(dev_priv->display.get_display_clock_speed(dev)
					!= dev_priv->cdclk_freq);

	switch (cdclk) {
	case 333333:
	case 320000:
	case 266667:
	case 200000:
		break;
	default:
		MISSING_CASE(cdclk);
		return;
	}

	/*
	 * Specs are full of misinformation, but testing on actual
	 * hardware has shown that we just need to write the desired
	 * CCK divider into the Punit register.
	 */
	cmd = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1;

	mutex_lock(&dev_priv->rps.hw_lock);
	val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
	val &= ~DSPFREQGUAR_MASK_CHV;
	val |= (cmd << DSPFREQGUAR_SHIFT_CHV);
	vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val);
	if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) &
		      DSPFREQSTAT_MASK_CHV) == (cmd << DSPFREQSTAT_SHIFT_CHV),
		     50)) {
		DRM_ERROR("timed out waiting for CDclk change\n");
	}
	mutex_unlock(&dev_priv->rps.hw_lock);

	intel_update_cdclk(dev);
}
5863
/*
 * Pick the lowest VLV/CHV cdclk (kHz) that can carry @max_pixclk,
 * leaving a 90% (VLV) or 95% (CHV) guardband above the pixel rate.
 */
static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv,
				 int max_pixclk)
{
	/* 320 vs 333.333 MHz bin depends on whether HPLL divides evenly. */
	int freq_320 = (dev_priv->hpll_freq <<  1) % 320000 != 0 ? 333333 : 320000;
	int limit = IS_CHERRYVIEW(dev_priv) ? 95 : 90;

	/*
	 * Really only a few cases to deal with, as only 4 CDclks are supported:
	 *   200MHz
	 *   267MHz
	 *   320/333MHz (depends on HPLL freq)
	 *   400MHz (VLV only)
	 * So we check to see whether we're above 90% (VLV) or 95% (CHV)
	 * of the lower bin and adjust if needed.
	 *
	 * We seem to get an unstable or solid color picture at 200MHz.
	 * Not sure what's wrong. For now use 200MHz only when all pipes
	 * are off.
	 */
	if (!IS_CHERRYVIEW(dev_priv) &&
	    max_pixclk > freq_320*limit/100)
		return 400000;
	else if (max_pixclk > 266667*limit/100)
		return freq_320;
	else if (max_pixclk > 0)
		return 266667;
	else
		return 200000;
}
5893
c44deb6c 5894static int broxton_calc_cdclk(int max_pixclk)
f8437dd1
VK
5895{
5896 /*
5897 * FIXME:
f8437dd1
VK
5898 * - set 19.2MHz bypass frequency if there are no active pipes
5899 */
760e1477 5900 if (max_pixclk > 576000)
f8437dd1 5901 return 624000;
760e1477 5902 else if (max_pixclk > 384000)
f8437dd1 5903 return 576000;
760e1477 5904 else if (max_pixclk > 288000)
f8437dd1 5905 return 384000;
760e1477 5906 else if (max_pixclk > 144000)
f8437dd1
VK
5907 return 288000;
5908 else
5909 return 144000;
5910}
5911
e8788cbc 5912/* Compute the max pixel clock for new configuration. */
a821fc46
ACO
5913static int intel_mode_max_pixclk(struct drm_device *dev,
5914 struct drm_atomic_state *state)
30a970c6 5915{
565602d7
ML
5916 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
5917 struct drm_i915_private *dev_priv = dev->dev_private;
5918 struct drm_crtc *crtc;
5919 struct drm_crtc_state *crtc_state;
5920 unsigned max_pixclk = 0, i;
5921 enum pipe pipe;
30a970c6 5922
565602d7
ML
5923 memcpy(intel_state->min_pixclk, dev_priv->min_pixclk,
5924 sizeof(intel_state->min_pixclk));
304603f4 5925
565602d7
ML
5926 for_each_crtc_in_state(state, crtc, crtc_state, i) {
5927 int pixclk = 0;
5928
5929 if (crtc_state->enable)
5930 pixclk = crtc_state->adjusted_mode.crtc_clock;
304603f4 5931
565602d7 5932 intel_state->min_pixclk[i] = pixclk;
30a970c6
JB
5933 }
5934
565602d7
ML
5935 for_each_pipe(dev_priv, pipe)
5936 max_pixclk = max(intel_state->min_pixclk[pipe], max_pixclk);
5937
30a970c6
JB
5938 return max_pixclk;
5939}
5940
27c329ed 5941static int valleyview_modeset_calc_cdclk(struct drm_atomic_state *state)
30a970c6 5942{
27c329ed
ML
5943 struct drm_device *dev = state->dev;
5944 struct drm_i915_private *dev_priv = dev->dev_private;
5945 int max_pixclk = intel_mode_max_pixclk(dev, state);
1a617b77
ML
5946 struct intel_atomic_state *intel_state =
5947 to_intel_atomic_state(state);
30a970c6 5948
1a617b77 5949 intel_state->cdclk = intel_state->dev_cdclk =
27c329ed 5950 valleyview_calc_cdclk(dev_priv, max_pixclk);
0a9ab303 5951
1a617b77
ML
5952 if (!intel_state->active_crtcs)
5953 intel_state->dev_cdclk = valleyview_calc_cdclk(dev_priv, 0);
5954
27c329ed
ML
5955 return 0;
5956}
304603f4 5957
/*
 * Atomic check hook: compute the cdclk required by @state on Broxton.
 * Mirrors valleyview_modeset_calc_cdclk() but uses ilk_max_pixel_rate()
 * and the BXT frequency table.
 */
static int broxton_modeset_calc_cdclk(struct drm_atomic_state *state)
{
	int max_pixclk = ilk_max_pixel_rate(state);
	struct intel_atomic_state *intel_state =
		to_intel_atomic_state(state);

	intel_state->cdclk = intel_state->dev_cdclk =
		broxton_calc_cdclk(max_pixclk);

	if (!intel_state->active_crtcs)
		intel_state->dev_cdclk = broxton_calc_cdclk(0);

	return 0;
}
5972
/*
 * Program the VLV/CHV PFI credits in GCI_CONTROL based on the ratio of
 * cdclk to czclk: more credits when cdclk >= czclk, platform defaults
 * otherwise. Defaults are written first as a workaround before the real
 * value is latched with PFI_CREDIT_RESEND.
 */
static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv)
{
	unsigned int credits, default_credits;

	if (IS_CHERRYVIEW(dev_priv))
		default_credits = PFI_CREDIT(12);
	else
		default_credits = PFI_CREDIT(8);

	if (dev_priv->cdclk_freq >= dev_priv->czclk_freq) {
		/* CHV suggested value is 31 or 63 */
		if (IS_CHERRYVIEW(dev_priv))
			credits = PFI_CREDIT_63;
		else
			credits = PFI_CREDIT(15);
	} else {
		credits = default_credits;
	}

	/*
	 * WA - write default credits before re-programming
	 * FIXME: should we also set the resend bit here?
	 */
	I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE |
		   default_credits);

	I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE |
		   credits | PFI_CREDIT_RESEND);

	/*
	 * FIXME is this guaranteed to clear
	 * immediately or should we poll for it?
	 */
	WARN_ON(I915_READ(GCI_CONTROL) & PFI_CREDIT_RESEND);
}
6008
27c329ed 6009static void valleyview_modeset_commit_cdclk(struct drm_atomic_state *old_state)
30a970c6 6010{
a821fc46 6011 struct drm_device *dev = old_state->dev;
30a970c6 6012 struct drm_i915_private *dev_priv = dev->dev_private;
1a617b77
ML
6013 struct intel_atomic_state *old_intel_state =
6014 to_intel_atomic_state(old_state);
6015 unsigned req_cdclk = old_intel_state->dev_cdclk;
30a970c6 6016
27c329ed
ML
6017 /*
6018 * FIXME: We can end up here with all power domains off, yet
6019 * with a CDCLK frequency other than the minimum. To account
6020 * for this take the PIPE-A power domain, which covers the HW
6021 * blocks needed for the following programming. This can be
6022 * removed once it's guaranteed that we get here either with
6023 * the minimum CDCLK set, or the required power domains
6024 * enabled.
6025 */
6026 intel_display_power_get(dev_priv, POWER_DOMAIN_PIPE_A);
738c05c0 6027
27c329ed
ML
6028 if (IS_CHERRYVIEW(dev))
6029 cherryview_set_cdclk(dev, req_cdclk);
6030 else
6031 valleyview_set_cdclk(dev, req_cdclk);
738c05c0 6032
27c329ed 6033 vlv_program_pfi_credits(dev_priv);
1e69cd74 6034
27c329ed 6035 intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A);
30a970c6
JB
6036}
6037
/*
 * Full VLV/CHV pipe enable sequence: timings -> pipeconf -> encoder
 * pre_pll hooks -> PLL -> encoder pre_enable hooks -> pfit/LUTs ->
 * pipe on -> vblank on -> encoder enable hooks. The ordering is the
 * hardware-mandated modeset sequence; do not reorder.
 */
static void valleyview_crtc_enable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	struct intel_crtc_state *pipe_config =
		to_intel_crtc_state(crtc->state);
	int pipe = intel_crtc->pipe;

	if (WARN_ON(intel_crtc->active))
		return;

	if (intel_crtc->config->has_dp_encoder)
		intel_dp_set_m_n(intel_crtc, M1_N1);

	intel_set_pipe_timings(intel_crtc);
	intel_set_pipe_src_size(intel_crtc);

	if (IS_CHERRYVIEW(dev) && pipe == PIPE_B) {
		/* NOTE(review): shadows the outer dev_priv — harmless but
		 * redundant; both resolve to the same device. */
		struct drm_i915_private *dev_priv = dev->dev_private;

		I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
		I915_WRITE(CHV_CANVAS(pipe), 0);
	}

	i9xx_set_pipeconf(intel_crtc);

	intel_crtc->active = true;

	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->pre_pll_enable)
			encoder->pre_pll_enable(encoder);

	if (IS_CHERRYVIEW(dev)) {
		chv_prepare_pll(intel_crtc, intel_crtc->config);
		chv_enable_pll(intel_crtc, intel_crtc->config);
	} else {
		vlv_prepare_pll(intel_crtc, intel_crtc->config);
		vlv_enable_pll(intel_crtc, intel_crtc->config);
	}

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->pre_enable)
			encoder->pre_enable(encoder);

	i9xx_pfit_enable(intel_crtc);

	intel_color_load_luts(&pipe_config->base);

	intel_update_watermarks(crtc);
	intel_enable_pipe(intel_crtc);

	assert_vblank_disabled(crtc);
	drm_crtc_vblank_on(crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->enable(encoder);
}
6099
/*
 * Write the precomputed FP0/FP1 PLL divider values from the crtc state
 * into the hardware for this pipe.
 */
static void i9xx_set_pll_dividers(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	I915_WRITE(FP0(crtc->pipe), crtc->config->dpll_hw_state.fp0);
	I915_WRITE(FP1(crtc->pipe), crtc->config->dpll_hw_state.fp1);
}
6108
0b8765c6 6109static void i9xx_crtc_enable(struct drm_crtc *crtc)
79e53945
JB
6110{
6111 struct drm_device *dev = crtc->dev;
a72e4c9f 6112 struct drm_i915_private *dev_priv = to_i915(dev);
79e53945 6113 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
ef9c3aee 6114 struct intel_encoder *encoder;
b95c5321
ML
6115 struct intel_crtc_state *pipe_config =
6116 to_intel_crtc_state(crtc->state);
cd2d34d9 6117 enum pipe pipe = intel_crtc->pipe;
79e53945 6118
53d9f4e9 6119 if (WARN_ON(intel_crtc->active))
f7abfe8b
CW
6120 return;
6121
f13c2ef3
DV
6122 i9xx_set_pll_dividers(intel_crtc);
6123
6e3c9717 6124 if (intel_crtc->config->has_dp_encoder)
fe3cd48d 6125 intel_dp_set_m_n(intel_crtc, M1_N1);
5b18e57c
DV
6126
6127 intel_set_pipe_timings(intel_crtc);
bc58be60 6128 intel_set_pipe_src_size(intel_crtc);
5b18e57c 6129
5b18e57c
DV
6130 i9xx_set_pipeconf(intel_crtc);
6131
f7abfe8b 6132 intel_crtc->active = true;
6b383a7f 6133
4a3436e8 6134 if (!IS_GEN2(dev))
a72e4c9f 6135 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
4a3436e8 6136
9d6d9f19
MK
6137 for_each_encoder_on_crtc(dev, crtc, encoder)
6138 if (encoder->pre_enable)
6139 encoder->pre_enable(encoder);
6140
f6736a1a
DV
6141 i9xx_enable_pll(intel_crtc);
6142
2dd24552
JB
6143 i9xx_pfit_enable(intel_crtc);
6144
b95c5321 6145 intel_color_load_luts(&pipe_config->base);
63cbb074 6146
f37fcc2a 6147 intel_update_watermarks(crtc);
e1fdc473 6148 intel_enable_pipe(intel_crtc);
be6a6f8e 6149
4b3a9526
VS
6150 assert_vblank_disabled(crtc);
6151 drm_crtc_vblank_on(crtc);
6152
f9b61ff6
DV
6153 for_each_encoder_on_crtc(dev, crtc, encoder)
6154 encoder->enable(encoder);
0b8765c6 6155}
79e53945 6156
/*
 * Disable the gmch panel fitter for @crtc, if it was in use.
 * The pipe must already be disabled (asserted below) since the fitter
 * may only be reprogrammed with the pipe off.
 */
static void i9xx_pfit_disable(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!crtc->config->gmch_pfit.control)
		return;

	assert_pipe_disabled(dev_priv, crtc->pipe);

	DRM_DEBUG_DRIVER("disabling pfit, current: 0x%08x\n",
			 I915_READ(PFIT_CONTROL));
	I915_WRITE(PFIT_CONTROL, 0);
}
6171
/*
 * Gen2-4/VLV/CHV pipe disable sequence: encoder disable hooks ->
 * vblank off -> pipe off -> pfit off -> encoder post_disable hooks ->
 * PLL off (skipped for DSI, which owns its PLL) -> encoder
 * post_pll_disable hooks. Ordering is the reverse of the enable path
 * and is hardware-mandated.
 */
static void i9xx_crtc_disable(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_encoder *encoder;
	int pipe = intel_crtc->pipe;

	/*
	 * On gen2 planes are double buffered but the pipe isn't, so we must
	 * wait for planes to fully turn off before disabling the pipe.
	 */
	if (IS_GEN2(dev))
		intel_wait_for_vblank(dev, pipe);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		encoder->disable(encoder);

	drm_crtc_vblank_off(crtc);
	assert_vblank_disabled(crtc);

	intel_disable_pipe(intel_crtc);

	i9xx_pfit_disable(intel_crtc);

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->post_disable)
			encoder->post_disable(encoder);

	if (!intel_crtc->config->has_dsi_encoder) {
		if (IS_CHERRYVIEW(dev))
			chv_disable_pll(dev_priv, pipe);
		else if (IS_VALLEYVIEW(dev))
			vlv_disable_pll(dev_priv, pipe);
		else
			i9xx_disable_pll(intel_crtc);
	}

	for_each_encoder_on_crtc(dev, crtc, encoder)
		if (encoder->post_pll_disable)
			encoder->post_pll_disable(encoder);

	/* Gen2 has no FIFO underrun reporting to turn back off. */
	if (!IS_GEN2(dev))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
}
6217
b17d48e2
ML
6218static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
6219{
842e0307 6220 struct intel_encoder *encoder;
b17d48e2
ML
6221 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6222 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
6223 enum intel_display_power_domain domain;
6224 unsigned long domains;
6225
6226 if (!intel_crtc->active)
6227 return;
6228
a539205a 6229 if (to_intel_plane_state(crtc->primary->state)->visible) {
fc32b1fd
ML
6230 WARN_ON(intel_crtc->unpin_work);
6231
2622a081 6232 intel_pre_disable_primary_noatomic(crtc);
54a41961
ML
6233
6234 intel_crtc_disable_planes(crtc, 1 << drm_plane_index(crtc->primary));
6235 to_intel_plane_state(crtc->primary->state)->visible = false;
a539205a
ML
6236 }
6237
b17d48e2 6238 dev_priv->display.crtc_disable(crtc);
842e0307
ML
6239
6240 DRM_DEBUG_KMS("[CRTC:%d] hw state adjusted, was enabled, now disabled\n",
6241 crtc->base.id);
6242
6243 WARN_ON(drm_atomic_set_mode_for_crtc(crtc->state, NULL) < 0);
6244 crtc->state->active = false;
37d9078b 6245 intel_crtc->active = false;
842e0307
ML
6246 crtc->enabled = false;
6247 crtc->state->connector_mask = 0;
6248 crtc->state->encoder_mask = 0;
6249
6250 for_each_encoder_on_crtc(crtc->dev, crtc, encoder)
6251 encoder->base.crtc = NULL;
6252
58f9c0bc 6253 intel_fbc_disable(intel_crtc);
37d9078b 6254 intel_update_watermarks(crtc);
1f7457b1 6255 intel_disable_shared_dpll(intel_crtc);
b17d48e2
ML
6256
6257 domains = intel_crtc->enabled_power_domains;
6258 for_each_power_domain(domain, domains)
6259 intel_display_power_put(dev_priv, domain);
6260 intel_crtc->enabled_power_domains = 0;
565602d7
ML
6261
6262 dev_priv->active_crtcs &= ~(1 << intel_crtc->pipe);
6263 dev_priv->min_pixclk[intel_crtc->pipe] = 0;
b17d48e2
ML
6264}
6265
6b72d486
ML
6266/*
6267 * turn all crtc's off, but do not adjust state
6268 * This has to be paired with a call to intel_modeset_setup_hw_state.
6269 */
70e0bd74 6270int intel_display_suspend(struct drm_device *dev)
ee7b9f93 6271{
e2c8b870 6272 struct drm_i915_private *dev_priv = to_i915(dev);
70e0bd74 6273 struct drm_atomic_state *state;
e2c8b870 6274 int ret;
70e0bd74 6275
e2c8b870
ML
6276 state = drm_atomic_helper_suspend(dev);
6277 ret = PTR_ERR_OR_ZERO(state);
70e0bd74
ML
6278 if (ret)
6279 DRM_ERROR("Suspending crtc's failed with %i\n", ret);
e2c8b870
ML
6280 else
6281 dev_priv->modeset_restore_state = state;
70e0bd74 6282 return ret;
ee7b9f93
JB
6283}
6284
/* Common drm_encoder .destroy callback: unregister and free the encoder. */
void intel_encoder_destroy(struct drm_encoder *encoder)
{
	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

	drm_encoder_cleanup(encoder);
	kfree(intel_encoder);
}
6292
0a91ca29
DV
6293/* Cross check the actual hw state with our own modeset state tracking (and it's
6294 * internal consistency). */
c0ead703 6295static void intel_connector_verify_state(struct intel_connector *connector)
79e53945 6296{
35dd3c64
ML
6297 struct drm_crtc *crtc = connector->base.state->crtc;
6298
6299 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
6300 connector->base.base.id,
6301 connector->base.name);
6302
0a91ca29 6303 if (connector->get_hw_state(connector)) {
e85376cb 6304 struct intel_encoder *encoder = connector->encoder;
35dd3c64 6305 struct drm_connector_state *conn_state = connector->base.state;
0a91ca29 6306
35dd3c64
ML
6307 I915_STATE_WARN(!crtc,
6308 "connector enabled without attached crtc\n");
0a91ca29 6309
35dd3c64
ML
6310 if (!crtc)
6311 return;
6312
6313 I915_STATE_WARN(!crtc->state->active,
6314 "connector is active, but attached crtc isn't\n");
6315
e85376cb 6316 if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
35dd3c64
ML
6317 return;
6318
e85376cb 6319 I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
35dd3c64
ML
6320 "atomic encoder doesn't match attached encoder\n");
6321
e85376cb 6322 I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
35dd3c64
ML
6323 "attached encoder crtc differs from connector crtc\n");
6324 } else {
4d688a2a
ML
6325 I915_STATE_WARN(crtc && crtc->state->active,
6326 "attached crtc is active, but connector isn't\n");
35dd3c64
ML
6327 I915_STATE_WARN(!crtc && connector->base.state->best_encoder,
6328 "best encoder set without crtc!\n");
0a91ca29 6329 }
79e53945
JB
6330}
6331
08d9bc92
ACO
6332int intel_connector_init(struct intel_connector *connector)
6333{
5350a031 6334 drm_atomic_helper_connector_reset(&connector->base);
08d9bc92 6335
5350a031 6336 if (!connector->base.state)
08d9bc92
ACO
6337 return -ENOMEM;
6338
08d9bc92
ACO
6339 return 0;
6340}
6341
6342struct intel_connector *intel_connector_alloc(void)
6343{
6344 struct intel_connector *connector;
6345
6346 connector = kzalloc(sizeof *connector, GFP_KERNEL);
6347 if (!connector)
6348 return NULL;
6349
6350 if (intel_connector_init(connector) < 0) {
6351 kfree(connector);
6352 return NULL;
6353 }
6354
6355 return connector;
6356}
6357
f0947c37
DV
6358/* Simple connector->get_hw_state implementation for encoders that support only
6359 * one connector and no cloning and hence the encoder state determines the state
6360 * of the connector. */
6361bool intel_connector_get_hw_state(struct intel_connector *connector)
ea5b213a 6362{
24929352 6363 enum pipe pipe = 0;
f0947c37 6364 struct intel_encoder *encoder = connector->encoder;
ea5b213a 6365
f0947c37 6366 return encoder->get_hw_state(encoder, &pipe);
ea5b213a
CW
6367}
6368
6d293983 6369static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
d272ddfa 6370{
6d293983
ACO
6371 if (crtc_state->base.enable && crtc_state->has_pch_encoder)
6372 return crtc_state->fdi_lanes;
d272ddfa
VS
6373
6374 return 0;
6375}
6376
6d293983 6377static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
5cec258b 6378 struct intel_crtc_state *pipe_config)
1857e1da 6379{
6d293983
ACO
6380 struct drm_atomic_state *state = pipe_config->base.state;
6381 struct intel_crtc *other_crtc;
6382 struct intel_crtc_state *other_crtc_state;
6383
1857e1da
DV
6384 DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
6385 pipe_name(pipe), pipe_config->fdi_lanes);
6386 if (pipe_config->fdi_lanes > 4) {
6387 DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
6388 pipe_name(pipe), pipe_config->fdi_lanes);
6d293983 6389 return -EINVAL;
1857e1da
DV
6390 }
6391
bafb6553 6392 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
1857e1da
DV
6393 if (pipe_config->fdi_lanes > 2) {
6394 DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
6395 pipe_config->fdi_lanes);
6d293983 6396 return -EINVAL;
1857e1da 6397 } else {
6d293983 6398 return 0;
1857e1da
DV
6399 }
6400 }
6401
6402 if (INTEL_INFO(dev)->num_pipes == 2)
6d293983 6403 return 0;
1857e1da
DV
6404
6405 /* Ivybridge 3 pipe is really complicated */
6406 switch (pipe) {
6407 case PIPE_A:
6d293983 6408 return 0;
1857e1da 6409 case PIPE_B:
6d293983
ACO
6410 if (pipe_config->fdi_lanes <= 2)
6411 return 0;
6412
6413 other_crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, PIPE_C));
6414 other_crtc_state =
6415 intel_atomic_get_crtc_state(state, other_crtc);
6416 if (IS_ERR(other_crtc_state))
6417 return PTR_ERR(other_crtc_state);
6418
6419 if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
1857e1da
DV
6420 DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
6421 pipe_name(pipe), pipe_config->fdi_lanes);
6d293983 6422 return -EINVAL;
1857e1da 6423 }
6d293983 6424 return 0;
1857e1da 6425 case PIPE_C:
251cc67c
VS
6426 if (pipe_config->fdi_lanes > 2) {
6427 DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n",
6428 pipe_name(pipe), pipe_config->fdi_lanes);
6d293983 6429 return -EINVAL;
251cc67c 6430 }
6d293983
ACO
6431
6432 other_crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev, PIPE_B));
6433 other_crtc_state =
6434 intel_atomic_get_crtc_state(state, other_crtc);
6435 if (IS_ERR(other_crtc_state))
6436 return PTR_ERR(other_crtc_state);
6437
6438 if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
1857e1da 6439 DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
6d293983 6440 return -EINVAL;
1857e1da 6441 }
6d293983 6442 return 0;
1857e1da
DV
6443 default:
6444 BUG();
6445 }
6446}
6447
e29c22c0
DV
6448#define RETRY 1
6449static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
5cec258b 6450 struct intel_crtc_state *pipe_config)
877d48d5 6451{
1857e1da 6452 struct drm_device *dev = intel_crtc->base.dev;
7c5f93b0 6453 const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
6d293983
ACO
6454 int lane, link_bw, fdi_dotclock, ret;
6455 bool needs_recompute = false;
877d48d5 6456
e29c22c0 6457retry:
877d48d5
DV
6458 /* FDI is a binary signal running at ~2.7GHz, encoding
6459 * each output octet as 10 bits. The actual frequency
6460 * is stored as a divider into a 100MHz clock, and the
6461 * mode pixel clock is stored in units of 1KHz.
6462 * Hence the bw of each lane in terms of the mode signal
6463 * is:
6464 */
21a727b3 6465 link_bw = intel_fdi_link_freq(to_i915(dev), pipe_config);
877d48d5 6466
241bfc38 6467 fdi_dotclock = adjusted_mode->crtc_clock;
877d48d5 6468
2bd89a07 6469 lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
877d48d5
DV
6470 pipe_config->pipe_bpp);
6471
6472 pipe_config->fdi_lanes = lane;
6473
2bd89a07 6474 intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
877d48d5 6475 link_bw, &pipe_config->fdi_m_n);
1857e1da 6476
e3b247da 6477 ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
6d293983 6478 if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
e29c22c0
DV
6479 pipe_config->pipe_bpp -= 2*3;
6480 DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
6481 pipe_config->pipe_bpp);
6482 needs_recompute = true;
6483 pipe_config->bw_constrained = true;
6484
6485 goto retry;
6486 }
6487
6488 if (needs_recompute)
6489 return RETRY;
6490
6d293983 6491 return ret;
877d48d5
DV
6492}
6493
8cfb3407
VS
6494static bool pipe_config_supports_ips(struct drm_i915_private *dev_priv,
6495 struct intel_crtc_state *pipe_config)
6496{
6497 if (pipe_config->pipe_bpp > 24)
6498 return false;
6499
6500 /* HSW can handle pixel rate up to cdclk? */
2d1fe073 6501 if (IS_HASWELL(dev_priv))
8cfb3407
VS
6502 return true;
6503
6504 /*
b432e5cf
VS
6505 * We compare against max which means we must take
6506 * the increased cdclk requirement into account when
6507 * calculating the new cdclk.
6508 *
6509 * Should measure whether using a lower cdclk w/o IPS
8cfb3407
VS
6510 */
6511 return ilk_pipe_pixel_rate(pipe_config) <=
6512 dev_priv->max_cdclk_freq * 95 / 100;
6513}
6514
42db64ef 6515static void hsw_compute_ips_config(struct intel_crtc *crtc,
5cec258b 6516 struct intel_crtc_state *pipe_config)
42db64ef 6517{
8cfb3407
VS
6518 struct drm_device *dev = crtc->base.dev;
6519 struct drm_i915_private *dev_priv = dev->dev_private;
6520
d330a953 6521 pipe_config->ips_enabled = i915.enable_ips &&
8cfb3407
VS
6522 hsw_crtc_supports_ips(crtc) &&
6523 pipe_config_supports_ips(dev_priv, pipe_config);
42db64ef
PZ
6524}
6525
39acb4aa
VS
6526static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
6527{
6528 const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6529
6530 /* GDG double wide on either pipe, otherwise pipe A only */
6531 return INTEL_INFO(dev_priv)->gen < 4 &&
6532 (crtc->pipe == PIPE_A || IS_I915G(dev_priv));
6533}
6534
a43f6e0f 6535static int intel_crtc_compute_config(struct intel_crtc *crtc,
5cec258b 6536 struct intel_crtc_state *pipe_config)
79e53945 6537{
a43f6e0f 6538 struct drm_device *dev = crtc->base.dev;
8bd31e67 6539 struct drm_i915_private *dev_priv = dev->dev_private;
7c5f93b0 6540 const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
89749350 6541
ad3a4479 6542 /* FIXME should check pixel clock limits on all platforms */
cf532bb2 6543 if (INTEL_INFO(dev)->gen < 4) {
39acb4aa 6544 int clock_limit = dev_priv->max_cdclk_freq * 9 / 10;
cf532bb2
VS
6545
6546 /*
39acb4aa 6547 * Enable double wide mode when the dot clock
cf532bb2 6548 * is > 90% of the (display) core speed.
cf532bb2 6549 */
39acb4aa
VS
6550 if (intel_crtc_supports_double_wide(crtc) &&
6551 adjusted_mode->crtc_clock > clock_limit) {
ad3a4479 6552 clock_limit *= 2;
cf532bb2 6553 pipe_config->double_wide = true;
ad3a4479
VS
6554 }
6555
39acb4aa
VS
6556 if (adjusted_mode->crtc_clock > clock_limit) {
6557 DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
6558 adjusted_mode->crtc_clock, clock_limit,
6559 yesno(pipe_config->double_wide));
e29c22c0 6560 return -EINVAL;
39acb4aa 6561 }
2c07245f 6562 }
89749350 6563
1d1d0e27
VS
6564 /*
6565 * Pipe horizontal size must be even in:
6566 * - DVO ganged mode
6567 * - LVDS dual channel mode
6568 * - Double wide pipe
6569 */
a93e255f 6570 if ((intel_pipe_will_have_type(pipe_config, INTEL_OUTPUT_LVDS) &&
1d1d0e27
VS
6571 intel_is_dual_link_lvds(dev)) || pipe_config->double_wide)
6572 pipe_config->pipe_src_w &= ~1;
6573
8693a824
DL
6574 /* Cantiga+ cannot handle modes with a hsync front porch of 0.
6575 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
44f46b42
CW
6576 */
6577 if ((INTEL_INFO(dev)->gen > 4 || IS_G4X(dev)) &&
aad941d5 6578 adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay)
e29c22c0 6579 return -EINVAL;
44f46b42 6580
f5adf94e 6581 if (HAS_IPS(dev))
a43f6e0f
DV
6582 hsw_compute_ips_config(crtc, pipe_config);
6583
877d48d5 6584 if (pipe_config->has_pch_encoder)
a43f6e0f 6585 return ironlake_fdi_compute_config(crtc, pipe_config);
877d48d5 6586
cf5a15be 6587 return 0;
79e53945
JB
6588}
6589
1652d19e
VS
6590static int skylake_get_display_clock_speed(struct drm_device *dev)
6591{
6592 struct drm_i915_private *dev_priv = to_i915(dev);
6593 uint32_t lcpll1 = I915_READ(LCPLL1_CTL);
6594 uint32_t cdctl = I915_READ(CDCLK_CTL);
6595 uint32_t linkrate;
6596
414355a7 6597 if (!(lcpll1 & LCPLL_PLL_ENABLE))
1652d19e 6598 return 24000; /* 24MHz is the cd freq with NSSC ref */
1652d19e
VS
6599
6600 if ((cdctl & CDCLK_FREQ_SEL_MASK) == CDCLK_FREQ_540)
6601 return 540000;
6602
6603 linkrate = (I915_READ(DPLL_CTRL1) &
71cd8423 6604 DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)) >> 1;
1652d19e 6605
71cd8423
DL
6606 if (linkrate == DPLL_CTRL1_LINK_RATE_2160 ||
6607 linkrate == DPLL_CTRL1_LINK_RATE_1080) {
1652d19e
VS
6608 /* vco 8640 */
6609 switch (cdctl & CDCLK_FREQ_SEL_MASK) {
6610 case CDCLK_FREQ_450_432:
6611 return 432000;
6612 case CDCLK_FREQ_337_308:
6613 return 308570;
6614 case CDCLK_FREQ_675_617:
6615 return 617140;
6616 default:
6617 WARN(1, "Unknown cd freq selection\n");
6618 }
6619 } else {
6620 /* vco 8100 */
6621 switch (cdctl & CDCLK_FREQ_SEL_MASK) {
6622 case CDCLK_FREQ_450_432:
6623 return 450000;
6624 case CDCLK_FREQ_337_308:
6625 return 337500;
6626 case CDCLK_FREQ_675_617:
6627 return 675000;
6628 default:
6629 WARN(1, "Unknown cd freq selection\n");
6630 }
6631 }
6632
6633 /* error case, do as if DPLL0 isn't enabled */
6634 return 24000;
6635}
6636
acd3f3d3
BP
6637static int broxton_get_display_clock_speed(struct drm_device *dev)
6638{
6639 struct drm_i915_private *dev_priv = to_i915(dev);
6640 uint32_t cdctl = I915_READ(CDCLK_CTL);
6641 uint32_t pll_ratio = I915_READ(BXT_DE_PLL_CTL) & BXT_DE_PLL_RATIO_MASK;
6642 uint32_t pll_enab = I915_READ(BXT_DE_PLL_ENABLE);
6643 int cdclk;
6644
6645 if (!(pll_enab & BXT_DE_PLL_PLL_ENABLE))
6646 return 19200;
6647
6648 cdclk = 19200 * pll_ratio / 2;
6649
6650 switch (cdctl & BXT_CDCLK_CD2X_DIV_SEL_MASK) {
6651 case BXT_CDCLK_CD2X_DIV_SEL_1:
6652 return cdclk; /* 576MHz or 624MHz */
6653 case BXT_CDCLK_CD2X_DIV_SEL_1_5:
6654 return cdclk * 2 / 3; /* 384MHz */
6655 case BXT_CDCLK_CD2X_DIV_SEL_2:
6656 return cdclk / 2; /* 288MHz */
6657 case BXT_CDCLK_CD2X_DIV_SEL_4:
6658 return cdclk / 4; /* 144MHz */
6659 }
6660
6661 /* error case, do as if DE PLL isn't enabled */
6662 return 19200;
6663}
6664
1652d19e
VS
6665static int broadwell_get_display_clock_speed(struct drm_device *dev)
6666{
6667 struct drm_i915_private *dev_priv = dev->dev_private;
6668 uint32_t lcpll = I915_READ(LCPLL_CTL);
6669 uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
6670
6671 if (lcpll & LCPLL_CD_SOURCE_FCLK)
6672 return 800000;
6673 else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
6674 return 450000;
6675 else if (freq == LCPLL_CLK_FREQ_450)
6676 return 450000;
6677 else if (freq == LCPLL_CLK_FREQ_54O_BDW)
6678 return 540000;
6679 else if (freq == LCPLL_CLK_FREQ_337_5_BDW)
6680 return 337500;
6681 else
6682 return 675000;
6683}
6684
6685static int haswell_get_display_clock_speed(struct drm_device *dev)
6686{
6687 struct drm_i915_private *dev_priv = dev->dev_private;
6688 uint32_t lcpll = I915_READ(LCPLL_CTL);
6689 uint32_t freq = lcpll & LCPLL_CLK_FREQ_MASK;
6690
6691 if (lcpll & LCPLL_CD_SOURCE_FCLK)
6692 return 800000;
6693 else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
6694 return 450000;
6695 else if (freq == LCPLL_CLK_FREQ_450)
6696 return 450000;
6697 else if (IS_HSW_ULT(dev))
6698 return 337500;
6699 else
6700 return 540000;
79e53945
JB
6701}
6702
25eb05fc
JB
6703static int valleyview_get_display_clock_speed(struct drm_device *dev)
6704{
bfa7df01
VS
6705 return vlv_get_cck_clock_hpll(to_i915(dev), "cdclk",
6706 CCK_DISPLAY_CLOCK_CONTROL);
25eb05fc
JB
6707}
6708
/* Ironlake has a fixed 450MHz display core clock. */
static int ilk_get_display_clock_speed(struct drm_device *dev)
{
	return 450000;
}
6713
/* i945 has a fixed 400MHz display core clock. */
static int i945_get_display_clock_speed(struct drm_device *dev)
{
	return 400000;
}
79e53945 6718
/* i915 has a fixed 333.33MHz display core clock. */
static int i915_get_display_clock_speed(struct drm_device *dev)
{
	return 333333;
}
79e53945 6723
/* Fallback for remaining i9xx parts: fixed 200MHz display core clock. */
static int i9xx_misc_get_display_clock_speed(struct drm_device *dev)
{
	return 200000;
}
79e53945 6728
257a7ffc
DV
6729static int pnv_get_display_clock_speed(struct drm_device *dev)
6730{
6731 u16 gcfgc = 0;
6732
6733 pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
6734
6735 switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
6736 case GC_DISPLAY_CLOCK_267_MHZ_PNV:
e907f170 6737 return 266667;
257a7ffc 6738 case GC_DISPLAY_CLOCK_333_MHZ_PNV:
e907f170 6739 return 333333;
257a7ffc 6740 case GC_DISPLAY_CLOCK_444_MHZ_PNV:
e907f170 6741 return 444444;
257a7ffc
DV
6742 case GC_DISPLAY_CLOCK_200_MHZ_PNV:
6743 return 200000;
6744 default:
6745 DRM_ERROR("Unknown pnv display core clock 0x%04x\n", gcfgc);
6746 case GC_DISPLAY_CLOCK_133_MHZ_PNV:
e907f170 6747 return 133333;
257a7ffc 6748 case GC_DISPLAY_CLOCK_167_MHZ_PNV:
e907f170 6749 return 166667;
257a7ffc
DV
6750 }
6751}
6752
e70236a8
JB
6753static int i915gm_get_display_clock_speed(struct drm_device *dev)
6754{
6755 u16 gcfgc = 0;
79e53945 6756
e70236a8
JB
6757 pci_read_config_word(dev->pdev, GCFGC, &gcfgc);
6758
6759 if (gcfgc & GC_LOW_FREQUENCY_ENABLE)
e907f170 6760 return 133333;
e70236a8
JB
6761 else {
6762 switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
6763 case GC_DISPLAY_CLOCK_333_MHZ:
e907f170 6764 return 333333;
e70236a8
JB
6765 default:
6766 case GC_DISPLAY_CLOCK_190_200_MHZ:
6767 return 190000;
79e53945 6768 }
e70236a8
JB
6769 }
6770}
6771
/* i865 has a fixed 266.67MHz display core clock. */
static int i865_get_display_clock_speed(struct drm_device *dev)
{
	return 266667;
}
6776
1b1d2716 6777static int i85x_get_display_clock_speed(struct drm_device *dev)
e70236a8
JB
6778{
6779 u16 hpllcc = 0;
1b1d2716 6780
65cd2b3f
VS
6781 /*
6782 * 852GM/852GMV only supports 133 MHz and the HPLLCC
6783 * encoding is different :(
6784 * FIXME is this the right way to detect 852GM/852GMV?
6785 */
6786 if (dev->pdev->revision == 0x1)
6787 return 133333;
6788
1b1d2716
VS
6789 pci_bus_read_config_word(dev->pdev->bus,
6790 PCI_DEVFN(0, 3), HPLLCC, &hpllcc);
6791
e70236a8
JB
6792 /* Assume that the hardware is in the high speed state. This
6793 * should be the default.
6794 */
6795 switch (hpllcc & GC_CLOCK_CONTROL_MASK) {
6796 case GC_CLOCK_133_200:
1b1d2716 6797 case GC_CLOCK_133_200_2:
e70236a8
JB
6798 case GC_CLOCK_100_200:
6799 return 200000;
6800 case GC_CLOCK_166_250:
6801 return 250000;
6802 case GC_CLOCK_100_133:
e907f170 6803 return 133333;
1b1d2716
VS
6804 case GC_CLOCK_133_266:
6805 case GC_CLOCK_133_266_2:
6806 case GC_CLOCK_166_266:
6807 return 266667;
e70236a8 6808 }
79e53945 6809
e70236a8
JB
6810 /* Shouldn't happen */
6811 return 0;
6812}
79e53945 6813
/* i830 has a fixed 133.33MHz display core clock. */
static int i830_get_display_clock_speed(struct drm_device *dev)
{
	return 133333;
}
6818
34edce2f
VS
6819static unsigned int intel_hpll_vco(struct drm_device *dev)
6820{
6821 struct drm_i915_private *dev_priv = dev->dev_private;
6822 static const unsigned int blb_vco[8] = {
6823 [0] = 3200000,
6824 [1] = 4000000,
6825 [2] = 5333333,
6826 [3] = 4800000,
6827 [4] = 6400000,
6828 };
6829 static const unsigned int pnv_vco[8] = {
6830 [0] = 3200000,
6831 [1] = 4000000,
6832 [2] = 5333333,
6833 [3] = 4800000,
6834 [4] = 2666667,
6835 };
6836 static const unsigned int cl_vco[8] = {
6837 [0] = 3200000,
6838 [1] = 4000000,
6839 [2] = 5333333,
6840 [3] = 6400000,
6841 [4] = 3333333,
6842 [5] = 3566667,
6843 [6] = 4266667,
6844 };
6845 static const unsigned int elk_vco[8] = {
6846 [0] = 3200000,
6847 [1] = 4000000,
6848 [2] = 5333333,
6849 [3] = 4800000,
6850 };
6851 static const unsigned int ctg_vco[8] = {
6852 [0] = 3200000,
6853 [1] = 4000000,
6854 [2] = 5333333,
6855 [3] = 6400000,
6856 [4] = 2666667,
6857 [5] = 4266667,
6858 };
6859 const unsigned int *vco_table;
6860 unsigned int vco;
6861 uint8_t tmp = 0;
6862
6863 /* FIXME other chipsets? */
6864 if (IS_GM45(dev))
6865 vco_table = ctg_vco;
6866 else if (IS_G4X(dev))
6867 vco_table = elk_vco;
6868 else if (IS_CRESTLINE(dev))
6869 vco_table = cl_vco;
6870 else if (IS_PINEVIEW(dev))
6871 vco_table = pnv_vco;
6872 else if (IS_G33(dev))
6873 vco_table = blb_vco;
6874 else
6875 return 0;
6876
6877 tmp = I915_READ(IS_MOBILE(dev) ? HPLLVCO_MOBILE : HPLLVCO);
6878
6879 vco = vco_table[tmp & 0x7];
6880 if (vco == 0)
6881 DRM_ERROR("Bad HPLL VCO (HPLLVCO=0x%02x)\n", tmp);
6882 else
6883 DRM_DEBUG_KMS("HPLL VCO %u kHz\n", vco);
6884
6885 return vco;
6886}
6887
6888static int gm45_get_display_clock_speed(struct drm_device *dev)
6889{
6890 unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
6891 uint16_t tmp = 0;
6892
6893 pci_read_config_word(dev->pdev, GCFGC, &tmp);
6894
6895 cdclk_sel = (tmp >> 12) & 0x1;
6896
6897 switch (vco) {
6898 case 2666667:
6899 case 4000000:
6900 case 5333333:
6901 return cdclk_sel ? 333333 : 222222;
6902 case 3200000:
6903 return cdclk_sel ? 320000 : 228571;
6904 default:
6905 DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u, CFGC=0x%04x\n", vco, tmp);
6906 return 222222;
6907 }
6908}
6909
6910static int i965gm_get_display_clock_speed(struct drm_device *dev)
6911{
6912 static const uint8_t div_3200[] = { 16, 10, 8 };
6913 static const uint8_t div_4000[] = { 20, 12, 10 };
6914 static const uint8_t div_5333[] = { 24, 16, 14 };
6915 const uint8_t *div_table;
6916 unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
6917 uint16_t tmp = 0;
6918
6919 pci_read_config_word(dev->pdev, GCFGC, &tmp);
6920
6921 cdclk_sel = ((tmp >> 8) & 0x1f) - 1;
6922
6923 if (cdclk_sel >= ARRAY_SIZE(div_3200))
6924 goto fail;
6925
6926 switch (vco) {
6927 case 3200000:
6928 div_table = div_3200;
6929 break;
6930 case 4000000:
6931 div_table = div_4000;
6932 break;
6933 case 5333333:
6934 div_table = div_5333;
6935 break;
6936 default:
6937 goto fail;
6938 }
6939
6940 return DIV_ROUND_CLOSEST(vco, div_table[cdclk_sel]);
6941
caf4e252 6942fail:
34edce2f
VS
6943 DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%04x\n", vco, tmp);
6944 return 200000;
6945}
6946
6947static int g33_get_display_clock_speed(struct drm_device *dev)
6948{
6949 static const uint8_t div_3200[] = { 12, 10, 8, 7, 5, 16 };
6950 static const uint8_t div_4000[] = { 14, 12, 10, 8, 6, 20 };
6951 static const uint8_t div_4800[] = { 20, 14, 12, 10, 8, 24 };
6952 static const uint8_t div_5333[] = { 20, 16, 12, 12, 8, 28 };
6953 const uint8_t *div_table;
6954 unsigned int cdclk_sel, vco = intel_hpll_vco(dev);
6955 uint16_t tmp = 0;
6956
6957 pci_read_config_word(dev->pdev, GCFGC, &tmp);
6958
6959 cdclk_sel = (tmp >> 4) & 0x7;
6960
6961 if (cdclk_sel >= ARRAY_SIZE(div_3200))
6962 goto fail;
6963
6964 switch (vco) {
6965 case 3200000:
6966 div_table = div_3200;
6967 break;
6968 case 4000000:
6969 div_table = div_4000;
6970 break;
6971 case 4800000:
6972 div_table = div_4800;
6973 break;
6974 case 5333333:
6975 div_table = div_5333;
6976 break;
6977 default:
6978 goto fail;
6979 }
6980
6981 return DIV_ROUND_CLOSEST(vco, div_table[cdclk_sel]);
6982
caf4e252 6983fail:
34edce2f
VS
6984 DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%08x\n", vco, tmp);
6985 return 190476;
6986}
6987
2c07245f 6988static void
a65851af 6989intel_reduce_m_n_ratio(uint32_t *num, uint32_t *den)
2c07245f 6990{
a65851af
VS
6991 while (*num > DATA_LINK_M_N_MASK ||
6992 *den > DATA_LINK_M_N_MASK) {
2c07245f
ZW
6993 *num >>= 1;
6994 *den >>= 1;
6995 }
6996}
6997
a65851af
VS
6998static void compute_m_n(unsigned int m, unsigned int n,
6999 uint32_t *ret_m, uint32_t *ret_n)
7000{
7001 *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
7002 *ret_m = div_u64((uint64_t) m * *ret_n, n);
7003 intel_reduce_m_n_ratio(ret_m, ret_n);
7004}
7005
e69d0bc1
DV
7006void
7007intel_link_compute_m_n(int bits_per_pixel, int nlanes,
7008 int pixel_clock, int link_clock,
7009 struct intel_link_m_n *m_n)
2c07245f 7010{
e69d0bc1 7011 m_n->tu = 64;
a65851af
VS
7012
7013 compute_m_n(bits_per_pixel * pixel_clock,
7014 link_clock * nlanes * 8,
7015 &m_n->gmch_m, &m_n->gmch_n);
7016
7017 compute_m_n(pixel_clock, link_clock,
7018 &m_n->link_m, &m_n->link_n);
2c07245f
ZW
7019}
7020
a7615030
CW
7021static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
7022{
d330a953
JN
7023 if (i915.panel_use_ssc >= 0)
7024 return i915.panel_use_ssc != 0;
41aa3448 7025 return dev_priv->vbt.lvds_use_ssc
435793df 7026 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
a7615030
CW
7027}
7028
7429e9d4 7029static uint32_t pnv_dpll_compute_fp(struct dpll *dpll)
c65d77d8 7030{
7df00d7a 7031 return (1 << dpll->n) << 16 | dpll->m2;
7429e9d4 7032}
f47709a9 7033
7429e9d4
DV
7034static uint32_t i9xx_dpll_compute_fp(struct dpll *dpll)
7035{
7036 return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
c65d77d8
JB
7037}
7038
f47709a9 7039static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
190f68c5 7040 struct intel_crtc_state *crtc_state,
9e2c8475 7041 struct dpll *reduced_clock)
a7516a05 7042{
f47709a9 7043 struct drm_device *dev = crtc->base.dev;
a7516a05
JB
7044 u32 fp, fp2 = 0;
7045
7046 if (IS_PINEVIEW(dev)) {
190f68c5 7047 fp = pnv_dpll_compute_fp(&crtc_state->dpll);
a7516a05 7048 if (reduced_clock)
7429e9d4 7049 fp2 = pnv_dpll_compute_fp(reduced_clock);
a7516a05 7050 } else {
190f68c5 7051 fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
a7516a05 7052 if (reduced_clock)
7429e9d4 7053 fp2 = i9xx_dpll_compute_fp(reduced_clock);
a7516a05
JB
7054 }
7055
190f68c5 7056 crtc_state->dpll_hw_state.fp0 = fp;
a7516a05 7057
f47709a9 7058 crtc->lowfreq_avail = false;
a93e255f 7059 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
ab585dea 7060 reduced_clock) {
190f68c5 7061 crtc_state->dpll_hw_state.fp1 = fp2;
f47709a9 7062 crtc->lowfreq_avail = true;
a7516a05 7063 } else {
190f68c5 7064 crtc_state->dpll_hw_state.fp1 = fp;
a7516a05
JB
7065 }
7066}
7067
5e69f97f
CML
7068static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
7069 pipe)
89b667f8
JB
7070{
7071 u32 reg_val;
7072
7073 /*
7074 * PLLB opamp always calibrates to max value of 0x3f, force enable it
7075 * and set it to a reasonable value instead.
7076 */
ab3c759a 7077 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
89b667f8
JB
7078 reg_val &= 0xffffff00;
7079 reg_val |= 0x00000030;
ab3c759a 7080 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
89b667f8 7081
ab3c759a 7082 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
89b667f8
JB
7083 reg_val &= 0x8cffffff;
7084 reg_val = 0x8c000000;
ab3c759a 7085 vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
89b667f8 7086
ab3c759a 7087 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
89b667f8 7088 reg_val &= 0xffffff00;
ab3c759a 7089 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
89b667f8 7090
ab3c759a 7091 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
89b667f8
JB
7092 reg_val &= 0x00ffffff;
7093 reg_val |= 0xb0000000;
ab3c759a 7094 vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
89b667f8
JB
7095}
7096
b551842d
DV
7097static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc,
7098 struct intel_link_m_n *m_n)
7099{
7100 struct drm_device *dev = crtc->base.dev;
7101 struct drm_i915_private *dev_priv = dev->dev_private;
7102 int pipe = crtc->pipe;
7103
e3b95f1e
DV
7104 I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
7105 I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
7106 I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
7107 I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
b551842d
DV
7108}
7109
7110static void intel_cpu_transcoder_set_m_n(struct intel_crtc *crtc,
f769cd24
VK
7111 struct intel_link_m_n *m_n,
7112 struct intel_link_m_n *m2_n2)
b551842d
DV
7113{
7114 struct drm_device *dev = crtc->base.dev;
7115 struct drm_i915_private *dev_priv = dev->dev_private;
7116 int pipe = crtc->pipe;
6e3c9717 7117 enum transcoder transcoder = crtc->config->cpu_transcoder;
b551842d
DV
7118
7119 if (INTEL_INFO(dev)->gen >= 5) {
7120 I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
7121 I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
7122 I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
7123 I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
f769cd24
VK
7124 /* M2_N2 registers to be set only for gen < 8 (M2_N2 available
7125 * for gen < 8) and if DRRS is supported (to make sure the
7126 * registers are not unnecessarily accessed).
7127 */
44395bfe 7128 if (m2_n2 && (IS_CHERRYVIEW(dev) || INTEL_INFO(dev)->gen < 8) &&
6e3c9717 7129 crtc->config->has_drrs) {
f769cd24
VK
7130 I915_WRITE(PIPE_DATA_M2(transcoder),
7131 TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
7132 I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
7133 I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m);
7134 I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n);
7135 }
b551842d 7136 } else {
e3b95f1e
DV
7137 I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
7138 I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
7139 I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
7140 I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
b551842d
DV
7141 }
7142}
7143
fe3cd48d 7144void intel_dp_set_m_n(struct intel_crtc *crtc, enum link_m_n_set m_n)
03afc4a2 7145{
fe3cd48d
R
7146 struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
7147
7148 if (m_n == M1_N1) {
7149 dp_m_n = &crtc->config->dp_m_n;
7150 dp_m2_n2 = &crtc->config->dp_m2_n2;
7151 } else if (m_n == M2_N2) {
7152
7153 /*
7154 * M2_N2 registers are not supported. Hence m2_n2 divider value
7155 * needs to be programmed into M1_N1.
7156 */
7157 dp_m_n = &crtc->config->dp_m2_n2;
7158 } else {
7159 DRM_ERROR("Unsupported divider value\n");
7160 return;
7161 }
7162
6e3c9717
ACO
7163 if (crtc->config->has_pch_encoder)
7164 intel_pch_transcoder_set_m_n(crtc, &crtc->config->dp_m_n);
03afc4a2 7165 else
fe3cd48d 7166 intel_cpu_transcoder_set_m_n(crtc, dp_m_n, dp_m2_n2);
03afc4a2
DV
7167}
7168
251ac862
DV
7169static void vlv_compute_dpll(struct intel_crtc *crtc,
7170 struct intel_crtc_state *pipe_config)
bdd4b6a6 7171{
03ed5cbf 7172 pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
cd2d34d9 7173 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
03ed5cbf
VS
7174 if (crtc->pipe != PIPE_A)
7175 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
bdd4b6a6 7176
cd2d34d9 7177 /* DPLL not used with DSI, but still need the rest set up */
187a1c07 7178 if (!pipe_config->has_dsi_encoder)
cd2d34d9
VS
7179 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
7180 DPLL_EXT_BUFFER_ENABLE_VLV;
7181
03ed5cbf
VS
7182 pipe_config->dpll_hw_state.dpll_md =
7183 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
7184}
bdd4b6a6 7185
03ed5cbf
VS
7186static void chv_compute_dpll(struct intel_crtc *crtc,
7187 struct intel_crtc_state *pipe_config)
7188{
7189 pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
cd2d34d9 7190 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
03ed5cbf
VS
7191 if (crtc->pipe != PIPE_A)
7192 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
7193
cd2d34d9 7194 /* DPLL not used with DSI, but still need the rest set up */
187a1c07 7195 if (!pipe_config->has_dsi_encoder)
cd2d34d9
VS
7196 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;
7197
03ed5cbf
VS
7198 pipe_config->dpll_hw_state.dpll_md =
7199 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
bdd4b6a6
DV
7200}
7201
d288f65f 7202static void vlv_prepare_pll(struct intel_crtc *crtc,
5cec258b 7203 const struct intel_crtc_state *pipe_config)
a0c4da24 7204{
f47709a9 7205 struct drm_device *dev = crtc->base.dev;
a0c4da24 7206 struct drm_i915_private *dev_priv = dev->dev_private;
cd2d34d9 7207 enum pipe pipe = crtc->pipe;
bdd4b6a6 7208 u32 mdiv;
a0c4da24 7209 u32 bestn, bestm1, bestm2, bestp1, bestp2;
bdd4b6a6 7210 u32 coreclk, reg_val;
a0c4da24 7211
cd2d34d9
VS
7212 /* Enable Refclk */
7213 I915_WRITE(DPLL(pipe),
7214 pipe_config->dpll_hw_state.dpll &
7215 ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));
7216
7217 /* No need to actually set up the DPLL with DSI */
7218 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
7219 return;
7220
a580516d 7221 mutex_lock(&dev_priv->sb_lock);
09153000 7222
d288f65f
VS
7223 bestn = pipe_config->dpll.n;
7224 bestm1 = pipe_config->dpll.m1;
7225 bestm2 = pipe_config->dpll.m2;
7226 bestp1 = pipe_config->dpll.p1;
7227 bestp2 = pipe_config->dpll.p2;
a0c4da24 7228
89b667f8
JB
7229 /* See eDP HDMI DPIO driver vbios notes doc */
7230
7231 /* PLL B needs special handling */
bdd4b6a6 7232 if (pipe == PIPE_B)
5e69f97f 7233 vlv_pllb_recal_opamp(dev_priv, pipe);
89b667f8
JB
7234
7235 /* Set up Tx target for periodic Rcomp update */
ab3c759a 7236 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);
89b667f8
JB
7237
7238 /* Disable target IRef on PLL */
ab3c759a 7239 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
89b667f8 7240 reg_val &= 0x00ffffff;
ab3c759a 7241 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);
89b667f8
JB
7242
7243 /* Disable fast lock */
ab3c759a 7244 vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);
89b667f8
JB
7245
7246 /* Set idtafcrecal before PLL is enabled */
a0c4da24
JB
7247 mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
7248 mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
7249 mdiv |= ((bestn << DPIO_N_SHIFT));
a0c4da24 7250 mdiv |= (1 << DPIO_K_SHIFT);
7df5080b
JB
7251
7252 /*
7253 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
7254 * but we don't support that).
7255 * Note: don't use the DAC post divider as it seems unstable.
7256 */
7257 mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
ab3c759a 7258 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
a0c4da24 7259
a0c4da24 7260 mdiv |= DPIO_ENABLE_CALIBRATION;
ab3c759a 7261 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
a0c4da24 7262
89b667f8 7263 /* Set HBR and RBR LPF coefficients */
d288f65f 7264 if (pipe_config->port_clock == 162000 ||
409ee761
ACO
7265 intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG) ||
7266 intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI))
ab3c759a 7267 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
885b0120 7268 0x009f0003);
89b667f8 7269 else
ab3c759a 7270 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
89b667f8
JB
7271 0x00d0000f);
7272
681a8504 7273 if (pipe_config->has_dp_encoder) {
89b667f8 7274 /* Use SSC source */
bdd4b6a6 7275 if (pipe == PIPE_A)
ab3c759a 7276 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
89b667f8
JB
7277 0x0df40000);
7278 else
ab3c759a 7279 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
89b667f8
JB
7280 0x0df70000);
7281 } else { /* HDMI or VGA */
7282 /* Use bend source */
bdd4b6a6 7283 if (pipe == PIPE_A)
ab3c759a 7284 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
89b667f8
JB
7285 0x0df70000);
7286 else
ab3c759a 7287 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
89b667f8
JB
7288 0x0df40000);
7289 }
a0c4da24 7290
ab3c759a 7291 coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
89b667f8 7292 coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
409ee761
ACO
7293 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
7294 intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
89b667f8 7295 coreclk |= 0x01000000;
ab3c759a 7296 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
a0c4da24 7297
ab3c759a 7298 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
a580516d 7299 mutex_unlock(&dev_priv->sb_lock);
a0c4da24
JB
7300}
7301
d288f65f 7302static void chv_prepare_pll(struct intel_crtc *crtc,
5cec258b 7303 const struct intel_crtc_state *pipe_config)
9d556c99
CML
7304{
7305 struct drm_device *dev = crtc->base.dev;
7306 struct drm_i915_private *dev_priv = dev->dev_private;
cd2d34d9 7307 enum pipe pipe = crtc->pipe;
9d556c99 7308 enum dpio_channel port = vlv_pipe_to_channel(pipe);
9cbe40c1 7309 u32 loopfilter, tribuf_calcntr;
9d556c99 7310 u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
a945ce7e 7311 u32 dpio_val;
9cbe40c1 7312 int vco;
9d556c99 7313
cd2d34d9
VS
7314 /* Enable Refclk and SSC */
7315 I915_WRITE(DPLL(pipe),
7316 pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
7317
7318 /* No need to actually set up the DPLL with DSI */
7319 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
7320 return;
7321
d288f65f
VS
7322 bestn = pipe_config->dpll.n;
7323 bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
7324 bestm1 = pipe_config->dpll.m1;
7325 bestm2 = pipe_config->dpll.m2 >> 22;
7326 bestp1 = pipe_config->dpll.p1;
7327 bestp2 = pipe_config->dpll.p2;
9cbe40c1 7328 vco = pipe_config->dpll.vco;
a945ce7e 7329 dpio_val = 0;
9cbe40c1 7330 loopfilter = 0;
9d556c99 7331
a580516d 7332 mutex_lock(&dev_priv->sb_lock);
9d556c99 7333
9d556c99
CML
7334 /* p1 and p2 divider */
7335 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
7336 5 << DPIO_CHV_S1_DIV_SHIFT |
7337 bestp1 << DPIO_CHV_P1_DIV_SHIFT |
7338 bestp2 << DPIO_CHV_P2_DIV_SHIFT |
7339 1 << DPIO_CHV_K_DIV_SHIFT);
7340
7341 /* Feedback post-divider - m2 */
7342 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);
7343
7344 /* Feedback refclk divider - n and m1 */
7345 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
7346 DPIO_CHV_M1_DIV_BY_2 |
7347 1 << DPIO_CHV_N_DIV_SHIFT);
7348
7349 /* M2 fraction division */
25a25dfc 7350 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);
9d556c99
CML
7351
7352 /* M2 fraction division enable */
a945ce7e
VP
7353 dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
7354 dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
7355 dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
7356 if (bestm2_frac)
7357 dpio_val |= DPIO_CHV_FRAC_DIV_EN;
7358 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);
9d556c99 7359
de3a0fde
VP
7360 /* Program digital lock detect threshold */
7361 dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
7362 dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
7363 DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
7364 dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
7365 if (!bestm2_frac)
7366 dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
7367 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);
7368
9d556c99 7369 /* Loop filter */
9cbe40c1
VP
7370 if (vco == 5400000) {
7371 loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
7372 loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
7373 loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
7374 tribuf_calcntr = 0x9;
7375 } else if (vco <= 6200000) {
7376 loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
7377 loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
7378 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
7379 tribuf_calcntr = 0x9;
7380 } else if (vco <= 6480000) {
7381 loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
7382 loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
7383 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
7384 tribuf_calcntr = 0x8;
7385 } else {
7386 /* Not supported. Apply the same limits as in the max case */
7387 loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
7388 loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
7389 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
7390 tribuf_calcntr = 0;
7391 }
9d556c99
CML
7392 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);
7393
968040b2 7394 dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
9cbe40c1
VP
7395 dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
7396 dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
7397 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);
7398
9d556c99
CML
7399 /* AFC Recal */
7400 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
7401 vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
7402 DPIO_AFC_RECAL);
7403
a580516d 7404 mutex_unlock(&dev_priv->sb_lock);
9d556c99
CML
7405}
7406
/**
 * vlv_force_pll_on - forcibly enable just the PLL
 * @dev: drm device
 * @pipe: pipe PLL to enable
 * @dpll: PLL configuration
 *
 * Enable the PLL for @pipe using the supplied @dpll config. To be used
 * in cases where we need the PLL enabled even when @pipe is not going to
 * be enabled.
 *
 * Returns 0 on success, -ENOMEM if the scratch state allocation fails.
 */
int vlv_force_pll_on(struct drm_device *dev, enum pipe pipe,
		     const struct dpll *dpll)
{
	struct intel_crtc *crtc =
		to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe));
	struct intel_crtc_state *pipe_config;

	/* Throwaway state purely for the compute/prepare/enable helpers;
	 * it is never attached to the crtc. */
	pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
	if (!pipe_config)
		return -ENOMEM;

	pipe_config->base.crtc = &crtc->base;
	pipe_config->pixel_multiplier = 1;
	pipe_config->dpll = *dpll;

	if (IS_CHERRYVIEW(dev)) {
		chv_compute_dpll(crtc, pipe_config);
		chv_prepare_pll(crtc, pipe_config);
		chv_enable_pll(crtc, pipe_config);
	} else {
		vlv_compute_dpll(crtc, pipe_config);
		vlv_prepare_pll(crtc, pipe_config);
		vlv_enable_pll(crtc, pipe_config);
	}

	kfree(pipe_config);

	return 0;
}
7446
7447/**
7448 * vlv_force_pll_off - forcibly disable just the PLL
7449 * @dev_priv: i915 private structure
7450 * @pipe: pipe PLL to disable
7451 *
7452 * Disable the PLL for @pipe. To be used in cases where we need
7453 * the PLL enabled even when @pipe is not going to be enabled.
7454 */
7455void vlv_force_pll_off(struct drm_device *dev, enum pipe pipe)
7456{
7457 if (IS_CHERRYVIEW(dev))
7458 chv_disable_pll(to_i915(dev), pipe);
7459 else
7460 vlv_disable_pll(to_i915(dev), pipe);
7461}
7462
/*
 * Compute the DPLL (and, on gen4+, DPLL_MD) register values for gen3+
 * (i9xx-class) hardware and store them in @crtc_state->dpll_hw_state.
 * @reduced_clock, when non-NULL, supplies the downclocked dividers
 * (used for the G4X reduced P1 field).
 */
static void i9xx_compute_dpll(struct intel_crtc *crtc,
			      struct intel_crtc_state *crtc_state,
			      struct dpll *reduced_clock)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpll;
	bool is_sdvo;
	struct dpll *clock = &crtc_state->dpll;

	/* write the FP0/FP1 divider registers first */
	i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);

	is_sdvo = intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO) ||
		intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI);

	dpll = DPLL_VGA_MODE_DIS;

	if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS))
		dpll |= DPLLB_MODE_LVDS;
	else
		dpll |= DPLLB_MODE_DAC_SERIAL;

	if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
		dpll |= (crtc_state->pixel_multiplier - 1)
			<< SDVO_MULTIPLIER_SHIFT_HIRES;
	}

	if (is_sdvo)
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* DP also needs the "high speed" (5/10 post divider) mode */
	if (crtc_state->has_dp_encoder)
		dpll |= DPLL_SDVO_HIGH_SPEED;

	/* compute bitmask from p1 value */
	if (IS_PINEVIEW(dev))
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
	else {
		dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
		/* G4X can program a second P1 for the reduced (downclocked) state */
		if (IS_G4X(dev) && reduced_clock)
			dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
	}
	switch (clock->p2) {
	case 5:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
		break;
	case 7:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
		break;
	case 10:
		dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
		break;
	case 14:
		dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
		break;
	}
	if (INTEL_INFO(dev)->gen >= 4)
		dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);

	/* reference clock selection: TV clock, SSC, or default refclk */
	if (crtc_state->sdvo_tv_clock)
		dpll |= PLL_REF_INPUT_TVCLKINBC;
	else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
		 intel_panel_use_ssc(dev_priv))
		dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
	else
		dpll |= PLL_REF_INPUT_DREFCLK;

	dpll |= DPLL_VCO_ENABLE;
	crtc_state->dpll_hw_state.dpll = dpll;

	if (INTEL_INFO(dev)->gen >= 4) {
		u32 dpll_md = (crtc_state->pixel_multiplier - 1)
			<< DPLL_MD_UDI_MULTIPLIER_SHIFT;
		crtc_state->dpll_hw_state.dpll_md = dpll_md;
	}
}
7538
251ac862
DV
7539static void i8xx_compute_dpll(struct intel_crtc *crtc,
7540 struct intel_crtc_state *crtc_state,
9e2c8475 7541 struct dpll *reduced_clock)
eb1cbe48 7542{
f47709a9 7543 struct drm_device *dev = crtc->base.dev;
eb1cbe48 7544 struct drm_i915_private *dev_priv = dev->dev_private;
eb1cbe48 7545 u32 dpll;
190f68c5 7546 struct dpll *clock = &crtc_state->dpll;
eb1cbe48 7547
190f68c5 7548 i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
2a8f64ca 7549
eb1cbe48
DV
7550 dpll = DPLL_VGA_MODE_DIS;
7551
a93e255f 7552 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
eb1cbe48
DV
7553 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7554 } else {
7555 if (clock->p1 == 2)
7556 dpll |= PLL_P1_DIVIDE_BY_TWO;
7557 else
7558 dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7559 if (clock->p2 == 4)
7560 dpll |= PLL_P2_DIVIDE_BY_4;
7561 }
7562
a93e255f 7563 if (!IS_I830(dev) && intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO))
4a33e48d
DV
7564 dpll |= DPLL_DVO_2X_MODE;
7565
a93e255f 7566 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
ceb41007 7567 intel_panel_use_ssc(dev_priv))
eb1cbe48
DV
7568 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
7569 else
7570 dpll |= PLL_REF_INPUT_DREFCLK;
7571
7572 dpll |= DPLL_VCO_ENABLE;
190f68c5 7573 crtc_state->dpll_hw_state.dpll = dpll;
eb1cbe48
DV
7574}
7575
8a654f3b 7576static void intel_set_pipe_timings(struct intel_crtc *intel_crtc)
b0e77b9c
PZ
7577{
7578 struct drm_device *dev = intel_crtc->base.dev;
7579 struct drm_i915_private *dev_priv = dev->dev_private;
7580 enum pipe pipe = intel_crtc->pipe;
6e3c9717 7581 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
7c5f93b0 7582 const struct drm_display_mode *adjusted_mode = &intel_crtc->config->base.adjusted_mode;
1caea6e9
VS
7583 uint32_t crtc_vtotal, crtc_vblank_end;
7584 int vsyncshift = 0;
4d8a62ea
DV
7585
7586 /* We need to be careful not to changed the adjusted mode, for otherwise
7587 * the hw state checker will get angry at the mismatch. */
7588 crtc_vtotal = adjusted_mode->crtc_vtotal;
7589 crtc_vblank_end = adjusted_mode->crtc_vblank_end;
b0e77b9c 7590
609aeaca 7591 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
b0e77b9c 7592 /* the chip adds 2 halflines automatically */
4d8a62ea
DV
7593 crtc_vtotal -= 1;
7594 crtc_vblank_end -= 1;
609aeaca 7595
409ee761 7596 if (intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
609aeaca
VS
7597 vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
7598 else
7599 vsyncshift = adjusted_mode->crtc_hsync_start -
7600 adjusted_mode->crtc_htotal / 2;
1caea6e9
VS
7601 if (vsyncshift < 0)
7602 vsyncshift += adjusted_mode->crtc_htotal;
b0e77b9c
PZ
7603 }
7604
7605 if (INTEL_INFO(dev)->gen > 3)
fe2b8f9d 7606 I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);
b0e77b9c 7607
fe2b8f9d 7608 I915_WRITE(HTOTAL(cpu_transcoder),
b0e77b9c
PZ
7609 (adjusted_mode->crtc_hdisplay - 1) |
7610 ((adjusted_mode->crtc_htotal - 1) << 16));
fe2b8f9d 7611 I915_WRITE(HBLANK(cpu_transcoder),
b0e77b9c
PZ
7612 (adjusted_mode->crtc_hblank_start - 1) |
7613 ((adjusted_mode->crtc_hblank_end - 1) << 16));
fe2b8f9d 7614 I915_WRITE(HSYNC(cpu_transcoder),
b0e77b9c
PZ
7615 (adjusted_mode->crtc_hsync_start - 1) |
7616 ((adjusted_mode->crtc_hsync_end - 1) << 16));
7617
fe2b8f9d 7618 I915_WRITE(VTOTAL(cpu_transcoder),
b0e77b9c 7619 (adjusted_mode->crtc_vdisplay - 1) |
4d8a62ea 7620 ((crtc_vtotal - 1) << 16));
fe2b8f9d 7621 I915_WRITE(VBLANK(cpu_transcoder),
b0e77b9c 7622 (adjusted_mode->crtc_vblank_start - 1) |
4d8a62ea 7623 ((crtc_vblank_end - 1) << 16));
fe2b8f9d 7624 I915_WRITE(VSYNC(cpu_transcoder),
b0e77b9c
PZ
7625 (adjusted_mode->crtc_vsync_start - 1) |
7626 ((adjusted_mode->crtc_vsync_end - 1) << 16));
7627
b5e508d4
PZ
7628 /* Workaround: when the EDP input selection is B, the VTOTAL_B must be
7629 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
7630 * documented on the DDI_FUNC_CTL register description, EDP Input Select
7631 * bits. */
7632 if (IS_HASWELL(dev) && cpu_transcoder == TRANSCODER_EDP &&
7633 (pipe == PIPE_B || pipe == PIPE_C))
7634 I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));
7635
bc58be60
JN
7636}
7637
7638static void intel_set_pipe_src_size(struct intel_crtc *intel_crtc)
7639{
7640 struct drm_device *dev = intel_crtc->base.dev;
7641 struct drm_i915_private *dev_priv = dev->dev_private;
7642 enum pipe pipe = intel_crtc->pipe;
7643
b0e77b9c
PZ
7644 /* pipesrc controls the size that is scaled from, which should
7645 * always be the user's requested size.
7646 */
7647 I915_WRITE(PIPESRC(pipe),
6e3c9717
ACO
7648 ((intel_crtc->config->pipe_src_w - 1) << 16) |
7649 (intel_crtc->config->pipe_src_h - 1));
b0e77b9c
PZ
7650}
7651
1bd1bd80 7652static void intel_get_pipe_timings(struct intel_crtc *crtc,
5cec258b 7653 struct intel_crtc_state *pipe_config)
1bd1bd80
DV
7654{
7655 struct drm_device *dev = crtc->base.dev;
7656 struct drm_i915_private *dev_priv = dev->dev_private;
7657 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
7658 uint32_t tmp;
7659
7660 tmp = I915_READ(HTOTAL(cpu_transcoder));
2d112de7
ACO
7661 pipe_config->base.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
7662 pipe_config->base.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
1bd1bd80 7663 tmp = I915_READ(HBLANK(cpu_transcoder));
2d112de7
ACO
7664 pipe_config->base.adjusted_mode.crtc_hblank_start = (tmp & 0xffff) + 1;
7665 pipe_config->base.adjusted_mode.crtc_hblank_end = ((tmp >> 16) & 0xffff) + 1;
1bd1bd80 7666 tmp = I915_READ(HSYNC(cpu_transcoder));
2d112de7
ACO
7667 pipe_config->base.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
7668 pipe_config->base.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
1bd1bd80
DV
7669
7670 tmp = I915_READ(VTOTAL(cpu_transcoder));
2d112de7
ACO
7671 pipe_config->base.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
7672 pipe_config->base.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
1bd1bd80 7673 tmp = I915_READ(VBLANK(cpu_transcoder));
2d112de7
ACO
7674 pipe_config->base.adjusted_mode.crtc_vblank_start = (tmp & 0xffff) + 1;
7675 pipe_config->base.adjusted_mode.crtc_vblank_end = ((tmp >> 16) & 0xffff) + 1;
1bd1bd80 7676 tmp = I915_READ(VSYNC(cpu_transcoder));
2d112de7
ACO
7677 pipe_config->base.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
7678 pipe_config->base.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
1bd1bd80
DV
7679
7680 if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) {
2d112de7
ACO
7681 pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
7682 pipe_config->base.adjusted_mode.crtc_vtotal += 1;
7683 pipe_config->base.adjusted_mode.crtc_vblank_end += 1;
1bd1bd80 7684 }
bc58be60
JN
7685}
7686
7687static void intel_get_pipe_src_size(struct intel_crtc *crtc,
7688 struct intel_crtc_state *pipe_config)
7689{
7690 struct drm_device *dev = crtc->base.dev;
7691 struct drm_i915_private *dev_priv = dev->dev_private;
7692 u32 tmp;
1bd1bd80
DV
7693
7694 tmp = I915_READ(PIPESRC(crtc->pipe));
37327abd
VS
7695 pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
7696 pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
7697
2d112de7
ACO
7698 pipe_config->base.mode.vdisplay = pipe_config->pipe_src_h;
7699 pipe_config->base.mode.hdisplay = pipe_config->pipe_src_w;
1bd1bd80
DV
7700}
7701
f6a83288 7702void intel_mode_from_pipe_config(struct drm_display_mode *mode,
5cec258b 7703 struct intel_crtc_state *pipe_config)
babea61d 7704{
2d112de7
ACO
7705 mode->hdisplay = pipe_config->base.adjusted_mode.crtc_hdisplay;
7706 mode->htotal = pipe_config->base.adjusted_mode.crtc_htotal;
7707 mode->hsync_start = pipe_config->base.adjusted_mode.crtc_hsync_start;
7708 mode->hsync_end = pipe_config->base.adjusted_mode.crtc_hsync_end;
babea61d 7709
2d112de7
ACO
7710 mode->vdisplay = pipe_config->base.adjusted_mode.crtc_vdisplay;
7711 mode->vtotal = pipe_config->base.adjusted_mode.crtc_vtotal;
7712 mode->vsync_start = pipe_config->base.adjusted_mode.crtc_vsync_start;
7713 mode->vsync_end = pipe_config->base.adjusted_mode.crtc_vsync_end;
babea61d 7714
2d112de7 7715 mode->flags = pipe_config->base.adjusted_mode.flags;
cd13f5ab 7716 mode->type = DRM_MODE_TYPE_DRIVER;
babea61d 7717
2d112de7
ACO
7718 mode->clock = pipe_config->base.adjusted_mode.crtc_clock;
7719 mode->flags |= pipe_config->base.adjusted_mode.flags;
cd13f5ab
ML
7720
7721 mode->hsync = drm_mode_hsync(mode);
7722 mode->vrefresh = drm_mode_vrefresh(mode);
7723 drm_mode_set_name(mode);
babea61d
JB
7724}
7725
/*
 * Assemble and write the PIPECONF register for gen2-gen4/VLV/CHV:
 * quirk handling, double-wide, dither/bpc (g4x+), CxSR downclocking,
 * interlace mode and (VLV/CHV) limited color range.
 */
static void i9xx_set_pipeconf(struct intel_crtc *intel_crtc)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t pipeconf;

	pipeconf = 0;

	/* quirked machines must never see the pipe's ENABLE bit cleared,
	 * so carry it over from the current register value */
	if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
	    (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
		pipeconf |= I915_READ(PIPECONF(intel_crtc->pipe)) & PIPECONF_ENABLE;

	if (intel_crtc->config->double_wide)
		pipeconf |= PIPECONF_DOUBLE_WIDE;

	/* only g4x and later have fancy bpc/dither controls */
	if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		/* Bspec claims that we can't use dithering for 30bpp pipes. */
		if (intel_crtc->config->dither && intel_crtc->config->pipe_bpp != 30)
			pipeconf |= PIPECONF_DITHER_EN |
				    PIPECONF_DITHER_TYPE_SP;

		switch (intel_crtc->config->pipe_bpp) {
		case 18:
			pipeconf |= PIPECONF_6BPC;
			break;
		case 24:
			pipeconf |= PIPECONF_8BPC;
			break;
		case 30:
			pipeconf |= PIPECONF_10BPC;
			break;
		default:
			/* Case prevented by intel_choose_pipe_bpp_dither. */
			BUG();
		}
	}

	if (HAS_PIPE_CXSR(dev)) {
		if (intel_crtc->lowfreq_avail) {
			DRM_DEBUG_KMS("enabling CxSR downclocking\n");
			pipeconf |= PIPECONF_CXSR_DOWNCLOCK;
		} else {
			DRM_DEBUG_KMS("disabling CxSR downclocking\n");
		}
	}

	if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
		/* gen3 and earlier (and SDVO outputs) need the field indication */
		if (INTEL_INFO(dev)->gen < 4 ||
		    intel_pipe_has_type(intel_crtc, INTEL_OUTPUT_SDVO))
			pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
		else
			pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
	} else
		pipeconf |= PIPECONF_PROGRESSIVE;

	if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
	     intel_crtc->config->limited_color_range)
		pipeconf |= PIPECONF_COLOR_RANGE_SELECT;

	I915_WRITE(PIPECONF(intel_crtc->pipe), pipeconf);
	POSTING_READ(PIPECONF(intel_crtc->pipe));
}
7789
81c97f52
ACO
7790static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
7791 struct intel_crtc_state *crtc_state)
7792{
7793 struct drm_device *dev = crtc->base.dev;
7794 struct drm_i915_private *dev_priv = dev->dev_private;
1b6f4958 7795 const struct intel_limit *limit;
81c97f52
ACO
7796 int refclk = 48000;
7797
7798 memset(&crtc_state->dpll_hw_state, 0,
7799 sizeof(crtc_state->dpll_hw_state));
7800
7801 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7802 if (intel_panel_use_ssc(dev_priv)) {
7803 refclk = dev_priv->vbt.lvds_ssc_freq;
7804 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7805 }
7806
7807 limit = &intel_limits_i8xx_lvds;
7808 } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_DVO)) {
7809 limit = &intel_limits_i8xx_dvo;
7810 } else {
7811 limit = &intel_limits_i8xx_dac;
7812 }
7813
7814 if (!crtc_state->clock_set &&
7815 !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7816 refclk, NULL, &crtc_state->dpll)) {
7817 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7818 return -EINVAL;
7819 }
7820
7821 i8xx_compute_dpll(crtc, crtc_state, NULL);
7822
7823 return 0;
7824}
7825
19ec6693
ACO
7826static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
7827 struct intel_crtc_state *crtc_state)
7828{
7829 struct drm_device *dev = crtc->base.dev;
7830 struct drm_i915_private *dev_priv = dev->dev_private;
1b6f4958 7831 const struct intel_limit *limit;
19ec6693
ACO
7832 int refclk = 96000;
7833
7834 memset(&crtc_state->dpll_hw_state, 0,
7835 sizeof(crtc_state->dpll_hw_state));
7836
7837 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7838 if (intel_panel_use_ssc(dev_priv)) {
7839 refclk = dev_priv->vbt.lvds_ssc_freq;
7840 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7841 }
7842
7843 if (intel_is_dual_link_lvds(dev))
7844 limit = &intel_limits_g4x_dual_channel_lvds;
7845 else
7846 limit = &intel_limits_g4x_single_channel_lvds;
7847 } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_HDMI) ||
7848 intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
7849 limit = &intel_limits_g4x_hdmi;
7850 } else if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_SDVO)) {
7851 limit = &intel_limits_g4x_sdvo;
7852 } else {
7853 /* The option is for other outputs */
7854 limit = &intel_limits_i9xx_sdvo;
7855 }
7856
7857 if (!crtc_state->clock_set &&
7858 !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7859 refclk, NULL, &crtc_state->dpll)) {
7860 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7861 return -EINVAL;
7862 }
7863
7864 i9xx_compute_dpll(crtc, crtc_state, NULL);
7865
7866 return 0;
7867}
7868
70e8aa21
ACO
7869static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
7870 struct intel_crtc_state *crtc_state)
7871{
7872 struct drm_device *dev = crtc->base.dev;
7873 struct drm_i915_private *dev_priv = dev->dev_private;
1b6f4958 7874 const struct intel_limit *limit;
70e8aa21
ACO
7875 int refclk = 96000;
7876
7877 memset(&crtc_state->dpll_hw_state, 0,
7878 sizeof(crtc_state->dpll_hw_state));
7879
7880 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7881 if (intel_panel_use_ssc(dev_priv)) {
7882 refclk = dev_priv->vbt.lvds_ssc_freq;
7883 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7884 }
7885
7886 limit = &intel_limits_pineview_lvds;
7887 } else {
7888 limit = &intel_limits_pineview_sdvo;
7889 }
7890
7891 if (!crtc_state->clock_set &&
7892 !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7893 refclk, NULL, &crtc_state->dpll)) {
7894 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7895 return -EINVAL;
7896 }
7897
7898 i9xx_compute_dpll(crtc, crtc_state, NULL);
7899
7900 return 0;
7901}
7902
190f68c5
ACO
7903static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
7904 struct intel_crtc_state *crtc_state)
79e53945 7905{
c7653199 7906 struct drm_device *dev = crtc->base.dev;
79e53945 7907 struct drm_i915_private *dev_priv = dev->dev_private;
1b6f4958 7908 const struct intel_limit *limit;
81c97f52 7909 int refclk = 96000;
79e53945 7910
dd3cd74a
ACO
7911 memset(&crtc_state->dpll_hw_state, 0,
7912 sizeof(crtc_state->dpll_hw_state));
7913
70e8aa21
ACO
7914 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
7915 if (intel_panel_use_ssc(dev_priv)) {
7916 refclk = dev_priv->vbt.lvds_ssc_freq;
7917 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
7918 }
43565a06 7919
70e8aa21
ACO
7920 limit = &intel_limits_i9xx_lvds;
7921 } else {
7922 limit = &intel_limits_i9xx_sdvo;
81c97f52 7923 }
79e53945 7924
70e8aa21
ACO
7925 if (!crtc_state->clock_set &&
7926 !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7927 refclk, NULL, &crtc_state->dpll)) {
7928 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7929 return -EINVAL;
f47709a9 7930 }
7026d4ac 7931
81c97f52 7932 i9xx_compute_dpll(crtc, crtc_state, NULL);
79e53945 7933
c8f7a0db 7934 return 0;
f564048e
EA
7935}
7936
65b3d6a9
ACO
7937static int chv_crtc_compute_clock(struct intel_crtc *crtc,
7938 struct intel_crtc_state *crtc_state)
7939{
7940 int refclk = 100000;
1b6f4958 7941 const struct intel_limit *limit = &intel_limits_chv;
65b3d6a9
ACO
7942
7943 memset(&crtc_state->dpll_hw_state, 0,
7944 sizeof(crtc_state->dpll_hw_state));
7945
65b3d6a9
ACO
7946 if (!crtc_state->clock_set &&
7947 !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7948 refclk, NULL, &crtc_state->dpll)) {
7949 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7950 return -EINVAL;
7951 }
7952
7953 chv_compute_dpll(crtc, crtc_state);
7954
7955 return 0;
7956}
7957
7958static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
7959 struct intel_crtc_state *crtc_state)
7960{
7961 int refclk = 100000;
1b6f4958 7962 const struct intel_limit *limit = &intel_limits_vlv;
65b3d6a9
ACO
7963
7964 memset(&crtc_state->dpll_hw_state, 0,
7965 sizeof(crtc_state->dpll_hw_state));
7966
65b3d6a9
ACO
7967 if (!crtc_state->clock_set &&
7968 !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
7969 refclk, NULL, &crtc_state->dpll)) {
7970 DRM_ERROR("Couldn't find PLL settings for mode!\n");
7971 return -EINVAL;
7972 }
7973
7974 vlv_compute_dpll(crtc, crtc_state);
7975
7976 return 0;
7977}
7978
2fa2fe9a 7979static void i9xx_get_pfit_config(struct intel_crtc *crtc,
5cec258b 7980 struct intel_crtc_state *pipe_config)
2fa2fe9a
DV
7981{
7982 struct drm_device *dev = crtc->base.dev;
7983 struct drm_i915_private *dev_priv = dev->dev_private;
7984 uint32_t tmp;
7985
dc9e7dec
VS
7986 if (INTEL_INFO(dev)->gen <= 3 && (IS_I830(dev) || !IS_MOBILE(dev)))
7987 return;
7988
2fa2fe9a 7989 tmp = I915_READ(PFIT_CONTROL);
06922821
DV
7990 if (!(tmp & PFIT_ENABLE))
7991 return;
2fa2fe9a 7992
06922821 7993 /* Check whether the pfit is attached to our pipe. */
2fa2fe9a
DV
7994 if (INTEL_INFO(dev)->gen < 4) {
7995 if (crtc->pipe != PIPE_B)
7996 return;
2fa2fe9a
DV
7997 } else {
7998 if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
7999 return;
8000 }
8001
06922821 8002 pipe_config->gmch_pfit.control = tmp;
2fa2fe9a 8003 pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
2fa2fe9a
DV
8004}
8005
acbec814 8006static void vlv_crtc_clock_get(struct intel_crtc *crtc,
5cec258b 8007 struct intel_crtc_state *pipe_config)
acbec814
JB
8008{
8009 struct drm_device *dev = crtc->base.dev;
8010 struct drm_i915_private *dev_priv = dev->dev_private;
8011 int pipe = pipe_config->cpu_transcoder;
9e2c8475 8012 struct dpll clock;
acbec814 8013 u32 mdiv;
662c6ecb 8014 int refclk = 100000;
acbec814 8015
b521973b
VS
8016 /* In case of DSI, DPLL will not be used */
8017 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
f573de5a
SK
8018 return;
8019
a580516d 8020 mutex_lock(&dev_priv->sb_lock);
ab3c759a 8021 mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
a580516d 8022 mutex_unlock(&dev_priv->sb_lock);
acbec814
JB
8023
8024 clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
8025 clock.m2 = mdiv & DPIO_M2DIV_MASK;
8026 clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
8027 clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
8028 clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
8029
dccbea3b 8030 pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
acbec814
JB
8031}
8032
5724dbd1
DL
8033static void
8034i9xx_get_initial_plane_config(struct intel_crtc *crtc,
8035 struct intel_initial_plane_config *plane_config)
1ad292b5
JB
8036{
8037 struct drm_device *dev = crtc->base.dev;
8038 struct drm_i915_private *dev_priv = dev->dev_private;
8039 u32 val, base, offset;
8040 int pipe = crtc->pipe, plane = crtc->plane;
8041 int fourcc, pixel_format;
6761dd31 8042 unsigned int aligned_height;
b113d5ee 8043 struct drm_framebuffer *fb;
1b842c89 8044 struct intel_framebuffer *intel_fb;
1ad292b5 8045
42a7b088
DL
8046 val = I915_READ(DSPCNTR(plane));
8047 if (!(val & DISPLAY_PLANE_ENABLE))
8048 return;
8049
d9806c9f 8050 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
1b842c89 8051 if (!intel_fb) {
1ad292b5
JB
8052 DRM_DEBUG_KMS("failed to alloc fb\n");
8053 return;
8054 }
8055
1b842c89
DL
8056 fb = &intel_fb->base;
8057
18c5247e
DV
8058 if (INTEL_INFO(dev)->gen >= 4) {
8059 if (val & DISPPLANE_TILED) {
49af449b 8060 plane_config->tiling = I915_TILING_X;
18c5247e
DV
8061 fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
8062 }
8063 }
1ad292b5
JB
8064
8065 pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
b35d63fa 8066 fourcc = i9xx_format_to_fourcc(pixel_format);
b113d5ee
DL
8067 fb->pixel_format = fourcc;
8068 fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;
1ad292b5
JB
8069
8070 if (INTEL_INFO(dev)->gen >= 4) {
49af449b 8071 if (plane_config->tiling)
1ad292b5
JB
8072 offset = I915_READ(DSPTILEOFF(plane));
8073 else
8074 offset = I915_READ(DSPLINOFF(plane));
8075 base = I915_READ(DSPSURF(plane)) & 0xfffff000;
8076 } else {
8077 base = I915_READ(DSPADDR(plane));
8078 }
8079 plane_config->base = base;
8080
8081 val = I915_READ(PIPESRC(pipe));
b113d5ee
DL
8082 fb->width = ((val >> 16) & 0xfff) + 1;
8083 fb->height = ((val >> 0) & 0xfff) + 1;
1ad292b5
JB
8084
8085 val = I915_READ(DSPSTRIDE(pipe));
b113d5ee 8086 fb->pitches[0] = val & 0xffffffc0;
1ad292b5 8087
b113d5ee 8088 aligned_height = intel_fb_align_height(dev, fb->height,
091df6cb
DV
8089 fb->pixel_format,
8090 fb->modifier[0]);
1ad292b5 8091
f37b5c2b 8092 plane_config->size = fb->pitches[0] * aligned_height;
1ad292b5 8093
2844a921
DL
8094 DRM_DEBUG_KMS("pipe/plane %c/%d with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
8095 pipe_name(pipe), plane, fb->width, fb->height,
8096 fb->bits_per_pixel, base, fb->pitches[0],
8097 plane_config->size);
1ad292b5 8098
2d14030b 8099 plane_config->fb = intel_fb;
1ad292b5
JB
8100}
8101
70b23a98 8102static void chv_crtc_clock_get(struct intel_crtc *crtc,
5cec258b 8103 struct intel_crtc_state *pipe_config)
70b23a98
VS
8104{
8105 struct drm_device *dev = crtc->base.dev;
8106 struct drm_i915_private *dev_priv = dev->dev_private;
8107 int pipe = pipe_config->cpu_transcoder;
8108 enum dpio_channel port = vlv_pipe_to_channel(pipe);
9e2c8475 8109 struct dpll clock;
0d7b6b11 8110 u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
70b23a98
VS
8111 int refclk = 100000;
8112
b521973b
VS
8113 /* In case of DSI, DPLL will not be used */
8114 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
8115 return;
8116
a580516d 8117 mutex_lock(&dev_priv->sb_lock);
70b23a98
VS
8118 cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
8119 pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
8120 pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
8121 pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
0d7b6b11 8122 pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
a580516d 8123 mutex_unlock(&dev_priv->sb_lock);
70b23a98
VS
8124
8125 clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
0d7b6b11
ID
8126 clock.m2 = (pll_dw0 & 0xff) << 22;
8127 if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
8128 clock.m2 |= pll_dw2 & 0x3fffff;
70b23a98
VS
8129 clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
8130 clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
8131 clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;
8132
dccbea3b 8133 pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
70b23a98
VS
8134}
8135
0e8ffe1b 8136static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
5cec258b 8137 struct intel_crtc_state *pipe_config)
0e8ffe1b
DV
8138{
8139 struct drm_device *dev = crtc->base.dev;
8140 struct drm_i915_private *dev_priv = dev->dev_private;
1729050e 8141 enum intel_display_power_domain power_domain;
0e8ffe1b 8142 uint32_t tmp;
1729050e 8143 bool ret;
0e8ffe1b 8144
1729050e
ID
8145 power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
8146 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
b5482bd0
ID
8147 return false;
8148
e143a21c 8149 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
8106ddbd 8150 pipe_config->shared_dpll = NULL;
eccb140b 8151
1729050e
ID
8152 ret = false;
8153
0e8ffe1b
DV
8154 tmp = I915_READ(PIPECONF(crtc->pipe));
8155 if (!(tmp & PIPECONF_ENABLE))
1729050e 8156 goto out;
0e8ffe1b 8157
666a4537 8158 if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
42571aef
VS
8159 switch (tmp & PIPECONF_BPC_MASK) {
8160 case PIPECONF_6BPC:
8161 pipe_config->pipe_bpp = 18;
8162 break;
8163 case PIPECONF_8BPC:
8164 pipe_config->pipe_bpp = 24;
8165 break;
8166 case PIPECONF_10BPC:
8167 pipe_config->pipe_bpp = 30;
8168 break;
8169 default:
8170 break;
8171 }
8172 }
8173
666a4537
WB
8174 if ((IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) &&
8175 (tmp & PIPECONF_COLOR_RANGE_SELECT))
b5a9fa09
DV
8176 pipe_config->limited_color_range = true;
8177
282740f7
VS
8178 if (INTEL_INFO(dev)->gen < 4)
8179 pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
8180
1bd1bd80 8181 intel_get_pipe_timings(crtc, pipe_config);
bc58be60 8182 intel_get_pipe_src_size(crtc, pipe_config);
1bd1bd80 8183
2fa2fe9a
DV
8184 i9xx_get_pfit_config(crtc, pipe_config);
8185
6c49f241 8186 if (INTEL_INFO(dev)->gen >= 4) {
c231775c
VS
8187 /* No way to read it out on pipes B and C */
8188 if (IS_CHERRYVIEW(dev) && crtc->pipe != PIPE_A)
8189 tmp = dev_priv->chv_dpll_md[crtc->pipe];
8190 else
8191 tmp = I915_READ(DPLL_MD(crtc->pipe));
6c49f241
DV
8192 pipe_config->pixel_multiplier =
8193 ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
8194 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
8bcc2795 8195 pipe_config->dpll_hw_state.dpll_md = tmp;
6c49f241
DV
8196 } else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
8197 tmp = I915_READ(DPLL(crtc->pipe));
8198 pipe_config->pixel_multiplier =
8199 ((tmp & SDVO_MULTIPLIER_MASK)
8200 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
8201 } else {
8202 /* Note that on i915G/GM the pixel multiplier is in the sdvo
8203 * port and will be fixed up in the encoder->get_config
8204 * function. */
8205 pipe_config->pixel_multiplier = 1;
8206 }
8bcc2795 8207 pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
666a4537 8208 if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
1c4e0274
VS
8209 /*
8210 * DPLL_DVO_2X_MODE must be enabled for both DPLLs
8211 * on 830. Filter it out here so that we don't
8212 * report errors due to that.
8213 */
8214 if (IS_I830(dev))
8215 pipe_config->dpll_hw_state.dpll &= ~DPLL_DVO_2X_MODE;
8216
8bcc2795
DV
8217 pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
8218 pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
165e901c
VS
8219 } else {
8220 /* Mask out read-only status bits. */
8221 pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
8222 DPLL_PORTC_READY_MASK |
8223 DPLL_PORTB_READY_MASK);
8bcc2795 8224 }
6c49f241 8225
70b23a98
VS
8226 if (IS_CHERRYVIEW(dev))
8227 chv_crtc_clock_get(crtc, pipe_config);
8228 else if (IS_VALLEYVIEW(dev))
acbec814
JB
8229 vlv_crtc_clock_get(crtc, pipe_config);
8230 else
8231 i9xx_crtc_clock_get(crtc, pipe_config);
18442d08 8232
0f64614d
VS
8233 /*
8234 * Normally the dotclock is filled in by the encoder .get_config()
8235 * but in case the pipe is enabled w/o any ports we need a sane
8236 * default.
8237 */
8238 pipe_config->base.adjusted_mode.crtc_clock =
8239 pipe_config->port_clock / pipe_config->pixel_multiplier;
8240
1729050e
ID
8241 ret = true;
8242
8243out:
8244 intel_display_power_put(dev_priv, power_domain);
8245
8246 return ret;
0e8ffe1b
DV
8247}
8248
dde86e2d 8249static void ironlake_init_pch_refclk(struct drm_device *dev)
13d83a67
JB
8250{
8251 struct drm_i915_private *dev_priv = dev->dev_private;
13d83a67 8252 struct intel_encoder *encoder;
74cfd7ac 8253 u32 val, final;
13d83a67 8254 bool has_lvds = false;
199e5d79 8255 bool has_cpu_edp = false;
199e5d79 8256 bool has_panel = false;
99eb6a01
KP
8257 bool has_ck505 = false;
8258 bool can_ssc = false;
13d83a67
JB
8259
8260 /* We need to take the global config into account */
b2784e15 8261 for_each_intel_encoder(dev, encoder) {
199e5d79
KP
8262 switch (encoder->type) {
8263 case INTEL_OUTPUT_LVDS:
8264 has_panel = true;
8265 has_lvds = true;
8266 break;
8267 case INTEL_OUTPUT_EDP:
8268 has_panel = true;
2de6905f 8269 if (enc_to_dig_port(&encoder->base)->port == PORT_A)
199e5d79
KP
8270 has_cpu_edp = true;
8271 break;
6847d71b
PZ
8272 default:
8273 break;
13d83a67
JB
8274 }
8275 }
8276
99eb6a01 8277 if (HAS_PCH_IBX(dev)) {
41aa3448 8278 has_ck505 = dev_priv->vbt.display_clock_mode;
99eb6a01
KP
8279 can_ssc = has_ck505;
8280 } else {
8281 has_ck505 = false;
8282 can_ssc = true;
8283 }
8284
2de6905f
ID
8285 DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d\n",
8286 has_panel, has_lvds, has_ck505);
13d83a67
JB
8287
8288 /* Ironlake: try to setup display ref clock before DPLL
8289 * enabling. This is only under driver's control after
8290 * PCH B stepping, previous chipset stepping should be
8291 * ignoring this setting.
8292 */
74cfd7ac
CW
8293 val = I915_READ(PCH_DREF_CONTROL);
8294
8295 /* As we must carefully and slowly disable/enable each source in turn,
8296 * compute the final state we want first and check if we need to
8297 * make any changes at all.
8298 */
8299 final = val;
8300 final &= ~DREF_NONSPREAD_SOURCE_MASK;
8301 if (has_ck505)
8302 final |= DREF_NONSPREAD_CK505_ENABLE;
8303 else
8304 final |= DREF_NONSPREAD_SOURCE_ENABLE;
8305
8306 final &= ~DREF_SSC_SOURCE_MASK;
8307 final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
8308 final &= ~DREF_SSC1_ENABLE;
8309
8310 if (has_panel) {
8311 final |= DREF_SSC_SOURCE_ENABLE;
8312
8313 if (intel_panel_use_ssc(dev_priv) && can_ssc)
8314 final |= DREF_SSC1_ENABLE;
8315
8316 if (has_cpu_edp) {
8317 if (intel_panel_use_ssc(dev_priv) && can_ssc)
8318 final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
8319 else
8320 final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
8321 } else
8322 final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
8323 } else {
8324 final |= DREF_SSC_SOURCE_DISABLE;
8325 final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
8326 }
8327
8328 if (final == val)
8329 return;
8330
13d83a67 8331 /* Always enable nonspread source */
74cfd7ac 8332 val &= ~DREF_NONSPREAD_SOURCE_MASK;
13d83a67 8333
99eb6a01 8334 if (has_ck505)
74cfd7ac 8335 val |= DREF_NONSPREAD_CK505_ENABLE;
99eb6a01 8336 else
74cfd7ac 8337 val |= DREF_NONSPREAD_SOURCE_ENABLE;
13d83a67 8338
199e5d79 8339 if (has_panel) {
74cfd7ac
CW
8340 val &= ~DREF_SSC_SOURCE_MASK;
8341 val |= DREF_SSC_SOURCE_ENABLE;
13d83a67 8342
199e5d79 8343 /* SSC must be turned on before enabling the CPU output */
99eb6a01 8344 if (intel_panel_use_ssc(dev_priv) && can_ssc) {
199e5d79 8345 DRM_DEBUG_KMS("Using SSC on panel\n");
74cfd7ac 8346 val |= DREF_SSC1_ENABLE;
e77166b5 8347 } else
74cfd7ac 8348 val &= ~DREF_SSC1_ENABLE;
199e5d79
KP
8349
8350 /* Get SSC going before enabling the outputs */
74cfd7ac 8351 I915_WRITE(PCH_DREF_CONTROL, val);
199e5d79
KP
8352 POSTING_READ(PCH_DREF_CONTROL);
8353 udelay(200);
8354
74cfd7ac 8355 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
13d83a67
JB
8356
8357 /* Enable CPU source on CPU attached eDP */
199e5d79 8358 if (has_cpu_edp) {
99eb6a01 8359 if (intel_panel_use_ssc(dev_priv) && can_ssc) {
199e5d79 8360 DRM_DEBUG_KMS("Using SSC on eDP\n");
74cfd7ac 8361 val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
eba905b2 8362 } else
74cfd7ac 8363 val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
199e5d79 8364 } else
74cfd7ac 8365 val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
199e5d79 8366
74cfd7ac 8367 I915_WRITE(PCH_DREF_CONTROL, val);
199e5d79
KP
8368 POSTING_READ(PCH_DREF_CONTROL);
8369 udelay(200);
8370 } else {
8371 DRM_DEBUG_KMS("Disabling SSC entirely\n");
8372
74cfd7ac 8373 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
199e5d79
KP
8374
8375 /* Turn off CPU output */
74cfd7ac 8376 val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
199e5d79 8377
74cfd7ac 8378 I915_WRITE(PCH_DREF_CONTROL, val);
199e5d79
KP
8379 POSTING_READ(PCH_DREF_CONTROL);
8380 udelay(200);
8381
8382 /* Turn off the SSC source */
74cfd7ac
CW
8383 val &= ~DREF_SSC_SOURCE_MASK;
8384 val |= DREF_SSC_SOURCE_DISABLE;
199e5d79
KP
8385
8386 /* Turn off SSC1 */
74cfd7ac 8387 val &= ~DREF_SSC1_ENABLE;
199e5d79 8388
74cfd7ac 8389 I915_WRITE(PCH_DREF_CONTROL, val);
13d83a67
JB
8390 POSTING_READ(PCH_DREF_CONTROL);
8391 udelay(200);
8392 }
74cfd7ac
CW
8393
8394 BUG_ON(val != final);
13d83a67
JB
8395}
8396
f31f2d55 8397static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
dde86e2d 8398{
f31f2d55 8399 uint32_t tmp;
dde86e2d 8400
0ff066a9
PZ
8401 tmp = I915_READ(SOUTH_CHICKEN2);
8402 tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
8403 I915_WRITE(SOUTH_CHICKEN2, tmp);
dde86e2d 8404
0ff066a9
PZ
8405 if (wait_for_atomic_us(I915_READ(SOUTH_CHICKEN2) &
8406 FDI_MPHY_IOSFSB_RESET_STATUS, 100))
8407 DRM_ERROR("FDI mPHY reset assert timeout\n");
dde86e2d 8408
0ff066a9
PZ
8409 tmp = I915_READ(SOUTH_CHICKEN2);
8410 tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
8411 I915_WRITE(SOUTH_CHICKEN2, tmp);
dde86e2d 8412
0ff066a9
PZ
8413 if (wait_for_atomic_us((I915_READ(SOUTH_CHICKEN2) &
8414 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
8415 DRM_ERROR("FDI mPHY reset de-assert timeout\n");
f31f2d55
PZ
8416}
8417
8418/* WaMPhyProgramming:hsw */
8419static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
8420{
8421 uint32_t tmp;
dde86e2d
PZ
8422
8423 tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
8424 tmp &= ~(0xFF << 24);
8425 tmp |= (0x12 << 24);
8426 intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
8427
dde86e2d
PZ
8428 tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
8429 tmp |= (1 << 11);
8430 intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
8431
8432 tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
8433 tmp |= (1 << 11);
8434 intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
8435
dde86e2d
PZ
8436 tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
8437 tmp |= (1 << 24) | (1 << 21) | (1 << 18);
8438 intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
8439
8440 tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
8441 tmp |= (1 << 24) | (1 << 21) | (1 << 18);
8442 intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
8443
0ff066a9
PZ
8444 tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
8445 tmp &= ~(7 << 13);
8446 tmp |= (5 << 13);
8447 intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
dde86e2d 8448
0ff066a9
PZ
8449 tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
8450 tmp &= ~(7 << 13);
8451 tmp |= (5 << 13);
8452 intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
dde86e2d
PZ
8453
8454 tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
8455 tmp &= ~0xFF;
8456 tmp |= 0x1C;
8457 intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
8458
8459 tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
8460 tmp &= ~0xFF;
8461 tmp |= 0x1C;
8462 intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
8463
8464 tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
8465 tmp &= ~(0xFF << 16);
8466 tmp |= (0x1C << 16);
8467 intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
8468
8469 tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
8470 tmp &= ~(0xFF << 16);
8471 tmp |= (0x1C << 16);
8472 intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
8473
0ff066a9
PZ
8474 tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
8475 tmp |= (1 << 27);
8476 intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
dde86e2d 8477
0ff066a9
PZ
8478 tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
8479 tmp |= (1 << 27);
8480 intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
dde86e2d 8481
0ff066a9
PZ
8482 tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
8483 tmp &= ~(0xF << 28);
8484 tmp |= (4 << 28);
8485 intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
dde86e2d 8486
0ff066a9
PZ
8487 tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
8488 tmp &= ~(0xF << 28);
8489 tmp |= (4 << 28);
8490 intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
f31f2d55
PZ
8491}
8492
2fa86a1f
PZ
8493/* Implements 3 different sequences from BSpec chapter "Display iCLK
8494 * Programming" based on the parameters passed:
8495 * - Sequence to enable CLKOUT_DP
8496 * - Sequence to enable CLKOUT_DP without spread
8497 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
8498 */
8499static void lpt_enable_clkout_dp(struct drm_device *dev, bool with_spread,
8500 bool with_fdi)
f31f2d55
PZ
8501{
8502 struct drm_i915_private *dev_priv = dev->dev_private;
2fa86a1f
PZ
8503 uint32_t reg, tmp;
8504
8505 if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
8506 with_spread = true;
c2699524 8507 if (WARN(HAS_PCH_LPT_LP(dev) && with_fdi, "LP PCH doesn't have FDI\n"))
2fa86a1f 8508 with_fdi = false;
f31f2d55 8509
a580516d 8510 mutex_lock(&dev_priv->sb_lock);
f31f2d55
PZ
8511
8512 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
8513 tmp &= ~SBI_SSCCTL_DISABLE;
8514 tmp |= SBI_SSCCTL_PATHALT;
8515 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
8516
8517 udelay(24);
8518
2fa86a1f
PZ
8519 if (with_spread) {
8520 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
8521 tmp &= ~SBI_SSCCTL_PATHALT;
8522 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
f31f2d55 8523
2fa86a1f
PZ
8524 if (with_fdi) {
8525 lpt_reset_fdi_mphy(dev_priv);
8526 lpt_program_fdi_mphy(dev_priv);
8527 }
8528 }
dde86e2d 8529
c2699524 8530 reg = HAS_PCH_LPT_LP(dev) ? SBI_GEN0 : SBI_DBUFF0;
2fa86a1f
PZ
8531 tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
8532 tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
8533 intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
c00db246 8534
a580516d 8535 mutex_unlock(&dev_priv->sb_lock);
dde86e2d
PZ
8536}
8537
47701c3b
PZ
8538/* Sequence to disable CLKOUT_DP */
8539static void lpt_disable_clkout_dp(struct drm_device *dev)
8540{
8541 struct drm_i915_private *dev_priv = dev->dev_private;
8542 uint32_t reg, tmp;
8543
a580516d 8544 mutex_lock(&dev_priv->sb_lock);
47701c3b 8545
c2699524 8546 reg = HAS_PCH_LPT_LP(dev) ? SBI_GEN0 : SBI_DBUFF0;
47701c3b
PZ
8547 tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
8548 tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
8549 intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
8550
8551 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
8552 if (!(tmp & SBI_SSCCTL_DISABLE)) {
8553 if (!(tmp & SBI_SSCCTL_PATHALT)) {
8554 tmp |= SBI_SSCCTL_PATHALT;
8555 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
8556 udelay(32);
8557 }
8558 tmp |= SBI_SSCCTL_DISABLE;
8559 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
8560 }
8561
a580516d 8562 mutex_unlock(&dev_priv->sb_lock);
47701c3b
PZ
8563}
8564
f7be2c21
VS
8565#define BEND_IDX(steps) ((50 + (steps)) / 5)
8566
8567static const uint16_t sscdivintphase[] = {
8568 [BEND_IDX( 50)] = 0x3B23,
8569 [BEND_IDX( 45)] = 0x3B23,
8570 [BEND_IDX( 40)] = 0x3C23,
8571 [BEND_IDX( 35)] = 0x3C23,
8572 [BEND_IDX( 30)] = 0x3D23,
8573 [BEND_IDX( 25)] = 0x3D23,
8574 [BEND_IDX( 20)] = 0x3E23,
8575 [BEND_IDX( 15)] = 0x3E23,
8576 [BEND_IDX( 10)] = 0x3F23,
8577 [BEND_IDX( 5)] = 0x3F23,
8578 [BEND_IDX( 0)] = 0x0025,
8579 [BEND_IDX( -5)] = 0x0025,
8580 [BEND_IDX(-10)] = 0x0125,
8581 [BEND_IDX(-15)] = 0x0125,
8582 [BEND_IDX(-20)] = 0x0225,
8583 [BEND_IDX(-25)] = 0x0225,
8584 [BEND_IDX(-30)] = 0x0325,
8585 [BEND_IDX(-35)] = 0x0325,
8586 [BEND_IDX(-40)] = 0x0425,
8587 [BEND_IDX(-45)] = 0x0425,
8588 [BEND_IDX(-50)] = 0x0525,
8589};
8590
8591/*
8592 * Bend CLKOUT_DP
8593 * steps -50 to 50 inclusive, in steps of 5
8594 * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
8595 * change in clock period = -(steps / 10) * 5.787 ps
8596 */
8597static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
8598{
8599 uint32_t tmp;
8600 int idx = BEND_IDX(steps);
8601
8602 if (WARN_ON(steps % 5 != 0))
8603 return;
8604
8605 if (WARN_ON(idx >= ARRAY_SIZE(sscdivintphase)))
8606 return;
8607
8608 mutex_lock(&dev_priv->sb_lock);
8609
8610 if (steps % 10 != 0)
8611 tmp = 0xAAAAAAAB;
8612 else
8613 tmp = 0x00000000;
8614 intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);
8615
8616 tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
8617 tmp &= 0xffff0000;
8618 tmp |= sscdivintphase[idx];
8619 intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);
8620
8621 mutex_unlock(&dev_priv->sb_lock);
8622}
8623
8624#undef BEND_IDX
8625
bf8fa3d3
PZ
8626static void lpt_init_pch_refclk(struct drm_device *dev)
8627{
bf8fa3d3
PZ
8628 struct intel_encoder *encoder;
8629 bool has_vga = false;
8630
b2784e15 8631 for_each_intel_encoder(dev, encoder) {
bf8fa3d3
PZ
8632 switch (encoder->type) {
8633 case INTEL_OUTPUT_ANALOG:
8634 has_vga = true;
8635 break;
6847d71b
PZ
8636 default:
8637 break;
bf8fa3d3
PZ
8638 }
8639 }
8640
f7be2c21
VS
8641 if (has_vga) {
8642 lpt_bend_clkout_dp(to_i915(dev), 0);
47701c3b 8643 lpt_enable_clkout_dp(dev, true, true);
f7be2c21 8644 } else {
47701c3b 8645 lpt_disable_clkout_dp(dev);
f7be2c21 8646 }
bf8fa3d3
PZ
8647}
8648
/*
 * Initialize reference clocks when the driver loads
 */
void intel_init_pch_refclk(struct drm_device *dev)
{
	/* IBX/CPT and LPT PCH generations need different refclk setups. */
	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev))
		ironlake_init_pch_refclk(dev);
	else if (HAS_PCH_LPT(dev))
		lpt_init_pch_refclk(dev);
}
8659
6ff93609 8660static void ironlake_set_pipeconf(struct drm_crtc *crtc)
79e53945 8661{
c8203565 8662 struct drm_i915_private *dev_priv = crtc->dev->dev_private;
79e53945
JB
8663 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8664 int pipe = intel_crtc->pipe;
c8203565
PZ
8665 uint32_t val;
8666
78114071 8667 val = 0;
c8203565 8668
6e3c9717 8669 switch (intel_crtc->config->pipe_bpp) {
c8203565 8670 case 18:
dfd07d72 8671 val |= PIPECONF_6BPC;
c8203565
PZ
8672 break;
8673 case 24:
dfd07d72 8674 val |= PIPECONF_8BPC;
c8203565
PZ
8675 break;
8676 case 30:
dfd07d72 8677 val |= PIPECONF_10BPC;
c8203565
PZ
8678 break;
8679 case 36:
dfd07d72 8680 val |= PIPECONF_12BPC;
c8203565
PZ
8681 break;
8682 default:
cc769b62
PZ
8683 /* Case prevented by intel_choose_pipe_bpp_dither. */
8684 BUG();
c8203565
PZ
8685 }
8686
6e3c9717 8687 if (intel_crtc->config->dither)
c8203565
PZ
8688 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
8689
6e3c9717 8690 if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
c8203565
PZ
8691 val |= PIPECONF_INTERLACED_ILK;
8692 else
8693 val |= PIPECONF_PROGRESSIVE;
8694
6e3c9717 8695 if (intel_crtc->config->limited_color_range)
3685a8f3 8696 val |= PIPECONF_COLOR_RANGE_SELECT;
3685a8f3 8697
c8203565
PZ
8698 I915_WRITE(PIPECONF(pipe), val);
8699 POSTING_READ(PIPECONF(pipe));
8700}
8701
6ff93609 8702static void haswell_set_pipeconf(struct drm_crtc *crtc)
ee2b0b38 8703{
391bf048 8704 struct drm_i915_private *dev_priv = crtc->dev->dev_private;
ee2b0b38 8705 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6e3c9717 8706 enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
391bf048 8707 u32 val = 0;
ee2b0b38 8708
391bf048 8709 if (IS_HASWELL(dev_priv) && intel_crtc->config->dither)
ee2b0b38
PZ
8710 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
8711
6e3c9717 8712 if (intel_crtc->config->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
ee2b0b38
PZ
8713 val |= PIPECONF_INTERLACED_ILK;
8714 else
8715 val |= PIPECONF_PROGRESSIVE;
8716
702e7a56
PZ
8717 I915_WRITE(PIPECONF(cpu_transcoder), val);
8718 POSTING_READ(PIPECONF(cpu_transcoder));
391bf048
JN
8719}
8720
391bf048
JN
8721static void haswell_set_pipemisc(struct drm_crtc *crtc)
8722{
8723 struct drm_i915_private *dev_priv = crtc->dev->dev_private;
8724 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
756f85cf 8725
391bf048
JN
8726 if (IS_BROADWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 9) {
8727 u32 val = 0;
756f85cf 8728
6e3c9717 8729 switch (intel_crtc->config->pipe_bpp) {
756f85cf
PZ
8730 case 18:
8731 val |= PIPEMISC_DITHER_6_BPC;
8732 break;
8733 case 24:
8734 val |= PIPEMISC_DITHER_8_BPC;
8735 break;
8736 case 30:
8737 val |= PIPEMISC_DITHER_10_BPC;
8738 break;
8739 case 36:
8740 val |= PIPEMISC_DITHER_12_BPC;
8741 break;
8742 default:
8743 /* Case prevented by pipe_config_set_bpp. */
8744 BUG();
8745 }
8746
6e3c9717 8747 if (intel_crtc->config->dither)
756f85cf
PZ
8748 val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
8749
391bf048 8750 I915_WRITE(PIPEMISC(intel_crtc->pipe), val);
756f85cf 8751 }
ee2b0b38
PZ
8752}
8753
int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
{
	/*
	 * Account for spread spectrum to avoid
	 * oversubscribing the link. Max center spread
	 * is 2.5%; use 5% for safety's sake.
	 */
	uint32_t bps = target_clock * bpp * 21 / 20;
	uint32_t lane_bw = link_bw * 8;

	/* Round the lane count up so the link always has enough bandwidth. */
	return (bps + lane_bw - 1) / lane_bw;
}
8764
7429e9d4 8765static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
6cf86a5e 8766{
7429e9d4 8767 return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
f48d8f23
PZ
8768}
8769
b75ca6f6
ACO
8770static void ironlake_compute_dpll(struct intel_crtc *intel_crtc,
8771 struct intel_crtc_state *crtc_state,
9e2c8475 8772 struct dpll *reduced_clock)
79e53945 8773{
de13a2e3 8774 struct drm_crtc *crtc = &intel_crtc->base;
79e53945
JB
8775 struct drm_device *dev = crtc->dev;
8776 struct drm_i915_private *dev_priv = dev->dev_private;
55bb9992 8777 struct drm_atomic_state *state = crtc_state->base.state;
da3ced29 8778 struct drm_connector *connector;
55bb9992
ACO
8779 struct drm_connector_state *connector_state;
8780 struct intel_encoder *encoder;
b75ca6f6 8781 u32 dpll, fp, fp2;
ceb41007 8782 int factor, i;
09ede541 8783 bool is_lvds = false, is_sdvo = false;
79e53945 8784
da3ced29 8785 for_each_connector_in_state(state, connector, connector_state, i) {
55bb9992
ACO
8786 if (connector_state->crtc != crtc_state->base.crtc)
8787 continue;
8788
8789 encoder = to_intel_encoder(connector_state->best_encoder);
8790
8791 switch (encoder->type) {
79e53945
JB
8792 case INTEL_OUTPUT_LVDS:
8793 is_lvds = true;
8794 break;
8795 case INTEL_OUTPUT_SDVO:
7d57382e 8796 case INTEL_OUTPUT_HDMI:
79e53945 8797 is_sdvo = true;
79e53945 8798 break;
6847d71b
PZ
8799 default:
8800 break;
79e53945
JB
8801 }
8802 }
79e53945 8803
c1858123 8804 /* Enable autotuning of the PLL clock (if permissible) */
8febb297
EA
8805 factor = 21;
8806 if (is_lvds) {
8807 if ((intel_panel_use_ssc(dev_priv) &&
e91e941b 8808 dev_priv->vbt.lvds_ssc_freq == 100000) ||
f0b44056 8809 (HAS_PCH_IBX(dev) && intel_is_dual_link_lvds(dev)))
8febb297 8810 factor = 25;
190f68c5 8811 } else if (crtc_state->sdvo_tv_clock)
8febb297 8812 factor = 20;
c1858123 8813
b75ca6f6
ACO
8814 fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
8815
190f68c5 8816 if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor))
b75ca6f6
ACO
8817 fp |= FP_CB_TUNE;
8818
8819 if (reduced_clock) {
8820 fp2 = i9xx_dpll_compute_fp(reduced_clock);
2c07245f 8821
b75ca6f6
ACO
8822 if (reduced_clock->m < factor * reduced_clock->n)
8823 fp2 |= FP_CB_TUNE;
8824 } else {
8825 fp2 = fp;
8826 }
9a7c7890 8827
5eddb70b 8828 dpll = 0;
2c07245f 8829
a07d6787
EA
8830 if (is_lvds)
8831 dpll |= DPLLB_MODE_LVDS;
8832 else
8833 dpll |= DPLLB_MODE_DAC_SERIAL;
198a037f 8834
190f68c5 8835 dpll |= (crtc_state->pixel_multiplier - 1)
ef1b460d 8836 << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
198a037f
DV
8837
8838 if (is_sdvo)
4a33e48d 8839 dpll |= DPLL_SDVO_HIGH_SPEED;
190f68c5 8840 if (crtc_state->has_dp_encoder)
4a33e48d 8841 dpll |= DPLL_SDVO_HIGH_SPEED;
79e53945 8842
a07d6787 8843 /* compute bitmask from p1 value */
190f68c5 8844 dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
a07d6787 8845 /* also FPA1 */
190f68c5 8846 dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
a07d6787 8847
190f68c5 8848 switch (crtc_state->dpll.p2) {
a07d6787
EA
8849 case 5:
8850 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
8851 break;
8852 case 7:
8853 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
8854 break;
8855 case 10:
8856 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
8857 break;
8858 case 14:
8859 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
8860 break;
79e53945
JB
8861 }
8862
ceb41007 8863 if (is_lvds && intel_panel_use_ssc(dev_priv))
43565a06 8864 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
79e53945
JB
8865 else
8866 dpll |= PLL_REF_INPUT_DREFCLK;
8867
b75ca6f6
ACO
8868 dpll |= DPLL_VCO_ENABLE;
8869
8870 crtc_state->dpll_hw_state.dpll = dpll;
8871 crtc_state->dpll_hw_state.fp0 = fp;
8872 crtc_state->dpll_hw_state.fp1 = fp2;
de13a2e3
PZ
8873}
8874
190f68c5
ACO
8875static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
8876 struct intel_crtc_state *crtc_state)
de13a2e3 8877{
997c030c
ACO
8878 struct drm_device *dev = crtc->base.dev;
8879 struct drm_i915_private *dev_priv = dev->dev_private;
9e2c8475 8880 struct dpll reduced_clock;
7ed9f894 8881 bool has_reduced_clock = false;
e2b78267 8882 struct intel_shared_dpll *pll;
1b6f4958 8883 const struct intel_limit *limit;
997c030c 8884 int refclk = 120000;
de13a2e3 8885
dd3cd74a
ACO
8886 memset(&crtc_state->dpll_hw_state, 0,
8887 sizeof(crtc_state->dpll_hw_state));
8888
ded220e2
ACO
8889 crtc->lowfreq_avail = false;
8890
8891 /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
8892 if (!crtc_state->has_pch_encoder)
8893 return 0;
79e53945 8894
997c030c
ACO
8895 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8896 if (intel_panel_use_ssc(dev_priv)) {
8897 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
8898 dev_priv->vbt.lvds_ssc_freq);
8899 refclk = dev_priv->vbt.lvds_ssc_freq;
8900 }
8901
8902 if (intel_is_dual_link_lvds(dev)) {
8903 if (refclk == 100000)
8904 limit = &intel_limits_ironlake_dual_lvds_100m;
8905 else
8906 limit = &intel_limits_ironlake_dual_lvds;
8907 } else {
8908 if (refclk == 100000)
8909 limit = &intel_limits_ironlake_single_lvds_100m;
8910 else
8911 limit = &intel_limits_ironlake_single_lvds;
8912 }
8913 } else {
8914 limit = &intel_limits_ironlake_dac;
8915 }
8916
364ee29d 8917 if (!crtc_state->clock_set &&
997c030c
ACO
8918 !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8919 refclk, NULL, &crtc_state->dpll)) {
364ee29d
ACO
8920 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8921 return -EINVAL;
f47709a9 8922 }
79e53945 8923
b75ca6f6
ACO
8924 ironlake_compute_dpll(crtc, crtc_state,
8925 has_reduced_clock ? &reduced_clock : NULL);
66e985c0 8926
ded220e2
ACO
8927 pll = intel_get_shared_dpll(crtc, crtc_state, NULL);
8928 if (pll == NULL) {
8929 DRM_DEBUG_DRIVER("failed to find PLL for pipe %c\n",
8930 pipe_name(crtc->pipe));
8931 return -EINVAL;
3fb37703 8932 }
79e53945 8933
ded220e2
ACO
8934 if (intel_pipe_will_have_type(crtc_state, INTEL_OUTPUT_LVDS) &&
8935 has_reduced_clock)
c7653199 8936 crtc->lowfreq_avail = true;
e2b78267 8937
c8f7a0db 8938 return 0;
79e53945
JB
8939}
8940
eb14cb74
VS
/*
 * Read back the PCH transcoder link M1/N1 and data M1/N1 values for this
 * crtc's pipe into @m_n. The TU size is unpacked from the DATA_M1 register
 * (stored as value-1 in hardware, hence the "+ 1").
 */
8941static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
 8942 struct intel_link_m_n *m_n)
 8943{
 8944 struct drm_device *dev = crtc->base.dev;
 8945 struct drm_i915_private *dev_priv = dev->dev_private;
 8946 enum pipe pipe = crtc->pipe;
 8947
 8948 m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
 8949 m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
 8950 m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
 8951 & ~TU_SIZE_MASK;
 8952 m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
 8953 m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
 8954 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
 8955}
8956
/*
 * Read back the CPU transcoder M/N link and data values into @m_n, and
 * optionally the second set (M2/N2, used by DRRS) into @m2_n2.
 *
 * Gen5+ reads per-transcoder PIPE_* registers; older platforms read the
 * per-pipe G4X-style registers. @m2_n2 may be NULL when the caller does
 * not need the second set (e.g. FDI readout).
 */
 8957static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
 8958 enum transcoder transcoder,
b95af8be
VK
 8959 struct intel_link_m_n *m_n,
 8960 struct intel_link_m_n *m2_n2)
72419203
DV
 8961{
 8962 struct drm_device *dev = crtc->base.dev;
 8963 struct drm_i915_private *dev_priv = dev->dev_private;
eb14cb74 8964 enum pipe pipe = crtc->pipe;
72419203 8965
eb14cb74
VS
 8966 if (INTEL_INFO(dev)->gen >= 5) {
 8967 m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
 8968 m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
 8969 m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
 8970 & ~TU_SIZE_MASK;
 8971 m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
 8972 m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
 8973 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
b95af8be
VK
 8974 /* Read M2_N2 registers only for gen < 8 (M2_N2 available for
 8975 * gen < 8) and if DRRS is supported (to make sure the
 8976 * registers are not unnecessarily read).
 8977 */
 8978 if (m2_n2 && INTEL_INFO(dev)->gen < 8 &&
6e3c9717 8979 crtc->config->has_drrs) {
b95af8be
VK
 8980 m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
 8981 m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder));
 8982 m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder))
 8983 & ~TU_SIZE_MASK;
 8984 m2_n2->gmch_n = I915_READ(PIPE_DATA_N2(transcoder));
 8985 m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder))
 8986 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
 8987 }
eb14cb74
VS
 8988 } else {
 8989 m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
 8990 m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
 8991 m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
 8992 & ~TU_SIZE_MASK;
 8993 m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
 8994 m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
 8995 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
 8996 }
 8997}
8998
/*
 * Read back the DP link M/N values for state readout: from the PCH
 * transcoder when a PCH encoder is in use, otherwise from the CPU
 * transcoder (which also yields the second M2/N2 set for DRRS).
 */
 8999void intel_dp_get_m_n(struct intel_crtc *crtc,
5cec258b 9000 struct intel_crtc_state *pipe_config)
eb14cb74 9001{
681a8504 9002 if (pipe_config->has_pch_encoder)
eb14cb74
VS
 9003 intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
 9004 else
 9005 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
b95af8be
VK
 9006 &pipe_config->dp_m_n,
 9007 &pipe_config->dp_m2_n2);
eb14cb74 9008}
72419203 9009
/*
 * Read back the FDI M/N values from the CPU transcoder; the M2/N2 set is
 * not relevant for FDI, hence the NULL second-set argument.
 */
eb14cb74 9010static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
5cec258b 9011 struct intel_crtc_state *pipe_config)
eb14cb74
VS
 9012{
 9013 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
b95af8be 9014 &pipe_config->fdi_m_n, NULL);
72419203
DV
 9015}
9016
/*
 * SKL panel-fitter readout: scan this pipe's scalers for one that is
 * enabled and bound to the pipe (not a plane), record its id and window
 * position/size, and update the crtc's scaler_users bit accordingly.
 */
bd2e244f 9017static void skylake_get_pfit_config(struct intel_crtc *crtc,
5cec258b 9018 struct intel_crtc_state *pipe_config)
bd2e244f
JB
 9019{
 9020 struct drm_device *dev = crtc->base.dev;
 9021 struct drm_i915_private *dev_priv = dev->dev_private;
a1b2278e
CK
 9022 struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state;
 9023 uint32_t ps_ctrl = 0;
 9024 int id = -1;
 9025 int i;
bd2e244f 9026
a1b2278e
CK
 9027 /* find scaler attached to this pipe */
 9028 for (i = 0; i < crtc->num_scalers; i++) {
 9029 ps_ctrl = I915_READ(SKL_PS_CTRL(crtc->pipe, i));
 9030 if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) {
 9031 id = i;
 9032 pipe_config->pch_pfit.enabled = true;
 9033 pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i));
 9034 pipe_config->pch_pfit.size = I915_READ(SKL_PS_WIN_SZ(crtc->pipe, i));
 9035 break;
 9036 }
 9037 }
bd2e244f 9038
a1b2278e
CK
 9039 scaler_state->scaler_id = id;
 9040 if (id >= 0) {
 9041 scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
 9042 } else {
 9043 scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
bd2e244f
JB
 9044 }
 9045}
9046
5724dbd1
DL
/*
 * Reconstruct the BIOS-programmed primary plane configuration on SKL+ by
 * reading the plane control/surface/size/stride registers, so the boot
 * framebuffer can be inherited. Allocates an intel_framebuffer that is
 * handed to the caller via plane_config->fb; freed on the error path.
 */
 9047static void
 9048skylake_get_initial_plane_config(struct intel_crtc *crtc,
 9049 struct intel_initial_plane_config *plane_config)
bc8d7dff
DL
 9050{
 9051 struct drm_device *dev = crtc->base.dev;
 9052 struct drm_i915_private *dev_priv = dev->dev_private;
40f46283 9053 u32 val, base, offset, stride_mult, tiling;
bc8d7dff
DL
 9054 int pipe = crtc->pipe;
 9055 int fourcc, pixel_format;
6761dd31 9056 unsigned int aligned_height;
bc8d7dff 9057 struct drm_framebuffer *fb;
1b842c89 9058 struct intel_framebuffer *intel_fb;
bc8d7dff 9059
d9806c9f 9060 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
1b842c89 9061 if (!intel_fb) {
bc8d7dff
DL
 9062 DRM_DEBUG_KMS("failed to alloc fb\n");
 9063 return;
 9064 }
 9065
1b842c89
DL
 9066 fb = &intel_fb->base;
 9067
bc8d7dff 9068 val = I915_READ(PLANE_CTL(pipe, 0));
42a7b088
DL
 9069 if (!(val & PLANE_CTL_ENABLE))
 9070 goto error;
 9071
bc8d7dff
DL
 9072 pixel_format = val & PLANE_CTL_FORMAT_MASK;
 9073 fourcc = skl_format_to_fourcc(pixel_format,
 9074 val & PLANE_CTL_ORDER_RGBX,
 9075 val & PLANE_CTL_ALPHA_MASK);
 9076 fb->pixel_format = fourcc;
 9077 fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;
 9078
40f46283
DL
 9079 tiling = val & PLANE_CTL_TILED_MASK;
 9080 switch (tiling) {
 9081 case PLANE_CTL_TILED_LINEAR:
 9082 fb->modifier[0] = DRM_FORMAT_MOD_NONE;
 9083 break;
 9084 case PLANE_CTL_TILED_X:
 9085 plane_config->tiling = I915_TILING_X;
 9086 fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
 9087 break;
 9088 case PLANE_CTL_TILED_Y:
 9089 fb->modifier[0] = I915_FORMAT_MOD_Y_TILED;
 9090 break;
 9091 case PLANE_CTL_TILED_YF:
 9092 fb->modifier[0] = I915_FORMAT_MOD_Yf_TILED;
 9093 break;
 9094 default:
 9095 MISSING_CASE(tiling);
 9096 goto error;
 9097 }
 9098
bc8d7dff
DL
 9099 base = I915_READ(PLANE_SURF(pipe, 0)) & 0xfffff000;
 9100 plane_config->base = base;
 9101
 9102 offset = I915_READ(PLANE_OFFSET(pipe, 0));
 9103
 9104 val = I915_READ(PLANE_SIZE(pipe, 0));
 9105 fb->height = ((val >> 16) & 0xfff) + 1;
 9106 fb->width = ((val >> 0) & 0x1fff) + 1;
 9107
 9108 val = I915_READ(PLANE_STRIDE(pipe, 0));
7b49f948 9109 stride_mult = intel_fb_stride_alignment(dev_priv, fb->modifier[0],
40f46283 9110 fb->pixel_format);
bc8d7dff
DL
 9111 fb->pitches[0] = (val & 0x3ff) * stride_mult;
 9112
 9113 aligned_height = intel_fb_align_height(dev, fb->height,
091df6cb
DV
 9114 fb->pixel_format,
 9115 fb->modifier[0]);
bc8d7dff 9116
f37b5c2b 9117 plane_config->size = fb->pitches[0] * aligned_height;
bc8d7dff
DL
 9118
 9119 DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
 9120 pipe_name(pipe), fb->width, fb->height,
 9121 fb->bits_per_pixel, base, fb->pitches[0],
 9122 plane_config->size);
 9123
2d14030b 9124 plane_config->fb = intel_fb;
bc8d7dff
DL
 9125 return;
 9126
 9127error:
 9128 kfree(fb);
 9129}
9130
/*
 * ILK+ panel-fitter readout: if PF_CTL shows the fitter enabled, record
 * its window position and size; on gen7 also sanity-check that the
 * fitter is the one wired to this pipe.
 */
2fa2fe9a 9131static void ironlake_get_pfit_config(struct intel_crtc *crtc,
5cec258b 9132 struct intel_crtc_state *pipe_config)
2fa2fe9a
DV
 9133{
 9134 struct drm_device *dev = crtc->base.dev;
 9135 struct drm_i915_private *dev_priv = dev->dev_private;
 9136 uint32_t tmp;
 9137
 9138 tmp = I915_READ(PF_CTL(crtc->pipe));
 9139
 9140 if (tmp & PF_ENABLE) {
fd4daa9c 9141 pipe_config->pch_pfit.enabled = true;
2fa2fe9a
DV
 9142 pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
 9143 pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));
cb8b2a30
DV
 9144
 9145 /* We currently do not free assignements of panel fitters on
 9146 * ivb/hsw (since we don't use the higher upscaling modes which
 9147 * differentiates them) so just WARN about this case for now. */
 9148 if (IS_GEN7(dev)) {
 9149 WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
 9150 PF_PIPE_SEL_IVB(crtc->pipe));
 9151 }
2fa2fe9a 9152 }
79e53945
JB
 9153}
9154
5724dbd1
DL
/*
 * Reconstruct the BIOS-programmed primary plane configuration on ILK-era
 * hardware (DSPCNTR/DSPSURF/DSPSTRIDE registers), so the boot framebuffer
 * can be inherited. Bails out early if the plane is disabled; on success
 * the allocated intel_framebuffer is returned via plane_config->fb.
 */
 9155static void
 9156ironlake_get_initial_plane_config(struct intel_crtc *crtc,
 9157 struct intel_initial_plane_config *plane_config)
4c6baa59
JB
 9158{
 9159 struct drm_device *dev = crtc->base.dev;
 9160 struct drm_i915_private *dev_priv = dev->dev_private;
 9161 u32 val, base, offset;
aeee5a49 9162 int pipe = crtc->pipe;
4c6baa59 9163 int fourcc, pixel_format;
6761dd31 9164 unsigned int aligned_height;
b113d5ee 9165 struct drm_framebuffer *fb;
1b842c89 9166 struct intel_framebuffer *intel_fb;
4c6baa59 9167
42a7b088
DL
 9168 val = I915_READ(DSPCNTR(pipe));
 9169 if (!(val & DISPLAY_PLANE_ENABLE))
 9170 return;
 9171
d9806c9f 9172 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
1b842c89 9173 if (!intel_fb) {
4c6baa59
JB
 9174 DRM_DEBUG_KMS("failed to alloc fb\n");
 9175 return;
 9176 }
 9177
1b842c89
DL
 9178 fb = &intel_fb->base;
 9179
18c5247e
DV
 9180 if (INTEL_INFO(dev)->gen >= 4) {
 9181 if (val & DISPPLANE_TILED) {
49af449b 9182 plane_config->tiling = I915_TILING_X;
18c5247e
DV
 9183 fb->modifier[0] = I915_FORMAT_MOD_X_TILED;
 9184 }
 9185 }
4c6baa59
JB
 9186
 9187 pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
b35d63fa 9188 fourcc = i9xx_format_to_fourcc(pixel_format);
b113d5ee
DL
 9189 fb->pixel_format = fourcc;
 9190 fb->bits_per_pixel = drm_format_plane_cpp(fourcc, 0) * 8;
4c6baa59 9191
aeee5a49 9192 base = I915_READ(DSPSURF(pipe)) & 0xfffff000;
4c6baa59 9193 if (IS_HASWELL(dev) || IS_BROADWELL(dev)) {
aeee5a49 9194 offset = I915_READ(DSPOFFSET(pipe));
4c6baa59 9195 } else {
49af449b 9196 if (plane_config->tiling)
aeee5a49 9197 offset = I915_READ(DSPTILEOFF(pipe));
4c6baa59 9198 else
aeee5a49 9199 offset = I915_READ(DSPLINOFF(pipe));
4c6baa59
JB
 9200 }
 9201 plane_config->base = base;
 9202
 9203 val = I915_READ(PIPESRC(pipe));
b113d5ee
DL
 9204 fb->width = ((val >> 16) & 0xfff) + 1;
 9205 fb->height = ((val >> 0) & 0xfff) + 1;
4c6baa59
JB
 9206
 9207 val = I915_READ(DSPSTRIDE(pipe));
b113d5ee 9208 fb->pitches[0] = val & 0xffffffc0;
4c6baa59 9209
b113d5ee 9210 aligned_height = intel_fb_align_height(dev, fb->height,
091df6cb
DV
 9211 fb->pixel_format,
 9212 fb->modifier[0]);
4c6baa59 9213
f37b5c2b 9214 plane_config->size = fb->pitches[0] * aligned_height;
4c6baa59 9215
2844a921
DL
 9216 DRM_DEBUG_KMS("pipe %c with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
 9217 pipe_name(pipe), fb->width, fb->height,
 9218 fb->bits_per_pixel, base, fb->pitches[0],
 9219 plane_config->size);
b113d5ee 9220
2d14030b 9221 plane_config->fb = intel_fb;
4c6baa59
JB
 9222}
9223
/*
 * Read back the full pipe configuration on ILK-class hardware: pipe
 * enable/bpp/color range from PIPECONF, and — when the PCH transcoder is
 * active — FDI lane count, M/N values, the shared DPLL in use and the
 * pixel multiplier. Returns true iff the pipe is powered and enabled.
 * Takes a pipe power-domain reference for the duration of the readout.
 */
0e8ffe1b 9224static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
5cec258b 9225 struct intel_crtc_state *pipe_config)
0e8ffe1b
DV
 9226{
 9227 struct drm_device *dev = crtc->base.dev;
 9228 struct drm_i915_private *dev_priv = dev->dev_private;
1729050e 9229 enum intel_display_power_domain power_domain;
0e8ffe1b 9230 uint32_t tmp;
1729050e 9231 bool ret;
0e8ffe1b 9232
1729050e
ID
 9233 power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
 9234 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
930e8c9e
PZ
 9235 return false;
 9236
e143a21c 9237 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
8106ddbd 9238 pipe_config->shared_dpll = NULL;
eccb140b 9239
1729050e 9240 ret = false;
0e8ffe1b
DV
 9241 tmp = I915_READ(PIPECONF(crtc->pipe));
 9242 if (!(tmp & PIPECONF_ENABLE))
1729050e 9243 goto out;
0e8ffe1b 9244
42571aef
VS
 9245 switch (tmp & PIPECONF_BPC_MASK) {
 9246 case PIPECONF_6BPC:
 9247 pipe_config->pipe_bpp = 18;
 9248 break;
 9249 case PIPECONF_8BPC:
 9250 pipe_config->pipe_bpp = 24;
 9251 break;
 9252 case PIPECONF_10BPC:
 9253 pipe_config->pipe_bpp = 30;
 9254 break;
 9255 case PIPECONF_12BPC:
 9256 pipe_config->pipe_bpp = 36;
 9257 break;
 9258 default:
 9259 break;
 9260 }
 9261
b5a9fa09
DV
 9262 if (tmp & PIPECONF_COLOR_RANGE_SELECT)
 9263 pipe_config->limited_color_range = true;
 9264
ab9412ba 9265 if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
66e985c0 9266 struct intel_shared_dpll *pll;
8106ddbd 9267 enum intel_dpll_id pll_id;
66e985c0 9268
88adfff1
DV
 9269 pipe_config->has_pch_encoder = true;
 9270
627eb5a3
DV
 9271 tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
 9272 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
 9273 FDI_DP_PORT_WIDTH_SHIFT) + 1;
72419203
DV
 9274
 9275 ironlake_get_fdi_m_n_config(crtc, pipe_config);
6c49f241 9276
2d1fe073 9277 if (HAS_PCH_IBX(dev_priv)) {
d9a7bc67
ID
 9278 /*
 9279 * The pipe->pch transcoder and pch transcoder->pll
 9280 * mapping is fixed.
 9281 */
8106ddbd 9282 pll_id = (enum intel_dpll_id) crtc->pipe;
c0d43d62
DV
 9283 } else {
 9284 tmp = I915_READ(PCH_DPLL_SEL);
 9285 if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
8106ddbd 9286 pll_id = DPLL_ID_PCH_PLL_B;
c0d43d62 9287 else
8106ddbd 9288 pll_id= DPLL_ID_PCH_PLL_A;
c0d43d62 9289 }
66e985c0 9290
8106ddbd
ACO
 9291 pipe_config->shared_dpll =
 9292 intel_get_shared_dpll_by_id(dev_priv, pll_id);
 9293 pll = pipe_config->shared_dpll;
66e985c0 9294
2edd6443
ACO
 9295 WARN_ON(!pll->funcs.get_hw_state(dev_priv, pll,
 9296 &pipe_config->dpll_hw_state));
c93f54cf
DV
 9297
 9298 tmp = pipe_config->dpll_hw_state.dpll;
 9299 pipe_config->pixel_multiplier =
 9300 ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
 9301 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
18442d08
VS
 9302
 9303 ironlake_pch_clock_get(crtc, pipe_config);
6c49f241
DV
 9304 } else {
 9305 pipe_config->pixel_multiplier = 1;
627eb5a3
DV
 9306 }
 9307
1bd1bd80 9308 intel_get_pipe_timings(crtc, pipe_config);
bc58be60 9309 intel_get_pipe_src_size(crtc, pipe_config);
1bd1bd80 9310
2fa2fe9a
DV
 9311 ironlake_get_pfit_config(crtc, pipe_config);
 9312
1729050e
ID
 9313 ret = true;
 9314
 9315out:
 9316 intel_display_power_put(dev_priv, power_domain);
 9317
 9318 return ret;
0e8ffe1b
DV
 9319}
9320
be256dc7
PZ
/*
 * Sanity-check (via I915_STATE_WARN) that everything depending on LCPLL
 * is already shut down before it is disabled: no active CRTCs, power
 * well, SPLL/WRPLLs, panel power, PWMs, utility pin, GTC, or IRQs.
 */
 9321static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
 9322{
 9323 struct drm_device *dev = dev_priv->dev;
be256dc7 9324 struct intel_crtc *crtc;
be256dc7 9325
d3fcc808 9326 for_each_intel_crtc(dev, crtc)
e2c719b7 9327 I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
be256dc7
PZ
 9328 pipe_name(crtc->pipe));
 9329
e2c719b7
RC
 9330 I915_STATE_WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
 9331 I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL enabled\n");
01403de3
VS
 9332 I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE, "WRPLL1 enabled\n");
 9333 I915_STATE_WARN(I915_READ(WRPLL_CTL(1)) & WRPLL_PLL_ENABLE, "WRPLL2 enabled\n");
e2c719b7
RC
 9334 I915_STATE_WARN(I915_READ(PCH_PP_STATUS) & PP_ON, "Panel power on\n");
 9335 I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
be256dc7 9336 "CPU PWM1 enabled\n");
c5107b87 9337 if (IS_HASWELL(dev))
e2c719b7 9338 I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
c5107b87 9339 "CPU PWM2 enabled\n");
e2c719b7 9340 I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
be256dc7 9341 "PCH PWM1 enabled\n");
e2c719b7 9342 I915_STATE_WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
be256dc7 9343 "Utility pin enabled\n");
e2c719b7 9344 I915_STATE_WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE, "PCH GTC enabled\n");
be256dc7 9345
9926ada1
PZ
 9346 /*
 9347 * In theory we can still leave IRQs enabled, as long as only the HPD
 9348 * interrupts remain enabled. We used to check for that, but since it's
 9349 * gen-specific and since we only disable LCPLL after we fully disable
 9350 * the interrupts, the check below should be enough.
 9351 */
e2c719b7 9352 I915_STATE_WARN(intel_irqs_enabled(dev_priv), "IRQs enabled\n");
be256dc7
PZ
 9353}
9354
9ccd5aeb
PZ
/*
 * Read the D_COMP register, which lives at a different address on HSW
 * vs BDW.
 */
 9355static uint32_t hsw_read_dcomp(struct drm_i915_private *dev_priv)
 9356{
 9357 struct drm_device *dev = dev_priv->dev;
 9358
 9359 if (IS_HASWELL(dev))
 9360 return I915_READ(D_COMP_HSW);
 9361 else
 9362 return I915_READ(D_COMP_BDW);
 9363}
9364
3c4c9b81
PZ
/*
 * Write the D_COMP register: on HSW this goes through the pcode mailbox
 * (under rps.hw_lock), on BDW it is a plain MMIO write with a posting
 * read.
 */
 9365static void hsw_write_dcomp(struct drm_i915_private *dev_priv, uint32_t val)
 9366{
 9367 struct drm_device *dev = dev_priv->dev;
 9368
 9369 if (IS_HASWELL(dev)) {
 9370 mutex_lock(&dev_priv->rps.hw_lock);
 9371 if (sandybridge_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP,
 9372 val))
f475dadf 9373 DRM_ERROR("Failed to write to D_COMP\n");
3c4c9b81
PZ
 9374 mutex_unlock(&dev_priv->rps.hw_lock);
 9375 } else {
9ccd5aeb
PZ
 9376 I915_WRITE(D_COMP_BDW, val);
 9377 POSTING_READ(D_COMP_BDW);
3c4c9b81 9378 }
be256dc7
PZ
 9379}
9380
 9381/*
 9382 * This function implements pieces of two sequences from BSpec:
 9383 * - Sequence for display software to disable LCPLL
 9384 * - Sequence for display software to allow package C8+
 9385 * The steps implemented here are just the steps that actually touch the LCPLL
 9386 * register. Callers should take care of disabling all the display engine
 9387 * functions, doing the mode unset, fixing interrupts, etc.
 9388 */
/*
 * Optionally switches the CD clock source to FCLK first, then disables
 * the PLL, disables D_COMP compensation, and optionally allows power
 * down; each step is verified with a bounded wait.
 */
 9389static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
 9390 bool switch_to_fclk, bool allow_power_down)
be256dc7
PZ
 9391{
 9392 uint32_t val;
 9393
 9394 assert_can_disable_lcpll(dev_priv);
 9395
 9396 val = I915_READ(LCPLL_CTL);
 9397
 9398 if (switch_to_fclk) {
 9399 val |= LCPLL_CD_SOURCE_FCLK;
 9400 I915_WRITE(LCPLL_CTL, val);
 9401
 9402 if (wait_for_atomic_us(I915_READ(LCPLL_CTL) &
 9403 LCPLL_CD_SOURCE_FCLK_DONE, 1))
 9404 DRM_ERROR("Switching to FCLK failed\n");
 9405
 9406 val = I915_READ(LCPLL_CTL);
 9407 }
 9408
 9409 val |= LCPLL_PLL_DISABLE;
 9410 I915_WRITE(LCPLL_CTL, val);
 9411 POSTING_READ(LCPLL_CTL);
 9412
 9413 if (wait_for((I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK) == 0, 1))
 9414 DRM_ERROR("LCPLL still locked\n");
 9415
9ccd5aeb 9416 val = hsw_read_dcomp(dev_priv);
be256dc7 9417 val |= D_COMP_COMP_DISABLE;
3c4c9b81 9418 hsw_write_dcomp(dev_priv, val);
be256dc7
PZ
 9419 ndelay(100);
 9420
9ccd5aeb
PZ
 9421 if (wait_for((hsw_read_dcomp(dev_priv) & D_COMP_RCOMP_IN_PROGRESS) == 0,
 9422 1))
be256dc7
PZ
 9423 DRM_ERROR("D_COMP RCOMP still in progress\n");
 9424
 9425 if (allow_power_down) {
 9426 val = I915_READ(LCPLL_CTL);
 9427 val |= LCPLL_POWER_DOWN_ALLOW;
 9428 I915_WRITE(LCPLL_CTL, val);
 9429 POSTING_READ(LCPLL_CTL);
 9430 }
 9431}
9432
 9433/*
 9434 * Fully restores LCPLL, disallowing power down and switching back to LCPLL
 9435 * source.
 9436 */
/*
 * Early-returns if LCPLL is already locked with no FCLK/power-down bits
 * set. Holds forcewake across the sequence to keep the device out of
 * PC8 while the PLL is being brought back up.
 */
 9437static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
be256dc7
PZ
 9438{
 9439 uint32_t val;
 9440
 9441 val = I915_READ(LCPLL_CTL);
 9442
 9443 if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
 9444 LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
 9445 return;
 9446
a8a8bd54
PZ
 9447 /*
 9448 * Make sure we're not on PC8 state before disabling PC8, otherwise
 9449 * we'll hang the machine. To prevent PC8 state, just enable force_wake.
a8a8bd54 9450 */
59bad947 9451 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
215733fa 9452
be256dc7
PZ
 9453 if (val & LCPLL_POWER_DOWN_ALLOW) {
 9454 val &= ~LCPLL_POWER_DOWN_ALLOW;
 9455 I915_WRITE(LCPLL_CTL, val);
35d8f2eb 9456 POSTING_READ(LCPLL_CTL);
be256dc7
PZ
 9457 }
 9458
9ccd5aeb 9459 val = hsw_read_dcomp(dev_priv);
be256dc7
PZ
 9460 val |= D_COMP_COMP_FORCE;
 9461 val &= ~D_COMP_COMP_DISABLE;
3c4c9b81 9462 hsw_write_dcomp(dev_priv, val);
be256dc7
PZ
 9463
 9464 val = I915_READ(LCPLL_CTL);
 9465 val &= ~LCPLL_PLL_DISABLE;
 9466 I915_WRITE(LCPLL_CTL, val);
 9467
 9468 if (wait_for(I915_READ(LCPLL_CTL) & LCPLL_PLL_LOCK, 5))
 9469 DRM_ERROR("LCPLL not locked yet\n");
 9470
 9471 if (val & LCPLL_CD_SOURCE_FCLK) {
 9472 val = I915_READ(LCPLL_CTL);
 9473 val &= ~LCPLL_CD_SOURCE_FCLK;
 9474 I915_WRITE(LCPLL_CTL, val);
 9475
 9476 if (wait_for_atomic_us((I915_READ(LCPLL_CTL) &
 9477 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
 9478 DRM_ERROR("Switching back to LCPLL failed\n");
 9479 }
215733fa 9480
59bad947 9481 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
b6283055 9482 intel_update_cdclk(dev_priv->dev);
be256dc7
PZ
 9483}
9484
765dab67
PZ
 9485/*
 9486 * Package states C8 and deeper are really deep PC states that can only be
 9487 * reached when all the devices on the system allow it, so even if the graphics
 9488 * device allows PC8+, it doesn't mean the system will actually get to these
 9489 * states. Our driver only allows PC8+ when going into runtime PM.
 9490 *
 9491 * The requirements for PC8+ are that all the outputs are disabled, the power
 9492 * well is disabled and most interrupts are disabled, and these are also
 9493 * requirements for runtime PM. When these conditions are met, we manually do
 9494 * the other conditions: disable the interrupts, clocks and switch LCPLL refclk
 9495 * to Fclk. If we're in PC8+ and we get an non-hotplug interrupt, we can hard
 9496 * hang the machine.
 9497 *
 9498 * When we really reach PC8 or deeper states (not just when we allow it) we lose
 9499 * the state of some registers, so when we come back from PC8+ we need to
 9500 * restore this state. We don't get into PC8+ if we're not in RC6, so we don't
 9501 * need to take care of the registers kept by RC6. Notice that this happens even
 9502 * if we don't put the device in PCI D3 state (which is what currently happens
 9503 * because of the runtime PM support).
 9504 *
 9505 * For more, read "Display Sequences for Package C8" on the hardware
 9506 * documentation.
 9507 */
/* Allow package C8+: gate the LP PCH partition (LPT-LP only), disable
 * the DP clkout and switch LCPLL to FCLK / allow power down. */
a14cb6fc 9508void hsw_enable_pc8(struct drm_i915_private *dev_priv)
c67a470b 9509{
c67a470b
PZ
 9510 struct drm_device *dev = dev_priv->dev;
 9511 uint32_t val;
 9512
c67a470b
PZ
 9513 DRM_DEBUG_KMS("Enabling package C8+\n");
 9514
c2699524 9515 if (HAS_PCH_LPT_LP(dev)) {
c67a470b
PZ
 9516 val = I915_READ(SOUTH_DSPCLK_GATE_D);
 9517 val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
 9518 I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
 9519 }
 9520
 9521 lpt_disable_clkout_dp(dev);
c67a470b
PZ
 9522 hsw_disable_lcpll(dev_priv, true, true);
 9523}
9524
/*
 * Undo hsw_enable_pc8(): restore LCPLL, re-init the PCH reference clock
 * and (on LPT-LP) un-gate the LP PCH partition.
 */
a14cb6fc 9525void hsw_disable_pc8(struct drm_i915_private *dev_priv)
c67a470b
PZ
 9526{
 9527 struct drm_device *dev = dev_priv->dev;
 9528 uint32_t val;
 9529
c67a470b
PZ
 9530 DRM_DEBUG_KMS("Disabling package C8+\n");
 9531
 9532 hsw_restore_lcpll(dev_priv);
c67a470b
PZ
 9533 lpt_init_pch_refclk(dev);
 9534
c2699524 9535 if (HAS_PCH_LPT_LP(dev)) {
c67a470b
PZ
 9536 val = I915_READ(SOUTH_DSPCLK_GATE_D);
 9537 val |= PCH_LP_PARTITION_LEVEL_DISABLE;
 9538 I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
 9539 }
c67a470b
PZ
 9540}
9541
/*
 * Atomic commit hook: program the CDCLK frequency that was computed
 * during the check phase (stored in the old state's dev_cdclk).
 */
27c329ed 9542static void broxton_modeset_commit_cdclk(struct drm_atomic_state *old_state)
f8437dd1 9543{
a821fc46 9544 struct drm_device *dev = old_state->dev;
1a617b77
ML
 9545 struct intel_atomic_state *old_intel_state =
 9546 to_intel_atomic_state(old_state);
 9547 unsigned int req_cdclk = old_intel_state->dev_cdclk;
f8437dd1 9548
c6c4696f 9549 broxton_set_cdclk(to_i915(dev), req_cdclk);
f8437dd1
VK
 9550}
9551
b432e5cf 9552/* compute the max rate for new configuration */
/*
 * Recompute per-pipe minimum pixel clocks for the new atomic state
 * (starting from the current dev_priv values and overriding entries for
 * crtcs in @state), then return the maximum across all pipes. BDW with
 * IPS enabled gets a 100/95 fudge factor since pixel rate must stay
 * under 95% of cdclk there.
 */
27c329ed 9553static int ilk_max_pixel_rate(struct drm_atomic_state *state)
b432e5cf 9554{
565602d7
ML
 9555 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
 9556 struct drm_i915_private *dev_priv = state->dev->dev_private;
 9557 struct drm_crtc *crtc;
 9558 struct drm_crtc_state *cstate;
27c329ed 9559 struct intel_crtc_state *crtc_state;
565602d7
ML
 9560 unsigned max_pixel_rate = 0, i;
 9561 enum pipe pipe;
b432e5cf 9562
565602d7
ML
 9563 memcpy(intel_state->min_pixclk, dev_priv->min_pixclk,
 9564 sizeof(intel_state->min_pixclk));
27c329ed 9565
565602d7
ML
 9566 for_each_crtc_in_state(state, crtc, cstate, i) {
 9567 int pixel_rate;
27c329ed 9568
565602d7
ML
 9569 crtc_state = to_intel_crtc_state(cstate);
 9570 if (!crtc_state->base.enable) {
 9571 intel_state->min_pixclk[i] = 0;
b432e5cf 9572 continue;
565602d7 9573 }
b432e5cf 9574
27c329ed 9575 pixel_rate = ilk_pipe_pixel_rate(crtc_state);
b432e5cf
VS
 9576
 9577 /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
565602d7 9578 if (IS_BROADWELL(dev_priv) && crtc_state->ips_enabled)
b432e5cf
VS
 9579 pixel_rate = DIV_ROUND_UP(pixel_rate * 100, 95);
 9580
565602d7 9581 intel_state->min_pixclk[i] = pixel_rate;
b432e5cf
VS
 9582 }
 9583
565602d7
ML
 9584 for_each_pipe(dev_priv, pipe)
 9585 max_pixel_rate = max(intel_state->min_pixclk[pipe], max_pixel_rate);
 9586
b432e5cf
VS
 9587 return max_pixel_rate;
 9588}
9589
/*
 * Program a new CDCLK frequency on BDW: notify pcode of the pending
 * change, temporarily switch the CD clock source to FCLK, select the new
 * LCPLL frequency, switch back, then report the final frequency (as a
 * divider code) to pcode and update CDCLK_FREQ. Bails out with a WARN if
 * LCPLL is not in the expected enabled state or cdclk is unsupported.
 */
 9590static void broadwell_set_cdclk(struct drm_device *dev, int cdclk)
 9591{
 9592 struct drm_i915_private *dev_priv = dev->dev_private;
 9593 uint32_t val, data;
 9594 int ret;
 9595
 9596 if (WARN((I915_READ(LCPLL_CTL) &
 9597 (LCPLL_PLL_DISABLE | LCPLL_PLL_LOCK |
 9598 LCPLL_CD_CLOCK_DISABLE | LCPLL_ROOT_CD_CLOCK_DISABLE |
 9599 LCPLL_CD2X_CLOCK_DISABLE | LCPLL_POWER_DOWN_ALLOW |
 9600 LCPLL_CD_SOURCE_FCLK)) != LCPLL_PLL_LOCK,
 9601 "trying to change cdclk frequency with cdclk not enabled\n"))
 9602 return;
 9603
 9604 mutex_lock(&dev_priv->rps.hw_lock);
 9605 ret = sandybridge_pcode_write(dev_priv,
 9606 BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0);
 9607 mutex_unlock(&dev_priv->rps.hw_lock);
 9608 if (ret) {
 9609 DRM_ERROR("failed to inform pcode about cdclk change\n");
 9610 return;
 9611 }
 9612
 9613 val = I915_READ(LCPLL_CTL);
 9614 val |= LCPLL_CD_SOURCE_FCLK;
 9615 I915_WRITE(LCPLL_CTL, val);
 9616
5ba00178
TU
 9617 if (wait_for_us(I915_READ(LCPLL_CTL) &
 9618 LCPLL_CD_SOURCE_FCLK_DONE, 1))
b432e5cf
VS
 9619 DRM_ERROR("Switching to FCLK failed\n");
 9620
 9621 val = I915_READ(LCPLL_CTL);
 9622 val &= ~LCPLL_CLK_FREQ_MASK;
 9623
 9624 switch (cdclk) {
 9625 case 450000:
 9626 val |= LCPLL_CLK_FREQ_450;
 9627 data = 0;
 9628 break;
 9629 case 540000:
 9630 val |= LCPLL_CLK_FREQ_54O_BDW;
 9631 data = 1;
 9632 break;
 9633 case 337500:
 9634 val |= LCPLL_CLK_FREQ_337_5_BDW;
 9635 data = 2;
 9636 break;
 9637 case 675000:
 9638 val |= LCPLL_CLK_FREQ_675_BDW;
 9639 data = 3;
 9640 break;
 9641 default:
 9642 WARN(1, "invalid cdclk frequency\n");
 9643 return;
 9644 }
 9645
 9646 I915_WRITE(LCPLL_CTL, val);
 9647
 9648 val = I915_READ(LCPLL_CTL);
 9649 val &= ~LCPLL_CD_SOURCE_FCLK;
 9650 I915_WRITE(LCPLL_CTL, val);
 9651
5ba00178
TU
 9652 if (wait_for_us((I915_READ(LCPLL_CTL) &
 9653 LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
b432e5cf
VS
 9654 DRM_ERROR("Switching back to LCPLL failed\n");
 9655
 9656 mutex_lock(&dev_priv->rps.hw_lock);
 9657 sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, data);
 9658 mutex_unlock(&dev_priv->rps.hw_lock);
 9659
7f1052a8
VS
 9660 I915_WRITE(CDCLK_FREQ, DIV_ROUND_CLOSEST(cdclk, 1000) - 1);
 9661
b432e5cf
VS
 9662 intel_update_cdclk(dev);
 9663
 9664 WARN(cdclk != dev_priv->cdclk_freq,
 9665 "cdclk requested %d kHz but got %d kHz\n",
 9666 cdclk, dev_priv->cdclk_freq);
 9667}
9668
587c7914
VS
/*
 * Pick the CDCLK frequency (in kHz) for Broadwell given the maximum
 * pixel clock: the slowest of the four supported steps that can still
 * carry @max_pixclk.
 */
static int broadwell_calc_cdclk(int max_pixclk)
{
	static const int bdw_cdclk_steps[] = { 337500, 450000, 540000 };
	unsigned int i;

	/* First (slowest) step that accommodates the pixel rate wins. */
	for (i = 0; i < sizeof(bdw_cdclk_steps) / sizeof(bdw_cdclk_steps[0]); i++) {
		if (max_pixclk <= bdw_cdclk_steps[i])
			return bdw_cdclk_steps[i];
	}

	return 675000;
}
9680
/*
 * Atomic check hook: derive the required BDW CDCLK from the state's max
 * pixel rate, reject it if it exceeds the platform maximum, and record
 * both the logical cdclk and the actual dev_cdclk (dropped to the
 * minimum step when no crtcs are active).
 */
27c329ed 9681static int broadwell_modeset_calc_cdclk(struct drm_atomic_state *state)
b432e5cf 9682{
27c329ed 9683 struct drm_i915_private *dev_priv = to_i915(state->dev);
1a617b77 9684 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
27c329ed 9685 int max_pixclk = ilk_max_pixel_rate(state);
b432e5cf
VS
 9686 int cdclk;
 9687
 9688 /*
 9689 * FIXME should also account for plane ratio
 9690 * once 64bpp pixel formats are supported.
 9691 */
587c7914 9692 cdclk = broadwell_calc_cdclk(max_pixclk);
b432e5cf 9693
b432e5cf 9694 if (cdclk > dev_priv->max_cdclk_freq) {
63ba534e
ML
 9695 DRM_DEBUG_KMS("requested cdclk (%d kHz) exceeds max (%d kHz)\n",
 9696 cdclk, dev_priv->max_cdclk_freq);
 9697 return -EINVAL;
b432e5cf
VS
 9698 }
 9699
1a617b77
ML
 9700 intel_state->cdclk = intel_state->dev_cdclk = cdclk;
 9701 if (!intel_state->active_crtcs)
587c7914 9702 intel_state->dev_cdclk = broadwell_calc_cdclk(0);
b432e5cf
VS
 9703
 9704 return 0;
 9705}
9706
/*
 * Atomic commit hook: program the BDW CDCLK frequency computed during
 * the check phase (stored in the old state's dev_cdclk).
 */
27c329ed 9707static void broadwell_modeset_commit_cdclk(struct drm_atomic_state *old_state)
b432e5cf 9708{
27c329ed 9709 struct drm_device *dev = old_state->dev;
1a617b77
ML
 9710 struct intel_atomic_state *old_intel_state =
 9711 to_intel_atomic_state(old_state);
 9712 unsigned req_cdclk = old_intel_state->dev_cdclk;
b432e5cf 9713
27c329ed 9714 broadwell_set_cdclk(dev, req_cdclk);
b432e5cf
VS
 9715}
9716
190f68c5
ACO
/*
 * HSW+ clock computation: DDI PLL selection is delegated to
 * intel_ddi_pll_select() for everything except DSI (which has its own
 * PLL handling). Returns 0 on success, -EINVAL if no PLL can be found.
 */
 9717static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
 9718 struct intel_crtc_state *crtc_state)
09b4ddf9 9719{
af3997b5
MK
 9720 struct intel_encoder *intel_encoder =
 9721 intel_ddi_get_crtc_new_encoder(crtc_state);
 9722
 9723 if (intel_encoder->type != INTEL_OUTPUT_DSI) {
 9724 if (!intel_ddi_pll_select(crtc, crtc_state))
 9725 return -EINVAL;
 9726 }
716c2e55 9727
c7653199 9728 crtc->lowfreq_avail = false;
644cef34 9729
c8f7a0db 9730 return 0;
79e53945
JB
 9731}
9732
3760b59c
S
/*
 * BXT state readout: the port->PLL mapping is fixed (port A/B/C ->
 * DPLL0/1/2), so derive ddi_pll_sel and shared_dpll directly from the
 * port.
 */
 9733static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
 9734 enum port port,
 9735 struct intel_crtc_state *pipe_config)
 9736{
8106ddbd
ACO
 9737 enum intel_dpll_id id;
 9738
3760b59c
S
 9739 switch (port) {
 9740 case PORT_A:
 9741 pipe_config->ddi_pll_sel = SKL_DPLL0;
08250c4b 9742 id = DPLL_ID_SKL_DPLL0;
3760b59c
S
 9743 break;
 9744 case PORT_B:
 9745 pipe_config->ddi_pll_sel = SKL_DPLL1;
08250c4b 9746 id = DPLL_ID_SKL_DPLL1;
3760b59c
S
 9747 break;
 9748 case PORT_C:
 9749 pipe_config->ddi_pll_sel = SKL_DPLL2;
08250c4b 9750 id = DPLL_ID_SKL_DPLL2;
3760b59c
S
 9751 break;
 9752 default:
 9753 DRM_ERROR("Incorrect port type\n");
8106ddbd 9754 return;
3760b59c 9755 }
8106ddbd
ACO
 9756
 9757 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
3760b59c
S
 9758}
9759
96b7dfb7
S
/*
 * SKL state readout: decode the per-port clock select field from
 * DPLL_CTRL2 into ddi_pll_sel and map it to the corresponding shared
 * DPLL.
 */
 9760static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
 9761 enum port port,
5cec258b 9762 struct intel_crtc_state *pipe_config)
96b7dfb7 9763{
8106ddbd 9764 enum intel_dpll_id id;
a3c988ea 9765 u32 temp;
96b7dfb7
S
 9766
 9767 temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
 9768 pipe_config->ddi_pll_sel = temp >> (port * 3 + 1);
 9769
 9770 switch (pipe_config->ddi_pll_sel) {
3148ade7 9771 case SKL_DPLL0:
a3c988ea
ACO
 9772 id = DPLL_ID_SKL_DPLL0;
 9773 break;
96b7dfb7 9774 case SKL_DPLL1:
8106ddbd 9775 id = DPLL_ID_SKL_DPLL1;
96b7dfb7
S
 9776 break;
 9777 case SKL_DPLL2:
8106ddbd 9778 id = DPLL_ID_SKL_DPLL2;
96b7dfb7
S
 9779 break;
 9780 case SKL_DPLL3:
8106ddbd 9781 id = DPLL_ID_SKL_DPLL3;
96b7dfb7 9782 break;
8106ddbd
ACO
 9783 default:
 9784 MISSING_CASE(pipe_config->ddi_pll_sel);
 9785 return;
96b7dfb7 9786 }
8106ddbd
ACO
 9787
 9788 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
96b7dfb7
S
 9789}
9790
7d2c8175
DL
/*
 * HSW state readout: translate the PORT_CLK_SEL register value into the
 * corresponding shared DPLL id (WRPLL1/2, SPLL, or one of the fixed
 * LCPLL taps). PORT_CLK_SEL_NONE leaves shared_dpll untouched.
 */
 9791static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
 9792 enum port port,
5cec258b 9793 struct intel_crtc_state *pipe_config)
7d2c8175 9794{
8106ddbd
ACO
 9795 enum intel_dpll_id id;
 9796
7d2c8175
DL
 9797 pipe_config->ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
 9798
 9799 switch (pipe_config->ddi_pll_sel) {
 9800 case PORT_CLK_SEL_WRPLL1:
8106ddbd 9801 id = DPLL_ID_WRPLL1;
7d2c8175
DL
 9802 break;
 9803 case PORT_CLK_SEL_WRPLL2:
8106ddbd 9804 id = DPLL_ID_WRPLL2;
7d2c8175 9805 break;
00490c22 9806 case PORT_CLK_SEL_SPLL:
8106ddbd 9807 id = DPLL_ID_SPLL;
79bd23da 9808 break;
9d16da65
ACO
 9809 case PORT_CLK_SEL_LCPLL_810:
 9810 id = DPLL_ID_LCPLL_810;
 9811 break;
 9812 case PORT_CLK_SEL_LCPLL_1350:
 9813 id = DPLL_ID_LCPLL_1350;
 9814 break;
 9815 case PORT_CLK_SEL_LCPLL_2700:
 9816 id = DPLL_ID_LCPLL_2700;
 9817 break;
8106ddbd
ACO
 9818 default:
 9819 MISSING_CASE(pipe_config->ddi_pll_sel);
 9820 /* fall through */
 9821 case PORT_CLK_SEL_NONE:
8106ddbd 9822 return;
7d2c8175 9823 }
8106ddbd
ACO
 9824
 9825 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
7d2c8175
DL
 9826}
9827
cf30429e
JN
/*
 * Determine which CPU transcoder drives this crtc (accounting for the
 * eDP transcoder possibly being routed to it), grab that transcoder's
 * power domain (recorded in @power_domain_mask for the caller to
 * release), and report whether the transcoder's pipe is enabled.
 */
 9828static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
 9829 struct intel_crtc_state *pipe_config,
 9830 unsigned long *power_domain_mask)
 9831{
 9832 struct drm_device *dev = crtc->base.dev;
 9833 struct drm_i915_private *dev_priv = dev->dev_private;
 9834 enum intel_display_power_domain power_domain;
 9835 u32 tmp;
 9836
d9a7bc67
ID
 9837 /*
 9838 * The pipe->transcoder mapping is fixed with the exception of the eDP
 9839 * transcoder handled below.
 9840 */
cf30429e
JN
 9841 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
 9842
 9843 /*
 9844 * XXX: Do intel_display_power_get_if_enabled before reading this (for
 9845 * consistency and less surprising code; it's in always on power).
 9846 */
 9847 tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
 9848 if (tmp & TRANS_DDI_FUNC_ENABLE) {
 9849 enum pipe trans_edp_pipe;
 9850 switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
 9851 default:
 9852 WARN(1, "unknown pipe linked to edp transcoder\n");
 9853 case TRANS_DDI_EDP_INPUT_A_ONOFF:
 9854 case TRANS_DDI_EDP_INPUT_A_ON:
 9855 trans_edp_pipe = PIPE_A;
 9856 break;
 9857 case TRANS_DDI_EDP_INPUT_B_ONOFF:
 9858 trans_edp_pipe = PIPE_B;
 9859 break;
 9860 case TRANS_DDI_EDP_INPUT_C_ONOFF:
 9861 trans_edp_pipe = PIPE_C;
 9862 break;
 9863 }
 9864
 9865 if (trans_edp_pipe == crtc->pipe)
 9866 pipe_config->cpu_transcoder = TRANSCODER_EDP;
 9867 }
 9868
 9869 power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
 9870 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
 9871 return false;
 9872 *power_domain_mask |= BIT(power_domain);
 9873
 9874 tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
 9875
 9876 return tmp & PIPECONF_ENABLE;
 9877}
9878
/*
 * Probe the BXT DSI transcoders (ports A and C) and record whether one of
 * them drives @crtc.  Acquired power domains are OR'ed into
 * @power_domain_mask.  Returns true iff a DSI encoder feeds this crtc.
 */
static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
					 struct intel_crtc_state *pipe_config,
					 unsigned long *power_domain_mask)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum intel_display_power_domain power_domain;
	enum port port;
	enum transcoder cpu_transcoder;
	u32 tmp;

	pipe_config->has_dsi_encoder = false;

	for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
		/* Port A maps to the DSI A transcoder, port C to DSI C. */
		if (port == PORT_A)
			cpu_transcoder = TRANSCODER_DSI_A;
		else
			cpu_transcoder = TRANSCODER_DSI_C;

		/* Skip this port if its transcoder power well is off. */
		power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
		if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
			continue;
		*power_domain_mask |= BIT(power_domain);

		/*
		 * The PLL needs to be enabled with a valid divider
		 * configuration, otherwise accessing DSI registers will hang
		 * the machine. See BSpec North Display Engine
		 * registers/MIPI[BXT]. We can break out here early, since we
		 * need the same DSI PLL to be enabled for both DSI ports.
		 */
		if (!intel_dsi_pll_is_enabled(dev_priv))
			break;

		/* XXX: this works for video mode only */
		tmp = I915_READ(BXT_MIPI_PORT_CTRL(port));
		if (!(tmp & DPI_ENABLE))
			continue;

		/* Port must be wired to this crtc's pipe. */
		tmp = I915_READ(MIPI_CTRL(port));
		if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
			continue;

		pipe_config->cpu_transcoder = cpu_transcoder;
		pipe_config->has_dsi_encoder = true;
		break;
	}

	return pipe_config->has_dsi_encoder;
}
9929
/*
 * Read out the DDI port wiring for @crtc: which port the transcoder is
 * attached to, which shared DPLL clocks it (platform-specific helper),
 * and whether the HSW FDI/PCH transcoder path (DDI E) is in use.
 */
static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
				       struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_shared_dpll *pll;
	enum port port;
	uint32_t tmp;

	tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));

	port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;

	/* PLL read-out differs per platform generation. */
	if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
		skylake_get_ddi_pll(dev_priv, port, pipe_config);
	else if (IS_BROXTON(dev))
		bxt_get_ddi_pll(dev_priv, port, pipe_config);
	else
		haswell_get_ddi_pll(dev_priv, port, pipe_config);

	/* Cross-check software PLL state against the hardware. */
	pll = pipe_config->shared_dpll;
	if (pll) {
		WARN_ON(!pll->funcs.get_hw_state(dev_priv, pll,
						 &pipe_config->dpll_hw_state));
	}

	/*
	 * Haswell has only FDI/PCH transcoder A. It is which is connected to
	 * DDI E. So just check whether this pipe is wired to DDI E and whether
	 * the PCH transcoder is on.
	 */
	if (INTEL_INFO(dev)->gen < 9 &&
	    (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
		pipe_config->has_pch_encoder = true;

		tmp = I915_READ(FDI_RX_CTL(PIPE_A));
		pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
					  FDI_DP_PORT_WIDTH_SHIFT) + 1;

		ironlake_get_fdi_m_n_config(crtc, pipe_config);
	}
}
9972
0e8ffe1b 9973static bool haswell_get_pipe_config(struct intel_crtc *crtc,
5cec258b 9974 struct intel_crtc_state *pipe_config)
0e8ffe1b
DV
9975{
9976 struct drm_device *dev = crtc->base.dev;
9977 struct drm_i915_private *dev_priv = dev->dev_private;
1729050e
ID
9978 enum intel_display_power_domain power_domain;
9979 unsigned long power_domain_mask;
cf30429e 9980 bool active;
0e8ffe1b 9981
1729050e
ID
9982 power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
9983 if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
b5482bd0 9984 return false;
1729050e
ID
9985 power_domain_mask = BIT(power_domain);
9986
8106ddbd 9987 pipe_config->shared_dpll = NULL;
c0d43d62 9988
cf30429e 9989 active = hsw_get_transcoder_state(crtc, pipe_config, &power_domain_mask);
eccb140b 9990
4d1de975
JN
9991 if (IS_BROXTON(dev_priv)) {
9992 bxt_get_dsi_transcoder_state(crtc, pipe_config,
9993 &power_domain_mask);
9994 WARN_ON(active && pipe_config->has_dsi_encoder);
9995 if (pipe_config->has_dsi_encoder)
9996 active = true;
9997 }
9998
cf30429e 9999 if (!active)
1729050e 10000 goto out;
0e8ffe1b 10001
4d1de975
JN
10002 if (!pipe_config->has_dsi_encoder) {
10003 haswell_get_ddi_port_state(crtc, pipe_config);
10004 intel_get_pipe_timings(crtc, pipe_config);
10005 }
627eb5a3 10006
bc58be60 10007 intel_get_pipe_src_size(crtc, pipe_config);
1bd1bd80 10008
05dc698c
LL
10009 pipe_config->gamma_mode =
10010 I915_READ(GAMMA_MODE(crtc->pipe)) & GAMMA_MODE_MODE_MASK;
10011
a1b2278e
CK
10012 if (INTEL_INFO(dev)->gen >= 9) {
10013 skl_init_scalers(dev, crtc, pipe_config);
10014 }
10015
af99ceda
CK
10016 if (INTEL_INFO(dev)->gen >= 9) {
10017 pipe_config->scaler_state.scaler_id = -1;
10018 pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX);
10019 }
10020
1729050e
ID
10021 power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
10022 if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
10023 power_domain_mask |= BIT(power_domain);
1c132b44 10024 if (INTEL_INFO(dev)->gen >= 9)
bd2e244f 10025 skylake_get_pfit_config(crtc, pipe_config);
ff6d9f55 10026 else
1c132b44 10027 ironlake_get_pfit_config(crtc, pipe_config);
bd2e244f 10028 }
88adfff1 10029
e59150dc
JB
10030 if (IS_HASWELL(dev))
10031 pipe_config->ips_enabled = hsw_crtc_supports_ips(crtc) &&
10032 (I915_READ(IPS_CTL) & IPS_ENABLE);
42db64ef 10033
4d1de975
JN
10034 if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
10035 !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
ebb69c95
CT
10036 pipe_config->pixel_multiplier =
10037 I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
10038 } else {
10039 pipe_config->pixel_multiplier = 1;
10040 }
6c49f241 10041
1729050e
ID
10042out:
10043 for_each_power_domain(power_domain, power_domain_mask)
10044 intel_display_power_put(dev_priv, power_domain);
10045
cf30429e 10046 return active;
0e8ffe1b
DV
10047}
10048
/*
 * Program the i845/i865 cursor.  @base is the GTT address of the cursor
 * image; a NULL/invisible @plane_state disables the cursor (cntl/size 0).
 *
 * Register write order matters on these chipsets: base/size/stride may
 * only change while the cursor is disabled, so the cursor is turned off
 * first when any of them differ from the cached values.
 */
static void i845_update_cursor(struct drm_crtc *crtc, u32 base,
			       const struct intel_plane_state *plane_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t cntl = 0, size = 0;

	if (plane_state && plane_state->visible) {
		unsigned int width = plane_state->base.crtc_w;
		unsigned int height = plane_state->base.crtc_h;
		/* 4 bytes per pixel, stride rounded to a power of two. */
		unsigned int stride = roundup_pow_of_two(width) * 4;

		switch (stride) {
		default:
			WARN_ONCE(1, "Invalid cursor width/stride, width=%u, stride=%u\n",
				  width, stride);
			stride = 256;
			/* fallthrough */
		case 256:
		case 512:
		case 1024:
		case 2048:
			break;
		}

		cntl |= CURSOR_ENABLE |
			CURSOR_GAMMA_ENABLE |
			CURSOR_FORMAT_ARGB |
			CURSOR_STRIDE(stride);

		/* CURSIZE packs height in bits 12+ and width in the low bits. */
		size = (height << 12) | width;
	}

	if (intel_crtc->cursor_cntl != 0 &&
	    (intel_crtc->cursor_base != base ||
	     intel_crtc->cursor_size != size ||
	     intel_crtc->cursor_cntl != cntl)) {
		/* On these chipsets we can only modify the base/size/stride
		 * whilst the cursor is disabled.
		 */
		I915_WRITE(CURCNTR(PIPE_A), 0);
		POSTING_READ(CURCNTR(PIPE_A));
		intel_crtc->cursor_cntl = 0;
	}

	if (intel_crtc->cursor_base != base) {
		I915_WRITE(CURBASE(PIPE_A), base);
		intel_crtc->cursor_base = base;
	}

	if (intel_crtc->cursor_size != size) {
		I915_WRITE(CURSIZE, size);
		intel_crtc->cursor_size = size;
	}

	/* Re-enable (or leave disabled) last, with a posting read. */
	if (intel_crtc->cursor_cntl != cntl) {
		I915_WRITE(CURCNTR(PIPE_A), cntl);
		POSTING_READ(CURCNTR(PIPE_A));
		intel_crtc->cursor_cntl = cntl;
	}
}
10111
/*
 * Program the i9xx+ per-pipe cursor.  A NULL/invisible @plane_state
 * disables the cursor (cntl == 0).  Only square 64/128/256 ARGB cursors
 * are supported; other sizes trip MISSING_CASE and bail out.
 *
 * CURCNTR is written only when it changed; the CURBASE write at the end
 * commits all cursor updates on the next vblank.
 */
static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base,
			       const struct intel_plane_state *plane_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	int pipe = intel_crtc->pipe;
	uint32_t cntl = 0;

	if (plane_state && plane_state->visible) {
		cntl = MCURSOR_GAMMA_ENABLE;
		switch (plane_state->base.crtc_w) {
		case 64:
			cntl |= CURSOR_MODE_64_ARGB_AX;
			break;
		case 128:
			cntl |= CURSOR_MODE_128_ARGB_AX;
			break;
		case 256:
			cntl |= CURSOR_MODE_256_ARGB_AX;
			break;
		default:
			MISSING_CASE(plane_state->base.crtc_w);
			return;
		}
		cntl |= pipe << 28; /* Connect to correct pipe */

		if (HAS_DDI(dev))
			cntl |= CURSOR_PIPE_CSC_ENABLE;

		if (plane_state->base.rotation == BIT(DRM_ROTATE_180))
			cntl |= CURSOR_ROTATE_180;
	}

	if (intel_crtc->cursor_cntl != cntl) {
		I915_WRITE(CURCNTR(pipe), cntl);
		POSTING_READ(CURCNTR(pipe));
		intel_crtc->cursor_cntl = cntl;
	}

	/* and commit changes on next vblank */
	I915_WRITE(CURBASE(pipe), base);
	POSTING_READ(CURBASE(pipe));

	intel_crtc->cursor_base = base;
}
10158
cda4b7d3 10159/* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... */
6b383a7f 10160static void intel_crtc_update_cursor(struct drm_crtc *crtc,
55a08b3f 10161 const struct intel_plane_state *plane_state)
cda4b7d3
CW
10162{
10163 struct drm_device *dev = crtc->dev;
10164 struct drm_i915_private *dev_priv = dev->dev_private;
10165 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
10166 int pipe = intel_crtc->pipe;
55a08b3f
ML
10167 u32 base = intel_crtc->cursor_addr;
10168 u32 pos = 0;
cda4b7d3 10169
55a08b3f
ML
10170 if (plane_state) {
10171 int x = plane_state->base.crtc_x;
10172 int y = plane_state->base.crtc_y;
cda4b7d3 10173
55a08b3f
ML
10174 if (x < 0) {
10175 pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
10176 x = -x;
10177 }
10178 pos |= x << CURSOR_X_SHIFT;
cda4b7d3 10179
55a08b3f
ML
10180 if (y < 0) {
10181 pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
10182 y = -y;
10183 }
10184 pos |= y << CURSOR_Y_SHIFT;
10185
10186 /* ILK+ do this automagically */
10187 if (HAS_GMCH_DISPLAY(dev) &&
10188 plane_state->base.rotation == BIT(DRM_ROTATE_180)) {
10189 base += (plane_state->base.crtc_h *
10190 plane_state->base.crtc_w - 1) * 4;
10191 }
cda4b7d3 10192 }
cda4b7d3 10193
5efb3e28
VS
10194 I915_WRITE(CURPOS(pipe), pos);
10195
8ac54669 10196 if (IS_845G(dev) || IS_I865G(dev))
55a08b3f 10197 i845_update_cursor(crtc, base, plane_state);
5efb3e28 10198 else
55a08b3f 10199 i9xx_update_cursor(crtc, base, plane_state);
cda4b7d3
CW
10200}
10201
/*
 * Validate a requested cursor size for this device.  Returns true when
 * width x height is a cursor geometry the hardware supports.
 */
static bool cursor_size_ok(struct drm_device *dev,
			   uint32_t width, uint32_t height)
{
	if (width == 0 || height == 0)
		return false;

	/*
	 * 845g/865g are special in that they are only limited by
	 * the width of their cursors, the height is arbitrary up to
	 * the precision of the register. Everything else requires
	 * square cursors, limited to a few power-of-two sizes.
	 */
	if (IS_845G(dev) || IS_I865G(dev)) {
		if ((width & 63) != 0)
			return false;

		if (width > (IS_845G(dev) ? 64 : 512))
			return false;

		if (height > 1023)
			return false;
	} else {
		/* Square cursor: width == height, so width|height == width. */
		switch (width | height) {
		case 256:
		case 128:
			if (IS_GEN2(dev))
				return false;
			/* fall through */
		case 64:
			break;
		default:
			return false;
		}
	}

	return true;
}
10238
/* VESA 640x480x72Hz mode to set on the pipe */
/* Fallback mode used by intel_get_load_detect_pipe() when the caller
 * does not supply one. */
static struct drm_display_mode load_detect_mode = {
	DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
		 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
};
10244
a8bb6818
DV
10245struct drm_framebuffer *
10246__intel_framebuffer_create(struct drm_device *dev,
10247 struct drm_mode_fb_cmd2 *mode_cmd,
10248 struct drm_i915_gem_object *obj)
d2dff872
CW
10249{
10250 struct intel_framebuffer *intel_fb;
10251 int ret;
10252
10253 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
dcb1394e 10254 if (!intel_fb)
d2dff872 10255 return ERR_PTR(-ENOMEM);
d2dff872
CW
10256
10257 ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
dd4916c5
DV
10258 if (ret)
10259 goto err;
d2dff872
CW
10260
10261 return &intel_fb->base;
dcb1394e 10262
dd4916c5 10263err:
dd4916c5 10264 kfree(intel_fb);
dd4916c5 10265 return ERR_PTR(ret);
d2dff872
CW
10266}
10267
b5ea642a 10268static struct drm_framebuffer *
a8bb6818
DV
10269intel_framebuffer_create(struct drm_device *dev,
10270 struct drm_mode_fb_cmd2 *mode_cmd,
10271 struct drm_i915_gem_object *obj)
10272{
10273 struct drm_framebuffer *fb;
10274 int ret;
10275
10276 ret = i915_mutex_lock_interruptible(dev);
10277 if (ret)
10278 return ERR_PTR(ret);
10279 fb = __intel_framebuffer_create(dev, mode_cmd, obj);
10280 mutex_unlock(&dev->struct_mutex);
10281
10282 return fb;
10283}
10284
d2dff872
CW
10285static u32
10286intel_framebuffer_pitch_for_width(int width, int bpp)
10287{
10288 u32 pitch = DIV_ROUND_UP(width * bpp, 8);
10289 return ALIGN(pitch, 64);
10290}
10291
10292static u32
10293intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp)
10294{
10295 u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp);
1267a26b 10296 return PAGE_ALIGN(pitch * mode->vdisplay);
d2dff872
CW
10297}
10298
10299static struct drm_framebuffer *
10300intel_framebuffer_create_for_mode(struct drm_device *dev,
10301 struct drm_display_mode *mode,
10302 int depth, int bpp)
10303{
dcb1394e 10304 struct drm_framebuffer *fb;
d2dff872 10305 struct drm_i915_gem_object *obj;
0fed39bd 10306 struct drm_mode_fb_cmd2 mode_cmd = { 0 };
d2dff872 10307
d37cd8a8 10308 obj = i915_gem_object_create(dev,
d2dff872 10309 intel_framebuffer_size_for_mode(mode, bpp));
fe3db79b
CW
10310 if (IS_ERR(obj))
10311 return ERR_CAST(obj);
d2dff872
CW
10312
10313 mode_cmd.width = mode->hdisplay;
10314 mode_cmd.height = mode->vdisplay;
308e5bcb
JB
10315 mode_cmd.pitches[0] = intel_framebuffer_pitch_for_width(mode_cmd.width,
10316 bpp);
5ca0c34a 10317 mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);
d2dff872 10318
dcb1394e
LW
10319 fb = intel_framebuffer_create(dev, &mode_cmd, obj);
10320 if (IS_ERR(fb))
10321 drm_gem_object_unreference_unlocked(&obj->base);
10322
10323 return fb;
d2dff872
CW
10324}
10325
/*
 * Check whether the fbdev framebuffer is large enough to display @mode;
 * if so, return it with an extra reference, otherwise NULL.  Without
 * fbdev emulation compiled in this always returns NULL.
 */
static struct drm_framebuffer *
mode_fits_in_fbdev(struct drm_device *dev,
		   struct drm_display_mode *mode)
{
#ifdef CONFIG_DRM_FBDEV_EMULATION
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	struct drm_framebuffer *fb;

	if (!dev_priv->fbdev)
		return NULL;

	if (!dev_priv->fbdev->fb)
		return NULL;

	obj = dev_priv->fbdev->fb->obj;
	BUG_ON(!obj);

	fb = &dev_priv->fbdev->fb->base;
	/* Rows of @mode must fit within the fbdev fb's pitch... */
	if (fb->pitches[0] < intel_framebuffer_pitch_for_width(mode->hdisplay,
							       fb->bits_per_pixel))
		return NULL;

	/* ...and all of its lines within the backing object. */
	if (obj->base.size < mode->vdisplay * fb->pitches[0])
		return NULL;

	/* Caller owns this reference. */
	drm_framebuffer_reference(fb);
	return fb;
#else
	return NULL;
#endif
}
10358
d3a40d1b
ACO
10359static int intel_modeset_setup_plane_state(struct drm_atomic_state *state,
10360 struct drm_crtc *crtc,
10361 struct drm_display_mode *mode,
10362 struct drm_framebuffer *fb,
10363 int x, int y)
10364{
10365 struct drm_plane_state *plane_state;
10366 int hdisplay, vdisplay;
10367 int ret;
10368
10369 plane_state = drm_atomic_get_plane_state(state, crtc->primary);
10370 if (IS_ERR(plane_state))
10371 return PTR_ERR(plane_state);
10372
10373 if (mode)
10374 drm_crtc_get_hv_timing(mode, &hdisplay, &vdisplay);
10375 else
10376 hdisplay = vdisplay = 0;
10377
10378 ret = drm_atomic_set_crtc_for_plane(plane_state, fb ? crtc : NULL);
10379 if (ret)
10380 return ret;
10381 drm_atomic_set_fb_for_plane(plane_state, fb);
10382 plane_state->crtc_x = 0;
10383 plane_state->crtc_y = 0;
10384 plane_state->crtc_w = hdisplay;
10385 plane_state->crtc_h = vdisplay;
10386 plane_state->src_x = x << 16;
10387 plane_state->src_y = y << 16;
10388 plane_state->src_w = hdisplay << 16;
10389 plane_state->src_h = vdisplay << 16;
10390
10391 return 0;
10392}
10393
/*
 * Temporarily light up a pipe on @connector so load-based output
 * detection can run.  The pre-existing state is captured in
 * old->restore_state and put back by intel_release_load_detect_pipe().
 *
 * Uses @ctx for modeset locking; on -EDEADLK the function backs off and
 * retries.  Returns true when a pipe was successfully enabled.
 */
bool intel_get_load_detect_pipe(struct drm_connector *connector,
				struct drm_display_mode *mode,
				struct intel_load_detect_pipe *old,
				struct drm_modeset_acquire_ctx *ctx)
{
	struct intel_crtc *intel_crtc;
	struct intel_encoder *intel_encoder =
		intel_attached_encoder(connector);
	struct drm_crtc *possible_crtc;
	struct drm_encoder *encoder = &intel_encoder->base;
	struct drm_crtc *crtc = NULL;
	struct drm_device *dev = encoder->dev;
	struct drm_framebuffer *fb;
	struct drm_mode_config *config = &dev->mode_config;
	struct drm_atomic_state *state = NULL, *restore_state = NULL;
	struct drm_connector_state *connector_state;
	struct intel_crtc_state *crtc_state;
	int ret, i = -1;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
		      connector->base.id, connector->name,
		      encoder->base.id, encoder->name);

	old->restore_state = NULL;

retry:
	ret = drm_modeset_lock(&config->connection_mutex, ctx);
	if (ret)
		goto fail;

	/*
	 * Algorithm gets a little messy:
	 *
	 * - if the connector already has an assigned crtc, use it (but make
	 *   sure it's on first)
	 *
	 * - try to find the first unused crtc that can drive this connector,
	 *   and use that if we find one
	 */

	/* See if we already have a CRTC for this connector */
	if (connector->state->crtc) {
		crtc = connector->state->crtc;

		ret = drm_modeset_lock(&crtc->mutex, ctx);
		if (ret)
			goto fail;

		/* Make sure the crtc and connector are running */
		goto found;
	}

	/* Find an unused one (if possible) */
	for_each_crtc(dev, possible_crtc) {
		i++;
		/* Encoder must be able to drive this crtc at all. */
		if (!(encoder->possible_crtcs & (1 << i)))
			continue;

		ret = drm_modeset_lock(&possible_crtc->mutex, ctx);
		if (ret)
			goto fail;

		/* Already enabled crtcs are left alone. */
		if (possible_crtc->state->enable) {
			drm_modeset_unlock(&possible_crtc->mutex);
			continue;
		}

		crtc = possible_crtc;
		break;
	}

	/*
	 * If we didn't find an unused CRTC, don't use any.
	 */
	if (!crtc) {
		DRM_DEBUG_KMS("no pipe available for load-detect\n");
		goto fail;
	}

found:
	intel_crtc = to_intel_crtc(crtc);

	ret = drm_modeset_lock(&crtc->primary->mutex, ctx);
	if (ret)
		goto fail;

	/* One atomic state for the new config, one to restore later. */
	state = drm_atomic_state_alloc(dev);
	restore_state = drm_atomic_state_alloc(dev);
	if (!state || !restore_state) {
		ret = -ENOMEM;
		goto fail;
	}

	state->acquire_ctx = ctx;
	restore_state->acquire_ctx = ctx;

	connector_state = drm_atomic_get_connector_state(state, connector);
	if (IS_ERR(connector_state)) {
		ret = PTR_ERR(connector_state);
		goto fail;
	}

	ret = drm_atomic_set_crtc_for_connector(connector_state, crtc);
	if (ret)
		goto fail;

	crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
	if (IS_ERR(crtc_state)) {
		ret = PTR_ERR(crtc_state);
		goto fail;
	}

	crtc_state->base.active = crtc_state->base.enable = true;

	if (!mode)
		mode = &load_detect_mode;

	/* We need a framebuffer large enough to accommodate all accesses
	 * that the plane may generate whilst we perform load detection.
	 * We can not rely on the fbcon either being present (we get called
	 * during its initialisation to detect all boot displays, or it may
	 * not even exist) or that it is large enough to satisfy the
	 * requested mode.
	 */
	fb = mode_fits_in_fbdev(dev, mode);
	if (fb == NULL) {
		DRM_DEBUG_KMS("creating tmp fb for load-detection\n");
		fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32);
	} else
		DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n");
	if (IS_ERR(fb)) {
		DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n");
		goto fail;
	}

	ret = intel_modeset_setup_plane_state(state, crtc, mode, fb, 0, 0);
	if (ret)
		goto fail;

	/* The plane state now holds its own reference to fb. */
	drm_framebuffer_unreference(fb);

	ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode);
	if (ret)
		goto fail;

	/* Snapshot the current connector/crtc/plane state for restore. */
	ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
	if (!ret)
		ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
	if (!ret)
		ret = PTR_ERR_OR_ZERO(drm_atomic_get_plane_state(restore_state, crtc->primary));
	if (ret) {
		DRM_DEBUG_KMS("Failed to create a copy of old state to restore: %i\n", ret);
		goto fail;
	}

	ret = drm_atomic_commit(state);
	if (ret) {
		DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
		goto fail;
	}

	old->restore_state = restore_state;

	/* let the connector get through one full cycle before testing */
	intel_wait_for_vblank(dev, intel_crtc->pipe);
	return true;

fail:
	drm_atomic_state_free(state);
	drm_atomic_state_free(restore_state);
	restore_state = state = NULL;

	/* Deadlock with another modeset: back off and retry from scratch. */
	if (ret == -EDEADLK) {
		drm_modeset_backoff(ctx);
		goto retry;
	}

	return false;
}
10573
d2434ab7 10574void intel_release_load_detect_pipe(struct drm_connector *connector,
49172fee
ACO
10575 struct intel_load_detect_pipe *old,
10576 struct drm_modeset_acquire_ctx *ctx)
79e53945 10577{
d2434ab7
DV
10578 struct intel_encoder *intel_encoder =
10579 intel_attached_encoder(connector);
4ef69c7a 10580 struct drm_encoder *encoder = &intel_encoder->base;
edde3617 10581 struct drm_atomic_state *state = old->restore_state;
d3a40d1b 10582 int ret;
79e53945 10583
d2dff872 10584 DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
c23cc417 10585 connector->base.id, connector->name,
8e329a03 10586 encoder->base.id, encoder->name);
d2dff872 10587
edde3617 10588 if (!state)
0622a53c 10589 return;
79e53945 10590
edde3617
ML
10591 ret = drm_atomic_commit(state);
10592 if (ret) {
10593 DRM_DEBUG_KMS("Couldn't release load detect pipe: %i\n", ret);
10594 drm_atomic_state_free(state);
10595 }
79e53945
JB
10596}
10597
da4a1efa 10598static int i9xx_pll_refclk(struct drm_device *dev,
5cec258b 10599 const struct intel_crtc_state *pipe_config)
da4a1efa
VS
10600{
10601 struct drm_i915_private *dev_priv = dev->dev_private;
10602 u32 dpll = pipe_config->dpll_hw_state.dpll;
10603
10604 if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
e91e941b 10605 return dev_priv->vbt.lvds_ssc_freq;
da4a1efa
VS
10606 else if (HAS_PCH_SPLIT(dev))
10607 return 120000;
10608 else if (!IS_GEN2(dev))
10609 return 96000;
10610 else
10611 return 48000;
10612}
10613
/* Returns the clock of the currently programmed mode of the given pipe. */
static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
				struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int pipe = pipe_config->cpu_transcoder;
	u32 dpll = pipe_config->dpll_hw_state.dpll;
	u32 fp;
	struct dpll clock;
	int port_clock;
	int refclk = i9xx_pll_refclk(dev, pipe_config);

	/* FPA1 is selected when the rate-select bit is clear... wait,
	 * the bit clear selects FP0, set selects FP1. */
	if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
		fp = pipe_config->dpll_hw_state.fp0;
	else
		fp = pipe_config->dpll_hw_state.fp1;

	/* Decode the M/N divider fields (Pineview uses different masks). */
	clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
	if (IS_PINEVIEW(dev)) {
		clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
		clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
	} else {
		clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
		clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
	}

	if (!IS_GEN2(dev)) {
		/* P1 post divider is stored as a one-hot field. */
		if (IS_PINEVIEW(dev))
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
				DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
		else
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
				DPLL_FPA01_P1_POST_DIV_SHIFT);

		/* P2 depends on the DPLL operating mode (DAC vs LVDS). */
		switch (dpll & DPLL_MODE_MASK) {
		case DPLLB_MODE_DAC_SERIAL:
			clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
				5 : 10;
			break;
		case DPLLB_MODE_LVDS:
			clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
				7 : 14;
			break;
		default:
			DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
				  "mode\n", (int)(dpll & DPLL_MODE_MASK));
			return;
		}

		if (IS_PINEVIEW(dev))
			port_clock = pnv_calc_dpll_params(refclk, &clock);
		else
			port_clock = i9xx_calc_dpll_params(refclk, &clock);
	} else {
		/* Gen2: LVDS status decides the P1/P2 decoding. */
		u32 lvds = IS_I830(dev) ? 0 : I915_READ(LVDS);
		bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);

		if (is_lvds) {
			clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
				       DPLL_FPA01_P1_POST_DIV_SHIFT);

			if (lvds & LVDS_CLKB_POWER_UP)
				clock.p2 = 7;
			else
				clock.p2 = 14;
		} else {
			if (dpll & PLL_P1_DIVIDE_BY_TWO)
				clock.p1 = 2;
			else {
				clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
					    DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
			}
			if (dpll & PLL_P2_DIVIDE_BY_4)
				clock.p2 = 4;
			else
				clock.p2 = 2;
		}

		port_clock = i9xx_calc_dpll_params(refclk, &clock);
	}

	/*
	 * This value includes pixel_multiplier. We will use
	 * port_clock to compute adjusted_mode.crtc_clock in the
	 * encoder's get_config() function.
	 */
	pipe_config->port_clock = port_clock;
}
10703
6878da05
VS
10704int intel_dotclock_calculate(int link_freq,
10705 const struct intel_link_m_n *m_n)
f1f644dc 10706{
f1f644dc
JB
10707 /*
10708 * The calculation for the data clock is:
1041a02f 10709 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
f1f644dc 10710 * But we want to avoid losing precison if possible, so:
1041a02f 10711 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
f1f644dc
JB
10712 *
10713 * and the link clock is simpler:
1041a02f 10714 * link_clock = (m * link_clock) / n
f1f644dc
JB
10715 */
10716
6878da05
VS
10717 if (!m_n->link_n)
10718 return 0;
f1f644dc 10719
6878da05
VS
10720 return div_u64((u64)m_n->link_m * link_freq, m_n->link_n);
10721}
f1f644dc 10722
/*
 * Fill in the crtc_clock for an ILK PCH pipe: read the DPLL-derived
 * port_clock, then convert it through the FDI M/N values.
 */
static void ironlake_pch_clock_get(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* read out port_clock from the DPLL */
	i9xx_crtc_clock_get(crtc, pipe_config);

	/*
	 * In case there is an active pipe without active ports,
	 * we may need some idea for the dotclock anyway.
	 * Calculate one based on the FDI configuration.
	 */
	pipe_config->base.adjusted_mode.crtc_clock =
		intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
					 &pipe_config->fdi_m_n);
}
10740
/** Returns the currently programmed mode of the given pipe. */
struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
					     struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	enum transcoder cpu_transcoder = intel_crtc->config->cpu_transcoder;
	struct drm_display_mode *mode;
	struct intel_crtc_state *pipe_config;
	int htot = I915_READ(HTOTAL(cpu_transcoder));
	int hsync = I915_READ(HSYNC(cpu_transcoder));
	int vtot = I915_READ(VTOTAL(cpu_transcoder));
	int vsync = I915_READ(VSYNC(cpu_transcoder));
	enum pipe pipe = intel_crtc->pipe;

	/* Caller is responsible for freeing the returned mode. */
	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
	if (!mode)
		return NULL;

	pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
	if (!pipe_config) {
		kfree(mode);
		return NULL;
	}

	/*
	 * Construct a pipe_config sufficient for getting the clock info
	 * back out of crtc_clock_get.
	 *
	 * Note, if LVDS ever uses a non-1 pixel multiplier, we'll need
	 * to use a real value here instead.
	 */
	pipe_config->cpu_transcoder = (enum transcoder) pipe;
	pipe_config->pixel_multiplier = 1;
	pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(pipe));
	pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(pipe));
	pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(pipe));
	i9xx_crtc_clock_get(intel_crtc, pipe_config);

	mode->clock = pipe_config->port_clock / pipe_config->pixel_multiplier;

	/* Timing registers pack active in the low word, total in the high. */
	mode->hdisplay = (htot & 0xffff) + 1;
	mode->htotal = ((htot & 0xffff0000) >> 16) + 1;
	mode->hsync_start = (hsync & 0xffff) + 1;
	mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1;
	mode->vdisplay = (vtot & 0xffff) + 1;
	mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1;
	mode->vsync_start = (vsync & 0xffff) + 1;
	mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1;

	drm_mode_set_name(mode);

	kfree(pipe_config);

	return mode;
}
10796
/*
 * Mark the GPU as busy (idempotent: returns early if mm.busy is already
 * set).  Takes a runtime PM reference, refreshes the gfx power value and,
 * on gen6+, switches RPS into busy mode.  Balanced by intel_mark_idle().
 */
void intel_mark_busy(struct drm_i915_private *dev_priv)
{
	if (dev_priv->mm.busy)
		return;

	/* Hold runtime PM for the whole busy period; dropped in
	 * intel_mark_idle(). */
	intel_runtime_pm_get(dev_priv);
	i915_update_gfx_val(dev_priv);
	if (INTEL_GEN(dev_priv) >= 6)
		gen6_rps_busy(dev_priv);
	dev_priv->mm.busy = true;
}
10808
/*
 * Mark the GPU as idle (idempotent: returns early if mm.busy is already
 * clear).  Switches RPS to idle on gen6+ and drops the runtime PM
 * reference taken by intel_mark_busy().
 */
void intel_mark_idle(struct drm_i915_private *dev_priv)
{
	if (!dev_priv->mm.busy)
		return;

	dev_priv->mm.busy = false;

	if (INTEL_GEN(dev_priv) >= 6)
		gen6_rps_idle(dev_priv);

	/* Pairs with intel_runtime_pm_get() in intel_mark_busy(). */
	intel_runtime_pm_put(dev_priv);
}
10821
79e53945
JB
/*
 * Destroy an intel_crtc.  Any pending unpin work is detached under the
 * event lock (so the irq/flip paths can no longer see it), then cancelled
 * and freed outside the lock before tearing down the drm_crtc itself.
 */
static void intel_crtc_destroy(struct drm_crtc *crtc)
{
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct intel_unpin_work *work;

	/* event_lock protects unpin_work against the flip completion paths */
	spin_lock_irq(&dev->event_lock);
	work = intel_crtc->unpin_work;
	intel_crtc->unpin_work = NULL;
	spin_unlock_irq(&dev->event_lock);

	if (work) {
		/* cancel_work_sync() may sleep, so it must run unlocked */
		cancel_work_sync(&work->work);
		kfree(work);
	}

	drm_crtc_cleanup(crtc);

	kfree(intel_crtc);
}
10842
6b95a207
KH
/*
 * Deferred completion of a page flip (workqueue context).
 *
 * Unpins the old framebuffer, drops the flip object and request
 * references taken when the flip was queued, signals frontbuffer flip
 * completion and FBC, and finally releases the old fb reference and the
 * work item itself.
 */
static void intel_unpin_work_fn(struct work_struct *__work)
{
	struct intel_unpin_work *work =
		container_of(__work, struct intel_unpin_work, work);
	struct intel_crtc *crtc = to_intel_crtc(work->crtc);
	struct drm_device *dev = crtc->base.dev;
	struct drm_plane *primary = crtc->base.primary;

	/* struct_mutex protects the GEM object unpin/unreference */
	mutex_lock(&dev->struct_mutex);
	intel_unpin_fb_obj(work->old_fb, primary->state->rotation);
	drm_gem_object_unreference(&work->pending_flip_obj->base);

	if (work->flip_queued_req)
		i915_gem_request_assign(&work->flip_queued_req, NULL);
	mutex_unlock(&dev->struct_mutex);

	intel_frontbuffer_flip_complete(dev, to_intel_plane(primary)->frontbuffer_bit);
	intel_fbc_post_update(crtc);
	drm_framebuffer_unreference(work->old_fb);

	/* Balances the increment done when the flip was queued */
	BUG_ON(atomic_read(&crtc->unpin_work_count) == 0);
	atomic_dec(&crtc->unpin_work_count);

	kfree(work);
}
10868
/*
 * Complete a pending page flip on @crtc if its work item has reached
 * INTEL_FLIP_COMPLETE.  Safe to call from both irq handlers and the
 * reset code, hence the irqsave locking.
 */
static void do_intel_finish_page_flip(struct drm_i915_private *dev_priv,
				      struct drm_crtc *crtc)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_unpin_work *work;
	unsigned long flags;

	/* Ignore early vblank irqs */
	if (intel_crtc == NULL)
		return;

	/*
	 * This is called both by irq handlers and the reset code (to complete
	 * lost pageflips) so needs the full irqsave spinlocks.
	 */
	spin_lock_irqsave(&dev->event_lock, flags);
	work = intel_crtc->unpin_work;

	if (work && atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE) {
		/* ensure that the unpin work is consistent wrt ->pending. */
		smp_rmb();

		page_flip_completed(intel_crtc);
	}

	spin_unlock_irqrestore(&dev->event_lock, flags);
}
10897
91d14251 10898void intel_finish_page_flip(struct drm_i915_private *dev_priv, int pipe)
1afe3e9d 10899{
1afe3e9d
JB
10900 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
10901
91d14251 10902 do_intel_finish_page_flip(dev_priv, crtc);
1afe3e9d
JB
10903}
10904
75f7f3ec
VS
10905/* Is 'a' after or equal to 'b'? */
10906static bool g4x_flip_count_after_eq(u32 a, u32 b)
10907{
10908 return !((a - b) & 0x80000000);
10909}
10910
/*
 * Has the page flip currently queued on @crtc actually completed in
 * hardware?  Combines a GPU-reset check, a DSPSURFLIVE base-address
 * comparison and (ctg+) a flip counter comparison; see the comments
 * below for why a single check is insufficient.
 */
static bool page_flip_finished(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned reset_counter;

	/* A GPU reset since the flip was queued means it will never
	 * signal completion normally - report it finished. */
	reset_counter = i915_reset_counter(&dev_priv->gpu_error);
	if (crtc->reset_counter != reset_counter)
		return true;

	/*
	 * The relevant registers don't exist on pre-ctg.
	 * As the flip done interrupt doesn't trigger for mmio
	 * flips on gmch platforms, a flip count check isn't
	 * really needed there. But since ctg has the registers,
	 * include it in the check anyway.
	 */
	if (INTEL_INFO(dev)->gen < 5 && !IS_G4X(dev))
		return true;

	/*
	 * BDW signals flip done immediately if the plane
	 * is disabled, even if the plane enable is already
	 * armed to occur at the next vblank :(
	 */

	/*
	 * A DSPSURFLIVE check isn't enough in case the mmio and CS flips
	 * used the same base address. In that case the mmio flip might
	 * have completed, but the CS hasn't even executed the flip yet.
	 *
	 * A flip count check isn't enough as the CS might have updated
	 * the base address just after start of vblank, but before we
	 * managed to process the interrupt. This means we'd complete the
	 * CS flip too soon.
	 *
	 * Combining both checks should get us a good enough result. It may
	 * still happen that the CS flip has been executed, but has not
	 * yet actually completed. But in case the base address is the same
	 * anyway, we don't really care.
	 */
	return (I915_READ(DSPSURFLIVE(crtc->plane)) & ~0xfff) ==
		crtc->unpin_work->gtt_offset &&
		g4x_flip_count_after_eq(I915_READ(PIPE_FLIPCOUNT_G4X(crtc->pipe)),
					crtc->unpin_work->flip_count);
}
10957
/*
 * Flip-pending irq handler: advance the unpin work's ->pending state
 * once the hardware reports the flip as finished.  The increment uses
 * atomic_inc_not_zero() so a work item that was never marked active is
 * left untouched.
 */
void intel_prepare_page_flip(struct drm_i915_private *dev_priv, int plane)
{
	struct drm_device *dev = dev_priv->dev;
	struct intel_crtc *intel_crtc =
		to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]);
	unsigned long flags;

	/*
	 * This is called both by irq handlers and the reset code (to complete
	 * lost pageflips) so needs the full irqsave spinlocks.
	 *
	 * NB: An MMIO update of the plane base pointer will also
	 * generate a page-flip completion irq, i.e. every modeset
	 * is also accompanied by a spurious intel_prepare_page_flip().
	 */
	spin_lock_irqsave(&dev->event_lock, flags);
	if (intel_crtc->unpin_work && page_flip_finished(intel_crtc))
		atomic_inc_not_zero(&intel_crtc->unpin_work->pending);
	spin_unlock_irqrestore(&dev->event_lock, flags);
}
10979
/*
 * Mark a queued flip as pending.  The barrier orders all prior writes
 * to @work before the ->pending store, pairing with the smp_rmb() in
 * the readers (do_intel_finish_page_flip() / __intel_pageflip_stall_check()).
 */
static inline void intel_mark_page_flip_active(struct intel_unpin_work *work)
{
	/* Ensure that the work item is consistent when activating it ... */
	smp_mb__before_atomic();
	atomic_set(&work->pending, INTEL_FLIP_PENDING);
}
10986
8c9f3aaf
JB
/*
 * Queue a CS page flip on gen2: emit MI_WAIT_FOR_EVENT for the previous
 * flip on this plane followed by MI_DISPLAY_FLIP.  The six emitted
 * dwords must match the intel_ring_begin(req, 6) reservation.
 *
 * Returns 0 on success or a negative error from intel_ring_begin().
 */
static int intel_gen2_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct drm_i915_gem_request *req,
				 uint32_t flags)
{
	struct intel_engine_cs *engine = req->engine;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 flip_mask;
	int ret;

	ret = intel_ring_begin(req, 6);
	if (ret)
		return ret;

	/* Can't queue multiple flips, so wait for the previous
	 * one to finish before executing the next.
	 */
	if (intel_crtc->plane)
		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
	else
		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
	intel_ring_emit(engine, MI_WAIT_FOR_EVENT | flip_mask);
	intel_ring_emit(engine, MI_NOOP);
	intel_ring_emit(engine, MI_DISPLAY_FLIP |
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	intel_ring_emit(engine, fb->pitches[0]);
	intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset);
	intel_ring_emit(engine, 0); /* aux display base address, unused */

	return 0;
}
11020
/*
 * Queue a CS page flip on gen3.  Same scheme as gen2 (wait for the
 * previous flip, then flip) but using the MI_DISPLAY_FLIP_I915 opcode
 * and a trailing MI_NOOP instead of the aux base dword.
 *
 * Returns 0 on success or a negative error from intel_ring_begin().
 */
static int intel_gen3_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct drm_i915_gem_request *req,
				 uint32_t flags)
{
	struct intel_engine_cs *engine = req->engine;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	u32 flip_mask;
	int ret;

	ret = intel_ring_begin(req, 6);
	if (ret)
		return ret;

	/* Serialise against the still-pending flip on this plane, if any */
	if (intel_crtc->plane)
		flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
	else
		flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
	intel_ring_emit(engine, MI_WAIT_FOR_EVENT | flip_mask);
	intel_ring_emit(engine, MI_NOOP);
	intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 |
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	intel_ring_emit(engine, fb->pitches[0]);
	intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset);
	intel_ring_emit(engine, MI_NOOP);

	return 0;
}
11051
/*
 * Queue a CS page flip on gen4 (i965+).  Only the base address needs
 * reprogramming; tiling is OR'ed into the base dword and the final
 * dword carries the (disabled) panel fitter bits plus PIPESRC.
 *
 * Returns 0 on success or a negative error from intel_ring_begin().
 */
static int intel_gen4_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct drm_i915_gem_request *req,
				 uint32_t flags)
{
	struct intel_engine_cs *engine = req->engine;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t pf, pipesrc;
	int ret;

	ret = intel_ring_begin(req, 4);
	if (ret)
		return ret;

	/* i965+ uses the linear or tiled offsets from the
	 * Display Registers (which do not change across a page-flip)
	 * so we need only reprogram the base address.
	 */
	intel_ring_emit(engine, MI_DISPLAY_FLIP |
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	intel_ring_emit(engine, fb->pitches[0]);
	intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset |
			obj->tiling_mode);

	/* XXX Enabling the panel-fitter across page-flip is so far
	 * untested on non-native modes, so ignore it for now.
	 * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
	 */
	pf = 0;
	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
	intel_ring_emit(engine, pf | pipesrc);

	return 0;
}
11089
/*
 * Queue a CS page flip on gen6.  Like gen4, but tiling is OR'ed into
 * the pitch dword rather than the base address dword.
 *
 * Returns 0 on success or a negative error from intel_ring_begin().
 */
static int intel_gen6_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct drm_i915_gem_request *req,
				 uint32_t flags)
{
	struct intel_engine_cs *engine = req->engine;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t pf, pipesrc;
	int ret;

	ret = intel_ring_begin(req, 4);
	if (ret)
		return ret;

	intel_ring_emit(engine, MI_DISPLAY_FLIP |
			MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
	intel_ring_emit(engine, fb->pitches[0] | obj->tiling_mode);
	intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset);

	/* Contrary to the suggestions in the documentation,
	 * "Enable Panel Fitter" does not seem to be required when page
	 * flipping with a non-native mode, and worse causes a normal
	 * modeset to fail.
	 * pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE;
	 */
	pf = 0;
	pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff;
	intel_ring_emit(engine, pf | pipesrc);

	return 0;
}
11124
7c9017e5
JB
/*
 * Queue a CS page flip on gen7+ (IVB and later).  Selects the per-plane
 * MI_DISPLAY_FLIP_IVB variant, and when flipping on the render ring
 * additionally unmasks the relevant flip-done events in DERRMR via an
 * LRI+SRM pair (with gen8 SRM taking an extra dword).  The whole packet
 * is cacheline-aligned first, as required by the BSpec.
 *
 * Returns 0 on success, -ENODEV for an unknown plane, or a negative
 * error from the ring helpers.
 */
static int intel_gen7_queue_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_framebuffer *fb,
				 struct drm_i915_gem_object *obj,
				 struct drm_i915_gem_request *req,
				 uint32_t flags)
{
	struct intel_engine_cs *engine = req->engine;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	uint32_t plane_bit = 0;
	int len, ret;

	switch (intel_crtc->plane) {
	case PLANE_A:
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A;
		break;
	case PLANE_B:
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B;
		break;
	case PLANE_C:
		plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C;
		break;
	default:
		WARN_ONCE(1, "unknown plane in flip command\n");
		return -ENODEV;
	}

	/* 4 dwords for the flip itself, plus the DERRMR LRI+SRM on RCS */
	len = 4;
	if (engine->id == RCS) {
		len += 6;
		/*
		 * On Gen 8, SRM is now taking an extra dword to accommodate
		 * 48bits addresses, and we need a NOOP for the batch size to
		 * stay even.
		 */
		if (IS_GEN8(dev))
			len += 2;
	}

	/*
	 * BSpec MI_DISPLAY_FLIP for IVB:
	 * "The full packet must be contained within the same cache line."
	 *
	 * Currently the LRI+SRM+MI_DISPLAY_FLIP all fit within the same
	 * cacheline, if we ever start emitting more commands before
	 * the MI_DISPLAY_FLIP we may need to first emit everything else,
	 * then do the cacheline alignment, and finally emit the
	 * MI_DISPLAY_FLIP.
	 */
	ret = intel_ring_cacheline_align(req);
	if (ret)
		return ret;

	ret = intel_ring_begin(req, len);
	if (ret)
		return ret;

	/* Unmask the flip-done completion message. Note that the bspec says that
	 * we should do this for both the BCS and RCS, and that we must not unmask
	 * more than one flip event at any time (or ensure that one flip message
	 * can be sent by waiting for flip-done prior to queueing new flips).
	 * Experimentation says that BCS works despite DERRMR masking all
	 * flip-done completion events and that unmasking all planes at once
	 * for the RCS also doesn't appear to drop events. Setting the DERRMR
	 * to zero does lead to lockups within MI_DISPLAY_FLIP.
	 */
	if (engine->id == RCS) {
		intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
		intel_ring_emit_reg(engine, DERRMR);
		intel_ring_emit(engine, ~(DERRMR_PIPEA_PRI_FLIP_DONE |
					  DERRMR_PIPEB_PRI_FLIP_DONE |
					  DERRMR_PIPEC_PRI_FLIP_DONE));
		if (IS_GEN8(dev))
			intel_ring_emit(engine, MI_STORE_REGISTER_MEM_GEN8 |
					MI_SRM_LRM_GLOBAL_GTT);
		else
			intel_ring_emit(engine, MI_STORE_REGISTER_MEM |
					MI_SRM_LRM_GLOBAL_GTT);
		intel_ring_emit_reg(engine, DERRMR);
		intel_ring_emit(engine, engine->scratch.gtt_offset + 256);
		if (IS_GEN8(dev)) {
			intel_ring_emit(engine, 0);
			intel_ring_emit(engine, MI_NOOP);
		}
	}

	intel_ring_emit(engine, MI_DISPLAY_FLIP_I915 | plane_bit);
	intel_ring_emit(engine, (fb->pitches[0] | obj->tiling_mode));
	intel_ring_emit(engine, intel_crtc->unpin_work->gtt_offset);
	intel_ring_emit(engine, (MI_NOOP));

	return 0;
}
11218
/*
 * Decide whether to perform a flip via MMIO rather than via a CS
 * command.  Policy, in order: forced on when there is no engine to emit
 * on; never on pre-gen5 (see below); then the i915.use_mmio_flip
 * modparam (<0 never, >0 always, 0 auto); under execlists always;
 * when a dma-buf fence is still unsignaled (so the worker can wait on
 * it); otherwise only when the flip would require a cross-engine sync.
 */
static bool use_mmio_flip(struct intel_engine_cs *engine,
			  struct drm_i915_gem_object *obj)
{
	/*
	 * This is not being used for older platforms, because
	 * non-availability of flip done interrupt forces us to use
	 * CS flips. Older platforms derive flip done using some clever
	 * tricks involving the flip_pending status bits and vblank irqs.
	 * So using MMIO flips there would disrupt this mechanism.
	 */

	if (engine == NULL)
		return true;

	if (INTEL_GEN(engine->i915) < 5)
		return false;

	if (i915.use_mmio_flip < 0)
		return false;
	else if (i915.use_mmio_flip > 0)
		return true;
	else if (i915.enable_execlists)
		return true;
	else if (obj->base.dma_buf &&
		 !reservation_object_test_signaled_rcu(obj->base.dma_buf->resv,
						       false))
		return true;
	else
		return engine != i915_gem_request_get_engine(obj->last_write_req);
}
11249
/*
 * Perform an MMIO page flip on SKL+ universal planes: reprogram
 * PLANE_CTL tiling, PLANE_STRIDE and finally PLANE_SURF.  The PLANE_SURF
 * write is what arms the update, so it must come last.
 */
static void skl_do_mmio_flip(struct intel_crtc *intel_crtc,
			     unsigned int rotation,
			     struct intel_unpin_work *work)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_framebuffer *fb = intel_crtc->base.primary->fb;
	const enum pipe pipe = intel_crtc->pipe;
	u32 ctl, stride, tile_height;

	/* Translate the fb modifier into PLANE_CTL tiling bits */
	ctl = I915_READ(PLANE_CTL(pipe, 0));
	ctl &= ~PLANE_CTL_TILED_MASK;
	switch (fb->modifier[0]) {
	case DRM_FORMAT_MOD_NONE:
		break;
	case I915_FORMAT_MOD_X_TILED:
		ctl |= PLANE_CTL_TILED_X;
		break;
	case I915_FORMAT_MOD_Y_TILED:
		ctl |= PLANE_CTL_TILED_Y;
		break;
	case I915_FORMAT_MOD_Yf_TILED:
		ctl |= PLANE_CTL_TILED_YF;
		break;
	default:
		MISSING_CASE(fb->modifier[0]);
	}

	/*
	 * The stride is either expressed as a multiple of 64 bytes chunks for
	 * linear buffers or in number of tiles for tiled buffers.
	 */
	if (intel_rotation_90_or_270(rotation)) {
		/* stride = Surface height in tiles */
		tile_height = intel_tile_height(dev_priv, fb->modifier[0], 0);
		stride = DIV_ROUND_UP(fb->height, tile_height);
	} else {
		stride = fb->pitches[0] /
			intel_fb_stride_alignment(dev_priv, fb->modifier[0],
						  fb->pixel_format);
	}

	/*
	 * Both PLANE_CTL and PLANE_STRIDE are not updated on vblank but on
	 * PLANE_SURF updates, the update is then guaranteed to be atomic.
	 */
	I915_WRITE(PLANE_CTL(pipe, 0), ctl);
	I915_WRITE(PLANE_STRIDE(pipe, 0), stride);

	/* Arms the flip; POSTING_READ flushes the write */
	I915_WRITE(PLANE_SURF(pipe, 0), work->gtt_offset);
	POSTING_READ(PLANE_SURF(pipe, 0));
}
11302
6042639c
CW
/*
 * Perform an MMIO page flip on ILK-style planes: update the DSPCNTR
 * tiling bit, then write DSPSURF - the DSPSURF write is what latches
 * the flip.
 */
static void ilk_do_mmio_flip(struct intel_crtc *intel_crtc,
			     struct intel_unpin_work *work)
{
	struct drm_device *dev = intel_crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_framebuffer *intel_fb =
		to_intel_framebuffer(intel_crtc->base.primary->fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;
	i915_reg_t reg = DSPCNTR(intel_crtc->plane);
	u32 dspcntr;

	dspcntr = I915_READ(reg);

	/* Keep the tiling bit in sync with the new framebuffer */
	if (obj->tiling_mode != I915_TILING_NONE)
		dspcntr |= DISPPLANE_TILED;
	else
		dspcntr &= ~DISPPLANE_TILED;

	I915_WRITE(reg, dspcntr);

	/* Arms the flip; POSTING_READ flushes the write */
	I915_WRITE(DSPSURF(intel_crtc->plane), work->gtt_offset);
	POSTING_READ(DSPSURF(intel_crtc->plane));
}
11326
/*
 * XXX: This is the temporary way to update the plane registers until we get
 * around to using the usual plane update functions for MMIO flips
 */
static void intel_do_mmio_flip(struct intel_mmio_flip *mmio_flip)
{
	struct intel_crtc *crtc = mmio_flip->crtc;
	struct intel_unpin_work *work;

	/* Snapshot unpin_work under the event lock; it may have been
	 * completed/cleared already, in which case there is nothing to do. */
	spin_lock_irq(&crtc->base.dev->event_lock);
	work = crtc->unpin_work;
	spin_unlock_irq(&crtc->base.dev->event_lock);
	if (work == NULL)
		return;

	/* Bracket the register writes in a vblank-evasion critical section */
	intel_pipe_update_start(crtc);

	if (INTEL_INFO(mmio_flip->i915)->gen >= 9)
		skl_do_mmio_flip(crtc, mmio_flip->rotation, work);
	else
		/* use_mmio_flip() restricts MMIO flips to ilk+ */
		ilk_do_mmio_flip(crtc, work);

	intel_pipe_update_end(crtc);

	intel_mark_page_flip_active(work);
}
11354
/*
 * Worker for MMIO flips: wait for the last GPU write to the object
 * (and any dma-buf fence) to complete, then perform the flip and free
 * the intel_mmio_flip allocated by intel_queue_mmio_flip().
 */
static void intel_mmio_flip_work_func(struct work_struct *work)
{
	struct intel_mmio_flip *mmio_flip =
		container_of(work, struct intel_mmio_flip, work);
	struct intel_framebuffer *intel_fb =
		to_intel_framebuffer(mmio_flip->crtc->base.primary->fb);
	struct drm_i915_gem_object *obj = intel_fb->obj;

	if (mmio_flip->req) {
		WARN_ON(__i915_wait_request(mmio_flip->req,
					    false, NULL,
					    &mmio_flip->i915->rps.mmioflips));
		/* Drops the reference taken when the flip was queued */
		i915_gem_request_unreference(mmio_flip->req);
	}

	/* For framebuffer backed by dmabuf, wait for fence */
	if (obj->base.dma_buf)
		WARN_ON(reservation_object_wait_timeout_rcu(obj->base.dma_buf->resv,
							    false, false,
							    MAX_SCHEDULE_TIMEOUT) < 0);

	intel_do_mmio_flip(mmio_flip);
	kfree(mmio_flip);
}
11379
/*
 * Queue an MMIO flip: allocate an intel_mmio_flip carrying the last
 * write request, crtc and rotation, and schedule
 * intel_mmio_flip_work_func() to wait and perform the flip.
 *
 * Returns 0 on success or -ENOMEM.  Ownership of the allocation passes
 * to the worker, which frees it.
 */
static int intel_queue_mmio_flip(struct drm_device *dev,
				 struct drm_crtc *crtc,
				 struct drm_i915_gem_object *obj)
{
	struct intel_mmio_flip *mmio_flip;

	mmio_flip = kmalloc(sizeof(*mmio_flip), GFP_KERNEL);
	if (mmio_flip == NULL)
		return -ENOMEM;

	mmio_flip->i915 = to_i915(dev);
	/* Reference dropped by the worker after waiting on the request */
	mmio_flip->req = i915_gem_request_reference(obj->last_write_req);
	mmio_flip->crtc = to_intel_crtc(crtc);
	mmio_flip->rotation = crtc->primary->state->rotation;

	INIT_WORK(&mmio_flip->work, intel_mmio_flip_work_func);
	schedule_work(&mmio_flip->work);

	return 0;
}
11400
8c9f3aaf
JB
/*
 * Fallback queue_flip hook for platforms without CS flip support:
 * always fails with -ENODEV, forcing callers onto other flip paths.
 */
static int intel_default_queue_flip(struct drm_device *dev,
				    struct drm_crtc *crtc,
				    struct drm_framebuffer *fb,
				    struct drm_i915_gem_object *obj,
				    struct drm_i915_gem_request *req,
				    uint32_t flags)
{
	return -ENODEV;
}
11410
d6bbafa1
CW
/*
 * Heuristically decide whether the pending flip on @crtc has stalled
 * (i.e. completed in hardware but its completion irq was missed).
 * Must be called with dev->event_lock held (reads intel_crtc->unpin_work).
 *
 * Returns true when the flip should be treated as complete.
 */
static bool __intel_pageflip_stall_check(struct drm_device *dev,
					 struct drm_crtc *crtc)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_unpin_work *work = intel_crtc->unpin_work;
	u32 addr;
	u32 pending;

	pending = atomic_read(&work->pending);
	/* ensure that the unpin work is consistent wrt ->pending. */
	smp_rmb();

	/* Not yet pending: only "complete" counts as finished */
	if (pending != INTEL_FLIP_PENDING)
		return pending == INTEL_FLIP_COMPLETE;

	/* Record the first vblank on which the flip was ready (i.e. the
	 * queued request, if any, had completed). */
	if (work->flip_ready_vblank == 0) {
		if (work->flip_queued_req &&
		    !i915_gem_request_completed(work->flip_queued_req, true))
			return false;

		work->flip_ready_vblank = drm_crtc_vblank_count(crtc);
	}

	/* Give the flip up to 3 vblanks before suspecting a stall */
	if (drm_crtc_vblank_count(crtc) - work->flip_ready_vblank < 3)
		return false;

	/* Potential stall - if we see that the flip has happened,
	 * assume a missed interrupt. */
	if (INTEL_INFO(dev)->gen >= 4)
		addr = I915_HI_DISPBASE(I915_READ(DSPSURF(intel_crtc->plane)));
	else
		addr = I915_READ(DSPADDR(intel_crtc->plane));

	/* There is a potential issue here with a false positive after a flip
	 * to the same address. We could address this by checking for a
	 * non-incrementing frame counter.
	 */
	return addr == work->gtt_offset;
}
11451
/*
 * Vblank-time watchdog for page flips on @pipe (irq context only, hence
 * the in_interrupt() warning and plain spin_lock).  Kicks flips that
 * __intel_pageflip_stall_check() deems stuck, and RPS-boosts the queued
 * request of a flip that has been waiting for more than one vblank.
 */
void intel_check_page_flip(struct drm_i915_private *dev_priv, int pipe)
{
	struct drm_device *dev = dev_priv->dev;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_unpin_work *work;

	WARN_ON(!in_interrupt());

	if (crtc == NULL)
		return;

	spin_lock(&dev->event_lock);
	work = intel_crtc->unpin_work;
	if (work != NULL && __intel_pageflip_stall_check(dev, crtc)) {
		WARN_ONCE(1, "Kicking stuck page flip: queued at %d, now %d\n",
			 work->flip_queued_vblank, drm_vblank_count(dev, pipe));
		page_flip_completed(intel_crtc);
		work = NULL;
	}
	/* Flip still pending after a full vblank: boost the GPU so the
	 * rendering it depends on finishes sooner. */
	if (work != NULL &&
	    drm_vblank_count(dev, pipe) - work->flip_queued_vblank > 1)
		intel_queue_rps_boost_for_request(work->flip_queued_req);
	spin_unlock(&dev->event_lock);
}
11477
6b95a207
KH
11478static int intel_crtc_page_flip(struct drm_crtc *crtc,
11479 struct drm_framebuffer *fb,
ed8d1975
KP
11480 struct drm_pending_vblank_event *event,
11481 uint32_t page_flip_flags)
6b95a207
KH
11482{
11483 struct drm_device *dev = crtc->dev;
11484 struct drm_i915_private *dev_priv = dev->dev_private;
f4510a27 11485 struct drm_framebuffer *old_fb = crtc->primary->fb;
2ff8fde1 11486 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
6b95a207 11487 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
455a6808 11488 struct drm_plane *primary = crtc->primary;
a071fa00 11489 enum pipe pipe = intel_crtc->pipe;
6b95a207 11490 struct intel_unpin_work *work;
e2f80391 11491 struct intel_engine_cs *engine;
cf5d8a46 11492 bool mmio_flip;
91af127f 11493 struct drm_i915_gem_request *request = NULL;
52e68630 11494 int ret;
6b95a207 11495
2ff8fde1
MR
11496 /*
11497 * drm_mode_page_flip_ioctl() should already catch this, but double
11498 * check to be safe. In the future we may enable pageflipping from
11499 * a disabled primary plane.
11500 */
11501 if (WARN_ON(intel_fb_obj(old_fb) == NULL))
11502 return -EBUSY;
11503
e6a595d2 11504 /* Can't change pixel format via MI display flips. */
f4510a27 11505 if (fb->pixel_format != crtc->primary->fb->pixel_format)
e6a595d2
VS
11506 return -EINVAL;
11507
11508 /*
11509 * TILEOFF/LINOFF registers can't be changed via MI display flips.
11510 * Note that pitch changes could also affect these register.
11511 */
11512 if (INTEL_INFO(dev)->gen > 3 &&
f4510a27
MR
11513 (fb->offsets[0] != crtc->primary->fb->offsets[0] ||
11514 fb->pitches[0] != crtc->primary->fb->pitches[0]))
e6a595d2
VS
11515 return -EINVAL;
11516
f900db47
CW
11517 if (i915_terminally_wedged(&dev_priv->gpu_error))
11518 goto out_hang;
11519
b14c5679 11520 work = kzalloc(sizeof(*work), GFP_KERNEL);
6b95a207
KH
11521 if (work == NULL)
11522 return -ENOMEM;
11523
6b95a207 11524 work->event = event;
b4a98e57 11525 work->crtc = crtc;
ab8d6675 11526 work->old_fb = old_fb;
6b95a207
KH
11527 INIT_WORK(&work->work, intel_unpin_work_fn);
11528
87b6b101 11529 ret = drm_crtc_vblank_get(crtc);
7317c75e
JB
11530 if (ret)
11531 goto free_work;
11532
6b95a207 11533 /* We borrow the event spin lock for protecting unpin_work */
5e2d7afc 11534 spin_lock_irq(&dev->event_lock);
6b95a207 11535 if (intel_crtc->unpin_work) {
d6bbafa1
CW
11536 /* Before declaring the flip queue wedged, check if
11537 * the hardware completed the operation behind our backs.
11538 */
11539 if (__intel_pageflip_stall_check(dev, crtc)) {
11540 DRM_DEBUG_DRIVER("flip queue: previous flip completed, continuing\n");
11541 page_flip_completed(intel_crtc);
11542 } else {
11543 DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
5e2d7afc 11544 spin_unlock_irq(&dev->event_lock);
468f0b44 11545
d6bbafa1
CW
11546 drm_crtc_vblank_put(crtc);
11547 kfree(work);
11548 return -EBUSY;
11549 }
6b95a207
KH
11550 }
11551 intel_crtc->unpin_work = work;
5e2d7afc 11552 spin_unlock_irq(&dev->event_lock);
6b95a207 11553
b4a98e57
CW
11554 if (atomic_read(&intel_crtc->unpin_work_count) >= 2)
11555 flush_workqueue(dev_priv->wq);
11556
75dfca80 11557 /* Reference the objects for the scheduled work. */
ab8d6675 11558 drm_framebuffer_reference(work->old_fb);
05394f39 11559 drm_gem_object_reference(&obj->base);
6b95a207 11560
f4510a27 11561 crtc->primary->fb = fb;
afd65eb4 11562 update_state_fb(crtc->primary);
e8216e50 11563 intel_fbc_pre_update(intel_crtc);
1ed1f968 11564
e1f99ce6 11565 work->pending_flip_obj = obj;
e1f99ce6 11566
89ed88ba
CW
11567 ret = i915_mutex_lock_interruptible(dev);
11568 if (ret)
11569 goto cleanup;
11570
c19ae989 11571 intel_crtc->reset_counter = i915_reset_counter(&dev_priv->gpu_error);
7f1847eb
CW
11572 if (__i915_reset_in_progress_or_wedged(intel_crtc->reset_counter)) {
11573 ret = -EIO;
11574 goto cleanup;
11575 }
11576
11577 atomic_inc(&intel_crtc->unpin_work_count);
e1f99ce6 11578
75f7f3ec 11579 if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
fd8f507c 11580 work->flip_count = I915_READ(PIPE_FLIPCOUNT_G4X(pipe)) + 1;
75f7f3ec 11581
666a4537 11582 if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
4a570db5 11583 engine = &dev_priv->engine[BCS];
ab8d6675 11584 if (obj->tiling_mode != intel_fb_obj(work->old_fb)->tiling_mode)
8e09bf83 11585 /* vlv: DISPLAY_FLIP fails to change tiling */
e2f80391 11586 engine = NULL;
48bf5b2d 11587 } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
4a570db5 11588 engine = &dev_priv->engine[BCS];
4fa62c89 11589 } else if (INTEL_INFO(dev)->gen >= 7) {
666796da 11590 engine = i915_gem_request_get_engine(obj->last_write_req);
e2f80391 11591 if (engine == NULL || engine->id != RCS)
4a570db5 11592 engine = &dev_priv->engine[BCS];
4fa62c89 11593 } else {
4a570db5 11594 engine = &dev_priv->engine[RCS];
4fa62c89
VS
11595 }
11596
e2f80391 11597 mmio_flip = use_mmio_flip(engine, obj);
cf5d8a46
CW
11598
11599 /* When using CS flips, we want to emit semaphores between rings.
11600 * However, when using mmio flips we will create a task to do the
11601 * synchronisation, so all we want here is to pin the framebuffer
11602 * into the display plane and skip any waits.
11603 */
7580d774 11604 if (!mmio_flip) {
e2f80391 11605 ret = i915_gem_object_sync(obj, engine, &request);
55d80d23
ML
11606 if (!ret && !request) {
11607 request = i915_gem_request_alloc(engine, NULL);
11608 ret = PTR_ERR_OR_ZERO(request);
11609 }
11610
7580d774
ML
11611 if (ret)
11612 goto cleanup_pending;
11613 }
11614
3465c580 11615 ret = intel_pin_and_fence_fb_obj(fb, primary->state->rotation);
8c9f3aaf
JB
11616 if (ret)
11617 goto cleanup_pending;
6b95a207 11618
dedf278c
TU
11619 work->gtt_offset = intel_plane_obj_offset(to_intel_plane(primary),
11620 obj, 0);
11621 work->gtt_offset += intel_crtc->dspaddr_offset;
4fa62c89 11622
cf5d8a46 11623 if (mmio_flip) {
55d80d23 11624 work->flip_queued_vblank = drm_crtc_vblank_count(crtc);
d6bbafa1 11625
f06cc1b9
JH
11626 i915_gem_request_assign(&work->flip_queued_req,
11627 obj->last_write_req);
6258fbe2 11628
55d80d23
ML
11629 ret = intel_queue_mmio_flip(dev, crtc, obj);
11630 if (ret)
11631 goto cleanup_unpin;
11632 } else {
6258fbe2 11633 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj, request,
d6bbafa1
CW
11634 page_flip_flags);
11635 if (ret)
11636 goto cleanup_unpin;
11637
6258fbe2 11638 i915_gem_request_assign(&work->flip_queued_req, request);
d6bbafa1 11639
55d80d23
ML
11640 work->flip_queued_vblank = drm_crtc_vblank_count(crtc);
11641 intel_mark_page_flip_active(work);
91af127f 11642
55d80d23
ML
11643 i915_add_request_no_flush(request);
11644 }
4fa62c89 11645
55d80d23 11646 i915_gem_track_fb(intel_fb_obj(old_fb), obj,
a9ff8714 11647 to_intel_plane(primary)->frontbuffer_bit);
c80ac854 11648 mutex_unlock(&dev->struct_mutex);
a071fa00 11649
a9ff8714
VS
11650 intel_frontbuffer_flip_prepare(dev,
11651 to_intel_plane(primary)->frontbuffer_bit);
6b95a207 11652
e5510fac
JB
11653 trace_i915_flip_request(intel_crtc->plane, obj);
11654
6b95a207 11655 return 0;
96b099fd 11656
4fa62c89 11657cleanup_unpin:
3465c580 11658 intel_unpin_fb_obj(fb, crtc->primary->state->rotation);
8c9f3aaf 11659cleanup_pending:
0aa498d5 11660 if (!IS_ERR_OR_NULL(request))
aa9b7810 11661 i915_add_request_no_flush(request);
b4a98e57 11662 atomic_dec(&intel_crtc->unpin_work_count);
89ed88ba
CW
11663 mutex_unlock(&dev->struct_mutex);
11664cleanup:
f4510a27 11665 crtc->primary->fb = old_fb;
afd65eb4 11666 update_state_fb(crtc->primary);
89ed88ba
CW
11667
11668 drm_gem_object_unreference_unlocked(&obj->base);
ab8d6675 11669 drm_framebuffer_unreference(work->old_fb);
96b099fd 11670
5e2d7afc 11671 spin_lock_irq(&dev->event_lock);
96b099fd 11672 intel_crtc->unpin_work = NULL;
5e2d7afc 11673 spin_unlock_irq(&dev->event_lock);
96b099fd 11674
87b6b101 11675 drm_crtc_vblank_put(crtc);
7317c75e 11676free_work:
96b099fd
CW
11677 kfree(work);
11678
f900db47 11679 if (ret == -EIO) {
02e0efb5
ML
11680 struct drm_atomic_state *state;
11681 struct drm_plane_state *plane_state;
11682
f900db47 11683out_hang:
02e0efb5
ML
11684 state = drm_atomic_state_alloc(dev);
11685 if (!state)
11686 return -ENOMEM;
11687 state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
11688
11689retry:
11690 plane_state = drm_atomic_get_plane_state(state, primary);
11691 ret = PTR_ERR_OR_ZERO(plane_state);
11692 if (!ret) {
11693 drm_atomic_set_fb_for_plane(plane_state, fb);
11694
11695 ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
11696 if (!ret)
11697 ret = drm_atomic_commit(state);
11698 }
11699
11700 if (ret == -EDEADLK) {
11701 drm_modeset_backoff(state->acquire_ctx);
11702 drm_atomic_state_clear(state);
11703 goto retry;
11704 }
11705
11706 if (ret)
11707 drm_atomic_state_free(state);
11708
f0d3dad3 11709 if (ret == 0 && event) {
5e2d7afc 11710 spin_lock_irq(&dev->event_lock);
560ce1dc 11711 drm_crtc_send_vblank_event(crtc, event);
5e2d7afc 11712 spin_unlock_irq(&dev->event_lock);
f0d3dad3 11713 }
f900db47 11714 }
96b099fd 11715 return ret;
6b95a207
KH
11716}
11717
da20eabd
ML
11718
11719/**
11720 * intel_wm_need_update - Check whether watermarks need updating
11721 * @plane: drm plane
11722 * @state: new plane state
11723 *
11724 * Check current plane state versus the new one to determine whether
11725 * watermarks need to be recalculated.
11726 *
11727 * Returns true or false.
11728 */
11729static bool intel_wm_need_update(struct drm_plane *plane,
11730 struct drm_plane_state *state)
11731{
d21fbe87
MR
11732 struct intel_plane_state *new = to_intel_plane_state(state);
11733 struct intel_plane_state *cur = to_intel_plane_state(plane->state);
11734
11735 /* Update watermarks on tiling or size changes. */
92826fcd
ML
11736 if (new->visible != cur->visible)
11737 return true;
11738
11739 if (!cur->base.fb || !new->base.fb)
11740 return false;
11741
11742 if (cur->base.fb->modifier[0] != new->base.fb->modifier[0] ||
11743 cur->base.rotation != new->base.rotation ||
d21fbe87
MR
11744 drm_rect_width(&new->src) != drm_rect_width(&cur->src) ||
11745 drm_rect_height(&new->src) != drm_rect_height(&cur->src) ||
11746 drm_rect_width(&new->dst) != drm_rect_width(&cur->dst) ||
11747 drm_rect_height(&new->dst) != drm_rect_height(&cur->dst))
2791a16c 11748 return true;
7809e5ae 11749
2791a16c 11750 return false;
7809e5ae
MR
11751}
11752
d21fbe87
MR
11753static bool needs_scaling(struct intel_plane_state *state)
11754{
11755 int src_w = drm_rect_width(&state->src) >> 16;
11756 int src_h = drm_rect_height(&state->src) >> 16;
11757 int dst_w = drm_rect_width(&state->dst);
11758 int dst_h = drm_rect_height(&state->dst);
11759
11760 return (src_w != dst_w || src_h != dst_h);
11761}
11762
da20eabd
ML
/*
 * intel_plane_atomic_calc_changes - derive crtc-level flags from a plane update
 * @crtc_state: new crtc state this plane update belongs to
 * @plane_state: new plane state
 *
 * Compares the old and new state of one plane and records, in the new crtc
 * state, the work the commit phase must do for it: watermark updates
 * (pre/post vblank), temporarily disabling cxsr, frontbuffer tracking bits,
 * fb-changed tracking and the IVB sprite-scaling workaround.
 *
 * Returns 0 on success or a negative error code from the gen9 scaler setup.
 */
int intel_plane_atomic_calc_changes(struct drm_crtc_state *crtc_state,
				    struct drm_plane_state *plane_state)
{
	struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc_state);
	struct drm_crtc *crtc = crtc_state->crtc;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct drm_plane *plane = plane_state->plane;
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_plane_state *old_plane_state =
		to_intel_plane_state(plane->state);
	int idx = intel_crtc->base.base.id, ret;
	bool mode_changed = needs_modeset(crtc_state);
	bool was_crtc_enabled = crtc->state->active;
	bool is_crtc_enabled = crtc_state->active;
	bool turn_off, turn_on, visible, was_visible;
	struct drm_framebuffer *fb = plane_state->fb;

	/* gen9+: (re)assign a pipe scaler for every non-cursor plane. */
	if (crtc_state && INTEL_INFO(dev)->gen >= 9 &&
	    plane->type != DRM_PLANE_TYPE_CURSOR) {
		ret = skl_update_scaler_plane(
			to_intel_crtc_state(crtc_state),
			to_intel_plane_state(plane_state));
		if (ret)
			return ret;
	}

	was_visible = old_plane_state->visible;
	visible = to_intel_plane_state(plane_state)->visible;

	/* A plane must not be visible on a crtc that was off. */
	if (!was_crtc_enabled && WARN_ON(was_visible))
		was_visible = false;

	/*
	 * Visibility is calculated as if the crtc was on, but
	 * after scaler setup everything depends on it being off
	 * when the crtc isn't active.
	 *
	 * FIXME this is wrong for watermarks. Watermarks should also
	 * be computed as if the pipe would be active. Perhaps move
	 * per-plane wm computation to the .check_plane() hook, and
	 * only combine the results from all planes in the current place?
	 */
	if (!is_crtc_enabled)
		to_intel_plane_state(plane_state)->visible = visible = false;

	/* Plane stays invisible: nothing to do for it. */
	if (!was_visible && !visible)
		return 0;

	if (fb != old_plane_state->base.fb)
		pipe_config->fb_changed = true;

	/* A modeset turns the plane off and (if visible) back on again. */
	turn_off = was_visible && (!visible || mode_changed);
	turn_on = visible && (!was_visible || mode_changed);

	DRM_DEBUG_ATOMIC("[CRTC:%i] has [PLANE:%i] with fb %i\n", idx,
			 plane->base.id, fb ? fb->base.id : -1);

	DRM_DEBUG_ATOMIC("[PLANE:%i] visible %i -> %i, off %i, on %i, ms %i\n",
			 plane->base.id, was_visible, visible,
			 turn_off, turn_on, mode_changed);

	if (turn_on) {
		pipe_config->update_wm_pre = true;

		/* must disable cxsr around plane enable/disable */
		if (plane->type != DRM_PLANE_TYPE_CURSOR)
			pipe_config->disable_cxsr = true;
	} else if (turn_off) {
		pipe_config->update_wm_post = true;

		/* must disable cxsr around plane enable/disable */
		if (plane->type != DRM_PLANE_TYPE_CURSOR)
			pipe_config->disable_cxsr = true;
	} else if (intel_wm_need_update(plane, plane_state)) {
		/* FIXME bollocks */
		pipe_config->update_wm_pre = true;
		pipe_config->update_wm_post = true;
	}

	/* Pre-gen9 platforms need two-step watermark updates */
	if ((pipe_config->update_wm_pre || pipe_config->update_wm_post) &&
	    INTEL_INFO(dev)->gen < 9 && dev_priv->display.optimize_watermarks)
		to_intel_crtc_state(crtc_state)->wm.need_postvbl_update = true;

	if (visible || was_visible)
		pipe_config->fb_bits |= to_intel_plane(plane)->frontbuffer_bit;

	/*
	 * WaCxSRDisabledForSpriteScaling:ivb
	 *
	 * cstate->update_wm was already set above, so this flag will
	 * take effect when we commit and program watermarks.
	 */
	if (plane->type == DRM_PLANE_TYPE_OVERLAY && IS_IVYBRIDGE(dev) &&
	    needs_scaling(to_intel_plane_state(plane_state)) &&
	    !needs_scaling(old_plane_state))
		pipe_config->disable_lp_wm = true;

	return 0;
}
11864
6d3a1ce7
ML
11865static bool encoders_cloneable(const struct intel_encoder *a,
11866 const struct intel_encoder *b)
11867{
11868 /* masks could be asymmetric, so check both ways */
11869 return a == b || (a->cloneable & (1 << b->type) &&
11870 b->cloneable & (1 << a->type));
11871}
11872
11873static bool check_single_encoder_cloning(struct drm_atomic_state *state,
11874 struct intel_crtc *crtc,
11875 struct intel_encoder *encoder)
11876{
11877 struct intel_encoder *source_encoder;
11878 struct drm_connector *connector;
11879 struct drm_connector_state *connector_state;
11880 int i;
11881
11882 for_each_connector_in_state(state, connector, connector_state, i) {
11883 if (connector_state->crtc != &crtc->base)
11884 continue;
11885
11886 source_encoder =
11887 to_intel_encoder(connector_state->best_encoder);
11888 if (!encoders_cloneable(encoder, source_encoder))
11889 return false;
11890 }
11891
11892 return true;
11893}
11894
11895static bool check_encoder_cloning(struct drm_atomic_state *state,
11896 struct intel_crtc *crtc)
11897{
11898 struct intel_encoder *encoder;
11899 struct drm_connector *connector;
11900 struct drm_connector_state *connector_state;
11901 int i;
11902
11903 for_each_connector_in_state(state, connector, connector_state, i) {
11904 if (connector_state->crtc != &crtc->base)
11905 continue;
11906
11907 encoder = to_intel_encoder(connector_state->best_encoder);
11908 if (!check_single_encoder_cloning(state, crtc, encoder))
11909 return false;
11910 }
11911
11912 return true;
11913}
11914
/*
 * intel_crtc_atomic_check - validate and fill in the new crtc state
 * @crtc: crtc being checked
 * @crtc_state: new state for @crtc
 *
 * Atomic .atomic_check hook for crtcs: rejects invalid encoder cloning,
 * computes clocks on a modeset, validates color management state, computes
 * target and intermediate watermarks, and sets up the gen9+ pipe scalers.
 *
 * Returns 0 if the state is valid, a negative error code otherwise.
 */
static int intel_crtc_atomic_check(struct drm_crtc *crtc,
				   struct drm_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *pipe_config =
		to_intel_crtc_state(crtc_state);
	struct drm_atomic_state *state = crtc_state->state;
	int ret;
	bool mode_changed = needs_modeset(crtc_state);

	if (mode_changed && !check_encoder_cloning(state, intel_crtc)) {
		DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
		return -EINVAL;
	}

	/* A modeset that turns the crtc off needs a post-vblank wm update. */
	if (mode_changed && !crtc_state->active)
		pipe_config->update_wm_post = true;

	/* Compute new clocks on a modeset; shared_dpll must not be set yet. */
	if (mode_changed && crtc_state->enable &&
	    dev_priv->display.crtc_compute_clock &&
	    !WARN_ON(pipe_config->shared_dpll)) {
		ret = dev_priv->display.crtc_compute_clock(intel_crtc,
							   pipe_config);
		if (ret)
			return ret;
	}

	if (crtc_state->color_mgmt_changed) {
		ret = intel_color_check(crtc, crtc_state);
		if (ret)
			return ret;
	}

	ret = 0;
	if (dev_priv->display.compute_pipe_wm) {
		ret = dev_priv->display.compute_pipe_wm(pipe_config);
		if (ret) {
			DRM_DEBUG_KMS("Target pipe watermarks are invalid\n");
			return ret;
		}
	}

	if (dev_priv->display.compute_intermediate_wm &&
	    !to_intel_atomic_state(state)->skip_intermediate_wm) {
		/* Intermediate wms are meaningless without target wms. */
		if (WARN_ON(!dev_priv->display.compute_pipe_wm))
			return 0;

		/*
		 * Calculate 'intermediate' watermarks that satisfy both the
		 * old state and the new state. We can program these
		 * immediately.
		 */
		ret = dev_priv->display.compute_intermediate_wm(crtc->dev,
								intel_crtc,
								pipe_config);
		if (ret) {
			DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n");
			return ret;
		}
	} else if (dev_priv->display.compute_intermediate_wm) {
		/* Skipping intermediate wms: just reuse the optimal ones. */
		if (HAS_PCH_SPLIT(dev_priv) && INTEL_GEN(dev_priv) < 9)
			pipe_config->wm.ilk.intermediate = pipe_config->wm.ilk.optimal;
	}

	if (INTEL_INFO(dev)->gen >= 9) {
		if (mode_changed)
			ret = skl_update_scaler_crtc(pipe_config);

		if (!ret)
			ret = intel_atomic_setup_scalers(dev, intel_crtc,
							 pipe_config);
	}

	return ret;
}
11992
/* CRTC helper vtable wired into the DRM atomic helpers; the commit and
 * check work is done by the referenced intel_* hook implementations. */
static const struct drm_crtc_helper_funcs intel_helper_funcs = {
	.mode_set_base_atomic = intel_pipe_set_base_atomic,
	.atomic_begin = intel_begin_crtc_commit,
	.atomic_flush = intel_finish_crtc_commit,
	.atomic_check = intel_crtc_atomic_check,
};
11999
d29b2f9d
ACO
/*
 * Sync every connector's atomic state with its legacy encoder link:
 * best_encoder/crtc in the connector state are (re)derived from
 * connector->encoder, dropping and taking connector references so the
 * refcounting matches the new assignment.
 */
static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
{
	struct intel_connector *connector;

	for_each_intel_connector(dev, connector) {
		/* Drop the reference held for the old crtc assignment. */
		if (connector->base.state->crtc)
			drm_connector_unreference(&connector->base);

		if (connector->base.encoder) {
			connector->base.state->best_encoder =
				connector->base.encoder;
			connector->base.state->crtc =
				connector->base.encoder->crtc;

			/* Hold a reference for the new crtc assignment. */
			drm_connector_reference(&connector->base);
		} else {
			connector->base.state->best_encoder = NULL;
			connector->base.state->crtc = NULL;
		}
	}
}
12021
/*
 * connected_sink_compute_bpp - clamp pipe bpp to one sink's capabilities
 * @connector: connector whose sink limits are applied
 * @pipe_config: crtc state whose pipe_bpp may be lowered
 *
 * Only ever lowers pipe_bpp: either to 3x the EDID-reported bpc, or to a
 * conservative default when the sink reports no bpc at all.
 */
static void
connected_sink_compute_bpp(struct intel_connector *connector,
			   struct intel_crtc_state *pipe_config)
{
	int bpp = pipe_config->pipe_bpp;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] checking for sink bpp constrains\n",
		      connector->base.base.id,
		      connector->base.name);

	/* Don't use an invalid EDID bpc value */
	if (connector->base.display_info.bpc &&
	    connector->base.display_info.bpc * 3 < bpp) {
		DRM_DEBUG_KMS("clamping display bpp (was %d) to EDID reported max of %d\n",
			      bpp, connector->base.display_info.bpc*3);
		pipe_config->pipe_bpp = connector->base.display_info.bpc*3;
	}

	/* Clamp bpp to default limit on screens without EDID 1.4 */
	if (connector->base.display_info.bpc == 0) {
		int type = connector->base.connector_type;
		int clamp_bpp = 24;

		/* Fall back to 18 bpp when DP sink capability is unknown. */
		if (type == DRM_MODE_CONNECTOR_DisplayPort ||
		    type == DRM_MODE_CONNECTOR_eDP)
			clamp_bpp = 18;

		if (bpp > clamp_bpp) {
			DRM_DEBUG_KMS("clamping display bpp (was %d) to default limit of %d\n",
				      bpp, clamp_bpp);
			pipe_config->pipe_bpp = clamp_bpp;
		}
	}
}
12057
/*
 * compute_baseline_pipe_bpp - initialize pipe_bpp and clamp it per sink
 * @crtc: crtc the pipe config belongs to
 * @pipe_config: state whose pipe_bpp is initialized and then clamped
 *
 * Picks a platform-dependent maximum bpp, stores it in pipe_bpp, then lets
 * every connected sink on @crtc clamp pipe_bpp further.
 *
 * Returns the unclamped platform base bpp (callers log it as the hw max).
 */
static int
compute_baseline_pipe_bpp(struct intel_crtc *crtc,
			  struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_atomic_state *state;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int bpp, i;

	/* Platform-dependent starting point: 10, 12 or 8 bpc per channel. */
	if ((IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)))
		bpp = 10*3;
	else if (INTEL_INFO(dev)->gen >= 5)
		bpp = 12*3;
	else
		bpp = 8*3;


	pipe_config->pipe_bpp = bpp;

	state = pipe_config->base.state;

	/* Clamp display bpp to EDID value */
	for_each_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != &crtc->base)
			continue;

		connected_sink_compute_bpp(to_intel_connector(connector),
					   pipe_config);
	}

	return bpp;
}
12091
644db711
DV
/* Dump the raw crtc_* hardware timing values of @mode to the debug log. */
static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
{
	DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
		      "type: 0x%x flags: 0x%x\n",
		      mode->crtc_clock,
		      mode->crtc_hdisplay, mode->crtc_hsync_start,
		      mode->crtc_hsync_end, mode->crtc_htotal,
		      mode->crtc_vdisplay, mode->crtc_vsync_start,
		      mode->crtc_vsync_end, mode->crtc_vtotal, mode->type, mode->flags);
}
12102
/*
 * intel_dump_pipe_config - dump a crtc state to the debug log
 * @crtc: crtc the state belongs to
 * @pipe_config: state to dump
 * @context: short string identifying where the dump was requested
 *
 * Debug-only helper: prints link m/n values, modes, pfit/scaler state,
 * the platform-specific DPLL state and the state of every plane on the
 * crtc's pipe.
 */
static void intel_dump_pipe_config(struct intel_crtc *crtc,
				   struct intel_crtc_state *pipe_config,
				   const char *context)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_plane *plane;
	struct intel_plane *intel_plane;
	struct intel_plane_state *state;
	struct drm_framebuffer *fb;

	DRM_DEBUG_KMS("[CRTC:%d]%s config %p for pipe %c\n", crtc->base.base.id,
		      context, pipe_config, pipe_name(crtc->pipe));

	DRM_DEBUG_KMS("cpu_transcoder: %s\n", transcoder_name(pipe_config->cpu_transcoder));
	DRM_DEBUG_KMS("pipe bpp: %i, dithering: %i\n",
		      pipe_config->pipe_bpp, pipe_config->dither);
	DRM_DEBUG_KMS("fdi/pch: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
		      pipe_config->has_pch_encoder,
		      pipe_config->fdi_lanes,
		      pipe_config->fdi_m_n.gmch_m, pipe_config->fdi_m_n.gmch_n,
		      pipe_config->fdi_m_n.link_m, pipe_config->fdi_m_n.link_n,
		      pipe_config->fdi_m_n.tu);
	DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
		      pipe_config->has_dp_encoder,
		      pipe_config->lane_count,
		      pipe_config->dp_m_n.gmch_m, pipe_config->dp_m_n.gmch_n,
		      pipe_config->dp_m_n.link_m, pipe_config->dp_m_n.link_n,
		      pipe_config->dp_m_n.tu);

	/* Second set of m/n values (e.g. for DP alternate link rates). */
	DRM_DEBUG_KMS("dp: %i, lanes: %i, gmch_m2: %u, gmch_n2: %u, link_m2: %u, link_n2: %u, tu2: %u\n",
		      pipe_config->has_dp_encoder,
		      pipe_config->lane_count,
		      pipe_config->dp_m2_n2.gmch_m,
		      pipe_config->dp_m2_n2.gmch_n,
		      pipe_config->dp_m2_n2.link_m,
		      pipe_config->dp_m2_n2.link_n,
		      pipe_config->dp_m2_n2.tu);

	DRM_DEBUG_KMS("audio: %i, infoframes: %i\n",
		      pipe_config->has_audio,
		      pipe_config->has_infoframe);

	DRM_DEBUG_KMS("requested mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->base.mode);
	DRM_DEBUG_KMS("adjusted mode:\n");
	drm_mode_debug_printmodeline(&pipe_config->base.adjusted_mode);
	intel_dump_crtc_timings(&pipe_config->base.adjusted_mode);
	DRM_DEBUG_KMS("port clock: %d\n", pipe_config->port_clock);
	DRM_DEBUG_KMS("pipe src size: %dx%d\n",
		      pipe_config->pipe_src_w, pipe_config->pipe_src_h);
	DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
		      crtc->num_scalers,
		      pipe_config->scaler_state.scaler_users,
		      pipe_config->scaler_state.scaler_id);
	DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
		      pipe_config->gmch_pfit.control,
		      pipe_config->gmch_pfit.pgm_ratios,
		      pipe_config->gmch_pfit.lvds_border_bits);
	DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s\n",
		      pipe_config->pch_pfit.pos,
		      pipe_config->pch_pfit.size,
		      pipe_config->pch_pfit.enabled ? "enabled" : "disabled");
	DRM_DEBUG_KMS("ips: %i\n", pipe_config->ips_enabled);
	DRM_DEBUG_KMS("double wide: %i\n", pipe_config->double_wide);

	/* The DPLL hardware state layout is platform specific. */
	if (IS_BROXTON(dev)) {
		DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
			      "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
			      "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
			      pipe_config->ddi_pll_sel,
			      pipe_config->dpll_hw_state.ebb0,
			      pipe_config->dpll_hw_state.ebb4,
			      pipe_config->dpll_hw_state.pll0,
			      pipe_config->dpll_hw_state.pll1,
			      pipe_config->dpll_hw_state.pll2,
			      pipe_config->dpll_hw_state.pll3,
			      pipe_config->dpll_hw_state.pll6,
			      pipe_config->dpll_hw_state.pll8,
			      pipe_config->dpll_hw_state.pll9,
			      pipe_config->dpll_hw_state.pll10,
			      pipe_config->dpll_hw_state.pcsdw12);
	} else if (IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) {
		DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: "
			      "ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
			      pipe_config->ddi_pll_sel,
			      pipe_config->dpll_hw_state.ctrl1,
			      pipe_config->dpll_hw_state.cfgcr1,
			      pipe_config->dpll_hw_state.cfgcr2);
	} else if (HAS_DDI(dev)) {
		DRM_DEBUG_KMS("ddi_pll_sel: 0x%x; dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
			      pipe_config->ddi_pll_sel,
			      pipe_config->dpll_hw_state.wrpll,
			      pipe_config->dpll_hw_state.spll);
	} else {
		DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
			      "fp0: 0x%x, fp1: 0x%x\n",
			      pipe_config->dpll_hw_state.dpll,
			      pipe_config->dpll_hw_state.dpll_md,
			      pipe_config->dpll_hw_state.fp0,
			      pipe_config->dpll_hw_state.fp1);
	}

	DRM_DEBUG_KMS("planes on this crtc\n");
	list_for_each_entry(plane, &dev->mode_config.plane_list, head) {
		intel_plane = to_intel_plane(plane);
		/* Only dump planes that live on this crtc's pipe. */
		if (intel_plane->pipe != crtc->pipe)
			continue;

		state = to_intel_plane_state(plane->state);
		fb = state->base.fb;
		if (!fb) {
			DRM_DEBUG_KMS("%s PLANE:%d plane: %u.%u idx: %d "
				"disabled, scaler_id = %d\n",
				plane->type == DRM_PLANE_TYPE_CURSOR ? "CURSOR" : "STANDARD",
				plane->base.id, intel_plane->pipe,
				(crtc->base.primary == plane) ? 0 : intel_plane->plane + 1,
				drm_plane_index(plane), state->scaler_id);
			continue;
		}

		DRM_DEBUG_KMS("%s PLANE:%d plane: %u.%u idx: %d enabled",
			plane->type == DRM_PLANE_TYPE_CURSOR ? "CURSOR" : "STANDARD",
			plane->base.id, intel_plane->pipe,
			crtc->base.primary == plane ? 0 : intel_plane->plane + 1,
			drm_plane_index(plane));
		DRM_DEBUG_KMS("\tFB:%d, fb = %ux%u format = 0x%x",
			fb->base.id, fb->width, fb->height, fb->pixel_format);
		DRM_DEBUG_KMS("\tscaler:%d src (%u, %u) %ux%u dst (%u, %u) %ux%u\n",
			state->scaler_id,
			state->src.x1 >> 16, state->src.y1 >> 16,
			drm_rect_width(&state->src) >> 16,
			drm_rect_height(&state->src) >> 16,
			state->dst.x1, state->dst.y1,
			drm_rect_width(&state->dst), drm_rect_height(&state->dst));
	}
}
12239
/*
 * check_digital_port_conflicts - reject states that double-book a port
 * @state: atomic state to check
 *
 * Returns false if two active digital encoders in @state would end up on
 * the same physical port, true if the configuration is conflict free.
 */
static bool check_digital_port_conflicts(struct drm_atomic_state *state)
{
	struct drm_device *dev = state->dev;
	struct drm_connector *connector;
	unsigned int used_ports = 0;

	/*
	 * Walk the connector list instead of the encoder
	 * list to detect the problem on ddi platforms
	 * where there's just one encoder per digital port.
	 */
	drm_for_each_connector(connector, dev) {
		struct drm_connector_state *connector_state;
		struct intel_encoder *encoder;

		/* Prefer the new state; fall back to the current one. */
		connector_state = drm_atomic_get_existing_connector_state(state, connector);
		if (!connector_state)
			connector_state = connector->state;

		if (!connector_state->best_encoder)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		WARN_ON(!connector_state->crtc);

		switch (encoder->type) {
			unsigned int port_mask;
		case INTEL_OUTPUT_UNKNOWN:
			/* "Unknown" outputs only exist on DDI platforms,
			 * where they occupy a digital port like the cases
			 * below. */
			if (WARN_ON(!HAS_DDI(dev)))
				break;
			/* fall through */
		case INTEL_OUTPUT_DISPLAYPORT:
		case INTEL_OUTPUT_HDMI:
		case INTEL_OUTPUT_EDP:
			port_mask = 1 << enc_to_dig_port(&encoder->base)->port;

			/* the same port mustn't appear more than once */
			if (used_ports & port_mask)
				return false;

			used_ports |= port_mask;
			/* fall through */
		default:
			break;
		}
	}

	return true;
}
12288
83a57153
ACO
/*
 * clear_intel_crtc_state - reset a crtc state before recomputing it
 * @crtc_state: state to clear
 *
 * Zeroes the i915-private part of @crtc_state while preserving the drm
 * core state (base) and the handful of fields that must survive a
 * recompute (scalers, shared dpll + hw state, ddi pll select, pch pfit
 * force_thru).
 */
static void
clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
{
	struct drm_crtc_state tmp_state;
	struct intel_crtc_scaler_state scaler_state;
	struct intel_dpll_hw_state dpll_hw_state;
	struct intel_shared_dpll *shared_dpll;
	uint32_t ddi_pll_sel;
	bool force_thru;

	/* FIXME: before the switch to atomic started, a new pipe_config was
	 * kzalloc'd. Code that depends on any field being zero should be
	 * fixed, so that the crtc_state can be safely duplicated. For now,
	 * only fields that are known to not cause problems are preserved. */

	tmp_state = crtc_state->base;
	scaler_state = crtc_state->scaler_state;
	shared_dpll = crtc_state->shared_dpll;
	dpll_hw_state = crtc_state->dpll_hw_state;
	ddi_pll_sel = crtc_state->ddi_pll_sel;
	force_thru = crtc_state->pch_pfit.force_thru;

	memset(crtc_state, 0, sizeof *crtc_state);

	crtc_state->base = tmp_state;
	crtc_state->scaler_state = scaler_state;
	crtc_state->shared_dpll = shared_dpll;
	crtc_state->dpll_hw_state = dpll_hw_state;
	crtc_state->ddi_pll_sel = ddi_pll_sel;
	crtc_state->pch_pfit.force_thru = force_thru;
}
12320
/*
 * intel_modeset_pipe_config - compute a complete pipe configuration
 * @crtc: crtc the configuration is computed for
 * @pipe_config: state to fill in; its base.state and modes are inputs
 *
 * Negotiates the pipe configuration between the crtc and all encoders
 * feeding it: sanitizes sync flags, computes baseline bpp and pipe source
 * size, then lets each encoder adjust the config. If the crtc fixup asks
 * for it (RETRY), the encoder loop is re-run exactly once.
 *
 * Returns 0 on success or a negative error code on failure.
 */
static int
intel_modeset_pipe_config(struct drm_crtc *crtc,
			  struct intel_crtc_state *pipe_config)
{
	struct drm_atomic_state *state = pipe_config->base.state;
	struct intel_encoder *encoder;
	struct drm_connector *connector;
	struct drm_connector_state *connector_state;
	int base_bpp, ret = -EINVAL;
	int i;
	bool retry = true;

	clear_intel_crtc_state(pipe_config);

	pipe_config->cpu_transcoder =
		(enum transcoder) to_intel_crtc(crtc)->pipe;

	/*
	 * Sanitize sync polarity flags based on requested ones. If neither
	 * positive or negative polarity is requested, treat this as meaning
	 * negative polarity.
	 */
	if (!(pipe_config->base.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;

	if (!(pipe_config->base.adjusted_mode.flags &
	      (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
		pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;

	base_bpp = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
					     pipe_config);
	if (base_bpp < 0)
		goto fail;

	/*
	 * Determine the real pipe dimensions. Note that stereo modes can
	 * increase the actual pipe size due to the frame doubling and
	 * insertion of additional space for blanks between the frame. This
	 * is stored in the crtc timings. We use the requested mode to do this
	 * computation to clearly distinguish it from the adjusted mode, which
	 * can be changed by the connectors in the below retry loop.
	 */
	drm_crtc_get_hv_timing(&pipe_config->base.mode,
			       &pipe_config->pipe_src_w,
			       &pipe_config->pipe_src_h);

encoder_retry:
	/* Ensure the port clock defaults are reset when retrying. */
	pipe_config->port_clock = 0;
	pipe_config->pixel_multiplier = 1;

	/* Fill in default crtc timings, allow encoders to overwrite them. */
	drm_mode_set_crtcinfo(&pipe_config->base.adjusted_mode,
			      CRTC_STEREO_DOUBLE);

	/* Pass our mode to the connectors and the CRTC to give them a chance to
	 * adjust it according to limitations or connector properties, and also
	 * a chance to reject the mode entirely.
	 */
	for_each_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc != crtc)
			continue;

		encoder = to_intel_encoder(connector_state->best_encoder);

		if (!(encoder->compute_config(encoder, pipe_config))) {
			DRM_DEBUG_KMS("Encoder config failure\n");
			goto fail;
		}
	}

	/* Set default port clock if not overwritten by the encoder. Needs to be
	 * done afterwards in case the encoder adjusts the mode. */
	if (!pipe_config->port_clock)
		pipe_config->port_clock = pipe_config->base.adjusted_mode.crtc_clock
			* pipe_config->pixel_multiplier;

	ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
	if (ret < 0) {
		DRM_DEBUG_KMS("CRTC fixup failed\n");
		goto fail;
	}

	if (ret == RETRY) {
		/* Only one retry is allowed to guarantee termination. */
		if (WARN(!retry, "loop in pipe configuration computation\n")) {
			ret = -EINVAL;
			goto fail;
		}

		DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
		retry = false;
		goto encoder_retry;
	}

	/* Dithering seems to not pass-through bits correctly when it should, so
	 * only enable it on 6bpc panels. */
	pipe_config->dither = pipe_config->pipe_bpp == 6*3;
	DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
		      base_bpp, pipe_config->pipe_bpp, pipe_config->dither);

fail:
	return ret;
}
47f1c6c9 12425
/*
 * intel_modeset_update_crtc_state - sync legacy crtc fields after a commit
 * @state: the committed atomic state
 *
 * Points each intel_crtc's config at its new atomic state, refreshes the
 * hwmode used by the vblank code, and mirrors the primary plane state into
 * the legacy crtc fields still consumed by non-atomic code (fbc).
 */
static void
intel_modeset_update_crtc_state(struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *crtc_state;
	int i;

	/* Double check state. */
	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		to_intel_crtc(crtc)->config = to_intel_crtc_state(crtc->state);

		/* Update hwmode for vblank functions */
		if (crtc->state->active)
			crtc->hwmode = crtc->state->adjusted_mode;
		else
			crtc->hwmode.crtc_clock = 0;

		/*
		 * Update legacy state to satisfy fbc code. This can
		 * be removed when fbc uses the atomic state.
		 */
		if (drm_atomic_get_existing_plane_state(state, crtc->primary)) {
			struct drm_plane_state *plane_state = crtc->primary->state;

			crtc->primary->fb = plane_state->fb;
			crtc->x = plane_state->src_x >> 16;
			crtc->y = plane_state->src_y >> 16;
		}
	}
}
12456
3bd26263 12457static bool intel_fuzzy_clock_check(int clock1, int clock2)
f1f644dc 12458{
3bd26263 12459 int diff;
f1f644dc
JB
12460
12461 if (clock1 == clock2)
12462 return true;
12463
12464 if (!clock1 || !clock2)
12465 return false;
12466
12467 diff = abs(clock1 - clock2);
12468
12469 if (((((diff + clock1 + clock2) * 100)) / (clock1 + clock2)) < 105)
12470 return true;
12471
12472 return false;
12473}
12474
/*
 * Iterate over every intel_crtc on @dev whose pipe bit is set in @mask
 * (bit N selects pipe N).
 */
#define for_each_intel_crtc_masked(dev, mask, intel_crtc) \
	list_for_each_entry((intel_crtc), \
			    &(dev)->mode_config.crtc_list, \
			    base.head) \
		for_each_if (mask & (1 <<(intel_crtc)->pipe))
/*
 * Compare two m/n ratios. With @exact the values must match bit for bit.
 * Otherwise the pair with the smaller n is doubled until the n values meet;
 * if they become equal the m values are compared fuzzily, and if the n
 * values never line up the ratios are considered different. A zero in any
 * component forces the exact path (and thus a mismatch unless both pairs
 * were already identical).
 */
static bool
intel_compare_m_n(unsigned int m, unsigned int n,
		  unsigned int m2, unsigned int n2,
		  bool exact)
{
	if (m == m2 && n == n2)
		return true;

	if (exact || !m || !n || !m2 || !n2)
		return false;

	/* Left-shifting below must not overflow into the sign bit. */
	BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);

	if (n > n2) {
		while (n > n2) {
			m2 <<= 1;
			n2 <<= 1;
		}
	} else if (n < n2) {
		while (n < n2) {
			m <<= 1;
			n <<= 1;
		}
	}

	if (n != n2)
		return false;

	return intel_fuzzy_clock_check(m, m2);
}
12511
/*
 * Compare two link m/n configurations (tu plus the gmch and link m/n
 * ratios). With @adjust set the comparison is fuzzy, and on a match
 * *m2_n2 is overwritten with *m_n so that a later exact comparison of
 * the same state will also succeed.
 */
static bool
intel_compare_link_m_n(const struct intel_link_m_n *m_n,
		       struct intel_link_m_n *m2_n2,
		       bool adjust)
{
	if (m_n->tu == m2_n2->tu &&
	    intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
			      m2_n2->gmch_m, m2_n2->gmch_n, !adjust) &&
	    intel_compare_m_n(m_n->link_m, m_n->link_n,
			      m2_n2->link_m, m2_n2->link_n, !adjust)) {
		if (adjust)
			*m2_n2 = *m_n;

		return true;
	}

	return false;
}
12530
0e8ffe1b 12531static bool
2fa2fe9a 12532intel_pipe_config_compare(struct drm_device *dev,
5cec258b 12533 struct intel_crtc_state *current_config,
cfb23ed6
ML
12534 struct intel_crtc_state *pipe_config,
12535 bool adjust)
0e8ffe1b 12536{
cfb23ed6
ML
12537 bool ret = true;
12538
12539#define INTEL_ERR_OR_DBG_KMS(fmt, ...) \
12540 do { \
12541 if (!adjust) \
12542 DRM_ERROR(fmt, ##__VA_ARGS__); \
12543 else \
12544 DRM_DEBUG_KMS(fmt, ##__VA_ARGS__); \
12545 } while (0)
12546
66e985c0
DV
12547#define PIPE_CONF_CHECK_X(name) \
12548 if (current_config->name != pipe_config->name) { \
cfb23ed6 12549 INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
66e985c0
DV
12550 "(expected 0x%08x, found 0x%08x)\n", \
12551 current_config->name, \
12552 pipe_config->name); \
cfb23ed6 12553 ret = false; \
66e985c0
DV
12554 }
12555
08a24034
DV
12556#define PIPE_CONF_CHECK_I(name) \
12557 if (current_config->name != pipe_config->name) { \
cfb23ed6 12558 INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
08a24034
DV
12559 "(expected %i, found %i)\n", \
12560 current_config->name, \
12561 pipe_config->name); \
cfb23ed6
ML
12562 ret = false; \
12563 }
12564
8106ddbd
ACO
12565#define PIPE_CONF_CHECK_P(name) \
12566 if (current_config->name != pipe_config->name) { \
12567 INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
12568 "(expected %p, found %p)\n", \
12569 current_config->name, \
12570 pipe_config->name); \
12571 ret = false; \
12572 }
12573
cfb23ed6
ML
12574#define PIPE_CONF_CHECK_M_N(name) \
12575 if (!intel_compare_link_m_n(&current_config->name, \
12576 &pipe_config->name,\
12577 adjust)) { \
12578 INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
12579 "(expected tu %i gmch %i/%i link %i/%i, " \
12580 "found tu %i, gmch %i/%i link %i/%i)\n", \
12581 current_config->name.tu, \
12582 current_config->name.gmch_m, \
12583 current_config->name.gmch_n, \
12584 current_config->name.link_m, \
12585 current_config->name.link_n, \
12586 pipe_config->name.tu, \
12587 pipe_config->name.gmch_m, \
12588 pipe_config->name.gmch_n, \
12589 pipe_config->name.link_m, \
12590 pipe_config->name.link_n); \
12591 ret = false; \
12592 }
12593
55c561a7
DV
12594/* This is required for BDW+ where there is only one set of registers for
12595 * switching between high and low RR.
12596 * This macro can be used whenever a comparison has to be made between one
12597 * hw state and multiple sw state variables.
12598 */
cfb23ed6
ML
12599#define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) \
12600 if (!intel_compare_link_m_n(&current_config->name, \
12601 &pipe_config->name, adjust) && \
12602 !intel_compare_link_m_n(&current_config->alt_name, \
12603 &pipe_config->name, adjust)) { \
12604 INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
12605 "(expected tu %i gmch %i/%i link %i/%i, " \
12606 "or tu %i gmch %i/%i link %i/%i, " \
12607 "found tu %i, gmch %i/%i link %i/%i)\n", \
12608 current_config->name.tu, \
12609 current_config->name.gmch_m, \
12610 current_config->name.gmch_n, \
12611 current_config->name.link_m, \
12612 current_config->name.link_n, \
12613 current_config->alt_name.tu, \
12614 current_config->alt_name.gmch_m, \
12615 current_config->alt_name.gmch_n, \
12616 current_config->alt_name.link_m, \
12617 current_config->alt_name.link_n, \
12618 pipe_config->name.tu, \
12619 pipe_config->name.gmch_m, \
12620 pipe_config->name.gmch_n, \
12621 pipe_config->name.link_m, \
12622 pipe_config->name.link_n); \
12623 ret = false; \
88adfff1
DV
12624 }
12625
1bd1bd80
DV
12626#define PIPE_CONF_CHECK_FLAGS(name, mask) \
12627 if ((current_config->name ^ pipe_config->name) & (mask)) { \
cfb23ed6 12628 INTEL_ERR_OR_DBG_KMS("mismatch in " #name "(" #mask ") " \
1bd1bd80
DV
12629 "(expected %i, found %i)\n", \
12630 current_config->name & (mask), \
12631 pipe_config->name & (mask)); \
cfb23ed6 12632 ret = false; \
1bd1bd80
DV
12633 }
12634
5e550656
VS
12635#define PIPE_CONF_CHECK_CLOCK_FUZZY(name) \
12636 if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
cfb23ed6 12637 INTEL_ERR_OR_DBG_KMS("mismatch in " #name " " \
5e550656
VS
12638 "(expected %i, found %i)\n", \
12639 current_config->name, \
12640 pipe_config->name); \
cfb23ed6 12641 ret = false; \
5e550656
VS
12642 }
12643
bb760063
DV
12644#define PIPE_CONF_QUIRK(quirk) \
12645 ((current_config->quirks | pipe_config->quirks) & (quirk))
12646
eccb140b
DV
12647 PIPE_CONF_CHECK_I(cpu_transcoder);
12648
08a24034
DV
12649 PIPE_CONF_CHECK_I(has_pch_encoder);
12650 PIPE_CONF_CHECK_I(fdi_lanes);
cfb23ed6 12651 PIPE_CONF_CHECK_M_N(fdi_m_n);
08a24034 12652
eb14cb74 12653 PIPE_CONF_CHECK_I(has_dp_encoder);
90a6b7b0 12654 PIPE_CONF_CHECK_I(lane_count);
b95af8be
VK
12655
12656 if (INTEL_INFO(dev)->gen < 8) {
cfb23ed6
ML
12657 PIPE_CONF_CHECK_M_N(dp_m_n);
12658
cfb23ed6
ML
12659 if (current_config->has_drrs)
12660 PIPE_CONF_CHECK_M_N(dp_m2_n2);
12661 } else
12662 PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
eb14cb74 12663
a65347ba
JN
12664 PIPE_CONF_CHECK_I(has_dsi_encoder);
12665
2d112de7
ACO
12666 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay);
12667 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal);
12668 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_start);
12669 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_end);
12670 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_start);
12671 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_end);
1bd1bd80 12672
2d112de7
ACO
12673 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vdisplay);
12674 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vtotal);
12675 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_start);
12676 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_end);
12677 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_start);
12678 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end);
1bd1bd80 12679
c93f54cf 12680 PIPE_CONF_CHECK_I(pixel_multiplier);
6897b4b5 12681 PIPE_CONF_CHECK_I(has_hdmi_sink);
b5a9fa09 12682 if ((INTEL_INFO(dev)->gen < 8 && !IS_HASWELL(dev)) ||
666a4537 12683 IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
b5a9fa09 12684 PIPE_CONF_CHECK_I(limited_color_range);
e43823ec 12685 PIPE_CONF_CHECK_I(has_infoframe);
6c49f241 12686
9ed109a7
DV
12687 PIPE_CONF_CHECK_I(has_audio);
12688
2d112de7 12689 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
1bd1bd80
DV
12690 DRM_MODE_FLAG_INTERLACE);
12691
bb760063 12692 if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
2d112de7 12693 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
bb760063 12694 DRM_MODE_FLAG_PHSYNC);
2d112de7 12695 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
bb760063 12696 DRM_MODE_FLAG_NHSYNC);
2d112de7 12697 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
bb760063 12698 DRM_MODE_FLAG_PVSYNC);
2d112de7 12699 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
bb760063
DV
12700 DRM_MODE_FLAG_NVSYNC);
12701 }
045ac3b5 12702
333b8ca8 12703 PIPE_CONF_CHECK_X(gmch_pfit.control);
e2ff2d4a
DV
12704 /* pfit ratios are autocomputed by the hw on gen4+ */
12705 if (INTEL_INFO(dev)->gen < 4)
7f7d8dd6 12706 PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
333b8ca8 12707 PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
9953599b 12708
bfd16b2a
ML
12709 if (!adjust) {
12710 PIPE_CONF_CHECK_I(pipe_src_w);
12711 PIPE_CONF_CHECK_I(pipe_src_h);
12712
12713 PIPE_CONF_CHECK_I(pch_pfit.enabled);
12714 if (current_config->pch_pfit.enabled) {
12715 PIPE_CONF_CHECK_X(pch_pfit.pos);
12716 PIPE_CONF_CHECK_X(pch_pfit.size);
12717 }
2fa2fe9a 12718
7aefe2b5
ML
12719 PIPE_CONF_CHECK_I(scaler_state.scaler_id);
12720 }
a1b2278e 12721
e59150dc
JB
12722 /* BDW+ don't expose a synchronous way to read the state */
12723 if (IS_HASWELL(dev))
12724 PIPE_CONF_CHECK_I(ips_enabled);
42db64ef 12725
282740f7
VS
12726 PIPE_CONF_CHECK_I(double_wide);
12727
26804afd
DV
12728 PIPE_CONF_CHECK_X(ddi_pll_sel);
12729
8106ddbd 12730 PIPE_CONF_CHECK_P(shared_dpll);
66e985c0 12731 PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
8bcc2795 12732 PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
66e985c0
DV
12733 PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
12734 PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
d452c5b6 12735 PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
00490c22 12736 PIPE_CONF_CHECK_X(dpll_hw_state.spll);
3f4cd19f
DL
12737 PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
12738 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
12739 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
c0d43d62 12740
47eacbab
VS
12741 PIPE_CONF_CHECK_X(dsi_pll.ctrl);
12742 PIPE_CONF_CHECK_X(dsi_pll.div);
12743
42571aef
VS
12744 if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
12745 PIPE_CONF_CHECK_I(pipe_bpp);
12746
2d112de7 12747 PIPE_CONF_CHECK_CLOCK_FUZZY(base.adjusted_mode.crtc_clock);
a9a7e98a 12748 PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
5e550656 12749
66e985c0 12750#undef PIPE_CONF_CHECK_X
08a24034 12751#undef PIPE_CONF_CHECK_I
8106ddbd 12752#undef PIPE_CONF_CHECK_P
1bd1bd80 12753#undef PIPE_CONF_CHECK_FLAGS
5e550656 12754#undef PIPE_CONF_CHECK_CLOCK_FUZZY
bb760063 12755#undef PIPE_CONF_QUIRK
cfb23ed6 12756#undef INTEL_ERR_OR_DBG_KMS
88adfff1 12757
cfb23ed6 12758 return ret;
0e8ffe1b
DV
12759}
12760
/*
 * Cross-check the dotclock derived from the FDI link parameters against
 * the dotclock the encoder computed; warn on a (fuzzy) mismatch.
 */
static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
					   const struct intel_crtc_state *pipe_config)
{
	if (pipe_config->has_pch_encoder) {
		int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
							    &pipe_config->fdi_m_n);
		int dotclock = pipe_config->base.adjusted_mode.crtc_clock;

		/*
		 * FDI already provided one idea for the dotclock.
		 * Yell if the encoder disagrees.
		 */
		WARN(!intel_fuzzy_clock_check(fdi_dotclock, dotclock),
		     "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
		     fdi_dotclock, dotclock);
	}
}
12778
/*
 * Compare the SKL+ DDB (display buffer) allocation read back from the
 * hardware against the software-tracked allocation for this pipe's
 * planes and cursor. Gen < 9 has no DDB; inactive crtcs are skipped.
 */
static void verify_wm_state(struct drm_crtc *crtc,
			    struct drm_crtc_state *new_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct skl_ddb_allocation hw_ddb, *sw_ddb;
	struct skl_ddb_entry *hw_entry, *sw_entry;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	const enum pipe pipe = intel_crtc->pipe;
	int plane;

	if (INTEL_INFO(dev)->gen < 9 || !new_state->active)
		return;

	skl_ddb_get_hw_state(dev_priv, &hw_ddb);
	sw_ddb = &dev_priv->wm.skl_hw.ddb;

	/* planes */
	for_each_plane(dev_priv, pipe, plane) {
		hw_entry = &hw_ddb.plane[pipe][plane];
		sw_entry = &sw_ddb->plane[pipe][plane];

		if (skl_ddb_entry_equal(hw_entry, sw_entry))
			continue;

		DRM_ERROR("mismatch in DDB state pipe %c plane %d "
			  "(expected (%u,%u), found (%u,%u))\n",
			  pipe_name(pipe), plane + 1,
			  sw_entry->start, sw_entry->end,
			  hw_entry->start, hw_entry->end);
	}

	/* cursor */
	hw_entry = &hw_ddb.plane[pipe][PLANE_CURSOR];
	sw_entry = &sw_ddb->plane[pipe][PLANE_CURSOR];

	if (!skl_ddb_entry_equal(hw_entry, sw_entry)) {
		DRM_ERROR("mismatch in DDB state pipe %c cursor "
			  "(expected (%u,%u), found (%u,%u))\n",
			  pipe_name(pipe),
			  sw_entry->start, sw_entry->end,
			  hw_entry->start, hw_entry->end);
	}
}
12823
91d1b4bd 12824static void
c0ead703 12825verify_connector_state(struct drm_device *dev, struct drm_crtc *crtc)
8af6cf88 12826{
35dd3c64 12827 struct drm_connector *connector;
8af6cf88 12828
e7c84544 12829 drm_for_each_connector(connector, dev) {
35dd3c64
ML
12830 struct drm_encoder *encoder = connector->encoder;
12831 struct drm_connector_state *state = connector->state;
ad3c558f 12832
e7c84544
ML
12833 if (state->crtc != crtc)
12834 continue;
12835
c0ead703 12836 intel_connector_verify_state(to_intel_connector(connector));
8af6cf88 12837
ad3c558f 12838 I915_STATE_WARN(state->best_encoder != encoder,
35dd3c64 12839 "connector's atomic encoder doesn't match legacy encoder\n");
8af6cf88 12840 }
91d1b4bd
DV
12841}
12842
/*
 * For every encoder, check that its enabled/crtc bookkeeping matches the
 * connectors pointing at it, and that an encoder with no crtc is really
 * disabled in hardware.
 */
static void
verify_encoder_state(struct drm_device *dev)
{
	struct intel_encoder *encoder;
	struct intel_connector *connector;

	for_each_intel_encoder(dev, encoder) {
		bool enabled = false;
		enum pipe pipe;

		DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
			      encoder->base.base.id,
			      encoder->base.name);

		for_each_intel_connector(dev, connector) {
			if (connector->base.state->best_encoder != &encoder->base)
				continue;
			/* at least one connector uses this encoder */
			enabled = true;

			I915_STATE_WARN(connector->base.state->crtc !=
					encoder->base.crtc,
			     "connector's crtc doesn't match encoder crtc\n");
		}

		I915_STATE_WARN(!!encoder->base.crtc != enabled,
		     "encoder's enabled state mismatch "
		     "(expected %i, found %i)\n",
		     !!encoder->base.crtc, enabled);

		if (!encoder->base.crtc) {
			bool active;

			/* detached encoders must be off in hardware too */
			active = encoder->get_hw_state(encoder, &pipe);
			I915_STATE_WARN(active,
			     "encoder detached but still enabled on pipe %c.\n",
			     pipe_name(pipe));
		}
	}
}
12882
/*
 * Read the crtc configuration back from the hardware and compare it,
 * along with the encoders' active state, against the committed software
 * state. The old_crtc_state's memory is destroyed and reused as scratch
 * space for the hw readout, so it must not be used by the caller again.
 */
static void
verify_crtc_state(struct drm_crtc *crtc,
		  struct drm_crtc_state *old_crtc_state,
		  struct drm_crtc_state *new_crtc_state)
{
	struct drm_device *dev = crtc->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	struct intel_crtc_state *pipe_config, *sw_config;
	struct drm_atomic_state *old_state;
	bool active;

	/* Recycle old_crtc_state as a zeroed buffer for the hw readout. */
	old_state = old_crtc_state->state;
	__drm_atomic_helper_crtc_destroy_state(crtc, old_crtc_state);
	pipe_config = to_intel_crtc_state(old_crtc_state);
	memset(pipe_config, 0, sizeof(*pipe_config));
	pipe_config->base.crtc = crtc;
	pipe_config->base.state = old_state;

	DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);

	active = dev_priv->display.get_pipe_config(intel_crtc, pipe_config);

	/* hw state is inconsistent with the pipe quirk */
	if ((intel_crtc->pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
	    (intel_crtc->pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
		active = new_crtc_state->active;

	I915_STATE_WARN(new_crtc_state->active != active,
	     "crtc active state doesn't match with hw state "
	     "(expected %i, found %i)\n", new_crtc_state->active, active);

	I915_STATE_WARN(intel_crtc->active != new_crtc_state->active,
	     "transitional active state does not match atomic hw state "
	     "(expected %i, found %i)\n", new_crtc_state->active, intel_crtc->active);

	for_each_encoder_on_crtc(dev, crtc, encoder) {
		enum pipe pipe;

		active = encoder->get_hw_state(encoder, &pipe);
		I915_STATE_WARN(active != new_crtc_state->active,
			"[ENCODER:%i] active %i with crtc active %i\n",
			encoder->base.base.id, active, new_crtc_state->active);

		I915_STATE_WARN(active && intel_crtc->pipe != pipe,
				"Encoder connected to wrong pipe %c\n",
				pipe_name(pipe));

		/* fold the encoder's idea of the config into the readout */
		if (active)
			encoder->get_config(encoder, pipe_config);
	}

	/* Nothing more to compare for an inactive crtc. */
	if (!new_crtc_state->active)
		return;

	intel_pipe_config_sanity_check(dev_priv, pipe_config);

	sw_config = to_intel_crtc_state(crtc->state);
	if (!intel_pipe_config_compare(dev, sw_config,
				       pipe_config, false)) {
		I915_STATE_WARN(1, "pipe state doesn't match!\n");
		intel_dump_pipe_config(intel_crtc, pipe_config,
				       "[hw state]");
		intel_dump_pipe_config(intel_crtc, sw_config,
				       "[sw state]");
	}
}
12951
91d1b4bd 12952static void
c0ead703
ML
12953verify_single_dpll_state(struct drm_i915_private *dev_priv,
12954 struct intel_shared_dpll *pll,
12955 struct drm_crtc *crtc,
12956 struct drm_crtc_state *new_state)
91d1b4bd 12957{
91d1b4bd 12958 struct intel_dpll_hw_state dpll_hw_state;
e7c84544
ML
12959 unsigned crtc_mask;
12960 bool active;
5358901f 12961
e7c84544 12962 memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
5358901f 12963
e7c84544 12964 DRM_DEBUG_KMS("%s\n", pll->name);
5358901f 12965
e7c84544 12966 active = pll->funcs.get_hw_state(dev_priv, pll, &dpll_hw_state);
5358901f 12967
e7c84544
ML
12968 if (!(pll->flags & INTEL_DPLL_ALWAYS_ON)) {
12969 I915_STATE_WARN(!pll->on && pll->active_mask,
12970 "pll in active use but not on in sw tracking\n");
12971 I915_STATE_WARN(pll->on && !pll->active_mask,
12972 "pll is on but not used by any active crtc\n");
12973 I915_STATE_WARN(pll->on != active,
12974 "pll on state mismatch (expected %i, found %i)\n",
12975 pll->on, active);
12976 }
5358901f 12977
e7c84544 12978 if (!crtc) {
2dd66ebd 12979 I915_STATE_WARN(pll->active_mask & ~pll->config.crtc_mask,
e7c84544
ML
12980 "more active pll users than references: %x vs %x\n",
12981 pll->active_mask, pll->config.crtc_mask);
5358901f 12982
e7c84544
ML
12983 return;
12984 }
12985
12986 crtc_mask = 1 << drm_crtc_index(crtc);
12987
12988 if (new_state->active)
12989 I915_STATE_WARN(!(pll->active_mask & crtc_mask),
12990 "pll active mismatch (expected pipe %c in active mask 0x%02x)\n",
12991 pipe_name(drm_crtc_index(crtc)), pll->active_mask);
12992 else
12993 I915_STATE_WARN(pll->active_mask & crtc_mask,
12994 "pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n",
12995 pipe_name(drm_crtc_index(crtc)), pll->active_mask);
2dd66ebd 12996
e7c84544
ML
12997 I915_STATE_WARN(!(pll->config.crtc_mask & crtc_mask),
12998 "pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n",
12999 crtc_mask, pll->config.crtc_mask);
66e985c0 13000
e7c84544
ML
13001 I915_STATE_WARN(pll->on && memcmp(&pll->config.hw_state,
13002 &dpll_hw_state,
13003 sizeof(dpll_hw_state)),
13004 "pll hw state mismatch\n");
13005}
13006
13007static void
c0ead703
ML
13008verify_shared_dpll_state(struct drm_device *dev, struct drm_crtc *crtc,
13009 struct drm_crtc_state *old_crtc_state,
13010 struct drm_crtc_state *new_crtc_state)
e7c84544
ML
13011{
13012 struct drm_i915_private *dev_priv = dev->dev_private;
13013 struct intel_crtc_state *old_state = to_intel_crtc_state(old_crtc_state);
13014 struct intel_crtc_state *new_state = to_intel_crtc_state(new_crtc_state);
13015
13016 if (new_state->shared_dpll)
c0ead703 13017 verify_single_dpll_state(dev_priv, new_state->shared_dpll, crtc, new_crtc_state);
e7c84544
ML
13018
13019 if (old_state->shared_dpll &&
13020 old_state->shared_dpll != new_state->shared_dpll) {
13021 unsigned crtc_mask = 1 << drm_crtc_index(crtc);
13022 struct intel_shared_dpll *pll = old_state->shared_dpll;
13023
13024 I915_STATE_WARN(pll->active_mask & crtc_mask,
13025 "pll active mismatch (didn't expect pipe %c in active mask)\n",
13026 pipe_name(drm_crtc_index(crtc)));
13027 I915_STATE_WARN(pll->config.crtc_mask & crtc_mask,
13028 "pll enabled crtcs mismatch (found %x in enabled mask)\n",
13029 pipe_name(drm_crtc_index(crtc)));
5358901f 13030 }
8af6cf88
DV
13031}
13032
e7c84544 13033static void
c0ead703 13034intel_modeset_verify_crtc(struct drm_crtc *crtc,
e7c84544
ML
13035 struct drm_crtc_state *old_state,
13036 struct drm_crtc_state *new_state)
13037{
13038 if (!needs_modeset(new_state) &&
13039 !to_intel_crtc_state(new_state)->update_pipe)
13040 return;
13041
c0ead703
ML
13042 verify_wm_state(crtc, new_state);
13043 verify_connector_state(crtc->dev, crtc);
13044 verify_crtc_state(crtc, old_state, new_state);
13045 verify_shared_dpll_state(crtc->dev, crtc, old_state, new_state);
e7c84544
ML
13046}
13047
13048static void
c0ead703 13049verify_disabled_dpll_state(struct drm_device *dev)
e7c84544
ML
13050{
13051 struct drm_i915_private *dev_priv = dev->dev_private;
13052 int i;
13053
13054 for (i = 0; i < dev_priv->num_shared_dpll; i++)
c0ead703 13055 verify_single_dpll_state(dev_priv, &dev_priv->shared_dplls[i], NULL, NULL);
e7c84544
ML
13056}
13057
/*
 * Verify the hw/sw state of everything not attached to an active crtc:
 * all encoders, connectors with no crtc, and the shared DPLLs' global
 * bookkeeping.
 */
static void
intel_modeset_verify_disabled(struct drm_device *dev)
{
	verify_encoder_state(dev);
	verify_connector_state(dev, NULL);
	verify_disabled_dpll_state(dev);
}
13065
/*
 * Recompute crtc->scanline_offset, the platform-dependent correction
 * between the hardware scanline counter and the actual scanline.
 */
static void update_scanline_offset(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;

	/*
	 * The scanline counter increments at the leading edge of hsync.
	 *
	 * On most platforms it starts counting from vtotal-1 on the
	 * first active line. That means the scanline counter value is
	 * always one less than what we would expect. Ie. just after
	 * start of vblank, which also occurs at start of hsync (on the
	 * last active line), the scanline counter will read vblank_start-1.
	 *
	 * On gen2 the scanline counter starts counting from 1 instead
	 * of vtotal-1, so we have to subtract one (or rather add vtotal-1
	 * to keep the value positive), instead of adding one.
	 *
	 * On HSW+ the behaviour of the scanline counter depends on the output
	 * type. For DP ports it behaves like most other platforms, but on HDMI
	 * there's an extra 1 line difference. So we need to add two instead of
	 * one to the value.
	 */
	if (IS_GEN2(dev)) {
		const struct drm_display_mode *adjusted_mode = &crtc->config->base.adjusted_mode;
		int vtotal;

		vtotal = adjusted_mode->crtc_vtotal;
		if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
			vtotal /= 2;

		crtc->scanline_offset = vtotal - 1;
	} else if (HAS_DDI(dev) &&
		   intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI)) {
		crtc->scanline_offset = 2;
	} else
		crtc->scanline_offset = 1;
}
13103
ad421372 13104static void intel_modeset_clear_plls(struct drm_atomic_state *state)
ed6739ef 13105{
225da59b 13106 struct drm_device *dev = state->dev;
ed6739ef 13107 struct drm_i915_private *dev_priv = to_i915(dev);
ad421372 13108 struct intel_shared_dpll_config *shared_dpll = NULL;
0a9ab303
ACO
13109 struct drm_crtc *crtc;
13110 struct drm_crtc_state *crtc_state;
0a9ab303 13111 int i;
ed6739ef
ACO
13112
13113 if (!dev_priv->display.crtc_compute_clock)
ad421372 13114 return;
ed6739ef 13115
0a9ab303 13116 for_each_crtc_in_state(state, crtc, crtc_state, i) {
fb1a38a9 13117 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
8106ddbd
ACO
13118 struct intel_shared_dpll *old_dpll =
13119 to_intel_crtc_state(crtc->state)->shared_dpll;
0a9ab303 13120
fb1a38a9 13121 if (!needs_modeset(crtc_state))
225da59b
ACO
13122 continue;
13123
8106ddbd 13124 to_intel_crtc_state(crtc_state)->shared_dpll = NULL;
fb1a38a9 13125
8106ddbd 13126 if (!old_dpll)
fb1a38a9 13127 continue;
0a9ab303 13128
ad421372
ML
13129 if (!shared_dpll)
13130 shared_dpll = intel_atomic_get_shared_dpll_state(state);
ed6739ef 13131
8106ddbd 13132 intel_shared_dpll_config_put(shared_dpll, old_dpll, intel_crtc);
ad421372 13133 }
ed6739ef
ACO
13134}
13135
/*
 * This implements the workaround described in the "notes" section of the mode
 * set sequence documentation. When going from no pipes or single pipe to
 * multiple pipes, and planes are enabled after the pipe, we need to wait at
 * least 2 vblanks on the first pipe before enabling planes on the second pipe.
 */
static int haswell_mode_set_planes_workaround(struct drm_atomic_state *state)
{
	struct drm_crtc_state *crtc_state;
	struct intel_crtc *intel_crtc;
	struct drm_crtc *crtc;
	struct intel_crtc_state *first_crtc_state = NULL;
	struct intel_crtc_state *other_crtc_state = NULL;
	enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
	int i;

	/* look at all crtc's that are going to be enabled in during modeset */
	for_each_crtc_in_state(state, crtc, crtc_state, i) {
		intel_crtc = to_intel_crtc(crtc);

		if (!crtc_state->active || !needs_modeset(crtc_state))
			continue;

		if (first_crtc_state) {
			/* second crtc being enabled — remember it and stop */
			other_crtc_state = to_intel_crtc_state(crtc_state);
			break;
		} else {
			first_crtc_state = to_intel_crtc_state(crtc_state);
			first_pipe = intel_crtc->pipe;
		}
	}

	/* No workaround needed? */
	if (!first_crtc_state)
		return 0;

	/* w/a possibly needed, check how many crtc's are already enabled. */
	for_each_intel_crtc(state->dev, intel_crtc) {
		struct intel_crtc_state *pipe_config;

		pipe_config = intel_atomic_get_crtc_state(state, intel_crtc);
		if (IS_ERR(pipe_config))
			return PTR_ERR(pipe_config);

		pipe_config->hsw_workaround_pipe = INVALID_PIPE;

		if (!pipe_config->base.active ||
		    needs_modeset(&pipe_config->base))
			continue;

		/* 2 or more enabled crtcs means no need for w/a */
		if (enabled_pipe != INVALID_PIPE)
			return 0;

		enabled_pipe = intel_crtc->pipe;
	}

	if (enabled_pipe != INVALID_PIPE)
		first_crtc_state->hsw_workaround_pipe = enabled_pipe;
	else if (other_crtc_state)
		other_crtc_state->hsw_workaround_pipe = first_pipe;

	return 0;
}
13200
27c329ed
ML
13201static int intel_modeset_all_pipes(struct drm_atomic_state *state)
13202{
13203 struct drm_crtc *crtc;
13204 struct drm_crtc_state *crtc_state;
13205 int ret = 0;
13206
13207 /* add all active pipes to the state */
13208 for_each_crtc(state->dev, crtc) {
13209 crtc_state = drm_atomic_get_crtc_state(state, crtc);
13210 if (IS_ERR(crtc_state))
13211 return PTR_ERR(crtc_state);
13212
13213 if (!crtc_state->active || needs_modeset(crtc_state))
13214 continue;
13215
13216 crtc_state->mode_changed = true;
13217
13218 ret = drm_atomic_add_affected_connectors(state, crtc);
13219 if (ret)
13220 break;
13221
13222 ret = drm_atomic_add_affected_planes(state, crtc);
13223 if (ret)
13224 break;
13225 }
13226
13227 return ret;
13228}
13229
c347a676 13230static int intel_modeset_checks(struct drm_atomic_state *state)
054518dd 13231{
565602d7
ML
13232 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
13233 struct drm_i915_private *dev_priv = state->dev->dev_private;
13234 struct drm_crtc *crtc;
13235 struct drm_crtc_state *crtc_state;
13236 int ret = 0, i;
054518dd 13237
b359283a
ML
13238 if (!check_digital_port_conflicts(state)) {
13239 DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
13240 return -EINVAL;
13241 }
13242
565602d7
ML
13243 intel_state->modeset = true;
13244 intel_state->active_crtcs = dev_priv->active_crtcs;
13245
13246 for_each_crtc_in_state(state, crtc, crtc_state, i) {
13247 if (crtc_state->active)
13248 intel_state->active_crtcs |= 1 << i;
13249 else
13250 intel_state->active_crtcs &= ~(1 << i);
8b4a7d05
MR
13251
13252 if (crtc_state->active != crtc->state->active)
13253 intel_state->active_pipe_changes |= drm_crtc_mask(crtc);
565602d7
ML
13254 }
13255
054518dd
ACO
13256 /*
13257 * See if the config requires any additional preparation, e.g.
13258 * to adjust global state with pipes off. We need to do this
13259 * here so we can get the modeset_pipe updated config for the new
13260 * mode set on this crtc. For other crtcs we need to use the
13261 * adjusted_mode bits in the crtc directly.
13262 */
27c329ed 13263 if (dev_priv->display.modeset_calc_cdclk) {
27c329ed
ML
13264 ret = dev_priv->display.modeset_calc_cdclk(state);
13265
1a617b77 13266 if (!ret && intel_state->dev_cdclk != dev_priv->cdclk_freq)
27c329ed
ML
13267 ret = intel_modeset_all_pipes(state);
13268
13269 if (ret < 0)
054518dd 13270 return ret;
e8788cbc
ML
13271
13272 DRM_DEBUG_KMS("New cdclk calculated to be atomic %u, actual %u\n",
13273 intel_state->cdclk, intel_state->dev_cdclk);
27c329ed 13274 } else
1a617b77 13275 to_intel_atomic_state(state)->cdclk = dev_priv->atomic_cdclk_freq;
054518dd 13276
ad421372 13277 intel_modeset_clear_plls(state);
054518dd 13278
565602d7 13279 if (IS_HASWELL(dev_priv))
ad421372 13280 return haswell_mode_set_planes_workaround(state);
99d736a2 13281
ad421372 13282 return 0;
c347a676
ACO
13283}
13284
aa363136
MR
13285/*
13286 * Handle calculation of various watermark data at the end of the atomic check
13287 * phase. The code here should be run after the per-crtc and per-plane 'check'
13288 * handlers to ensure that all derived state has been updated.
13289 */
55994c2c 13290static int calc_watermark_data(struct drm_atomic_state *state)
aa363136
MR
13291{
13292 struct drm_device *dev = state->dev;
98d39494 13293 struct drm_i915_private *dev_priv = to_i915(dev);
98d39494
MR
13294
13295 /* Is there platform-specific watermark information to calculate? */
13296 if (dev_priv->display.compute_global_watermarks)
55994c2c
MR
13297 return dev_priv->display.compute_global_watermarks(state);
13298
13299 return 0;
aa363136
MR
13300}
13301
74c090b1
ML
13302/**
13303 * intel_atomic_check - validate state object
13304 * @dev: drm device
13305 * @state: state to validate
13306 */
13307static int intel_atomic_check(struct drm_device *dev,
13308 struct drm_atomic_state *state)
c347a676 13309{
dd8b3bdb 13310 struct drm_i915_private *dev_priv = to_i915(dev);
aa363136 13311 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
c347a676
ACO
13312 struct drm_crtc *crtc;
13313 struct drm_crtc_state *crtc_state;
13314 int ret, i;
61333b60 13315 bool any_ms = false;
c347a676 13316
74c090b1 13317 ret = drm_atomic_helper_check_modeset(dev, state);
054518dd
ACO
13318 if (ret)
13319 return ret;
13320
c347a676 13321 for_each_crtc_in_state(state, crtc, crtc_state, i) {
cfb23ed6
ML
13322 struct intel_crtc_state *pipe_config =
13323 to_intel_crtc_state(crtc_state);
1ed51de9
DV
13324
13325 /* Catch I915_MODE_FLAG_INHERITED */
13326 if (crtc_state->mode.private_flags != crtc->state->mode.private_flags)
13327 crtc_state->mode_changed = true;
cfb23ed6 13328
af4a879e 13329 if (!needs_modeset(crtc_state))
c347a676
ACO
13330 continue;
13331
af4a879e
DV
13332 if (!crtc_state->enable) {
13333 any_ms = true;
cfb23ed6 13334 continue;
af4a879e 13335 }
cfb23ed6 13336
26495481
DV
13337 /* FIXME: For only active_changed we shouldn't need to do any
13338 * state recomputation at all. */
13339
1ed51de9
DV
13340 ret = drm_atomic_add_affected_connectors(state, crtc);
13341 if (ret)
13342 return ret;
b359283a 13343
cfb23ed6 13344 ret = intel_modeset_pipe_config(crtc, pipe_config);
25aa1c39
ML
13345 if (ret) {
13346 intel_dump_pipe_config(to_intel_crtc(crtc),
13347 pipe_config, "[failed]");
c347a676 13348 return ret;
25aa1c39 13349 }
c347a676 13350
73831236 13351 if (i915.fastboot &&
dd8b3bdb 13352 intel_pipe_config_compare(dev,
cfb23ed6 13353 to_intel_crtc_state(crtc->state),
1ed51de9 13354 pipe_config, true)) {
26495481 13355 crtc_state->mode_changed = false;
bfd16b2a 13356 to_intel_crtc_state(crtc_state)->update_pipe = true;
26495481
DV
13357 }
13358
af4a879e 13359 if (needs_modeset(crtc_state))
26495481 13360 any_ms = true;
cfb23ed6 13361
af4a879e
DV
13362 ret = drm_atomic_add_affected_planes(state, crtc);
13363 if (ret)
13364 return ret;
61333b60 13365
26495481
DV
13366 intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
13367 needs_modeset(crtc_state) ?
13368 "[modeset]" : "[fastset]");
c347a676
ACO
13369 }
13370
61333b60
ML
13371 if (any_ms) {
13372 ret = intel_modeset_checks(state);
13373
13374 if (ret)
13375 return ret;
27c329ed 13376 } else
dd8b3bdb 13377 intel_state->cdclk = dev_priv->cdclk_freq;
76305b1a 13378
dd8b3bdb 13379 ret = drm_atomic_helper_check_planes(dev, state);
aa363136
MR
13380 if (ret)
13381 return ret;
13382
f51be2e0 13383 intel_fbc_choose_crtc(dev_priv, state);
55994c2c 13384 return calc_watermark_data(state);
054518dd
ACO
13385}
13386
5008e874
ML
13387static int intel_atomic_prepare_commit(struct drm_device *dev,
13388 struct drm_atomic_state *state,
81072bfd 13389 bool nonblock)
5008e874 13390{
7580d774
ML
13391 struct drm_i915_private *dev_priv = dev->dev_private;
13392 struct drm_plane_state *plane_state;
5008e874 13393 struct drm_crtc_state *crtc_state;
7580d774 13394 struct drm_plane *plane;
5008e874
ML
13395 struct drm_crtc *crtc;
13396 int i, ret;
13397
81072bfd
ML
13398 if (nonblock) {
13399 DRM_DEBUG_KMS("i915 does not yet support nonblocking commit\n");
5008e874
ML
13400 return -EINVAL;
13401 }
13402
13403 for_each_crtc_in_state(state, crtc, crtc_state, i) {
acf4e84d
CW
13404 if (state->legacy_cursor_update)
13405 continue;
13406
5008e874
ML
13407 ret = intel_crtc_wait_for_pending_flips(crtc);
13408 if (ret)
13409 return ret;
7580d774
ML
13410
13411 if (atomic_read(&to_intel_crtc(crtc)->unpin_work_count) >= 2)
13412 flush_workqueue(dev_priv->wq);
5008e874
ML
13413 }
13414
f935675f
ML
13415 ret = mutex_lock_interruptible(&dev->struct_mutex);
13416 if (ret)
13417 return ret;
13418
5008e874 13419 ret = drm_atomic_helper_prepare_planes(dev, state);
f7e5838b 13420 mutex_unlock(&dev->struct_mutex);
7580d774 13421
21daaeee 13422 if (!ret && !nonblock) {
7580d774
ML
13423 for_each_plane_in_state(state, plane, plane_state, i) {
13424 struct intel_plane_state *intel_plane_state =
13425 to_intel_plane_state(plane_state);
13426
13427 if (!intel_plane_state->wait_req)
13428 continue;
13429
13430 ret = __i915_wait_request(intel_plane_state->wait_req,
299259a3 13431 true, NULL, NULL);
f7e5838b 13432 if (ret) {
f4457ae7
CW
13433 /* Any hang should be swallowed by the wait */
13434 WARN_ON(ret == -EIO);
f7e5838b
CW
13435 mutex_lock(&dev->struct_mutex);
13436 drm_atomic_helper_cleanup_planes(dev, state);
13437 mutex_unlock(&dev->struct_mutex);
7580d774 13438 break;
f7e5838b 13439 }
7580d774 13440 }
7580d774 13441 }
5008e874
ML
13442
13443 return ret;
13444}
13445
e8861675
ML
13446static void intel_atomic_wait_for_vblanks(struct drm_device *dev,
13447 struct drm_i915_private *dev_priv,
13448 unsigned crtc_mask)
13449{
13450 unsigned last_vblank_count[I915_MAX_PIPES];
13451 enum pipe pipe;
13452 int ret;
13453
13454 if (!crtc_mask)
13455 return;
13456
13457 for_each_pipe(dev_priv, pipe) {
13458 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
13459
13460 if (!((1 << pipe) & crtc_mask))
13461 continue;
13462
13463 ret = drm_crtc_vblank_get(crtc);
13464 if (WARN_ON(ret != 0)) {
13465 crtc_mask &= ~(1 << pipe);
13466 continue;
13467 }
13468
13469 last_vblank_count[pipe] = drm_crtc_vblank_count(crtc);
13470 }
13471
13472 for_each_pipe(dev_priv, pipe) {
13473 struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
13474 long lret;
13475
13476 if (!((1 << pipe) & crtc_mask))
13477 continue;
13478
13479 lret = wait_event_timeout(dev->vblank[pipe].queue,
13480 last_vblank_count[pipe] !=
13481 drm_crtc_vblank_count(crtc),
13482 msecs_to_jiffies(50));
13483
8a8dae26 13484 WARN(!lret, "pipe %c vblank wait timed out\n", pipe_name(pipe));
e8861675
ML
13485
13486 drm_crtc_vblank_put(crtc);
13487 }
13488}
13489
13490static bool needs_vblank_wait(struct intel_crtc_state *crtc_state)
13491{
13492 /* fb updated, need to unpin old fb */
13493 if (crtc_state->fb_changed)
13494 return true;
13495
13496 /* wm changes, need vblank before final wm's */
caed361d 13497 if (crtc_state->update_wm_post)
e8861675
ML
13498 return true;
13499
13500 /*
13501 * cxsr is re-enabled after vblank.
caed361d 13502 * This is already handled by crtc_state->update_wm_post,
e8861675
ML
13503 * but added for clarity.
13504 */
13505 if (crtc_state->disable_cxsr)
13506 return true;
13507
13508 return false;
13509}
13510
74c090b1
ML
13511/**
13512 * intel_atomic_commit - commit validated state object
13513 * @dev: DRM device
13514 * @state: the top-level driver state object
81072bfd 13515 * @nonblock: nonblocking commit
74c090b1
ML
13516 *
13517 * This function commits a top-level state object that has been validated
13518 * with drm_atomic_helper_check().
13519 *
13520 * FIXME: Atomic modeset support for i915 is not yet complete. At the moment
13521 * we can only handle plane-related operations and do not yet support
81072bfd 13522 * nonblocking commit.
74c090b1
ML
13523 *
13524 * RETURNS
13525 * Zero for success or -errno.
13526 */
13527static int intel_atomic_commit(struct drm_device *dev,
13528 struct drm_atomic_state *state,
81072bfd 13529 bool nonblock)
a6778b3c 13530{
565602d7 13531 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
fbee40df 13532 struct drm_i915_private *dev_priv = dev->dev_private;
29ceb0e6 13533 struct drm_crtc_state *old_crtc_state;
7580d774 13534 struct drm_crtc *crtc;
ed4a6a7c 13535 struct intel_crtc_state *intel_cstate;
565602d7
ML
13536 int ret = 0, i;
13537 bool hw_check = intel_state->modeset;
33c8df89 13538 unsigned long put_domains[I915_MAX_PIPES] = {};
e8861675 13539 unsigned crtc_vblank_mask = 0;
a6778b3c 13540
81072bfd 13541 ret = intel_atomic_prepare_commit(dev, state, nonblock);
7580d774
ML
13542 if (ret) {
13543 DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
d4afb8cc 13544 return ret;
7580d774 13545 }
d4afb8cc 13546
1c5e19f8 13547 drm_atomic_helper_swap_state(dev, state);
279e99d7 13548 dev_priv->wm.distrust_bios_wm = false;
734fa01f 13549 dev_priv->wm.skl_results = intel_state->wm_results;
a1475e77 13550 intel_shared_dpll_commit(state);
1c5e19f8 13551
565602d7
ML
13552 if (intel_state->modeset) {
13553 memcpy(dev_priv->min_pixclk, intel_state->min_pixclk,
13554 sizeof(intel_state->min_pixclk));
13555 dev_priv->active_crtcs = intel_state->active_crtcs;
1a617b77 13556 dev_priv->atomic_cdclk_freq = intel_state->cdclk;
33c8df89
ML
13557
13558 intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
565602d7
ML
13559 }
13560
29ceb0e6 13561 for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
a539205a
ML
13562 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
13563
33c8df89
ML
13564 if (needs_modeset(crtc->state) ||
13565 to_intel_crtc_state(crtc->state)->update_pipe) {
13566 hw_check = true;
13567
13568 put_domains[to_intel_crtc(crtc)->pipe] =
13569 modeset_get_crtc_power_domains(crtc,
13570 to_intel_crtc_state(crtc->state));
13571 }
13572
61333b60
ML
13573 if (!needs_modeset(crtc->state))
13574 continue;
13575
29ceb0e6 13576 intel_pre_plane_update(to_intel_crtc_state(old_crtc_state));
460da916 13577
29ceb0e6
VS
13578 if (old_crtc_state->active) {
13579 intel_crtc_disable_planes(crtc, old_crtc_state->plane_mask);
a539205a 13580 dev_priv->display.crtc_disable(crtc);
eddfcbcd 13581 intel_crtc->active = false;
58f9c0bc 13582 intel_fbc_disable(intel_crtc);
eddfcbcd 13583 intel_disable_shared_dpll(intel_crtc);
9bbc8258
VS
13584
13585 /*
13586 * Underruns don't always raise
13587 * interrupts, so check manually.
13588 */
13589 intel_check_cpu_fifo_underruns(dev_priv);
13590 intel_check_pch_fifo_underruns(dev_priv);
b9001114
ML
13591
13592 if (!crtc->state->active)
13593 intel_update_watermarks(crtc);
a539205a 13594 }
b8cecdf5 13595 }
7758a113 13596
ea9d758d
DV
13597 /* Only after disabling all output pipelines that will be changed can we
13598 * update the the output configuration. */
4740b0f2 13599 intel_modeset_update_crtc_state(state);
f6e5b160 13600
565602d7 13601 if (intel_state->modeset) {
4740b0f2 13602 drm_atomic_helper_update_legacy_modeset_state(state->dev, state);
33c8df89
ML
13603
13604 if (dev_priv->display.modeset_commit_cdclk &&
13605 intel_state->dev_cdclk != dev_priv->cdclk_freq)
13606 dev_priv->display.modeset_commit_cdclk(state);
f6d1973d 13607
c0ead703 13608 intel_modeset_verify_disabled(dev);
4740b0f2 13609 }
47fab737 13610
a6778b3c 13611 /* Now enable the clocks, plane, pipe, and connectors that we set up. */
29ceb0e6 13612 for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
f6ac4b2a
ML
13613 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
13614 bool modeset = needs_modeset(crtc->state);
e8861675
ML
13615 struct intel_crtc_state *pipe_config =
13616 to_intel_crtc_state(crtc->state);
13617 bool update_pipe = !modeset && pipe_config->update_pipe;
9f836f90 13618
f6ac4b2a 13619 if (modeset && crtc->state->active) {
a539205a
ML
13620 update_scanline_offset(to_intel_crtc(crtc));
13621 dev_priv->display.crtc_enable(crtc);
13622 }
80715b2f 13623
f6ac4b2a 13624 if (!modeset)
29ceb0e6 13625 intel_pre_plane_update(to_intel_crtc_state(old_crtc_state));
f6ac4b2a 13626
31ae71fc
ML
13627 if (crtc->state->active &&
13628 drm_atomic_get_existing_plane_state(state, crtc->primary))
49227c4a
PZ
13629 intel_fbc_enable(intel_crtc);
13630
6173ee28
ML
13631 if (crtc->state->active &&
13632 (crtc->state->planes_changed || update_pipe))
29ceb0e6 13633 drm_atomic_helper_commit_planes_on_crtc(old_crtc_state);
bfd16b2a 13634
e8861675
ML
13635 if (pipe_config->base.active && needs_vblank_wait(pipe_config))
13636 crtc_vblank_mask |= 1 << i;
80715b2f 13637 }
a6778b3c 13638
a6778b3c 13639 /* FIXME: add subpixel order */
83a57153 13640
e8861675
ML
13641 if (!state->legacy_cursor_update)
13642 intel_atomic_wait_for_vblanks(dev, dev_priv, crtc_vblank_mask);
f935675f 13643
ed4a6a7c
MR
13644 /*
13645 * Now that the vblank has passed, we can go ahead and program the
13646 * optimal watermarks on platforms that need two-step watermark
13647 * programming.
13648 *
13649 * TODO: Move this (and other cleanup) to an async worker eventually.
13650 */
29ceb0e6 13651 for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
ed4a6a7c
MR
13652 intel_cstate = to_intel_crtc_state(crtc->state);
13653
13654 if (dev_priv->display.optimize_watermarks)
13655 dev_priv->display.optimize_watermarks(intel_cstate);
13656 }
13657
177246a8
MR
13658 for_each_crtc_in_state(state, crtc, old_crtc_state, i) {
13659 intel_post_plane_update(to_intel_crtc_state(old_crtc_state));
13660
13661 if (put_domains[i])
13662 modeset_put_power_domains(dev_priv, put_domains[i]);
f6d1973d 13663
c0ead703 13664 intel_modeset_verify_crtc(crtc, old_crtc_state, crtc->state);
177246a8
MR
13665 }
13666
13667 if (intel_state->modeset)
13668 intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET);
13669
f935675f 13670 mutex_lock(&dev->struct_mutex);
d4afb8cc 13671 drm_atomic_helper_cleanup_planes(dev, state);
f935675f 13672 mutex_unlock(&dev->struct_mutex);
2bfb4627 13673
ee165b1a 13674 drm_atomic_state_free(state);
f30da187 13675
75714940
MK
13676 /* As one of the primary mmio accessors, KMS has a high likelihood
13677 * of triggering bugs in unclaimed access. After we finish
13678 * modesetting, see if an error has been flagged, and if so
13679 * enable debugging for the next modeset - and hope we catch
13680 * the culprit.
13681 *
13682 * XXX note that we assume display power is on at this point.
13683 * This might hold true now but we need to add pm helper to check
13684 * unclaimed only when the hardware is on, as atomic commits
13685 * can happen also when the device is completely off.
13686 */
13687 intel_uncore_arm_unclaimed_mmio_detection(dev_priv);
13688
74c090b1 13689 return 0;
7f27126e
JB
13690}
13691
c0c36b94
CW
13692void intel_crtc_restore_mode(struct drm_crtc *crtc)
13693{
83a57153
ACO
13694 struct drm_device *dev = crtc->dev;
13695 struct drm_atomic_state *state;
e694eb02 13696 struct drm_crtc_state *crtc_state;
2bfb4627 13697 int ret;
83a57153
ACO
13698
13699 state = drm_atomic_state_alloc(dev);
13700 if (!state) {
e694eb02 13701 DRM_DEBUG_KMS("[CRTC:%d] crtc restore failed, out of memory",
83a57153
ACO
13702 crtc->base.id);
13703 return;
13704 }
13705
e694eb02 13706 state->acquire_ctx = drm_modeset_legacy_acquire_ctx(crtc);
83a57153 13707
e694eb02
ML
13708retry:
13709 crtc_state = drm_atomic_get_crtc_state(state, crtc);
13710 ret = PTR_ERR_OR_ZERO(crtc_state);
13711 if (!ret) {
13712 if (!crtc_state->active)
13713 goto out;
83a57153 13714
e694eb02 13715 crtc_state->mode_changed = true;
74c090b1 13716 ret = drm_atomic_commit(state);
83a57153
ACO
13717 }
13718
e694eb02
ML
13719 if (ret == -EDEADLK) {
13720 drm_atomic_state_clear(state);
13721 drm_modeset_backoff(state->acquire_ctx);
13722 goto retry;
4ed9fb37 13723 }
4be07317 13724
2bfb4627 13725 if (ret)
e694eb02 13726out:
2bfb4627 13727 drm_atomic_state_free(state);
c0c36b94
CW
13728}
13729
25c5b266
DV
13730#undef for_each_intel_crtc_masked
13731
f6e5b160 13732static const struct drm_crtc_funcs intel_crtc_funcs = {
82cf435b 13733 .gamma_set = drm_atomic_helper_legacy_gamma_set,
74c090b1 13734 .set_config = drm_atomic_helper_set_config,
82cf435b 13735 .set_property = drm_atomic_helper_crtc_set_property,
f6e5b160
CW
13736 .destroy = intel_crtc_destroy,
13737 .page_flip = intel_crtc_page_flip,
1356837e
MR
13738 .atomic_duplicate_state = intel_crtc_duplicate_state,
13739 .atomic_destroy_state = intel_crtc_destroy_state,
f6e5b160
CW
13740};
13741
6beb8c23
MR
13742/**
13743 * intel_prepare_plane_fb - Prepare fb for usage on plane
13744 * @plane: drm plane to prepare for
13745 * @fb: framebuffer to prepare for presentation
13746 *
13747 * Prepares a framebuffer for usage on a display plane. Generally this
13748 * involves pinning the underlying object and updating the frontbuffer tracking
13749 * bits. Some older platforms need special physical address handling for
13750 * cursor planes.
13751 *
f935675f
ML
13752 * Must be called with struct_mutex held.
13753 *
6beb8c23
MR
13754 * Returns 0 on success, negative error code on failure.
13755 */
13756int
13757intel_prepare_plane_fb(struct drm_plane *plane,
d136dfee 13758 const struct drm_plane_state *new_state)
465c120c
MR
13759{
13760 struct drm_device *dev = plane->dev;
844f9111 13761 struct drm_framebuffer *fb = new_state->fb;
6beb8c23 13762 struct intel_plane *intel_plane = to_intel_plane(plane);
6beb8c23 13763 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
1ee49399 13764 struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
6beb8c23 13765 int ret = 0;
465c120c 13766
1ee49399 13767 if (!obj && !old_obj)
465c120c
MR
13768 return 0;
13769
5008e874
ML
13770 if (old_obj) {
13771 struct drm_crtc_state *crtc_state =
13772 drm_atomic_get_existing_crtc_state(new_state->state, plane->state->crtc);
13773
13774 /* Big Hammer, we also need to ensure that any pending
13775 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
13776 * current scanout is retired before unpinning the old
13777 * framebuffer. Note that we rely on userspace rendering
13778 * into the buffer attached to the pipe they are waiting
13779 * on. If not, userspace generates a GPU hang with IPEHR
13780 * point to the MI_WAIT_FOR_EVENT.
13781 *
13782 * This should only fail upon a hung GPU, in which case we
13783 * can safely continue.
13784 */
13785 if (needs_modeset(crtc_state))
13786 ret = i915_gem_object_wait_rendering(old_obj, true);
f4457ae7
CW
13787 if (ret) {
13788 /* GPU hangs should have been swallowed by the wait */
13789 WARN_ON(ret == -EIO);
f935675f 13790 return ret;
f4457ae7 13791 }
5008e874
ML
13792 }
13793
3c28ff22
AG
13794 /* For framebuffer backed by dmabuf, wait for fence */
13795 if (obj && obj->base.dma_buf) {
bcf8be27
ML
13796 long lret;
13797
13798 lret = reservation_object_wait_timeout_rcu(obj->base.dma_buf->resv,
13799 false, true,
13800 MAX_SCHEDULE_TIMEOUT);
13801 if (lret == -ERESTARTSYS)
13802 return lret;
3c28ff22 13803
bcf8be27 13804 WARN(lret < 0, "waiting returns %li\n", lret);
3c28ff22
AG
13805 }
13806
1ee49399
ML
13807 if (!obj) {
13808 ret = 0;
13809 } else if (plane->type == DRM_PLANE_TYPE_CURSOR &&
6beb8c23
MR
13810 INTEL_INFO(dev)->cursor_needs_physical) {
13811 int align = IS_I830(dev) ? 16 * 1024 : 256;
13812 ret = i915_gem_object_attach_phys(obj, align);
13813 if (ret)
13814 DRM_DEBUG_KMS("failed to attach phys object\n");
13815 } else {
3465c580 13816 ret = intel_pin_and_fence_fb_obj(fb, new_state->rotation);
6beb8c23 13817 }
465c120c 13818
7580d774
ML
13819 if (ret == 0) {
13820 if (obj) {
13821 struct intel_plane_state *plane_state =
13822 to_intel_plane_state(new_state);
13823
13824 i915_gem_request_assign(&plane_state->wait_req,
13825 obj->last_write_req);
13826 }
13827
a9ff8714 13828 i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit);
7580d774 13829 }
fdd508a6 13830
6beb8c23
MR
13831 return ret;
13832}
13833
38f3ce3a
MR
13834/**
13835 * intel_cleanup_plane_fb - Cleans up an fb after plane use
13836 * @plane: drm plane to clean up for
13837 * @fb: old framebuffer that was on plane
13838 *
13839 * Cleans up a framebuffer that has just been removed from a plane.
f935675f
ML
13840 *
13841 * Must be called with struct_mutex held.
38f3ce3a
MR
13842 */
13843void
13844intel_cleanup_plane_fb(struct drm_plane *plane,
d136dfee 13845 const struct drm_plane_state *old_state)
38f3ce3a
MR
13846{
13847 struct drm_device *dev = plane->dev;
1ee49399 13848 struct intel_plane *intel_plane = to_intel_plane(plane);
7580d774 13849 struct intel_plane_state *old_intel_state;
1ee49399
ML
13850 struct drm_i915_gem_object *old_obj = intel_fb_obj(old_state->fb);
13851 struct drm_i915_gem_object *obj = intel_fb_obj(plane->state->fb);
38f3ce3a 13852
7580d774
ML
13853 old_intel_state = to_intel_plane_state(old_state);
13854
1ee49399 13855 if (!obj && !old_obj)
38f3ce3a
MR
13856 return;
13857
1ee49399
ML
13858 if (old_obj && (plane->type != DRM_PLANE_TYPE_CURSOR ||
13859 !INTEL_INFO(dev)->cursor_needs_physical))
3465c580 13860 intel_unpin_fb_obj(old_state->fb, old_state->rotation);
1ee49399
ML
13861
13862 /* prepare_fb aborted? */
13863 if ((old_obj && (old_obj->frontbuffer_bits & intel_plane->frontbuffer_bit)) ||
13864 (obj && !(obj->frontbuffer_bits & intel_plane->frontbuffer_bit)))
13865 i915_gem_track_fb(old_obj, obj, intel_plane->frontbuffer_bit);
7580d774
ML
13866
13867 i915_gem_request_assign(&old_intel_state->wait_req, NULL);
465c120c
MR
13868}
13869
6156a456
CK
13870int
13871skl_max_scale(struct intel_crtc *intel_crtc, struct intel_crtc_state *crtc_state)
13872{
13873 int max_scale;
13874 struct drm_device *dev;
13875 struct drm_i915_private *dev_priv;
13876 int crtc_clock, cdclk;
13877
bf8a0af0 13878 if (!intel_crtc || !crtc_state->base.enable)
6156a456
CK
13879 return DRM_PLANE_HELPER_NO_SCALING;
13880
13881 dev = intel_crtc->base.dev;
13882 dev_priv = dev->dev_private;
13883 crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
27c329ed 13884 cdclk = to_intel_atomic_state(crtc_state->base.state)->cdclk;
6156a456 13885
54bf1ce6 13886 if (WARN_ON_ONCE(!crtc_clock || cdclk < crtc_clock))
6156a456
CK
13887 return DRM_PLANE_HELPER_NO_SCALING;
13888
13889 /*
13890 * skl max scale is lower of:
13891 * close to 3 but not 3, -1 is for that purpose
13892 * or
13893 * cdclk/crtc_clock
13894 */
13895 max_scale = min((1 << 16) * 3 - 1, (1 << 8) * ((cdclk << 8) / crtc_clock));
13896
13897 return max_scale;
13898}
13899
465c120c 13900static int
3c692a41 13901intel_check_primary_plane(struct drm_plane *plane,
061e4b8d 13902 struct intel_crtc_state *crtc_state,
3c692a41
GP
13903 struct intel_plane_state *state)
13904{
2b875c22
MR
13905 struct drm_crtc *crtc = state->base.crtc;
13906 struct drm_framebuffer *fb = state->base.fb;
6156a456 13907 int min_scale = DRM_PLANE_HELPER_NO_SCALING;
061e4b8d
ML
13908 int max_scale = DRM_PLANE_HELPER_NO_SCALING;
13909 bool can_position = false;
465c120c 13910
693bdc28
VS
13911 if (INTEL_INFO(plane->dev)->gen >= 9) {
13912 /* use scaler when colorkey is not required */
13913 if (state->ckey.flags == I915_SET_COLORKEY_NONE) {
13914 min_scale = 1;
13915 max_scale = skl_max_scale(to_intel_crtc(crtc), crtc_state);
13916 }
d8106366 13917 can_position = true;
6156a456 13918 }
d8106366 13919
061e4b8d
ML
13920 return drm_plane_helper_check_update(plane, crtc, fb, &state->src,
13921 &state->dst, &state->clip,
da20eabd
ML
13922 min_scale, max_scale,
13923 can_position, true,
13924 &state->visible);
14af293f
GP
13925}
13926
613d2b27
ML
13927static void intel_begin_crtc_commit(struct drm_crtc *crtc,
13928 struct drm_crtc_state *old_crtc_state)
3c692a41 13929{
32b7eeec 13930 struct drm_device *dev = crtc->dev;
3c692a41 13931 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
bfd16b2a
ML
13932 struct intel_crtc_state *old_intel_state =
13933 to_intel_crtc_state(old_crtc_state);
13934 bool modeset = needs_modeset(crtc->state);
3c692a41 13935
c34c9ee4 13936 /* Perform vblank evasion around commit operation */
62852622 13937 intel_pipe_update_start(intel_crtc);
0583236e 13938
bfd16b2a
ML
13939 if (modeset)
13940 return;
13941
20a34e78
ML
13942 if (crtc->state->color_mgmt_changed || to_intel_crtc_state(crtc->state)->update_pipe) {
13943 intel_color_set_csc(crtc->state);
13944 intel_color_load_luts(crtc->state);
13945 }
13946
bfd16b2a
ML
13947 if (to_intel_crtc_state(crtc->state)->update_pipe)
13948 intel_update_pipe_config(intel_crtc, old_intel_state);
13949 else if (INTEL_INFO(dev)->gen >= 9)
0583236e 13950 skl_detach_scalers(intel_crtc);
32b7eeec
MR
13951}
13952
613d2b27
ML
/*
 * Post-plane-commit hook: close the vblank-evasion window opened by
 * intel_begin_crtc_commit().
 */
static void intel_finish_crtc_commit(struct drm_crtc *crtc,
				     struct drm_crtc_state *old_crtc_state)
{
	intel_pipe_update_end(to_intel_crtc(crtc));
}
13960
/**
 * intel_plane_destroy - destroy a plane
 * @plane: plane to destroy
 *
 * Common destruction function for all types of planes (primary, cursor,
 * sprite).
 */
void intel_plane_destroy(struct drm_plane *plane)
{
	drm_plane_cleanup(plane);
	kfree(to_intel_plane(plane));
}
13974
65a3fea0 13975const struct drm_plane_funcs intel_plane_funcs = {
70a101f8
MR
13976 .update_plane = drm_atomic_helper_update_plane,
13977 .disable_plane = drm_atomic_helper_disable_plane,
3d7d6510 13978 .destroy = intel_plane_destroy,
c196e1d6 13979 .set_property = drm_atomic_helper_plane_set_property,
a98b3431
MR
13980 .atomic_get_property = intel_plane_atomic_get_property,
13981 .atomic_set_property = intel_plane_atomic_set_property,
ea2c67bb
MR
13982 .atomic_duplicate_state = intel_plane_duplicate_state,
13983 .atomic_destroy_state = intel_plane_destroy_state,
13984
465c120c
MR
13985};
13986
13987static struct drm_plane *intel_primary_plane_create(struct drm_device *dev,
13988 int pipe)
13989{
fca0ce2a
VS
13990 struct intel_plane *primary = NULL;
13991 struct intel_plane_state *state = NULL;
465c120c 13992 const uint32_t *intel_primary_formats;
45e3743a 13993 unsigned int num_formats;
fca0ce2a 13994 int ret;
465c120c
MR
13995
13996 primary = kzalloc(sizeof(*primary), GFP_KERNEL);
fca0ce2a
VS
13997 if (!primary)
13998 goto fail;
465c120c 13999
8e7d688b 14000 state = intel_create_plane_state(&primary->base);
fca0ce2a
VS
14001 if (!state)
14002 goto fail;
8e7d688b 14003 primary->base.state = &state->base;
ea2c67bb 14004
465c120c
MR
14005 primary->can_scale = false;
14006 primary->max_downscale = 1;
6156a456
CK
14007 if (INTEL_INFO(dev)->gen >= 9) {
14008 primary->can_scale = true;
af99ceda 14009 state->scaler_id = -1;
6156a456 14010 }
465c120c
MR
14011 primary->pipe = pipe;
14012 primary->plane = pipe;
a9ff8714 14013 primary->frontbuffer_bit = INTEL_FRONTBUFFER_PRIMARY(pipe);
c59cb179 14014 primary->check_plane = intel_check_primary_plane;
465c120c
MR
14015 if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4)
14016 primary->plane = !pipe;
14017
6c0fd451
DL
14018 if (INTEL_INFO(dev)->gen >= 9) {
14019 intel_primary_formats = skl_primary_formats;
14020 num_formats = ARRAY_SIZE(skl_primary_formats);
a8d201af
ML
14021
14022 primary->update_plane = skylake_update_primary_plane;
14023 primary->disable_plane = skylake_disable_primary_plane;
14024 } else if (HAS_PCH_SPLIT(dev)) {
14025 intel_primary_formats = i965_primary_formats;
14026 num_formats = ARRAY_SIZE(i965_primary_formats);
14027
14028 primary->update_plane = ironlake_update_primary_plane;
14029 primary->disable_plane = i9xx_disable_primary_plane;
6c0fd451 14030 } else if (INTEL_INFO(dev)->gen >= 4) {
568db4f2
DL
14031 intel_primary_formats = i965_primary_formats;
14032 num_formats = ARRAY_SIZE(i965_primary_formats);
a8d201af
ML
14033
14034 primary->update_plane = i9xx_update_primary_plane;
14035 primary->disable_plane = i9xx_disable_primary_plane;
6c0fd451
DL
14036 } else {
14037 intel_primary_formats = i8xx_primary_formats;
14038 num_formats = ARRAY_SIZE(i8xx_primary_formats);
a8d201af
ML
14039
14040 primary->update_plane = i9xx_update_primary_plane;
14041 primary->disable_plane = i9xx_disable_primary_plane;
465c120c
MR
14042 }
14043
fca0ce2a
VS
14044 ret = drm_universal_plane_init(dev, &primary->base, 0,
14045 &intel_plane_funcs,
14046 intel_primary_formats, num_formats,
14047 DRM_PLANE_TYPE_PRIMARY, NULL);
14048 if (ret)
14049 goto fail;
48404c1e 14050
3b7a5119
SJ
14051 if (INTEL_INFO(dev)->gen >= 4)
14052 intel_create_rotation_property(dev, primary);
48404c1e 14053
ea2c67bb
MR
14054 drm_plane_helper_add(&primary->base, &intel_plane_helper_funcs);
14055
465c120c 14056 return &primary->base;
fca0ce2a
VS
14057
14058fail:
14059 kfree(state);
14060 kfree(primary);
14061
14062 return NULL;
465c120c
MR
14063}
14064
3b7a5119
SJ
14065void intel_create_rotation_property(struct drm_device *dev, struct intel_plane *plane)
14066{
14067 if (!dev->mode_config.rotation_property) {
14068 unsigned long flags = BIT(DRM_ROTATE_0) |
14069 BIT(DRM_ROTATE_180);
14070
14071 if (INTEL_INFO(dev)->gen >= 9)
14072 flags |= BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270);
14073
14074 dev->mode_config.rotation_property =
14075 drm_mode_create_rotation_property(dev, flags);
14076 }
14077 if (dev->mode_config.rotation_property)
14078 drm_object_attach_property(&plane->base.base,
14079 dev->mode_config.rotation_property,
14080 plane->base.state->rotation);
14081}
14082
3d7d6510 14083static int
852e787c 14084intel_check_cursor_plane(struct drm_plane *plane,
061e4b8d 14085 struct intel_crtc_state *crtc_state,
852e787c 14086 struct intel_plane_state *state)
3d7d6510 14087{
061e4b8d 14088 struct drm_crtc *crtc = crtc_state->base.crtc;
2b875c22 14089 struct drm_framebuffer *fb = state->base.fb;
757f9a3e 14090 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
b29ec92c 14091 enum pipe pipe = to_intel_plane(plane)->pipe;
757f9a3e
GP
14092 unsigned stride;
14093 int ret;
3d7d6510 14094
061e4b8d
ML
14095 ret = drm_plane_helper_check_update(plane, crtc, fb, &state->src,
14096 &state->dst, &state->clip,
3d7d6510
MR
14097 DRM_PLANE_HELPER_NO_SCALING,
14098 DRM_PLANE_HELPER_NO_SCALING,
852e787c 14099 true, true, &state->visible);
757f9a3e
GP
14100 if (ret)
14101 return ret;
14102
757f9a3e
GP
14103 /* if we want to turn off the cursor ignore width and height */
14104 if (!obj)
da20eabd 14105 return 0;
757f9a3e 14106
757f9a3e 14107 /* Check for which cursor types we support */
061e4b8d 14108 if (!cursor_size_ok(plane->dev, state->base.crtc_w, state->base.crtc_h)) {
ea2c67bb
MR
14109 DRM_DEBUG("Cursor dimension %dx%d not supported\n",
14110 state->base.crtc_w, state->base.crtc_h);
757f9a3e
GP
14111 return -EINVAL;
14112 }
14113
ea2c67bb
MR
14114 stride = roundup_pow_of_two(state->base.crtc_w) * 4;
14115 if (obj->base.size < stride * state->base.crtc_h) {
757f9a3e
GP
14116 DRM_DEBUG_KMS("buffer is too small\n");
14117 return -ENOMEM;
14118 }
14119
3a656b54 14120 if (fb->modifier[0] != DRM_FORMAT_MOD_NONE) {
757f9a3e 14121 DRM_DEBUG_KMS("cursor cannot be tiled\n");
da20eabd 14122 return -EINVAL;
32b7eeec
MR
14123 }
14124
b29ec92c
VS
14125 /*
14126 * There's something wrong with the cursor on CHV pipe C.
14127 * If it straddles the left edge of the screen then
14128 * moving it away from the edge or disabling it often
14129 * results in a pipe underrun, and often that can lead to
14130 * dead pipe (constant underrun reported, and it scans
14131 * out just a solid color). To recover from that, the
14132 * display power well must be turned off and on again.
14133 * Refuse the put the cursor into that compromised position.
14134 */
14135 if (IS_CHERRYVIEW(plane->dev) && pipe == PIPE_C &&
14136 state->visible && state->base.crtc_x < 0) {
14137 DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
14138 return -EINVAL;
14139 }
14140
da20eabd 14141 return 0;
852e787c 14142}
3d7d6510 14143
a8ad0d8e
ML
14144static void
14145intel_disable_cursor_plane(struct drm_plane *plane,
7fabf5ef 14146 struct drm_crtc *crtc)
a8ad0d8e 14147{
f2858021
ML
14148 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
14149
14150 intel_crtc->cursor_addr = 0;
55a08b3f 14151 intel_crtc_update_cursor(crtc, NULL);
a8ad0d8e
ML
14152}
14153
f4a2cf29 14154static void
55a08b3f
ML
14155intel_update_cursor_plane(struct drm_plane *plane,
14156 const struct intel_crtc_state *crtc_state,
14157 const struct intel_plane_state *state)
852e787c 14158{
55a08b3f
ML
14159 struct drm_crtc *crtc = crtc_state->base.crtc;
14160 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
ea2c67bb 14161 struct drm_device *dev = plane->dev;
2b875c22 14162 struct drm_i915_gem_object *obj = intel_fb_obj(state->base.fb);
a912f12f 14163 uint32_t addr;
852e787c 14164
f4a2cf29 14165 if (!obj)
a912f12f 14166 addr = 0;
f4a2cf29 14167 else if (!INTEL_INFO(dev)->cursor_needs_physical)
a912f12f 14168 addr = i915_gem_obj_ggtt_offset(obj);
f4a2cf29 14169 else
a912f12f 14170 addr = obj->phys_handle->busaddr;
852e787c 14171
a912f12f 14172 intel_crtc->cursor_addr = addr;
55a08b3f 14173 intel_crtc_update_cursor(crtc, state);
852e787c
GP
14174}
14175
/*
 * Allocate and register the cursor plane for @pipe.
 *
 * Sets up the intel_plane wrapper plus its initial atomic state, wires in
 * the cursor check/update/disable hooks, registers the plane with the DRM
 * core as DRM_PLANE_TYPE_CURSOR and, on gen4+, attaches the shared 0/180
 * degree rotation property.  Returns the new drm_plane, or NULL on any
 * allocation/registration failure (partial allocations are freed).
 */
static struct drm_plane *intel_cursor_plane_create(struct drm_device *dev,
						   int pipe)
{
	struct intel_plane *cursor = NULL;
	struct intel_plane_state *state = NULL;
	int ret;

	cursor = kzalloc(sizeof(*cursor), GFP_KERNEL);
	if (!cursor)
		goto fail;

	/* The plane owns its state; must be set before drm_universal_plane_init. */
	state = intel_create_plane_state(&cursor->base);
	if (!state)
		goto fail;
	cursor->base.state = &state->base;

	/* Cursor planes have no scaling capability. */
	cursor->can_scale = false;
	cursor->max_downscale = 1;
	cursor->pipe = pipe;
	cursor->plane = pipe;
	cursor->frontbuffer_bit = INTEL_FRONTBUFFER_CURSOR(pipe);
	cursor->check_plane = intel_check_cursor_plane;
	cursor->update_plane = intel_update_cursor_plane;
	cursor->disable_plane = intel_disable_cursor_plane;

	ret = drm_universal_plane_init(dev, &cursor->base, 0,
				       &intel_plane_funcs,
				       intel_cursor_formats,
				       ARRAY_SIZE(intel_cursor_formats),
				       DRM_PLANE_TYPE_CURSOR, NULL);
	if (ret)
		goto fail;

	/* Gen4+ cursors can be rotated 180 degrees; the rotation property is
	 * created once and shared by all planes of the device. */
	if (INTEL_INFO(dev)->gen >= 4) {
		if (!dev->mode_config.rotation_property)
			dev->mode_config.rotation_property =
				drm_mode_create_rotation_property(dev,
							BIT(DRM_ROTATE_0) |
							BIT(DRM_ROTATE_180));
		if (dev->mode_config.rotation_property)
			drm_object_attach_property(&cursor->base.base,
				dev->mode_config.rotation_property,
				state->base.rotation);
	}

	/* Gen9+: mark no pipe scaler assigned to this plane. */
	if (INTEL_INFO(dev)->gen >=9)
		state->scaler_id = -1;

	drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);

	return &cursor->base;

fail:
	kfree(state);
	kfree(cursor);

	return NULL;
}
14234
549e2bfb
CK
14235static void skl_init_scalers(struct drm_device *dev, struct intel_crtc *intel_crtc,
14236 struct intel_crtc_state *crtc_state)
14237{
14238 int i;
14239 struct intel_scaler *intel_scaler;
14240 struct intel_crtc_scaler_state *scaler_state = &crtc_state->scaler_state;
14241
14242 for (i = 0; i < intel_crtc->num_scalers; i++) {
14243 intel_scaler = &scaler_state->scalers[i];
14244 intel_scaler->in_use = 0;
549e2bfb
CK
14245 intel_scaler->mode = PS_SCALER_MODE_DYN;
14246 }
14247
14248 scaler_state->scaler_id = -1;
14249}
14250
/*
 * Allocate and register the CRTC for @pipe together with its primary and
 * cursor planes.
 *
 * Failure at any step tears down everything allocated so far and returns
 * silently (the pipe simply ends up without a CRTC).
 */
static void intel_crtc_init(struct drm_device *dev, int pipe)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc;
	struct intel_crtc_state *crtc_state = NULL;
	struct drm_plane *primary = NULL;
	struct drm_plane *cursor = NULL;
	int ret;

	intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
	if (intel_crtc == NULL)
		return;

	crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
	if (!crtc_state)
		goto fail;
	intel_crtc->config = crtc_state;
	intel_crtc->base.state = &crtc_state->base;
	crtc_state->base.crtc = &intel_crtc->base;

	/* initialize shared scalers */
	if (INTEL_INFO(dev)->gen >= 9) {
		/* Pipe C on SKL-class hardware only has one scaler. */
		if (pipe == PIPE_C)
			intel_crtc->num_scalers = 1;
		else
			intel_crtc->num_scalers = SKL_NUM_SCALERS;

		skl_init_scalers(dev, intel_crtc, crtc_state);
	}

	primary = intel_primary_plane_create(dev, pipe);
	if (!primary)
		goto fail;

	cursor = intel_cursor_plane_create(dev, pipe);
	if (!cursor)
		goto fail;

	ret = drm_crtc_init_with_planes(dev, &intel_crtc->base, primary,
					cursor, &intel_crtc_funcs, NULL);
	if (ret)
		goto fail;

	/*
	 * On gen2/3 only plane A can do fbc, but the panel fitter and lvds port
	 * is hooked to pipe B. Hence we want plane A feeding pipe B.
	 */
	intel_crtc->pipe = pipe;
	intel_crtc->plane = pipe;
	if (HAS_FBC(dev) && INTEL_INFO(dev)->gen < 4) {
		DRM_DEBUG_KMS("swapping pipes & planes for FBC\n");
		intel_crtc->plane = !pipe;
	}

	/* ~0 marks the cached cursor register values as unknown, presumably
	 * so the first cursor update always writes the hardware — TODO confirm. */
	intel_crtc->cursor_base = ~0;
	intel_crtc->cursor_cntl = ~0;
	intel_crtc->cursor_size = ~0;

	intel_crtc->wm.cxsr_allowed = true;

	/* Each plane/pipe slot must be claimed exactly once. */
	BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
	       dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL);
	dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base;
	dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base;

	drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);

	intel_color_init(&intel_crtc->base);

	WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
	return;

fail:
	if (primary)
		drm_plane_cleanup(primary);
	if (cursor)
		drm_plane_cleanup(cursor);
	kfree(crtc_state);
	kfree(intel_crtc);
}
14331
752aa88a
JB
14332enum pipe intel_get_pipe_from_connector(struct intel_connector *connector)
14333{
14334 struct drm_encoder *encoder = connector->base.encoder;
6e9f798d 14335 struct drm_device *dev = connector->base.dev;
752aa88a 14336
51fd371b 14337 WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
752aa88a 14338
d3babd3f 14339 if (!encoder || WARN_ON(!encoder->crtc))
752aa88a
JB
14340 return INVALID_PIPE;
14341
14342 return to_intel_crtc(encoder->crtc)->pipe;
14343}
14344
08d7b3d1 14345int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
05394f39 14346 struct drm_file *file)
08d7b3d1 14347{
08d7b3d1 14348 struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
7707e653 14349 struct drm_crtc *drmmode_crtc;
c05422d5 14350 struct intel_crtc *crtc;
08d7b3d1 14351
7707e653 14352 drmmode_crtc = drm_crtc_find(dev, pipe_from_crtc_id->crtc_id);
08d7b3d1 14353
7707e653 14354 if (!drmmode_crtc) {
08d7b3d1 14355 DRM_ERROR("no such CRTC id\n");
3f2c2057 14356 return -ENOENT;
08d7b3d1
CW
14357 }
14358
7707e653 14359 crtc = to_intel_crtc(drmmode_crtc);
c05422d5 14360 pipe_from_crtc_id->pipe = crtc->pipe;
08d7b3d1 14361
c05422d5 14362 return 0;
08d7b3d1
CW
14363}
14364
66a9278e 14365static int intel_encoder_clones(struct intel_encoder *encoder)
79e53945 14366{
66a9278e
DV
14367 struct drm_device *dev = encoder->base.dev;
14368 struct intel_encoder *source_encoder;
79e53945 14369 int index_mask = 0;
79e53945
JB
14370 int entry = 0;
14371
b2784e15 14372 for_each_intel_encoder(dev, source_encoder) {
bc079e8b 14373 if (encoders_cloneable(encoder, source_encoder))
66a9278e
DV
14374 index_mask |= (1 << entry);
14375
79e53945
JB
14376 entry++;
14377 }
4ef69c7a 14378
79e53945
JB
14379 return index_mask;
14380}
14381
4d302442
CW
14382static bool has_edp_a(struct drm_device *dev)
14383{
14384 struct drm_i915_private *dev_priv = dev->dev_private;
14385
14386 if (!IS_MOBILE(dev))
14387 return false;
14388
14389 if ((I915_READ(DP_A) & DP_DETECTED) == 0)
14390 return false;
14391
e3589908 14392 if (IS_GEN5(dev) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
4d302442
CW
14393 return false;
14394
14395 return true;
14396}
14397
84b4e042
JB
14398static bool intel_crt_present(struct drm_device *dev)
14399{
14400 struct drm_i915_private *dev_priv = dev->dev_private;
14401
884497ed
DL
14402 if (INTEL_INFO(dev)->gen >= 9)
14403 return false;
14404
cf404ce4 14405 if (IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
84b4e042
JB
14406 return false;
14407
14408 if (IS_CHERRYVIEW(dev))
14409 return false;
14410
65e472e4
VS
14411 if (HAS_PCH_LPT_H(dev) && I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
14412 return false;
14413
70ac54d0
VS
14414 /* DDI E can't be used if DDI A requires 4 lanes */
14415 if (HAS_DDI(dev) && I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
14416 return false;
14417
e4abb733 14418 if (!dev_priv->vbt.int_crt_support)
84b4e042
JB
14419 return false;
14420
14421 return true;
14422}
14423
/*
 * Probe hardware straps and the VBT and register every display output
 * (encoder) present on this platform.
 *
 * The per-platform probe order matters: LVDS/eDP panels are brought up
 * first, SDVO is tried before falling back to HDMI/DP on pins they share,
 * and possible_crtcs/possible_clones are filled in for all encoders at
 * the end.
 */
static void intel_setup_outputs(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_encoder *encoder;
	bool dpd_is_edp = false;

	intel_lvds_init(dev);

	if (intel_crt_present(dev))
		intel_crt_init(dev);

	if (IS_BROXTON(dev)) {
		/*
		 * FIXME: Broxton doesn't support port detection via the
		 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
		 * detect the ports.
		 */
		intel_ddi_init(dev, PORT_A);
		intel_ddi_init(dev, PORT_B);
		intel_ddi_init(dev, PORT_C);

		intel_dsi_init(dev);
	} else if (HAS_DDI(dev)) {
		int found;

		/*
		 * Haswell uses DDI functions to detect digital outputs.
		 * On SKL pre-D0 the strap isn't connected, so we assume
		 * it's there.
		 */
		found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
		/* WaIgnoreDDIAStrap: skl */
		if (found || IS_SKYLAKE(dev) || IS_KABYLAKE(dev))
			intel_ddi_init(dev, PORT_A);

		/* DDI B, C and D detection is indicated by the SFUSE_STRAP
		 * register */
		found = I915_READ(SFUSE_STRAP);

		if (found & SFUSE_STRAP_DDIB_DETECTED)
			intel_ddi_init(dev, PORT_B);
		if (found & SFUSE_STRAP_DDIC_DETECTED)
			intel_ddi_init(dev, PORT_C);
		if (found & SFUSE_STRAP_DDID_DETECTED)
			intel_ddi_init(dev, PORT_D);
		/*
		 * On SKL we don't have a way to detect DDI-E so we rely on VBT.
		 */
		if ((IS_SKYLAKE(dev) || IS_KABYLAKE(dev)) &&
		    (dev_priv->vbt.ddi_port_info[PORT_E].supports_dp ||
		     dev_priv->vbt.ddi_port_info[PORT_E].supports_dvi ||
		     dev_priv->vbt.ddi_port_info[PORT_E].supports_hdmi))
			intel_ddi_init(dev, PORT_E);

	} else if (HAS_PCH_SPLIT(dev)) {
		int found;
		dpd_is_edp = intel_dp_is_edp(dev, PORT_D);

		if (has_edp_a(dev))
			intel_dp_init(dev, DP_A, PORT_A);

		if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
			/* PCH SDVOB multiplex with HDMIB */
			found = intel_sdvo_init(dev, PCH_SDVOB, PORT_B);
			if (!found)
				intel_hdmi_init(dev, PCH_HDMIB, PORT_B);
			if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
				intel_dp_init(dev, PCH_DP_B, PORT_B);
		}

		if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
			intel_hdmi_init(dev, PCH_HDMIC, PORT_C);

		/* Port D carries eDP on some boards; skip HDMI there. */
		if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
			intel_hdmi_init(dev, PCH_HDMID, PORT_D);

		if (I915_READ(PCH_DP_C) & DP_DETECTED)
			intel_dp_init(dev, PCH_DP_C, PORT_C);

		if (I915_READ(PCH_DP_D) & DP_DETECTED)
			intel_dp_init(dev, PCH_DP_D, PORT_D);
	} else if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
		/*
		 * The DP_DETECTED bit is the latched state of the DDC
		 * SDA pin at boot. However since eDP doesn't require DDC
		 * (no way to plug in a DP->HDMI dongle) the DDC pins for
		 * eDP ports may have been muxed to an alternate function.
		 * Thus we can't rely on the DP_DETECTED bit alone to detect
		 * eDP ports. Consult the VBT as well as DP_DETECTED to
		 * detect eDP ports.
		 */
		if (I915_READ(VLV_HDMIB) & SDVO_DETECTED &&
		    !intel_dp_is_edp(dev, PORT_B))
			intel_hdmi_init(dev, VLV_HDMIB, PORT_B);
		if (I915_READ(VLV_DP_B) & DP_DETECTED ||
		    intel_dp_is_edp(dev, PORT_B))
			intel_dp_init(dev, VLV_DP_B, PORT_B);

		if (I915_READ(VLV_HDMIC) & SDVO_DETECTED &&
		    !intel_dp_is_edp(dev, PORT_C))
			intel_hdmi_init(dev, VLV_HDMIC, PORT_C);
		if (I915_READ(VLV_DP_C) & DP_DETECTED ||
		    intel_dp_is_edp(dev, PORT_C))
			intel_dp_init(dev, VLV_DP_C, PORT_C);

		if (IS_CHERRYVIEW(dev)) {
			/* eDP not supported on port D, so don't check VBT */
			if (I915_READ(CHV_HDMID) & SDVO_DETECTED)
				intel_hdmi_init(dev, CHV_HDMID, PORT_D);
			if (I915_READ(CHV_DP_D) & DP_DETECTED)
				intel_dp_init(dev, CHV_DP_D, PORT_D);
		}

		intel_dsi_init(dev);
	} else if (!IS_GEN2(dev) && !IS_PINEVIEW(dev)) {
		bool found = false;

		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOB\n");
			found = intel_sdvo_init(dev, GEN3_SDVOB, PORT_B);
			if (!found && IS_G4X(dev)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
				intel_hdmi_init(dev, GEN4_HDMIB, PORT_B);
			}

			if (!found && IS_G4X(dev))
				intel_dp_init(dev, DP_B, PORT_B);
		}

		/* Before G4X SDVOC doesn't have its own detect register */

		if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
			DRM_DEBUG_KMS("probing SDVOC\n");
			found = intel_sdvo_init(dev, GEN3_SDVOC, PORT_C);
		}

		if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {

			if (IS_G4X(dev)) {
				DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
				intel_hdmi_init(dev, GEN4_HDMIC, PORT_C);
			}
			if (IS_G4X(dev))
				intel_dp_init(dev, DP_C, PORT_C);
		}

		if (IS_G4X(dev) &&
		    (I915_READ(DP_D) & DP_DETECTED))
			intel_dp_init(dev, DP_D, PORT_D);
	} else if (IS_GEN2(dev))
		intel_dvo_init(dev);

	if (SUPPORTS_TV(dev))
		intel_tv_init(dev);

	intel_psr_init(dev);

	/* Now that all encoders exist, fill in their cloning/crtc masks. */
	for_each_intel_encoder(dev, encoder) {
		encoder->base.possible_crtcs = encoder->crtc_mask;
		encoder->base.possible_clones =
			intel_encoder_clones(encoder);
	}

	intel_init_pch_refclk(dev);

	drm_helper_move_panel_connectors_to_head(dev);
}
14591
14592static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
14593{
60a5ca01 14594 struct drm_device *dev = fb->dev;
79e53945 14595 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
79e53945 14596
ef2d633e 14597 drm_framebuffer_cleanup(fb);
60a5ca01 14598 mutex_lock(&dev->struct_mutex);
ef2d633e 14599 WARN_ON(!intel_fb->obj->framebuffer_references--);
60a5ca01
VS
14600 drm_gem_object_unreference(&intel_fb->obj->base);
14601 mutex_unlock(&dev->struct_mutex);
79e53945
JB
14602 kfree(intel_fb);
14603}
14604
14605static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
05394f39 14606 struct drm_file *file,
79e53945
JB
14607 unsigned int *handle)
14608{
14609 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
05394f39 14610 struct drm_i915_gem_object *obj = intel_fb->obj;
79e53945 14611
cc917ab4
CW
14612 if (obj->userptr.mm) {
14613 DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n");
14614 return -EINVAL;
14615 }
14616
05394f39 14617 return drm_gem_handle_create(file, &obj->base, handle);
79e53945
JB
14618}
14619
86c98588
RV
14620static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
14621 struct drm_file *file,
14622 unsigned flags, unsigned color,
14623 struct drm_clip_rect *clips,
14624 unsigned num_clips)
14625{
14626 struct drm_device *dev = fb->dev;
14627 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
14628 struct drm_i915_gem_object *obj = intel_fb->obj;
14629
14630 mutex_lock(&dev->struct_mutex);
74b4ea1e 14631 intel_fb_obj_flush(obj, false, ORIGIN_DIRTYFB);
86c98588
RV
14632 mutex_unlock(&dev->struct_mutex);
14633
14634 return 0;
14635}
14636
/* Framebuffer vfuncs used for all userspace-created framebuffers. */
static const struct drm_framebuffer_funcs intel_fb_funcs = {
	.destroy = intel_user_framebuffer_destroy,
	.create_handle = intel_user_framebuffer_create_handle,
	.dirty = intel_user_framebuffer_dirty,
};
14642
b321803d
DL
14643static
14644u32 intel_fb_pitch_limit(struct drm_device *dev, uint64_t fb_modifier,
14645 uint32_t pixel_format)
14646{
14647 u32 gen = INTEL_INFO(dev)->gen;
14648
14649 if (gen >= 9) {
ac484963
VS
14650 int cpp = drm_format_plane_cpp(pixel_format, 0);
14651
b321803d
DL
14652 /* "The stride in bytes must not exceed the of the size of 8K
14653 * pixels and 32K bytes."
14654 */
ac484963 14655 return min(8192 * cpp, 32768);
666a4537 14656 } else if (gen >= 5 && !IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
b321803d
DL
14657 return 32*1024;
14658 } else if (gen >= 4) {
14659 if (fb_modifier == I915_FORMAT_MOD_X_TILED)
14660 return 16*1024;
14661 else
14662 return 32*1024;
14663 } else if (gen >= 3) {
14664 if (fb_modifier == I915_FORMAT_MOD_X_TILED)
14665 return 8*1024;
14666 else
14667 return 16*1024;
14668 } else {
14669 /* XXX DSPC is limited to 4k tiled */
14670 return 8*1024;
14671 }
14672}
14673
b5ea642a
DV
14674static int intel_framebuffer_init(struct drm_device *dev,
14675 struct intel_framebuffer *intel_fb,
14676 struct drm_mode_fb_cmd2 *mode_cmd,
14677 struct drm_i915_gem_object *obj)
79e53945 14678{
7b49f948 14679 struct drm_i915_private *dev_priv = to_i915(dev);
6761dd31 14680 unsigned int aligned_height;
79e53945 14681 int ret;
b321803d 14682 u32 pitch_limit, stride_alignment;
79e53945 14683
dd4916c5
DV
14684 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
14685
2a80eada
DV
14686 if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
14687 /* Enforce that fb modifier and tiling mode match, but only for
14688 * X-tiled. This is needed for FBC. */
14689 if (!!(obj->tiling_mode == I915_TILING_X) !=
14690 !!(mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED)) {
14691 DRM_DEBUG("tiling_mode doesn't match fb modifier\n");
14692 return -EINVAL;
14693 }
14694 } else {
14695 if (obj->tiling_mode == I915_TILING_X)
14696 mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
14697 else if (obj->tiling_mode == I915_TILING_Y) {
14698 DRM_DEBUG("No Y tiling for legacy addfb\n");
14699 return -EINVAL;
14700 }
14701 }
14702
9a8f0a12
TU
14703 /* Passed in modifier sanity checking. */
14704 switch (mode_cmd->modifier[0]) {
14705 case I915_FORMAT_MOD_Y_TILED:
14706 case I915_FORMAT_MOD_Yf_TILED:
14707 if (INTEL_INFO(dev)->gen < 9) {
14708 DRM_DEBUG("Unsupported tiling 0x%llx!\n",
14709 mode_cmd->modifier[0]);
14710 return -EINVAL;
14711 }
14712 case DRM_FORMAT_MOD_NONE:
14713 case I915_FORMAT_MOD_X_TILED:
14714 break;
14715 default:
c0f40428
JB
14716 DRM_DEBUG("Unsupported fb modifier 0x%llx!\n",
14717 mode_cmd->modifier[0]);
57cd6508 14718 return -EINVAL;
c16ed4be 14719 }
57cd6508 14720
7b49f948
VS
14721 stride_alignment = intel_fb_stride_alignment(dev_priv,
14722 mode_cmd->modifier[0],
b321803d
DL
14723 mode_cmd->pixel_format);
14724 if (mode_cmd->pitches[0] & (stride_alignment - 1)) {
14725 DRM_DEBUG("pitch (%d) must be at least %u byte aligned\n",
14726 mode_cmd->pitches[0], stride_alignment);
57cd6508 14727 return -EINVAL;
c16ed4be 14728 }
57cd6508 14729
b321803d
DL
14730 pitch_limit = intel_fb_pitch_limit(dev, mode_cmd->modifier[0],
14731 mode_cmd->pixel_format);
a35cdaa0 14732 if (mode_cmd->pitches[0] > pitch_limit) {
b321803d
DL
14733 DRM_DEBUG("%s pitch (%u) must be at less than %d\n",
14734 mode_cmd->modifier[0] != DRM_FORMAT_MOD_NONE ?
2a80eada 14735 "tiled" : "linear",
a35cdaa0 14736 mode_cmd->pitches[0], pitch_limit);
5d7bd705 14737 return -EINVAL;
c16ed4be 14738 }
5d7bd705 14739
2a80eada 14740 if (mode_cmd->modifier[0] == I915_FORMAT_MOD_X_TILED &&
c16ed4be
CW
14741 mode_cmd->pitches[0] != obj->stride) {
14742 DRM_DEBUG("pitch (%d) must match tiling stride (%d)\n",
14743 mode_cmd->pitches[0], obj->stride);
5d7bd705 14744 return -EINVAL;
c16ed4be 14745 }
5d7bd705 14746
57779d06 14747 /* Reject formats not supported by any plane early. */
308e5bcb 14748 switch (mode_cmd->pixel_format) {
57779d06 14749 case DRM_FORMAT_C8:
04b3924d
VS
14750 case DRM_FORMAT_RGB565:
14751 case DRM_FORMAT_XRGB8888:
14752 case DRM_FORMAT_ARGB8888:
57779d06
VS
14753 break;
14754 case DRM_FORMAT_XRGB1555:
c16ed4be 14755 if (INTEL_INFO(dev)->gen > 3) {
4ee62c76
VS
14756 DRM_DEBUG("unsupported pixel format: %s\n",
14757 drm_get_format_name(mode_cmd->pixel_format));
57779d06 14758 return -EINVAL;
c16ed4be 14759 }
57779d06 14760 break;
57779d06 14761 case DRM_FORMAT_ABGR8888:
666a4537
WB
14762 if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev) &&
14763 INTEL_INFO(dev)->gen < 9) {
6c0fd451
DL
14764 DRM_DEBUG("unsupported pixel format: %s\n",
14765 drm_get_format_name(mode_cmd->pixel_format));
14766 return -EINVAL;
14767 }
14768 break;
14769 case DRM_FORMAT_XBGR8888:
04b3924d 14770 case DRM_FORMAT_XRGB2101010:
57779d06 14771 case DRM_FORMAT_XBGR2101010:
c16ed4be 14772 if (INTEL_INFO(dev)->gen < 4) {
4ee62c76
VS
14773 DRM_DEBUG("unsupported pixel format: %s\n",
14774 drm_get_format_name(mode_cmd->pixel_format));
57779d06 14775 return -EINVAL;
c16ed4be 14776 }
b5626747 14777 break;
7531208b 14778 case DRM_FORMAT_ABGR2101010:
666a4537 14779 if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
7531208b
DL
14780 DRM_DEBUG("unsupported pixel format: %s\n",
14781 drm_get_format_name(mode_cmd->pixel_format));
14782 return -EINVAL;
14783 }
14784 break;
04b3924d
VS
14785 case DRM_FORMAT_YUYV:
14786 case DRM_FORMAT_UYVY:
14787 case DRM_FORMAT_YVYU:
14788 case DRM_FORMAT_VYUY:
c16ed4be 14789 if (INTEL_INFO(dev)->gen < 5) {
4ee62c76
VS
14790 DRM_DEBUG("unsupported pixel format: %s\n",
14791 drm_get_format_name(mode_cmd->pixel_format));
57779d06 14792 return -EINVAL;
c16ed4be 14793 }
57cd6508
CW
14794 break;
14795 default:
4ee62c76
VS
14796 DRM_DEBUG("unsupported pixel format: %s\n",
14797 drm_get_format_name(mode_cmd->pixel_format));
57cd6508
CW
14798 return -EINVAL;
14799 }
14800
90f9a336
VS
14801 /* FIXME need to adjust LINOFF/TILEOFF accordingly. */
14802 if (mode_cmd->offsets[0] != 0)
14803 return -EINVAL;
14804
ec2c981e 14805 aligned_height = intel_fb_align_height(dev, mode_cmd->height,
091df6cb
DV
14806 mode_cmd->pixel_format,
14807 mode_cmd->modifier[0]);
53155c0a
DV
14808 /* FIXME drm helper for size checks (especially planar formats)? */
14809 if (obj->base.size < aligned_height * mode_cmd->pitches[0])
14810 return -EINVAL;
14811
c7d73f6a
DV
14812 drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd);
14813 intel_fb->obj = obj;
14814
2d7a215f
VS
14815 intel_fill_fb_info(dev_priv, &intel_fb->base);
14816
79e53945
JB
14817 ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs);
14818 if (ret) {
14819 DRM_ERROR("framebuffer init failed %d\n", ret);
14820 return ret;
14821 }
14822
0b05e1e0
VS
14823 intel_fb->obj->framebuffer_references++;
14824
79e53945
JB
14825 return 0;
14826}
14827
79e53945
JB
14828static struct drm_framebuffer *
14829intel_user_framebuffer_create(struct drm_device *dev,
14830 struct drm_file *filp,
1eb83451 14831 const struct drm_mode_fb_cmd2 *user_mode_cmd)
79e53945 14832{
dcb1394e 14833 struct drm_framebuffer *fb;
05394f39 14834 struct drm_i915_gem_object *obj;
76dc3769 14835 struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
79e53945 14836
308e5bcb 14837 obj = to_intel_bo(drm_gem_object_lookup(dev, filp,
76dc3769 14838 mode_cmd.handles[0]));
c8725226 14839 if (&obj->base == NULL)
cce13ff7 14840 return ERR_PTR(-ENOENT);
79e53945 14841
92907cbb 14842 fb = intel_framebuffer_create(dev, &mode_cmd, obj);
dcb1394e
LW
14843 if (IS_ERR(fb))
14844 drm_gem_object_unreference_unlocked(&obj->base);
14845
14846 return fb;
79e53945
JB
14847}
14848
#ifndef CONFIG_DRM_FBDEV_EMULATION
/* Without fbdev emulation there is nothing to do on output changes. */
static inline void intel_fbdev_output_poll_changed(struct drm_device *dev)
{
}
#endif
14854
/* Top-level mode-config entry points handed to the DRM core. */
static const struct drm_mode_config_funcs intel_mode_funcs = {
	.fb_create = intel_user_framebuffer_create,
	.output_poll_changed = intel_fbdev_output_poll_changed,
	.atomic_check = intel_atomic_check,
	.atomic_commit = intel_atomic_commit,
	.atomic_state_alloc = intel_atomic_state_alloc,
	.atomic_state_clear = intel_atomic_state_clear,
};
14863
88212941
ID
14864/**
14865 * intel_init_display_hooks - initialize the display modesetting hooks
14866 * @dev_priv: device private
14867 */
14868void intel_init_display_hooks(struct drm_i915_private *dev_priv)
e70236a8 14869{
88212941 14870 if (INTEL_INFO(dev_priv)->gen >= 9) {
bc8d7dff 14871 dev_priv->display.get_pipe_config = haswell_get_pipe_config;
5724dbd1
DL
14872 dev_priv->display.get_initial_plane_config =
14873 skylake_get_initial_plane_config;
bc8d7dff
DL
14874 dev_priv->display.crtc_compute_clock =
14875 haswell_crtc_compute_clock;
14876 dev_priv->display.crtc_enable = haswell_crtc_enable;
14877 dev_priv->display.crtc_disable = haswell_crtc_disable;
88212941 14878 } else if (HAS_DDI(dev_priv)) {
0e8ffe1b 14879 dev_priv->display.get_pipe_config = haswell_get_pipe_config;
5724dbd1
DL
14880 dev_priv->display.get_initial_plane_config =
14881 ironlake_get_initial_plane_config;
797d0259
ACO
14882 dev_priv->display.crtc_compute_clock =
14883 haswell_crtc_compute_clock;
4f771f10
PZ
14884 dev_priv->display.crtc_enable = haswell_crtc_enable;
14885 dev_priv->display.crtc_disable = haswell_crtc_disable;
88212941 14886 } else if (HAS_PCH_SPLIT(dev_priv)) {
0e8ffe1b 14887 dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
5724dbd1
DL
14888 dev_priv->display.get_initial_plane_config =
14889 ironlake_get_initial_plane_config;
3fb37703
ACO
14890 dev_priv->display.crtc_compute_clock =
14891 ironlake_crtc_compute_clock;
76e5a89c
DV
14892 dev_priv->display.crtc_enable = ironlake_crtc_enable;
14893 dev_priv->display.crtc_disable = ironlake_crtc_disable;
65b3d6a9 14894 } else if (IS_CHERRYVIEW(dev_priv)) {
89b667f8 14895 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
5724dbd1
DL
14896 dev_priv->display.get_initial_plane_config =
14897 i9xx_get_initial_plane_config;
65b3d6a9
ACO
14898 dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock;
14899 dev_priv->display.crtc_enable = valleyview_crtc_enable;
14900 dev_priv->display.crtc_disable = i9xx_crtc_disable;
14901 } else if (IS_VALLEYVIEW(dev_priv)) {
14902 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
14903 dev_priv->display.get_initial_plane_config =
14904 i9xx_get_initial_plane_config;
14905 dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock;
89b667f8
JB
14906 dev_priv->display.crtc_enable = valleyview_crtc_enable;
14907 dev_priv->display.crtc_disable = i9xx_crtc_disable;
19ec6693
ACO
14908 } else if (IS_G4X(dev_priv)) {
14909 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
14910 dev_priv->display.get_initial_plane_config =
14911 i9xx_get_initial_plane_config;
14912 dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock;
14913 dev_priv->display.crtc_enable = i9xx_crtc_enable;
14914 dev_priv->display.crtc_disable = i9xx_crtc_disable;
70e8aa21
ACO
14915 } else if (IS_PINEVIEW(dev_priv)) {
14916 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
14917 dev_priv->display.get_initial_plane_config =
14918 i9xx_get_initial_plane_config;
14919 dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock;
14920 dev_priv->display.crtc_enable = i9xx_crtc_enable;
14921 dev_priv->display.crtc_disable = i9xx_crtc_disable;
81c97f52 14922 } else if (!IS_GEN2(dev_priv)) {
0e8ffe1b 14923 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
5724dbd1
DL
14924 dev_priv->display.get_initial_plane_config =
14925 i9xx_get_initial_plane_config;
d6dfee7a 14926 dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
76e5a89c
DV
14927 dev_priv->display.crtc_enable = i9xx_crtc_enable;
14928 dev_priv->display.crtc_disable = i9xx_crtc_disable;
81c97f52
ACO
14929 } else {
14930 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
14931 dev_priv->display.get_initial_plane_config =
14932 i9xx_get_initial_plane_config;
14933 dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock;
14934 dev_priv->display.crtc_enable = i9xx_crtc_enable;
14935 dev_priv->display.crtc_disable = i9xx_crtc_disable;
f564048e 14936 }
e70236a8 14937
e70236a8 14938 /* Returns the core display clock speed */
88212941 14939 if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
1652d19e
VS
14940 dev_priv->display.get_display_clock_speed =
14941 skylake_get_display_clock_speed;
88212941 14942 else if (IS_BROXTON(dev_priv))
acd3f3d3
BP
14943 dev_priv->display.get_display_clock_speed =
14944 broxton_get_display_clock_speed;
88212941 14945 else if (IS_BROADWELL(dev_priv))
1652d19e
VS
14946 dev_priv->display.get_display_clock_speed =
14947 broadwell_get_display_clock_speed;
88212941 14948 else if (IS_HASWELL(dev_priv))
1652d19e
VS
14949 dev_priv->display.get_display_clock_speed =
14950 haswell_get_display_clock_speed;
88212941 14951 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
25eb05fc
JB
14952 dev_priv->display.get_display_clock_speed =
14953 valleyview_get_display_clock_speed;
88212941 14954 else if (IS_GEN5(dev_priv))
b37a6434
VS
14955 dev_priv->display.get_display_clock_speed =
14956 ilk_get_display_clock_speed;
88212941
ID
14957 else if (IS_I945G(dev_priv) || IS_BROADWATER(dev_priv) ||
14958 IS_GEN6(dev_priv) || IS_IVYBRIDGE(dev_priv))
e70236a8
JB
14959 dev_priv->display.get_display_clock_speed =
14960 i945_get_display_clock_speed;
88212941 14961 else if (IS_GM45(dev_priv))
34edce2f
VS
14962 dev_priv->display.get_display_clock_speed =
14963 gm45_get_display_clock_speed;
88212941 14964 else if (IS_CRESTLINE(dev_priv))
34edce2f
VS
14965 dev_priv->display.get_display_clock_speed =
14966 i965gm_get_display_clock_speed;
88212941 14967 else if (IS_PINEVIEW(dev_priv))
34edce2f
VS
14968 dev_priv->display.get_display_clock_speed =
14969 pnv_get_display_clock_speed;
88212941 14970 else if (IS_G33(dev_priv) || IS_G4X(dev_priv))
34edce2f
VS
14971 dev_priv->display.get_display_clock_speed =
14972 g33_get_display_clock_speed;
88212941 14973 else if (IS_I915G(dev_priv))
e70236a8
JB
14974 dev_priv->display.get_display_clock_speed =
14975 i915_get_display_clock_speed;
88212941 14976 else if (IS_I945GM(dev_priv) || IS_845G(dev_priv))
e70236a8
JB
14977 dev_priv->display.get_display_clock_speed =
14978 i9xx_misc_get_display_clock_speed;
88212941 14979 else if (IS_I915GM(dev_priv))
e70236a8
JB
14980 dev_priv->display.get_display_clock_speed =
14981 i915gm_get_display_clock_speed;
88212941 14982 else if (IS_I865G(dev_priv))
e70236a8
JB
14983 dev_priv->display.get_display_clock_speed =
14984 i865_get_display_clock_speed;
88212941 14985 else if (IS_I85X(dev_priv))
e70236a8 14986 dev_priv->display.get_display_clock_speed =
1b1d2716 14987 i85x_get_display_clock_speed;
623e01e5 14988 else { /* 830 */
88212941 14989 WARN(!IS_I830(dev_priv), "Unknown platform. Assuming 133 MHz CDCLK\n");
e70236a8
JB
14990 dev_priv->display.get_display_clock_speed =
14991 i830_get_display_clock_speed;
623e01e5 14992 }
e70236a8 14993
88212941 14994 if (IS_GEN5(dev_priv)) {
3bb11b53 14995 dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
88212941 14996 } else if (IS_GEN6(dev_priv)) {
3bb11b53 14997 dev_priv->display.fdi_link_train = gen6_fdi_link_train;
88212941 14998 } else if (IS_IVYBRIDGE(dev_priv)) {
3bb11b53
SJ
14999 /* FIXME: detect B0+ stepping and use auto training */
15000 dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
88212941 15001 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
3bb11b53 15002 dev_priv->display.fdi_link_train = hsw_fdi_link_train;
445e780b
VS
15003 }
15004
15005 if (IS_BROADWELL(dev_priv)) {
15006 dev_priv->display.modeset_commit_cdclk =
15007 broadwell_modeset_commit_cdclk;
15008 dev_priv->display.modeset_calc_cdclk =
15009 broadwell_modeset_calc_cdclk;
88212941 15010 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
27c329ed
ML
15011 dev_priv->display.modeset_commit_cdclk =
15012 valleyview_modeset_commit_cdclk;
15013 dev_priv->display.modeset_calc_cdclk =
15014 valleyview_modeset_calc_cdclk;
88212941 15015 } else if (IS_BROXTON(dev_priv)) {
27c329ed
ML
15016 dev_priv->display.modeset_commit_cdclk =
15017 broxton_modeset_commit_cdclk;
15018 dev_priv->display.modeset_calc_cdclk =
15019 broxton_modeset_calc_cdclk;
e70236a8 15020 }
8c9f3aaf 15021
88212941 15022 switch (INTEL_INFO(dev_priv)->gen) {
8c9f3aaf
JB
15023 case 2:
15024 dev_priv->display.queue_flip = intel_gen2_queue_flip;
15025 break;
15026
15027 case 3:
15028 dev_priv->display.queue_flip = intel_gen3_queue_flip;
15029 break;
15030
15031 case 4:
15032 case 5:
15033 dev_priv->display.queue_flip = intel_gen4_queue_flip;
15034 break;
15035
15036 case 6:
15037 dev_priv->display.queue_flip = intel_gen6_queue_flip;
15038 break;
7c9017e5 15039 case 7:
4e0bbc31 15040 case 8: /* FIXME(BDW): Check that the gen8 RCS flip works. */
7c9017e5
JB
15041 dev_priv->display.queue_flip = intel_gen7_queue_flip;
15042 break;
830c81db 15043 case 9:
ba343e02
TU
15044 /* Drop through - unsupported since execlist only. */
15045 default:
15046 /* Default just returns -ENODEV to indicate unsupported */
15047 dev_priv->display.queue_flip = intel_default_queue_flip;
8c9f3aaf 15048 }
e70236a8
JB
15049}
15050
b690e96c
JB
15051/*
15052 * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend,
15053 * resume, or other times. This quirk makes sure that's the case for
15054 * affected systems.
15055 */
/*
 * Quirk hook: mark this device as needing pipe A to be kept running
 * (QUIRK_PIPEA_FORCE). Consumed later by intel_sanitize_crtc().
 */
static void quirk_pipea_force(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	dev_priv->quirks |= QUIRK_PIPEA_FORCE;
	DRM_INFO("applying pipe a force quirk\n");
}
15063
b6b5d049
VS
15064static void quirk_pipeb_force(struct drm_device *dev)
15065{
15066 struct drm_i915_private *dev_priv = dev->dev_private;
15067
15068 dev_priv->quirks |= QUIRK_PIPEB_FORCE;
15069 DRM_INFO("applying pipe b force quirk\n");
15070}
15071
435793df
KP
15072/*
15073 * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
15074 */
/*
 * Quirk hook: force-disable SSC on LVDS (QUIRK_LVDS_SSC_DISABLE).
 * Some machines (Lenovo U160) do not work with SSC on LVDS.
 */
static void quirk_ssc_force_disable(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE;
	DRM_INFO("applying lvds SSC disable quirk\n");
}
15081
4dca20ef 15082/*
5a15ab5b
CE
15083 * A machine (e.g. Acer Aspire 5734Z) may need to invert the panel backlight
15084 * brightness value
4dca20ef
CE
15085 */
/*
 * Quirk hook: invert the panel backlight brightness value
 * (QUIRK_INVERT_BRIGHTNESS), e.g. for the Acer Aspire 5734Z.
 */
static void quirk_invert_brightness(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	dev_priv->quirks |= QUIRK_INVERT_BRIGHTNESS;
	DRM_INFO("applying inverted panel brightness quirk\n");
}
15092
9c72cc6f
SD
15093/* Some VBT's incorrectly indicate no backlight is present */
/*
 * Quirk hook: assume a backlight is present even though the VBT
 * incorrectly says otherwise (QUIRK_BACKLIGHT_PRESENT).
 */
static void quirk_backlight_present(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	dev_priv->quirks |= QUIRK_BACKLIGHT_PRESENT;
	DRM_INFO("applying backlight present quirk\n");
}
15100
b690e96c
JB
/*
 * One PCI-ID-matched quirk entry: matched against the GPU's PCI device
 * ID plus subsystem vendor/device (PCI_ANY_ID wildcards allowed), with
 * the hook applied on match. See intel_init_quirks().
 */
struct intel_quirk {
	int device;			/* PCI device ID of the GPU */
	int subsystem_vendor;		/* or PCI_ANY_ID */
	int subsystem_device;		/* or PCI_ANY_ID */
	void (*hook)(struct drm_device *dev);	/* quirk to apply on match */
};
15107
5f85f176
EE
15108/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
/* For systems that don't have a meaningful PCI subdevice/subvendor ID:
 * match on DMI strings instead and apply the given quirk hook. */
struct intel_dmi_quirk {
	void (*hook)(struct drm_device *dev);
	const struct dmi_system_id (*dmi_id_list)[];
};
15113
/*
 * DMI match callback: just logs which system matched. Returning 1 stops
 * dmi_check_system() after the first match; the actual quirk is applied
 * by the caller via intel_dmi_quirks[].hook.
 */
static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
{
	DRM_INFO("Backlight polarity reversed on %s\n", id->ident);
	return 1;
}
15119
/* DMI-matched quirk table; currently only NCR Corporation systems,
 * which need inverted backlight brightness. */
static const struct intel_dmi_quirks[] = {
	{
		.dmi_id_list = &(const struct dmi_system_id[]) {
			{
				.callback = intel_dmi_reverse_brightness,
				.ident = "NCR Corporation",
				.matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
					    DMI_MATCH(DMI_PRODUCT_NAME, ""),
				},
			},
			{ }  /* terminating entry */
		},
		.hook = quirk_invert_brightness,
	},
};
15135
/*
 * PCI-ID-matched quirk table, walked by intel_init_quirks(). Entries
 * are { device, subsystem_vendor, subsystem_device, hook }; PCI_ANY_ID
 * wildcards the subsystem fields. Note the 830 (0x3577) appears twice
 * on purpose: both the pipe A and pipe B force quirks apply to it.
 */
static struct intel_quirk intel_quirks[] = {
	/* Toshiba Protege R-205, S-209 needs pipe A force quirk */
	{ 0x2592, 0x1179, 0x0001, quirk_pipea_force },

	/* ThinkPad T60 needs pipe A force quirk (bug #16494) */
	{ 0x2782, 0x17aa, 0x201a, quirk_pipea_force },

	/* 830 needs to leave pipe A & dpll A up */
	{ 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force },

	/* 830 needs to leave pipe B & dpll B up */
	{ 0x3577, PCI_ANY_ID, PCI_ANY_ID, quirk_pipeb_force },

	/* Lenovo U160 cannot use SSC on LVDS */
	{ 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable },

	/* Sony Vaio Y cannot use SSC on LVDS */
	{ 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },

	/* Acer Aspire 5734Z must invert backlight brightness */
	{ 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },

	/* Acer/eMachines G725 */
	{ 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },

	/* Acer/eMachines e725 */
	{ 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },

	/* Acer/Packard Bell NCL20 */
	{ 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },

	/* Acer Aspire 4736Z */
	{ 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },

	/* Acer Aspire 5336 */
	{ 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },

	/* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */
	{ 0x0a06, 0x1025, 0x0a11, quirk_backlight_present },

	/* Acer C720 Chromebook (Core i3 4005U) */
	{ 0x0a16, 0x1025, 0x0a11, quirk_backlight_present },

	/* Apple Macbook 2,1 (Core 2 T7400) */
	{ 0x27a2, 0x8086, 0x7270, quirk_backlight_present },

	/* Apple Macbook 4,1 */
	{ 0x2a02, 0x106b, 0x00a1, quirk_backlight_present },

	/* Toshiba CB35 Chromebook (Celeron 2955U) */
	{ 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },

	/* HP Chromebook 14 (Celeron 2955U) */
	{ 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },

	/* Dell Chromebook 11 */
	{ 0x0a06, 0x1028, 0x0a35, quirk_backlight_present },

	/* Dell Chromebook 11 (2015 version) */
	{ 0x0a16, 0x1028, 0x0a35, quirk_backlight_present },
};
15197
/*
 * Apply all machine-specific quirks that match this device: first the
 * PCI-ID-matched intel_quirks[] table, then the DMI-matched
 * intel_dmi_quirks[] table. A device may match (and receive) more than
 * one quirk.
 */
static void intel_init_quirks(struct drm_device *dev)
{
	struct pci_dev *d = dev->pdev;
	int i;

	for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
		struct intel_quirk *q = &intel_quirks[i];

		/* Subsystem vendor/device may be wildcarded with PCI_ANY_ID. */
		if (d->device == q->device &&
		    (d->subsystem_vendor == q->subsystem_vendor ||
		     q->subsystem_vendor == PCI_ANY_ID) &&
		    (d->subsystem_device == q->subsystem_device ||
		     q->subsystem_device == PCI_ANY_ID))
			q->hook(dev);
	}
	for (i = 0; i < ARRAY_SIZE(intel_dmi_quirks); i++) {
		if (dmi_check_system(*intel_dmi_quirks[i].dmi_id_list) != 0)
			intel_dmi_quirks[i].hook(dev);
	}
}
15218
9cce37f4
JB
15219/* Disable the VGA plane that we never use */
/*
 * Disable the legacy VGA plane that we never use. Takes the VGA legacy
 * I/O resource, sets the screen-off bit (bit 5) in VGA sequencer
 * register SR01, then disables the VGA display plane through the
 * platform's VGACNTRL register.
 */
static void i915_disable_vga(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u8 sr1;
	i915_reg_t vga_reg = i915_vgacntrl_reg(dev);

	/* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
	vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
	outb(SR01, VGA_SR_INDEX);
	sr1 = inb(VGA_SR_DATA);
	outb(sr1 | 1<<5, VGA_SR_DATA);	/* set screen-off bit in SR01 */
	vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
	udelay(300);	/* settle delay before touching VGACNTRL */

	I915_WRITE(vga_reg, VGA_DISP_DISABLE);
	POSTING_READ(vga_reg);
}
15237
f817586c
DV
/*
 * Hardware-touching part of modeset init: read back the current cdclk,
 * seed the atomic cdclk bookkeeping from it, then set up clock gating
 * and GT power-saving features.
 */
void intel_modeset_init_hw(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	intel_update_cdclk(dev);

	/* Start the atomic state machinery from the cdclk just read out. */
	dev_priv->atomic_cdclk_freq = dev_priv->cdclk_freq;

	intel_init_clock_gating(dev);
	intel_enable_gt_powersave(dev_priv);
}
15249
d93c0372
MR
15250/*
15251 * Calculate what we think the watermarks should be for the state we've read
15252 * out of the hardware and then immediately program those watermarks so that
15253 * we ensure the hardware settings match our internal state.
15254 *
15255 * We can calculate what we think WM's should be by creating a duplicate of the
15256 * current state (which was constructed during hardware readout) and running it
15257 * through the atomic check code to calculate new watermark values in the
15258 * state object.
15259 */
/*
 * Calculate what we think the watermarks should be for the state we've read
 * out of the hardware and then immediately program those watermarks so that
 * we ensure the hardware settings match our internal state.
 *
 * We can calculate what we think WM's should be by creating a duplicate of the
 * current state (which was constructed during hardware readout) and running it
 * through the atomic check code to calculate new watermark values in the
 * state object.
 */
static void sanitize_watermarks(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state;
	struct drm_crtc *crtc;
	struct drm_crtc_state *cstate;
	struct drm_modeset_acquire_ctx ctx;
	int ret;
	int i;

	/* Only supported on platforms that use atomic watermark design */
	if (!dev_priv->display.optimize_watermarks)
		return;

	/*
	 * We need to hold connection_mutex before calling duplicate_state so
	 * that the connector loop is protected.
	 */
	drm_modeset_acquire_init(&ctx, 0);
retry:
	/* Standard w/w-mutex deadlock backoff-and-retry dance. */
	ret = drm_modeset_lock_all_ctx(dev, &ctx);
	if (ret == -EDEADLK) {
		drm_modeset_backoff(&ctx);
		goto retry;
	} else if (WARN_ON(ret)) {
		goto fail;
	}

	state = drm_atomic_helper_duplicate_state(dev, &ctx);
	if (WARN_ON(IS_ERR(state)))
		goto fail;

	/*
	 * Hardware readout is the only time we don't want to calculate
	 * intermediate watermarks (since we don't trust the current
	 * watermarks).
	 */
	to_intel_atomic_state(state)->skip_intermediate_wm = true;

	ret = intel_atomic_check(dev, state);
	if (ret) {
		/*
		 * If we fail here, it means that the hardware appears to be
		 * programmed in a way that shouldn't be possible, given our
		 * understanding of watermark requirements. This might mean a
		 * mistake in the hardware readout code or a mistake in the
		 * watermark calculations for a given platform. Raise a WARN
		 * so that this is noticeable.
		 *
		 * If this actually happens, we'll have to just leave the
		 * BIOS-programmed watermarks untouched and hope for the best.
		 */
		WARN(true, "Could not determine valid watermarks for inherited state\n");
		goto fail;
	}

	/* Write calculated watermark values back */
	for_each_crtc_in_state(state, crtc, cstate, i) {
		struct intel_crtc_state *cs = to_intel_crtc_state(cstate);

		cs->wm.need_postvbl_update = true;
		dev_priv->display.optimize_watermarks(cs);
	}

	drm_atomic_state_free(state);
fail:
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
}
15329
79e53945
JB
/*
 * Top-level display initialization: set up the DRM mode_config limits,
 * apply quirks, init PM, create CRTCs/planes, disable the VGA plane,
 * register outputs, read back the BIOS-programmed hardware state, claim
 * any BIOS framebuffer, and finally sanitize the watermarks. The
 * ordering here is load-bearing — e.g. watermark sanitization must run
 * after the BIOS fb readout because it uses pstate->fb.
 */
void intel_modeset_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct i915_ggtt *ggtt = &dev_priv->ggtt;
	int sprite, ret;
	enum pipe pipe;
	struct intel_crtc *crtc;

	drm_mode_config_init(dev);

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;

	dev->mode_config.preferred_depth = 24;
	dev->mode_config.prefer_shadow = 1;

	dev->mode_config.allow_fb_modifiers = true;

	dev->mode_config.funcs = &intel_mode_funcs;

	intel_init_quirks(dev);

	intel_init_pm(dev);

	/* Nothing more to do on display-less hardware. */
	if (INTEL_INFO(dev)->num_pipes == 0)
		return;

	/*
	 * There may be no VBT; and if the BIOS enabled SSC we can
	 * just keep using it to avoid unnecessary flicker. Whereas if the
	 * BIOS isn't using it, don't assume it will work even if the VBT
	 * indicates as much.
	 */
	if (HAS_PCH_IBX(dev) || HAS_PCH_CPT(dev)) {
		bool bios_lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
					    DREF_SSC1_ENABLE);

		if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
			DRM_DEBUG_KMS("SSC %sabled by BIOS, overriding VBT which says %sabled\n",
				      bios_lvds_use_ssc ? "en" : "dis",
				      dev_priv->vbt.lvds_use_ssc ? "en" : "dis");
			dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
		}
	}

	/* Maximum framebuffer dimensions grow with hardware generation. */
	if (IS_GEN2(dev)) {
		dev->mode_config.max_width = 2048;
		dev->mode_config.max_height = 2048;
	} else if (IS_GEN3(dev)) {
		dev->mode_config.max_width = 4096;
		dev->mode_config.max_height = 4096;
	} else {
		dev->mode_config.max_width = 8192;
		dev->mode_config.max_height = 8192;
	}

	/* Cursor size limits are also per-platform. */
	if (IS_845G(dev) || IS_I865G(dev)) {
		dev->mode_config.cursor_width = IS_845G(dev) ? 64 : 512;
		dev->mode_config.cursor_height = 1023;
	} else if (IS_GEN2(dev)) {
		dev->mode_config.cursor_width = GEN2_CURSOR_WIDTH;
		dev->mode_config.cursor_height = GEN2_CURSOR_HEIGHT;
	} else {
		dev->mode_config.cursor_width = MAX_CURSOR_WIDTH;
		dev->mode_config.cursor_height = MAX_CURSOR_HEIGHT;
	}

	dev->mode_config.fb_base = ggtt->mappable_base;

	DRM_DEBUG_KMS("%d display pipe%s available.\n",
		      INTEL_INFO(dev)->num_pipes,
		      INTEL_INFO(dev)->num_pipes > 1 ? "s" : "");

	/* Create a CRTC per pipe and all of its sprite planes. */
	for_each_pipe(dev_priv, pipe) {
		intel_crtc_init(dev, pipe);
		for_each_sprite(dev_priv, pipe, sprite) {
			ret = intel_plane_init(dev, pipe, sprite);
			if (ret)
				DRM_DEBUG_KMS("pipe %c sprite %c init failed: %d\n",
					      pipe_name(pipe), sprite_name(pipe, sprite), ret);
		}
	}

	intel_update_czclk(dev_priv);
	intel_update_cdclk(dev);

	intel_shared_dpll_init(dev);

	/* Just disable it once at startup */
	i915_disable_vga(dev);
	intel_setup_outputs(dev);

	drm_modeset_lock_all(dev);
	intel_modeset_setup_hw_state(dev);
	drm_modeset_unlock_all(dev);

	for_each_intel_crtc(dev, crtc) {
		struct intel_initial_plane_config plane_config = {};

		if (!crtc->active)
			continue;

		/*
		 * Note that reserving the BIOS fb up front prevents us
		 * from stuffing other stolen allocations like the ring
		 * on top. This prevents some ugliness at boot time, and
		 * can even allow for smooth boot transitions if the BIOS
		 * fb is large enough for the active pipe configuration.
		 */
		dev_priv->display.get_initial_plane_config(crtc,
							   &plane_config);

		/*
		 * If the fb is shared between multiple heads, we'll
		 * just get the first one.
		 */
		intel_find_initial_plane_obj(crtc, &plane_config);
	}

	/*
	 * Make sure hardware watermarks really match the state we read out.
	 * Note that we need to do this after reconstructing the BIOS fb's
	 * since the watermark calculation done here will use pstate->fb.
	 */
	sanitize_watermarks(dev);
}
15456
7fad798e
DV
/*
 * Force pipe A on for the QUIRK_PIPEA_FORCE machines by briefly running
 * the load-detect pipe on the first analog (CRT) connector found.
 * Silently does nothing if no analog connector exists.
 */
static void intel_enable_pipe_a(struct drm_device *dev)
{
	struct intel_connector *connector;
	struct drm_connector *crt = NULL;
	struct intel_load_detect_pipe load_detect_temp;
	struct drm_modeset_acquire_ctx *ctx = dev->mode_config.acquire_ctx;

	/* We can't just switch on the pipe A, we need to set things up with a
	 * proper mode and output configuration. As a gross hack, enable pipe A
	 * by enabling the load detect pipe once. */
	for_each_intel_connector(dev, connector) {
		if (connector->encoder->type == INTEL_OUTPUT_ANALOG) {
			crt = &connector->base;
			break;
		}
	}

	if (!crt)
		return;

	/* Enable, then immediately release, the load-detect pipe. */
	if (intel_get_load_detect_pipe(crt, NULL, &load_detect_temp, ctx))
		intel_release_load_detect_pipe(crt, &load_detect_temp, ctx);
}
15480
fa555837
DV
/*
 * Check whether the plane -> pipe mapping the BIOS left behind is sane:
 * returns false if the *other* plane is enabled and selects this crtc's
 * pipe (i.e. the mapping is crossed). Only meaningful on pre-gen4
 * hardware, which has a configurable mapping; with a single pipe there
 * is nothing to cross, so it trivially passes.
 */
static bool
intel_check_plane_mapping(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 val;

	if (INTEL_INFO(dev)->num_pipes == 1)
		return true;

	/* Read the control register of the other plane. */
	val = I915_READ(DSPCNTR(!crtc->plane));

	if ((val & DISPLAY_PLANE_ENABLE) &&
	    (!!(val & DISPPLANE_SEL_PIPE_MASK) == crtc->pipe))
		return false;

	return true;
}
15499
02e93c35
VS
/* Return true if at least one encoder is currently attached to @crtc. */
static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct intel_encoder *encoder;

	/* The loop body runs only for encoders on this crtc, so entering
	 * it at all means "yes". */
	for_each_encoder_on_crtc(dev, &crtc->base, encoder)
		return true;

	return false;
}
15510
dd756198
VS
/* Return true if at least one connector is currently using @encoder. */
static bool intel_encoder_has_connectors(struct intel_encoder *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct intel_connector *connector;

	/* Entering the loop at all means a connector references us. */
	for_each_connector_on_encoder(dev, &encoder->base, connector)
		return true;

	return false;
}
15521
24929352
DV
/*
 * Fix up whatever state the BIOS/firmware left on this crtc so it
 * matches what the driver expects: clear debug frame-start delays,
 * resync vblank bookkeeping, disable non-primary planes, repair a
 * crossed plane->pipe mapping (pre-gen4), honour the pipe-A-force
 * quirk, and turn the pipe off if nothing drives it. Order matters:
 * the plane-mapping fix may itself disable the crtc.
 */
static void intel_sanitize_crtc(struct intel_crtc *crtc)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum transcoder cpu_transcoder = crtc->config->cpu_transcoder;

	/* Clear any frame start delays used for debugging left by the BIOS */
	if (!transcoder_is_dsi(cpu_transcoder)) {
		i915_reg_t reg = PIPECONF(cpu_transcoder);

		I915_WRITE(reg,
			   I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
	}

	/* restore vblank interrupts to correct state */
	drm_crtc_vblank_reset(&crtc->base);
	if (crtc->active) {
		struct intel_plane *plane;

		drm_crtc_vblank_on(&crtc->base);

		/* Disable everything but the primary plane */
		for_each_intel_plane_on_crtc(dev, crtc, plane) {
			if (plane->base.type == DRM_PLANE_TYPE_PRIMARY)
				continue;

			plane->disable_plane(&plane->base, &crtc->base);
		}
	}

	/* We need to sanitize the plane -> pipe mapping first because this will
	 * disable the crtc (and hence change the state) if it is wrong. Note
	 * that gen4+ has a fixed plane -> pipe mapping. */
	if (INTEL_INFO(dev)->gen < 4 && !intel_check_plane_mapping(crtc)) {
		bool plane;

		DRM_DEBUG_KMS("[CRTC:%d] wrong plane connection detected!\n",
			      crtc->base.base.id);

		/* Pipe has the wrong plane attached and the plane is active.
		 * Temporarily change the plane mapping and disable everything
		 * ... */
		plane = crtc->plane;
		to_intel_plane_state(crtc->base.primary->state)->visible = true;
		crtc->plane = !plane;
		intel_crtc_disable_noatomic(&crtc->base);
		crtc->plane = plane;
	}

	if (dev_priv->quirks & QUIRK_PIPEA_FORCE &&
	    crtc->pipe == PIPE_A && !crtc->active) {
		/* BIOS forgot to enable pipe A, this mostly happens after
		 * resume. Force-enable the pipe to fix this, the update_dpms
		 * call below we restore the pipe to the right state, but leave
		 * the required bits on. */
		intel_enable_pipe_a(dev);
	}

	/* Adjust the state of the output pipe according to whether we
	 * have active connectors/encoders. */
	if (crtc->active && !intel_crtc_has_encoders(crtc))
		intel_crtc_disable_noatomic(&crtc->base);

	if (crtc->active || HAS_GMCH_DISPLAY(dev)) {
		/*
		 * We start out with underrun reporting disabled to avoid races.
		 * For correct bookkeeping mark this on active crtcs.
		 *
		 * Also on gmch platforms we dont have any hardware bits to
		 * disable the underrun reporting. Which means we need to start
		 * out with underrun reporting disabled also on inactive pipes,
		 * since otherwise we'll complain about the garbage we read when
		 * e.g. coming up after runtime pm.
		 *
		 * No protection against concurrent access is required - at
		 * worst a fifo underrun happens which also sets this to false.
		 */
		crtc->cpu_fifo_underrun_disabled = true;
		crtc->pch_fifo_underrun_disabled = true;
	}
}
15603
/*
 * Fix up an encoder whose hardware state is inconsistent after
 * boot/resume: if it has active connectors but no active pipe, manually
 * disable it and clamp its connectors to DPMS off. The inverse case
 * (enabled encoder without connectors) is handled later by the crtc
 * fixup.
 */
static void intel_sanitize_encoder(struct intel_encoder *encoder)
{
	struct intel_connector *connector;
	struct drm_device *dev = encoder->base.dev;

	/* We need to check both for a crtc link (meaning that the
	 * encoder is active and trying to read from a pipe) and the
	 * pipe itself being active. */
	bool has_active_crtc = encoder->base.crtc &&
		to_intel_crtc(encoder->base.crtc)->active;

	if (intel_encoder_has_connectors(encoder) && !has_active_crtc) {
		DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
			      encoder->base.base.id,
			      encoder->base.name);

		/* Connector is active, but has no active pipe. This is
		 * fallout from our resume register restoring. Disable
		 * the encoder manually again. */
		if (encoder->base.crtc) {
			DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
				      encoder->base.base.id,
				      encoder->base.name);
			encoder->disable(encoder);
			if (encoder->post_disable)
				encoder->post_disable(encoder);
		}
		encoder->base.crtc = NULL;

		/* Inconsistent output/port/pipe state happens presumably due to
		 * a bug in one of the get_hw_state functions. Or someplace else
		 * in our code, like the register restore mess on resume. Clamp
		 * things to off as a safer default. */
		for_each_intel_connector(dev, connector) {
			if (connector->encoder != encoder)
				continue;
			connector->base.dpms = DRM_MODE_DPMS_OFF;
			connector->base.encoder = NULL;
		}
	}
	/* Enabled encoders without active connectors will be fixed in
	 * the crtc fixup. */
}
15647
/*
 * Re-disable the VGA plane if something (BIOS, firmware) re-enabled it
 * behind our back. Caller must guarantee the VGA power domain is on;
 * see i915_redisable_vga() for the checked variant.
 */
void i915_redisable_vga_power_on(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	i915_reg_t vga_reg = i915_vgacntrl_reg(dev);

	if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
		DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
		i915_disable_vga(dev);
	}
}
15658
/*
 * Power-domain-safe wrapper around i915_redisable_vga_power_on(): only
 * touches the hardware if the VGA power well is already up, taking a
 * reference on it for the duration.
 */
void i915_redisable_vga(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* This function can be called both from intel_modeset_setup_hw_state or
	 * at a very early point in our resume sequence, where the power well
	 * structures are not yet restored. Since this function is at a very
	 * paranoid "someone might have enabled VGA while we were not looking"
	 * level, just check if the power well is enabled instead of trying to
	 * follow the "don't touch the power well if we don't need it" policy
	 * the rest of the driver uses. */
	if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_VGA))
		return;

	i915_redisable_vga_power_on(dev);

	intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);
}
15677
/* Read back whether the primary display plane is enabled in hardware. */
static bool primary_get_hw_state(struct intel_plane *plane)
{
	struct drm_i915_private *dev_priv = to_i915(plane->base.dev);

	return I915_READ(DSPCNTR(plane->plane)) & DISPLAY_PLANE_ENABLE;
}
15684
f9cd7b88
VS
15685/* FIXME read out full plane state for all planes */
/* FIXME read out full plane state for all planes */
static void readout_plane_state(struct intel_crtc *crtc)
{
	struct drm_plane *primary = crtc->base.primary;
	struct intel_plane_state *plane_state =
		to_intel_plane_state(primary->state);

	/* The primary plane only counts as visible when its crtc is
	 * actually running. */
	plane_state->visible = crtc->active &&
		primary_get_hw_state(to_intel_plane(primary));

	if (plane_state->visible)
		crtc->base.state->plane_mask |= 1 << drm_plane_index(primary);
}
15698
/*
 * Read the current display hardware state (as programmed by the BIOS or
 * a previous driver instance) back into the driver's software state:
 * per-crtc pipe config and minimum pixel clock, primary plane
 * visibility, shared DPLL usage, encoder->pipe links, connector->encoder
 * links (including connector/encoder masks on the crtc state), and
 * finally the crtc modes and vblank timestamping constants.
 */
static void intel_modeset_readout_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	struct intel_connector *connector;
	int i;

	dev_priv->active_crtcs = 0;

	for_each_intel_crtc(dev, crtc) {
		struct intel_crtc_state *crtc_state = crtc->config;
		int pixclk = 0;

		/* Throw away any stale state and rebuild from scratch. */
		__drm_atomic_helper_crtc_destroy_state(&crtc->base, &crtc_state->base);
		memset(crtc_state, 0, sizeof(*crtc_state));
		crtc_state->base.crtc = &crtc->base;

		crtc_state->base.active = crtc_state->base.enable =
			dev_priv->display.get_pipe_config(crtc, crtc_state);

		crtc->base.enabled = crtc_state->base.enable;
		crtc->active = crtc_state->base.active;

		if (crtc_state->base.active) {
			dev_priv->active_crtcs |= 1 << crtc->pipe;

			if (IS_BROADWELL(dev_priv)) {
				pixclk = ilk_pipe_pixel_rate(crtc_state);

				/* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
				if (crtc_state->ips_enabled)
					pixclk = DIV_ROUND_UP(pixclk * 100, 95);
			} else if (IS_VALLEYVIEW(dev_priv) ||
				   IS_CHERRYVIEW(dev_priv) ||
				   IS_BROXTON(dev_priv))
				pixclk = crtc_state->base.adjusted_mode.crtc_clock;
			else
				WARN_ON(dev_priv->display.modeset_calc_cdclk);
		}

		dev_priv->min_pixclk[crtc->pipe] = pixclk;

		readout_plane_state(crtc);

		DRM_DEBUG_KMS("[CRTC:%d] hw state readout: %s\n",
			      crtc->base.base.id,
			      crtc->active ? "enabled" : "disabled");
	}

	/* Read back shared DPLL state and which crtcs use each PLL. */
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		pll->on = pll->funcs.get_hw_state(dev_priv, pll,
						  &pll->config.hw_state);
		pll->config.crtc_mask = 0;
		for_each_intel_crtc(dev, crtc) {
			if (crtc->active && crtc->config->shared_dpll == pll)
				pll->config.crtc_mask |= 1 << crtc->pipe;
		}
		pll->active_mask = pll->config.crtc_mask;

		DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
			      pll->name, pll->config.crtc_mask, pll->on);
	}

	/* Link each active encoder to the crtc of the pipe driving it. */
	for_each_intel_encoder(dev, encoder) {
		pipe = 0;

		if (encoder->get_hw_state(encoder, &pipe)) {
			crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
			encoder->base.crtc = &crtc->base;
			encoder->get_config(encoder, crtc->config);
		} else {
			encoder->base.crtc = NULL;
		}

		DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
			      encoder->base.base.id,
			      encoder->base.name,
			      encoder->base.crtc ? "enabled" : "disabled",
			      pipe_name(pipe));
	}

	/* Link each connector to its encoder and update the crtc masks. */
	for_each_intel_connector(dev, connector) {
		if (connector->get_hw_state(connector)) {
			connector->base.dpms = DRM_MODE_DPMS_ON;

			encoder = connector->encoder;
			connector->base.encoder = &encoder->base;

			if (encoder->base.crtc &&
			    encoder->base.crtc->state->active) {
				/*
				 * This has to be done during hardware readout
				 * because anything calling .crtc_disable may
				 * rely on the connector_mask being accurate.
				 */
				encoder->base.crtc->state->connector_mask |=
					1 << drm_connector_index(&connector->base);
				encoder->base.crtc->state->encoder_mask |=
					1 << drm_encoder_index(&encoder->base);
			}

		} else {
			connector->base.dpms = DRM_MODE_DPMS_OFF;
			connector->base.encoder = NULL;
		}
		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
			      connector->base.base.id,
			      connector->base.name,
			      connector->base.encoder ? "enabled" : "disabled");
	}

	for_each_intel_crtc(dev, crtc) {
		crtc->base.hwmode = crtc->config->base.adjusted_mode;

		memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
		if (crtc->base.state->active) {
			intel_mode_from_pipe_config(&crtc->base.mode, crtc->config);
			intel_mode_from_pipe_config(&crtc->base.state->adjusted_mode, crtc->config);
			WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode));

			/*
			 * The initial mode needs to be set in order to keep
			 * the atomic core happy. It wants a valid mode if the
			 * crtc's enabled, so we do the above call.
			 *
			 * At this point some state updated by the connectors
			 * in their ->detect() callback has not run yet, so
			 * no recalculation can be done yet.
			 *
			 * Even if we could do a recalculation and modeset
			 * right now it would cause a double modeset if
			 * fbdev or userspace chooses a different initial mode.
			 *
			 * If that happens, someone indicated they wanted a
			 * mode change, which means it's safe to do a full
			 * recalculation.
			 */
			crtc->base.state->mode.private_flags = I915_MODE_FLAG_INHERITED;

			drm_calc_timestamping_constants(&crtc->base, &crtc->base.hwmode);
			update_scanline_offset(crtc);
		}

		intel_pipe_config_sanity_check(dev_priv, crtc->config);
	}
}
15849
043e9bda
ML
/* Scan out the current hw modeset state,
 * and sanitizes it to the current state
 */
static void
intel_modeset_setup_hw_state(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;
	struct intel_crtc *crtc;
	struct intel_encoder *encoder;
	int i;

	/* First take a snapshot of what the hardware is actually doing. */
	intel_modeset_readout_hw_state(dev);

	/* HW state is read out, now we need to sanitize this mess. */
	for_each_intel_encoder(dev, encoder) {
		intel_sanitize_encoder(encoder);
	}

	/* Sanitize crtcs after encoders: crtc fixes may depend on encoder state. */
	for_each_pipe(dev_priv, pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
		intel_sanitize_crtc(crtc);
		intel_dump_pipe_config(crtc, crtc->config,
				       "[setup_hw_state]");
	}

	intel_modeset_update_connector_atomic_state(dev);

	/* Turn off any shared DPLL that is on but has no active users. */
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		if (!pll->on || pll->active_mask)
			continue;

		DRM_DEBUG_KMS("%s enabled but not in use, disabling\n", pll->name);

		pll->funcs.disable(dev_priv, pll);
		pll->on = false;
	}

	/* Read out the current watermark state, per-platform. */
	if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
		vlv_wm_get_hw_state(dev);
	else if (IS_GEN9(dev))
		skl_wm_get_hw_state(dev);
	else if (HAS_PCH_SPLIT(dev))
		ilk_wm_get_hw_state(dev);

	/*
	 * Power domains for active crtcs should already be held at this
	 * point; any non-zero put_domains here is unexpected (WARN) and
	 * gets dropped to rebalance the refcounts.
	 */
	for_each_intel_crtc(dev, crtc) {
		unsigned long put_domains;

		put_domains = modeset_get_crtc_power_domains(&crtc->base, crtc->config);
		if (WARN_ON(put_domains))
			modeset_put_power_domains(dev_priv, put_domains);
	}
	intel_display_set_init_power(dev_priv, false);

	intel_fbc_init_pipe_state(dev_priv);
}
7d0bc1ea 15908
043e9bda
ML
/*
 * Restore the display to the atomic state stashed in
 * dev_priv->modeset_restore_state (if any), after re-reading and
 * sanitizing the current hardware state.  Retries the whole locking
 * sequence on -EDEADLK via drm_modeset_backoff().
 */
void intel_display_resume(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_atomic_state *state = dev_priv->modeset_restore_state;
	struct drm_modeset_acquire_ctx ctx;
	int ret;
	bool setup = false;	/* hw state readout is done at most once across retries */

	dev_priv->modeset_restore_state = NULL;

	/*
	 * This is a cludge because with real atomic modeset mode_config.mutex
	 * won't be taken. Unfortunately some probed state like
	 * audio_codec_enable is still protected by mode_config.mutex, so lock
	 * it here for now.
	 */
	mutex_lock(&dev->mode_config.mutex);
	drm_modeset_acquire_init(&ctx, 0);

retry:
	ret = drm_modeset_lock_all_ctx(dev, &ctx);

	if (ret == 0 && !setup) {
		setup = true;

		intel_modeset_setup_hw_state(dev);
		i915_redisable_vga(dev);
	}

	if (ret == 0 && state) {
		struct drm_crtc_state *crtc_state;
		struct drm_crtc *crtc;
		int i;

		state->acquire_ctx = &ctx;

		/* ignore any reset values/BIOS leftovers in the WM registers */
		to_intel_atomic_state(state)->skip_intermediate_wm = true;

		for_each_crtc_in_state(state, crtc, crtc_state, i) {
			/*
			 * Force recalculation even if we restore
			 * current state. With fast modeset this may not result
			 * in a modeset when the state is compatible.
			 */
			crtc_state->mode_changed = true;
		}

		ret = drm_atomic_commit(state);
	}

	if (ret == -EDEADLK) {
		drm_modeset_backoff(&ctx);
		goto retry;
	}

	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);
	mutex_unlock(&dev->mode_config.mutex);

	if (ret) {
		/* On commit failure the state was not consumed; free it here. */
		DRM_ERROR("Restoring old state failed with %i\n", ret);
		drm_atomic_state_free(state);
	}
}
15974
/*
 * Late modeset init that needs GEM up: GT power-save init, hw init,
 * overlay setup, and pinning of any boot framebuffers taken over from
 * the BIOS/firmware.
 */
void intel_modeset_gem_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_crtc *c;
	struct drm_i915_gem_object *obj;
	int ret;

	intel_init_gt_powersave(dev_priv);

	intel_modeset_init_hw(dev);

	intel_setup_overlay(dev_priv);

	/*
	 * Make sure any fbs we allocated at startup are properly
	 * pinned & fenced. When we do the allocation it's too early
	 * for this.
	 */
	for_each_crtc(dev, c) {
		obj = intel_fb_obj(c->primary->fb);
		if (obj == NULL)
			continue;

		mutex_lock(&dev->struct_mutex);
		ret = intel_pin_and_fence_fb_obj(c->primary->fb,
						 c->primary->state->rotation);
		mutex_unlock(&dev->struct_mutex);
		if (ret) {
			/*
			 * Pinning failed: drop the boot fb from the primary
			 * plane and scrub both legacy and atomic plane state
			 * so the crtc no longer references it.
			 */
			DRM_ERROR("failed to pin boot fb on pipe %d\n",
				  to_intel_crtc(c)->pipe);
			drm_framebuffer_unreference(c->primary->fb);
			c->primary->fb = NULL;
			c->primary->crtc = c->primary->state->crtc = NULL;
			update_state_fb(c->primary);
			c->state->plane_mask &= ~(1 << drm_plane_index(c->primary));
		}
	}

	intel_backlight_register(dev);
}
16015
4932e2c3
ID
16016void intel_connector_unregister(struct intel_connector *intel_connector)
16017{
16018 struct drm_connector *connector = &intel_connector->base;
16019
16020 intel_panel_destroy_backlight(connector);
34ea3d38 16021 drm_connector_unregister(connector);
4932e2c3
ID
16022}
16023
79e53945
JB
/*
 * Full modeset teardown.  Ordering is deliberate: interrupts and
 * polling are shut down first, connectors are unregistered before
 * drm_mode_config_cleanup(), and GT/GMBUS teardown comes last.
 */
void intel_modeset_cleanup(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_connector *connector;

	intel_disable_gt_powersave(dev_priv);

	intel_backlight_unregister(dev);

	/*
	 * Interrupts and polling as the first thing to avoid creating havoc.
	 * Too much stuff here (turning of connectors, ...) would
	 * experience fancy races otherwise.
	 */
	intel_irq_uninstall(dev_priv);

	/*
	 * Due to the hpd irq storm handling the hotplug work can re-arm the
	 * poll handlers. Hence disable polling after hpd handling is shut down.
	 */
	drm_kms_helper_poll_fini(dev);

	intel_unregister_dsm_handler();

	intel_fbc_global_disable(dev_priv);

	/* flush any delayed tasks or pending work */
	flush_scheduled_work();

	/* destroy the backlight and sysfs files before encoders/connectors */
	for_each_intel_connector(dev, connector)
		connector->unregister(connector);

	drm_mode_config_cleanup(dev);

	intel_cleanup_overlay(dev_priv);

	intel_cleanup_gt_powersave(dev_priv);

	intel_teardown_gmbus(dev);
}
16065
f1c79df3
ZW
16066/*
16067 * Return which encoder is currently attached for connector.
16068 */
df0e9248 16069struct drm_encoder *intel_best_encoder(struct drm_connector *connector)
79e53945 16070{
df0e9248
CW
16071 return &intel_attached_encoder(connector)->base;
16072}
f1c79df3 16073
df0e9248
CW
16074void intel_connector_attach_encoder(struct intel_connector *connector,
16075 struct intel_encoder *encoder)
16076{
16077 connector->encoder = encoder;
16078 drm_mode_connector_attach_encoder(&connector->base,
16079 &encoder->base);
79e53945 16080}
28d52043
DA
16081
16082/*
16083 * set vga decode state - true == enable VGA decode
16084 */
16085int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
16086{
16087 struct drm_i915_private *dev_priv = dev->dev_private;
a885b3cc 16088 unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
28d52043
DA
16089 u16 gmch_ctrl;
16090
75fa041d
CW
16091 if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) {
16092 DRM_ERROR("failed to read control word\n");
16093 return -EIO;
16094 }
16095
c0cc8a55
CW
16096 if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !state)
16097 return 0;
16098
28d52043
DA
16099 if (state)
16100 gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
16101 else
16102 gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
75fa041d
CW
16103
16104 if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) {
16105 DRM_ERROR("failed to write control word\n");
16106 return -EIO;
16107 }
16108
28d52043
DA
16109 return 0;
16110}
c4a1d9e4 16111
/*
 * Snapshot of display registers captured at error time by
 * intel_display_capture_error_state() and formatted later by
 * intel_display_print_error_state().
 */
struct intel_display_error_state {

	u32 power_well_driver;

	int num_transcoders;	/* number of valid entries in transcoder[] */

	struct intel_cursor_error_state {
		u32 control;
		u32 position;
		u32 base;
		u32 size;
	} cursor[I915_MAX_PIPES];

	struct intel_pipe_error_state {
		bool power_domain_on;	/* registers below only captured if set */
		u32 source;
		u32 stat;
	} pipe[I915_MAX_PIPES];

	struct intel_plane_error_state {
		u32 control;
		u32 stride;
		u32 size;
		u32 pos;
		u32 addr;
		u32 surface;
		u32 tile_offset;
	} plane[I915_MAX_PIPES];

	struct intel_transcoder_error_state {
		bool power_domain_on;	/* registers below only captured if set */
		enum transcoder cpu_transcoder;

		u32 conf;

		u32 htotal;
		u32 hblank;
		u32 hsync;
		u32 vtotal;
		u32 vblank;
		u32 vsync;
	} transcoder[4];	/* A, B, C and eDP; see transcoders[] in capture */
};
16155
/*
 * Capture a snapshot of the display hardware state for error reporting.
 * Returns a kzalloc'd snapshot (caller owns it) or NULL if there are no
 * pipes or the allocation fails.  Register reads are gated on the
 * relevant power domain being enabled.
 */
struct intel_display_error_state *
intel_display_capture_error_state(struct drm_i915_private *dev_priv)
{
	struct intel_display_error_state *error;
	int transcoders[] = {
		TRANSCODER_A,
		TRANSCODER_B,
		TRANSCODER_C,
		TRANSCODER_EDP,
	};
	int i;

	if (INTEL_INFO(dev_priv)->num_pipes == 0)
		return NULL;

	/* GFP_ATOMIC: presumably reachable from atomic error context — TODO confirm */
	error = kzalloc(sizeof(*error), GFP_ATOMIC);
	if (error == NULL)
		return NULL;

	if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
		error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);

	for_each_pipe(dev_priv, i) {
		error->pipe[i].power_domain_on =
			__intel_display_power_is_enabled(dev_priv,
							 POWER_DOMAIN_PIPE(i));
		/* Don't touch registers of pipes whose power domain is off. */
		if (!error->pipe[i].power_domain_on)
			continue;

		error->cursor[i].control = I915_READ(CURCNTR(i));
		error->cursor[i].position = I915_READ(CURPOS(i));
		error->cursor[i].base = I915_READ(CURBASE(i));

		error->plane[i].control = I915_READ(DSPCNTR(i));
		error->plane[i].stride = I915_READ(DSPSTRIDE(i));
		if (INTEL_GEN(dev_priv) <= 3) {
			error->plane[i].size = I915_READ(DSPSIZE(i));
			error->plane[i].pos = I915_READ(DSPPOS(i));
		}
		if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
			error->plane[i].addr = I915_READ(DSPADDR(i));
		if (INTEL_GEN(dev_priv) >= 4) {
			error->plane[i].surface = I915_READ(DSPSURF(i));
			error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
		}

		error->pipe[i].source = I915_READ(PIPESRC(i));

		if (HAS_GMCH_DISPLAY(dev_priv))
			error->pipe[i].stat = I915_READ(PIPESTAT(i));
	}

	/* Note: this does not include DSI transcoders. */
	error->num_transcoders = INTEL_INFO(dev_priv)->num_pipes;
	if (HAS_DDI(dev_priv))
		error->num_transcoders++; /* Account for eDP. */

	for (i = 0; i < error->num_transcoders; i++) {
		enum transcoder cpu_transcoder = transcoders[i];

		error->transcoder[i].power_domain_on =
			__intel_display_power_is_enabled(dev_priv,
				POWER_DOMAIN_TRANSCODER(cpu_transcoder));
		/* Likewise skip transcoders whose power domain is off. */
		if (!error->transcoder[i].power_domain_on)
			continue;

		error->transcoder[i].cpu_transcoder = cpu_transcoder;

		error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
		error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
		error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
		error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
		error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
		error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
		error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
	}

	return error;
}
16235
edc3d884
MK
/* Shorthand for appending formatted text to the error-state buffer. */
#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)

/*
 * Format a display error snapshot (from
 * intel_display_capture_error_state()) into the error state buffer @m.
 * A NULL @error is a no-op.
 */
void
intel_display_print_error_state(struct drm_i915_error_state_buf *m,
				struct drm_device *dev,
				struct intel_display_error_state *error)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int i;

	if (!error)
		return;

	err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes);
	if (IS_HASWELL(dev) || IS_BROADWELL(dev))
		err_printf(m, "PWR_WELL_CTL2: %08x\n",
			   error->power_well_driver);
	for_each_pipe(dev_priv, i) {
		err_printf(m, "Pipe [%d]:\n", i);
		err_printf(m, "  Power: %s\n",
			   onoff(error->pipe[i].power_domain_on));
		err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
		err_printf(m, "  STAT: %08x\n", error->pipe[i].stat);

		err_printf(m, "Plane [%d]:\n", i);
		err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
		err_printf(m, "  STRIDE: %08x\n", error->plane[i].stride);
		/* Gen-gated fields mirror the conditions used at capture time. */
		if (INTEL_INFO(dev)->gen <= 3) {
			err_printf(m, "  SIZE: %08x\n", error->plane[i].size);
			err_printf(m, "  POS: %08x\n", error->plane[i].pos);
		}
		if (INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev))
			err_printf(m, "  ADDR: %08x\n", error->plane[i].addr);
		if (INTEL_INFO(dev)->gen >= 4) {
			err_printf(m, "  SURF: %08x\n", error->plane[i].surface);
			err_printf(m, "  TILEOFF: %08x\n", error->plane[i].tile_offset);
		}

		err_printf(m, "Cursor [%d]:\n", i);
		err_printf(m, "  CNTR: %08x\n", error->cursor[i].control);
		err_printf(m, "  POS: %08x\n", error->cursor[i].position);
		err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
	}

	for (i = 0; i < error->num_transcoders; i++) {
		err_printf(m, "CPU transcoder: %s\n",
			   transcoder_name(error->transcoder[i].cpu_transcoder));
		err_printf(m, "  Power: %s\n",
			   onoff(error->transcoder[i].power_domain_on));
		err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
		err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
		err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
		err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
		err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
		err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
		err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
	}
}