]> git.proxmox.com Git - mirror_ubuntu-focal-kernel.git/blame - drivers/gpu/drm/i915/intel_display.c
drm/i915/dsi: Use a fuzzy check for burst mode clock check
[mirror_ubuntu-focal-kernel.git] / drivers / gpu / drm / i915 / intel_display.c
CommitLineData
79e53945
JB
1/*
2 * Copyright © 2006-2007 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
21 * DEALINGS IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 */
26
27#include <linux/i2c.h>
d0e93599
SR
28#include <linux/input.h>
29#include <linux/intel-iommu.h>
7662c8bd 30#include <linux/kernel.h>
d0e93599
SR
31#include <linux/module.h>
32#include <linux/reservation.h>
5a0e3ad6 33#include <linux/slab.h>
9cce37f4 34#include <linux/vgaarb.h>
d0e93599 35
319c1d42 36#include <drm/drm_atomic.h>
c196e1d6 37#include <drm/drm_atomic_helper.h>
d0e93599 38#include <drm/drm_atomic_uapi.h>
760285e7 39#include <drm/drm_dp_helper.h>
d0e93599
SR
40#include <drm/drm_edid.h>
41#include <drm/drm_fourcc.h>
465c120c 42#include <drm/drm_plane_helper.h>
fcd70cd3 43#include <drm/drm_probe_helper.h>
465c120c 44#include <drm/drm_rect.h>
d0e93599
SR
45#include <drm/i915_drm.h>
46
47#include "i915_drv.h"
d0e93599 48#include "i915_trace.h"
4e49d35c 49#include "intel_acpi.h"
12392a74 50#include "intel_atomic.h"
56dabc93 51#include "intel_atomic_plane.h"
c457d9cf 52#include "intel_bw.h"
78c61320 53#include "intel_color.h"
e7674ef6 54#include "intel_cdclk.h"
d2ee2e8a 55#include "intel_crt.h"
fdc24cf3 56#include "intel_ddi.h"
27fec1f9 57#include "intel_dp.h"
d0e93599
SR
58#include "intel_drv.h"
59#include "intel_dsi.h"
75a4639a 60#include "intel_dvo.h"
98afa316 61#include "intel_fbc.h"
6dfccb95 62#include "intel_fbdev.h"
8834e365 63#include "intel_fifo_underrun.h"
d0e93599 64#include "intel_frontbuffer.h"
3ce2ea65 65#include "intel_gmbus.h"
408bd917 66#include "intel_hdcp.h"
0550691d 67#include "intel_hdmi.h"
dbeb38d9 68#include "intel_hotplug.h"
42406fdc 69#include "intel_lvds.h"
05ca9306 70#include "intel_overlay.h"
c6a35b9c 71#include "intel_pipe_crc.h"
696173b0 72#include "intel_pm.h"
55367a27 73#include "intel_psr.h"
220b92bf 74#include "intel_quirks.h"
596fee14 75#include "intel_sdvo.h"
56c5098f 76#include "intel_sideband.h"
f9a79f9a 77#include "intel_sprite.h"
efe57eea 78#include "intel_tv.h"
b375d0ef 79#include "intel_vdsc.h"
79e53945 80
465c120c 81/* Primary plane formats for gen <= 3 */
ba3f4d0a 82static const u32 i8xx_primary_formats[] = {
67fe7dc5
DL
83 DRM_FORMAT_C8,
84 DRM_FORMAT_RGB565,
465c120c 85 DRM_FORMAT_XRGB1555,
67fe7dc5 86 DRM_FORMAT_XRGB8888,
465c120c
MR
87};
88
89/* Primary plane formats for gen >= 4 */
ba3f4d0a 90static const u32 i965_primary_formats[] = {
6c0fd451
DL
91 DRM_FORMAT_C8,
92 DRM_FORMAT_RGB565,
93 DRM_FORMAT_XRGB8888,
94 DRM_FORMAT_XBGR8888,
95 DRM_FORMAT_XRGB2101010,
96 DRM_FORMAT_XBGR2101010,
97};
98
ba3f4d0a 99static const u64 i9xx_format_modifiers[] = {
714244e2
BW
100 I915_FORMAT_MOD_X_TILED,
101 DRM_FORMAT_MOD_LINEAR,
102 DRM_FORMAT_MOD_INVALID
103};
104
3d7d6510 105/* Cursor formats */
ba3f4d0a 106static const u32 intel_cursor_formats[] = {
3d7d6510
MR
107 DRM_FORMAT_ARGB8888,
108};
109
ba3f4d0a 110static const u64 cursor_format_modifiers[] = {
714244e2
BW
111 DRM_FORMAT_MOD_LINEAR,
112 DRM_FORMAT_MOD_INVALID
113};
114
f1f644dc 115static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
5cec258b 116 struct intel_crtc_state *pipe_config);
18442d08 117static void ironlake_pch_clock_get(struct intel_crtc *crtc,
5cec258b 118 struct intel_crtc_state *pipe_config);
f1f644dc 119
24dbf51a
CW
120static int intel_framebuffer_init(struct intel_framebuffer *ifb,
121 struct drm_i915_gem_object *obj,
122 struct drm_mode_fb_cmd2 *mode_cmd);
44fe7f35
ML
123static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state);
124static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state);
4c354754
ML
125static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
126 const struct intel_link_m_n *m_n,
127 const struct intel_link_m_n *m2_n2);
fdf73510
ML
128static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state);
129static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state);
130static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state);
9b11215e 131static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state);
d288f65f 132static void vlv_prepare_pll(struct intel_crtc *crtc,
5cec258b 133 const struct intel_crtc_state *pipe_config);
d288f65f 134static void chv_prepare_pll(struct intel_crtc *crtc,
5cec258b 135 const struct intel_crtc_state *pipe_config);
c856dbc8
MN
136static void intel_begin_crtc_commit(struct intel_atomic_state *, struct intel_crtc *);
137static void intel_finish_crtc_commit(struct intel_atomic_state *, struct intel_crtc *);
1c74eeaf
NM
138static void intel_crtc_init_scalers(struct intel_crtc *crtc,
139 struct intel_crtc_state *crtc_state);
b2562712
ML
140static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state);
141static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state);
142static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state);
aecd36b8
VS
143static void intel_modeset_setup_hw_state(struct drm_device *dev,
144 struct drm_modeset_acquire_ctx *ctx);
2622a081 145static void intel_pre_disable_primary_noatomic(struct drm_crtc *crtc);
e7457a9a 146
d4906093 147struct intel_limit {
4c5def93
ACO
148 struct {
149 int min, max;
150 } dot, vco, n, m, m1, m2, p, p1;
151
152 struct {
153 int dot_limit;
154 int p2_slow, p2_fast;
155 } p2;
d4906093 156};
79e53945 157
bfa7df01 158/* returns HPLL frequency in kHz */
49cd97a3 159int vlv_get_hpll_vco(struct drm_i915_private *dev_priv)
bfa7df01
VS
160{
161 int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 };
162
163 /* Obtain SKU information */
bfa7df01
VS
164 hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) &
165 CCK_FUSE_HPLL_FREQ_MASK;
bfa7df01
VS
166
167 return vco_freq[hpll_freq] * 1000;
168}
169
c30fec65
VS
170int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
171 const char *name, u32 reg, int ref_freq)
bfa7df01
VS
172{
173 u32 val;
174 int divider;
175
bfa7df01 176 val = vlv_cck_read(dev_priv, reg);
bfa7df01
VS
177 divider = val & CCK_FREQUENCY_VALUES;
178
179 WARN((val & CCK_FREQUENCY_STATUS) !=
180 (divider << CCK_FREQUENCY_STATUS_SHIFT),
181 "%s change in progress\n", name);
182
c30fec65
VS
183 return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
184}
185
7ff89ca2
VS
186int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
187 const char *name, u32 reg)
c30fec65 188{
337fa6e0
CW
189 int hpll;
190
191 vlv_cck_get(dev_priv);
192
c30fec65 193 if (dev_priv->hpll_freq == 0)
49cd97a3 194 dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv);
c30fec65 195
337fa6e0
CW
196 hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq);
197
198 vlv_cck_put(dev_priv);
199
200 return hpll;
bfa7df01
VS
201}
202
bfa7df01
VS
203static void intel_update_czclk(struct drm_i915_private *dev_priv)
204{
666a4537 205 if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)))
bfa7df01
VS
206 return;
207
208 dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
209 CCK_CZ_CLOCK_CONTROL);
210
211 DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
212}
213
021357ac 214static inline u32 /* units of 100MHz */
21a727b3
VS
215intel_fdi_link_freq(struct drm_i915_private *dev_priv,
216 const struct intel_crtc_state *pipe_config)
021357ac 217{
21a727b3
VS
218 if (HAS_DDI(dev_priv))
219 return pipe_config->port_clock; /* SPLL */
e3b247da 220 else
58ecd9d5 221 return dev_priv->fdi_pll_freq;
021357ac
CW
222}
223
1b6f4958 224static const struct intel_limit intel_limits_i8xx_dac = {
0206e353 225 .dot = { .min = 25000, .max = 350000 },
9c333719 226 .vco = { .min = 908000, .max = 1512000 },
91dbe5fb 227 .n = { .min = 2, .max = 16 },
0206e353
AJ
228 .m = { .min = 96, .max = 140 },
229 .m1 = { .min = 18, .max = 26 },
230 .m2 = { .min = 6, .max = 16 },
231 .p = { .min = 4, .max = 128 },
232 .p1 = { .min = 2, .max = 33 },
273e27ca
EA
233 .p2 = { .dot_limit = 165000,
234 .p2_slow = 4, .p2_fast = 2 },
e4b36699
KP
235};
236
1b6f4958 237static const struct intel_limit intel_limits_i8xx_dvo = {
5d536e28 238 .dot = { .min = 25000, .max = 350000 },
9c333719 239 .vco = { .min = 908000, .max = 1512000 },
91dbe5fb 240 .n = { .min = 2, .max = 16 },
5d536e28
DV
241 .m = { .min = 96, .max = 140 },
242 .m1 = { .min = 18, .max = 26 },
243 .m2 = { .min = 6, .max = 16 },
244 .p = { .min = 4, .max = 128 },
245 .p1 = { .min = 2, .max = 33 },
246 .p2 = { .dot_limit = 165000,
247 .p2_slow = 4, .p2_fast = 4 },
248};
249
1b6f4958 250static const struct intel_limit intel_limits_i8xx_lvds = {
0206e353 251 .dot = { .min = 25000, .max = 350000 },
9c333719 252 .vco = { .min = 908000, .max = 1512000 },
91dbe5fb 253 .n = { .min = 2, .max = 16 },
0206e353
AJ
254 .m = { .min = 96, .max = 140 },
255 .m1 = { .min = 18, .max = 26 },
256 .m2 = { .min = 6, .max = 16 },
257 .p = { .min = 4, .max = 128 },
258 .p1 = { .min = 1, .max = 6 },
273e27ca
EA
259 .p2 = { .dot_limit = 165000,
260 .p2_slow = 14, .p2_fast = 7 },
e4b36699 261};
273e27ca 262
1b6f4958 263static const struct intel_limit intel_limits_i9xx_sdvo = {
0206e353
AJ
264 .dot = { .min = 20000, .max = 400000 },
265 .vco = { .min = 1400000, .max = 2800000 },
266 .n = { .min = 1, .max = 6 },
267 .m = { .min = 70, .max = 120 },
4f7dfb67
PJ
268 .m1 = { .min = 8, .max = 18 },
269 .m2 = { .min = 3, .max = 7 },
0206e353
AJ
270 .p = { .min = 5, .max = 80 },
271 .p1 = { .min = 1, .max = 8 },
273e27ca
EA
272 .p2 = { .dot_limit = 200000,
273 .p2_slow = 10, .p2_fast = 5 },
e4b36699
KP
274};
275
1b6f4958 276static const struct intel_limit intel_limits_i9xx_lvds = {
0206e353
AJ
277 .dot = { .min = 20000, .max = 400000 },
278 .vco = { .min = 1400000, .max = 2800000 },
279 .n = { .min = 1, .max = 6 },
280 .m = { .min = 70, .max = 120 },
53a7d2d1
PJ
281 .m1 = { .min = 8, .max = 18 },
282 .m2 = { .min = 3, .max = 7 },
0206e353
AJ
283 .p = { .min = 7, .max = 98 },
284 .p1 = { .min = 1, .max = 8 },
273e27ca
EA
285 .p2 = { .dot_limit = 112000,
286 .p2_slow = 14, .p2_fast = 7 },
e4b36699
KP
287};
288
273e27ca 289
1b6f4958 290static const struct intel_limit intel_limits_g4x_sdvo = {
273e27ca
EA
291 .dot = { .min = 25000, .max = 270000 },
292 .vco = { .min = 1750000, .max = 3500000},
293 .n = { .min = 1, .max = 4 },
294 .m = { .min = 104, .max = 138 },
295 .m1 = { .min = 17, .max = 23 },
296 .m2 = { .min = 5, .max = 11 },
297 .p = { .min = 10, .max = 30 },
298 .p1 = { .min = 1, .max = 3},
299 .p2 = { .dot_limit = 270000,
300 .p2_slow = 10,
301 .p2_fast = 10
044c7c41 302 },
e4b36699
KP
303};
304
1b6f4958 305static const struct intel_limit intel_limits_g4x_hdmi = {
273e27ca
EA
306 .dot = { .min = 22000, .max = 400000 },
307 .vco = { .min = 1750000, .max = 3500000},
308 .n = { .min = 1, .max = 4 },
309 .m = { .min = 104, .max = 138 },
310 .m1 = { .min = 16, .max = 23 },
311 .m2 = { .min = 5, .max = 11 },
312 .p = { .min = 5, .max = 80 },
313 .p1 = { .min = 1, .max = 8},
314 .p2 = { .dot_limit = 165000,
315 .p2_slow = 10, .p2_fast = 5 },
e4b36699
KP
316};
317
1b6f4958 318static const struct intel_limit intel_limits_g4x_single_channel_lvds = {
273e27ca
EA
319 .dot = { .min = 20000, .max = 115000 },
320 .vco = { .min = 1750000, .max = 3500000 },
321 .n = { .min = 1, .max = 3 },
322 .m = { .min = 104, .max = 138 },
323 .m1 = { .min = 17, .max = 23 },
324 .m2 = { .min = 5, .max = 11 },
325 .p = { .min = 28, .max = 112 },
326 .p1 = { .min = 2, .max = 8 },
327 .p2 = { .dot_limit = 0,
328 .p2_slow = 14, .p2_fast = 14
044c7c41 329 },
e4b36699
KP
330};
331
1b6f4958 332static const struct intel_limit intel_limits_g4x_dual_channel_lvds = {
273e27ca
EA
333 .dot = { .min = 80000, .max = 224000 },
334 .vco = { .min = 1750000, .max = 3500000 },
335 .n = { .min = 1, .max = 3 },
336 .m = { .min = 104, .max = 138 },
337 .m1 = { .min = 17, .max = 23 },
338 .m2 = { .min = 5, .max = 11 },
339 .p = { .min = 14, .max = 42 },
340 .p1 = { .min = 2, .max = 6 },
341 .p2 = { .dot_limit = 0,
342 .p2_slow = 7, .p2_fast = 7
044c7c41 343 },
e4b36699
KP
344};
345
1b6f4958 346static const struct intel_limit intel_limits_pineview_sdvo = {
0206e353
AJ
347 .dot = { .min = 20000, .max = 400000},
348 .vco = { .min = 1700000, .max = 3500000 },
273e27ca 349 /* Pineview's Ncounter is a ring counter */
0206e353
AJ
350 .n = { .min = 3, .max = 6 },
351 .m = { .min = 2, .max = 256 },
273e27ca 352 /* Pineview only has one combined m divider, which we treat as m2. */
0206e353
AJ
353 .m1 = { .min = 0, .max = 0 },
354 .m2 = { .min = 0, .max = 254 },
355 .p = { .min = 5, .max = 80 },
356 .p1 = { .min = 1, .max = 8 },
273e27ca
EA
357 .p2 = { .dot_limit = 200000,
358 .p2_slow = 10, .p2_fast = 5 },
e4b36699
KP
359};
360
1b6f4958 361static const struct intel_limit intel_limits_pineview_lvds = {
0206e353
AJ
362 .dot = { .min = 20000, .max = 400000 },
363 .vco = { .min = 1700000, .max = 3500000 },
364 .n = { .min = 3, .max = 6 },
365 .m = { .min = 2, .max = 256 },
366 .m1 = { .min = 0, .max = 0 },
367 .m2 = { .min = 0, .max = 254 },
368 .p = { .min = 7, .max = 112 },
369 .p1 = { .min = 1, .max = 8 },
273e27ca
EA
370 .p2 = { .dot_limit = 112000,
371 .p2_slow = 14, .p2_fast = 14 },
e4b36699
KP
372};
373
273e27ca
EA
374/* Ironlake / Sandybridge
375 *
376 * We calculate clock using (register_value + 2) for N/M1/M2, so here
377 * the range value for them is (actual_value - 2).
378 */
1b6f4958 379static const struct intel_limit intel_limits_ironlake_dac = {
273e27ca
EA
380 .dot = { .min = 25000, .max = 350000 },
381 .vco = { .min = 1760000, .max = 3510000 },
382 .n = { .min = 1, .max = 5 },
383 .m = { .min = 79, .max = 127 },
384 .m1 = { .min = 12, .max = 22 },
385 .m2 = { .min = 5, .max = 9 },
386 .p = { .min = 5, .max = 80 },
387 .p1 = { .min = 1, .max = 8 },
388 .p2 = { .dot_limit = 225000,
389 .p2_slow = 10, .p2_fast = 5 },
e4b36699
KP
390};
391
1b6f4958 392static const struct intel_limit intel_limits_ironlake_single_lvds = {
273e27ca
EA
393 .dot = { .min = 25000, .max = 350000 },
394 .vco = { .min = 1760000, .max = 3510000 },
395 .n = { .min = 1, .max = 3 },
396 .m = { .min = 79, .max = 118 },
397 .m1 = { .min = 12, .max = 22 },
398 .m2 = { .min = 5, .max = 9 },
399 .p = { .min = 28, .max = 112 },
400 .p1 = { .min = 2, .max = 8 },
401 .p2 = { .dot_limit = 225000,
402 .p2_slow = 14, .p2_fast = 14 },
b91ad0ec
ZW
403};
404
1b6f4958 405static const struct intel_limit intel_limits_ironlake_dual_lvds = {
273e27ca
EA
406 .dot = { .min = 25000, .max = 350000 },
407 .vco = { .min = 1760000, .max = 3510000 },
408 .n = { .min = 1, .max = 3 },
409 .m = { .min = 79, .max = 127 },
410 .m1 = { .min = 12, .max = 22 },
411 .m2 = { .min = 5, .max = 9 },
412 .p = { .min = 14, .max = 56 },
413 .p1 = { .min = 2, .max = 8 },
414 .p2 = { .dot_limit = 225000,
415 .p2_slow = 7, .p2_fast = 7 },
b91ad0ec
ZW
416};
417
273e27ca 418/* LVDS 100mhz refclk limits. */
1b6f4958 419static const struct intel_limit intel_limits_ironlake_single_lvds_100m = {
273e27ca
EA
420 .dot = { .min = 25000, .max = 350000 },
421 .vco = { .min = 1760000, .max = 3510000 },
422 .n = { .min = 1, .max = 2 },
423 .m = { .min = 79, .max = 126 },
424 .m1 = { .min = 12, .max = 22 },
425 .m2 = { .min = 5, .max = 9 },
426 .p = { .min = 28, .max = 112 },
0206e353 427 .p1 = { .min = 2, .max = 8 },
273e27ca
EA
428 .p2 = { .dot_limit = 225000,
429 .p2_slow = 14, .p2_fast = 14 },
b91ad0ec
ZW
430};
431
1b6f4958 432static const struct intel_limit intel_limits_ironlake_dual_lvds_100m = {
273e27ca
EA
433 .dot = { .min = 25000, .max = 350000 },
434 .vco = { .min = 1760000, .max = 3510000 },
435 .n = { .min = 1, .max = 3 },
436 .m = { .min = 79, .max = 126 },
437 .m1 = { .min = 12, .max = 22 },
438 .m2 = { .min = 5, .max = 9 },
439 .p = { .min = 14, .max = 42 },
0206e353 440 .p1 = { .min = 2, .max = 6 },
273e27ca
EA
441 .p2 = { .dot_limit = 225000,
442 .p2_slow = 7, .p2_fast = 7 },
4547668a
ZY
443};
444
1b6f4958 445static const struct intel_limit intel_limits_vlv = {
f01b7962
VS
446 /*
447 * These are the data rate limits (measured in fast clocks)
448 * since those are the strictest limits we have. The fast
449 * clock and actual rate limits are more relaxed, so checking
450 * them would make no difference.
451 */
452 .dot = { .min = 25000 * 5, .max = 270000 * 5 },
75e53986 453 .vco = { .min = 4000000, .max = 6000000 },
a0c4da24 454 .n = { .min = 1, .max = 7 },
a0c4da24
JB
455 .m1 = { .min = 2, .max = 3 },
456 .m2 = { .min = 11, .max = 156 },
b99ab663 457 .p1 = { .min = 2, .max = 3 },
5fdc9c49 458 .p2 = { .p2_slow = 2, .p2_fast = 20 }, /* slow=min, fast=max */
a0c4da24
JB
459};
460
1b6f4958 461static const struct intel_limit intel_limits_chv = {
ef9348c8
CML
462 /*
463 * These are the data rate limits (measured in fast clocks)
464 * since those are the strictest limits we have. The fast
465 * clock and actual rate limits are more relaxed, so checking
466 * them would make no difference.
467 */
468 .dot = { .min = 25000 * 5, .max = 540000 * 5},
17fe1021 469 .vco = { .min = 4800000, .max = 6480000 },
ef9348c8
CML
470 .n = { .min = 1, .max = 1 },
471 .m1 = { .min = 2, .max = 2 },
472 .m2 = { .min = 24 << 22, .max = 175 << 22 },
473 .p1 = { .min = 2, .max = 4 },
474 .p2 = { .p2_slow = 1, .p2_fast = 14 },
475};
476
1b6f4958 477static const struct intel_limit intel_limits_bxt = {
5ab7b0b7
ID
478 /* FIXME: find real dot limits */
479 .dot = { .min = 0, .max = INT_MAX },
e6292556 480 .vco = { .min = 4800000, .max = 6700000 },
5ab7b0b7
ID
481 .n = { .min = 1, .max = 1 },
482 .m1 = { .min = 2, .max = 2 },
483 /* FIXME: find real m2 limits */
484 .m2 = { .min = 2 << 22, .max = 255 << 22 },
485 .p1 = { .min = 2, .max = 4 },
486 .p2 = { .p2_slow = 1, .p2_fast = 20 },
487};
488
51eb1a1d 489/* WA Display #0827: Gen9:all */
c4a4efa9 490static void
2474028e 491skl_wa_827(struct drm_i915_private *dev_priv, int pipe, bool enable)
c4a4efa9 492{
c4a4efa9
VS
493 if (enable)
494 I915_WRITE(CLKGATE_DIS_PSL(pipe),
fa9d38f6 495 I915_READ(CLKGATE_DIS_PSL(pipe)) |
c4a4efa9
VS
496 DUPS1_GATING_DIS | DUPS2_GATING_DIS);
497 else
498 I915_WRITE(CLKGATE_DIS_PSL(pipe),
499 I915_READ(CLKGATE_DIS_PSL(pipe)) &
500 ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
501}
502
51eb1a1d
RS
503/* Wa_2006604312:icl */
504static void
505icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
506 bool enable)
507{
508 if (enable)
509 I915_WRITE(CLKGATE_DIS_PSL(pipe),
510 I915_READ(CLKGATE_DIS_PSL(pipe)) | DPFR_GATING_DIS);
511 else
512 I915_WRITE(CLKGATE_DIS_PSL(pipe),
513 I915_READ(CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS);
514}
515
cdba954e 516static bool
24f28450 517needs_modeset(const struct drm_crtc_state *state)
cdba954e 518{
fc596660 519 return drm_atomic_crtc_needs_modeset(state);
cdba954e
ACO
520}
521
dccbea3b
ID
522/*
523 * Platform specific helpers to calculate the port PLL loopback- (clock.m),
524 * and post-divider (clock.p) values, pre- (clock.vco) and post-divided fast
525 * (clock.dot) clock rates. This fast dot clock is fed to the port's IO logic.
526 * The helpers' return value is the rate of the clock that is fed to the
527 * display engine's pipe which can be the above fast dot clock rate or a
528 * divided-down version of it.
529 */
f2b115e6 530/* m1 is reserved as 0 in Pineview, n is a ring counter */
9e2c8475 531static int pnv_calc_dpll_params(int refclk, struct dpll *clock)
79e53945 532{
2177832f
SL
533 clock->m = clock->m2 + 2;
534 clock->p = clock->p1 * clock->p2;
ed5ca77e 535 if (WARN_ON(clock->n == 0 || clock->p == 0))
dccbea3b 536 return 0;
fb03ac01
VS
537 clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
538 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
dccbea3b
ID
539
540 return clock->dot;
2177832f
SL
541}
542
ba3f4d0a 543static u32 i9xx_dpll_compute_m(struct dpll *dpll)
7429e9d4
DV
544{
545 return 5 * (dpll->m1 + 2) + (dpll->m2 + 2);
546}
547
9e2c8475 548static int i9xx_calc_dpll_params(int refclk, struct dpll *clock)
2177832f 549{
7429e9d4 550 clock->m = i9xx_dpll_compute_m(clock);
79e53945 551 clock->p = clock->p1 * clock->p2;
ed5ca77e 552 if (WARN_ON(clock->n + 2 == 0 || clock->p == 0))
dccbea3b 553 return 0;
fb03ac01
VS
554 clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n + 2);
555 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
dccbea3b
ID
556
557 return clock->dot;
79e53945
JB
558}
559
9e2c8475 560static int vlv_calc_dpll_params(int refclk, struct dpll *clock)
589eca67
ID
561{
562 clock->m = clock->m1 * clock->m2;
563 clock->p = clock->p1 * clock->p2;
564 if (WARN_ON(clock->n == 0 || clock->p == 0))
dccbea3b 565 return 0;
589eca67
ID
566 clock->vco = DIV_ROUND_CLOSEST(refclk * clock->m, clock->n);
567 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
dccbea3b
ID
568
569 return clock->dot / 5;
589eca67
ID
570}
571
9e2c8475 572int chv_calc_dpll_params(int refclk, struct dpll *clock)
ef9348c8
CML
573{
574 clock->m = clock->m1 * clock->m2;
575 clock->p = clock->p1 * clock->p2;
576 if (WARN_ON(clock->n == 0 || clock->p == 0))
dccbea3b 577 return 0;
d492a29d 578 clock->vco = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(refclk, clock->m),
ba3f4d0a 579 clock->n << 22);
ef9348c8 580 clock->dot = DIV_ROUND_CLOSEST(clock->vco, clock->p);
dccbea3b
ID
581
582 return clock->dot / 5;
ef9348c8
CML
583}
584
7c04d1d9 585#define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0)
c38c1455
CW
586
587/*
79e53945
JB
588 * Returns whether the given set of divisors are valid for a given refclk with
589 * the given connectors.
590 */
e2d214ae 591static bool intel_PLL_is_valid(struct drm_i915_private *dev_priv,
1b6f4958 592 const struct intel_limit *limit,
9e2c8475 593 const struct dpll *clock)
79e53945 594{
f01b7962
VS
595 if (clock->n < limit->n.min || limit->n.max < clock->n)
596 INTELPllInvalid("n out of range\n");
79e53945 597 if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
0206e353 598 INTELPllInvalid("p1 out of range\n");
79e53945 599 if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2)
0206e353 600 INTELPllInvalid("m2 out of range\n");
79e53945 601 if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1)
0206e353 602 INTELPllInvalid("m1 out of range\n");
f01b7962 603
e2d214ae 604 if (!IS_PINEVIEW(dev_priv) && !IS_VALLEYVIEW(dev_priv) &&
cc3f90f0 605 !IS_CHERRYVIEW(dev_priv) && !IS_GEN9_LP(dev_priv))
f01b7962
VS
606 if (clock->m1 <= clock->m2)
607 INTELPllInvalid("m1 <= m2\n");
608
e2d214ae 609 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
cc3f90f0 610 !IS_GEN9_LP(dev_priv)) {
f01b7962
VS
611 if (clock->p < limit->p.min || limit->p.max < clock->p)
612 INTELPllInvalid("p out of range\n");
613 if (clock->m < limit->m.min || limit->m.max < clock->m)
614 INTELPllInvalid("m out of range\n");
615 }
616
79e53945 617 if (clock->vco < limit->vco.min || limit->vco.max < clock->vco)
0206e353 618 INTELPllInvalid("vco out of range\n");
79e53945
JB
619 /* XXX: We may need to be checking "Dot clock" depending on the multiplier,
620 * connector, etc., rather than just a single range.
621 */
622 if (clock->dot < limit->dot.min || limit->dot.max < clock->dot)
0206e353 623 INTELPllInvalid("dot out of range\n");
79e53945
JB
624
625 return true;
626}
627
3b1429d9 628static int
1b6f4958 629i9xx_select_p2_div(const struct intel_limit *limit,
3b1429d9
VS
630 const struct intel_crtc_state *crtc_state,
631 int target)
79e53945 632{
d2daff2c 633 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
79e53945 634
2d84d2b3 635 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
79e53945 636 /*
a210b028
DV
637 * For LVDS just rely on its current settings for dual-channel.
638 * We haven't figured out how to reliably set up different
639 * single/dual channel state, if we even can.
79e53945 640 */
d2daff2c 641 if (intel_is_dual_link_lvds(dev_priv))
3b1429d9 642 return limit->p2.p2_fast;
79e53945 643 else
3b1429d9 644 return limit->p2.p2_slow;
79e53945
JB
645 } else {
646 if (target < limit->p2.dot_limit)
3b1429d9 647 return limit->p2.p2_slow;
79e53945 648 else
3b1429d9 649 return limit->p2.p2_fast;
79e53945 650 }
3b1429d9
VS
651}
652
70e8aa21
ACO
653/*
654 * Returns a set of divisors for the desired target clock with the given
655 * refclk, or FALSE. The returned values represent the clock equation:
656 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
657 *
658 * Target and reference clocks are specified in kHz.
659 *
660 * If match_clock is provided, then best_clock P divider must match the P
661 * divider from @match_clock used for LVDS downclocking.
662 */
3b1429d9 663static bool
1b6f4958 664i9xx_find_best_dpll(const struct intel_limit *limit,
3b1429d9 665 struct intel_crtc_state *crtc_state,
9e2c8475
ACO
666 int target, int refclk, struct dpll *match_clock,
667 struct dpll *best_clock)
3b1429d9
VS
668{
669 struct drm_device *dev = crtc_state->base.crtc->dev;
9e2c8475 670 struct dpll clock;
3b1429d9 671 int err = target;
79e53945 672
0206e353 673 memset(best_clock, 0, sizeof(*best_clock));
79e53945 674
3b1429d9
VS
675 clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
676
42158660
ZY
677 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
678 clock.m1++) {
679 for (clock.m2 = limit->m2.min;
680 clock.m2 <= limit->m2.max; clock.m2++) {
c0efc387 681 if (clock.m2 >= clock.m1)
42158660
ZY
682 break;
683 for (clock.n = limit->n.min;
684 clock.n <= limit->n.max; clock.n++) {
685 for (clock.p1 = limit->p1.min;
686 clock.p1 <= limit->p1.max; clock.p1++) {
79e53945
JB
687 int this_err;
688
dccbea3b 689 i9xx_calc_dpll_params(refclk, &clock);
e2d214ae
TU
690 if (!intel_PLL_is_valid(to_i915(dev),
691 limit,
ac58c3f0
DV
692 &clock))
693 continue;
694 if (match_clock &&
695 clock.p != match_clock->p)
696 continue;
697
698 this_err = abs(clock.dot - target);
699 if (this_err < err) {
700 *best_clock = clock;
701 err = this_err;
702 }
703 }
704 }
705 }
706 }
707
708 return (err != target);
709}
710
70e8aa21
ACO
711/*
712 * Returns a set of divisors for the desired target clock with the given
713 * refclk, or FALSE. The returned values represent the clock equation:
714 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
715 *
716 * Target and reference clocks are specified in kHz.
717 *
718 * If match_clock is provided, then best_clock P divider must match the P
719 * divider from @match_clock used for LVDS downclocking.
720 */
ac58c3f0 721static bool
1b6f4958 722pnv_find_best_dpll(const struct intel_limit *limit,
a93e255f 723 struct intel_crtc_state *crtc_state,
9e2c8475
ACO
724 int target, int refclk, struct dpll *match_clock,
725 struct dpll *best_clock)
79e53945 726{
3b1429d9 727 struct drm_device *dev = crtc_state->base.crtc->dev;
9e2c8475 728 struct dpll clock;
79e53945
JB
729 int err = target;
730
0206e353 731 memset(best_clock, 0, sizeof(*best_clock));
79e53945 732
3b1429d9
VS
733 clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
734
42158660
ZY
735 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max;
736 clock.m1++) {
737 for (clock.m2 = limit->m2.min;
738 clock.m2 <= limit->m2.max; clock.m2++) {
42158660
ZY
739 for (clock.n = limit->n.min;
740 clock.n <= limit->n.max; clock.n++) {
741 for (clock.p1 = limit->p1.min;
742 clock.p1 <= limit->p1.max; clock.p1++) {
79e53945
JB
743 int this_err;
744
dccbea3b 745 pnv_calc_dpll_params(refclk, &clock);
e2d214ae
TU
746 if (!intel_PLL_is_valid(to_i915(dev),
747 limit,
1b894b59 748 &clock))
79e53945 749 continue;
cec2f356
SP
750 if (match_clock &&
751 clock.p != match_clock->p)
752 continue;
79e53945
JB
753
754 this_err = abs(clock.dot - target);
755 if (this_err < err) {
756 *best_clock = clock;
757 err = this_err;
758 }
759 }
760 }
761 }
762 }
763
764 return (err != target);
765}
766
997c030c
ACO
767/*
768 * Returns a set of divisors for the desired target clock with the given
769 * refclk, or FALSE. The returned values represent the clock equation:
770 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
70e8aa21
ACO
771 *
772 * Target and reference clocks are specified in kHz.
773 *
774 * If match_clock is provided, then best_clock P divider must match the P
775 * divider from @match_clock used for LVDS downclocking.
997c030c 776 */
d4906093 777static bool
1b6f4958 778g4x_find_best_dpll(const struct intel_limit *limit,
a93e255f 779 struct intel_crtc_state *crtc_state,
9e2c8475
ACO
780 int target, int refclk, struct dpll *match_clock,
781 struct dpll *best_clock)
d4906093 782{
3b1429d9 783 struct drm_device *dev = crtc_state->base.crtc->dev;
9e2c8475 784 struct dpll clock;
d4906093 785 int max_n;
3b1429d9 786 bool found = false;
6ba770dc
AJ
787 /* approximately equals target * 0.00585 */
788 int err_most = (target >> 8) + (target >> 9);
d4906093
ML
789
790 memset(best_clock, 0, sizeof(*best_clock));
3b1429d9
VS
791
792 clock.p2 = i9xx_select_p2_div(limit, crtc_state, target);
793
d4906093 794 max_n = limit->n.max;
f77f13e2 795 /* based on hardware requirement, prefer smaller n to precision */
d4906093 796 for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
f77f13e2 797 /* based on hardware requirement, prefere larger m1,m2 */
d4906093
ML
798 for (clock.m1 = limit->m1.max;
799 clock.m1 >= limit->m1.min; clock.m1--) {
800 for (clock.m2 = limit->m2.max;
801 clock.m2 >= limit->m2.min; clock.m2--) {
802 for (clock.p1 = limit->p1.max;
803 clock.p1 >= limit->p1.min; clock.p1--) {
804 int this_err;
805
dccbea3b 806 i9xx_calc_dpll_params(refclk, &clock);
e2d214ae
TU
807 if (!intel_PLL_is_valid(to_i915(dev),
808 limit,
1b894b59 809 &clock))
d4906093 810 continue;
1b894b59
CW
811
812 this_err = abs(clock.dot - target);
d4906093
ML
813 if (this_err < err_most) {
814 *best_clock = clock;
815 err_most = this_err;
816 max_n = clock.n;
817 found = true;
818 }
819 }
820 }
821 }
822 }
2c07245f
ZW
823 return found;
824}
825
d5dd62bd
ID
826/*
827 * Check if the calculated PLL configuration is more optimal compared to the
828 * best configuration and error found so far. Return the calculated error.
829 */
830static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
9e2c8475
ACO
831 const struct dpll *calculated_clock,
832 const struct dpll *best_clock,
d5dd62bd
ID
833 unsigned int best_error_ppm,
834 unsigned int *error_ppm)
835{
9ca3ba01
ID
836 /*
837 * For CHV ignore the error and consider only the P value.
838 * Prefer a bigger P value based on HW requirements.
839 */
920a14b2 840 if (IS_CHERRYVIEW(to_i915(dev))) {
9ca3ba01
ID
841 *error_ppm = 0;
842
843 return calculated_clock->p > best_clock->p;
844 }
845
24be4e46
ID
846 if (WARN_ON_ONCE(!target_freq))
847 return false;
848
d5dd62bd
ID
849 *error_ppm = div_u64(1000000ULL *
850 abs(target_freq - calculated_clock->dot),
851 target_freq);
852 /*
853 * Prefer a better P value over a better (smaller) error if the error
854 * is small. Ensure this preference for future configurations too by
855 * setting the error to 0.
856 */
857 if (*error_ppm < 100 && calculated_clock->p > best_clock->p) {
858 *error_ppm = 0;
859
860 return true;
861 }
862
863 return *error_ppm + 10 < best_error_ppm;
864}
865
65b3d6a9
ACO
866/*
867 * Returns a set of divisors for the desired target clock with the given
868 * refclk, or FALSE. The returned values represent the clock equation:
869 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
870 */
a0c4da24 871static bool
1b6f4958 872vlv_find_best_dpll(const struct intel_limit *limit,
a93e255f 873 struct intel_crtc_state *crtc_state,
9e2c8475
ACO
874 int target, int refclk, struct dpll *match_clock,
875 struct dpll *best_clock)
a0c4da24 876{
a93e255f 877 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
a919ff14 878 struct drm_device *dev = crtc->base.dev;
9e2c8475 879 struct dpll clock;
69e4f900 880 unsigned int bestppm = 1000000;
27e639bf
VS
881 /* min update 19.2 MHz */
882 int max_n = min(limit->n.max, refclk / 19200);
49e497ef 883 bool found = false;
a0c4da24 884
6b4bf1c4
VS
885 target *= 5; /* fast clock */
886
887 memset(best_clock, 0, sizeof(*best_clock));
a0c4da24
JB
888
889 /* based on hardware requirement, prefer smaller n to precision */
27e639bf 890 for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
811bbf05 891 for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
889059d8 892 for (clock.p2 = limit->p2.p2_fast; clock.p2 >= limit->p2.p2_slow;
c1a9ae43 893 clock.p2 -= clock.p2 > 10 ? 2 : 1) {
6b4bf1c4 894 clock.p = clock.p1 * clock.p2;
a0c4da24 895 /* based on hardware requirement, prefer bigger m1,m2 values */
6b4bf1c4 896 for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; clock.m1++) {
d5dd62bd 897 unsigned int ppm;
69e4f900 898
6b4bf1c4
VS
899 clock.m2 = DIV_ROUND_CLOSEST(target * clock.p * clock.n,
900 refclk * clock.m1);
901
dccbea3b 902 vlv_calc_dpll_params(refclk, &clock);
43b0ac53 903
e2d214ae
TU
904 if (!intel_PLL_is_valid(to_i915(dev),
905 limit,
f01b7962 906 &clock))
43b0ac53
VS
907 continue;
908
d5dd62bd
ID
909 if (!vlv_PLL_is_optimal(dev, target,
910 &clock,
911 best_clock,
912 bestppm, &ppm))
913 continue;
6b4bf1c4 914
d5dd62bd
ID
915 *best_clock = clock;
916 bestppm = ppm;
917 found = true;
a0c4da24
JB
918 }
919 }
920 }
921 }
a0c4da24 922
49e497ef 923 return found;
a0c4da24 924}
a4fc5ed6 925
65b3d6a9
ACO
926/*
927 * Returns a set of divisors for the desired target clock with the given
928 * refclk, or FALSE. The returned values represent the clock equation:
929 * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
930 */
ef9348c8 931static bool
1b6f4958 932chv_find_best_dpll(const struct intel_limit *limit,
a93e255f 933 struct intel_crtc_state *crtc_state,
9e2c8475
ACO
934 int target, int refclk, struct dpll *match_clock,
935 struct dpll *best_clock)
ef9348c8 936{
a93e255f 937 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
a919ff14 938 struct drm_device *dev = crtc->base.dev;
9ca3ba01 939 unsigned int best_error_ppm;
9e2c8475 940 struct dpll clock;
ba3f4d0a 941 u64 m2;
ef9348c8
CML
942 int found = false;
943
944 memset(best_clock, 0, sizeof(*best_clock));
9ca3ba01 945 best_error_ppm = 1000000;
ef9348c8
CML
946
947 /*
948 * Based on hardware doc, the n always set to 1, and m1 always
949 * set to 2. If requires to support 200Mhz refclk, we need to
950 * revisit this because n may not 1 anymore.
951 */
952 clock.n = 1, clock.m1 = 2;
953 target *= 5; /* fast clock */
954
955 for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
956 for (clock.p2 = limit->p2.p2_fast;
957 clock.p2 >= limit->p2.p2_slow;
958 clock.p2 -= clock.p2 > 10 ? 2 : 1) {
9ca3ba01 959 unsigned int error_ppm;
ef9348c8
CML
960
961 clock.p = clock.p1 * clock.p2;
962
d492a29d
VS
963 m2 = DIV_ROUND_CLOSEST_ULL(mul_u32_u32(target, clock.p * clock.n) << 22,
964 refclk * clock.m1);
ef9348c8
CML
965
966 if (m2 > INT_MAX/clock.m1)
967 continue;
968
969 clock.m2 = m2;
970
dccbea3b 971 chv_calc_dpll_params(refclk, &clock);
ef9348c8 972
e2d214ae 973 if (!intel_PLL_is_valid(to_i915(dev), limit, &clock))
ef9348c8
CML
974 continue;
975
9ca3ba01
ID
976 if (!vlv_PLL_is_optimal(dev, target, &clock, best_clock,
977 best_error_ppm, &error_ppm))
978 continue;
979
980 *best_clock = clock;
981 best_error_ppm = error_ppm;
982 found = true;
ef9348c8
CML
983 }
984 }
985
986 return found;
987}
988
e40396d0 989bool bxt_find_best_dpll(struct intel_crtc_state *crtc_state,
9e2c8475 990 struct dpll *best_clock)
5ab7b0b7 991{
65b3d6a9 992 int refclk = 100000;
1b6f4958 993 const struct intel_limit *limit = &intel_limits_bxt;
5ab7b0b7 994
65b3d6a9 995 return chv_find_best_dpll(limit, crtc_state,
e40396d0
VS
996 crtc_state->port_clock, refclk,
997 NULL, best_clock);
5ab7b0b7
ID
998}
999
525b9311 1000bool intel_crtc_active(struct intel_crtc *crtc)
20ddf665 1001{
20ddf665
VS
1002 /* Be paranoid as we can arrive here with only partial
1003 * state retrieved from the hardware during setup.
1004 *
241bfc38 1005 * We can ditch the adjusted_mode.crtc_clock check as soon
20ddf665
VS
1006 * as Haswell has gained clock readout/fastboot support.
1007 *
cd30fbca 1008 * We can ditch the crtc->primary->state->fb check as soon as we can
20ddf665 1009 * properly reconstruct framebuffers.
c3d1f436
MR
1010 *
1011 * FIXME: The intel_crtc->active here should be switched to
1012 * crtc->state->active once we have proper CRTC states wired up
1013 * for atomic.
20ddf665 1014 */
525b9311
VS
1015 return crtc->active && crtc->base.primary->state->fb &&
1016 crtc->config->base.adjusted_mode.crtc_clock;
20ddf665
VS
1017}
1018
a5c961d1
PZ
1019enum transcoder intel_pipe_to_cpu_transcoder(struct drm_i915_private *dev_priv,
1020 enum pipe pipe)
1021{
98187836 1022 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
a5c961d1 1023
e2af48c6 1024 return crtc->config->cpu_transcoder;
a5c961d1
PZ
1025}
1026
8fedd64d
VS
1027static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
1028 enum pipe pipe)
fbf49ea2 1029{
f0f59a00 1030 i915_reg_t reg = PIPEDSL(pipe);
fbf49ea2
VS
1031 u32 line1, line2;
1032 u32 line_mask;
1033
cf819eff 1034 if (IS_GEN(dev_priv, 2))
fbf49ea2
VS
1035 line_mask = DSL_LINEMASK_GEN2;
1036 else
1037 line_mask = DSL_LINEMASK_GEN3;
1038
1039 line1 = I915_READ(reg) & line_mask;
6adfb1ef 1040 msleep(5);
fbf49ea2
VS
1041 line2 = I915_READ(reg) & line_mask;
1042
8fedd64d
VS
1043 return line1 != line2;
1044}
1045
1046static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
1047{
1048 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1049 enum pipe pipe = crtc->pipe;
1050
1051 /* Wait for the display line to settle/start moving */
1052 if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
1053 DRM_ERROR("pipe %c scanline %s wait timed out\n",
1054 pipe_name(pipe), onoff(state));
1055}
1056
1057static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
1058{
1059 wait_for_pipe_scanline_moving(crtc, false);
1060}
1061
1062static void intel_wait_for_pipe_scanline_moving(struct intel_crtc *crtc)
1063{
1064 wait_for_pipe_scanline_moving(crtc, true);
fbf49ea2
VS
1065}
1066
4972f70a
VS
1067static void
1068intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
9d0498a2 1069{
4972f70a 1070 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
6315b5d3 1071 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
ab7ad7f6 1072
6315b5d3 1073 if (INTEL_GEN(dev_priv) >= 4) {
4972f70a 1074 enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
f0f59a00 1075 i915_reg_t reg = PIPECONF(cpu_transcoder);
ab7ad7f6
KP
1076
1077 /* Wait for the Pipe State to go off */
97a04e0d 1078 if (intel_wait_for_register(&dev_priv->uncore,
b8511f53
CW
1079 reg, I965_PIPECONF_ACTIVE, 0,
1080 100))
284637d9 1081 WARN(1, "pipe_off wait timed out\n");
ab7ad7f6 1082 } else {
8fedd64d 1083 intel_wait_for_pipe_scanline_stopped(crtc);
ab7ad7f6 1084 }
79e53945
JB
1085}
1086
b24e7179 1087/* Only for pre-ILK configs */
55607e8a
DV
1088void assert_pll(struct drm_i915_private *dev_priv,
1089 enum pipe pipe, bool state)
b24e7179 1090{
b24e7179
JB
1091 u32 val;
1092 bool cur_state;
1093
649636ef 1094 val = I915_READ(DPLL(pipe));
b24e7179 1095 cur_state = !!(val & DPLL_VCO_ENABLE);
e2c719b7 1096 I915_STATE_WARN(cur_state != state,
b24e7179 1097 "PLL state assertion failure (expected %s, current %s)\n",
87ad3212 1098 onoff(state), onoff(cur_state));
b24e7179 1099}
b24e7179 1100
23538ef1 1101/* XXX: the dsi pll is shared between MIPI DSI ports */
8563b1e8 1102void assert_dsi_pll(struct drm_i915_private *dev_priv, bool state)
23538ef1
JN
1103{
1104 u32 val;
1105 bool cur_state;
1106
221c7862 1107 vlv_cck_get(dev_priv);
23538ef1 1108 val = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
221c7862 1109 vlv_cck_put(dev_priv);
23538ef1
JN
1110
1111 cur_state = val & DSI_PLL_VCO_EN;
e2c719b7 1112 I915_STATE_WARN(cur_state != state,
23538ef1 1113 "DSI PLL state assertion failure (expected %s, current %s)\n",
87ad3212 1114 onoff(state), onoff(cur_state));
23538ef1 1115}
23538ef1 1116
040484af
JB
1117static void assert_fdi_tx(struct drm_i915_private *dev_priv,
1118 enum pipe pipe, bool state)
1119{
040484af 1120 bool cur_state;
ad80a810
PZ
1121 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1122 pipe);
040484af 1123
2d1fe073 1124 if (HAS_DDI(dev_priv)) {
affa9354 1125 /* DDI does not have a specific FDI_TX register */
649636ef 1126 u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
ad80a810 1127 cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
bf507ef7 1128 } else {
649636ef 1129 u32 val = I915_READ(FDI_TX_CTL(pipe));
bf507ef7
ED
1130 cur_state = !!(val & FDI_TX_ENABLE);
1131 }
e2c719b7 1132 I915_STATE_WARN(cur_state != state,
040484af 1133 "FDI TX state assertion failure (expected %s, current %s)\n",
87ad3212 1134 onoff(state), onoff(cur_state));
040484af
JB
1135}
1136#define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true)
1137#define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false)
1138
1139static void assert_fdi_rx(struct drm_i915_private *dev_priv,
1140 enum pipe pipe, bool state)
1141{
040484af
JB
1142 u32 val;
1143 bool cur_state;
1144
649636ef 1145 val = I915_READ(FDI_RX_CTL(pipe));
d63fa0dc 1146 cur_state = !!(val & FDI_RX_ENABLE);
e2c719b7 1147 I915_STATE_WARN(cur_state != state,
040484af 1148 "FDI RX state assertion failure (expected %s, current %s)\n",
87ad3212 1149 onoff(state), onoff(cur_state));
040484af
JB
1150}
1151#define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true)
1152#define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false)
1153
1154static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
1155 enum pipe pipe)
1156{
040484af
JB
1157 u32 val;
1158
1159 /* ILK FDI PLL is always enabled */
cf819eff 1160 if (IS_GEN(dev_priv, 5))
040484af
JB
1161 return;
1162
bf507ef7 1163 /* On Haswell, DDI ports are responsible for the FDI PLL setup */
2d1fe073 1164 if (HAS_DDI(dev_priv))
bf507ef7
ED
1165 return;
1166
649636ef 1167 val = I915_READ(FDI_TX_CTL(pipe));
e2c719b7 1168 I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
040484af
JB
1169}
1170
55607e8a
DV
1171void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
1172 enum pipe pipe, bool state)
040484af 1173{
040484af 1174 u32 val;
55607e8a 1175 bool cur_state;
040484af 1176
649636ef 1177 val = I915_READ(FDI_RX_CTL(pipe));
55607e8a 1178 cur_state = !!(val & FDI_RX_PLL_ENABLE);
e2c719b7 1179 I915_STATE_WARN(cur_state != state,
55607e8a 1180 "FDI RX PLL assertion failure (expected %s, current %s)\n",
87ad3212 1181 onoff(state), onoff(cur_state));
040484af
JB
1182}
1183
4f8036a2 1184void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
ea0760cf 1185{
f0f59a00 1186 i915_reg_t pp_reg;
ea0760cf 1187 u32 val;
10ed55e4 1188 enum pipe panel_pipe = INVALID_PIPE;
0de3b485 1189 bool locked = true;
ea0760cf 1190
4f8036a2 1191 if (WARN_ON(HAS_DDI(dev_priv)))
bedd4dba
JN
1192 return;
1193
4f8036a2 1194 if (HAS_PCH_SPLIT(dev_priv)) {
bedd4dba
JN
1195 u32 port_sel;
1196
44cb734c
ID
1197 pp_reg = PP_CONTROL(0);
1198 port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;
bedd4dba 1199
4c23dea4
VS
1200 switch (port_sel) {
1201 case PANEL_PORT_SELECT_LVDS:
a44628b9 1202 intel_lvds_port_enabled(dev_priv, PCH_LVDS, &panel_pipe);
4c23dea4
VS
1203 break;
1204 case PANEL_PORT_SELECT_DPA:
1205 intel_dp_port_enabled(dev_priv, DP_A, PORT_A, &panel_pipe);
1206 break;
1207 case PANEL_PORT_SELECT_DPC:
1208 intel_dp_port_enabled(dev_priv, PCH_DP_C, PORT_C, &panel_pipe);
1209 break;
1210 case PANEL_PORT_SELECT_DPD:
1211 intel_dp_port_enabled(dev_priv, PCH_DP_D, PORT_D, &panel_pipe);
1212 break;
1213 default:
1214 MISSING_CASE(port_sel);
1215 break;
1216 }
4f8036a2 1217 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
bedd4dba 1218 /* presumably write lock depends on pipe, not port select */
44cb734c 1219 pp_reg = PP_CONTROL(pipe);
bedd4dba 1220 panel_pipe = pipe;
ea0760cf 1221 } else {
f0d2b758
VS
1222 u32 port_sel;
1223
44cb734c 1224 pp_reg = PP_CONTROL(0);
f0d2b758
VS
1225 port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;
1226
1227 WARN_ON(port_sel != PANEL_PORT_SELECT_LVDS);
a44628b9 1228 intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
ea0760cf
JB
1229 }
1230
1231 val = I915_READ(pp_reg);
1232 if (!(val & PANEL_POWER_ON) ||
ec49ba2d 1233 ((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
ea0760cf
JB
1234 locked = false;
1235
e2c719b7 1236 I915_STATE_WARN(panel_pipe == pipe && locked,
ea0760cf 1237 "panel assertion failure, pipe %c regs locked\n",
9db4a9c7 1238 pipe_name(pipe));
ea0760cf
JB
1239}
1240
b840d907
JB
1241void assert_pipe(struct drm_i915_private *dev_priv,
1242 enum pipe pipe, bool state)
b24e7179 1243{
63d7bbe9 1244 bool cur_state;
702e7a56
PZ
1245 enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
1246 pipe);
4feed0eb 1247 enum intel_display_power_domain power_domain;
0e6e0be4 1248 intel_wakeref_t wakeref;
b24e7179 1249
e56134bc
VS
1250 /* we keep both pipes enabled on 830 */
1251 if (IS_I830(dev_priv))
8e636784
DV
1252 state = true;
1253
4feed0eb 1254 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
0e6e0be4
CW
1255 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
1256 if (wakeref) {
649636ef 1257 u32 val = I915_READ(PIPECONF(cpu_transcoder));
69310161 1258 cur_state = !!(val & PIPECONF_ENABLE);
4feed0eb 1259
0e6e0be4 1260 intel_display_power_put(dev_priv, power_domain, wakeref);
4feed0eb
ID
1261 } else {
1262 cur_state = false;
69310161
PZ
1263 }
1264
e2c719b7 1265 I915_STATE_WARN(cur_state != state,
63d7bbe9 1266 "pipe %c assertion failure (expected %s, current %s)\n",
87ad3212 1267 pipe_name(pipe), onoff(state), onoff(cur_state));
b24e7179
JB
1268}
1269
51f5a096 1270static void assert_plane(struct intel_plane *plane, bool state)
b24e7179 1271{
eade6c89
VS
1272 enum pipe pipe;
1273 bool cur_state;
1274
1275 cur_state = plane->get_hw_state(plane, &pipe);
b24e7179 1276
e2c719b7 1277 I915_STATE_WARN(cur_state != state,
51f5a096
VS
1278 "%s assertion failure (expected %s, current %s)\n",
1279 plane->base.name, onoff(state), onoff(cur_state));
b24e7179
JB
1280}
1281
51f5a096
VS
1282#define assert_plane_enabled(p) assert_plane(p, true)
1283#define assert_plane_disabled(p) assert_plane(p, false)
931872fc 1284
51f5a096 1285static void assert_planes_disabled(struct intel_crtc *crtc)
b24e7179 1286{
51f5a096
VS
1287 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1288 struct intel_plane *plane;
19332d7a 1289
51f5a096
VS
1290 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane)
1291 assert_plane_disabled(plane);
19332d7a
JB
1292}
1293
08c71e5e
VS
1294static void assert_vblank_disabled(struct drm_crtc *crtc)
1295{
e2c719b7 1296 if (I915_STATE_WARN_ON(drm_crtc_vblank_get(crtc) == 0))
08c71e5e
VS
1297 drm_crtc_vblank_put(crtc);
1298}
1299
7abd4b35
ACO
1300void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
1301 enum pipe pipe)
92f2584a 1302{
92f2584a
JB
1303 u32 val;
1304 bool enabled;
1305
649636ef 1306 val = I915_READ(PCH_TRANSCONF(pipe));
92f2584a 1307 enabled = !!(val & TRANS_ENABLE);
e2c719b7 1308 I915_STATE_WARN(enabled,
9db4a9c7
JB
1309 "transcoder assertion failed, should be off on pipe %c but is still active\n",
1310 pipe_name(pipe));
92f2584a
JB
1311}
1312
59b74c49
VS
1313static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv,
1314 enum pipe pipe, enum port port,
1315 i915_reg_t dp_reg)
f0575e92 1316{
59b74c49
VS
1317 enum pipe port_pipe;
1318 bool state;
f0575e92 1319
59b74c49 1320 state = intel_dp_port_enabled(dev_priv, dp_reg, port, &port_pipe);
f0575e92 1321
59b74c49
VS
1322 I915_STATE_WARN(state && port_pipe == pipe,
1323 "PCH DP %c enabled on transcoder %c, should be disabled\n",
1324 port_name(port), pipe_name(pipe));
1325
1326 I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
1327 "IBX PCH DP %c still using transcoder B\n",
1328 port_name(port));
291906f1
JB
1329}
1330
1331static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv,
76203467
VS
1332 enum pipe pipe, enum port port,
1333 i915_reg_t hdmi_reg)
291906f1 1334{
76203467
VS
1335 enum pipe port_pipe;
1336 bool state;
1337
1338 state = intel_sdvo_port_enabled(dev_priv, hdmi_reg, &port_pipe);
1339
1340 I915_STATE_WARN(state && port_pipe == pipe,
1341 "PCH HDMI %c enabled on transcoder %c, should be disabled\n",
1342 port_name(port), pipe_name(pipe));
de9a35ab 1343
76203467
VS
1344 I915_STATE_WARN(HAS_PCH_IBX(dev_priv) && !state && port_pipe == PIPE_B,
1345 "IBX PCH HDMI %c still using transcoder B\n",
1346 port_name(port));
291906f1
JB
1347}
1348
1349static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv,
1350 enum pipe pipe)
1351{
6102a8ee 1352 enum pipe port_pipe;
291906f1 1353
59b74c49
VS
1354 assert_pch_dp_disabled(dev_priv, pipe, PORT_B, PCH_DP_B);
1355 assert_pch_dp_disabled(dev_priv, pipe, PORT_C, PCH_DP_C);
1356 assert_pch_dp_disabled(dev_priv, pipe, PORT_D, PCH_DP_D);
291906f1 1357
6102a8ee
VS
1358 I915_STATE_WARN(intel_crt_port_enabled(dev_priv, PCH_ADPA, &port_pipe) &&
1359 port_pipe == pipe,
1360 "PCH VGA enabled on transcoder %c, should be disabled\n",
1361 pipe_name(pipe));
291906f1 1362
a44628b9
VS
1363 I915_STATE_WARN(intel_lvds_port_enabled(dev_priv, PCH_LVDS, &port_pipe) &&
1364 port_pipe == pipe,
1365 "PCH LVDS enabled on transcoder %c, should be disabled\n",
1366 pipe_name(pipe));
291906f1 1367
3aefb67f 1368 /* PCH SDVOB multiplex with HDMIB */
76203467
VS
1369 assert_pch_hdmi_disabled(dev_priv, pipe, PORT_B, PCH_HDMIB);
1370 assert_pch_hdmi_disabled(dev_priv, pipe, PORT_C, PCH_HDMIC);
1371 assert_pch_hdmi_disabled(dev_priv, pipe, PORT_D, PCH_HDMID);
291906f1
JB
1372}
1373
cd2d34d9
VS
1374static void _vlv_enable_pll(struct intel_crtc *crtc,
1375 const struct intel_crtc_state *pipe_config)
1376{
1377 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1378 enum pipe pipe = crtc->pipe;
1379
1380 I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
1381 POSTING_READ(DPLL(pipe));
1382 udelay(150);
1383
97a04e0d 1384 if (intel_wait_for_register(&dev_priv->uncore,
2c30b43b
CW
1385 DPLL(pipe),
1386 DPLL_LOCK_VLV,
1387 DPLL_LOCK_VLV,
1388 1))
cd2d34d9
VS
1389 DRM_ERROR("DPLL %d failed to lock\n", pipe);
1390}
1391
d288f65f 1392static void vlv_enable_pll(struct intel_crtc *crtc,
5cec258b 1393 const struct intel_crtc_state *pipe_config)
87442f73 1394{
cd2d34d9 1395 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8bd3f301 1396 enum pipe pipe = crtc->pipe;
87442f73 1397
8bd3f301 1398 assert_pipe_disabled(dev_priv, pipe);
87442f73 1399
87442f73 1400 /* PLL is protected by panel, make sure we can write it */
7d1a83cb 1401 assert_panel_unlocked(dev_priv, pipe);
87442f73 1402
cd2d34d9
VS
1403 if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
1404 _vlv_enable_pll(crtc, pipe_config);
426115cf 1405
8bd3f301
VS
1406 I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
1407 POSTING_READ(DPLL_MD(pipe));
87442f73
DV
1408}
1409
cd2d34d9
VS
1410
1411static void _chv_enable_pll(struct intel_crtc *crtc,
1412 const struct intel_crtc_state *pipe_config)
9d556c99 1413{
cd2d34d9 1414 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8bd3f301 1415 enum pipe pipe = crtc->pipe;
9d556c99 1416 enum dpio_channel port = vlv_pipe_to_channel(pipe);
9d556c99
CML
1417 u32 tmp;
1418
221c7862 1419 vlv_dpio_get(dev_priv);
9d556c99
CML
1420
1421 /* Enable back the 10bit clock to display controller */
1422 tmp = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
1423 tmp |= DPIO_DCLKP_EN;
1424 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), tmp);
1425
221c7862 1426 vlv_dpio_put(dev_priv);
54433e91 1427
9d556c99
CML
1428 /*
1429 * Need to wait > 100ns between dclkp clock enable bit and PLL enable.
1430 */
1431 udelay(1);
1432
1433 /* Enable PLL */
d288f65f 1434 I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
9d556c99
CML
1435
1436 /* Check PLL is locked */
97a04e0d 1437 if (intel_wait_for_register(&dev_priv->uncore,
6b18826a
CW
1438 DPLL(pipe), DPLL_LOCK_VLV, DPLL_LOCK_VLV,
1439 1))
9d556c99 1440 DRM_ERROR("PLL %d failed to lock\n", pipe);
cd2d34d9
VS
1441}
1442
1443static void chv_enable_pll(struct intel_crtc *crtc,
1444 const struct intel_crtc_state *pipe_config)
1445{
1446 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1447 enum pipe pipe = crtc->pipe;
1448
1449 assert_pipe_disabled(dev_priv, pipe);
1450
1451 /* PLL is protected by panel, make sure we can write it */
1452 assert_panel_unlocked(dev_priv, pipe);
1453
1454 if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
1455 _chv_enable_pll(crtc, pipe_config);
9d556c99 1456
c231775c
VS
1457 if (pipe != PIPE_A) {
1458 /*
1459 * WaPixelRepeatModeFixForC0:chv
1460 *
1461 * DPLLCMD is AWOL. Use chicken bits to propagate
1462 * the value from DPLLBMD to either pipe B or C.
1463 */
dfa311f0 1464 I915_WRITE(CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
c231775c
VS
1465 I915_WRITE(DPLL_MD(PIPE_B), pipe_config->dpll_hw_state.dpll_md);
1466 I915_WRITE(CBR4_VLV, 0);
1467 dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;
1468
1469 /*
1470 * DPLLB VGA mode also seems to cause problems.
1471 * We should always have it disabled.
1472 */
1473 WARN_ON((I915_READ(DPLL(PIPE_B)) & DPLL_VGA_MODE_DIS) == 0);
1474 } else {
1475 I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
1476 POSTING_READ(DPLL_MD(pipe));
1477 }
9d556c99
CML
1478}
1479
9e7d5699
VS
1480static bool i9xx_has_pps(struct drm_i915_private *dev_priv)
1481{
1482 if (IS_I830(dev_priv))
1483 return false;
1484
1485 return IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
1486}
1487
939994da
VS
1488static void i9xx_enable_pll(struct intel_crtc *crtc,
1489 const struct intel_crtc_state *crtc_state)
63d7bbe9 1490{
6315b5d3 1491 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
f0f59a00 1492 i915_reg_t reg = DPLL(crtc->pipe);
939994da 1493 u32 dpll = crtc_state->dpll_hw_state.dpll;
bb408dd2 1494 int i;
63d7bbe9 1495
66e3d5c0 1496 assert_pipe_disabled(dev_priv, crtc->pipe);
58c6eaa2 1497
63d7bbe9 1498 /* PLL is protected by panel, make sure we can write it */
9e7d5699 1499 if (i9xx_has_pps(dev_priv))
66e3d5c0 1500 assert_panel_unlocked(dev_priv, crtc->pipe);
63d7bbe9 1501
c2b63374
VS
1502 /*
1503 * Apparently we need to have VGA mode enabled prior to changing
1504 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
1505 * dividers, even though the register value does change.
1506 */
7ca60367 1507 I915_WRITE(reg, dpll & ~DPLL_VGA_MODE_DIS);
8e7a65aa
VS
1508 I915_WRITE(reg, dpll);
1509
66e3d5c0
DV
1510 /* Wait for the clocks to stabilize. */
1511 POSTING_READ(reg);
1512 udelay(150);
1513
6315b5d3 1514 if (INTEL_GEN(dev_priv) >= 4) {
66e3d5c0 1515 I915_WRITE(DPLL_MD(crtc->pipe),
939994da 1516 crtc_state->dpll_hw_state.dpll_md);
66e3d5c0
DV
1517 } else {
1518 /* The pixel multiplier can only be updated once the
1519 * DPLL is enabled and the clocks are stable.
1520 *
1521 * So write it again.
1522 */
1523 I915_WRITE(reg, dpll);
1524 }
63d7bbe9
JB
1525
1526 /* We do this three times for luck */
bb408dd2
VS
1527 for (i = 0; i < 3; i++) {
1528 I915_WRITE(reg, dpll);
1529 POSTING_READ(reg);
1530 udelay(150); /* wait for warmup */
1531 }
63d7bbe9
JB
1532}
1533
b2354c78 1534static void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
63d7bbe9 1535{
b2354c78 1536 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
6315b5d3 1537 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1c4e0274
VS
1538 enum pipe pipe = crtc->pipe;
1539
b6b5d049 1540 /* Don't disable pipe or pipe PLLs if needed */
e56134bc 1541 if (IS_I830(dev_priv))
63d7bbe9
JB
1542 return;
1543
1544 /* Make sure the pipe isn't still relying on us */
1545 assert_pipe_disabled(dev_priv, pipe);
1546
b8afb911 1547 I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
50b44a44 1548 POSTING_READ(DPLL(pipe));
63d7bbe9
JB
1549}
1550
f6071166
JB
1551static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1552{
b8afb911 1553 u32 val;
f6071166
JB
1554
1555 /* Make sure the pipe isn't still relying on us */
1556 assert_pipe_disabled(dev_priv, pipe);
1557
03ed5cbf
VS
1558 val = DPLL_INTEGRATED_REF_CLK_VLV |
1559 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
1560 if (pipe != PIPE_A)
1561 val |= DPLL_INTEGRATED_CRI_CLK_VLV;
1562
f6071166
JB
1563 I915_WRITE(DPLL(pipe), val);
1564 POSTING_READ(DPLL(pipe));
076ed3b2
CML
1565}
1566
1567static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
1568{
d752048d 1569 enum dpio_channel port = vlv_pipe_to_channel(pipe);
076ed3b2
CML
1570 u32 val;
1571
a11b0703
VS
1572 /* Make sure the pipe isn't still relying on us */
1573 assert_pipe_disabled(dev_priv, pipe);
076ed3b2 1574
60bfe44f
VS
1575 val = DPLL_SSC_REF_CLK_CHV |
1576 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
a11b0703
VS
1577 if (pipe != PIPE_A)
1578 val |= DPLL_INTEGRATED_CRI_CLK_VLV;
03ed5cbf 1579
a11b0703
VS
1580 I915_WRITE(DPLL(pipe), val);
1581 POSTING_READ(DPLL(pipe));
d752048d 1582
221c7862 1583 vlv_dpio_get(dev_priv);
d752048d
VS
1584
1585 /* Disable 10bit clock to display controller */
1586 val = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port));
1587 val &= ~DPIO_DCLKP_EN;
1588 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), val);
1589
221c7862 1590 vlv_dpio_put(dev_priv);
f6071166
JB
1591}
1592
e4607fcf 1593void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
9b6de0a1
VS
1594 struct intel_digital_port *dport,
1595 unsigned int expected_mask)
89b667f8
JB
1596{
1597 u32 port_mask;
f0f59a00 1598 i915_reg_t dpll_reg;
89b667f8 1599
8f4f2797 1600 switch (dport->base.port) {
e4607fcf 1601 case PORT_B:
89b667f8 1602 port_mask = DPLL_PORTB_READY_MASK;
00fc31b7 1603 dpll_reg = DPLL(0);
e4607fcf
CML
1604 break;
1605 case PORT_C:
89b667f8 1606 port_mask = DPLL_PORTC_READY_MASK;
00fc31b7 1607 dpll_reg = DPLL(0);
9b6de0a1 1608 expected_mask <<= 4;
00fc31b7
CML
1609 break;
1610 case PORT_D:
1611 port_mask = DPLL_PORTD_READY_MASK;
1612 dpll_reg = DPIO_PHY_STATUS;
e4607fcf
CML
1613 break;
1614 default:
1615 BUG();
1616 }
89b667f8 1617
97a04e0d 1618 if (intel_wait_for_register(&dev_priv->uncore,
370004d3
CW
1619 dpll_reg, port_mask, expected_mask,
1620 1000))
9b6de0a1 1621 WARN(1, "timed out waiting for port %c ready: got 0x%x, expected 0x%x\n",
8f4f2797
VS
1622 port_name(dport->base.port),
1623 I915_READ(dpll_reg) & port_mask, expected_mask);
89b667f8
JB
1624}
1625
7efd90fb 1626static void ironlake_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
040484af 1627{
7efd90fb
ML
1628 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
1629 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1630 enum pipe pipe = crtc->pipe;
f0f59a00 1631 i915_reg_t reg;
ba3f4d0a 1632 u32 val, pipeconf_val;
040484af 1633
040484af 1634 /* Make sure PCH DPLL is enabled */
7efd90fb 1635 assert_shared_dpll_enabled(dev_priv, crtc_state->shared_dpll);
040484af
JB
1636
1637 /* FDI must be feeding us bits for PCH ports */
1638 assert_fdi_tx_enabled(dev_priv, pipe);
1639 assert_fdi_rx_enabled(dev_priv, pipe);
1640
6e266956 1641 if (HAS_PCH_CPT(dev_priv)) {
23670b32
DV
1642 /* Workaround: Set the timing override bit before enabling the
1643 * pch transcoder. */
1644 reg = TRANS_CHICKEN2(pipe);
1645 val = I915_READ(reg);
1646 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
1647 I915_WRITE(reg, val);
59c859d6 1648 }
23670b32 1649
ab9412ba 1650 reg = PCH_TRANSCONF(pipe);
040484af 1651 val = I915_READ(reg);
5f7f726d 1652 pipeconf_val = I915_READ(PIPECONF(pipe));
e9bcff5c 1653
2d1fe073 1654 if (HAS_PCH_IBX(dev_priv)) {
e9bcff5c 1655 /*
c5de7c6f
VS
1656 * Make the BPC in transcoder be consistent with
1657 * that in pipeconf reg. For HDMI we must use 8bpc
1658 * here for both 8bpc and 12bpc.
e9bcff5c 1659 */
dfd07d72 1660 val &= ~PIPECONF_BPC_MASK;
7efd90fb 1661 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
c5de7c6f
VS
1662 val |= PIPECONF_8BPC;
1663 else
1664 val |= pipeconf_val & PIPECONF_BPC_MASK;
e9bcff5c 1665 }
5f7f726d
PZ
1666
1667 val &= ~TRANS_INTERLACE_MASK;
27b680f9 1668 if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) {
2d1fe073 1669 if (HAS_PCH_IBX(dev_priv) &&
7efd90fb 1670 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
7c26e5c6
PZ
1671 val |= TRANS_LEGACY_INTERLACED_ILK;
1672 else
1673 val |= TRANS_INTERLACED;
27b680f9 1674 } else {
5f7f726d 1675 val |= TRANS_PROGRESSIVE;
27b680f9 1676 }
5f7f726d 1677
040484af 1678 I915_WRITE(reg, val | TRANS_ENABLE);
97a04e0d 1679 if (intel_wait_for_register(&dev_priv->uncore,
650fbd84
CW
1680 reg, TRANS_STATE_ENABLE, TRANS_STATE_ENABLE,
1681 100))
4bb6f1f3 1682 DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
040484af
JB
1683}
1684
8fb033d7 1685static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
937bb610 1686 enum transcoder cpu_transcoder)
040484af 1687{
8fb033d7 1688 u32 val, pipeconf_val;
8fb033d7 1689
8fb033d7 1690 /* FDI must be feeding us bits for PCH ports */
1a240d4d 1691 assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
a2196033 1692 assert_fdi_rx_enabled(dev_priv, PIPE_A);
8fb033d7 1693
223a6fdf 1694 /* Workaround: set timing override bit. */
36c0d0cf 1695 val = I915_READ(TRANS_CHICKEN2(PIPE_A));
23670b32 1696 val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
36c0d0cf 1697 I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
223a6fdf 1698
25f3ef11 1699 val = TRANS_ENABLE;
937bb610 1700 pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));
8fb033d7 1701
9a76b1c6
PZ
1702 if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
1703 PIPECONF_INTERLACED_ILK)
a35f2679 1704 val |= TRANS_INTERLACED;
8fb033d7
PZ
1705 else
1706 val |= TRANS_PROGRESSIVE;
1707
ab9412ba 1708 I915_WRITE(LPT_TRANSCONF, val);
97a04e0d 1709 if (intel_wait_for_register(&dev_priv->uncore,
d9f96244
CW
1710 LPT_TRANSCONF,
1711 TRANS_STATE_ENABLE,
1712 TRANS_STATE_ENABLE,
1713 100))
937bb610 1714 DRM_ERROR("Failed to enable PCH transcoder\n");
8fb033d7
PZ
1715}
1716
b8a4f404
PZ
1717static void ironlake_disable_pch_transcoder(struct drm_i915_private *dev_priv,
1718 enum pipe pipe)
040484af 1719{
f0f59a00 1720 i915_reg_t reg;
ba3f4d0a 1721 u32 val;
040484af
JB
1722
1723 /* FDI relies on the transcoder */
1724 assert_fdi_tx_disabled(dev_priv, pipe);
1725 assert_fdi_rx_disabled(dev_priv, pipe);
1726
291906f1
JB
1727 /* Ports must be off as well */
1728 assert_pch_ports_disabled(dev_priv, pipe);
1729
ab9412ba 1730 reg = PCH_TRANSCONF(pipe);
040484af
JB
1731 val = I915_READ(reg);
1732 val &= ~TRANS_ENABLE;
1733 I915_WRITE(reg, val);
1734 /* wait for PCH transcoder off, transcoder state */
97a04e0d 1735 if (intel_wait_for_register(&dev_priv->uncore,
a7d04662
CW
1736 reg, TRANS_STATE_ENABLE, 0,
1737 50))
4bb6f1f3 1738 DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));
23670b32 1739
6e266956 1740 if (HAS_PCH_CPT(dev_priv)) {
23670b32
DV
1741 /* Workaround: Clear the timing override chicken bit again. */
1742 reg = TRANS_CHICKEN2(pipe);
1743 val = I915_READ(reg);
1744 val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
1745 I915_WRITE(reg, val);
1746 }
040484af
JB
1747}
1748
b7076546 1749void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
8fb033d7 1750{
8fb033d7
PZ
1751 u32 val;
1752
ab9412ba 1753 val = I915_READ(LPT_TRANSCONF);
8fb033d7 1754 val &= ~TRANS_ENABLE;
ab9412ba 1755 I915_WRITE(LPT_TRANSCONF, val);
8fb033d7 1756 /* wait for PCH transcoder off, transcoder state */
97a04e0d 1757 if (intel_wait_for_register(&dev_priv->uncore,
dfdb4749
CW
1758 LPT_TRANSCONF, TRANS_STATE_ENABLE, 0,
1759 50))
8a52fd9f 1760 DRM_ERROR("Failed to disable PCH transcoder\n");
223a6fdf
PZ
1761
1762 /* Workaround: clear timing override bit. */
36c0d0cf 1763 val = I915_READ(TRANS_CHICKEN2(PIPE_A));
23670b32 1764 val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
36c0d0cf 1765 I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
040484af
JB
1766}
1767
a2196033 1768enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
65f2130c
VS
1769{
1770 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1771
65f2130c 1772 if (HAS_PCH_LPT(dev_priv))
a2196033 1773 return PIPE_A;
65f2130c 1774 else
a2196033 1775 return crtc->pipe;
65f2130c
VS
1776}
1777
32db0b65
VS
1778static u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state)
1779{
1780 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
1781
1782 /*
1783 * On i965gm the hardware frame counter reads
1784 * zero when the TV encoder is enabled :(
1785 */
1786 if (IS_I965GM(dev_priv) &&
1787 (crtc_state->output_types & BIT(INTEL_OUTPUT_TVOUT)))
1788 return 0;
1789
1790 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
1791 return 0xffffffff; /* full 32 bit counter */
1792 else if (INTEL_GEN(dev_priv) >= 3)
1793 return 0xffffff; /* only 24 bits of frame count */
1794 else
1795 return 0; /* Gen2 doesn't have a hardware frame counter */
1796}
1797
1798static void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state)
1799{
1800 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
1801
1802 drm_crtc_set_max_vblank_count(&crtc->base,
1803 intel_crtc_max_vblank_count(crtc_state));
1804 drm_crtc_vblank_on(&crtc->base);
1805}
1806
4972f70a 1807static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
b24e7179 1808{
4972f70a
VS
1809 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
1810 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1811 enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
0372264a 1812 enum pipe pipe = crtc->pipe;
f0f59a00 1813 i915_reg_t reg;
b24e7179
JB
1814 u32 val;
1815
9e2ee2dd
VS
1816 DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));
1817
51f5a096 1818 assert_planes_disabled(crtc);
58c6eaa2 1819
b24e7179
JB
1820 /*
1821 * A pipe without a PLL won't actually be able to drive bits from
1822 * a plane. On ILK+ the pipe PLLs are integrated, so we don't
1823 * need the check.
1824 */
b2ae318a 1825 if (HAS_GMCH(dev_priv)) {
4972f70a 1826 if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
23538ef1
JN
1827 assert_dsi_pll_enabled(dev_priv);
1828 else
1829 assert_pll_enabled(dev_priv, pipe);
09fa8bb9 1830 } else {
4972f70a 1831 if (new_crtc_state->has_pch_encoder) {
040484af 1832 /* if driving the PCH, we need FDI enabled */
65f2130c 1833 assert_fdi_rx_pll_enabled(dev_priv,
a2196033 1834 intel_crtc_pch_transcoder(crtc));
1a240d4d
DV
1835 assert_fdi_tx_pll_enabled(dev_priv,
1836 (enum pipe) cpu_transcoder);
040484af
JB
1837 }
1838 /* FIXME: assert CPU port conditions for SNB+ */
1839 }
b24e7179 1840
0b2599a4
VS
1841 trace_intel_pipe_enable(dev_priv, pipe);
1842
702e7a56 1843 reg = PIPECONF(cpu_transcoder);
b24e7179 1844 val = I915_READ(reg);
7ad25d48 1845 if (val & PIPECONF_ENABLE) {
e56134bc
VS
1846 /* we keep both pipes enabled on 830 */
1847 WARN_ON(!IS_I830(dev_priv));
00d70b15 1848 return;
7ad25d48 1849 }
00d70b15
CW
1850
1851 I915_WRITE(reg, val | PIPECONF_ENABLE);
851855d8 1852 POSTING_READ(reg);
b7792d8b
VS
1853
1854 /*
8fedd64d
VS
1855 * Until the pipe starts PIPEDSL reads will return a stale value,
1856 * which causes an apparent vblank timestamp jump when PIPEDSL
1857 * resets to its proper value. That also messes up the frame count
1858 * when it's derived from the timestamps. So let's wait for the
1859 * pipe to start properly before we call drm_crtc_vblank_on()
b7792d8b 1860 */
32db0b65 1861 if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
8fedd64d 1862 intel_wait_for_pipe_scanline_moving(crtc);
b24e7179
JB
1863}
1864
4972f70a 1865static void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
b24e7179 1866{
4972f70a 1867 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
fac5e23e 1868 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4972f70a 1869 enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
575f7ab7 1870 enum pipe pipe = crtc->pipe;
f0f59a00 1871 i915_reg_t reg;
b24e7179
JB
1872 u32 val;
1873
9e2ee2dd
VS
1874 DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe));
1875
b24e7179
JB
1876 /*
1877 * Make sure planes won't keep trying to pump pixels to us,
1878 * or we might hang the display.
1879 */
51f5a096 1880 assert_planes_disabled(crtc);
b24e7179 1881
0b2599a4
VS
1882 trace_intel_pipe_disable(dev_priv, pipe);
1883
702e7a56 1884 reg = PIPECONF(cpu_transcoder);
b24e7179 1885 val = I915_READ(reg);
00d70b15
CW
1886 if ((val & PIPECONF_ENABLE) == 0)
1887 return;
1888
67adc644
VS
1889 /*
1890 * Double wide has implications for planes
1891 * so best keep it disabled when not needed.
1892 */
4972f70a 1893 if (old_crtc_state->double_wide)
67adc644
VS
1894 val &= ~PIPECONF_DOUBLE_WIDE;
1895
1896 /* Don't disable pipe or pipe PLLs if needed */
e56134bc 1897 if (!IS_I830(dev_priv))
67adc644
VS
1898 val &= ~PIPECONF_ENABLE;
1899
1900 I915_WRITE(reg, val);
1901 if ((val & PIPECONF_ENABLE) == 0)
4972f70a 1902 intel_wait_for_pipe_off(old_crtc_state);
b24e7179
JB
1903}
1904
832be82f
VS
1905static unsigned int intel_tile_size(const struct drm_i915_private *dev_priv)
1906{
cf819eff 1907 return IS_GEN(dev_priv, 2) ? 2048 : 4096;
832be82f
VS
1908}
1909
d88c4afd 1910static unsigned int
5d2a1950 1911intel_tile_width_bytes(const struct drm_framebuffer *fb, int color_plane)
7b49f948 1912{
d88c4afd 1913 struct drm_i915_private *dev_priv = to_i915(fb->dev);
5d2a1950 1914 unsigned int cpp = fb->format->cpp[color_plane];
d88c4afd
VS
1915
1916 switch (fb->modifier) {
2f075565 1917 case DRM_FORMAT_MOD_LINEAR:
54d4d719 1918 return intel_tile_size(dev_priv);
7b49f948 1919 case I915_FORMAT_MOD_X_TILED:
cf819eff 1920 if (IS_GEN(dev_priv, 2))
7b49f948
VS
1921 return 128;
1922 else
1923 return 512;
2e2adb05 1924 case I915_FORMAT_MOD_Y_TILED_CCS:
5d2a1950 1925 if (color_plane == 1)
2e2adb05
VS
1926 return 128;
1927 /* fall through */
7b49f948 1928 case I915_FORMAT_MOD_Y_TILED:
cf819eff 1929 if (IS_GEN(dev_priv, 2) || HAS_128_BYTE_Y_TILING(dev_priv))
7b49f948
VS
1930 return 128;
1931 else
1932 return 512;
2e2adb05 1933 case I915_FORMAT_MOD_Yf_TILED_CCS:
5d2a1950 1934 if (color_plane == 1)
2e2adb05
VS
1935 return 128;
1936 /* fall through */
7b49f948
VS
1937 case I915_FORMAT_MOD_Yf_TILED:
1938 switch (cpp) {
1939 case 1:
1940 return 64;
1941 case 2:
1942 case 4:
1943 return 128;
1944 case 8:
1945 case 16:
1946 return 256;
1947 default:
1948 MISSING_CASE(cpp);
1949 return cpp;
1950 }
1951 break;
1952 default:
d88c4afd 1953 MISSING_CASE(fb->modifier);
7b49f948
VS
1954 return cpp;
1955 }
1956}
1957
d88c4afd 1958static unsigned int
5d2a1950 1959intel_tile_height(const struct drm_framebuffer *fb, int color_plane)
a57ce0b2 1960{
54d4d719
VS
1961 return intel_tile_size(to_i915(fb->dev)) /
1962 intel_tile_width_bytes(fb, color_plane);
6761dd31
TU
1963}
1964
8d0deca8 1965/* Return the tile dimensions in pixel units */
5d2a1950 1966static void intel_tile_dims(const struct drm_framebuffer *fb, int color_plane,
8d0deca8 1967 unsigned int *tile_width,
d88c4afd 1968 unsigned int *tile_height)
8d0deca8 1969{
5d2a1950
VS
1970 unsigned int tile_width_bytes = intel_tile_width_bytes(fb, color_plane);
1971 unsigned int cpp = fb->format->cpp[color_plane];
8d0deca8
VS
1972
1973 *tile_width = tile_width_bytes / cpp;
d88c4afd 1974 *tile_height = intel_tile_size(to_i915(fb->dev)) / tile_width_bytes;
8d0deca8
VS
1975}
1976
6761dd31 1977unsigned int
d88c4afd 1978intel_fb_align_height(const struct drm_framebuffer *fb,
5d2a1950 1979 int color_plane, unsigned int height)
6761dd31 1980{
5d2a1950 1981 unsigned int tile_height = intel_tile_height(fb, color_plane);
832be82f
VS
1982
1983 return ALIGN(height, tile_height);
a57ce0b2
JB
1984}
1985
1663b9d6
VS
1986unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info)
1987{
1988 unsigned int size = 0;
1989 int i;
1990
1991 for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++)
1992 size += rot_info->plane[i].width * rot_info->plane[i].height;
1993
1994 return size;
1995}
1996
1a74fc0b
VS
1997unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info)
1998{
1999 unsigned int size = 0;
2000 int i;
2001
2002 for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++)
2003 size += rem_info->plane[i].width * rem_info->plane[i].height;
2004
2005 return size;
2006}
2007
75c82a53 2008static void
3465c580
VS
2009intel_fill_fb_ggtt_view(struct i915_ggtt_view *view,
2010 const struct drm_framebuffer *fb,
2011 unsigned int rotation)
f64b98cd 2012{
7b92c047 2013 view->type = I915_GGTT_VIEW_NORMAL;
bd2ef25d 2014 if (drm_rotation_90_or_270(rotation)) {
7b92c047 2015 view->type = I915_GGTT_VIEW_ROTATED;
8bab1193 2016 view->rotated = to_intel_framebuffer(fb)->rot_info;
2d7a215f
VS
2017 }
2018}
50470bb0 2019
fabac484
VS
2020static unsigned int intel_cursor_alignment(const struct drm_i915_private *dev_priv)
2021{
2022 if (IS_I830(dev_priv))
2023 return 16 * 1024;
2024 else if (IS_I85X(dev_priv))
2025 return 256;
d9e1551e
VS
2026 else if (IS_I845G(dev_priv) || IS_I865G(dev_priv))
2027 return 32;
fabac484
VS
2028 else
2029 return 4 * 1024;
2030}
2031
603525d7 2032static unsigned int intel_linear_alignment(const struct drm_i915_private *dev_priv)
4e9a86b6 2033{
c56b89f1 2034 if (INTEL_GEN(dev_priv) >= 9)
4e9a86b6 2035 return 256 * 1024;
c0f86832 2036 else if (IS_I965G(dev_priv) || IS_I965GM(dev_priv) ||
666a4537 2037 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
4e9a86b6 2038 return 128 * 1024;
c56b89f1 2039 else if (INTEL_GEN(dev_priv) >= 4)
4e9a86b6
VS
2040 return 4 * 1024;
2041 else
44c5905e 2042 return 0;
4e9a86b6
VS
2043}
2044
d88c4afd 2045static unsigned int intel_surf_alignment(const struct drm_framebuffer *fb,
5d2a1950 2046 int color_plane)
603525d7 2047{
d88c4afd
VS
2048 struct drm_i915_private *dev_priv = to_i915(fb->dev);
2049
b90c1ee1 2050 /* AUX_DIST needs only 4K alignment */
5d2a1950 2051 if (color_plane == 1)
b90c1ee1
VS
2052 return 4096;
2053
d88c4afd 2054 switch (fb->modifier) {
2f075565 2055 case DRM_FORMAT_MOD_LINEAR:
603525d7
VS
2056 return intel_linear_alignment(dev_priv);
2057 case I915_FORMAT_MOD_X_TILED:
d88c4afd 2058 if (INTEL_GEN(dev_priv) >= 9)
603525d7
VS
2059 return 256 * 1024;
2060 return 0;
2e2adb05
VS
2061 case I915_FORMAT_MOD_Y_TILED_CCS:
2062 case I915_FORMAT_MOD_Yf_TILED_CCS:
603525d7
VS
2063 case I915_FORMAT_MOD_Y_TILED:
2064 case I915_FORMAT_MOD_Yf_TILED:
2065 return 1 * 1024 * 1024;
2066 default:
d88c4afd 2067 MISSING_CASE(fb->modifier);
603525d7
VS
2068 return 0;
2069 }
2070}
2071
f7a02ad7
VS
2072static bool intel_plane_uses_fence(const struct intel_plane_state *plane_state)
2073{
2074 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
2075 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
2076
bb211c3d
VS
2077 return INTEL_GEN(dev_priv) < 4 ||
2078 (plane->has_fbc &&
2079 plane_state->view.type == I915_GGTT_VIEW_NORMAL);
f7a02ad7
VS
2080}
2081
058d88c4 2082struct i915_vma *
5935485f 2083intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
f5929c53 2084 const struct i915_ggtt_view *view,
f7a02ad7 2085 bool uses_fence,
5935485f 2086 unsigned long *out_flags)
6b95a207 2087{
850c4cdc 2088 struct drm_device *dev = fb->dev;
fac5e23e 2089 struct drm_i915_private *dev_priv = to_i915(dev);
850c4cdc 2090 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
1d264d91 2091 intel_wakeref_t wakeref;
058d88c4 2092 struct i915_vma *vma;
5935485f 2093 unsigned int pinctl;
6b95a207 2094 u32 alignment;
6b95a207 2095
ebcdd39e
MR
2096 WARN_ON(!mutex_is_locked(&dev->struct_mutex));
2097
d88c4afd 2098 alignment = intel_surf_alignment(fb, 0);
6b95a207 2099
693db184
CW
2100 /* Note that the w/a also requires 64 PTE of padding following the
2101 * bo. We currently fill all unused PTE with the shadow page and so
2102 * we should always have valid PTE following the scanout preventing
2103 * the VT-d warning.
2104 */
48f112fe 2105 if (intel_scanout_needs_vtd_wa(dev_priv) && alignment < 256 * 1024)
693db184
CW
2106 alignment = 256 * 1024;
2107
d6dd6843
PZ
2108 /*
2109 * Global gtt pte registers are special registers which actually forward
2110 * writes to a chunk of system memory. Which means that there is no risk
2111 * that the register values disappear as soon as we call
2112 * intel_runtime_pm_put(), so it is correct to wrap only the
2113 * pin/unpin/fence and not more.
2114 */
1d264d91 2115 wakeref = intel_runtime_pm_get(dev_priv);
6951e589 2116 i915_gem_object_lock(obj);
d6dd6843 2117
9db529aa
DV
2118 atomic_inc(&dev_priv->gpu_error.pending_fb_pin);
2119
5935485f
CW
2120 pinctl = 0;
2121
2122 /* Valleyview is definitely limited to scanning out the first
2123 * 512MiB. Lets presume this behaviour was inherited from the
2124 * g4x display engine and that all earlier gen are similarly
2125 * limited. Testing suggests that it is a little more
2126 * complicated than this. For example, Cherryview appears quite
2127 * happy to scanout from anywhere within its global aperture.
2128 */
b2ae318a 2129 if (HAS_GMCH(dev_priv))
5935485f
CW
2130 pinctl |= PIN_MAPPABLE;
2131
2132 vma = i915_gem_object_pin_to_display_plane(obj,
f5929c53 2133 alignment, view, pinctl);
49ef5294
CW
2134 if (IS_ERR(vma))
2135 goto err;
6b95a207 2136
f7a02ad7 2137 if (uses_fence && i915_vma_is_map_and_fenceable(vma)) {
85798ac9
VS
2138 int ret;
2139
49ef5294
CW
2140 /* Install a fence for tiled scan-out. Pre-i965 always needs a
2141 * fence, whereas 965+ only requires a fence if using
2142 * framebuffer compression. For simplicity, we always, when
2143 * possible, install a fence as the cost is not that onerous.
2144 *
2145 * If we fail to fence the tiled scanout, then either the
2146 * modeset will reject the change (which is highly unlikely as
2147 * the affected systems, all but one, do not have unmappable
2148 * space) or we will not be able to enable full powersaving
2149 * techniques (also likely not to apply due to various limits
2150 * FBC and the like impose on the size of the buffer, which
2151 * presumably we violated anyway with this unmappable buffer).
2152 * Anyway, it is presumably better to stumble onwards with
2153 * something and try to run the system in a "less than optimal"
2154 * mode that matches the user configuration.
2155 */
85798ac9
VS
2156 ret = i915_vma_pin_fence(vma);
2157 if (ret != 0 && INTEL_GEN(dev_priv) < 4) {
7509702b 2158 i915_gem_object_unpin_from_display_plane(vma);
85798ac9
VS
2159 vma = ERR_PTR(ret);
2160 goto err;
2161 }
2162
2163 if (ret == 0 && vma->fence)
5935485f 2164 *out_flags |= PLANE_HAS_FENCE;
9807216f 2165 }
6b95a207 2166
be1e3415 2167 i915_vma_get(vma);
49ef5294 2168err:
9db529aa
DV
2169 atomic_dec(&dev_priv->gpu_error.pending_fb_pin);
2170
6951e589 2171 i915_gem_object_unlock(obj);
1d264d91 2172 intel_runtime_pm_put(dev_priv, wakeref);
058d88c4 2173 return vma;
6b95a207
KH
2174}
2175
5935485f 2176void intel_unpin_fb_vma(struct i915_vma *vma, unsigned long flags)
1690e1eb 2177{
be1e3415 2178 lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
f64b98cd 2179
6951e589 2180 i915_gem_object_lock(vma->obj);
5935485f
CW
2181 if (flags & PLANE_HAS_FENCE)
2182 i915_vma_unpin_fence(vma);
058d88c4 2183 i915_gem_object_unpin_from_display_plane(vma);
6951e589
CW
2184 i915_gem_object_unlock(vma->obj);
2185
be1e3415 2186 i915_vma_put(vma);
1690e1eb
CW
2187}
2188
5d2a1950 2189static int intel_fb_pitch(const struct drm_framebuffer *fb, int color_plane,
ef78ec94
VS
2190 unsigned int rotation)
2191{
bd2ef25d 2192 if (drm_rotation_90_or_270(rotation))
5d2a1950 2193 return to_intel_framebuffer(fb)->rotated[color_plane].pitch;
ef78ec94 2194 else
5d2a1950 2195 return fb->pitches[color_plane];
ef78ec94
VS
2196}
2197
6687c906
VS
2198/*
2199 * Convert the x/y offsets into a linear offset.
2200 * Only valid with 0/180 degree rotation, which is fine since linear
2201 * offset is only used with linear buffers on pre-hsw and tiled buffers
2202 * with gen2/3, and 90/270 degree rotations isn't supported on any of them.
2203 */
2204u32 intel_fb_xy_to_linear(int x, int y,
2949056c 2205 const struct intel_plane_state *state,
5d2a1950 2206 int color_plane)
6687c906 2207{
2949056c 2208 const struct drm_framebuffer *fb = state->base.fb;
5d2a1950
VS
2209 unsigned int cpp = fb->format->cpp[color_plane];
2210 unsigned int pitch = state->color_plane[color_plane].stride;
6687c906
VS
2211
2212 return y * pitch + x * cpp;
2213}
2214
2215/*
2216 * Add the x/y offsets derived from fb->offsets[] to the user
2217 * specified plane src x/y offsets. The resulting x/y offsets
2218 * specify the start of scanout from the beginning of the gtt mapping.
2219 */
2220void intel_add_fb_offsets(int *x, int *y,
2949056c 2221 const struct intel_plane_state *state,
5d2a1950 2222 int color_plane)
6687c906
VS
2223
2224{
54d4d719
VS
2225 *x += state->color_plane[color_plane].x;
2226 *y += state->color_plane[color_plane].y;
6687c906
VS
2227}
2228
6d19a44c
VS
2229static u32 intel_adjust_tile_offset(int *x, int *y,
2230 unsigned int tile_width,
2231 unsigned int tile_height,
2232 unsigned int tile_size,
2233 unsigned int pitch_tiles,
2234 u32 old_offset,
2235 u32 new_offset)
29cf9491 2236{
b9b24038 2237 unsigned int pitch_pixels = pitch_tiles * tile_width;
29cf9491
VS
2238 unsigned int tiles;
2239
2240 WARN_ON(old_offset & (tile_size - 1));
2241 WARN_ON(new_offset & (tile_size - 1));
2242 WARN_ON(new_offset > old_offset);
2243
2244 tiles = (old_offset - new_offset) / tile_size;
2245
2246 *y += tiles / pitch_tiles * tile_height;
2247 *x += tiles % pitch_tiles * tile_width;
2248
b9b24038
VS
2249 /* minimize x in case it got needlessly big */
2250 *y += *x / pitch_pixels * tile_height;
2251 *x %= pitch_pixels;
2252
29cf9491
VS
2253 return new_offset;
2254}
2255
2a11b1b4
DP
2256static bool is_surface_linear(u64 modifier, int color_plane)
2257{
2258 return modifier == DRM_FORMAT_MOD_LINEAR;
2259}
2260
6d19a44c 2261static u32 intel_adjust_aligned_offset(int *x, int *y,
5d2a1950
VS
2262 const struct drm_framebuffer *fb,
2263 int color_plane,
6d19a44c 2264 unsigned int rotation,
df79cf44 2265 unsigned int pitch,
6d19a44c 2266 u32 old_offset, u32 new_offset)
66a2d927 2267{
6d19a44c 2268 struct drm_i915_private *dev_priv = to_i915(fb->dev);
5d2a1950 2269 unsigned int cpp = fb->format->cpp[color_plane];
66a2d927
VS
2270
2271 WARN_ON(new_offset > old_offset);
2272
2a11b1b4 2273 if (!is_surface_linear(fb->modifier, color_plane)) {
66a2d927
VS
2274 unsigned int tile_size, tile_width, tile_height;
2275 unsigned int pitch_tiles;
2276
2277 tile_size = intel_tile_size(dev_priv);
5d2a1950 2278 intel_tile_dims(fb, color_plane, &tile_width, &tile_height);
66a2d927 2279
bd2ef25d 2280 if (drm_rotation_90_or_270(rotation)) {
66a2d927
VS
2281 pitch_tiles = pitch / tile_height;
2282 swap(tile_width, tile_height);
2283 } else {
2284 pitch_tiles = pitch / (tile_width * cpp);
2285 }
2286
6d19a44c
VS
2287 intel_adjust_tile_offset(x, y, tile_width, tile_height,
2288 tile_size, pitch_tiles,
2289 old_offset, new_offset);
66a2d927
VS
2290 } else {
2291 old_offset += *y * pitch + *x * cpp;
2292
2293 *y = (old_offset - new_offset) / pitch;
2294 *x = ((old_offset - new_offset) - *y * pitch) / cpp;
2295 }
2296
2297 return new_offset;
2298}
2299
303ba695
VS
2300/*
2301 * Adjust the tile offset by moving the difference into
2302 * the x/y offsets.
2303 */
6d19a44c
VS
2304static u32 intel_plane_adjust_aligned_offset(int *x, int *y,
2305 const struct intel_plane_state *state,
5d2a1950 2306 int color_plane,
6d19a44c 2307 u32 old_offset, u32 new_offset)
303ba695 2308{
5d2a1950 2309 return intel_adjust_aligned_offset(x, y, state->base.fb, color_plane,
6d19a44c 2310 state->base.rotation,
5d2a1950 2311 state->color_plane[color_plane].stride,
6d19a44c 2312 old_offset, new_offset);
303ba695
VS
2313}
2314
8d0deca8 2315/*
6d19a44c 2316 * Computes the aligned offset to the base tile and adjusts
8d0deca8
VS
2317 * x, y. bytes per pixel is assumed to be a power-of-two.
2318 *
2319 * In the 90/270 rotated case, x and y are assumed
2320 * to be already rotated to match the rotated GTT view, and
2321 * pitch is the tile_height aligned framebuffer height.
6687c906
VS
2322 *
2323 * This function is used when computing the derived information
2324 * under intel_framebuffer, so using any of that information
2325 * here is not allowed. Anything under drm_framebuffer can be
2326 * used. This is why the user has to pass in the pitch since it
2327 * is specified in the rotated orientation.
8d0deca8 2328 */
6d19a44c
VS
2329static u32 intel_compute_aligned_offset(struct drm_i915_private *dev_priv,
2330 int *x, int *y,
5d2a1950
VS
2331 const struct drm_framebuffer *fb,
2332 int color_plane,
6d19a44c
VS
2333 unsigned int pitch,
2334 unsigned int rotation,
2335 u32 alignment)
c2c75131 2336{
5d2a1950 2337 unsigned int cpp = fb->format->cpp[color_plane];
6687c906 2338 u32 offset, offset_aligned;
29cf9491 2339
29cf9491
VS
2340 if (alignment)
2341 alignment--;
2342
2a11b1b4 2343 if (!is_surface_linear(fb->modifier, color_plane)) {
8d0deca8
VS
2344 unsigned int tile_size, tile_width, tile_height;
2345 unsigned int tile_rows, tiles, pitch_tiles;
c2c75131 2346
d843310d 2347 tile_size = intel_tile_size(dev_priv);
5d2a1950 2348 intel_tile_dims(fb, color_plane, &tile_width, &tile_height);
8d0deca8 2349
bd2ef25d 2350 if (drm_rotation_90_or_270(rotation)) {
8d0deca8
VS
2351 pitch_tiles = pitch / tile_height;
2352 swap(tile_width, tile_height);
2353 } else {
2354 pitch_tiles = pitch / (tile_width * cpp);
2355 }
d843310d
VS
2356
2357 tile_rows = *y / tile_height;
2358 *y %= tile_height;
c2c75131 2359
8d0deca8
VS
2360 tiles = *x / tile_width;
2361 *x %= tile_width;
bc752862 2362
29cf9491
VS
2363 offset = (tile_rows * pitch_tiles + tiles) * tile_size;
2364 offset_aligned = offset & ~alignment;
bc752862 2365
6d19a44c
VS
2366 intel_adjust_tile_offset(x, y, tile_width, tile_height,
2367 tile_size, pitch_tiles,
2368 offset, offset_aligned);
29cf9491 2369 } else {
bc752862 2370 offset = *y * pitch + *x * cpp;
29cf9491
VS
2371 offset_aligned = offset & ~alignment;
2372
4e9a86b6
VS
2373 *y = (offset & alignment) / pitch;
2374 *x = ((offset & alignment) - *y * pitch) / cpp;
bc752862 2375 }
29cf9491
VS
2376
2377 return offset_aligned;
c2c75131
DV
2378}
2379
6d19a44c
VS
2380static u32 intel_plane_compute_aligned_offset(int *x, int *y,
2381 const struct intel_plane_state *state,
5d2a1950 2382 int color_plane)
6687c906 2383{
1e7b4fd8
VS
2384 struct intel_plane *intel_plane = to_intel_plane(state->base.plane);
2385 struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
2949056c
VS
2386 const struct drm_framebuffer *fb = state->base.fb;
2387 unsigned int rotation = state->base.rotation;
5d2a1950 2388 int pitch = state->color_plane[color_plane].stride;
1e7b4fd8
VS
2389 u32 alignment;
2390
2391 if (intel_plane->id == PLANE_CURSOR)
2392 alignment = intel_cursor_alignment(dev_priv);
2393 else
5d2a1950 2394 alignment = intel_surf_alignment(fb, color_plane);
6687c906 2395
5d2a1950 2396 return intel_compute_aligned_offset(dev_priv, x, y, fb, color_plane,
6d19a44c 2397 pitch, rotation, alignment);
6687c906
VS
2398}
2399
303ba695
VS
2400/* Convert the fb->offset[] into x/y offsets */
2401static int intel_fb_offset_to_xy(int *x, int *y,
5d2a1950
VS
2402 const struct drm_framebuffer *fb,
2403 int color_plane)
6687c906 2404{
303ba695 2405 struct drm_i915_private *dev_priv = to_i915(fb->dev);
70bbe53c 2406 unsigned int height;
6687c906 2407
303ba695 2408 if (fb->modifier != DRM_FORMAT_MOD_LINEAR &&
70bbe53c
VS
2409 fb->offsets[color_plane] % intel_tile_size(dev_priv)) {
2410 DRM_DEBUG_KMS("Misaligned offset 0x%08x for color plane %d\n",
2411 fb->offsets[color_plane], color_plane);
303ba695 2412 return -EINVAL;
70bbe53c
VS
2413 }
2414
2415 height = drm_framebuffer_plane_height(fb->height, fb, color_plane);
2416 height = ALIGN(height, intel_tile_height(fb, color_plane));
2417
2418 /* Catch potential overflows early */
2419 if (add_overflows_t(u32, mul_u32_u32(height, fb->pitches[color_plane]),
2420 fb->offsets[color_plane])) {
2421 DRM_DEBUG_KMS("Bad offset 0x%08x or pitch %d for color plane %d\n",
2422 fb->offsets[color_plane], fb->pitches[color_plane],
2423 color_plane);
2424 return -ERANGE;
2425 }
303ba695
VS
2426
2427 *x = 0;
2428 *y = 0;
2429
6d19a44c 2430 intel_adjust_aligned_offset(x, y,
5d2a1950
VS
2431 fb, color_plane, DRM_MODE_ROTATE_0,
2432 fb->pitches[color_plane],
2433 fb->offsets[color_plane], 0);
303ba695
VS
2434
2435 return 0;
6687c906
VS
2436}
2437
ba3f4d0a 2438static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
72618ebf
VS
2439{
2440 switch (fb_modifier) {
2441 case I915_FORMAT_MOD_X_TILED:
2442 return I915_TILING_X;
2443 case I915_FORMAT_MOD_Y_TILED:
2e2adb05 2444 case I915_FORMAT_MOD_Y_TILED_CCS:
72618ebf
VS
2445 return I915_TILING_Y;
2446 default:
2447 return I915_TILING_NONE;
2448 }
2449}
2450
16af25fa
VS
2451/*
2452 * From the Sky Lake PRM:
2453 * "The Color Control Surface (CCS) contains the compression status of
2454 * the cache-line pairs. The compression state of the cache-line pair
2455 * is specified by 2 bits in the CCS. Each CCS cache-line represents
2456 * an area on the main surface of 16 x16 sets of 128 byte Y-tiled
2457 * cache-line-pairs. CCS is always Y tiled."
2458 *
2459 * Since cache line pairs refers to horizontally adjacent cache lines,
2460 * each cache line in the CCS corresponds to an area of 32x16 cache
2461 * lines on the main surface. Since each pixel is 4 bytes, this gives
2462 * us a ratio of one byte in the CCS for each 8x16 pixels in the
2463 * main surface.
2464 */
bbfb6ce8
VS
2465static const struct drm_format_info ccs_formats[] = {
2466 { .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
2467 { .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
2468 { .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
2469 { .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
2470};
2471
2472static const struct drm_format_info *
2473lookup_format_info(const struct drm_format_info formats[],
2474 int num_formats, u32 format)
2475{
2476 int i;
2477
2478 for (i = 0; i < num_formats; i++) {
2479 if (formats[i].format == format)
2480 return &formats[i];
2481 }
2482
2483 return NULL;
2484}
2485
2486static const struct drm_format_info *
2487intel_get_format_info(const struct drm_mode_fb_cmd2 *cmd)
2488{
2489 switch (cmd->modifier[0]) {
2490 case I915_FORMAT_MOD_Y_TILED_CCS:
2491 case I915_FORMAT_MOD_Yf_TILED_CCS:
2492 return lookup_format_info(ccs_formats,
2493 ARRAY_SIZE(ccs_formats),
2494 cmd->pixel_format);
2495 default:
2496 return NULL;
2497 }
2498}
2499
63eaf9ac
DP
2500bool is_ccs_modifier(u64 modifier)
2501{
2502 return modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
2503 modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
2504}
2505
54d4d719
VS
2506u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
2507 u32 pixel_format, u64 modifier)
a88c40eb
VS
2508{
2509 struct intel_crtc *crtc;
2510 struct intel_plane *plane;
2511
2512 /*
2513 * We assume the primary plane for pipe A has
2514 * the highest stride limits of them all.
2515 */
2516 crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A);
2517 plane = to_intel_plane(crtc->base.primary);
2518
2519 return plane->max_stride(plane, pixel_format, modifier,
2520 DRM_MODE_ROTATE_0);
2521}
2522
54d4d719
VS
2523static
2524u32 intel_fb_max_stride(struct drm_i915_private *dev_priv,
2525 u32 pixel_format, u64 modifier)
2526{
20330129
VS
2527 /*
2528 * Arbitrary limit for gen4+ chosen to match the
2529 * render engine max stride.
2530 *
2531 * The new CCS hash mode makes remapping impossible
2532 */
2533 if (!is_ccs_modifier(modifier)) {
2534 if (INTEL_GEN(dev_priv) >= 7)
2535 return 256*1024;
2536 else if (INTEL_GEN(dev_priv) >= 4)
2537 return 128*1024;
2538 }
2539
54d4d719
VS
2540 return intel_plane_fb_max_stride(dev_priv, pixel_format, modifier);
2541}
2542
a88c40eb
VS
2543static u32
2544intel_fb_stride_alignment(const struct drm_framebuffer *fb, int color_plane)
2545{
54d4d719
VS
2546 struct drm_i915_private *dev_priv = to_i915(fb->dev);
2547
2548 if (fb->modifier == DRM_FORMAT_MOD_LINEAR) {
2549 u32 max_stride = intel_plane_fb_max_stride(dev_priv,
2550 fb->format->format,
2551 fb->modifier);
2552
2553 /*
2554 * To make remapping with linear generally feasible
2555 * we need the stride to be page aligned.
2556 */
2557 if (fb->pitches[color_plane] > max_stride)
2558 return intel_tile_size(dev_priv);
2559 else
2560 return 64;
2561 } else {
a88c40eb 2562 return intel_tile_width_bytes(fb, color_plane);
54d4d719
VS
2563 }
2564}
2565
2566bool intel_plane_can_remap(const struct intel_plane_state *plane_state)
2567{
2568 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
2569 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
2570 const struct drm_framebuffer *fb = plane_state->base.fb;
2571 int i;
2572
2573 /* We don't want to deal with remapping with cursors */
2574 if (plane->id == PLANE_CURSOR)
2575 return false;
2576
2577 /*
2578 * The display engine limits already match/exceed the
2579 * render engine limits, so not much point in remapping.
2580 * Would also need to deal with the fence POT alignment
2581 * and gen2 2KiB GTT tile size.
2582 */
2583 if (INTEL_GEN(dev_priv) < 4)
2584 return false;
2585
2586 /*
2587 * The new CCS hash mode isn't compatible with remapping as
2588 * the virtual address of the pages affects the compressed data.
2589 */
2590 if (is_ccs_modifier(fb->modifier))
2591 return false;
2592
2593 /* Linear needs a page aligned stride for remapping */
2594 if (fb->modifier == DRM_FORMAT_MOD_LINEAR) {
2595 unsigned int alignment = intel_tile_size(dev_priv) - 1;
2596
2597 for (i = 0; i < fb->format->num_planes; i++) {
2598 if (fb->pitches[i] & alignment)
2599 return false;
2600 }
2601 }
2602
2603 return true;
2604}
2605
2606static bool intel_plane_needs_remap(const struct intel_plane_state *plane_state)
2607{
2608 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
2609 const struct drm_framebuffer *fb = plane_state->base.fb;
2610 unsigned int rotation = plane_state->base.rotation;
2611 u32 stride, max_stride;
2612
2613 /*
2614 * No remapping for invisible planes since we don't have
2615 * an actual source viewport to remap.
2616 */
2617 if (!plane_state->base.visible)
2618 return false;
2619
2620 if (!intel_plane_can_remap(plane_state))
2621 return false;
2622
2623 /*
2624 * FIXME: aux plane limits on gen9+ are
2625 * unclear in Bspec, for now no checking.
2626 */
2627 stride = intel_fb_pitch(fb, 0, rotation);
2628 max_stride = plane->max_stride(plane, fb->format->format,
2629 fb->modifier, rotation);
2630
2631 return stride > max_stride;
a88c40eb
VS
2632}
2633
6687c906
VS
2634static int
2635intel_fill_fb_info(struct drm_i915_private *dev_priv,
2636 struct drm_framebuffer *fb)
2637{
2638 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
2639 struct intel_rotation_info *rot_info = &intel_fb->rot_info;
a5ff7a45 2640 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
6687c906
VS
2641 u32 gtt_offset_rotated = 0;
2642 unsigned int max_size = 0;
bcb0b461 2643 int i, num_planes = fb->format->num_planes;
6687c906
VS
2644 unsigned int tile_size = intel_tile_size(dev_priv);
2645
2646 for (i = 0; i < num_planes; i++) {
2647 unsigned int width, height;
2648 unsigned int cpp, size;
2649 u32 offset;
2650 int x, y;
303ba695 2651 int ret;
6687c906 2652
353c8598 2653 cpp = fb->format->cpp[i];
145fcb11
VS
2654 width = drm_framebuffer_plane_width(fb->width, fb, i);
2655 height = drm_framebuffer_plane_height(fb->height, fb, i);
6687c906 2656
303ba695
VS
2657 ret = intel_fb_offset_to_xy(&x, &y, fb, i);
2658 if (ret) {
2659 DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
2660 i, fb->offsets[i]);
2661 return ret;
2662 }
6687c906 2663
63eaf9ac 2664 if (is_ccs_modifier(fb->modifier) && i == 1) {
2e2adb05
VS
2665 int hsub = fb->format->hsub;
2666 int vsub = fb->format->vsub;
2667 int tile_width, tile_height;
2668 int main_x, main_y;
2669 int ccs_x, ccs_y;
2670
2671 intel_tile_dims(fb, i, &tile_width, &tile_height);
303ba695
VS
2672 tile_width *= hsub;
2673 tile_height *= vsub;
2e2adb05 2674
303ba695
VS
2675 ccs_x = (x * hsub) % tile_width;
2676 ccs_y = (y * vsub) % tile_height;
2677 main_x = intel_fb->normal[0].x % tile_width;
2678 main_y = intel_fb->normal[0].y % tile_height;
2e2adb05
VS
2679
2680 /*
2681 * CCS doesn't have its own x/y offset register, so the intra CCS tile
2682 * x/y offsets must match between CCS and the main surface.
2683 */
2684 if (main_x != ccs_x || main_y != ccs_y) {
2685 DRM_DEBUG_KMS("Bad CCS x/y (main %d,%d ccs %d,%d) full (main %d,%d ccs %d,%d)\n",
2686 main_x, main_y,
2687 ccs_x, ccs_y,
2688 intel_fb->normal[0].x,
2689 intel_fb->normal[0].y,
2690 x, y);
2691 return -EINVAL;
2692 }
2693 }
2694
60d5f2a4
VS
2695 /*
2696 * The fence (if used) is aligned to the start of the object
2697 * so having the framebuffer wrap around across the edge of the
2698 * fenced region doesn't really work. We have no API to configure
2699 * the fence start offset within the object (nor could we probably
2700 * on gen2/3). So it's just easier if we just require that the
2701 * fb layout agrees with the fence layout. We already check that the
2702 * fb stride matches the fence stride elsewhere.
2703 */
a5ff7a45 2704 if (i == 0 && i915_gem_object_is_tiled(obj) &&
60d5f2a4 2705 (x + width) * cpp > fb->pitches[i]) {
144cc143
VS
2706 DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
2707 i, fb->offsets[i]);
60d5f2a4
VS
2708 return -EINVAL;
2709 }
2710
6687c906
VS
2711 /*
2712 * First pixel of the framebuffer from
2713 * the start of the normal gtt mapping.
2714 */
2715 intel_fb->normal[i].x = x;
2716 intel_fb->normal[i].y = y;
2717
6d19a44c
VS
2718 offset = intel_compute_aligned_offset(dev_priv, &x, &y, fb, i,
2719 fb->pitches[i],
2720 DRM_MODE_ROTATE_0,
2721 tile_size);
6687c906
VS
2722 offset /= tile_size;
2723
2a11b1b4 2724 if (!is_surface_linear(fb->modifier, i)) {
6687c906
VS
2725 unsigned int tile_width, tile_height;
2726 unsigned int pitch_tiles;
2727 struct drm_rect r;
2728
d88c4afd 2729 intel_tile_dims(fb, i, &tile_width, &tile_height);
6687c906
VS
2730
2731 rot_info->plane[i].offset = offset;
2732 rot_info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i], tile_width * cpp);
2733 rot_info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
2734 rot_info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);
2735
2736 intel_fb->rotated[i].pitch =
2737 rot_info->plane[i].height * tile_height;
2738
2739 /* how many tiles does this plane need */
2740 size = rot_info->plane[i].stride * rot_info->plane[i].height;
2741 /*
2742 * If the plane isn't horizontally tile aligned,
2743 * we need one more tile.
2744 */
2745 if (x != 0)
2746 size++;
2747
2748 /* rotate the x/y offsets to match the GTT view */
2749 r.x1 = x;
2750 r.y1 = y;
2751 r.x2 = x + width;
2752 r.y2 = y + height;
2753 drm_rect_rotate(&r,
2754 rot_info->plane[i].width * tile_width,
2755 rot_info->plane[i].height * tile_height,
c2c446ad 2756 DRM_MODE_ROTATE_270);
6687c906
VS
2757 x = r.x1;
2758 y = r.y1;
2759
2760 /* rotate the tile dimensions to match the GTT view */
2761 pitch_tiles = intel_fb->rotated[i].pitch / tile_height;
2762 swap(tile_width, tile_height);
2763
2764 /*
2765 * We only keep the x/y offsets, so push all of the
2766 * gtt offset into the x/y offsets.
2767 */
6d19a44c
VS
2768 intel_adjust_tile_offset(&x, &y,
2769 tile_width, tile_height,
2770 tile_size, pitch_tiles,
2771 gtt_offset_rotated * tile_size, 0);
6687c906
VS
2772
2773 gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height;
2774
2775 /*
2776 * First pixel of the framebuffer from
2777 * the start of the rotated gtt mapping.
2778 */
2779 intel_fb->rotated[i].x = x;
2780 intel_fb->rotated[i].y = y;
2781 } else {
2782 size = DIV_ROUND_UP((y + height) * fb->pitches[i] +
2783 x * cpp, tile_size);
2784 }
2785
2786 /* how many tiles in total needed in the bo */
2787 max_size = max(max_size, offset + size);
2788 }
2789
4e05047d
VS
2790 if (mul_u32_u32(max_size, tile_size) > obj->base.size) {
2791 DRM_DEBUG_KMS("fb too big for bo (need %llu bytes, have %zu bytes)\n",
2792 mul_u32_u32(max_size, tile_size), obj->base.size);
6687c906
VS
2793 return -EINVAL;
2794 }
2795
2796 return 0;
2797}
2798
54d4d719
VS
2799static void
2800intel_plane_remap_gtt(struct intel_plane_state *plane_state)
2801{
2802 struct drm_i915_private *dev_priv =
2803 to_i915(plane_state->base.plane->dev);
2804 struct drm_framebuffer *fb = plane_state->base.fb;
2805 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
2806 struct intel_rotation_info *info = &plane_state->view.rotated;
2807 unsigned int rotation = plane_state->base.rotation;
2808 int i, num_planes = fb->format->num_planes;
2809 unsigned int tile_size = intel_tile_size(dev_priv);
2810 unsigned int src_x, src_y;
2811 unsigned int src_w, src_h;
2812 u32 gtt_offset = 0;
2813
2814 memset(&plane_state->view, 0, sizeof(plane_state->view));
2815 plane_state->view.type = drm_rotation_90_or_270(rotation) ?
2816 I915_GGTT_VIEW_ROTATED : I915_GGTT_VIEW_REMAPPED;
2817
2818 src_x = plane_state->base.src.x1 >> 16;
2819 src_y = plane_state->base.src.y1 >> 16;
2820 src_w = drm_rect_width(&plane_state->base.src) >> 16;
2821 src_h = drm_rect_height(&plane_state->base.src) >> 16;
2822
2823 WARN_ON(is_ccs_modifier(fb->modifier));
2824
2825 /* Make src coordinates relative to the viewport */
2826 drm_rect_translate(&plane_state->base.src,
2827 -(src_x << 16), -(src_y << 16));
2828
2829 /* Rotate src coordinates to match rotated GTT view */
2830 if (drm_rotation_90_or_270(rotation))
2831 drm_rect_rotate(&plane_state->base.src,
2832 src_w << 16, src_h << 16,
2833 DRM_MODE_ROTATE_270);
2834
2835 for (i = 0; i < num_planes; i++) {
2836 unsigned int hsub = i ? fb->format->hsub : 1;
2837 unsigned int vsub = i ? fb->format->vsub : 1;
2838 unsigned int cpp = fb->format->cpp[i];
2839 unsigned int tile_width, tile_height;
2840 unsigned int width, height;
2841 unsigned int pitch_tiles;
2842 unsigned int x, y;
2843 u32 offset;
2844
2845 intel_tile_dims(fb, i, &tile_width, &tile_height);
2846
2847 x = src_x / hsub;
2848 y = src_y / vsub;
2849 width = src_w / hsub;
2850 height = src_h / vsub;
2851
2852 /*
2853 * First pixel of the src viewport from the
2854 * start of the normal gtt mapping.
2855 */
2856 x += intel_fb->normal[i].x;
2857 y += intel_fb->normal[i].y;
2858
2859 offset = intel_compute_aligned_offset(dev_priv, &x, &y,
2860 fb, i, fb->pitches[i],
2861 DRM_MODE_ROTATE_0, tile_size);
2862 offset /= tile_size;
2863
2864 info->plane[i].offset = offset;
2865 info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i],
2866 tile_width * cpp);
2867 info->plane[i].width = DIV_ROUND_UP(x + width, tile_width);
2868 info->plane[i].height = DIV_ROUND_UP(y + height, tile_height);
2869
2870 if (drm_rotation_90_or_270(rotation)) {
2871 struct drm_rect r;
2872
2873 /* rotate the x/y offsets to match the GTT view */
2874 r.x1 = x;
2875 r.y1 = y;
2876 r.x2 = x + width;
2877 r.y2 = y + height;
2878 drm_rect_rotate(&r,
2879 info->plane[i].width * tile_width,
2880 info->plane[i].height * tile_height,
2881 DRM_MODE_ROTATE_270);
2882 x = r.x1;
2883 y = r.y1;
2884
2885 pitch_tiles = info->plane[i].height;
2886 plane_state->color_plane[i].stride = pitch_tiles * tile_height;
2887
2888 /* rotate the tile dimensions to match the GTT view */
2889 swap(tile_width, tile_height);
2890 } else {
2891 pitch_tiles = info->plane[i].width;
2892 plane_state->color_plane[i].stride = pitch_tiles * tile_width * cpp;
2893 }
2894
2895 /*
2896 * We only keep the x/y offsets, so push all of the
2897 * gtt offset into the x/y offsets.
2898 */
2899 intel_adjust_tile_offset(&x, &y,
2900 tile_width, tile_height,
2901 tile_size, pitch_tiles,
2902 gtt_offset * tile_size, 0);
2903
2904 gtt_offset += info->plane[i].width * info->plane[i].height;
2905
2906 plane_state->color_plane[i].offset = 0;
2907 plane_state->color_plane[i].x = x;
2908 plane_state->color_plane[i].y = y;
2909 }
2910}
2911
2912static int
2913intel_plane_compute_gtt(struct intel_plane_state *plane_state)
2914{
2915 const struct intel_framebuffer *fb =
2916 to_intel_framebuffer(plane_state->base.fb);
2917 unsigned int rotation = plane_state->base.rotation;
2918 int i, num_planes;
2919
2920 if (!fb)
2921 return 0;
2922
2923 num_planes = fb->base.format->num_planes;
2924
2925 if (intel_plane_needs_remap(plane_state)) {
2926 intel_plane_remap_gtt(plane_state);
2927
2928 /*
2929 * Sometimes even remapping can't overcome
2930 * the stride limitations :( Can happen with
2931 * big plane sizes and suitably misaligned
2932 * offsets.
2933 */
2934 return intel_plane_check_stride(plane_state);
2935 }
2936
2937 intel_fill_fb_ggtt_view(&plane_state->view, &fb->base, rotation);
2938
2939 for (i = 0; i < num_planes; i++) {
2940 plane_state->color_plane[i].stride = intel_fb_pitch(&fb->base, i, rotation);
2941 plane_state->color_plane[i].offset = 0;
2942
2943 if (drm_rotation_90_or_270(rotation)) {
2944 plane_state->color_plane[i].x = fb->rotated[i].x;
2945 plane_state->color_plane[i].y = fb->rotated[i].y;
2946 } else {
2947 plane_state->color_plane[i].x = fb->normal[i].x;
2948 plane_state->color_plane[i].y = fb->normal[i].y;
2949 }
2950 }
2951
2952 /* Rotate src coordinates to match rotated GTT view */
2953 if (drm_rotation_90_or_270(rotation))
2954 drm_rect_rotate(&plane_state->base.src,
2955 fb->base.width << 16, fb->base.height << 16,
2956 DRM_MODE_ROTATE_270);
2957
2958 return intel_plane_check_stride(plane_state);
2959}
2960
b35d63fa 2961static int i9xx_format_to_fourcc(int format)
46f297fb
JB
2962{
2963 switch (format) {
2964 case DISPPLANE_8BPP:
2965 return DRM_FORMAT_C8;
2966 case DISPPLANE_BGRX555:
2967 return DRM_FORMAT_XRGB1555;
2968 case DISPPLANE_BGRX565:
2969 return DRM_FORMAT_RGB565;
2970 default:
2971 case DISPPLANE_BGRX888:
2972 return DRM_FORMAT_XRGB8888;
2973 case DISPPLANE_RGBX888:
2974 return DRM_FORMAT_XBGR8888;
2975 case DISPPLANE_BGRX101010:
2976 return DRM_FORMAT_XRGB2101010;
2977 case DISPPLANE_RGBX101010:
2978 return DRM_FORMAT_XBGR2101010;
2979 }
2980}
2981
ddf34319 2982int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
bc8d7dff
DL
2983{
2984 switch (format) {
2985 case PLANE_CTL_FORMAT_RGB_565:
2986 return DRM_FORMAT_RGB565;
f34a291c
MK
2987 case PLANE_CTL_FORMAT_NV12:
2988 return DRM_FORMAT_NV12;
df7d4156
JPH
2989 case PLANE_CTL_FORMAT_P010:
2990 return DRM_FORMAT_P010;
2991 case PLANE_CTL_FORMAT_P012:
2992 return DRM_FORMAT_P012;
2993 case PLANE_CTL_FORMAT_P016:
2994 return DRM_FORMAT_P016;
296e9b19
SS
2995 case PLANE_CTL_FORMAT_Y210:
2996 return DRM_FORMAT_Y210;
2997 case PLANE_CTL_FORMAT_Y212:
2998 return DRM_FORMAT_Y212;
2999 case PLANE_CTL_FORMAT_Y216:
3000 return DRM_FORMAT_Y216;
3001 case PLANE_CTL_FORMAT_Y410:
ff01e697 3002 return DRM_FORMAT_XVYU2101010;
296e9b19 3003 case PLANE_CTL_FORMAT_Y412:
ff01e697 3004 return DRM_FORMAT_XVYU12_16161616;
296e9b19 3005 case PLANE_CTL_FORMAT_Y416:
ff01e697 3006 return DRM_FORMAT_XVYU16161616;
bc8d7dff
DL
3007 default:
3008 case PLANE_CTL_FORMAT_XRGB_8888:
3009 if (rgb_order) {
3010 if (alpha)
3011 return DRM_FORMAT_ABGR8888;
3012 else
3013 return DRM_FORMAT_XBGR8888;
3014 } else {
3015 if (alpha)
3016 return DRM_FORMAT_ARGB8888;
3017 else
3018 return DRM_FORMAT_XRGB8888;
3019 }
3020 case PLANE_CTL_FORMAT_XRGB_2101010:
3021 if (rgb_order)
3022 return DRM_FORMAT_XBGR2101010;
3023 else
3024 return DRM_FORMAT_XRGB2101010;
a94bed60
KS
3025 case PLANE_CTL_FORMAT_XRGB_16161616F:
3026 if (rgb_order) {
3027 if (alpha)
3028 return DRM_FORMAT_ABGR16161616F;
3029 else
3030 return DRM_FORMAT_XBGR16161616F;
3031 } else {
3032 if (alpha)
3033 return DRM_FORMAT_ARGB16161616F;
3034 else
3035 return DRM_FORMAT_XRGB16161616F;
3036 }
bc8d7dff
DL
3037 }
3038}
3039
5724dbd1 3040static bool
f6936e29
DV
3041intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
3042 struct intel_initial_plane_config *plane_config)
46f297fb
JB
3043{
3044 struct drm_device *dev = crtc->base.dev;
3badb49f 3045 struct drm_i915_private *dev_priv = to_i915(dev);
46f297fb
JB
3046 struct drm_i915_gem_object *obj = NULL;
3047 struct drm_mode_fb_cmd2 mode_cmd = { 0 };
2d14030b 3048 struct drm_framebuffer *fb = &plane_config->fb->base;
f37b5c2b
DV
3049 u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
3050 u32 size_aligned = round_up(plane_config->base + plane_config->size,
3051 PAGE_SIZE);
3052
3053 size_aligned -= base_aligned;
46f297fb 3054
ff2652ea
CW
3055 if (plane_config->size == 0)
3056 return false;
3057
3badb49f
PZ
3058 /* If the FB is too big, just don't use it since fbdev is not very
3059 * important and we should probably use that space with FBC or other
3060 * features. */
b1ace601 3061 if (size_aligned * 2 > dev_priv->stolen_usable_size)
3badb49f
PZ
3062 return false;
3063
914a4fd8
ID
3064 switch (fb->modifier) {
3065 case DRM_FORMAT_MOD_LINEAR:
3066 case I915_FORMAT_MOD_X_TILED:
3067 case I915_FORMAT_MOD_Y_TILED:
3068 break;
3069 default:
3070 DRM_DEBUG_DRIVER("Unsupported modifier for initial FB: 0x%llx\n",
3071 fb->modifier);
3072 return false;
3073 }
3074
12c83d99 3075 mutex_lock(&dev->struct_mutex);
187685cb 3076 obj = i915_gem_object_create_stolen_for_preallocated(dev_priv,
f37b5c2b
DV
3077 base_aligned,
3078 base_aligned,
3079 size_aligned);
24dbf51a
CW
3080 mutex_unlock(&dev->struct_mutex);
3081 if (!obj)
484b41dd 3082 return false;
46f297fb 3083
914a4fd8
ID
3084 switch (plane_config->tiling) {
3085 case I915_TILING_NONE:
3086 break;
3087 case I915_TILING_X:
3088 case I915_TILING_Y:
3089 obj->tiling_and_stride = fb->pitches[0] | plane_config->tiling;
3090 break;
3091 default:
3092 MISSING_CASE(plane_config->tiling);
3093 return false;
3094 }
46f297fb 3095
438b74a5 3096 mode_cmd.pixel_format = fb->format->format;
6bf129df
DL
3097 mode_cmd.width = fb->width;
3098 mode_cmd.height = fb->height;
3099 mode_cmd.pitches[0] = fb->pitches[0];
bae781b2 3100 mode_cmd.modifier[0] = fb->modifier;
18c5247e 3101 mode_cmd.flags = DRM_MODE_FB_MODIFIERS;
46f297fb 3102
24dbf51a 3103 if (intel_framebuffer_init(to_intel_framebuffer(fb), obj, &mode_cmd)) {
46f297fb
JB
3104 DRM_DEBUG_KMS("intel fb init failed\n");
3105 goto out_unref_obj;
3106 }
12c83d99 3107
484b41dd 3108
f6936e29 3109 DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
484b41dd 3110 return true;
46f297fb
JB
3111
3112out_unref_obj:
f8c417cd 3113 i915_gem_object_put(obj);
484b41dd
JB
3114 return false;
3115}
3116
e9728bd8
VS
3117static void
3118intel_set_plane_visible(struct intel_crtc_state *crtc_state,
3119 struct intel_plane_state *plane_state,
3120 bool visible)
3121{
3122 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
3123
3124 plane_state->base.visible = visible;
3125
62358aa4 3126 if (visible)
40560e26 3127 crtc_state->base.plane_mask |= drm_plane_mask(&plane->base);
62358aa4 3128 else
40560e26 3129 crtc_state->base.plane_mask &= ~drm_plane_mask(&plane->base);
e9728bd8
VS
3130}
3131
62358aa4
VS
3132static void fixup_active_planes(struct intel_crtc_state *crtc_state)
3133{
3134 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
3135 struct drm_plane *plane;
3136
3137 /*
3138 * Active_planes aliases if multiple "primary" or cursor planes
3139 * have been used on the same (or wrong) pipe. plane_mask uses
3140 * unique ids, hence we can use that to reconstruct active_planes.
3141 */
3142 crtc_state->active_planes = 0;
3143
3144 drm_for_each_plane_mask(plane, &dev_priv->drm,
3145 crtc_state->base.plane_mask)
3146 crtc_state->active_planes |= BIT(to_intel_plane(plane)->id);
3147}
3148
b1e01595
VS
3149static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
3150 struct intel_plane *plane)
3151{
3152 struct intel_crtc_state *crtc_state =
3153 to_intel_crtc_state(crtc->base.state);
3154 struct intel_plane_state *plane_state =
3155 to_intel_plane_state(plane->base.state);
3156
7a4a2a46
VS
3157 DRM_DEBUG_KMS("Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
3158 plane->base.base.id, plane->base.name,
3159 crtc->base.base.id, crtc->base.name);
3160
b1e01595 3161 intel_set_plane_visible(crtc_state, plane_state, false);
62358aa4 3162 fixup_active_planes(crtc_state);
c457d9cf 3163 crtc_state->data_rate[plane->id] = 0;
b1e01595
VS
3164
3165 if (plane->id == PLANE_PRIMARY)
3166 intel_pre_disable_primary_noatomic(&crtc->base);
3167
c48b86f9 3168 intel_disable_plane(plane, crtc_state);
b1e01595
VS
3169}
3170
5724dbd1 3171static void
f6936e29
DV
3172intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
3173 struct intel_initial_plane_config *plane_config)
484b41dd
JB
3174{
3175 struct drm_device *dev = intel_crtc->base.dev;
fac5e23e 3176 struct drm_i915_private *dev_priv = to_i915(dev);
484b41dd 3177 struct drm_crtc *c;
2ff8fde1 3178 struct drm_i915_gem_object *obj;
88595ac9 3179 struct drm_plane *primary = intel_crtc->base.primary;
be5651f2 3180 struct drm_plane_state *plane_state = primary->state;
200757f5 3181 struct intel_plane *intel_plane = to_intel_plane(primary);
0a8d8a86
MR
3182 struct intel_plane_state *intel_state =
3183 to_intel_plane_state(plane_state);
88595ac9 3184 struct drm_framebuffer *fb;
484b41dd 3185
2d14030b 3186 if (!plane_config->fb)
484b41dd
JB
3187 return;
3188
f6936e29 3189 if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
88595ac9
DV
3190 fb = &plane_config->fb->base;
3191 goto valid_fb;
f55548b5 3192 }
484b41dd 3193
2d14030b 3194 kfree(plane_config->fb);
484b41dd
JB
3195
3196 /*
3197 * Failed to alloc the obj, check to see if we should share
3198 * an fb with another CRTC instead
3199 */
70e1e0ec 3200 for_each_crtc(dev, c) {
be1e3415 3201 struct intel_plane_state *state;
484b41dd
JB
3202
3203 if (c == &intel_crtc->base)
3204 continue;
3205
be1e3415 3206 if (!to_intel_crtc(c)->active)
2ff8fde1
MR
3207 continue;
3208
be1e3415
CW
3209 state = to_intel_plane_state(c->primary->state);
3210 if (!state->vma)
484b41dd
JB
3211 continue;
3212
be1e3415 3213 if (intel_plane_ggtt_offset(state) == plane_config->base) {
8bc20f65 3214 fb = state->base.fb;
c3ed1103 3215 drm_framebuffer_get(fb);
88595ac9 3216 goto valid_fb;
484b41dd
JB
3217 }
3218 }
88595ac9 3219
200757f5
MR
3220 /*
3221 * We've failed to reconstruct the BIOS FB. Current display state
3222 * indicates that the primary plane is visible, but has a NULL FB,
3223 * which will lead to problems later if we don't fix it up. The
3224 * simplest solution is to just disable the primary plane now and
3225 * pretend the BIOS never had it enabled.
3226 */
b1e01595 3227 intel_plane_disable_noatomic(intel_crtc, intel_plane);
200757f5 3228
88595ac9
DV
3229 return;
3230
3231valid_fb:
f43348a3 3232 intel_state->base.rotation = plane_config->rotation;
f5929c53
VS
3233 intel_fill_fb_ggtt_view(&intel_state->view, fb,
3234 intel_state->base.rotation);
df79cf44
VS
3235 intel_state->color_plane[0].stride =
3236 intel_fb_pitch(fb, 0, intel_state->base.rotation);
3237
be1e3415
CW
3238 mutex_lock(&dev->struct_mutex);
3239 intel_state->vma =
5935485f 3240 intel_pin_and_fence_fb_obj(fb,
f5929c53 3241 &intel_state->view,
f7a02ad7 3242 intel_plane_uses_fence(intel_state),
5935485f 3243 &intel_state->flags);
be1e3415
CW
3244 mutex_unlock(&dev->struct_mutex);
3245 if (IS_ERR(intel_state->vma)) {
3246 DRM_ERROR("failed to pin boot fb on pipe %d: %li\n",
3247 intel_crtc->pipe, PTR_ERR(intel_state->vma));
3248
3249 intel_state->vma = NULL;
c3ed1103 3250 drm_framebuffer_put(fb);
be1e3415
CW
3251 return;
3252 }
3253
07bcd99b
DP
3254 obj = intel_fb_obj(fb);
3255 intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
3256
f44e2659
VS
3257 plane_state->src_x = 0;
3258 plane_state->src_y = 0;
be5651f2
ML
3259 plane_state->src_w = fb->width << 16;
3260 plane_state->src_h = fb->height << 16;
3261
f44e2659
VS
3262 plane_state->crtc_x = 0;
3263 plane_state->crtc_y = 0;
be5651f2
ML
3264 plane_state->crtc_w = fb->width;
3265 plane_state->crtc_h = fb->height;
3266
1638d30c
RC
3267 intel_state->base.src = drm_plane_state_src(plane_state);
3268 intel_state->base.dst = drm_plane_state_dest(plane_state);
0a8d8a86 3269
3e510a8e 3270 if (i915_gem_object_is_tiled(obj))
88595ac9
DV
3271 dev_priv->preserve_bios_swizzle = true;
3272
cd30fbca
VS
3273 plane_state->fb = fb;
3274 plane_state->crtc = &intel_crtc->base;
e9728bd8 3275
faf5bf0a
CW
3276 atomic_or(to_intel_plane(primary)->frontbuffer_bit,
3277 &obj->frontbuffer_bits);
46f297fb
JB
3278}
3279
5d2a1950
VS
3280static int skl_max_plane_width(const struct drm_framebuffer *fb,
3281 int color_plane,
b63a16f6
VS
3282 unsigned int rotation)
3283{
5d2a1950 3284 int cpp = fb->format->cpp[color_plane];
b63a16f6 3285
bae781b2 3286 switch (fb->modifier) {
2f075565 3287 case DRM_FORMAT_MOD_LINEAR:
b63a16f6 3288 case I915_FORMAT_MOD_X_TILED:
372b9ffb 3289 return 4096;
2e2adb05
VS
3290 case I915_FORMAT_MOD_Y_TILED_CCS:
3291 case I915_FORMAT_MOD_Yf_TILED_CCS:
3292 /* FIXME AUX plane? */
b63a16f6
VS
3293 case I915_FORMAT_MOD_Y_TILED:
3294 case I915_FORMAT_MOD_Yf_TILED:
372b9ffb 3295 if (cpp == 8)
b63a16f6 3296 return 2048;
372b9ffb 3297 else
b63a16f6 3298 return 4096;
b63a16f6 3299 default:
bae781b2 3300 MISSING_CASE(fb->modifier);
372b9ffb 3301 return 2048;
b63a16f6 3302 }
372b9ffb 3303}
b63a16f6 3304
372b9ffb
VS
3305static int glk_max_plane_width(const struct drm_framebuffer *fb,
3306 int color_plane,
3307 unsigned int rotation)
3308{
3309 int cpp = fb->format->cpp[color_plane];
3310
3311 switch (fb->modifier) {
3312 case DRM_FORMAT_MOD_LINEAR:
3313 case I915_FORMAT_MOD_X_TILED:
3314 if (cpp == 8)
3315 return 4096;
3316 else
3317 return 5120;
3318 case I915_FORMAT_MOD_Y_TILED_CCS:
3319 case I915_FORMAT_MOD_Yf_TILED_CCS:
3320 /* FIXME AUX plane? */
3321 case I915_FORMAT_MOD_Y_TILED:
3322 case I915_FORMAT_MOD_Yf_TILED:
3323 if (cpp == 8)
3324 return 2048;
3325 else
3326 return 5120;
3327 default:
3328 MISSING_CASE(fb->modifier);
3329 return 2048;
3330 }
3331}
3332
3333static int icl_max_plane_width(const struct drm_framebuffer *fb,
3334 int color_plane,
3335 unsigned int rotation)
3336{
3337 return 5120;
b63a16f6
VS
3338}
3339
2e2adb05
VS
3340static bool skl_check_main_ccs_coordinates(struct intel_plane_state *plane_state,
3341 int main_x, int main_y, u32 main_offset)
3342{
3343 const struct drm_framebuffer *fb = plane_state->base.fb;
3344 int hsub = fb->format->hsub;
3345 int vsub = fb->format->vsub;
c11ada07
VS
3346 int aux_x = plane_state->color_plane[1].x;
3347 int aux_y = plane_state->color_plane[1].y;
3348 u32 aux_offset = plane_state->color_plane[1].offset;
2e2adb05
VS
3349 u32 alignment = intel_surf_alignment(fb, 1);
3350
3351 while (aux_offset >= main_offset && aux_y <= main_y) {
3352 int x, y;
3353
3354 if (aux_x == main_x && aux_y == main_y)
3355 break;
3356
3357 if (aux_offset == 0)
3358 break;
3359
3360 x = aux_x / hsub;
3361 y = aux_y / vsub;
6d19a44c
VS
3362 aux_offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 1,
3363 aux_offset, aux_offset - alignment);
2e2adb05
VS
3364 aux_x = x * hsub + aux_x % hsub;
3365 aux_y = y * vsub + aux_y % vsub;
3366 }
3367
3368 if (aux_x != main_x || aux_y != main_y)
3369 return false;
3370
c11ada07
VS
3371 plane_state->color_plane[1].offset = aux_offset;
3372 plane_state->color_plane[1].x = aux_x;
3373 plane_state->color_plane[1].y = aux_y;
2e2adb05
VS
3374
3375 return true;
3376}
3377
73266595 3378static int skl_check_main_surface(struct intel_plane_state *plane_state)
b63a16f6 3379{
372b9ffb 3380 struct drm_i915_private *dev_priv = to_i915(plane_state->base.plane->dev);
b63a16f6
VS
3381 const struct drm_framebuffer *fb = plane_state->base.fb;
3382 unsigned int rotation = plane_state->base.rotation;
cc926387
DV
3383 int x = plane_state->base.src.x1 >> 16;
3384 int y = plane_state->base.src.y1 >> 16;
3385 int w = drm_rect_width(&plane_state->base.src) >> 16;
3386 int h = drm_rect_height(&plane_state->base.src) >> 16;
372b9ffb 3387 int max_width;
b63a16f6 3388 int max_height = 4096;
c11ada07 3389 u32 alignment, offset, aux_offset = plane_state->color_plane[1].offset;
b63a16f6 3390
372b9ffb
VS
3391 if (INTEL_GEN(dev_priv) >= 11)
3392 max_width = icl_max_plane_width(fb, 0, rotation);
3393 else if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
3394 max_width = glk_max_plane_width(fb, 0, rotation);
3395 else
3396 max_width = skl_max_plane_width(fb, 0, rotation);
3397
b63a16f6
VS
3398 if (w > max_width || h > max_height) {
3399 DRM_DEBUG_KMS("requested Y/RGB source size %dx%d too big (limit %dx%d)\n",
3400 w, h, max_width, max_height);
3401 return -EINVAL;
3402 }
3403
3404 intel_add_fb_offsets(&x, &y, plane_state, 0);
6d19a44c 3405 offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 0);
d88c4afd 3406 alignment = intel_surf_alignment(fb, 0);
b63a16f6 3407
8d970654
VS
3408 /*
3409 * AUX surface offset is specified as the distance from the
3410 * main surface offset, and it must be non-negative. Make
3411 * sure that is what we will get.
3412 */
3413 if (offset > aux_offset)
6d19a44c
VS
3414 offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
3415 offset, aux_offset & ~(alignment - 1));
8d970654 3416
b63a16f6
VS
3417 /*
3418 * When using an X-tiled surface, the plane blows up
3419 * if the x offset + width exceed the stride.
3420 *
3421 * TODO: linear and Y-tiled seem fine, Yf untested,
3422 */
bae781b2 3423 if (fb->modifier == I915_FORMAT_MOD_X_TILED) {
353c8598 3424 int cpp = fb->format->cpp[0];
b63a16f6 3425
df79cf44 3426 while ((x + w) * cpp > plane_state->color_plane[0].stride) {
b63a16f6 3427 if (offset == 0) {
2e2adb05 3428 DRM_DEBUG_KMS("Unable to find suitable display surface offset due to X-tiling\n");
b63a16f6
VS
3429 return -EINVAL;
3430 }
3431
6d19a44c
VS
3432 offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
3433 offset, offset - alignment);
b63a16f6
VS
3434 }
3435 }
3436
2e2adb05
VS
3437 /*
3438 * CCS AUX surface doesn't have its own x/y offsets, we must make sure
3439 * they match with the main surface x/y offsets.
3440 */
63eaf9ac 3441 if (is_ccs_modifier(fb->modifier)) {
2e2adb05
VS
3442 while (!skl_check_main_ccs_coordinates(plane_state, x, y, offset)) {
3443 if (offset == 0)
3444 break;
3445
6d19a44c
VS
3446 offset = intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
3447 offset, offset - alignment);
2e2adb05
VS
3448 }
3449
c11ada07 3450 if (x != plane_state->color_plane[1].x || y != plane_state->color_plane[1].y) {
2e2adb05
VS
3451 DRM_DEBUG_KMS("Unable to find suitable display surface offset due to CCS\n");
3452 return -EINVAL;
3453 }
3454 }
3455
c11ada07
VS
3456 plane_state->color_plane[0].offset = offset;
3457 plane_state->color_plane[0].x = x;
3458 plane_state->color_plane[0].y = y;
b63a16f6 3459
54d4d719
VS
3460 /*
3461 * Put the final coordinates back so that the src
3462 * coordinate checks will see the right values.
3463 */
3464 drm_rect_translate(&plane_state->base.src,
3465 (x << 16) - plane_state->base.src.x1,
3466 (y << 16) - plane_state->base.src.y1);
3467
b63a16f6
VS
3468 return 0;
3469}
3470
8d970654
VS
3471static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
3472{
3473 const struct drm_framebuffer *fb = plane_state->base.fb;
3474 unsigned int rotation = plane_state->base.rotation;
3475 int max_width = skl_max_plane_width(fb, 1, rotation);
3476 int max_height = 4096;
cc926387
DV
3477 int x = plane_state->base.src.x1 >> 17;
3478 int y = plane_state->base.src.y1 >> 17;
3479 int w = drm_rect_width(&plane_state->base.src) >> 17;
3480 int h = drm_rect_height(&plane_state->base.src) >> 17;
8d970654
VS
3481 u32 offset;
3482
3483 intel_add_fb_offsets(&x, &y, plane_state, 1);
6d19a44c 3484 offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 1);
8d970654
VS
3485
3486 /* FIXME not quite sure how/if these apply to the chroma plane */
3487 if (w > max_width || h > max_height) {
3488 DRM_DEBUG_KMS("CbCr source size %dx%d too big (limit %dx%d)\n",
3489 w, h, max_width, max_height);
3490 return -EINVAL;
3491 }
3492
c11ada07
VS
3493 plane_state->color_plane[1].offset = offset;
3494 plane_state->color_plane[1].x = x;
3495 plane_state->color_plane[1].y = y;
8d970654
VS
3496
3497 return 0;
3498}
3499
2e2adb05
VS
3500static int skl_check_ccs_aux_surface(struct intel_plane_state *plane_state)
3501{
2e2adb05
VS
3502 const struct drm_framebuffer *fb = plane_state->base.fb;
3503 int src_x = plane_state->base.src.x1 >> 16;
3504 int src_y = plane_state->base.src.y1 >> 16;
3505 int hsub = fb->format->hsub;
3506 int vsub = fb->format->vsub;
3507 int x = src_x / hsub;
3508 int y = src_y / vsub;
3509 u32 offset;
3510
2e2adb05 3511 intel_add_fb_offsets(&x, &y, plane_state, 1);
6d19a44c 3512 offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 1);
2e2adb05 3513
c11ada07
VS
3514 plane_state->color_plane[1].offset = offset;
3515 plane_state->color_plane[1].x = x * hsub + src_x % hsub;
3516 plane_state->color_plane[1].y = y * vsub + src_y % vsub;
2e2adb05
VS
3517
3518 return 0;
3519}
3520
73266595 3521int skl_check_plane_surface(struct intel_plane_state *plane_state)
b63a16f6
VS
3522{
3523 const struct drm_framebuffer *fb = plane_state->base.fb;
b63a16f6
VS
3524 int ret;
3525
54d4d719 3526 ret = intel_plane_compute_gtt(plane_state);
fc3fed5d
VS
3527 if (ret)
3528 return ret;
3529
a5e4c7d0
VS
3530 if (!plane_state->base.visible)
3531 return 0;
3532
8d970654
VS
3533 /*
3534 * Handle the AUX surface first since
3535 * the main surface setup depends on it.
3536 */
df7d4156 3537 if (is_planar_yuv_format(fb->format->format)) {
8d970654
VS
3538 ret = skl_check_nv12_aux_surface(plane_state);
3539 if (ret)
3540 return ret;
63eaf9ac 3541 } else if (is_ccs_modifier(fb->modifier)) {
2e2adb05
VS
3542 ret = skl_check_ccs_aux_surface(plane_state);
3543 if (ret)
3544 return ret;
8d970654 3545 } else {
c11ada07
VS
3546 plane_state->color_plane[1].offset = ~0xfff;
3547 plane_state->color_plane[1].x = 0;
3548 plane_state->color_plane[1].y = 0;
8d970654
VS
3549 }
3550
73266595 3551 ret = skl_check_main_surface(plane_state);
b63a16f6
VS
3552 if (ret)
3553 return ret;
3554
3555 return 0;
3556}
3557
ddd5713d
VS
3558unsigned int
3559i9xx_plane_max_stride(struct intel_plane *plane,
3560 u32 pixel_format, u64 modifier,
3561 unsigned int rotation)
3562{
3563 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
3564
b2ae318a 3565 if (!HAS_GMCH(dev_priv)) {
ddd5713d
VS
3566 return 32*1024;
3567 } else if (INTEL_GEN(dev_priv) >= 4) {
3568 if (modifier == I915_FORMAT_MOD_X_TILED)
3569 return 16*1024;
3570 else
3571 return 32*1024;
3572 } else if (INTEL_GEN(dev_priv) >= 3) {
3573 if (modifier == I915_FORMAT_MOD_X_TILED)
3574 return 8*1024;
3575 else
3576 return 16*1024;
3577 } else {
3578 if (plane->i9xx_plane == PLANE_C)
3579 return 4*1024;
3580 else
3581 return 8*1024;
3582 }
3583}
3584
7eb31a0b
VS
3585static u32 i9xx_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
3586{
3587 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
3588 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
3589 u32 dspcntr = 0;
3590
5f29ab23
VS
3591 if (crtc_state->gamma_enable)
3592 dspcntr |= DISPPLANE_GAMMA_ENABLE;
7eb31a0b 3593
8271b2ef 3594 if (crtc_state->csc_enable)
7eb31a0b
VS
3595 dspcntr |= DISPPLANE_PIPE_CSC_ENABLE;
3596
3597 if (INTEL_GEN(dev_priv) < 5)
3598 dspcntr |= DISPPLANE_SEL_PIPE(crtc->pipe);
3599
3600 return dspcntr;
3601}
3602
7145f60a
VS
3603static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state,
3604 const struct intel_plane_state *plane_state)
81255565 3605{
7145f60a
VS
3606 struct drm_i915_private *dev_priv =
3607 to_i915(plane_state->base.plane->dev);
7145f60a 3608 const struct drm_framebuffer *fb = plane_state->base.fb;
8d0deca8 3609 unsigned int rotation = plane_state->base.rotation;
7145f60a 3610 u32 dspcntr;
c9ba6fad 3611
7eb31a0b 3612 dspcntr = DISPLAY_PLANE_ENABLE;
f45651ba 3613
cf819eff
LDM
3614 if (IS_G4X(dev_priv) || IS_GEN(dev_priv, 5) ||
3615 IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
7145f60a 3616 dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE;
f45651ba 3617
438b74a5 3618 switch (fb->format->format) {
57779d06 3619 case DRM_FORMAT_C8:
81255565
JB
3620 dspcntr |= DISPPLANE_8BPP;
3621 break;
57779d06 3622 case DRM_FORMAT_XRGB1555:
57779d06 3623 dspcntr |= DISPPLANE_BGRX555;
81255565 3624 break;
57779d06
VS
3625 case DRM_FORMAT_RGB565:
3626 dspcntr |= DISPPLANE_BGRX565;
3627 break;
3628 case DRM_FORMAT_XRGB8888:
57779d06
VS
3629 dspcntr |= DISPPLANE_BGRX888;
3630 break;
3631 case DRM_FORMAT_XBGR8888:
57779d06
VS
3632 dspcntr |= DISPPLANE_RGBX888;
3633 break;
3634 case DRM_FORMAT_XRGB2101010:
57779d06
VS
3635 dspcntr |= DISPPLANE_BGRX101010;
3636 break;
3637 case DRM_FORMAT_XBGR2101010:
57779d06 3638 dspcntr |= DISPPLANE_RGBX101010;
81255565
JB
3639 break;
3640 default:
7145f60a
VS
3641 MISSING_CASE(fb->format->format);
3642 return 0;
81255565 3643 }
57779d06 3644
72618ebf 3645 if (INTEL_GEN(dev_priv) >= 4 &&
bae781b2 3646 fb->modifier == I915_FORMAT_MOD_X_TILED)
f45651ba 3647 dspcntr |= DISPPLANE_TILED;
81255565 3648
c2c446ad 3649 if (rotation & DRM_MODE_ROTATE_180)
df0cd455
VS
3650 dspcntr |= DISPPLANE_ROTATE_180;
3651
c2c446ad 3652 if (rotation & DRM_MODE_REFLECT_X)
4ea7be2b
VS
3653 dspcntr |= DISPPLANE_MIRROR;
3654
7145f60a
VS
3655 return dspcntr;
3656}
de1aa629 3657
f9407ae1 3658int i9xx_check_plane_surface(struct intel_plane_state *plane_state)
5b7fcc44
VS
3659{
3660 struct drm_i915_private *dev_priv =
3661 to_i915(plane_state->base.plane->dev);
54d4d719 3662 int src_x, src_y;
5b7fcc44 3663 u32 offset;
fc3fed5d 3664 int ret;
81255565 3665
54d4d719 3666 ret = intel_plane_compute_gtt(plane_state);
fc3fed5d
VS
3667 if (ret)
3668 return ret;
3669
54d4d719
VS
3670 if (!plane_state->base.visible)
3671 return 0;
3672
3673 src_x = plane_state->base.src.x1 >> 16;
3674 src_y = plane_state->base.src.y1 >> 16;
3675
5b7fcc44 3676 intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
e506a0c6 3677
5b7fcc44 3678 if (INTEL_GEN(dev_priv) >= 4)
6d19a44c
VS
3679 offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
3680 plane_state, 0);
5b7fcc44
VS
3681 else
3682 offset = 0;
3683
54d4d719
VS
3684 /*
3685 * Put the final coordinates back so that the src
3686 * coordinate checks will see the right values.
3687 */
3688 drm_rect_translate(&plane_state->base.src,
3689 (src_x << 16) - plane_state->base.src.x1,
3690 (src_y << 16) - plane_state->base.src.y1);
3691
5b7fcc44
VS
3692 /* HSW/BDW do this automagically in hardware */
3693 if (!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv)) {
54d4d719 3694 unsigned int rotation = plane_state->base.rotation;
5b7fcc44
VS
3695 int src_w = drm_rect_width(&plane_state->base.src) >> 16;
3696 int src_h = drm_rect_height(&plane_state->base.src) >> 16;
3697
c2c446ad 3698 if (rotation & DRM_MODE_ROTATE_180) {
5b7fcc44
VS
3699 src_x += src_w - 1;
3700 src_y += src_h - 1;
c2c446ad 3701 } else if (rotation & DRM_MODE_REFLECT_X) {
5b7fcc44
VS
3702 src_x += src_w - 1;
3703 }
48404c1e
SJ
3704 }
3705
c11ada07
VS
3706 plane_state->color_plane[0].offset = offset;
3707 plane_state->color_plane[0].x = src_x;
3708 plane_state->color_plane[0].y = src_y;
5b7fcc44
VS
3709
3710 return 0;
3711}
3712
4e0b83a5
VS
3713static int
3714i9xx_plane_check(struct intel_crtc_state *crtc_state,
3715 struct intel_plane_state *plane_state)
3716{
3717 int ret;
3718
25721f82
VS
3719 ret = chv_plane_check_rotation(plane_state);
3720 if (ret)
3721 return ret;
3722
4e0b83a5
VS
3723 ret = drm_atomic_helper_check_plane_state(&plane_state->base,
3724 &crtc_state->base,
3725 DRM_PLANE_HELPER_NO_SCALING,
3726 DRM_PLANE_HELPER_NO_SCALING,
3727 false, true);
3728 if (ret)
3729 return ret;
3730
54d4d719
VS
3731 ret = i9xx_check_plane_surface(plane_state);
3732 if (ret)
3733 return ret;
3734
4e0b83a5
VS
3735 if (!plane_state->base.visible)
3736 return 0;
3737
3738 ret = intel_plane_check_src_coordinates(plane_state);
3739 if (ret)
3740 return ret;
3741
4e0b83a5
VS
3742 plane_state->ctl = i9xx_plane_ctl(crtc_state, plane_state);
3743
3744 return 0;
3745}
3746
ed15030d
VS
3747static void i9xx_update_plane(struct intel_plane *plane,
3748 const struct intel_crtc_state *crtc_state,
3749 const struct intel_plane_state *plane_state)
7145f60a 3750{
ed15030d 3751 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
ed15030d 3752 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
7145f60a 3753 u32 linear_offset;
c11ada07
VS
3754 int x = plane_state->color_plane[0].x;
3755 int y = plane_state->color_plane[0].y;
7145f60a 3756 unsigned long irqflags;
e288881b 3757 u32 dspaddr_offset;
7eb31a0b
VS
3758 u32 dspcntr;
3759
3760 dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state);
7145f60a 3761
2949056c 3762 linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0);
6687c906 3763
5b7fcc44 3764 if (INTEL_GEN(dev_priv) >= 4)
c11ada07 3765 dspaddr_offset = plane_state->color_plane[0].offset;
5b7fcc44 3766 else
e288881b 3767 dspaddr_offset = linear_offset;
6687c906 3768
dd584fc0
VS
3769 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
3770
83234d13
VS
3771 I915_WRITE_FW(DSPSTRIDE(i9xx_plane), plane_state->color_plane[0].stride);
3772
78587de2
VS
3773 if (INTEL_GEN(dev_priv) < 4) {
3774 /* pipesrc and dspsize control the size that is scaled from,
3775 * which should always be the user's requested size.
3776 */
83234d13 3777 I915_WRITE_FW(DSPPOS(i9xx_plane), 0);
ed15030d 3778 I915_WRITE_FW(DSPSIZE(i9xx_plane),
dd584fc0
VS
3779 ((crtc_state->pipe_src_h - 1) << 16) |
3780 (crtc_state->pipe_src_w - 1));
ed15030d 3781 } else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) {
83234d13 3782 I915_WRITE_FW(PRIMPOS(i9xx_plane), 0);
ed15030d 3783 I915_WRITE_FW(PRIMSIZE(i9xx_plane),
dd584fc0
VS
3784 ((crtc_state->pipe_src_h - 1) << 16) |
3785 (crtc_state->pipe_src_w - 1));
ed15030d 3786 I915_WRITE_FW(PRIMCNSTALPHA(i9xx_plane), 0);
78587de2
VS
3787 }
3788
3ba35e53 3789 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
ed15030d 3790 I915_WRITE_FW(DSPOFFSET(i9xx_plane), (y << 16) | x);
3ba35e53 3791 } else if (INTEL_GEN(dev_priv) >= 4) {
83234d13
VS
3792 I915_WRITE_FW(DSPLINOFF(i9xx_plane), linear_offset);
3793 I915_WRITE_FW(DSPTILEOFF(i9xx_plane), (y << 16) | x);
3794 }
3795
3796 /*
3797 * The control register self-arms if the plane was previously
3798 * disabled. Try to make the plane enable atomic by writing
3799 * the control register just before the surface register.
3800 */
3801 I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr);
3802 if (INTEL_GEN(dev_priv) >= 4)
ed15030d 3803 I915_WRITE_FW(DSPSURF(i9xx_plane),
dd584fc0 3804 intel_plane_ggtt_offset(plane_state) +
e288881b 3805 dspaddr_offset);
83234d13 3806 else
ed15030d 3807 I915_WRITE_FW(DSPADDR(i9xx_plane),
dd584fc0 3808 intel_plane_ggtt_offset(plane_state) +
e288881b 3809 dspaddr_offset);
dd584fc0
VS
3810
3811 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
17638cd6
JB
3812}
3813
ed15030d 3814static void i9xx_disable_plane(struct intel_plane *plane,
0dd14be3 3815 const struct intel_crtc_state *crtc_state)
17638cd6 3816{
ed15030d
VS
3817 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
3818 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
dd584fc0 3819 unsigned long irqflags;
7eb31a0b
VS
3820 u32 dspcntr;
3821
3822 /*
3823 * DSPCNTR pipe gamma enable on g4x+ and pipe csc
3824 * enable on ilk+ affect the pipe bottom color as
3825 * well, so we must configure them even if the plane
3826 * is disabled.
3827 *
3828 * On pre-g4x there is no way to gamma correct the
3829 * pipe bottom color but we'll keep on doing this
9d5441de 3830 * anyway so that the crtc state readout works correctly.
7eb31a0b
VS
3831 */
3832 dspcntr = i9xx_plane_ctl_crtc(crtc_state);
dd584fc0
VS
3833
3834 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
f45651ba 3835
7eb31a0b 3836 I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr);
ed15030d
VS
3837 if (INTEL_GEN(dev_priv) >= 4)
3838 I915_WRITE_FW(DSPSURF(i9xx_plane), 0);
a8d201af 3839 else
ed15030d 3840 I915_WRITE_FW(DSPADDR(i9xx_plane), 0);
dd584fc0
VS
3841
3842 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
a8d201af 3843}
c9ba6fad 3844
eade6c89
VS
3845static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
3846 enum pipe *pipe)
51f5a096 3847{
ed15030d 3848 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
51f5a096 3849 enum intel_display_power_domain power_domain;
ed15030d 3850 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
0e6e0be4 3851 intel_wakeref_t wakeref;
51f5a096 3852 bool ret;
eade6c89 3853 u32 val;
51f5a096
VS
3854
3855 /*
3856 * Not 100% correct for planes that can move between pipes,
3857 * but that's only the case for gen2-4 which don't have any
3858 * display power wells.
3859 */
eade6c89 3860 power_domain = POWER_DOMAIN_PIPE(plane->pipe);
0e6e0be4
CW
3861 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
3862 if (!wakeref)
51f5a096
VS
3863 return false;
3864
eade6c89
VS
3865 val = I915_READ(DSPCNTR(i9xx_plane));
3866
3867 ret = val & DISPLAY_PLANE_ENABLE;
3868
3869 if (INTEL_GEN(dev_priv) >= 5)
3870 *pipe = plane->pipe;
3871 else
3872 *pipe = (val & DISPPLANE_SEL_PIPE_MASK) >>
3873 DISPPLANE_SEL_PIPE_SHIFT;
51f5a096 3874
0e6e0be4 3875 intel_display_power_put(dev_priv, power_domain, wakeref);
51f5a096
VS
3876
3877 return ret;
3878}
3879
e435d6e5
ML
3880static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
3881{
3882 struct drm_device *dev = intel_crtc->base.dev;
fac5e23e 3883 struct drm_i915_private *dev_priv = to_i915(dev);
e435d6e5
ML
3884
3885 I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
3886 I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
3887 I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
e435d6e5
ML
3888}
3889
a1b2278e
CK
3890/*
3891 * This function detaches (aka. unbinds) unused scalers in hardware
3892 */
15cbe5d0 3893static void skl_detach_scalers(const struct intel_crtc_state *crtc_state)
a1b2278e 3894{
15cbe5d0
ML
3895 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
3896 const struct intel_crtc_scaler_state *scaler_state =
3897 &crtc_state->scaler_state;
a1b2278e
CK
3898 int i;
3899
a1b2278e
CK
3900 /* loop through and disable scalers that aren't in use */
3901 for (i = 0; i < intel_crtc->num_scalers; i++) {
e435d6e5
ML
3902 if (!scaler_state->scalers[i].in_use)
3903 skl_detach_scaler(intel_crtc, i);
a1b2278e
CK
3904 }
3905}
3906
b3cf5c06
VS
3907static unsigned int skl_plane_stride_mult(const struct drm_framebuffer *fb,
3908 int color_plane, unsigned int rotation)
3909{
3910 /*
3911 * The stride is either expressed as a multiple of 64 bytes chunks for
3912 * linear buffers or in number of tiles for tiled buffers.
3913 */
3914 if (fb->modifier == DRM_FORMAT_MOD_LINEAR)
3915 return 64;
3916 else if (drm_rotation_90_or_270(rotation))
3917 return intel_tile_height(fb, color_plane);
3918 else
3919 return intel_tile_width_bytes(fb, color_plane);
3920}
3921
df79cf44 3922u32 skl_plane_stride(const struct intel_plane_state *plane_state,
5d2a1950 3923 int color_plane)
d2196774 3924{
df79cf44
VS
3925 const struct drm_framebuffer *fb = plane_state->base.fb;
3926 unsigned int rotation = plane_state->base.rotation;
5d2a1950 3927 u32 stride = plane_state->color_plane[color_plane].stride;
1b500535 3928
5d2a1950 3929 if (color_plane >= fb->format->num_planes)
1b500535
VS
3930 return 0;
3931
b3cf5c06 3932 return stride / skl_plane_stride_mult(fb, color_plane, rotation);
d2196774
VS
3933}
3934
ba3f4d0a 3935static u32 skl_plane_ctl_format(u32 pixel_format)
70d21f0e 3936{
6156a456 3937 switch (pixel_format) {
d161cf7a 3938 case DRM_FORMAT_C8:
c34ce3d1 3939 return PLANE_CTL_FORMAT_INDEXED;
70d21f0e 3940 case DRM_FORMAT_RGB565:
c34ce3d1 3941 return PLANE_CTL_FORMAT_RGB_565;
70d21f0e 3942 case DRM_FORMAT_XBGR8888:
4036c78c 3943 case DRM_FORMAT_ABGR8888:
c34ce3d1 3944 return PLANE_CTL_FORMAT_XRGB_8888 | PLANE_CTL_ORDER_RGBX;
6156a456 3945 case DRM_FORMAT_XRGB8888:
6156a456 3946 case DRM_FORMAT_ARGB8888:
4036c78c 3947 return PLANE_CTL_FORMAT_XRGB_8888;
70d21f0e 3948 case DRM_FORMAT_XRGB2101010:
c34ce3d1 3949 return PLANE_CTL_FORMAT_XRGB_2101010;
70d21f0e 3950 case DRM_FORMAT_XBGR2101010:
c34ce3d1 3951 return PLANE_CTL_ORDER_RGBX | PLANE_CTL_FORMAT_XRGB_2101010;
a94bed60
KS
3952 case DRM_FORMAT_XBGR16161616F:
3953 case DRM_FORMAT_ABGR16161616F:
3954 return PLANE_CTL_FORMAT_XRGB_16161616F | PLANE_CTL_ORDER_RGBX;
3955 case DRM_FORMAT_XRGB16161616F:
3956 case DRM_FORMAT_ARGB16161616F:
3957 return PLANE_CTL_FORMAT_XRGB_16161616F;
6156a456 3958 case DRM_FORMAT_YUYV:
c34ce3d1 3959 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YUYV;
6156a456 3960 case DRM_FORMAT_YVYU:
c34ce3d1 3961 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_YVYU;
6156a456 3962 case DRM_FORMAT_UYVY:
c34ce3d1 3963 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_UYVY;
6156a456 3964 case DRM_FORMAT_VYUY:
c34ce3d1 3965 return PLANE_CTL_FORMAT_YUV422 | PLANE_CTL_YUV422_VYUY;
77224cd5
CK
3966 case DRM_FORMAT_NV12:
3967 return PLANE_CTL_FORMAT_NV12;
df7d4156
JPH
3968 case DRM_FORMAT_P010:
3969 return PLANE_CTL_FORMAT_P010;
3970 case DRM_FORMAT_P012:
3971 return PLANE_CTL_FORMAT_P012;
3972 case DRM_FORMAT_P016:
3973 return PLANE_CTL_FORMAT_P016;
296e9b19
SS
3974 case DRM_FORMAT_Y210:
3975 return PLANE_CTL_FORMAT_Y210;
3976 case DRM_FORMAT_Y212:
3977 return PLANE_CTL_FORMAT_Y212;
3978 case DRM_FORMAT_Y216:
3979 return PLANE_CTL_FORMAT_Y216;
ff01e697 3980 case DRM_FORMAT_XVYU2101010:
296e9b19 3981 return PLANE_CTL_FORMAT_Y410;
ff01e697 3982 case DRM_FORMAT_XVYU12_16161616:
296e9b19 3983 return PLANE_CTL_FORMAT_Y412;
ff01e697 3984 case DRM_FORMAT_XVYU16161616:
296e9b19 3985 return PLANE_CTL_FORMAT_Y416;
70d21f0e 3986 default:
4249eeef 3987 MISSING_CASE(pixel_format);
70d21f0e 3988 }
8cfcba41 3989
c34ce3d1 3990 return 0;
6156a456 3991}
70d21f0e 3992
b2081525 3993static u32 skl_plane_ctl_alpha(const struct intel_plane_state *plane_state)
4036c78c 3994{
b2081525
ML
3995 if (!plane_state->base.fb->format->has_alpha)
3996 return PLANE_CTL_ALPHA_DISABLE;
3997
3998 switch (plane_state->base.pixel_blend_mode) {
3999 case DRM_MODE_BLEND_PIXEL_NONE:
4000 return PLANE_CTL_ALPHA_DISABLE;
4001 case DRM_MODE_BLEND_PREMULTI:
4036c78c 4002 return PLANE_CTL_ALPHA_SW_PREMULTIPLY;
b2081525
ML
4003 case DRM_MODE_BLEND_COVERAGE:
4004 return PLANE_CTL_ALPHA_HW_PREMULTIPLY;
4036c78c 4005 default:
b2081525 4006 MISSING_CASE(plane_state->base.pixel_blend_mode);
4036c78c
JA
4007 return PLANE_CTL_ALPHA_DISABLE;
4008 }
4009}
4010
b2081525 4011static u32 glk_plane_color_ctl_alpha(const struct intel_plane_state *plane_state)
4036c78c 4012{
b2081525
ML
4013 if (!plane_state->base.fb->format->has_alpha)
4014 return PLANE_COLOR_ALPHA_DISABLE;
4015
4016 switch (plane_state->base.pixel_blend_mode) {
4017 case DRM_MODE_BLEND_PIXEL_NONE:
4018 return PLANE_COLOR_ALPHA_DISABLE;
4019 case DRM_MODE_BLEND_PREMULTI:
4036c78c 4020 return PLANE_COLOR_ALPHA_SW_PREMULTIPLY;
b2081525
ML
4021 case DRM_MODE_BLEND_COVERAGE:
4022 return PLANE_COLOR_ALPHA_HW_PREMULTIPLY;
4036c78c 4023 default:
b2081525 4024 MISSING_CASE(plane_state->base.pixel_blend_mode);
4036c78c
JA
4025 return PLANE_COLOR_ALPHA_DISABLE;
4026 }
4027}
4028
ba3f4d0a 4029static u32 skl_plane_ctl_tiling(u64 fb_modifier)
6156a456 4030{
6156a456 4031 switch (fb_modifier) {
2f075565 4032 case DRM_FORMAT_MOD_LINEAR:
70d21f0e 4033 break;
30af77c4 4034 case I915_FORMAT_MOD_X_TILED:
c34ce3d1 4035 return PLANE_CTL_TILED_X;
b321803d 4036 case I915_FORMAT_MOD_Y_TILED:
c34ce3d1 4037 return PLANE_CTL_TILED_Y;
2e2adb05 4038 case I915_FORMAT_MOD_Y_TILED_CCS:
53867b46 4039 return PLANE_CTL_TILED_Y | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
b321803d 4040 case I915_FORMAT_MOD_Yf_TILED:
c34ce3d1 4041 return PLANE_CTL_TILED_YF;
2e2adb05 4042 case I915_FORMAT_MOD_Yf_TILED_CCS:
53867b46 4043 return PLANE_CTL_TILED_YF | PLANE_CTL_RENDER_DECOMPRESSION_ENABLE;
70d21f0e 4044 default:
6156a456 4045 MISSING_CASE(fb_modifier);
70d21f0e 4046 }
8cfcba41 4047
c34ce3d1 4048 return 0;
6156a456 4049}
70d21f0e 4050
5f8e3f57 4051static u32 skl_plane_ctl_rotate(unsigned int rotate)
6156a456 4052{
5f8e3f57 4053 switch (rotate) {
c2c446ad 4054 case DRM_MODE_ROTATE_0:
6156a456 4055 break;
1e8df167 4056 /*
c2c446ad 4057 * DRM_MODE_ROTATE_ is counter clockwise to stay compatible with Xrandr
1e8df167
SJ
4058 * while i915 HW rotation is clockwise, thats why this swapping.
4059 */
c2c446ad 4060 case DRM_MODE_ROTATE_90:
1e8df167 4061 return PLANE_CTL_ROTATE_270;
c2c446ad 4062 case DRM_MODE_ROTATE_180:
c34ce3d1 4063 return PLANE_CTL_ROTATE_180;
c2c446ad 4064 case DRM_MODE_ROTATE_270:
1e8df167 4065 return PLANE_CTL_ROTATE_90;
6156a456 4066 default:
5f8e3f57
JL
4067 MISSING_CASE(rotate);
4068 }
4069
4070 return 0;
4071}
4072
4073static u32 cnl_plane_ctl_flip(unsigned int reflect)
4074{
4075 switch (reflect) {
4076 case 0:
4077 break;
4078 case DRM_MODE_REFLECT_X:
4079 return PLANE_CTL_FLIP_HORIZONTAL;
4080 case DRM_MODE_REFLECT_Y:
4081 default:
4082 MISSING_CASE(reflect);
6156a456
CK
4083 }
4084
c34ce3d1 4085 return 0;
6156a456
CK
4086}
4087
7eb31a0b
VS
4088u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state)
4089{
4090 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
4091 u32 plane_ctl = 0;
4092
4093 if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
4094 return plane_ctl;
4095
5f29ab23
VS
4096 if (crtc_state->gamma_enable)
4097 plane_ctl |= PLANE_CTL_PIPE_GAMMA_ENABLE;
4098
8271b2ef
VS
4099 if (crtc_state->csc_enable)
4100 plane_ctl |= PLANE_CTL_PIPE_CSC_ENABLE;
7eb31a0b
VS
4101
4102 return plane_ctl;
4103}
4104
2e881264
VS
4105u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state,
4106 const struct intel_plane_state *plane_state)
46f788ba
VS
4107{
4108 struct drm_i915_private *dev_priv =
4109 to_i915(plane_state->base.plane->dev);
4110 const struct drm_framebuffer *fb = plane_state->base.fb;
4111 unsigned int rotation = plane_state->base.rotation;
2e881264 4112 const struct drm_intel_sprite_colorkey *key = &plane_state->ckey;
46f788ba
VS
4113 u32 plane_ctl;
4114
4115 plane_ctl = PLANE_CTL_ENABLE;
4116
4036c78c 4117 if (INTEL_GEN(dev_priv) < 10 && !IS_GEMINILAKE(dev_priv)) {
b2081525 4118 plane_ctl |= skl_plane_ctl_alpha(plane_state);
7eb31a0b 4119 plane_ctl |= PLANE_CTL_PLANE_GAMMA_DISABLE;
b0f5c0ba
VS
4120
4121 if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
4122 plane_ctl |= PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709;
c8624ede
VS
4123
4124 if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
4125 plane_ctl |= PLANE_CTL_YUV_RANGE_CORRECTION_DISABLE;
46f788ba
VS
4126 }
4127
4128 plane_ctl |= skl_plane_ctl_format(fb->format->format);
4129 plane_ctl |= skl_plane_ctl_tiling(fb->modifier);
5f8e3f57
JL
4130 plane_ctl |= skl_plane_ctl_rotate(rotation & DRM_MODE_ROTATE_MASK);
4131
4132 if (INTEL_GEN(dev_priv) >= 10)
4133 plane_ctl |= cnl_plane_ctl_flip(rotation &
4134 DRM_MODE_REFLECT_MASK);
46f788ba 4135
2e881264
VS
4136 if (key->flags & I915_SET_COLORKEY_DESTINATION)
4137 plane_ctl |= PLANE_CTL_KEY_ENABLE_DESTINATION;
4138 else if (key->flags & I915_SET_COLORKEY_SOURCE)
4139 plane_ctl |= PLANE_CTL_KEY_ENABLE_SOURCE;
4140
46f788ba
VS
4141 return plane_ctl;
4142}
4143
7eb31a0b
VS
4144u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state)
4145{
4146 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
4147 u32 plane_color_ctl = 0;
4148
4149 if (INTEL_GEN(dev_priv) >= 11)
4150 return plane_color_ctl;
4151
5f29ab23
VS
4152 if (crtc_state->gamma_enable)
4153 plane_color_ctl |= PLANE_COLOR_PIPE_GAMMA_ENABLE;
4154
8271b2ef
VS
4155 if (crtc_state->csc_enable)
4156 plane_color_ctl |= PLANE_COLOR_PIPE_CSC_ENABLE;
7eb31a0b
VS
4157
4158 return plane_color_ctl;
4159}
4160
4036c78c
JA
4161u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state,
4162 const struct intel_plane_state *plane_state)
4163{
42fd20ed
KS
4164 struct drm_i915_private *dev_priv =
4165 to_i915(plane_state->base.plane->dev);
4036c78c 4166 const struct drm_framebuffer *fb = plane_state->base.fb;
bfe60a02 4167 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
4036c78c
JA
4168 u32 plane_color_ctl = 0;
4169
4036c78c 4170 plane_color_ctl |= PLANE_COLOR_PLANE_GAMMA_DISABLE;
b2081525 4171 plane_color_ctl |= glk_plane_color_ctl_alpha(plane_state);
4036c78c 4172
42fd20ed 4173 if (fb->format->is_yuv && !icl_is_hdr_plane(dev_priv, plane->id)) {
b0f5c0ba
VS
4174 if (plane_state->base.color_encoding == DRM_COLOR_YCBCR_BT709)
4175 plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709;
4176 else
4177 plane_color_ctl |= PLANE_COLOR_CSC_MODE_YUV601_TO_RGB709;
c8624ede
VS
4178
4179 if (plane_state->base.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
4180 plane_color_ctl |= PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE;
bfe60a02
US
4181 } else if (fb->format->is_yuv) {
4182 plane_color_ctl |= PLANE_COLOR_INPUT_CSC_ENABLE;
b0f5c0ba 4183 }
012d79e6 4184
4036c78c
JA
4185 return plane_color_ctl;
4186}
4187
73974893
ML
4188static int
4189__intel_display_resume(struct drm_device *dev,
581e49fe
ML
4190 struct drm_atomic_state *state,
4191 struct drm_modeset_acquire_ctx *ctx)
73974893
ML
4192{
4193 struct drm_crtc_state *crtc_state;
4194 struct drm_crtc *crtc;
4195 int i, ret;
11c22da6 4196
aecd36b8 4197 intel_modeset_setup_hw_state(dev, ctx);
29b74b7f 4198 i915_redisable_vga(to_i915(dev));
73974893
ML
4199
4200 if (!state)
4201 return 0;
4202
aa5e9b47
ML
4203 /*
4204 * We've duplicated the state, pointers to the old state are invalid.
4205 *
4206 * Don't attempt to use the old state until we commit the duplicated state.
4207 */
4208 for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
73974893
ML
4209 /*
4210 * Force recalculation even if we restore
4211 * current state. With fast modeset this may not result
4212 * in a modeset when the state is compatible.
4213 */
4214 crtc_state->mode_changed = true;
96a02917 4215 }
73974893
ML
4216
4217 /* ignore any reset values/BIOS leftovers in the WM registers */
b2ae318a 4218 if (!HAS_GMCH(to_i915(dev)))
602ae835 4219 to_intel_atomic_state(state)->skip_intermediate_wm = true;
73974893 4220
581e49fe 4221 ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
73974893
ML
4222
4223 WARN_ON(ret == -EDEADLK);
4224 return ret;
96a02917
VS
4225}
4226
4ac2ba2f
VS
4227static bool gpu_reset_clobbers_display(struct drm_i915_private *dev_priv)
4228{
55277e1f
CW
4229 return (INTEL_INFO(dev_priv)->gpu_reset_clobbers_display &&
4230 intel_has_gpu_reset(dev_priv));
4ac2ba2f
VS
4231}
4232
c033666a 4233void intel_prepare_reset(struct drm_i915_private *dev_priv)
7514747d 4234{
73974893
ML
4235 struct drm_device *dev = &dev_priv->drm;
4236 struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
4237 struct drm_atomic_state *state;
4238 int ret;
4239
ce87ea15 4240 /* reset doesn't touch the display */
4f044a88 4241 if (!i915_modparams.force_reset_modeset_test &&
ce87ea15
DV
4242 !gpu_reset_clobbers_display(dev_priv))
4243 return;
4244
9db529aa
DV
4245 /* We have a modeset vs reset deadlock, defensively unbreak it. */
4246 set_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags);
4247 wake_up_all(&dev_priv->gpu_error.wait_queue);
4248
4249 if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
4250 DRM_DEBUG_KMS("Modeset potentially stuck, unbreaking through wedging\n");
4251 i915_gem_set_wedged(dev_priv);
4252 }
97154ec2 4253
73974893
ML
4254 /*
4255 * Need mode_config.mutex so that we don't
4256 * trample ongoing ->detect() and whatnot.
4257 */
4258 mutex_lock(&dev->mode_config.mutex);
4259 drm_modeset_acquire_init(ctx, 0);
4260 while (1) {
4261 ret = drm_modeset_lock_all_ctx(dev, ctx);
4262 if (ret != -EDEADLK)
4263 break;
4264
4265 drm_modeset_backoff(ctx);
4266 }
f98ce92f
VS
4267 /*
4268 * Disabling the crtcs gracefully seems nicer. Also the
4269 * g33 docs say we should at least disable all the planes.
4270 */
73974893
ML
4271 state = drm_atomic_helper_duplicate_state(dev, ctx);
4272 if (IS_ERR(state)) {
4273 ret = PTR_ERR(state);
73974893 4274 DRM_ERROR("Duplicating state failed with %i\n", ret);
1e5a15d6 4275 return;
73974893
ML
4276 }
4277
4278 ret = drm_atomic_helper_disable_all(dev, ctx);
4279 if (ret) {
4280 DRM_ERROR("Suspending crtc's failed with %i\n", ret);
1e5a15d6
ACO
4281 drm_atomic_state_put(state);
4282 return;
73974893
ML
4283 }
4284
4285 dev_priv->modeset_restore_state = state;
4286 state->acquire_ctx = ctx;
7514747d
VS
4287}
4288
c033666a 4289void intel_finish_reset(struct drm_i915_private *dev_priv)
7514747d 4290{
73974893
ML
4291 struct drm_device *dev = &dev_priv->drm;
4292 struct drm_modeset_acquire_ctx *ctx = &dev_priv->reset_ctx;
40da1d31 4293 struct drm_atomic_state *state;
73974893
ML
4294 int ret;
4295
ce87ea15 4296 /* reset doesn't touch the display */
40da1d31 4297 if (!test_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags))
ce87ea15
DV
4298 return;
4299
40da1d31 4300 state = fetch_and_zero(&dev_priv->modeset_restore_state);
ce87ea15
DV
4301 if (!state)
4302 goto unlock;
4303
7514747d 4304 /* reset doesn't touch the display */
4ac2ba2f 4305 if (!gpu_reset_clobbers_display(dev_priv)) {
ce87ea15
DV
4306 /* for testing only restore the display */
4307 ret = __intel_display_resume(dev, state, ctx);
942d5d0d
CW
4308 if (ret)
4309 DRM_ERROR("Restoring old state failed with %i\n", ret);
73974893
ML
4310 } else {
4311 /*
4312 * The display has been reset as well,
4313 * so need a full re-initialization.
4314 */
51f59205 4315 intel_pps_unlock_regs_wa(dev_priv);
73974893 4316 intel_modeset_init_hw(dev);
f72b84c6 4317 intel_init_clock_gating(dev_priv);
7514747d 4318
73974893
ML
4319 spin_lock_irq(&dev_priv->irq_lock);
4320 if (dev_priv->display.hpd_irq_setup)
4321 dev_priv->display.hpd_irq_setup(dev_priv);
4322 spin_unlock_irq(&dev_priv->irq_lock);
7514747d 4323
581e49fe 4324 ret = __intel_display_resume(dev, state, ctx);
73974893
ML
4325 if (ret)
4326 DRM_ERROR("Restoring old state failed with %i\n", ret);
7514747d 4327
73974893
ML
4328 intel_hpd_init(dev_priv);
4329 }
7514747d 4330
ce87ea15
DV
4331 drm_atomic_state_put(state);
4332unlock:
73974893
ML
4333 drm_modeset_drop_locks(ctx);
4334 drm_modeset_acquire_fini(ctx);
4335 mutex_unlock(&dev->mode_config.mutex);
9db529aa
DV
4336
4337 clear_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags);
7514747d
VS
4338}
4339
d1622119
VS
4340static void icl_set_pipe_chicken(struct intel_crtc *crtc)
4341{
4342 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4343 enum pipe pipe = crtc->pipe;
4344 u32 tmp;
4345
4346 tmp = I915_READ(PIPE_CHICKEN(pipe));
4347
4348 /*
4349 * Display WA #1153: icl
4350 * enable hardware to bypass the alpha math
4351 * and rounding for per-pixel values 00 and 0xff
4352 */
4353 tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
26eeea15
AS
4354 /*
4355 * Display WA # 1605353570: icl
4356 * Set the pixel rounding bit to 1 for allowing
4357 * passthrough of Frame buffer pixels unmodified
4358 * across pipe
4359 */
4360 tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;
d1622119
VS
4361 I915_WRITE(PIPE_CHICKEN(pipe), tmp);
4362}
4363
1a15b77b
VS
4364static void intel_update_pipe_config(const struct intel_crtc_state *old_crtc_state,
4365 const struct intel_crtc_state *new_crtc_state)
e30e8f75 4366{
1a15b77b 4367 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
6315b5d3 4368 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
e30e8f75 4369
bfd16b2a 4370 /* drm_atomic_helper_update_legacy_modeset_state might not be called. */
1a15b77b 4371 crtc->base.mode = new_crtc_state->base.mode;
bfd16b2a 4372
e30e8f75
GP
4373 /*
4374 * Update pipe size and adjust fitter if needed: the reason for this is
4375 * that in compute_mode_changes we check the native mode (not the pfit
4376 * mode) to see if we can flip rather than do a full mode set. In the
4377 * fastboot case, we'll flip, but if we don't update the pipesrc and
4378 * pfit state, we'll end up with a big fb scanned out into the wrong
4379 * sized surface.
e30e8f75
GP
4380 */
4381
e30e8f75 4382 I915_WRITE(PIPESRC(crtc->pipe),
1a15b77b
VS
4383 ((new_crtc_state->pipe_src_w - 1) << 16) |
4384 (new_crtc_state->pipe_src_h - 1));
bfd16b2a
ML
4385
4386 /* on skylake this is done by detaching scalers */
6315b5d3 4387 if (INTEL_GEN(dev_priv) >= 9) {
15cbe5d0 4388 skl_detach_scalers(new_crtc_state);
bfd16b2a 4389
1a15b77b 4390 if (new_crtc_state->pch_pfit.enabled)
b2562712 4391 skylake_pfit_enable(new_crtc_state);
6e266956 4392 } else if (HAS_PCH_SPLIT(dev_priv)) {
1a15b77b 4393 if (new_crtc_state->pch_pfit.enabled)
b2562712 4394 ironlake_pfit_enable(new_crtc_state);
bfd16b2a 4395 else if (old_crtc_state->pch_pfit.enabled)
b2562712 4396 ironlake_pfit_disable(old_crtc_state);
e30e8f75 4397 }
c0550305 4398
108d14bd
VS
4399 if (INTEL_GEN(dev_priv) >= 11)
4400 icl_set_pipe_chicken(crtc);
e30e8f75
GP
4401}
4402
4cbe4b2b 4403static void intel_fdi_normal_train(struct intel_crtc *crtc)
5e84e1a4 4404{
4cbe4b2b 4405 struct drm_device *dev = crtc->base.dev;
fac5e23e 4406 struct drm_i915_private *dev_priv = to_i915(dev);
4cbe4b2b 4407 int pipe = crtc->pipe;
f0f59a00
VS
4408 i915_reg_t reg;
4409 u32 temp;
5e84e1a4
ZW
4410
4411 /* enable normal train */
4412 reg = FDI_TX_CTL(pipe);
4413 temp = I915_READ(reg);
fd6b8f43 4414 if (IS_IVYBRIDGE(dev_priv)) {
357555c0
JB
4415 temp &= ~FDI_LINK_TRAIN_NONE_IVB;
4416 temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
61e499bf
KP
4417 } else {
4418 temp &= ~FDI_LINK_TRAIN_NONE;
4419 temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
357555c0 4420 }
5e84e1a4
ZW
4421 I915_WRITE(reg, temp);
4422
4423 reg = FDI_RX_CTL(pipe);
4424 temp = I915_READ(reg);
6e266956 4425 if (HAS_PCH_CPT(dev_priv)) {
5e84e1a4
ZW
4426 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
4427 temp |= FDI_LINK_TRAIN_NORMAL_CPT;
4428 } else {
4429 temp &= ~FDI_LINK_TRAIN_NONE;
4430 temp |= FDI_LINK_TRAIN_NONE;
4431 }
4432 I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
4433
4434 /* wait one idle pattern time */
4435 POSTING_READ(reg);
4436 udelay(1000);
357555c0
JB
4437
4438 /* IVB wants error correction enabled */
fd6b8f43 4439 if (IS_IVYBRIDGE(dev_priv))
357555c0
JB
4440 I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
4441 FDI_FE_ERRC_ENABLE);
5e84e1a4
ZW
4442}
4443
8db9d77b 4444/* The FDI link training functions for ILK/Ibexpeak. */
dc4a1094
ACO
4445static void ironlake_fdi_link_train(struct intel_crtc *crtc,
4446 const struct intel_crtc_state *crtc_state)
8db9d77b 4447{
4cbe4b2b 4448 struct drm_device *dev = crtc->base.dev;
fac5e23e 4449 struct drm_i915_private *dev_priv = to_i915(dev);
4cbe4b2b 4450 int pipe = crtc->pipe;
f0f59a00
VS
4451 i915_reg_t reg;
4452 u32 temp, tries;
8db9d77b 4453
1c8562f6 4454 /* FDI needs bits from pipe first */
0fc932b8 4455 assert_pipe_enabled(dev_priv, pipe);
0fc932b8 4456
e1a44743
AJ
4457 /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
4458 for train result */
5eddb70b
CW
4459 reg = FDI_RX_IMR(pipe);
4460 temp = I915_READ(reg);
e1a44743
AJ
4461 temp &= ~FDI_RX_SYMBOL_LOCK;
4462 temp &= ~FDI_RX_BIT_LOCK;
5eddb70b
CW
4463 I915_WRITE(reg, temp);
4464 I915_READ(reg);
e1a44743
AJ
4465 udelay(150);
4466
8db9d77b 4467 /* enable CPU FDI TX and PCH FDI RX */
5eddb70b
CW
4468 reg = FDI_TX_CTL(pipe);
4469 temp = I915_READ(reg);
627eb5a3 4470 temp &= ~FDI_DP_PORT_WIDTH_MASK;
dc4a1094 4471 temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
8db9d77b
ZW
4472 temp &= ~FDI_LINK_TRAIN_NONE;
4473 temp |= FDI_LINK_TRAIN_PATTERN_1;
5eddb70b 4474 I915_WRITE(reg, temp | FDI_TX_ENABLE);
8db9d77b 4475
5eddb70b
CW
4476 reg = FDI_RX_CTL(pipe);
4477 temp = I915_READ(reg);
8db9d77b
ZW
4478 temp &= ~FDI_LINK_TRAIN_NONE;
4479 temp |= FDI_LINK_TRAIN_PATTERN_1;
5eddb70b
CW
4480 I915_WRITE(reg, temp | FDI_RX_ENABLE);
4481
4482 POSTING_READ(reg);
8db9d77b
ZW
4483 udelay(150);
4484
5b2adf89 4485 /* Ironlake workaround, enable clock pointer after FDI enable*/
8f5718a6
DV
4486 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
4487 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
4488 FDI_RX_PHASE_SYNC_POINTER_EN);
5b2adf89 4489
5eddb70b 4490 reg = FDI_RX_IIR(pipe);
e1a44743 4491 for (tries = 0; tries < 5; tries++) {
5eddb70b 4492 temp = I915_READ(reg);
8db9d77b
ZW
4493 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
4494
4495 if ((temp & FDI_RX_BIT_LOCK)) {
4496 DRM_DEBUG_KMS("FDI train 1 done.\n");
5eddb70b 4497 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
8db9d77b
ZW
4498 break;
4499 }
8db9d77b 4500 }
e1a44743 4501 if (tries == 5)
5eddb70b 4502 DRM_ERROR("FDI train 1 fail!\n");
8db9d77b
ZW
4503
4504 /* Train 2 */
5eddb70b
CW
4505 reg = FDI_TX_CTL(pipe);
4506 temp = I915_READ(reg);
8db9d77b
ZW
4507 temp &= ~FDI_LINK_TRAIN_NONE;
4508 temp |= FDI_LINK_TRAIN_PATTERN_2;
5eddb70b 4509 I915_WRITE(reg, temp);
8db9d77b 4510
5eddb70b
CW
4511 reg = FDI_RX_CTL(pipe);
4512 temp = I915_READ(reg);
8db9d77b
ZW
4513 temp &= ~FDI_LINK_TRAIN_NONE;
4514 temp |= FDI_LINK_TRAIN_PATTERN_2;
5eddb70b 4515 I915_WRITE(reg, temp);
8db9d77b 4516
5eddb70b
CW
4517 POSTING_READ(reg);
4518 udelay(150);
8db9d77b 4519
5eddb70b 4520 reg = FDI_RX_IIR(pipe);
e1a44743 4521 for (tries = 0; tries < 5; tries++) {
5eddb70b 4522 temp = I915_READ(reg);
8db9d77b
ZW
4523 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
4524
4525 if (temp & FDI_RX_SYMBOL_LOCK) {
5eddb70b 4526 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
8db9d77b
ZW
4527 DRM_DEBUG_KMS("FDI train 2 done.\n");
4528 break;
4529 }
8db9d77b 4530 }
e1a44743 4531 if (tries == 5)
5eddb70b 4532 DRM_ERROR("FDI train 2 fail!\n");
8db9d77b
ZW
4533
4534 DRM_DEBUG_KMS("FDI train done\n");
5c5313c8 4535
8db9d77b
ZW
4536}
4537
0206e353 4538static const int snb_b_fdi_train_param[] = {
8db9d77b
ZW
4539 FDI_LINK_TRAIN_400MV_0DB_SNB_B,
4540 FDI_LINK_TRAIN_400MV_6DB_SNB_B,
4541 FDI_LINK_TRAIN_600MV_3_5DB_SNB_B,
4542 FDI_LINK_TRAIN_800MV_0DB_SNB_B,
4543};
4544
4545/* The FDI link training functions for SNB/Cougarpoint. */
dc4a1094
ACO
4546static void gen6_fdi_link_train(struct intel_crtc *crtc,
4547 const struct intel_crtc_state *crtc_state)
8db9d77b 4548{
4cbe4b2b 4549 struct drm_device *dev = crtc->base.dev;
fac5e23e 4550 struct drm_i915_private *dev_priv = to_i915(dev);
4cbe4b2b 4551 int pipe = crtc->pipe;
f0f59a00
VS
4552 i915_reg_t reg;
4553 u32 temp, i, retry;
8db9d77b 4554
e1a44743
AJ
4555 /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
4556 for train result */
5eddb70b
CW
4557 reg = FDI_RX_IMR(pipe);
4558 temp = I915_READ(reg);
e1a44743
AJ
4559 temp &= ~FDI_RX_SYMBOL_LOCK;
4560 temp &= ~FDI_RX_BIT_LOCK;
5eddb70b
CW
4561 I915_WRITE(reg, temp);
4562
4563 POSTING_READ(reg);
e1a44743
AJ
4564 udelay(150);
4565
8db9d77b 4566 /* enable CPU FDI TX and PCH FDI RX */
5eddb70b
CW
4567 reg = FDI_TX_CTL(pipe);
4568 temp = I915_READ(reg);
627eb5a3 4569 temp &= ~FDI_DP_PORT_WIDTH_MASK;
dc4a1094 4570 temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
8db9d77b
ZW
4571 temp &= ~FDI_LINK_TRAIN_NONE;
4572 temp |= FDI_LINK_TRAIN_PATTERN_1;
4573 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
4574 /* SNB-B */
4575 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
5eddb70b 4576 I915_WRITE(reg, temp | FDI_TX_ENABLE);
8db9d77b 4577
d74cf324
DV
4578 I915_WRITE(FDI_RX_MISC(pipe),
4579 FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
4580
5eddb70b
CW
4581 reg = FDI_RX_CTL(pipe);
4582 temp = I915_READ(reg);
6e266956 4583 if (HAS_PCH_CPT(dev_priv)) {
8db9d77b
ZW
4584 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
4585 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
4586 } else {
4587 temp &= ~FDI_LINK_TRAIN_NONE;
4588 temp |= FDI_LINK_TRAIN_PATTERN_1;
4589 }
5eddb70b
CW
4590 I915_WRITE(reg, temp | FDI_RX_ENABLE);
4591
4592 POSTING_READ(reg);
8db9d77b
ZW
4593 udelay(150);
4594
0206e353 4595 for (i = 0; i < 4; i++) {
5eddb70b
CW
4596 reg = FDI_TX_CTL(pipe);
4597 temp = I915_READ(reg);
8db9d77b
ZW
4598 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
4599 temp |= snb_b_fdi_train_param[i];
5eddb70b
CW
4600 I915_WRITE(reg, temp);
4601
4602 POSTING_READ(reg);
8db9d77b
ZW
4603 udelay(500);
4604
fa37d39e
SP
4605 for (retry = 0; retry < 5; retry++) {
4606 reg = FDI_RX_IIR(pipe);
4607 temp = I915_READ(reg);
4608 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
4609 if (temp & FDI_RX_BIT_LOCK) {
4610 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
4611 DRM_DEBUG_KMS("FDI train 1 done.\n");
4612 break;
4613 }
4614 udelay(50);
8db9d77b 4615 }
fa37d39e
SP
4616 if (retry < 5)
4617 break;
8db9d77b
ZW
4618 }
4619 if (i == 4)
5eddb70b 4620 DRM_ERROR("FDI train 1 fail!\n");
8db9d77b
ZW
4621
4622 /* Train 2 */
5eddb70b
CW
4623 reg = FDI_TX_CTL(pipe);
4624 temp = I915_READ(reg);
8db9d77b
ZW
4625 temp &= ~FDI_LINK_TRAIN_NONE;
4626 temp |= FDI_LINK_TRAIN_PATTERN_2;
cf819eff 4627 if (IS_GEN(dev_priv, 6)) {
8db9d77b
ZW
4628 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
4629 /* SNB-B */
4630 temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
4631 }
5eddb70b 4632 I915_WRITE(reg, temp);
8db9d77b 4633
5eddb70b
CW
4634 reg = FDI_RX_CTL(pipe);
4635 temp = I915_READ(reg);
6e266956 4636 if (HAS_PCH_CPT(dev_priv)) {
8db9d77b
ZW
4637 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
4638 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
4639 } else {
4640 temp &= ~FDI_LINK_TRAIN_NONE;
4641 temp |= FDI_LINK_TRAIN_PATTERN_2;
4642 }
5eddb70b
CW
4643 I915_WRITE(reg, temp);
4644
4645 POSTING_READ(reg);
8db9d77b
ZW
4646 udelay(150);
4647
0206e353 4648 for (i = 0; i < 4; i++) {
5eddb70b
CW
4649 reg = FDI_TX_CTL(pipe);
4650 temp = I915_READ(reg);
8db9d77b
ZW
4651 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
4652 temp |= snb_b_fdi_train_param[i];
5eddb70b
CW
4653 I915_WRITE(reg, temp);
4654
4655 POSTING_READ(reg);
8db9d77b
ZW
4656 udelay(500);
4657
fa37d39e
SP
4658 for (retry = 0; retry < 5; retry++) {
4659 reg = FDI_RX_IIR(pipe);
4660 temp = I915_READ(reg);
4661 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
4662 if (temp & FDI_RX_SYMBOL_LOCK) {
4663 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
4664 DRM_DEBUG_KMS("FDI train 2 done.\n");
4665 break;
4666 }
4667 udelay(50);
8db9d77b 4668 }
fa37d39e
SP
4669 if (retry < 5)
4670 break;
8db9d77b
ZW
4671 }
4672 if (i == 4)
5eddb70b 4673 DRM_ERROR("FDI train 2 fail!\n");
8db9d77b
ZW
4674
4675 DRM_DEBUG_KMS("FDI train done.\n");
4676}
4677
357555c0 4678/* Manual link training for Ivy Bridge A0 parts */
dc4a1094
ACO
4679static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
4680 const struct intel_crtc_state *crtc_state)
357555c0 4681{
4cbe4b2b 4682 struct drm_device *dev = crtc->base.dev;
fac5e23e 4683 struct drm_i915_private *dev_priv = to_i915(dev);
4cbe4b2b 4684 int pipe = crtc->pipe;
f0f59a00
VS
4685 i915_reg_t reg;
4686 u32 temp, i, j;
357555c0
JB
4687
4688 /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
4689 for train result */
4690 reg = FDI_RX_IMR(pipe);
4691 temp = I915_READ(reg);
4692 temp &= ~FDI_RX_SYMBOL_LOCK;
4693 temp &= ~FDI_RX_BIT_LOCK;
4694 I915_WRITE(reg, temp);
4695
4696 POSTING_READ(reg);
4697 udelay(150);
4698
01a415fd
DV
4699 DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
4700 I915_READ(FDI_RX_IIR(pipe)));
4701
139ccd3f
JB
4702 /* Try each vswing and preemphasis setting twice before moving on */
4703 for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
4704 /* disable first in case we need to retry */
4705 reg = FDI_TX_CTL(pipe);
4706 temp = I915_READ(reg);
4707 temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
4708 temp &= ~FDI_TX_ENABLE;
4709 I915_WRITE(reg, temp);
357555c0 4710
139ccd3f
JB
4711 reg = FDI_RX_CTL(pipe);
4712 temp = I915_READ(reg);
4713 temp &= ~FDI_LINK_TRAIN_AUTO;
4714 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
4715 temp &= ~FDI_RX_ENABLE;
4716 I915_WRITE(reg, temp);
357555c0 4717
139ccd3f 4718 /* enable CPU FDI TX and PCH FDI RX */
357555c0
JB
4719 reg = FDI_TX_CTL(pipe);
4720 temp = I915_READ(reg);
139ccd3f 4721 temp &= ~FDI_DP_PORT_WIDTH_MASK;
dc4a1094 4722 temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
139ccd3f 4723 temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
357555c0 4724 temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
139ccd3f
JB
4725 temp |= snb_b_fdi_train_param[j/2];
4726 temp |= FDI_COMPOSITE_SYNC;
4727 I915_WRITE(reg, temp | FDI_TX_ENABLE);
357555c0 4728
139ccd3f
JB
4729 I915_WRITE(FDI_RX_MISC(pipe),
4730 FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
357555c0 4731
139ccd3f 4732 reg = FDI_RX_CTL(pipe);
357555c0 4733 temp = I915_READ(reg);
139ccd3f
JB
4734 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
4735 temp |= FDI_COMPOSITE_SYNC;
4736 I915_WRITE(reg, temp | FDI_RX_ENABLE);
357555c0 4737
139ccd3f
JB
4738 POSTING_READ(reg);
4739 udelay(1); /* should be 0.5us */
357555c0 4740
139ccd3f
JB
4741 for (i = 0; i < 4; i++) {
4742 reg = FDI_RX_IIR(pipe);
4743 temp = I915_READ(reg);
4744 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
357555c0 4745
139ccd3f
JB
4746 if (temp & FDI_RX_BIT_LOCK ||
4747 (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
4748 I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
4749 DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
4750 i);
4751 break;
4752 }
4753 udelay(1); /* should be 0.5us */
4754 }
4755 if (i == 4) {
4756 DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
4757 continue;
4758 }
357555c0 4759
139ccd3f 4760 /* Train 2 */
357555c0
JB
4761 reg = FDI_TX_CTL(pipe);
4762 temp = I915_READ(reg);
139ccd3f
JB
4763 temp &= ~FDI_LINK_TRAIN_NONE_IVB;
4764 temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
4765 I915_WRITE(reg, temp);
4766
4767 reg = FDI_RX_CTL(pipe);
4768 temp = I915_READ(reg);
4769 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
4770 temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
357555c0
JB
4771 I915_WRITE(reg, temp);
4772
4773 POSTING_READ(reg);
139ccd3f 4774 udelay(2); /* should be 1.5us */
357555c0 4775
139ccd3f
JB
4776 for (i = 0; i < 4; i++) {
4777 reg = FDI_RX_IIR(pipe);
4778 temp = I915_READ(reg);
4779 DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
357555c0 4780
139ccd3f
JB
4781 if (temp & FDI_RX_SYMBOL_LOCK ||
4782 (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
4783 I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
4784 DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
4785 i);
4786 goto train_done;
4787 }
4788 udelay(2); /* should be 1.5us */
357555c0 4789 }
139ccd3f
JB
4790 if (i == 4)
4791 DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
357555c0 4792 }
357555c0 4793
139ccd3f 4794train_done:
357555c0
JB
4795 DRM_DEBUG_KMS("FDI train done.\n");
4796}
4797
b2354c78 4798static void ironlake_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
2c07245f 4799{
b2354c78
ML
4800 struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->base.crtc);
4801 struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
2c07245f 4802 int pipe = intel_crtc->pipe;
f0f59a00
VS
4803 i915_reg_t reg;
4804 u32 temp;
c64e311e 4805
c98e9dcf 4806 /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
5eddb70b
CW
4807 reg = FDI_RX_CTL(pipe);
4808 temp = I915_READ(reg);
627eb5a3 4809 temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
b2354c78 4810 temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
dfd07d72 4811 temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
5eddb70b
CW
4812 I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
4813
4814 POSTING_READ(reg);
c98e9dcf
JB
4815 udelay(200);
4816
4817 /* Switch from Rawclk to PCDclk */
5eddb70b
CW
4818 temp = I915_READ(reg);
4819 I915_WRITE(reg, temp | FDI_PCDCLK);
4820
4821 POSTING_READ(reg);
c98e9dcf
JB
4822 udelay(200);
4823
20749730
PZ
4824 /* Enable CPU FDI TX PLL, always on for Ironlake */
4825 reg = FDI_TX_CTL(pipe);
4826 temp = I915_READ(reg);
4827 if ((temp & FDI_TX_PLL_ENABLE) == 0) {
4828 I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
5eddb70b 4829
20749730
PZ
4830 POSTING_READ(reg);
4831 udelay(100);
6be4a607 4832 }
0e23b99d
JB
4833}
4834
88cefb6c
DV
4835static void ironlake_fdi_pll_disable(struct intel_crtc *intel_crtc)
4836{
4837 struct drm_device *dev = intel_crtc->base.dev;
fac5e23e 4838 struct drm_i915_private *dev_priv = to_i915(dev);
88cefb6c 4839 int pipe = intel_crtc->pipe;
f0f59a00
VS
4840 i915_reg_t reg;
4841 u32 temp;
88cefb6c
DV
4842
4843 /* Switch from PCDclk to Rawclk */
4844 reg = FDI_RX_CTL(pipe);
4845 temp = I915_READ(reg);
4846 I915_WRITE(reg, temp & ~FDI_PCDCLK);
4847
4848 /* Disable CPU FDI TX PLL */
4849 reg = FDI_TX_CTL(pipe);
4850 temp = I915_READ(reg);
4851 I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
4852
4853 POSTING_READ(reg);
4854 udelay(100);
4855
4856 reg = FDI_RX_CTL(pipe);
4857 temp = I915_READ(reg);
4858 I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
4859
4860 /* Wait for the clocks to turn off. */
4861 POSTING_READ(reg);
4862 udelay(100);
4863}
4864
0fc932b8
JB
4865static void ironlake_fdi_disable(struct drm_crtc *crtc)
4866{
4867 struct drm_device *dev = crtc->dev;
fac5e23e 4868 struct drm_i915_private *dev_priv = to_i915(dev);
0fc932b8
JB
4869 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
4870 int pipe = intel_crtc->pipe;
f0f59a00
VS
4871 i915_reg_t reg;
4872 u32 temp;
0fc932b8
JB
4873
4874 /* disable CPU FDI tx and PCH FDI rx */
4875 reg = FDI_TX_CTL(pipe);
4876 temp = I915_READ(reg);
4877 I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
4878 POSTING_READ(reg);
4879
4880 reg = FDI_RX_CTL(pipe);
4881 temp = I915_READ(reg);
4882 temp &= ~(0x7 << 16);
dfd07d72 4883 temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
0fc932b8
JB
4884 I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
4885
4886 POSTING_READ(reg);
4887 udelay(100);
4888
4889 /* Ironlake workaround, disable clock pointer after downing FDI */
6e266956 4890 if (HAS_PCH_IBX(dev_priv))
6f06ce18 4891 I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
0fc932b8
JB
4892
4893 /* still set train pattern 1 */
4894 reg = FDI_TX_CTL(pipe);
4895 temp = I915_READ(reg);
4896 temp &= ~FDI_LINK_TRAIN_NONE;
4897 temp |= FDI_LINK_TRAIN_PATTERN_1;
4898 I915_WRITE(reg, temp);
4899
4900 reg = FDI_RX_CTL(pipe);
4901 temp = I915_READ(reg);
6e266956 4902 if (HAS_PCH_CPT(dev_priv)) {
0fc932b8
JB
4903 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
4904 temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
4905 } else {
4906 temp &= ~FDI_LINK_TRAIN_NONE;
4907 temp |= FDI_LINK_TRAIN_PATTERN_1;
4908 }
4909 /* BPC in FDI rx is consistent with that in PIPECONF */
4910 temp &= ~(0x07 << 16);
dfd07d72 4911 temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
0fc932b8
JB
4912 I915_WRITE(reg, temp);
4913
4914 POSTING_READ(reg);
4915 udelay(100);
4916}
4917
49d73912 4918bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
5dce5b93 4919{
fa05887a
DV
4920 struct drm_crtc *crtc;
4921 bool cleanup_done;
4922
4923 drm_for_each_crtc(crtc, &dev_priv->drm) {
4924 struct drm_crtc_commit *commit;
4925 spin_lock(&crtc->commit_lock);
4926 commit = list_first_entry_or_null(&crtc->commit_list,
4927 struct drm_crtc_commit, commit_entry);
4928 cleanup_done = commit ?
4929 try_wait_for_completion(&commit->cleanup_done) : true;
4930 spin_unlock(&crtc->commit_lock);
4931
4932 if (cleanup_done)
5dce5b93
CW
4933 continue;
4934
fa05887a 4935 drm_crtc_wait_one_vblank(crtc);
5dce5b93
CW
4936
4937 return true;
4938 }
4939
4940 return false;
4941}
4942
b7076546 4943void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
060f02d8
VS
4944{
4945 u32 temp;
4946
4947 I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);
4948
4949 mutex_lock(&dev_priv->sb_lock);
4950
4951 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
4952 temp |= SBI_SSCCTL_DISABLE;
4953 intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
4954
4955 mutex_unlock(&dev_priv->sb_lock);
4956}
4957
e615efe4 4958/* Program iCLKIP clock to the desired frequency */
c5b36fac 4959static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
e615efe4 4960{
c5b36fac 4961 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
0dcdc382 4962 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
c5b36fac 4963 int clock = crtc_state->base.adjusted_mode.crtc_clock;
e615efe4
ED
4964 u32 divsel, phaseinc, auxdiv, phasedir = 0;
4965 u32 temp;
4966
060f02d8 4967 lpt_disable_iclkip(dev_priv);
e615efe4 4968
64b46a06
VS
4969 /* The iCLK virtual clock root frequency is in MHz,
4970 * but the adjusted_mode->crtc_clock in in KHz. To get the
4971 * divisors, it is necessary to divide one by another, so we
4972 * convert the virtual clock precision to KHz here for higher
4973 * precision.
4974 */
4975 for (auxdiv = 0; auxdiv < 2; auxdiv++) {
e615efe4
ED
4976 u32 iclk_virtual_root_freq = 172800 * 1000;
4977 u32 iclk_pi_range = 64;
64b46a06 4978 u32 desired_divisor;
e615efe4 4979
64b46a06
VS
4980 desired_divisor = DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
4981 clock << auxdiv);
4982 divsel = (desired_divisor / iclk_pi_range) - 2;
4983 phaseinc = desired_divisor % iclk_pi_range;
e615efe4 4984
64b46a06
VS
4985 /*
4986 * Near 20MHz is a corner case which is
4987 * out of range for the 7-bit divisor
4988 */
4989 if (divsel <= 0x7f)
4990 break;
e615efe4
ED
4991 }
4992
4993 /* This should not happen with any sane values */
4994 WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
4995 ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
4996 WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
4997 ~SBI_SSCDIVINTPHASE_INCVAL_MASK);
4998
4999 DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
12d7ceed 5000 clock,
e615efe4
ED
5001 auxdiv,
5002 divsel,
5003 phasedir,
5004 phaseinc);
5005
060f02d8
VS
5006 mutex_lock(&dev_priv->sb_lock);
5007
e615efe4 5008 /* Program SSCDIVINTPHASE6 */
988d6ee8 5009 temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
e615efe4
ED
5010 temp &= ~SBI_SSCDIVINTPHASE_DIVSEL_MASK;
5011 temp |= SBI_SSCDIVINTPHASE_DIVSEL(divsel);
5012 temp &= ~SBI_SSCDIVINTPHASE_INCVAL_MASK;
5013 temp |= SBI_SSCDIVINTPHASE_INCVAL(phaseinc);
5014 temp |= SBI_SSCDIVINTPHASE_DIR(phasedir);
5015 temp |= SBI_SSCDIVINTPHASE_PROPAGATE;
988d6ee8 5016 intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE6, temp, SBI_ICLK);
e615efe4
ED
5017
5018 /* Program SSCAUXDIV */
988d6ee8 5019 temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
e615efe4
ED
5020 temp &= ~SBI_SSCAUXDIV_FINALDIV2SEL(1);
5021 temp |= SBI_SSCAUXDIV_FINALDIV2SEL(auxdiv);
988d6ee8 5022 intel_sbi_write(dev_priv, SBI_SSCAUXDIV6, temp, SBI_ICLK);
e615efe4
ED
5023
5024 /* Enable modulator and associated divider */
988d6ee8 5025 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
e615efe4 5026 temp &= ~SBI_SSCCTL_DISABLE;
988d6ee8 5027 intel_sbi_write(dev_priv, SBI_SSCCTL6, temp, SBI_ICLK);
e615efe4 5028
060f02d8
VS
5029 mutex_unlock(&dev_priv->sb_lock);
5030
e615efe4
ED
5031 /* Wait for initialization time */
5032 udelay(24);
5033
5034 I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
5035}
5036
8802e5b6
VS
5037int lpt_get_iclkip(struct drm_i915_private *dev_priv)
5038{
5039 u32 divsel, phaseinc, auxdiv;
5040 u32 iclk_virtual_root_freq = 172800 * 1000;
5041 u32 iclk_pi_range = 64;
5042 u32 desired_divisor;
5043 u32 temp;
5044
5045 if ((I915_READ(PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
5046 return 0;
5047
5048 mutex_lock(&dev_priv->sb_lock);
5049
5050 temp = intel_sbi_read(dev_priv, SBI_SSCCTL6, SBI_ICLK);
5051 if (temp & SBI_SSCCTL_DISABLE) {
5052 mutex_unlock(&dev_priv->sb_lock);
5053 return 0;
5054 }
5055
5056 temp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE6, SBI_ICLK);
5057 divsel = (temp & SBI_SSCDIVINTPHASE_DIVSEL_MASK) >>
5058 SBI_SSCDIVINTPHASE_DIVSEL_SHIFT;
5059 phaseinc = (temp & SBI_SSCDIVINTPHASE_INCVAL_MASK) >>
5060 SBI_SSCDIVINTPHASE_INCVAL_SHIFT;
5061
5062 temp = intel_sbi_read(dev_priv, SBI_SSCAUXDIV6, SBI_ICLK);
5063 auxdiv = (temp & SBI_SSCAUXDIV_FINALDIV2SEL_MASK) >>
5064 SBI_SSCAUXDIV_FINALDIV2SEL_SHIFT;
5065
5066 mutex_unlock(&dev_priv->sb_lock);
5067
5068 desired_divisor = (divsel + 2) * iclk_pi_range + phaseinc;
5069
5070 return DIV_ROUND_CLOSEST(iclk_virtual_root_freq,
5071 desired_divisor << auxdiv);
5072}
5073
5e1cdf54 5074static void ironlake_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state,
275f01b2
DV
5075 enum pipe pch_transcoder)
5076{
5e1cdf54
ML
5077 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5078 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5079 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
275f01b2
DV
5080
5081 I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
5082 I915_READ(HTOTAL(cpu_transcoder)));
5083 I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
5084 I915_READ(HBLANK(cpu_transcoder)));
5085 I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
5086 I915_READ(HSYNC(cpu_transcoder)));
5087
5088 I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
5089 I915_READ(VTOTAL(cpu_transcoder)));
5090 I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
5091 I915_READ(VBLANK(cpu_transcoder)));
5092 I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
5093 I915_READ(VSYNC(cpu_transcoder)));
5094 I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
5095 I915_READ(VSYNCSHIFT(cpu_transcoder)));
5096}
5097
b0b62d84 5098static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
1fbc0d78 5099{
ba3f4d0a 5100 u32 temp;
1fbc0d78
DV
5101
5102 temp = I915_READ(SOUTH_CHICKEN1);
003632d9 5103 if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
1fbc0d78
DV
5104 return;
5105
5106 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
5107 WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
5108
003632d9
ACO
5109 temp &= ~FDI_BC_BIFURCATION_SELECT;
5110 if (enable)
5111 temp |= FDI_BC_BIFURCATION_SELECT;
5112
5113 DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis");
1fbc0d78
DV
5114 I915_WRITE(SOUTH_CHICKEN1, temp);
5115 POSTING_READ(SOUTH_CHICKEN1);
5116}
5117
b0b62d84 5118static void ivybridge_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
1fbc0d78 5119{
b0b62d84
ML
5120 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5121 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1fbc0d78 5122
b0b62d84 5123 switch (crtc->pipe) {
1fbc0d78
DV
5124 case PIPE_A:
5125 break;
5126 case PIPE_B:
b0b62d84
ML
5127 if (crtc_state->fdi_lanes > 2)
5128 cpt_set_fdi_bc_bifurcation(dev_priv, false);
1fbc0d78 5129 else
b0b62d84 5130 cpt_set_fdi_bc_bifurcation(dev_priv, true);
1fbc0d78
DV
5131
5132 break;
5133 case PIPE_C:
b0b62d84 5134 cpt_set_fdi_bc_bifurcation(dev_priv, true);
1fbc0d78
DV
5135
5136 break;
5137 default:
5138 BUG();
5139 }
5140}
5141
f606bc6d
VS
5142/*
5143 * Finds the encoder associated with the given CRTC. This can only be
5144 * used when we know that the CRTC isn't feeding multiple encoders!
5145 */
5146static struct intel_encoder *
5a0b385e
VS
5147intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
5148 const struct intel_crtc_state *crtc_state)
f606bc6d
VS
5149{
5150 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
f606bc6d
VS
5151 const struct drm_connector_state *connector_state;
5152 const struct drm_connector *connector;
5153 struct intel_encoder *encoder = NULL;
5154 int num_encoders = 0;
5155 int i;
5156
5a0b385e 5157 for_each_new_connector_in_state(&state->base, connector, connector_state, i) {
f606bc6d
VS
5158 if (connector_state->crtc != &crtc->base)
5159 continue;
5160
5161 encoder = to_intel_encoder(connector_state->best_encoder);
5162 num_encoders++;
5163 }
5164
5165 WARN(num_encoders != 1, "%d encoders for pipe %c\n",
5166 num_encoders, pipe_name(crtc->pipe));
5167
5168 return encoder;
5169}
5170
f67a559d
JB
5171/*
5172 * Enable PCH resources required for PCH ports:
5173 * - PCH PLLs
5174 * - FDI training & RX/TX
5175 * - update transcoder timings
5176 * - DP transcoding bits
5177 * - transcoder
5178 */
5a0b385e
VS
5179static void ironlake_pch_enable(const struct intel_atomic_state *state,
5180 const struct intel_crtc_state *crtc_state)
0e23b99d 5181{
2ce42273 5182 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
4cbe4b2b 5183 struct drm_device *dev = crtc->base.dev;
fac5e23e 5184 struct drm_i915_private *dev_priv = to_i915(dev);
4cbe4b2b 5185 int pipe = crtc->pipe;
f0f59a00 5186 u32 temp;
2c07245f 5187
ab9412ba 5188 assert_pch_transcoder_disabled(dev_priv, pipe);
e7e164db 5189
fd6b8f43 5190 if (IS_IVYBRIDGE(dev_priv))
b0b62d84 5191 ivybridge_update_fdi_bc_bifurcation(crtc_state);
1fbc0d78 5192
cd986abb
DV
5193 /* Write the TU size bits before fdi link training, so that error
5194 * detection works. */
5195 I915_WRITE(FDI_RX_TUSIZE1(pipe),
5196 I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
5197
c98e9dcf 5198 /* For PCH output, training FDI link */
dc4a1094 5199 dev_priv->display.fdi_link_train(crtc, crtc_state);
2c07245f 5200
3ad8a208
DV
5201 /* We need to program the right clock selection before writing the pixel
5202 * mutliplier into the DPLL. */
6e266956 5203 if (HAS_PCH_CPT(dev_priv)) {
ee7b9f93 5204 u32 sel;
4b645f14 5205
c98e9dcf 5206 temp = I915_READ(PCH_DPLL_SEL);
11887397
DV
5207 temp |= TRANS_DPLL_ENABLE(pipe);
5208 sel = TRANS_DPLLB_SEL(pipe);
2ce42273 5209 if (crtc_state->shared_dpll ==
8106ddbd 5210 intel_get_shared_dpll_by_id(dev_priv, DPLL_ID_PCH_PLL_B))
ee7b9f93
JB
5211 temp |= sel;
5212 else
5213 temp &= ~sel;
c98e9dcf 5214 I915_WRITE(PCH_DPLL_SEL, temp);
c98e9dcf 5215 }
5eddb70b 5216
3ad8a208
DV
5217 /* XXX: pch pll's can be enabled any time before we enable the PCH
5218 * transcoder, and we actually should do this to not upset any PCH
5219 * transcoder that already use the clock when we share it.
5220 *
5221 * Note that enable_shared_dpll tries to do the right thing, but
5222 * get_shared_dpll unconditionally resets the pll - we need that to have
5223 * the right LVDS enable sequence. */
65c307fd 5224 intel_enable_shared_dpll(crtc_state);
3ad8a208 5225
d9b6cb56
JB
5226 /* set transcoder timing, panel must allow it */
5227 assert_panel_unlocked(dev_priv, pipe);
5e1cdf54 5228 ironlake_pch_transcoder_set_timings(crtc_state, pipe);
8db9d77b 5229
303b81e0 5230 intel_fdi_normal_train(crtc);
5e84e1a4 5231
c98e9dcf 5232 /* For PCH DP, enable TRANS_DP_CTL */
6e266956 5233 if (HAS_PCH_CPT(dev_priv) &&
2ce42273 5234 intel_crtc_has_dp_encoder(crtc_state)) {
9c4edaee 5235 const struct drm_display_mode *adjusted_mode =
2ce42273 5236 &crtc_state->base.adjusted_mode;
dfd07d72 5237 u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
f0f59a00 5238 i915_reg_t reg = TRANS_DP_CTL(pipe);
f67dc6d8
VS
5239 enum port port;
5240
5eddb70b
CW
5241 temp = I915_READ(reg);
5242 temp &= ~(TRANS_DP_PORT_SEL_MASK |
220cad3c
EA
5243 TRANS_DP_SYNC_MASK |
5244 TRANS_DP_BPC_MASK);
e3ef4479 5245 temp |= TRANS_DP_OUTPUT_ENABLE;
9325c9f0 5246 temp |= bpc << 9; /* same format but at 11:9 */
c98e9dcf 5247
9c4edaee 5248 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
5eddb70b 5249 temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
9c4edaee 5250 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
5eddb70b 5251 temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
c98e9dcf 5252
5a0b385e 5253 port = intel_get_crtc_new_encoder(state, crtc_state)->port;
f67dc6d8
VS
5254 WARN_ON(port < PORT_B || port > PORT_D);
5255 temp |= TRANS_DP_PORT_SEL(port);
2c07245f 5256
5eddb70b 5257 I915_WRITE(reg, temp);
6be4a607 5258 }
b52eb4dc 5259
7efd90fb 5260 ironlake_enable_pch_transcoder(crtc_state);
f67a559d
JB
5261}
5262
5a0b385e
VS
5263static void lpt_pch_enable(const struct intel_atomic_state *state,
5264 const struct intel_crtc_state *crtc_state)
1507e5bd 5265{
2ce42273 5266 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
0dcdc382 5267 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2ce42273 5268 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
1507e5bd 5269
a2196033 5270 assert_pch_transcoder_disabled(dev_priv, PIPE_A);
1507e5bd 5271
c5b36fac 5272 lpt_program_iclkip(crtc_state);
1507e5bd 5273
0540e488 5274 /* Set transcoder timing. */
5e1cdf54 5275 ironlake_pch_transcoder_set_timings(crtc_state, PIPE_A);
1507e5bd 5276
937bb610 5277 lpt_enable_pch_transcoder(dev_priv, cpu_transcoder);
f67a559d
JB
5278}
5279
a1520318 5280static void cpt_verify_modeset(struct drm_device *dev, int pipe)
d4270e57 5281{
fac5e23e 5282 struct drm_i915_private *dev_priv = to_i915(dev);
f0f59a00 5283 i915_reg_t dslreg = PIPEDSL(pipe);
d4270e57
JB
5284 u32 temp;
5285
5286 temp = I915_READ(dslreg);
5287 udelay(500);
5288 if (wait_for(I915_READ(dslreg) != temp, 5)) {
d4270e57 5289 if (wait_for(I915_READ(dslreg) != temp, 5))
84f44ce7 5290 DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
d4270e57
JB
5291 }
5292}
5293
0a59952b
VS
5294/*
5295 * The hardware phase 0.0 refers to the center of the pixel.
5296 * We want to start from the top/left edge which is phase
5297 * -0.5. That matches how the hardware calculates the scaling
5298 * factors (from top-left of the first pixel to bottom-right
5299 * of the last pixel, as opposed to the pixel centers).
5300 *
5301 * For 4:2:0 subsampled chroma planes we obviously have to
5302 * adjust that so that the chroma sample position lands in
5303 * the right spot.
5304 *
5305 * Note that for packed YCbCr 4:2:2 formats there is no way to
5306 * control chroma siting. The hardware simply replicates the
5307 * chroma samples for both of the luma samples, and thus we don't
5308 * actually get the expected MPEG2 chroma siting convention :(
5309 * The same behaviour is observed on pre-SKL platforms as well.
e7a278a3
VS
5310 *
5311 * Theory behind the formula (note that we ignore sub-pixel
5312 * source coordinates):
5313 * s = source sample position
5314 * d = destination sample position
5315 *
5316 * Downscaling 4:1:
5317 * -0.5
5318 * | 0.0
5319 * | | 1.5 (initial phase)
5320 * | | |
5321 * v v v
5322 * | s | s | s | s |
5323 * | d |
5324 *
5325 * Upscaling 1:4:
5326 * -0.5
5327 * | -0.375 (initial phase)
5328 * | | 0.0
5329 * | | |
5330 * v v v
5331 * | s |
5332 * | d | d | d | d |
0a59952b 5333 */
e7a278a3 5334u16 skl_scaler_calc_phase(int sub, int scale, bool chroma_cosited)
0a59952b
VS
5335{
5336 int phase = -0x8000;
5337 u16 trip = 0;
5338
5339 if (chroma_cosited)
5340 phase += (sub - 1) * 0x8000 / sub;
5341
e7a278a3
VS
5342 phase += scale / (2 * sub);
5343
5344 /*
5345 * Hardware initial phase limited to [-0.5:1.5].
5346 * Since the max hardware scale factor is 3.0, we
5347 * should never actually excdeed 1.0 here.
5348 */
5349 WARN_ON(phase < -0x8000 || phase > 0x18000);
5350
0a59952b
VS
5351 if (phase < 0)
5352 phase = 0x10000 + phase;
5353 else
5354 trip = PS_PHASE_TRIP;
5355
5356 return ((phase >> 2) & PS_PHASE_MASK) | trip;
5357}
5358
69f44d3b
JN
5359#define SKL_MIN_SRC_W 8
5360#define SKL_MAX_SRC_W 4096
5361#define SKL_MIN_SRC_H 8
5362#define SKL_MAX_SRC_H 4096
5363#define SKL_MIN_DST_W 8
5364#define SKL_MAX_DST_W 4096
5365#define SKL_MIN_DST_H 8
5366#define SKL_MAX_DST_H 4096
5367#define ICL_MAX_SRC_W 5120
5368#define ICL_MAX_SRC_H 4096
5369#define ICL_MAX_DST_W 5120
5370#define ICL_MAX_DST_H 4096
5371#define SKL_MIN_YUV_420_SRC_W 16
5372#define SKL_MIN_YUV_420_SRC_H 16
5373
86adf9d7
ML
5374static int
5375skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
d96a7d2a 5376 unsigned int scaler_user, int *scaler_id,
77224cd5 5377 int src_w, int src_h, int dst_w, int dst_h,
b1554e23 5378 const struct drm_format_info *format, bool need_scaler)
a1b2278e 5379{
86adf9d7
ML
5380 struct intel_crtc_scaler_state *scaler_state =
5381 &crtc_state->scaler_state;
5382 struct intel_crtc *intel_crtc =
5383 to_intel_crtc(crtc_state->base.crtc);
7f58cbb1
MK
5384 struct drm_i915_private *dev_priv = to_i915(intel_crtc->base.dev);
5385 const struct drm_display_mode *adjusted_mode =
5386 &crtc_state->base.adjusted_mode;
6156a456 5387
d96a7d2a
VS
5388 /*
5389 * Src coordinates are already rotated by 270 degrees for
5390 * the 90/270 degree plane rotation cases (to match the
5391 * GTT mapping), hence no need to account for rotation here.
5392 */
b1554e23
ML
5393 if (src_w != dst_w || src_h != dst_h)
5394 need_scaler = true;
e5c05931 5395
7f58cbb1
MK
5396 /*
5397 * Scaling/fitting not supported in IF-ID mode in GEN9+
5398 * TODO: Interlace fetch mode doesn't support YUV420 planar formats.
5399 * Once NV12 is enabled, handle it here while allocating scaler
5400 * for NV12.
5401 */
5402 if (INTEL_GEN(dev_priv) >= 9 && crtc_state->base.enable &&
b1554e23 5403 need_scaler && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
7f58cbb1
MK
5404 DRM_DEBUG_KMS("Pipe/Plane scaling not supported with IF-ID mode\n");
5405 return -EINVAL;
5406 }
5407
a1b2278e
CK
5408 /*
5409 * if plane is being disabled or scaler is no more required or force detach
5410 * - free scaler binded to this plane/crtc
5411 * - in order to do this, update crtc->scaler_usage
5412 *
5413 * Here scaler state in crtc_state is set free so that
5414 * scaler can be assigned to other user. Actual register
5415 * update to free the scaler is done in plane/panel-fit programming.
5416 * For this purpose crtc/plane_state->scaler_id isn't reset here.
5417 */
b1554e23 5418 if (force_detach || !need_scaler) {
a1b2278e 5419 if (*scaler_id >= 0) {
86adf9d7 5420 scaler_state->scaler_users &= ~(1 << scaler_user);
a1b2278e
CK
5421 scaler_state->scalers[*scaler_id].in_use = 0;
5422
86adf9d7
ML
5423 DRM_DEBUG_KMS("scaler_user index %u.%u: "
5424 "Staged freeing scaler id %d scaler_users = 0x%x\n",
5425 intel_crtc->pipe, scaler_user, *scaler_id,
a1b2278e
CK
5426 scaler_state->scaler_users);
5427 *scaler_id = -1;
5428 }
5429 return 0;
5430 }
5431
df7d4156 5432 if (format && is_planar_yuv_format(format->format) &&
5d794288 5433 (src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) {
df7d4156 5434 DRM_DEBUG_KMS("Planar YUV: src dimensions not met\n");
77224cd5
CK
5435 return -EINVAL;
5436 }
5437
a1b2278e
CK
5438 /* range checks */
5439 if (src_w < SKL_MIN_SRC_W || src_h < SKL_MIN_SRC_H ||
323301af 5440 dst_w < SKL_MIN_DST_W || dst_h < SKL_MIN_DST_H ||
2dd24a9c 5441 (INTEL_GEN(dev_priv) >= 11 &&
323301af
NM
5442 (src_w > ICL_MAX_SRC_W || src_h > ICL_MAX_SRC_H ||
5443 dst_w > ICL_MAX_DST_W || dst_h > ICL_MAX_DST_H)) ||
2dd24a9c 5444 (INTEL_GEN(dev_priv) < 11 &&
323301af
NM
5445 (src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
5446 dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H))) {
86adf9d7 5447 DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
a1b2278e 5448 "size is out of scaler range\n",
86adf9d7 5449 intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h);
a1b2278e
CK
5450 return -EINVAL;
5451 }
5452
86adf9d7
ML
5453 /* mark this plane as a scaler user in crtc_state */
5454 scaler_state->scaler_users |= (1 << scaler_user);
5455 DRM_DEBUG_KMS("scaler_user index %u.%u: "
5456 "staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
5457 intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
5458 scaler_state->scaler_users);
5459
5460 return 0;
5461}
5462
5463/**
5464 * skl_update_scaler_crtc - Stages update to scaler state for a given crtc.
5465 *
5466 * @state: crtc's scaler state
86adf9d7
ML
5467 *
5468 * Return
5469 * 0 - scaler_usage updated successfully
5470 * error - requested scaling cannot be supported or other error condition
5471 */
e435d6e5 5472int skl_update_scaler_crtc(struct intel_crtc_state *state)
86adf9d7 5473{
7c5f93b0 5474 const struct drm_display_mode *adjusted_mode = &state->base.adjusted_mode;
b1554e23
ML
5475 bool need_scaler = false;
5476
5477 if (state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
5478 need_scaler = true;
86adf9d7 5479
e435d6e5 5480 return skl_update_scaler(state, !state->base.active, SKL_CRTC_INDEX,
77224cd5
CK
5481 &state->scaler_state.scaler_id,
5482 state->pipe_src_w, state->pipe_src_h,
5483 adjusted_mode->crtc_hdisplay,
b1554e23 5484 adjusted_mode->crtc_vdisplay, NULL, need_scaler);
86adf9d7
ML
5485}
5486
5487/**
5488 * skl_update_scaler_plane - Stages update to scaler state for a given plane.
c38c1455 5489 * @crtc_state: crtc's scaler state
86adf9d7
ML
5490 * @plane_state: atomic plane state to update
5491 *
5492 * Return
5493 * 0 - scaler_usage updated successfully
5494 * error - requested scaling cannot be supported or other error condition
5495 */
da20eabd
ML
5496static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
5497 struct intel_plane_state *plane_state)
86adf9d7 5498{
da20eabd
ML
5499 struct intel_plane *intel_plane =
5500 to_intel_plane(plane_state->base.plane);
42fd20ed 5501 struct drm_i915_private *dev_priv = to_i915(intel_plane->base.dev);
86adf9d7
ML
5502 struct drm_framebuffer *fb = plane_state->base.fb;
5503 int ret;
936e71e3 5504 bool force_detach = !fb || !plane_state->base.visible;
b1554e23
ML
5505 bool need_scaler = false;
5506
5507 /* Pre-gen11 and SDR planes always need a scaler for planar formats. */
42fd20ed 5508 if (!icl_is_hdr_plane(dev_priv, intel_plane->id) &&
df7d4156 5509 fb && is_planar_yuv_format(fb->format->format))
b1554e23 5510 need_scaler = true;
86adf9d7 5511
86adf9d7
ML
5512 ret = skl_update_scaler(crtc_state, force_detach,
5513 drm_plane_index(&intel_plane->base),
5514 &plane_state->scaler_id,
936e71e3
VS
5515 drm_rect_width(&plane_state->base.src) >> 16,
5516 drm_rect_height(&plane_state->base.src) >> 16,
5517 drm_rect_width(&plane_state->base.dst),
77224cd5 5518 drm_rect_height(&plane_state->base.dst),
b1554e23 5519 fb ? fb->format : NULL, need_scaler);
86adf9d7
ML
5520
5521 if (ret || plane_state->scaler_id < 0)
5522 return ret;
5523
a1b2278e 5524 /* check colorkey */
6ec5bd34 5525 if (plane_state->ckey.flags) {
72660ce0
VS
5526 DRM_DEBUG_KMS("[PLANE:%d:%s] scaling with color key not allowed",
5527 intel_plane->base.base.id,
5528 intel_plane->base.name);
a1b2278e
CK
5529 return -EINVAL;
5530 }
5531
5532 /* Check src format */
438b74a5 5533 switch (fb->format->format) {
86adf9d7
ML
5534 case DRM_FORMAT_RGB565:
5535 case DRM_FORMAT_XBGR8888:
5536 case DRM_FORMAT_XRGB8888:
5537 case DRM_FORMAT_ABGR8888:
5538 case DRM_FORMAT_ARGB8888:
5539 case DRM_FORMAT_XRGB2101010:
5540 case DRM_FORMAT_XBGR2101010:
a94bed60
KS
5541 case DRM_FORMAT_XBGR16161616F:
5542 case DRM_FORMAT_ABGR16161616F:
5543 case DRM_FORMAT_XRGB16161616F:
5544 case DRM_FORMAT_ARGB16161616F:
86adf9d7
ML
5545 case DRM_FORMAT_YUYV:
5546 case DRM_FORMAT_YVYU:
5547 case DRM_FORMAT_UYVY:
5548 case DRM_FORMAT_VYUY:
77224cd5 5549 case DRM_FORMAT_NV12:
df7d4156
JPH
5550 case DRM_FORMAT_P010:
5551 case DRM_FORMAT_P012:
5552 case DRM_FORMAT_P016:
296e9b19
SS
5553 case DRM_FORMAT_Y210:
5554 case DRM_FORMAT_Y212:
5555 case DRM_FORMAT_Y216:
ff01e697
ML
5556 case DRM_FORMAT_XVYU2101010:
5557 case DRM_FORMAT_XVYU12_16161616:
5558 case DRM_FORMAT_XVYU16161616:
86adf9d7
ML
5559 break;
5560 default:
72660ce0
VS
5561 DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
5562 intel_plane->base.base.id, intel_plane->base.name,
438b74a5 5563 fb->base.id, fb->format->format);
86adf9d7 5564 return -EINVAL;
a1b2278e
CK
5565 }
5566
a1b2278e
CK
5567 return 0;
5568}
5569
e435d6e5
ML
5570static void skylake_scaler_disable(struct intel_crtc *crtc)
5571{
5572 int i;
5573
5574 for (i = 0; i < crtc->num_scalers; i++)
5575 skl_detach_scaler(crtc, i);
5576}
5577
b2562712 5578static void skylake_pfit_enable(const struct intel_crtc_state *crtc_state)
bd2e244f 5579{
b2562712
ML
5580 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5581 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5582 enum pipe pipe = crtc->pipe;
5583 const struct intel_crtc_scaler_state *scaler_state =
5584 &crtc_state->scaler_state;
a1b2278e 5585
b2562712 5586 if (crtc_state->pch_pfit.enabled) {
0a59952b 5587 u16 uv_rgb_hphase, uv_rgb_vphase;
e7a278a3 5588 int pfit_w, pfit_h, hscale, vscale;
a1b2278e
CK
5589 int id;
5590
b2562712 5591 if (WARN_ON(crtc_state->scaler_state.scaler_id < 0))
a1b2278e 5592 return;
a1b2278e 5593
e7a278a3
VS
5594 pfit_w = (crtc_state->pch_pfit.size >> 16) & 0xFFFF;
5595 pfit_h = crtc_state->pch_pfit.size & 0xFFFF;
5596
5597 hscale = (crtc_state->pipe_src_w << 16) / pfit_w;
5598 vscale = (crtc_state->pipe_src_h << 16) / pfit_h;
5599
5600 uv_rgb_hphase = skl_scaler_calc_phase(1, hscale, false);
5601 uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);
0a59952b 5602
a1b2278e
CK
5603 id = scaler_state->scaler_id;
5604 I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
5605 PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
0a59952b
VS
5606 I915_WRITE_FW(SKL_PS_VPHASE(pipe, id),
5607 PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
5608 I915_WRITE_FW(SKL_PS_HPHASE(pipe, id),
5609 PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
b2562712
ML
5610 I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc_state->pch_pfit.pos);
5611 I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc_state->pch_pfit.size);
bd2e244f
JB
5612 }
5613}
5614
b2562712 5615static void ironlake_pfit_enable(const struct intel_crtc_state *crtc_state)
b074cec8 5616{
b2562712
ML
5617 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
5618 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
b074cec8
JB
5619 int pipe = crtc->pipe;
5620
b2562712 5621 if (crtc_state->pch_pfit.enabled) {
b074cec8
JB
5622 /* Force use of hard-coded filter coefficients
5623 * as some pre-programmed values are broken,
5624 * e.g. x201.
5625 */
fd6b8f43 5626 if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
b074cec8
JB
5627 I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
5628 PF_PIPE_SEL_IVB(pipe));
5629 else
5630 I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
b2562712
ML
5631 I915_WRITE(PF_WIN_POS(pipe), crtc_state->pch_pfit.pos);
5632 I915_WRITE(PF_WIN_SZ(pipe), crtc_state->pch_pfit.size);
d4270e57
JB
5633 }
5634}
5635
199ea381 5636void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
d77e4531 5637{
199ea381 5638 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
cea165c3 5639 struct drm_device *dev = crtc->base.dev;
fac5e23e 5640 struct drm_i915_private *dev_priv = to_i915(dev);
d77e4531 5641
24f28450 5642 if (!crtc_state->ips_enabled)
d77e4531
PZ
5643 return;
5644
307e4498
ML
5645 /*
5646 * We can only enable IPS after we enable a plane and wait for a vblank
5647 * This function is called from post_plane_update, which is run after
5648 * a vblank wait.
5649 */
24f28450 5650 WARN_ON(!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));
51f5a096 5651
8652744b 5652 if (IS_BROADWELL(dev_priv)) {
61843f0e
VS
5653 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
5654 IPS_ENABLE | IPS_PCODE_CONTROL));
2a114cc1
BW
5655 /* Quoting Art Runyan: "its not safe to expect any particular
5656 * value in IPS_CTL bit 31 after enabling IPS through the
e59150dc
JB
5657 * mailbox." Moreover, the mailbox may return a bogus state,
5658 * so we need to just enable it and continue on.
2a114cc1
BW
5659 */
5660 } else {
5661 I915_WRITE(IPS_CTL, IPS_ENABLE);
5662 /* The bit only becomes 1 in the next vblank, so this wait here
5663 * is essentially intel_wait_for_vblank. If we don't have this
5664 * and don't wait for vblanks until the end of crtc_enable, then
5665 * the HW state readout code will complain that the expected
5666 * IPS_CTL value is not the one we read. */
97a04e0d 5667 if (intel_wait_for_register(&dev_priv->uncore,
2ec9ba3c
CW
5668 IPS_CTL, IPS_ENABLE, IPS_ENABLE,
5669 50))
2a114cc1
BW
5670 DRM_ERROR("Timed out waiting for IPS enable\n");
5671 }
d77e4531
PZ
5672}
5673
199ea381 5674void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
d77e4531 5675{
199ea381 5676 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
d77e4531 5677 struct drm_device *dev = crtc->base.dev;
fac5e23e 5678 struct drm_i915_private *dev_priv = to_i915(dev);
d77e4531 5679
199ea381 5680 if (!crtc_state->ips_enabled)
d77e4531
PZ
5681 return;
5682
8652744b 5683 if (IS_BROADWELL(dev_priv)) {
2a114cc1 5684 WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
acb3ef0e
ID
5685 /*
5686 * Wait for PCODE to finish disabling IPS. The BSpec specified
5687 * 42ms timeout value leads to occasional timeouts so use 100ms
5688 * instead.
5689 */
97a04e0d 5690 if (intel_wait_for_register(&dev_priv->uncore,
b85c1ecf 5691 IPS_CTL, IPS_ENABLE, 0,
acb3ef0e 5692 100))
23d0b130 5693 DRM_ERROR("Timed out waiting for IPS disable\n");
e59150dc 5694 } else {
2a114cc1 5695 I915_WRITE(IPS_CTL, 0);
e59150dc
JB
5696 POSTING_READ(IPS_CTL);
5697 }
d77e4531
PZ
5698
5699 /* We need to wait for a vblank before we can disable the plane. */
0f0f74bc 5700 intel_wait_for_vblank(dev_priv, crtc->pipe);
d77e4531
PZ
5701}
5702
7cac945f 5703static void intel_crtc_dpms_overlay_disable(struct intel_crtc *intel_crtc)
d3eedb1a 5704{
7cac945f 5705 if (intel_crtc->overlay) {
d3eedb1a 5706 struct drm_device *dev = intel_crtc->base.dev;
d3eedb1a
VS
5707
5708 mutex_lock(&dev->struct_mutex);
d3eedb1a 5709 (void) intel_overlay_switch_off(intel_crtc->overlay);
d3eedb1a
VS
5710 mutex_unlock(&dev->struct_mutex);
5711 }
5712
5713 /* Let userspace switch the overlay on again. In most cases userspace
5714 * has to recompute where to put it anyway.
5715 */
5716}
5717
87d4300a
ML
5718/**
5719 * intel_post_enable_primary - Perform operations after enabling primary plane
5720 * @crtc: the CRTC whose primary plane was just enabled
c38c1455 5721 * @new_crtc_state: the enabling state
87d4300a
ML
5722 *
5723 * Performs potentially sleeping operations that must be done after the primary
5724 * plane is enabled, such as updating FBC and IPS. Note that this may be
5725 * called due to an explicit primary plane update, or due to an implicit
5726 * re-enable that is caused when a sprite plane is updated to no longer
5727 * completely hide the primary plane.
5728 */
5729static void
199ea381
ML
5730intel_post_enable_primary(struct drm_crtc *crtc,
5731 const struct intel_crtc_state *new_crtc_state)
a5c4d7bc
VS
5732{
5733 struct drm_device *dev = crtc->dev;
fac5e23e 5734 struct drm_i915_private *dev_priv = to_i915(dev);
a5c4d7bc
VS
5735 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5736 int pipe = intel_crtc->pipe;
a5c4d7bc 5737
f99d7069 5738 /*
87d4300a
ML
5739 * Gen2 reports pipe underruns whenever all planes are disabled.
5740 * So don't enable underrun reporting before at least some planes
5741 * are enabled.
5742 * FIXME: Need to fix the logic to work when we turn off all planes
5743 * but leave the pipe running.
f99d7069 5744 */
cf819eff 5745 if (IS_GEN(dev_priv, 2))
87d4300a
ML
5746 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
5747
aca7b684
VS
5748 /* Underruns don't always raise interrupts, so check manually. */
5749 intel_check_cpu_fifo_underruns(dev_priv);
5750 intel_check_pch_fifo_underruns(dev_priv);
a5c4d7bc
VS
5751}
5752
24f28450 5753/* FIXME get rid of this and use pre_plane_update */
87d4300a 5754static void
24f28450 5755intel_pre_disable_primary_noatomic(struct drm_crtc *crtc)
a5c4d7bc
VS
5756{
5757 struct drm_device *dev = crtc->dev;
fac5e23e 5758 struct drm_i915_private *dev_priv = to_i915(dev);
a5c4d7bc
VS
5759 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
5760 int pipe = intel_crtc->pipe;
a5c4d7bc 5761
87d4300a
ML
5762 /*
5763 * Gen2 reports pipe underruns whenever all planes are disabled.
24f28450 5764 * So disable underrun reporting before all the planes get disabled.
87d4300a 5765 */
cf819eff 5766 if (IS_GEN(dev_priv, 2))
87d4300a 5767 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
a5c4d7bc 5768
24f28450 5769 hsw_disable_ips(to_intel_crtc_state(crtc->state));
2622a081 5770
87d4300a
ML
5771 /*
5772 * Vblank time updates from the shadow to live plane control register
5773 * are blocked if the memory self-refresh mode is active at that
5774 * moment. So to make sure the plane gets truly disabled, disable
5775 * first the self-refresh mode. The self-refresh enable bit in turn
5776 * will be checked/applied by the HW only at the next frame start
5777 * event which is after the vblank start event, so we need to have a
5778 * wait-for-vblank between disabling the plane and the pipe.
5779 */
b2ae318a 5780 if (HAS_GMCH(dev_priv) &&
11a85d6a 5781 intel_set_memory_cxsr(dev_priv, false))
0f0f74bc 5782 intel_wait_for_vblank(dev_priv, pipe);
87d4300a
ML
5783}
5784
24f28450
ML
5785static bool hsw_pre_update_disable_ips(const struct intel_crtc_state *old_crtc_state,
5786 const struct intel_crtc_state *new_crtc_state)
5787{
051a6d8d
VS
5788 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
5789 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5790
24f28450
ML
5791 if (!old_crtc_state->ips_enabled)
5792 return false;
5793
5794 if (needs_modeset(&new_crtc_state->base))
5795 return true;
5796
051a6d8d
VS
5797 /*
5798 * Workaround : Do not read or write the pipe palette/gamma data while
5799 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
5800 *
5801 * Disable IPS before we program the LUT.
5802 */
5803 if (IS_HASWELL(dev_priv) &&
5804 (new_crtc_state->base.color_mgmt_changed ||
5805 new_crtc_state->update_pipe) &&
5806 new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
5807 return true;
5808
24f28450
ML
5809 return !new_crtc_state->ips_enabled;
5810}
5811
5812static bool hsw_post_update_enable_ips(const struct intel_crtc_state *old_crtc_state,
5813 const struct intel_crtc_state *new_crtc_state)
5814{
051a6d8d
VS
5815 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
5816 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5817
24f28450
ML
5818 if (!new_crtc_state->ips_enabled)
5819 return false;
5820
5821 if (needs_modeset(&new_crtc_state->base))
5822 return true;
5823
051a6d8d
VS
5824 /*
5825 * Workaround : Do not read or write the pipe palette/gamma data while
5826 * GAMMA_MODE is configured for split gamma and IPS_CTL has IPS enabled.
5827 *
5828 * Re-enable IPS after the LUT has been programmed.
5829 */
5830 if (IS_HASWELL(dev_priv) &&
5831 (new_crtc_state->base.color_mgmt_changed ||
5832 new_crtc_state->update_pipe) &&
5833 new_crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT)
5834 return true;
5835
24f28450
ML
5836 /*
5837 * We can't read out IPS on broadwell, assume the worst and
5838 * forcibly enable IPS on the first fastset.
5839 */
5840 if (new_crtc_state->update_pipe &&
5841 old_crtc_state->base.adjusted_mode.private_flags & I915_MODE_FLAG_INHERITED)
5842 return true;
5843
5844 return !old_crtc_state->ips_enabled;
5845}
5846
8e021151
ML
5847static bool needs_nv12_wa(struct drm_i915_private *dev_priv,
5848 const struct intel_crtc_state *crtc_state)
5849{
5850 if (!crtc_state->nv12_planes)
5851 return false;
5852
1347d3ce 5853 /* WA Display #0827: Gen9:all */
cf819eff 5854 if (IS_GEN(dev_priv, 9) && !IS_GEMINILAKE(dev_priv))
8e021151
ML
5855 return true;
5856
5857 return false;
5858}
5859
51eb1a1d
RS
5860static bool needs_scalerclk_wa(struct drm_i915_private *dev_priv,
5861 const struct intel_crtc_state *crtc_state)
5862{
5863 /* Wa_2006604312:icl */
5864 if (crtc_state->scaler_state.scaler_users > 0 && IS_ICELAKE(dev_priv))
5865 return true;
5866
5867 return false;
5868}
5869
5a21b665
DV
5870static void intel_post_plane_update(struct intel_crtc_state *old_crtc_state)
5871{
5872 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
c4a4efa9
VS
5873 struct drm_device *dev = crtc->base.dev;
5874 struct drm_i915_private *dev_priv = to_i915(dev);
5a21b665
DV
5875 struct drm_atomic_state *old_state = old_crtc_state->base.state;
5876 struct intel_crtc_state *pipe_config =
f9a8c149
VS
5877 intel_atomic_get_new_crtc_state(to_intel_atomic_state(old_state),
5878 crtc);
5a21b665 5879 struct drm_plane *primary = crtc->base.primary;
8b69449d
ML
5880 struct drm_plane_state *old_primary_state =
5881 drm_atomic_get_old_plane_state(old_state, primary);
5a21b665 5882
5748b6a1 5883 intel_frontbuffer_flip(to_i915(crtc->base.dev), pipe_config->fb_bits);
5a21b665 5884
5a21b665 5885 if (pipe_config->update_wm_post && pipe_config->base.active)
432081bc 5886 intel_update_watermarks(crtc);
5a21b665 5887
24f28450
ML
5888 if (hsw_post_update_enable_ips(old_crtc_state, pipe_config))
5889 hsw_enable_ips(pipe_config);
5890
8b69449d
ML
5891 if (old_primary_state) {
5892 struct drm_plane_state *new_primary_state =
5893 drm_atomic_get_new_plane_state(old_state, primary);
5a21b665
DV
5894
5895 intel_fbc_post_update(crtc);
5896
8b69449d 5897 if (new_primary_state->visible &&
5a21b665 5898 (needs_modeset(&pipe_config->base) ||
8b69449d 5899 !old_primary_state->visible))
199ea381 5900 intel_post_enable_primary(&crtc->base, pipe_config);
5a21b665 5901 }
8e021151 5902
8e021151 5903 if (needs_nv12_wa(dev_priv, old_crtc_state) &&
51eb1a1d 5904 !needs_nv12_wa(dev_priv, pipe_config))
2474028e 5905 skl_wa_827(dev_priv, crtc->pipe, false);
51eb1a1d
RS
5906
5907 if (needs_scalerclk_wa(dev_priv, old_crtc_state) &&
5908 !needs_scalerclk_wa(dev_priv, pipe_config))
5909 icl_wa_scalerclkgating(dev_priv, crtc->pipe, false);
5a21b665
DV
5910}
5911
aa5e9b47
ML
5912static void intel_pre_plane_update(struct intel_crtc_state *old_crtc_state,
5913 struct intel_crtc_state *pipe_config)
ac21b225 5914{
5c74cd73 5915 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
ac21b225 5916 struct drm_device *dev = crtc->base.dev;
fac5e23e 5917 struct drm_i915_private *dev_priv = to_i915(dev);
5c74cd73
ML
5918 struct drm_atomic_state *old_state = old_crtc_state->base.state;
5919 struct drm_plane *primary = crtc->base.primary;
8b69449d
ML
5920 struct drm_plane_state *old_primary_state =
5921 drm_atomic_get_old_plane_state(old_state, primary);
5c74cd73 5922 bool modeset = needs_modeset(&pipe_config->base);
ccf010fb
ML
5923 struct intel_atomic_state *old_intel_state =
5924 to_intel_atomic_state(old_state);
ac21b225 5925
24f28450
ML
5926 if (hsw_pre_update_disable_ips(old_crtc_state, pipe_config))
5927 hsw_disable_ips(old_crtc_state);
5928
8b69449d
ML
5929 if (old_primary_state) {
5930 struct intel_plane_state *new_primary_state =
f9a8c149
VS
5931 intel_atomic_get_new_plane_state(old_intel_state,
5932 to_intel_plane(primary));
5c74cd73 5933
8b69449d 5934 intel_fbc_pre_update(crtc, pipe_config, new_primary_state);
24f28450
ML
5935 /*
5936 * Gen2 reports pipe underruns whenever all planes are disabled.
5937 * So disable underrun reporting before all the planes get disabled.
5938 */
cf819eff 5939 if (IS_GEN(dev_priv, 2) && old_primary_state->visible &&
8b69449d 5940 (modeset || !new_primary_state->base.visible))
24f28450 5941 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);
5c74cd73 5942 }
852eb00d 5943
8e021151
ML
5944 /* Display WA 827 */
5945 if (!needs_nv12_wa(dev_priv, old_crtc_state) &&
51eb1a1d 5946 needs_nv12_wa(dev_priv, pipe_config))
2474028e 5947 skl_wa_827(dev_priv, crtc->pipe, true);
51eb1a1d
RS
5948
5949 /* Wa_2006604312:icl */
5950 if (!needs_scalerclk_wa(dev_priv, old_crtc_state) &&
5951 needs_scalerclk_wa(dev_priv, pipe_config))
5952 icl_wa_scalerclkgating(dev_priv, crtc->pipe, true);
8e021151 5953
5eeb798b
VS
5954 /*
5955 * Vblank time updates from the shadow to live plane control register
5956 * are blocked if the memory self-refresh mode is active at that
5957 * moment. So to make sure the plane gets truly disabled, disable
5958 * first the self-refresh mode. The self-refresh enable bit in turn
5959 * will be checked/applied by the HW only at the next frame start
5960 * event which is after the vblank start event, so we need to have a
5961 * wait-for-vblank between disabling the plane and the pipe.
5962 */
b2ae318a 5963 if (HAS_GMCH(dev_priv) && old_crtc_state->base.active &&
5eeb798b
VS
5964 pipe_config->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
5965 intel_wait_for_vblank(dev_priv, crtc->pipe);
92826fcd 5966
ed4a6a7c
MR
5967 /*
5968 * IVB workaround: must disable low power watermarks for at least
5969 * one frame before enabling scaling. LP watermarks can be re-enabled
5970 * when scaling is disabled.
5971 *
5972 * WaCxSRDisabledForSpriteScaling:ivb
5973 */
8e7a4424
VS
5974 if (pipe_config->disable_lp_wm && ilk_disable_lp_wm(dev) &&
5975 old_crtc_state->base.active)
0f0f74bc 5976 intel_wait_for_vblank(dev_priv, crtc->pipe);
ed4a6a7c
MR
5977
5978 /*
5979 * If we're doing a modeset, we're done. No need to do any pre-vblank
5980 * watermark programming here.
5981 */
5982 if (needs_modeset(&pipe_config->base))
5983 return;
5984
5985 /*
5986 * For platforms that support atomic watermarks, program the
5987 * 'intermediate' watermarks immediately. On pre-gen9 platforms, these
5988 * will be the intermediate values that are safe for both pre- and
5989 * post- vblank; when vblank happens, the 'active' values will be set
5990 * to the final 'target' values and we'll do this again to get the
5991 * optimal watermarks. For gen9+ platforms, the values we program here
5992 * will be the final target values which will get automatically latched
5993 * at vblank time; no further programming will be necessary.
5994 *
5995 * If a platform hasn't been transitioned to atomic watermarks yet,
5996 * we'll continue to update watermarks the old way, if flags tell
5997 * us to.
5998 */
5999 if (dev_priv->display.initial_watermarks != NULL)
ccf010fb
ML
6000 dev_priv->display.initial_watermarks(old_intel_state,
6001 pipe_config);
caed361d 6002 else if (pipe_config->update_wm_pre)
432081bc 6003 intel_update_watermarks(crtc);
ac21b225
ML
6004}
6005
0dd14be3
VS
6006static void intel_crtc_disable_planes(struct intel_atomic_state *state,
6007 struct intel_crtc *crtc)
87d4300a 6008{
0dd14be3
VS
6009 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6010 const struct intel_crtc_state *new_crtc_state =
6011 intel_atomic_get_new_crtc_state(state, crtc);
6012 unsigned int update_mask = new_crtc_state->update_planes;
6013 const struct intel_plane_state *old_plane_state;
f59e9701
ML
6014 struct intel_plane *plane;
6015 unsigned fb_bits = 0;
0dd14be3 6016 int i;
87d4300a 6017
f59e9701 6018 intel_crtc_dpms_overlay_disable(crtc);
27321ae8 6019
0dd14be3
VS
6020 for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) {
6021 if (crtc->pipe != plane->pipe ||
6022 !(update_mask & BIT(plane->id)))
6023 continue;
6024
c48b86f9 6025 intel_disable_plane(plane, new_crtc_state);
f98551ae 6026
0dd14be3 6027 if (old_plane_state->base.visible)
f59e9701 6028 fb_bits |= plane->frontbuffer_bit;
f59e9701
ML
6029 }
6030
0dd14be3 6031 intel_frontbuffer_flip(dev_priv, fb_bits);
a5c4d7bc
VS
6032}
6033
fb1c98b1 6034static void intel_encoders_pre_pll_enable(struct drm_crtc *crtc,
fd6bbda9 6035 struct intel_crtc_state *crtc_state,
fb1c98b1
ML
6036 struct drm_atomic_state *old_state)
6037{
aa5e9b47 6038 struct drm_connector_state *conn_state;
fb1c98b1
ML
6039 struct drm_connector *conn;
6040 int i;
6041
aa5e9b47 6042 for_each_new_connector_in_state(old_state, conn, conn_state, i) {
fb1c98b1
ML
6043 struct intel_encoder *encoder =
6044 to_intel_encoder(conn_state->best_encoder);
6045
6046 if (conn_state->crtc != crtc)
6047 continue;
6048
6049 if (encoder->pre_pll_enable)
fd6bbda9 6050 encoder->pre_pll_enable(encoder, crtc_state, conn_state);
fb1c98b1
ML
6051 }
6052}
6053
6054static void intel_encoders_pre_enable(struct drm_crtc *crtc,
fd6bbda9 6055 struct intel_crtc_state *crtc_state,
fb1c98b1
ML
6056 struct drm_atomic_state *old_state)
6057{
aa5e9b47 6058 struct drm_connector_state *conn_state;
fb1c98b1
ML
6059 struct drm_connector *conn;
6060 int i;
6061
aa5e9b47 6062 for_each_new_connector_in_state(old_state, conn, conn_state, i) {
fb1c98b1
ML
6063 struct intel_encoder *encoder =
6064 to_intel_encoder(conn_state->best_encoder);
6065
6066 if (conn_state->crtc != crtc)
6067 continue;
6068
6069 if (encoder->pre_enable)
fd6bbda9 6070 encoder->pre_enable(encoder, crtc_state, conn_state);
fb1c98b1
ML
6071 }
6072}
6073
6074static void intel_encoders_enable(struct drm_crtc *crtc,
fd6bbda9 6075 struct intel_crtc_state *crtc_state,
fb1c98b1
ML
6076 struct drm_atomic_state *old_state)
6077{
aa5e9b47 6078 struct drm_connector_state *conn_state;
fb1c98b1
ML
6079 struct drm_connector *conn;
6080 int i;
6081
aa5e9b47 6082 for_each_new_connector_in_state(old_state, conn, conn_state, i) {
fb1c98b1
ML
6083 struct intel_encoder *encoder =
6084 to_intel_encoder(conn_state->best_encoder);
6085
6086 if (conn_state->crtc != crtc)
6087 continue;
6088
c84c6fe3
JN
6089 if (encoder->enable)
6090 encoder->enable(encoder, crtc_state, conn_state);
fb1c98b1
ML
6091 intel_opregion_notify_encoder(encoder, true);
6092 }
6093}
6094
6095static void intel_encoders_disable(struct drm_crtc *crtc,
fd6bbda9 6096 struct intel_crtc_state *old_crtc_state,
fb1c98b1
ML
6097 struct drm_atomic_state *old_state)
6098{
6099 struct drm_connector_state *old_conn_state;
6100 struct drm_connector *conn;
6101 int i;
6102
aa5e9b47 6103 for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
fb1c98b1
ML
6104 struct intel_encoder *encoder =
6105 to_intel_encoder(old_conn_state->best_encoder);
6106
6107 if (old_conn_state->crtc != crtc)
6108 continue;
6109
6110 intel_opregion_notify_encoder(encoder, false);
c84c6fe3
JN
6111 if (encoder->disable)
6112 encoder->disable(encoder, old_crtc_state, old_conn_state);
fb1c98b1
ML
6113 }
6114}
6115
6116static void intel_encoders_post_disable(struct drm_crtc *crtc,
fd6bbda9 6117 struct intel_crtc_state *old_crtc_state,
fb1c98b1
ML
6118 struct drm_atomic_state *old_state)
6119{
6120 struct drm_connector_state *old_conn_state;
6121 struct drm_connector *conn;
6122 int i;
6123
aa5e9b47 6124 for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
fb1c98b1
ML
6125 struct intel_encoder *encoder =
6126 to_intel_encoder(old_conn_state->best_encoder);
6127
6128 if (old_conn_state->crtc != crtc)
6129 continue;
6130
6131 if (encoder->post_disable)
fd6bbda9 6132 encoder->post_disable(encoder, old_crtc_state, old_conn_state);
fb1c98b1
ML
6133 }
6134}
6135
6136static void intel_encoders_post_pll_disable(struct drm_crtc *crtc,
fd6bbda9 6137 struct intel_crtc_state *old_crtc_state,
fb1c98b1
ML
6138 struct drm_atomic_state *old_state)
6139{
6140 struct drm_connector_state *old_conn_state;
6141 struct drm_connector *conn;
6142 int i;
6143
aa5e9b47 6144 for_each_old_connector_in_state(old_state, conn, old_conn_state, i) {
fb1c98b1
ML
6145 struct intel_encoder *encoder =
6146 to_intel_encoder(old_conn_state->best_encoder);
6147
6148 if (old_conn_state->crtc != crtc)
6149 continue;
6150
6151 if (encoder->post_pll_disable)
fd6bbda9 6152 encoder->post_pll_disable(encoder, old_crtc_state, old_conn_state);
fb1c98b1
ML
6153 }
6154}
6155
608ed4ab
HG
6156static void intel_encoders_update_pipe(struct drm_crtc *crtc,
6157 struct intel_crtc_state *crtc_state,
6158 struct drm_atomic_state *old_state)
6159{
6160 struct drm_connector_state *conn_state;
6161 struct drm_connector *conn;
6162 int i;
6163
6164 for_each_new_connector_in_state(old_state, conn, conn_state, i) {
6165 struct intel_encoder *encoder =
6166 to_intel_encoder(conn_state->best_encoder);
6167
6168 if (conn_state->crtc != crtc)
6169 continue;
6170
6171 if (encoder->update_pipe)
6172 encoder->update_pipe(encoder, crtc_state, conn_state);
6173 }
6174}
6175
73a116be
VS
6176static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state)
6177{
6178 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
6179 struct intel_plane *plane = to_intel_plane(crtc->base.primary);
6180
6181 plane->disable_plane(plane, crtc_state);
6182}
6183
4a806558
ML
6184static void ironlake_crtc_enable(struct intel_crtc_state *pipe_config,
6185 struct drm_atomic_state *old_state)
f67a559d 6186{
4a806558 6187 struct drm_crtc *crtc = pipe_config->base.crtc;
f67a559d 6188 struct drm_device *dev = crtc->dev;
fac5e23e 6189 struct drm_i915_private *dev_priv = to_i915(dev);
f67a559d
JB
6190 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6191 int pipe = intel_crtc->pipe;
ccf010fb
ML
6192 struct intel_atomic_state *old_intel_state =
6193 to_intel_atomic_state(old_state);
f67a559d 6194
53d9f4e9 6195 if (WARN_ON(intel_crtc->active))
f67a559d
JB
6196 return;
6197
b2c0593a
VS
6198 /*
6199 * Sometimes spurious CPU pipe underruns happen during FDI
6200 * training, at least with VGA+HDMI cloning. Suppress them.
6201 *
6202 * On ILK we get an occasional spurious CPU pipe underruns
6203 * between eDP port A enable and vdd enable. Also PCH port
6204 * enable seems to result in the occasional CPU pipe underrun.
6205 *
6206 * Spurious PCH underruns also occur during PCH enabling.
6207 */
2b5b6312
VS
6208 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
6209 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
81b088ca 6210
65c307fd
ML
6211 if (pipe_config->has_pch_encoder)
6212 intel_prepare_shared_dpll(pipe_config);
b14b1055 6213
6f405638 6214 if (intel_crtc_has_dp_encoder(pipe_config))
4c354754 6215 intel_dp_set_m_n(pipe_config, M1_N1);
29407aab 6216
44fe7f35
ML
6217 intel_set_pipe_timings(pipe_config);
6218 intel_set_pipe_src_size(pipe_config);
29407aab 6219
6f405638 6220 if (pipe_config->has_pch_encoder) {
4c354754
ML
6221 intel_cpu_transcoder_set_m_n(pipe_config,
6222 &pipe_config->fdi_m_n, NULL);
29407aab
DV
6223 }
6224
fdf73510 6225 ironlake_set_pipeconf(pipe_config);
29407aab 6226
f67a559d 6227 intel_crtc->active = true;
8664281b 6228
fd6bbda9 6229 intel_encoders_pre_enable(crtc, pipe_config, old_state);
f67a559d 6230
6f405638 6231 if (pipe_config->has_pch_encoder) {
fff367c7
DV
6232 /* Note: FDI PLL enabling _must_ be done before we enable the
6233 * cpu pipes, hence this is separate from all the other fdi/pch
6234 * enabling. */
b2354c78 6235 ironlake_fdi_pll_enable(pipe_config);
46b6f814
DV
6236 } else {
6237 assert_fdi_tx_disabled(dev_priv, pipe);
6238 assert_fdi_rx_disabled(dev_priv, pipe);
6239 }
f67a559d 6240
b2562712 6241 ironlake_pfit_enable(pipe_config);
f67a559d 6242
9c54c0dd
JB
6243 /*
6244 * On ILK+ LUT must be loaded before the pipe is running but with
6245 * clocks enabled
6246 */
302da0cd 6247 intel_color_load_luts(pipe_config);
4d8ed54c 6248 intel_color_commit(pipe_config);
73a116be
VS
6249 /* update DSPCNTR to configure gamma for pipe bottom color */
6250 intel_disable_primary_plane(pipe_config);
9c54c0dd 6251
1d5bf5d9 6252 if (dev_priv->display.initial_watermarks != NULL)
6f405638 6253 dev_priv->display.initial_watermarks(old_intel_state, pipe_config);
4972f70a 6254 intel_enable_pipe(pipe_config);
f67a559d 6255
6f405638 6256 if (pipe_config->has_pch_encoder)
5a0b385e 6257 ironlake_pch_enable(old_intel_state, pipe_config);
c98e9dcf 6258
f9b61ff6 6259 assert_vblank_disabled(crtc);
32db0b65 6260 intel_crtc_vblank_on(pipe_config);
f9b61ff6 6261
fd6bbda9 6262 intel_encoders_enable(crtc, pipe_config, old_state);
61b77ddd 6263
6e266956 6264 if (HAS_PCH_CPT(dev_priv))
a1520318 6265 cpt_verify_modeset(dev, intel_crtc->pipe);
37ca8d4c 6266
ea80a661
VS
6267 /*
6268 * Must wait for vblank to avoid spurious PCH FIFO underruns.
6269 * And a second vblank wait is needed at least on ILK with
6270 * some interlaced HDMI modes. Let's do the double wait always
6271 * in case there are more corner cases we don't know about.
6272 */
6f405638 6273 if (pipe_config->has_pch_encoder) {
ea80a661 6274 intel_wait_for_vblank(dev_priv, pipe);
0f0f74bc 6275 intel_wait_for_vblank(dev_priv, pipe);
ea80a661 6276 }
b2c0593a 6277 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
37ca8d4c 6278 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
6be4a607
JB
6279}
6280
42db64ef
PZ
6281/* IPS only exists on ULT machines and is tied to pipe A. */
6282static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
6283{
50a0bc90 6284 return HAS_IPS(to_i915(crtc->base.dev)) && crtc->pipe == PIPE_A;
42db64ef
PZ
6285}
6286
ed69cd40
ID
6287static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
6288 enum pipe pipe, bool apply)
6289{
6290 u32 val = I915_READ(CLKGATE_DIS_PSL(pipe));
6291 u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
6292
6293 if (apply)
6294 val |= mask;
6295 else
6296 val &= ~mask;
6297
6298 I915_WRITE(CLKGATE_DIS_PSL(pipe), val);
6299}
6300
c3cc39c5
MK
6301static void icl_pipe_mbus_enable(struct intel_crtc *crtc)
6302{
6303 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6304 enum pipe pipe = crtc->pipe;
ba3f4d0a 6305 u32 val;
c3cc39c5 6306
443d5e39
RV
6307 val = MBUS_DBOX_A_CREDIT(2);
6308 val |= MBUS_DBOX_BW_CREDIT(1);
6309 val |= MBUS_DBOX_B_CREDIT(8);
c3cc39c5
MK
6310
6311 I915_WRITE(PIPE_MBUS_DBOX_CTL(pipe), val);
6312}
6313
4a806558
ML
6314static void haswell_crtc_enable(struct intel_crtc_state *pipe_config,
6315 struct drm_atomic_state *old_state)
4f771f10 6316{
4a806558 6317 struct drm_crtc *crtc = pipe_config->base.crtc;
6315b5d3 6318 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
4f771f10 6319 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
99d736a2 6320 int pipe = intel_crtc->pipe, hsw_workaround_pipe;
6f405638 6321 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
ccf010fb
ML
6322 struct intel_atomic_state *old_intel_state =
6323 to_intel_atomic_state(old_state);
ed69cd40 6324 bool psl_clkgate_wa;
4f771f10 6325
53d9f4e9 6326 if (WARN_ON(intel_crtc->active))
4f771f10
PZ
6327 return;
6328
fd6bbda9 6329 intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);
95a7a2ae 6330
65c307fd
ML
6331 if (pipe_config->shared_dpll)
6332 intel_enable_shared_dpll(pipe_config);
df8ad70c 6333
c8af5274
PZ
6334 intel_encoders_pre_enable(crtc, pipe_config, old_state);
6335
6f405638 6336 if (intel_crtc_has_dp_encoder(pipe_config))
4c354754 6337 intel_dp_set_m_n(pipe_config, M1_N1);
229fca97 6338
d7edc4e5 6339 if (!transcoder_is_dsi(cpu_transcoder))
44fe7f35 6340 intel_set_pipe_timings(pipe_config);
4d1de975 6341
44fe7f35 6342 intel_set_pipe_src_size(pipe_config);
229fca97 6343
4d1de975
JN
6344 if (cpu_transcoder != TRANSCODER_EDP &&
6345 !transcoder_is_dsi(cpu_transcoder)) {
6346 I915_WRITE(PIPE_MULT(cpu_transcoder),
6f405638 6347 pipe_config->pixel_multiplier - 1);
ebb69c95
CT
6348 }
6349
6f405638 6350 if (pipe_config->has_pch_encoder) {
4c354754
ML
6351 intel_cpu_transcoder_set_m_n(pipe_config,
6352 &pipe_config->fdi_m_n, NULL);
229fca97
DV
6353 }
6354
d7edc4e5 6355 if (!transcoder_is_dsi(cpu_transcoder))
fdf73510 6356 haswell_set_pipeconf(pipe_config);
4d1de975 6357
9b11215e
VS
6358 if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
6359 bdw_set_pipemisc(pipe_config);
229fca97 6360
4f771f10 6361 intel_crtc->active = true;
8664281b 6362
ed69cd40
ID
6363 /* Display WA #1180: WaDisableScalarClockGating: glk, cnl */
6364 psl_clkgate_wa = (IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
6f405638 6365 pipe_config->pch_pfit.enabled;
ed69cd40
ID
6366 if (psl_clkgate_wa)
6367 glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);
6368
6315b5d3 6369 if (INTEL_GEN(dev_priv) >= 9)
b2562712 6370 skylake_pfit_enable(pipe_config);
ff6d9f55 6371 else
b2562712 6372 ironlake_pfit_enable(pipe_config);
4f771f10
PZ
6373
6374 /*
6375 * On ILK+ LUT must be loaded before the pipe is running but with
6376 * clocks enabled
6377 */
302da0cd 6378 intel_color_load_luts(pipe_config);
4d8ed54c 6379 intel_color_commit(pipe_config);
73a116be
VS
6380 /* update DSPCNTR to configure gamma/csc for pipe bottom color */
6381 if (INTEL_GEN(dev_priv) < 9)
6382 intel_disable_primary_plane(pipe_config);
4f771f10 6383
d1622119
VS
6384 if (INTEL_GEN(dev_priv) >= 11)
6385 icl_set_pipe_chicken(intel_crtc);
e16a3750 6386
3dc38eea 6387 intel_ddi_set_pipe_settings(pipe_config);
d7edc4e5 6388 if (!transcoder_is_dsi(cpu_transcoder))
3dc38eea 6389 intel_ddi_enable_transcoder_func(pipe_config);
4f771f10 6390
1d5bf5d9 6391 if (dev_priv->display.initial_watermarks != NULL)
3125d39f 6392 dev_priv->display.initial_watermarks(old_intel_state, pipe_config);
4d1de975 6393
c3cc39c5
MK
6394 if (INTEL_GEN(dev_priv) >= 11)
6395 icl_pipe_mbus_enable(intel_crtc);
6396
4d1de975 6397 /* XXX: Do the pipe assertions at the right place for BXT DSI. */
d7edc4e5 6398 if (!transcoder_is_dsi(cpu_transcoder))
4972f70a 6399 intel_enable_pipe(pipe_config);
42db64ef 6400
6f405638 6401 if (pipe_config->has_pch_encoder)
5a0b385e 6402 lpt_pch_enable(old_intel_state, pipe_config);
4f771f10 6403
6f405638 6404 if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DP_MST))
3dc38eea 6405 intel_ddi_set_vc_payload_alloc(pipe_config, true);
0e32b39c 6406
f9b61ff6 6407 assert_vblank_disabled(crtc);
32db0b65 6408 intel_crtc_vblank_on(pipe_config);
f9b61ff6 6409
fd6bbda9 6410 intel_encoders_enable(crtc, pipe_config, old_state);
4f771f10 6411
ed69cd40
ID
6412 if (psl_clkgate_wa) {
6413 intel_wait_for_vblank(dev_priv, pipe);
6414 glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
6415 }
6416
e4916946
PZ
6417 /* If we change the relative order between pipe/planes enabling, we need
6418 * to change the workaround. */
99d736a2 6419 hsw_workaround_pipe = pipe_config->hsw_workaround_pipe;
772c2a51 6420 if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
0f0f74bc
VS
6421 intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
6422 intel_wait_for_vblank(dev_priv, hsw_workaround_pipe);
99d736a2 6423 }
4f771f10
PZ
6424}
6425
b2562712 6426static void ironlake_pfit_disable(const struct intel_crtc_state *old_crtc_state)
3f8dce3a 6427{
b2562712
ML
6428 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
6429 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
6430 enum pipe pipe = crtc->pipe;
3f8dce3a
DV
6431
6432 /* To avoid upsetting the power well on haswell only disable the pfit if
6433 * it's in use. The hw state code will make sure we get this right. */
b2562712 6434 if (old_crtc_state->pch_pfit.enabled) {
3f8dce3a
DV
6435 I915_WRITE(PF_CTL(pipe), 0);
6436 I915_WRITE(PF_WIN_POS(pipe), 0);
6437 I915_WRITE(PF_WIN_SZ(pipe), 0);
6438 }
6439}
6440
4a806558
ML
6441static void ironlake_crtc_disable(struct intel_crtc_state *old_crtc_state,
6442 struct drm_atomic_state *old_state)
6be4a607 6443{
4a806558 6444 struct drm_crtc *crtc = old_crtc_state->base.crtc;
6be4a607 6445 struct drm_device *dev = crtc->dev;
fac5e23e 6446 struct drm_i915_private *dev_priv = to_i915(dev);
6be4a607
JB
6447 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6448 int pipe = intel_crtc->pipe;
b52eb4dc 6449
b2c0593a
VS
6450 /*
6451 * Sometimes spurious CPU pipe underruns happen when the
6452 * pipe is already disabled, but FDI RX/TX is still enabled.
6453 * Happens at least with VGA+HDMI cloning. Suppress them.
6454 */
2b5b6312
VS
6455 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
6456 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);
37ca8d4c 6457
fd6bbda9 6458 intel_encoders_disable(crtc, old_crtc_state, old_state);
ea9d758d 6459
f9b61ff6
DV
6460 drm_crtc_vblank_off(crtc);
6461 assert_vblank_disabled(crtc);
6462
4972f70a 6463 intel_disable_pipe(old_crtc_state);
32f9d658 6464
b2562712 6465 ironlake_pfit_disable(old_crtc_state);
2c07245f 6466
6f405638 6467 if (old_crtc_state->has_pch_encoder)
5a74f70a
VS
6468 ironlake_fdi_disable(crtc);
6469
fd6bbda9 6470 intel_encoders_post_disable(crtc, old_crtc_state, old_state);
2c07245f 6471
6f405638 6472 if (old_crtc_state->has_pch_encoder) {
d925c59a 6473 ironlake_disable_pch_transcoder(dev_priv, pipe);
6be4a607 6474
6e266956 6475 if (HAS_PCH_CPT(dev_priv)) {
f0f59a00
VS
6476 i915_reg_t reg;
6477 u32 temp;
6478
d925c59a
DV
6479 /* disable TRANS_DP_CTL */
6480 reg = TRANS_DP_CTL(pipe);
6481 temp = I915_READ(reg);
6482 temp &= ~(TRANS_DP_OUTPUT_ENABLE |
6483 TRANS_DP_PORT_SEL_MASK);
6484 temp |= TRANS_DP_PORT_SEL_NONE;
6485 I915_WRITE(reg, temp);
6486
6487 /* disable DPLL_SEL */
6488 temp = I915_READ(PCH_DPLL_SEL);
11887397 6489 temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
d925c59a 6490 I915_WRITE(PCH_DPLL_SEL, temp);
9db4a9c7 6491 }
e3421a18 6492
d925c59a
DV
6493 ironlake_fdi_pll_disable(intel_crtc);
6494 }
81b088ca 6495
b2c0593a 6496 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
81b088ca 6497 intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
6be4a607 6498}
1b3c7a47 6499
4a806558
ML
6500static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
6501 struct drm_atomic_state *old_state)
ee7b9f93 6502{
4a806558 6503 struct drm_crtc *crtc = old_crtc_state->base.crtc;
6315b5d3 6504 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
ee7b9f93 6505 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
24a28179 6506 enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
ee7b9f93 6507
fd6bbda9 6508 intel_encoders_disable(crtc, old_crtc_state, old_state);
4f771f10 6509
f9b61ff6
DV
6510 drm_crtc_vblank_off(crtc);
6511 assert_vblank_disabled(crtc);
6512
4d1de975 6513 /* XXX: Do the pipe assertions at the right place for BXT DSI. */
d7edc4e5 6514 if (!transcoder_is_dsi(cpu_transcoder))
4972f70a 6515 intel_disable_pipe(old_crtc_state);
4f771f10 6516
24a28179
ID
6517 if (intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST))
6518 intel_ddi_set_vc_payload_alloc(old_crtc_state, false);
a4bf214f 6519
d7edc4e5 6520 if (!transcoder_is_dsi(cpu_transcoder))
90c3e219 6521 intel_ddi_disable_transcoder_func(old_crtc_state);
4f771f10 6522
a600622c
MN
6523 intel_dsc_disable(old_crtc_state);
6524
6315b5d3 6525 if (INTEL_GEN(dev_priv) >= 9)
e435d6e5 6526 skylake_scaler_disable(intel_crtc);
ff6d9f55 6527 else
b2562712 6528 ironlake_pfit_disable(old_crtc_state);
4f771f10 6529
fd6bbda9 6530 intel_encoders_post_disable(crtc, old_crtc_state, old_state);
c27e917e 6531
bdaa29b6 6532 intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state);
4f771f10
PZ
6533}
6534
b2562712 6535static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
2dd24552 6536{
b2562712
ML
6537 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
6538 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
2dd24552 6539
b2562712 6540 if (!crtc_state->gmch_pfit.control)
2dd24552
JB
6541 return;
6542
2dd24552 6543 /*
c0b03411
DV
6544 * The panel fitter should only be adjusted whilst the pipe is disabled,
6545 * according to register description and PRM.
2dd24552 6546 */
c0b03411
DV
6547 WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
6548 assert_pipe_disabled(dev_priv, crtc->pipe);
2dd24552 6549
b2562712
ML
6550 I915_WRITE(PFIT_PGM_RATIOS, crtc_state->gmch_pfit.pgm_ratios);
6551 I915_WRITE(PFIT_CONTROL, crtc_state->gmch_pfit.control);
5a80c45c
DV
6552
6553 /* Border color in case we don't scale up to the full screen. Black by
6554 * default, change to something else for debugging. */
6555 I915_WRITE(BCLRPAT(crtc->pipe), 0);
2dd24552
JB
6556}
6557
176597a1
MK
6558bool intel_port_is_combophy(struct drm_i915_private *dev_priv, enum port port)
6559{
6560 if (port == PORT_NONE)
6561 return false;
6562
f722b8c1
BP
6563 if (IS_ELKHARTLAKE(dev_priv))
6564 return port <= PORT_C;
6565
2dd24a9c 6566 if (INTEL_GEN(dev_priv) >= 11)
176597a1
MK
6567 return port <= PORT_B;
6568
6569 return false;
6570}
6571
ac213c1b
PZ
6572bool intel_port_is_tc(struct drm_i915_private *dev_priv, enum port port)
6573{
f722b8c1 6574 if (INTEL_GEN(dev_priv) >= 11 && !IS_ELKHARTLAKE(dev_priv))
ac213c1b
PZ
6575 return port >= PORT_C && port <= PORT_F;
6576
6577 return false;
6578}
6579
6580enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port)
6581{
6582 if (!intel_port_is_tc(dev_priv, port))
6583 return PORT_TC_NONE;
6584
6585 return port - PORT_C;
6586}
6587
79f255a0 6588enum intel_display_power_domain intel_port_to_power_domain(enum port port)
d05410f9
DA
6589{
6590 switch (port) {
6591 case PORT_A:
6331a704 6592 return POWER_DOMAIN_PORT_DDI_A_LANES;
d05410f9 6593 case PORT_B:
6331a704 6594 return POWER_DOMAIN_PORT_DDI_B_LANES;
d05410f9 6595 case PORT_C:
6331a704 6596 return POWER_DOMAIN_PORT_DDI_C_LANES;
d05410f9 6597 case PORT_D:
6331a704 6598 return POWER_DOMAIN_PORT_DDI_D_LANES;
d8e19f99 6599 case PORT_E:
6331a704 6600 return POWER_DOMAIN_PORT_DDI_E_LANES;
9787e835
RV
6601 case PORT_F:
6602 return POWER_DOMAIN_PORT_DDI_F_LANES;
d05410f9 6603 default:
b9fec167 6604 MISSING_CASE(port);
d05410f9
DA
6605 return POWER_DOMAIN_PORT_OTHER;
6606 }
6607}
6608
337837ac
ID
6609enum intel_display_power_domain
6610intel_aux_power_domain(struct intel_digital_port *dig_port)
6611{
6612 switch (dig_port->aux_ch) {
6613 case AUX_CH_A:
6614 return POWER_DOMAIN_AUX_A;
6615 case AUX_CH_B:
6616 return POWER_DOMAIN_AUX_B;
6617 case AUX_CH_C:
6618 return POWER_DOMAIN_AUX_C;
6619 case AUX_CH_D:
6620 return POWER_DOMAIN_AUX_D;
6621 case AUX_CH_E:
6622 return POWER_DOMAIN_AUX_E;
6623 case AUX_CH_F:
6624 return POWER_DOMAIN_AUX_F;
6625 default:
6626 MISSING_CASE(dig_port->aux_ch);
6627 return POWER_DOMAIN_AUX_A;
6628 }
6629}
6630
d8fc70b7
ACO
6631static u64 get_crtc_power_domains(struct drm_crtc *crtc,
6632 struct intel_crtc_state *crtc_state)
77d22dca 6633{
319be8ae 6634 struct drm_device *dev = crtc->dev;
37255d8d 6635 struct drm_i915_private *dev_priv = to_i915(dev);
74bff5f9 6636 struct drm_encoder *encoder;
319be8ae
ID
6637 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6638 enum pipe pipe = intel_crtc->pipe;
d8fc70b7 6639 u64 mask;
74bff5f9 6640 enum transcoder transcoder = crtc_state->cpu_transcoder;
77d22dca 6641
74bff5f9 6642 if (!crtc_state->base.active)
292b990e
ML
6643 return 0;
6644
17bd6e66
ID
6645 mask = BIT_ULL(POWER_DOMAIN_PIPE(pipe));
6646 mask |= BIT_ULL(POWER_DOMAIN_TRANSCODER(transcoder));
74bff5f9
ML
6647 if (crtc_state->pch_pfit.enabled ||
6648 crtc_state->pch_pfit.force_thru)
d8fc70b7 6649 mask |= BIT_ULL(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe));
77d22dca 6650
74bff5f9
ML
6651 drm_for_each_encoder_mask(encoder, dev, crtc_state->base.encoder_mask) {
6652 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
6653
79f255a0 6654 mask |= BIT_ULL(intel_encoder->power_domain);
74bff5f9 6655 }
319be8ae 6656
37255d8d 6657 if (HAS_DDI(dev_priv) && crtc_state->has_audio)
17bd6e66 6658 mask |= BIT_ULL(POWER_DOMAIN_AUDIO);
37255d8d 6659
15e7ec29 6660 if (crtc_state->shared_dpll)
08d8e170 6661 mask |= BIT_ULL(POWER_DOMAIN_DISPLAY_CORE);
15e7ec29 6662
77d22dca
ID
6663 return mask;
6664}
6665
d2d15016 6666static u64
74bff5f9
ML
6667modeset_get_crtc_power_domains(struct drm_crtc *crtc,
6668 struct intel_crtc_state *crtc_state)
77d22dca 6669{
fac5e23e 6670 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
292b990e
ML
6671 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6672 enum intel_display_power_domain domain;
d8fc70b7 6673 u64 domains, new_domains, old_domains;
77d22dca 6674
292b990e 6675 old_domains = intel_crtc->enabled_power_domains;
74bff5f9
ML
6676 intel_crtc->enabled_power_domains = new_domains =
6677 get_crtc_power_domains(crtc, crtc_state);
77d22dca 6678
5a21b665 6679 domains = new_domains & ~old_domains;
292b990e
ML
6680
6681 for_each_power_domain(domain, domains)
6682 intel_display_power_get(dev_priv, domain);
6683
5a21b665 6684 return old_domains & ~new_domains;
292b990e
ML
6685}
6686
6687static void modeset_put_power_domains(struct drm_i915_private *dev_priv,
d8fc70b7 6688 u64 domains)
292b990e
ML
6689{
6690 enum intel_display_power_domain domain;
6691
6692 for_each_power_domain(domain, domains)
0e6e0be4 6693 intel_display_power_put_unchecked(dev_priv, domain);
292b990e 6694}
77d22dca 6695
7ff89ca2
VS
6696static void valleyview_crtc_enable(struct intel_crtc_state *pipe_config,
6697 struct drm_atomic_state *old_state)
adafdc6f 6698{
ff32c54e
VS
6699 struct intel_atomic_state *old_intel_state =
6700 to_intel_atomic_state(old_state);
7ff89ca2
VS
6701 struct drm_crtc *crtc = pipe_config->base.crtc;
6702 struct drm_device *dev = crtc->dev;
6703 struct drm_i915_private *dev_priv = to_i915(dev);
6704 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6705 int pipe = intel_crtc->pipe;
adafdc6f 6706
7ff89ca2
VS
6707 if (WARN_ON(intel_crtc->active))
6708 return;
adafdc6f 6709
6f405638 6710 if (intel_crtc_has_dp_encoder(pipe_config))
4c354754 6711 intel_dp_set_m_n(pipe_config, M1_N1);
b2045352 6712
44fe7f35
ML
6713 intel_set_pipe_timings(pipe_config);
6714 intel_set_pipe_src_size(pipe_config);
b2045352 6715
7ff89ca2 6716 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
7ff89ca2
VS
6717 I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
6718 I915_WRITE(CHV_CANVAS(pipe), 0);
560a7ae4
DL
6719 }
6720
fdf73510 6721 i9xx_set_pipeconf(pipe_config);
560a7ae4 6722
7ff89ca2 6723 intel_crtc->active = true;
92891e45 6724
7ff89ca2 6725 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
5f199dfa 6726
7ff89ca2 6727 intel_encoders_pre_pll_enable(crtc, pipe_config, old_state);
5f199dfa 6728
7ff89ca2 6729 if (IS_CHERRYVIEW(dev_priv)) {
b2354c78
ML
6730 chv_prepare_pll(intel_crtc, pipe_config);
6731 chv_enable_pll(intel_crtc, pipe_config);
7ff89ca2 6732 } else {
b2354c78
ML
6733 vlv_prepare_pll(intel_crtc, pipe_config);
6734 vlv_enable_pll(intel_crtc, pipe_config);
5f199dfa
VS
6735 }
6736
7ff89ca2 6737 intel_encoders_pre_enable(crtc, pipe_config, old_state);
5f199dfa 6738
b2562712 6739 i9xx_pfit_enable(pipe_config);
89b3c3c7 6740
302da0cd 6741 intel_color_load_luts(pipe_config);
4d8ed54c 6742 intel_color_commit(pipe_config);
73a116be
VS
6743 /* update DSPCNTR to configure gamma for pipe bottom color */
6744 intel_disable_primary_plane(pipe_config);
89b3c3c7 6745
ff32c54e
VS
6746 dev_priv->display.initial_watermarks(old_intel_state,
6747 pipe_config);
4972f70a 6748 intel_enable_pipe(pipe_config);
7ff89ca2
VS
6749
6750 assert_vblank_disabled(crtc);
32db0b65 6751 intel_crtc_vblank_on(pipe_config);
89b3c3c7 6752
7ff89ca2 6753 intel_encoders_enable(crtc, pipe_config, old_state);
89b3c3c7
ACO
6754}
6755
b2354c78 6756static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state)
2b73001e 6757{
b2354c78
ML
6758 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
6759 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
83d7c81f 6760
b2354c78
ML
6761 I915_WRITE(FP0(crtc->pipe), crtc_state->dpll_hw_state.fp0);
6762 I915_WRITE(FP1(crtc->pipe), crtc_state->dpll_hw_state.fp1);
2b73001e
VS
6763}
6764
7ff89ca2
VS
6765static void i9xx_crtc_enable(struct intel_crtc_state *pipe_config,
6766 struct drm_atomic_state *old_state)
2b73001e 6767{
04548cba
VS
6768 struct intel_atomic_state *old_intel_state =
6769 to_intel_atomic_state(old_state);
7ff89ca2
VS
6770 struct drm_crtc *crtc = pipe_config->base.crtc;
6771 struct drm_device *dev = crtc->dev;
6772 struct drm_i915_private *dev_priv = to_i915(dev);
6773 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6774 enum pipe pipe = intel_crtc->pipe;
2b73001e 6775
7ff89ca2
VS
6776 if (WARN_ON(intel_crtc->active))
6777 return;
2b73001e 6778
b2354c78 6779 i9xx_set_pll_dividers(pipe_config);
2b73001e 6780
6f405638 6781 if (intel_crtc_has_dp_encoder(pipe_config))
4c354754 6782 intel_dp_set_m_n(pipe_config, M1_N1);
83d7c81f 6783
44fe7f35
ML
6784 intel_set_pipe_timings(pipe_config);
6785 intel_set_pipe_src_size(pipe_config);
2b73001e 6786
fdf73510 6787 i9xx_set_pipeconf(pipe_config);
f8437dd1 6788
7ff89ca2 6789 intel_crtc->active = true;
5f199dfa 6790
cf819eff 6791 if (!IS_GEN(dev_priv, 2))
7ff89ca2 6792 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
5f199dfa 6793
7ff89ca2 6794 intel_encoders_pre_enable(crtc, pipe_config, old_state);
f8437dd1 6795
939994da 6796 i9xx_enable_pll(intel_crtc, pipe_config);
f8437dd1 6797
b2562712 6798 i9xx_pfit_enable(pipe_config);
f8437dd1 6799
302da0cd 6800 intel_color_load_luts(pipe_config);
4d8ed54c 6801 intel_color_commit(pipe_config);
73a116be
VS
6802 /* update DSPCNTR to configure gamma for pipe bottom color */
6803 intel_disable_primary_plane(pipe_config);
f8437dd1 6804
04548cba
VS
6805 if (dev_priv->display.initial_watermarks != NULL)
6806 dev_priv->display.initial_watermarks(old_intel_state,
6f405638 6807 pipe_config);
04548cba
VS
6808 else
6809 intel_update_watermarks(intel_crtc);
4972f70a 6810 intel_enable_pipe(pipe_config);
f8437dd1 6811
7ff89ca2 6812 assert_vblank_disabled(crtc);
32db0b65 6813 intel_crtc_vblank_on(pipe_config);
f8437dd1 6814
7ff89ca2
VS
6815 intel_encoders_enable(crtc, pipe_config, old_state);
6816}
f8437dd1 6817
b2562712 6818static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
7ff89ca2 6819{
b2562712
ML
6820 struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
6821 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
f8437dd1 6822
b2562712 6823 if (!old_crtc_state->gmch_pfit.control)
f8437dd1 6824 return;
f8437dd1 6825
7ff89ca2
VS
6826 assert_pipe_disabled(dev_priv, crtc->pipe);
6827
43031788
CW
6828 DRM_DEBUG_KMS("disabling pfit, current: 0x%08x\n",
6829 I915_READ(PFIT_CONTROL));
7ff89ca2 6830 I915_WRITE(PFIT_CONTROL, 0);
f8437dd1
VK
6831}
6832
7ff89ca2
VS
6833static void i9xx_crtc_disable(struct intel_crtc_state *old_crtc_state,
6834 struct drm_atomic_state *old_state)
f8437dd1 6835{
7ff89ca2
VS
6836 struct drm_crtc *crtc = old_crtc_state->base.crtc;
6837 struct drm_device *dev = crtc->dev;
6838 struct drm_i915_private *dev_priv = to_i915(dev);
6839 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6840 int pipe = intel_crtc->pipe;
d66a2194 6841
d66a2194 6842 /*
7ff89ca2
VS
6843 * On gen2 planes are double buffered but the pipe isn't, so we must
6844 * wait for planes to fully turn off before disabling the pipe.
d66a2194 6845 */
cf819eff 6846 if (IS_GEN(dev_priv, 2))
7ff89ca2 6847 intel_wait_for_vblank(dev_priv, pipe);
d66a2194 6848
7ff89ca2 6849 intel_encoders_disable(crtc, old_crtc_state, old_state);
d66a2194 6850
7ff89ca2
VS
6851 drm_crtc_vblank_off(crtc);
6852 assert_vblank_disabled(crtc);
d66a2194 6853
4972f70a 6854 intel_disable_pipe(old_crtc_state);
d66a2194 6855
b2562712 6856 i9xx_pfit_disable(old_crtc_state);
89b3c3c7 6857
7ff89ca2 6858 intel_encoders_post_disable(crtc, old_crtc_state, old_state);
d66a2194 6859
6f405638 6860 if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
7ff89ca2
VS
6861 if (IS_CHERRYVIEW(dev_priv))
6862 chv_disable_pll(dev_priv, pipe);
6863 else if (IS_VALLEYVIEW(dev_priv))
6864 vlv_disable_pll(dev_priv, pipe);
6865 else
b2354c78 6866 i9xx_disable_pll(old_crtc_state);
7ff89ca2 6867 }
c2e001ef 6868
7ff89ca2 6869 intel_encoders_post_pll_disable(crtc, old_crtc_state, old_state);
89b3c3c7 6870
cf819eff 6871 if (!IS_GEN(dev_priv, 2))
7ff89ca2 6872 intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
ff32c54e
VS
6873
6874 if (!dev_priv->display.initial_watermarks)
6875 intel_update_watermarks(intel_crtc);
2ee0da16
VS
6876
6877 /* clock the pipe down to 640x480@60 to potentially save power */
6878 if (IS_I830(dev_priv))
6879 i830_enable_pipe(dev_priv, pipe);
f8437dd1
VK
6880}
6881
da1d0e26
VS
6882static void intel_crtc_disable_noatomic(struct drm_crtc *crtc,
6883 struct drm_modeset_acquire_ctx *ctx)
f8437dd1 6884{
7ff89ca2
VS
6885 struct intel_encoder *encoder;
6886 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6887 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
c457d9cf
VS
6888 struct intel_bw_state *bw_state =
6889 to_intel_bw_state(dev_priv->bw_obj.state);
7ff89ca2 6890 enum intel_display_power_domain domain;
b1e01595 6891 struct intel_plane *plane;
d2d15016 6892 u64 domains;
7ff89ca2
VS
6893 struct drm_atomic_state *state;
6894 struct intel_crtc_state *crtc_state;
6895 int ret;
f8437dd1 6896
7ff89ca2
VS
6897 if (!intel_crtc->active)
6898 return;
a8ca4934 6899
b1e01595
VS
6900 for_each_intel_plane_on_crtc(&dev_priv->drm, intel_crtc, plane) {
6901 const struct intel_plane_state *plane_state =
6902 to_intel_plane_state(plane->base.state);
709e05c3 6903
b1e01595
VS
6904 if (plane_state->base.visible)
6905 intel_plane_disable_noatomic(intel_crtc, plane);
7ff89ca2 6906 }
5d96d8af 6907
7ff89ca2
VS
6908 state = drm_atomic_state_alloc(crtc->dev);
6909 if (!state) {
6910 DRM_DEBUG_KMS("failed to disable [CRTC:%d:%s], out of memory",
6911 crtc->base.id, crtc->name);
1c3f7700 6912 return;
7ff89ca2 6913 }
9f7eb31a 6914
da1d0e26 6915 state->acquire_ctx = ctx;
ea61791e 6916
7ff89ca2
VS
6917 /* Everything's already locked, -EDEADLK can't happen. */
6918 crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
6919 ret = drm_atomic_add_affected_connectors(state, crtc);
9f7eb31a 6920
7ff89ca2 6921 WARN_ON(IS_ERR(crtc_state) || ret);
5d96d8af 6922
7ff89ca2 6923 dev_priv->display.crtc_disable(crtc_state, state);
4a806558 6924
0853695c 6925 drm_atomic_state_put(state);
842e0307 6926
78108b7c
VS
6927 DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
6928 crtc->base.id, crtc->name);
842e0307
ML
6929
6930 WARN_ON(drm_atomic_set_mode_for_crtc(crtc->state, NULL) < 0);
6931 crtc->state->active = false;
37d9078b 6932 intel_crtc->active = false;
842e0307
ML
6933 crtc->enabled = false;
6934 crtc->state->connector_mask = 0;
6935 crtc->state->encoder_mask = 0;
6936
6937 for_each_encoder_on_crtc(crtc->dev, crtc, encoder)
6938 encoder->base.crtc = NULL;
6939
58f9c0bc 6940 intel_fbc_disable(intel_crtc);
432081bc 6941 intel_update_watermarks(intel_crtc);
65c307fd 6942 intel_disable_shared_dpll(to_intel_crtc_state(crtc->state));
b17d48e2
ML
6943
6944 domains = intel_crtc->enabled_power_domains;
6945 for_each_power_domain(domain, domains)
0e6e0be4 6946 intel_display_power_put_unchecked(dev_priv, domain);
b17d48e2 6947 intel_crtc->enabled_power_domains = 0;
565602d7
ML
6948
6949 dev_priv->active_crtcs &= ~(1 << intel_crtc->pipe);
d305e061 6950 dev_priv->min_cdclk[intel_crtc->pipe] = 0;
53e9bf5e 6951 dev_priv->min_voltage_level[intel_crtc->pipe] = 0;
c457d9cf
VS
6952
6953 bw_state->data_rate[intel_crtc->pipe] = 0;
6954 bw_state->num_active_planes[intel_crtc->pipe] = 0;
b17d48e2
ML
6955}
6956
6b72d486
ML
6957/*
6958 * turn all crtc's off, but do not adjust state
6959 * This has to be paired with a call to intel_modeset_setup_hw_state.
6960 */
70e0bd74 6961int intel_display_suspend(struct drm_device *dev)
ee7b9f93 6962{
e2c8b870 6963 struct drm_i915_private *dev_priv = to_i915(dev);
70e0bd74 6964 struct drm_atomic_state *state;
e2c8b870 6965 int ret;
70e0bd74 6966
e2c8b870
ML
6967 state = drm_atomic_helper_suspend(dev);
6968 ret = PTR_ERR_OR_ZERO(state);
70e0bd74
ML
6969 if (ret)
6970 DRM_ERROR("Suspending crtc's failed with %i\n", ret);
e2c8b870
ML
6971 else
6972 dev_priv->modeset_restore_state = state;
70e0bd74 6973 return ret;
ee7b9f93
JB
6974}
6975
ea5b213a 6976void intel_encoder_destroy(struct drm_encoder *encoder)
7e7d76c3 6977{
4ef69c7a 6978 struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
ea5b213a 6979
ea5b213a
CW
6980 drm_encoder_cleanup(encoder);
6981 kfree(intel_encoder);
7e7d76c3
JB
6982}
6983
0a91ca29
DV
6984/* Cross check the actual hw state with our own modeset state tracking (and it's
6985 * internal consistency). */
749d98b8
ML
6986static void intel_connector_verify_state(struct drm_crtc_state *crtc_state,
6987 struct drm_connector_state *conn_state)
79e53945 6988{
749d98b8 6989 struct intel_connector *connector = to_intel_connector(conn_state->connector);
35dd3c64
ML
6990
6991 DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
6992 connector->base.base.id,
6993 connector->base.name);
6994
0a91ca29 6995 if (connector->get_hw_state(connector)) {
e85376cb 6996 struct intel_encoder *encoder = connector->encoder;
0a91ca29 6997
749d98b8 6998 I915_STATE_WARN(!crtc_state,
35dd3c64 6999 "connector enabled without attached crtc\n");
0a91ca29 7000
749d98b8 7001 if (!crtc_state)
35dd3c64
ML
7002 return;
7003
749d98b8 7004 I915_STATE_WARN(!crtc_state->active,
35dd3c64
ML
7005 "connector is active, but attached crtc isn't\n");
7006
e85376cb 7007 if (!encoder || encoder->type == INTEL_OUTPUT_DP_MST)
35dd3c64
ML
7008 return;
7009
e85376cb 7010 I915_STATE_WARN(conn_state->best_encoder != &encoder->base,
35dd3c64
ML
7011 "atomic encoder doesn't match attached encoder\n");
7012
e85376cb 7013 I915_STATE_WARN(conn_state->crtc != encoder->base.crtc,
35dd3c64
ML
7014 "attached encoder crtc differs from connector crtc\n");
7015 } else {
749d98b8 7016 I915_STATE_WARN(crtc_state && crtc_state->active,
4d688a2a 7017 "attached crtc is active, but connector isn't\n");
749d98b8 7018 I915_STATE_WARN(!crtc_state && conn_state->best_encoder,
35dd3c64 7019 "best encoder set without crtc!\n");
0a91ca29 7020 }
79e53945
JB
7021}
7022
6d293983 7023static int pipe_required_fdi_lanes(struct intel_crtc_state *crtc_state)
d272ddfa 7024{
6d293983
ACO
7025 if (crtc_state->base.enable && crtc_state->has_pch_encoder)
7026 return crtc_state->fdi_lanes;
d272ddfa
VS
7027
7028 return 0;
7029}
7030
6d293983 7031static int ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
5cec258b 7032 struct intel_crtc_state *pipe_config)
1857e1da 7033{
8652744b 7034 struct drm_i915_private *dev_priv = to_i915(dev);
6d293983
ACO
7035 struct drm_atomic_state *state = pipe_config->base.state;
7036 struct intel_crtc *other_crtc;
7037 struct intel_crtc_state *other_crtc_state;
7038
1857e1da
DV
7039 DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
7040 pipe_name(pipe), pipe_config->fdi_lanes);
7041 if (pipe_config->fdi_lanes > 4) {
7042 DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
7043 pipe_name(pipe), pipe_config->fdi_lanes);
6d293983 7044 return -EINVAL;
1857e1da
DV
7045 }
7046
8652744b 7047 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
1857e1da
DV
7048 if (pipe_config->fdi_lanes > 2) {
7049 DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
7050 pipe_config->fdi_lanes);
6d293983 7051 return -EINVAL;
1857e1da 7052 } else {
6d293983 7053 return 0;
1857e1da
DV
7054 }
7055 }
7056
b7f05d4a 7057 if (INTEL_INFO(dev_priv)->num_pipes == 2)
6d293983 7058 return 0;
1857e1da
DV
7059
7060 /* Ivybridge 3 pipe is really complicated */
7061 switch (pipe) {
7062 case PIPE_A:
6d293983 7063 return 0;
1857e1da 7064 case PIPE_B:
6d293983
ACO
7065 if (pipe_config->fdi_lanes <= 2)
7066 return 0;
7067
b91eb5cc 7068 other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_C);
6d293983
ACO
7069 other_crtc_state =
7070 intel_atomic_get_crtc_state(state, other_crtc);
7071 if (IS_ERR(other_crtc_state))
7072 return PTR_ERR(other_crtc_state);
7073
7074 if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
1857e1da
DV
7075 DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
7076 pipe_name(pipe), pipe_config->fdi_lanes);
6d293983 7077 return -EINVAL;
1857e1da 7078 }
6d293983 7079 return 0;
1857e1da 7080 case PIPE_C:
251cc67c
VS
7081 if (pipe_config->fdi_lanes > 2) {
7082 DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n",
7083 pipe_name(pipe), pipe_config->fdi_lanes);
6d293983 7084 return -EINVAL;
251cc67c 7085 }
6d293983 7086
b91eb5cc 7087 other_crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_B);
6d293983
ACO
7088 other_crtc_state =
7089 intel_atomic_get_crtc_state(state, other_crtc);
7090 if (IS_ERR(other_crtc_state))
7091 return PTR_ERR(other_crtc_state);
7092
7093 if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
1857e1da 7094 DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
6d293983 7095 return -EINVAL;
1857e1da 7096 }
6d293983 7097 return 0;
1857e1da
DV
7098 default:
7099 BUG();
7100 }
7101}
7102
e29c22c0
DV
7103#define RETRY 1
7104static int ironlake_fdi_compute_config(struct intel_crtc *intel_crtc,
5cec258b 7105 struct intel_crtc_state *pipe_config)
877d48d5 7106{
1857e1da 7107 struct drm_device *dev = intel_crtc->base.dev;
7c5f93b0 7108 const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
6d293983
ACO
7109 int lane, link_bw, fdi_dotclock, ret;
7110 bool needs_recompute = false;
877d48d5 7111
e29c22c0 7112retry:
877d48d5
DV
7113 /* FDI is a binary signal running at ~2.7GHz, encoding
7114 * each output octet as 10 bits. The actual frequency
7115 * is stored as a divider into a 100MHz clock, and the
7116 * mode pixel clock is stored in units of 1KHz.
7117 * Hence the bw of each lane in terms of the mode signal
7118 * is:
7119 */
21a727b3 7120 link_bw = intel_fdi_link_freq(to_i915(dev), pipe_config);
877d48d5 7121
241bfc38 7122 fdi_dotclock = adjusted_mode->crtc_clock;
877d48d5 7123
2bd89a07 7124 lane = ironlake_get_lanes_required(fdi_dotclock, link_bw,
877d48d5
DV
7125 pipe_config->pipe_bpp);
7126
7127 pipe_config->fdi_lanes = lane;
7128
2bd89a07 7129 intel_link_compute_m_n(pipe_config->pipe_bpp, lane, fdi_dotclock,
b31e85ed 7130 link_bw, &pipe_config->fdi_m_n, false);
1857e1da 7131
e3b247da 7132 ret = ironlake_check_fdi_lanes(dev, intel_crtc->pipe, pipe_config);
8e2b4dff
VS
7133 if (ret == -EDEADLK)
7134 return ret;
7135
6d293983 7136 if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
e29c22c0 7137 pipe_config->pipe_bpp -= 2*3;
7ff89ca2
VS
7138 DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
7139 pipe_config->pipe_bpp);
7140 needs_recompute = true;
7141 pipe_config->bw_constrained = true;
257a7ffc 7142
7ff89ca2 7143 goto retry;
257a7ffc 7144 }
79e53945 7145
7ff89ca2
VS
7146 if (needs_recompute)
7147 return RETRY;
e70236a8 7148
7ff89ca2 7149 return ret;
e70236a8
JB
7150}
7151
24f28450 7152bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
e70236a8 7153{
24f28450
ML
7154 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
7155 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7156
7157 /* IPS only exists on ULT machines and is tied to pipe A. */
7158 if (!hsw_crtc_supports_ips(crtc))
6e644626
VS
7159 return false;
7160
24f28450 7161 if (!i915_modparams.enable_ips)
7ff89ca2 7162 return false;
e70236a8 7163
24f28450
ML
7164 if (crtc_state->pipe_bpp > 24)
7165 return false;
1b1d2716 7166
65cd2b3f 7167 /*
7ff89ca2
VS
7168 * We compare against max which means we must take
7169 * the increased cdclk requirement into account when
7170 * calculating the new cdclk.
7171 *
7172 * Should measure whether using a lower cdclk w/o IPS
e70236a8 7173 */
24f28450
ML
7174 if (IS_BROADWELL(dev_priv) &&
7175 crtc_state->pixel_rate > dev_priv->max_cdclk_freq * 95 / 100)
7176 return false;
7177
7178 return true;
e70236a8 7179}
79e53945 7180
24f28450 7181static bool hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
7ff89ca2 7182{
24f28450
ML
7183 struct drm_i915_private *dev_priv =
7184 to_i915(crtc_state->base.crtc->dev);
7185 struct intel_atomic_state *intel_state =
7186 to_intel_atomic_state(crtc_state->base.state);
7187
7188 if (!hsw_crtc_state_ips_capable(crtc_state))
7189 return false;
7190
a8ebf607
JRS
7191 /*
7192 * When IPS gets enabled, the pipe CRC changes. Since IPS gets
7193 * enabled and disabled dynamically based on package C states,
7194 * user space can't make reliable use of the CRCs, so let's just
7195 * completely disable it.
7196 */
7197 if (crtc_state->crc_enabled)
24f28450
ML
7198 return false;
7199
adbe5c5c
ML
7200 /* IPS should be fine as long as at least one plane is enabled. */
7201 if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)))
24f28450 7202 return false;
34edce2f 7203
24f28450
ML
7204 /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
7205 if (IS_BROADWELL(dev_priv) &&
7206 crtc_state->pixel_rate > intel_state->cdclk.logical.cdclk * 95 / 100)
7207 return false;
7208
7209 return true;
34edce2f
VS
7210}
7211
7ff89ca2 7212static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
34edce2f 7213{
7ff89ca2 7214 const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
34edce2f 7215
7ff89ca2 7216 /* GDG double wide on either pipe, otherwise pipe A only */
c56b89f1 7217 return INTEL_GEN(dev_priv) < 4 &&
7ff89ca2 7218 (crtc->pipe == PIPE_A || IS_I915G(dev_priv));
34edce2f
VS
7219}
7220
ba3f4d0a 7221static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *pipe_config)
ceb99320 7222{
ba3f4d0a 7223 u32 pixel_rate;
ceb99320
VS
7224
7225 pixel_rate = pipe_config->base.adjusted_mode.crtc_clock;
7226
7227 /*
7228 * We only use IF-ID interlacing. If we ever use
7229 * PF-ID we'll need to adjust the pixel_rate here.
7230 */
7231
7232 if (pipe_config->pch_pfit.enabled) {
ba3f4d0a
JN
7233 u64 pipe_w, pipe_h, pfit_w, pfit_h;
7234 u32 pfit_size = pipe_config->pch_pfit.size;
ceb99320
VS
7235
7236 pipe_w = pipe_config->pipe_src_w;
7237 pipe_h = pipe_config->pipe_src_h;
7238
7239 pfit_w = (pfit_size >> 16) & 0xFFFF;
7240 pfit_h = pfit_size & 0xFFFF;
7241 if (pipe_w < pfit_w)
7242 pipe_w = pfit_w;
7243 if (pipe_h < pfit_h)
7244 pipe_h = pfit_h;
7245
7246 if (WARN_ON(!pfit_w || !pfit_h))
7247 return pixel_rate;
7248
d492a29d 7249 pixel_rate = div_u64(mul_u32_u32(pixel_rate, pipe_w * pipe_h),
ceb99320
VS
7250 pfit_w * pfit_h);
7251 }
7252
7253 return pixel_rate;
7254}
7255
7ff89ca2 7256static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state)
34edce2f 7257{
7ff89ca2 7258 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
34edce2f 7259
b2ae318a 7260 if (HAS_GMCH(dev_priv))
7ff89ca2
VS
7261 /* FIXME calculate proper pipe pixel rate for GMCH pfit */
7262 crtc_state->pixel_rate =
7263 crtc_state->base.adjusted_mode.crtc_clock;
7264 else
7265 crtc_state->pixel_rate =
7266 ilk_pipe_pixel_rate(crtc_state);
7267}
34edce2f 7268
7ff89ca2
VS
7269static int intel_crtc_compute_config(struct intel_crtc *crtc,
7270 struct intel_crtc_state *pipe_config)
7271{
d2daff2c 7272 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7ff89ca2
VS
7273 const struct drm_display_mode *adjusted_mode = &pipe_config->base.adjusted_mode;
7274 int clock_limit = dev_priv->max_dotclk_freq;
34edce2f 7275
7ff89ca2
VS
7276 if (INTEL_GEN(dev_priv) < 4) {
7277 clock_limit = dev_priv->max_cdclk_freq * 9 / 10;
34edce2f 7278
7ff89ca2
VS
7279 /*
7280 * Enable double wide mode when the dot clock
7281 * is > 90% of the (display) core speed.
7282 */
7283 if (intel_crtc_supports_double_wide(crtc) &&
7284 adjusted_mode->crtc_clock > clock_limit) {
7285 clock_limit = dev_priv->max_dotclk_freq;
7286 pipe_config->double_wide = true;
7287 }
34edce2f
VS
7288 }
7289
7ff89ca2
VS
7290 if (adjusted_mode->crtc_clock > clock_limit) {
7291 DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
7292 adjusted_mode->crtc_clock, clock_limit,
7293 yesno(pipe_config->double_wide));
7294 return -EINVAL;
7295 }
34edce2f 7296
8c79f844
SS
7297 if ((pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
7298 pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444) &&
7299 pipe_config->base.ctm) {
25edf915
SS
7300 /*
7301 * There is only one pipe CSC unit per pipe, and we need that
7302 * for output conversion from RGB->YCBCR. So if CTM is already
7303 * applied we can't support YCBCR420 output.
7304 */
7305 DRM_DEBUG_KMS("YCBCR420 and CTM together are not possible\n");
7306 return -EINVAL;
7307 }
7308
7ff89ca2
VS
7309 /*
7310 * Pipe horizontal size must be even in:
7311 * - DVO ganged mode
7312 * - LVDS dual channel mode
7313 * - Double wide pipe
7314 */
0574bd88
VS
7315 if (pipe_config->pipe_src_w & 1) {
7316 if (pipe_config->double_wide) {
7317 DRM_DEBUG_KMS("Odd pipe source width not supported with double wide pipe\n");
7318 return -EINVAL;
7319 }
7320
7321 if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
d2daff2c 7322 intel_is_dual_link_lvds(dev_priv)) {
0574bd88
VS
7323 DRM_DEBUG_KMS("Odd pipe source width not supported with dual link LVDS\n");
7324 return -EINVAL;
7325 }
7326 }
34edce2f 7327
7ff89ca2
VS
7328 /* Cantiga+ cannot handle modes with a hsync front porch of 0.
7329 * WaPruneModeWithIncorrectHsyncOffset:ctg,elk,ilk,snb,ivb,vlv,hsw.
7330 */
7331 if ((INTEL_GEN(dev_priv) > 4 || IS_G4X(dev_priv)) &&
7332 adjusted_mode->crtc_hsync_start == adjusted_mode->crtc_hdisplay)
7333 return -EINVAL;
34edce2f 7334
7ff89ca2 7335 intel_crtc_compute_pixel_rate(pipe_config);
34edce2f 7336
7ff89ca2
VS
7337 if (pipe_config->has_pch_encoder)
7338 return ironlake_fdi_compute_config(crtc, pipe_config);
34edce2f 7339
7ff89ca2 7340 return 0;
34edce2f
VS
7341}
7342
2c07245f 7343static void
ba3f4d0a 7344intel_reduce_m_n_ratio(u32 *num, u32 *den)
2c07245f 7345{
a65851af
VS
7346 while (*num > DATA_LINK_M_N_MASK ||
7347 *den > DATA_LINK_M_N_MASK) {
2c07245f
ZW
7348 *num >>= 1;
7349 *den >>= 1;
7350 }
7351}
7352
a65851af 7353static void compute_m_n(unsigned int m, unsigned int n,
ba3f4d0a 7354 u32 *ret_m, u32 *ret_n,
53ca2edc 7355 bool constant_n)
a65851af 7356{
9a86cda0 7357 /*
53ca2edc
LS
7358 * Several DP dongles in particular seem to be fussy about
7359 * too large link M/N values. Give N value as 0x8000 that
7360 * should be acceptable by specific devices. 0x8000 is the
7361 * specified fixed N value for asynchronous clock mode,
7362 * which the devices expect also in synchronous clock mode.
9a86cda0 7363 */
53ca2edc
LS
7364 if (constant_n)
7365 *ret_n = 0x8000;
7366 else
7367 *ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);
9a86cda0 7368
d492a29d 7369 *ret_m = div_u64(mul_u32_u32(m, *ret_n), n);
a65851af
VS
7370 intel_reduce_m_n_ratio(ret_m, ret_n);
7371}
7372
e69d0bc1 7373void
a4a15777 7374intel_link_compute_m_n(u16 bits_per_pixel, int nlanes,
e69d0bc1 7375 int pixel_clock, int link_clock,
b31e85ed 7376 struct intel_link_m_n *m_n,
53ca2edc 7377 bool constant_n)
2c07245f 7378{
e69d0bc1 7379 m_n->tu = 64;
a65851af
VS
7380
7381 compute_m_n(bits_per_pixel * pixel_clock,
7382 link_clock * nlanes * 8,
b31e85ed 7383 &m_n->gmch_m, &m_n->gmch_n,
53ca2edc 7384 constant_n);
a65851af
VS
7385
7386 compute_m_n(pixel_clock, link_clock,
b31e85ed 7387 &m_n->link_m, &m_n->link_n,
53ca2edc 7388 constant_n);
2c07245f
ZW
7389}
7390
a7615030
CW
7391static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv)
7392{
4f044a88
MW
7393 if (i915_modparams.panel_use_ssc >= 0)
7394 return i915_modparams.panel_use_ssc != 0;
41aa3448 7395 return dev_priv->vbt.lvds_use_ssc
435793df 7396 && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE);
a7615030
CW
7397}
7398
ba3f4d0a 7399static u32 pnv_dpll_compute_fp(struct dpll *dpll)
c65d77d8 7400{
7df00d7a 7401 return (1 << dpll->n) << 16 | dpll->m2;
7429e9d4 7402}
f47709a9 7403
ba3f4d0a 7404static u32 i9xx_dpll_compute_fp(struct dpll *dpll)
7429e9d4
DV
7405{
7406 return dpll->n << 16 | dpll->m1 << 8 | dpll->m2;
c65d77d8
JB
7407}
7408
f47709a9 7409static void i9xx_update_pll_dividers(struct intel_crtc *crtc,
190f68c5 7410 struct intel_crtc_state *crtc_state,
9e2c8475 7411 struct dpll *reduced_clock)
a7516a05 7412{
9b1e14f4 7413 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
a7516a05
JB
7414 u32 fp, fp2 = 0;
7415
9b1e14f4 7416 if (IS_PINEVIEW(dev_priv)) {
190f68c5 7417 fp = pnv_dpll_compute_fp(&crtc_state->dpll);
a7516a05 7418 if (reduced_clock)
7429e9d4 7419 fp2 = pnv_dpll_compute_fp(reduced_clock);
a7516a05 7420 } else {
190f68c5 7421 fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
a7516a05 7422 if (reduced_clock)
7429e9d4 7423 fp2 = i9xx_dpll_compute_fp(reduced_clock);
a7516a05
JB
7424 }
7425
190f68c5 7426 crtc_state->dpll_hw_state.fp0 = fp;
a7516a05 7427
2d84d2b3 7428 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
ab585dea 7429 reduced_clock) {
190f68c5 7430 crtc_state->dpll_hw_state.fp1 = fp2;
a7516a05 7431 } else {
190f68c5 7432 crtc_state->dpll_hw_state.fp1 = fp;
a7516a05
JB
7433 }
7434}
7435
5e69f97f
CML
7436static void vlv_pllb_recal_opamp(struct drm_i915_private *dev_priv, enum pipe
7437 pipe)
89b667f8
JB
7438{
7439 u32 reg_val;
7440
7441 /*
7442 * PLLB opamp always calibrates to max value of 0x3f, force enable it
7443 * and set it to a reasonable value instead.
7444 */
ab3c759a 7445 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
89b667f8
JB
7446 reg_val &= 0xffffff00;
7447 reg_val |= 0x00000030;
ab3c759a 7448 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
89b667f8 7449
ab3c759a 7450 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
ed58570f
ID
7451 reg_val &= 0x00ffffff;
7452 reg_val |= 0x8c000000;
ab3c759a 7453 vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
89b667f8 7454
ab3c759a 7455 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW9(1));
89b667f8 7456 reg_val &= 0xffffff00;
ab3c759a 7457 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9(1), reg_val);
89b667f8 7458
ab3c759a 7459 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_REF_DW13);
89b667f8
JB
7460 reg_val &= 0x00ffffff;
7461 reg_val |= 0xb0000000;
ab3c759a 7462 vlv_dpio_write(dev_priv, pipe, VLV_REF_DW13, reg_val);
89b667f8
JB
7463}
7464
4c354754
ML
7465static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
7466 const struct intel_link_m_n *m_n)
b551842d 7467{
4c354754
ML
7468 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
7469 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7470 enum pipe pipe = crtc->pipe;
b551842d 7471
e3b95f1e
DV
7472 I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
7473 I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
7474 I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
7475 I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
b551842d
DV
7476}
7477
4207c8b9
ML
7478static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
7479 enum transcoder transcoder)
7480{
7481 if (IS_HASWELL(dev_priv))
7482 return transcoder == TRANSCODER_EDP;
7483
7484 /*
7485 * Strictly speaking some registers are available before
7486 * gen7, but we only support DRRS on gen7+
7487 */
cf819eff 7488 return IS_GEN(dev_priv, 7) || IS_CHERRYVIEW(dev_priv);
4207c8b9
ML
7489}
7490
4c354754
ML
7491static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state,
7492 const struct intel_link_m_n *m_n,
7493 const struct intel_link_m_n *m2_n2)
b551842d 7494{
4c354754 7495 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
6315b5d3 7496 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
4c354754
ML
7497 enum pipe pipe = crtc->pipe;
7498 enum transcoder transcoder = crtc_state->cpu_transcoder;
b551842d 7499
6315b5d3 7500 if (INTEL_GEN(dev_priv) >= 5) {
b551842d
DV
7501 I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
7502 I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
7503 I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
7504 I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
4207c8b9
ML
7505 /*
7506 * M2_N2 registers are set only if DRRS is supported
7507 * (to make sure the registers are not unnecessarily accessed).
f769cd24 7508 */
4207c8b9
ML
7509 if (m2_n2 && crtc_state->has_drrs &&
7510 transcoder_has_m2_n2(dev_priv, transcoder)) {
f769cd24
VK
7511 I915_WRITE(PIPE_DATA_M2(transcoder),
7512 TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
7513 I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
7514 I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m);
7515 I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n);
7516 }
b551842d 7517 } else {
e3b95f1e
DV
7518 I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
7519 I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
7520 I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
7521 I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
b551842d
DV
7522 }
7523}
7524
4c354754 7525void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n)
03afc4a2 7526{
4c354754 7527 const struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
fe3cd48d
R
7528
7529 if (m_n == M1_N1) {
4c354754
ML
7530 dp_m_n = &crtc_state->dp_m_n;
7531 dp_m2_n2 = &crtc_state->dp_m2_n2;
fe3cd48d
R
7532 } else if (m_n == M2_N2) {
7533
7534 /*
7535 * M2_N2 registers are not supported. Hence m2_n2 divider value
7536 * needs to be programmed into M1_N1.
7537 */
4c354754 7538 dp_m_n = &crtc_state->dp_m2_n2;
fe3cd48d
R
7539 } else {
7540 DRM_ERROR("Unsupported divider value\n");
7541 return;
7542 }
7543
4c354754
ML
7544 if (crtc_state->has_pch_encoder)
7545 intel_pch_transcoder_set_m_n(crtc_state, &crtc_state->dp_m_n);
03afc4a2 7546 else
4c354754 7547 intel_cpu_transcoder_set_m_n(crtc_state, dp_m_n, dp_m2_n2);
03afc4a2
DV
7548}
7549
251ac862
DV
7550static void vlv_compute_dpll(struct intel_crtc *crtc,
7551 struct intel_crtc_state *pipe_config)
bdd4b6a6 7552{
03ed5cbf 7553 pipe_config->dpll_hw_state.dpll = DPLL_INTEGRATED_REF_CLK_VLV |
cd2d34d9 7554 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
03ed5cbf
VS
7555 if (crtc->pipe != PIPE_A)
7556 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
bdd4b6a6 7557
cd2d34d9 7558 /* DPLL not used with DSI, but still need the rest set up */
d7edc4e5 7559 if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
cd2d34d9
VS
7560 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE |
7561 DPLL_EXT_BUFFER_ENABLE_VLV;
7562
03ed5cbf
VS
7563 pipe_config->dpll_hw_state.dpll_md =
7564 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
7565}
bdd4b6a6 7566
03ed5cbf
VS
7567static void chv_compute_dpll(struct intel_crtc *crtc,
7568 struct intel_crtc_state *pipe_config)
7569{
7570 pipe_config->dpll_hw_state.dpll = DPLL_SSC_REF_CLK_CHV |
cd2d34d9 7571 DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
03ed5cbf
VS
7572 if (crtc->pipe != PIPE_A)
7573 pipe_config->dpll_hw_state.dpll |= DPLL_INTEGRATED_CRI_CLK_VLV;
7574
cd2d34d9 7575 /* DPLL not used with DSI, but still need the rest set up */
d7edc4e5 7576 if (!intel_crtc_has_type(pipe_config, INTEL_OUTPUT_DSI))
cd2d34d9
VS
7577 pipe_config->dpll_hw_state.dpll |= DPLL_VCO_ENABLE;
7578
03ed5cbf
VS
7579 pipe_config->dpll_hw_state.dpll_md =
7580 (pipe_config->pixel_multiplier - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT;
bdd4b6a6
DV
7581}
7582
d288f65f 7583static void vlv_prepare_pll(struct intel_crtc *crtc,
5cec258b 7584 const struct intel_crtc_state *pipe_config)
a0c4da24 7585{
f47709a9 7586 struct drm_device *dev = crtc->base.dev;
fac5e23e 7587 struct drm_i915_private *dev_priv = to_i915(dev);
cd2d34d9 7588 enum pipe pipe = crtc->pipe;
bdd4b6a6 7589 u32 mdiv;
a0c4da24 7590 u32 bestn, bestm1, bestm2, bestp1, bestp2;
bdd4b6a6 7591 u32 coreclk, reg_val;
a0c4da24 7592
cd2d34d9
VS
7593 /* Enable Refclk */
7594 I915_WRITE(DPLL(pipe),
7595 pipe_config->dpll_hw_state.dpll &
7596 ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));
7597
7598 /* No need to actually set up the DPLL with DSI */
7599 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
7600 return;
7601
221c7862 7602 vlv_dpio_get(dev_priv);
09153000 7603
d288f65f
VS
7604 bestn = pipe_config->dpll.n;
7605 bestm1 = pipe_config->dpll.m1;
7606 bestm2 = pipe_config->dpll.m2;
7607 bestp1 = pipe_config->dpll.p1;
7608 bestp2 = pipe_config->dpll.p2;
a0c4da24 7609
89b667f8
JB
7610 /* See eDP HDMI DPIO driver vbios notes doc */
7611
7612 /* PLL B needs special handling */
bdd4b6a6 7613 if (pipe == PIPE_B)
5e69f97f 7614 vlv_pllb_recal_opamp(dev_priv, pipe);
89b667f8
JB
7615
7616 /* Set up Tx target for periodic Rcomp update */
ab3c759a 7617 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW9_BCAST, 0x0100000f);
89b667f8
JB
7618
7619 /* Disable target IRef on PLL */
ab3c759a 7620 reg_val = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW8(pipe));
89b667f8 7621 reg_val &= 0x00ffffff;
ab3c759a 7622 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW8(pipe), reg_val);
89b667f8
JB
7623
7624 /* Disable fast lock */
ab3c759a 7625 vlv_dpio_write(dev_priv, pipe, VLV_CMN_DW0, 0x610);
89b667f8
JB
7626
7627 /* Set idtafcrecal before PLL is enabled */
a0c4da24
JB
7628 mdiv = ((bestm1 << DPIO_M1DIV_SHIFT) | (bestm2 & DPIO_M2DIV_MASK));
7629 mdiv |= ((bestp1 << DPIO_P1_SHIFT) | (bestp2 << DPIO_P2_SHIFT));
7630 mdiv |= ((bestn << DPIO_N_SHIFT));
a0c4da24 7631 mdiv |= (1 << DPIO_K_SHIFT);
7df5080b
JB
7632
7633 /*
7634 * Post divider depends on pixel clock rate, DAC vs digital (and LVDS,
7635 * but we don't support that).
7636 * Note: don't use the DAC post divider as it seems unstable.
7637 */
7638 mdiv |= (DPIO_POST_DIV_HDMIDP << DPIO_POST_DIV_SHIFT);
ab3c759a 7639 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
a0c4da24 7640
a0c4da24 7641 mdiv |= DPIO_ENABLE_CALIBRATION;
ab3c759a 7642 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW3(pipe), mdiv);
a0c4da24 7643
89b667f8 7644 /* Set HBR and RBR LPF coefficients */
d288f65f 7645 if (pipe_config->port_clock == 162000 ||
92d54b07
ML
7646 intel_crtc_has_type(pipe_config, INTEL_OUTPUT_ANALOG) ||
7647 intel_crtc_has_type(pipe_config, INTEL_OUTPUT_HDMI))
ab3c759a 7648 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
885b0120 7649 0x009f0003);
89b667f8 7650 else
ab3c759a 7651 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW10(pipe),
89b667f8
JB
7652 0x00d0000f);
7653
37a5650b 7654 if (intel_crtc_has_dp_encoder(pipe_config)) {
89b667f8 7655 /* Use SSC source */
bdd4b6a6 7656 if (pipe == PIPE_A)
ab3c759a 7657 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
89b667f8
JB
7658 0x0df40000);
7659 else
ab3c759a 7660 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
89b667f8
JB
7661 0x0df70000);
7662 } else { /* HDMI or VGA */
7663 /* Use bend source */
bdd4b6a6 7664 if (pipe == PIPE_A)
ab3c759a 7665 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
89b667f8
JB
7666 0x0df70000);
7667 else
ab3c759a 7668 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW5(pipe),
89b667f8
JB
7669 0x0df40000);
7670 }
a0c4da24 7671
ab3c759a 7672 coreclk = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW7(pipe));
89b667f8 7673 coreclk = (coreclk & 0x0000ff00) | 0x01c00000;
92d54b07 7674 if (intel_crtc_has_dp_encoder(pipe_config))
89b667f8 7675 coreclk |= 0x01000000;
ab3c759a 7676 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW7(pipe), coreclk);
a0c4da24 7677
ab3c759a 7678 vlv_dpio_write(dev_priv, pipe, VLV_PLL_DW11(pipe), 0x87871000);
221c7862
CW
7679
7680 vlv_dpio_put(dev_priv);
a0c4da24
JB
7681}
7682
d288f65f 7683static void chv_prepare_pll(struct intel_crtc *crtc,
5cec258b 7684 const struct intel_crtc_state *pipe_config)
9d556c99
CML
7685{
7686 struct drm_device *dev = crtc->base.dev;
fac5e23e 7687 struct drm_i915_private *dev_priv = to_i915(dev);
cd2d34d9 7688 enum pipe pipe = crtc->pipe;
9d556c99 7689 enum dpio_channel port = vlv_pipe_to_channel(pipe);
9cbe40c1 7690 u32 loopfilter, tribuf_calcntr;
9d556c99 7691 u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac;
a945ce7e 7692 u32 dpio_val;
9cbe40c1 7693 int vco;
9d556c99 7694
cd2d34d9
VS
7695 /* Enable Refclk and SSC */
7696 I915_WRITE(DPLL(pipe),
7697 pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
7698
7699 /* No need to actually set up the DPLL with DSI */
7700 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
7701 return;
7702
d288f65f
VS
7703 bestn = pipe_config->dpll.n;
7704 bestm2_frac = pipe_config->dpll.m2 & 0x3fffff;
7705 bestm1 = pipe_config->dpll.m1;
7706 bestm2 = pipe_config->dpll.m2 >> 22;
7707 bestp1 = pipe_config->dpll.p1;
7708 bestp2 = pipe_config->dpll.p2;
9cbe40c1 7709 vco = pipe_config->dpll.vco;
a945ce7e 7710 dpio_val = 0;
9cbe40c1 7711 loopfilter = 0;
9d556c99 7712
221c7862 7713 vlv_dpio_get(dev_priv);
9d556c99 7714
9d556c99
CML
7715 /* p1 and p2 divider */
7716 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW13(port),
7717 5 << DPIO_CHV_S1_DIV_SHIFT |
7718 bestp1 << DPIO_CHV_P1_DIV_SHIFT |
7719 bestp2 << DPIO_CHV_P2_DIV_SHIFT |
7720 1 << DPIO_CHV_K_DIV_SHIFT);
7721
7722 /* Feedback post-divider - m2 */
7723 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW0(port), bestm2);
7724
7725 /* Feedback refclk divider - n and m1 */
7726 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW1(port),
7727 DPIO_CHV_M1_DIV_BY_2 |
7728 1 << DPIO_CHV_N_DIV_SHIFT);
7729
7730 /* M2 fraction division */
25a25dfc 7731 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac);
9d556c99
CML
7732
7733 /* M2 fraction division enable */
a945ce7e
VP
7734 dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
7735 dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN);
7736 dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT);
7737 if (bestm2_frac)
7738 dpio_val |= DPIO_CHV_FRAC_DIV_EN;
7739 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val);
9d556c99 7740
de3a0fde
VP
7741 /* Program digital lock detect threshold */
7742 dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port));
7743 dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK |
7744 DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE);
7745 dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT);
7746 if (!bestm2_frac)
7747 dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE;
7748 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val);
7749
9d556c99 7750 /* Loop filter */
9cbe40c1
VP
7751 if (vco == 5400000) {
7752 loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT);
7753 loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT);
7754 loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT);
7755 tribuf_calcntr = 0x9;
7756 } else if (vco <= 6200000) {
7757 loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT);
7758 loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT);
7759 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
7760 tribuf_calcntr = 0x9;
7761 } else if (vco <= 6480000) {
7762 loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
7763 loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
7764 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
7765 tribuf_calcntr = 0x8;
7766 } else {
7767 /* Not supported. Apply the same limits as in the max case */
7768 loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT);
7769 loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT);
7770 loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT);
7771 tribuf_calcntr = 0;
7772 }
9d556c99
CML
7773 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter);
7774
968040b2 7775 dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port));
9cbe40c1
VP
7776 dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK;
7777 dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT);
7778 vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val);
7779
9d556c99
CML
7780 /* AFC Recal */
7781 vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port),
7782 vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) |
7783 DPIO_AFC_RECAL);
7784
221c7862 7785 vlv_dpio_put(dev_priv);
9d556c99
CML
7786}
7787
d288f65f
VS
7788/**
7789 * vlv_force_pll_on - forcibly enable just the PLL
7790 * @dev_priv: i915 private structure
7791 * @pipe: pipe PLL to enable
7792 * @dpll: PLL configuration
7793 *
7794 * Enable the PLL for @pipe using the supplied @dpll config. To be used
7795 * in cases where we need the PLL enabled even when @pipe is not going to
7796 * be enabled.
7797 */
30ad9814 7798int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
3f36b937 7799 const struct dpll *dpll)
d288f65f 7800{
b91eb5cc 7801 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
3f36b937
TU
7802 struct intel_crtc_state *pipe_config;
7803
7804 pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
7805 if (!pipe_config)
7806 return -ENOMEM;
7807
7808 pipe_config->base.crtc = &crtc->base;
7809 pipe_config->pixel_multiplier = 1;
7810 pipe_config->dpll = *dpll;
d288f65f 7811
30ad9814 7812 if (IS_CHERRYVIEW(dev_priv)) {
3f36b937
TU
7813 chv_compute_dpll(crtc, pipe_config);
7814 chv_prepare_pll(crtc, pipe_config);
7815 chv_enable_pll(crtc, pipe_config);
d288f65f 7816 } else {
3f36b937
TU
7817 vlv_compute_dpll(crtc, pipe_config);
7818 vlv_prepare_pll(crtc, pipe_config);
7819 vlv_enable_pll(crtc, pipe_config);
d288f65f 7820 }
3f36b937
TU
7821
7822 kfree(pipe_config);
7823
7824 return 0;
d288f65f
VS
7825}
7826
7827/**
7828 * vlv_force_pll_off - forcibly disable just the PLL
7829 * @dev_priv: i915 private structure
7830 * @pipe: pipe PLL to disable
7831 *
7832 * Disable the PLL for @pipe. To be used in cases where we need
7833 * the PLL enabled even when @pipe is not going to be enabled.
7834 */
30ad9814 7835void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe)
d288f65f 7836{
30ad9814
VS
7837 if (IS_CHERRYVIEW(dev_priv))
7838 chv_disable_pll(dev_priv, pipe);
d288f65f 7839 else
30ad9814 7840 vlv_disable_pll(dev_priv, pipe);
d288f65f
VS
7841}
7842
251ac862
DV
7843static void i9xx_compute_dpll(struct intel_crtc *crtc,
7844 struct intel_crtc_state *crtc_state,
9e2c8475 7845 struct dpll *reduced_clock)
eb1cbe48 7846{
9b1e14f4 7847 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
eb1cbe48 7848 u32 dpll;
190f68c5 7849 struct dpll *clock = &crtc_state->dpll;
eb1cbe48 7850
190f68c5 7851 i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
2a8f64ca 7852
eb1cbe48
DV
7853 dpll = DPLL_VGA_MODE_DIS;
7854
2d84d2b3 7855 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
eb1cbe48
DV
7856 dpll |= DPLLB_MODE_LVDS;
7857 else
7858 dpll |= DPLLB_MODE_DAC_SERIAL;
6cc5f341 7859
73f67aa8
JN
7860 if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
7861 IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
190f68c5 7862 dpll |= (crtc_state->pixel_multiplier - 1)
198a037f 7863 << SDVO_MULTIPLIER_SHIFT_HIRES;
eb1cbe48 7864 }
198a037f 7865
3d6e9ee0
VS
7866 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
7867 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
4a33e48d 7868 dpll |= DPLL_SDVO_HIGH_SPEED;
198a037f 7869
37a5650b 7870 if (intel_crtc_has_dp_encoder(crtc_state))
4a33e48d 7871 dpll |= DPLL_SDVO_HIGH_SPEED;
eb1cbe48
DV
7872
7873 /* compute bitmask from p1 value */
9b1e14f4 7874 if (IS_PINEVIEW(dev_priv))
eb1cbe48
DV
7875 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW;
7876 else {
7877 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
9beb5fea 7878 if (IS_G4X(dev_priv) && reduced_clock)
eb1cbe48
DV
7879 dpll |= (1 << (reduced_clock->p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
7880 }
7881 switch (clock->p2) {
7882 case 5:
7883 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
7884 break;
7885 case 7:
7886 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
7887 break;
7888 case 10:
7889 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
7890 break;
7891 case 14:
7892 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
7893 break;
7894 }
9b1e14f4 7895 if (INTEL_GEN(dev_priv) >= 4)
eb1cbe48
DV
7896 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
7897
190f68c5 7898 if (crtc_state->sdvo_tv_clock)
eb1cbe48 7899 dpll |= PLL_REF_INPUT_TVCLKINBC;
2d84d2b3 7900 else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
ceb41007 7901 intel_panel_use_ssc(dev_priv))
eb1cbe48
DV
7902 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
7903 else
7904 dpll |= PLL_REF_INPUT_DREFCLK;
7905
7906 dpll |= DPLL_VCO_ENABLE;
190f68c5 7907 crtc_state->dpll_hw_state.dpll = dpll;
8bcc2795 7908
9b1e14f4 7909 if (INTEL_GEN(dev_priv) >= 4) {
190f68c5 7910 u32 dpll_md = (crtc_state->pixel_multiplier - 1)
ef1b460d 7911 << DPLL_MD_UDI_MULTIPLIER_SHIFT;
190f68c5 7912 crtc_state->dpll_hw_state.dpll_md = dpll_md;
eb1cbe48
DV
7913 }
7914}
7915
251ac862
DV
7916static void i8xx_compute_dpll(struct intel_crtc *crtc,
7917 struct intel_crtc_state *crtc_state,
9e2c8475 7918 struct dpll *reduced_clock)
eb1cbe48 7919{
f47709a9 7920 struct drm_device *dev = crtc->base.dev;
fac5e23e 7921 struct drm_i915_private *dev_priv = to_i915(dev);
eb1cbe48 7922 u32 dpll;
190f68c5 7923 struct dpll *clock = &crtc_state->dpll;
eb1cbe48 7924
190f68c5 7925 i9xx_update_pll_dividers(crtc, crtc_state, reduced_clock);
2a8f64ca 7926
eb1cbe48
DV
7927 dpll = DPLL_VGA_MODE_DIS;
7928
2d84d2b3 7929 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
eb1cbe48
DV
7930 dpll |= (1 << (clock->p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7931 } else {
7932 if (clock->p1 == 2)
7933 dpll |= PLL_P1_DIVIDE_BY_TWO;
7934 else
7935 dpll |= (clock->p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT;
7936 if (clock->p2 == 4)
7937 dpll |= PLL_P2_DIVIDE_BY_4;
7938 }
7939
171d1562
VS
7940 /*
7941 * Bspec:
7942 * "[Almador Errata}: For the correct operation of the muxed DVO pins
7943 * (GDEVSELB/I2Cdata, GIRDBY/I2CClk) and (GFRAMEB/DVI_Data,
7944 * GTRDYB/DVI_Clk): Bit 31 (DPLL VCO Enable) and Bit 30 (2X Clock
7945 * Enable) must be set to “1” in both the DPLL A Control Register
7946 * (06014h-06017h) and DPLL B Control Register (06018h-0601Bh)."
7947 *
7948 * For simplicity We simply keep both bits always enabled in
7949 * both DPLLS. The spec says we should disable the DVO 2X clock
7950 * when not needed, but this seems to work fine in practice.
7951 */
7952 if (IS_I830(dev_priv) ||
50a0bc90 7953 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO))
4a33e48d
DV
7954 dpll |= DPLL_DVO_2X_MODE;
7955
2d84d2b3 7956 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
ceb41007 7957 intel_panel_use_ssc(dev_priv))
eb1cbe48
DV
7958 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
7959 else
7960 dpll |= PLL_REF_INPUT_DREFCLK;
7961
7962 dpll |= DPLL_VCO_ENABLE;
190f68c5 7963 crtc_state->dpll_hw_state.dpll = dpll;
eb1cbe48
DV
7964}
7965
44fe7f35 7966static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state)
b0e77b9c 7967{
44fe7f35
ML
7968 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
7969 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
7970 enum pipe pipe = crtc->pipe;
7971 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
7972 const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
ba3f4d0a 7973 u32 crtc_vtotal, crtc_vblank_end;
1caea6e9 7974 int vsyncshift = 0;
4d8a62ea
DV
7975
7976 /* We need to be careful not to changed the adjusted mode, for otherwise
7977 * the hw state checker will get angry at the mismatch. */
7978 crtc_vtotal = adjusted_mode->crtc_vtotal;
7979 crtc_vblank_end = adjusted_mode->crtc_vblank_end;
b0e77b9c 7980
609aeaca 7981 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
b0e77b9c 7982 /* the chip adds 2 halflines automatically */
4d8a62ea
DV
7983 crtc_vtotal -= 1;
7984 crtc_vblank_end -= 1;
609aeaca 7985
44fe7f35 7986 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
609aeaca
VS
7987 vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
7988 else
7989 vsyncshift = adjusted_mode->crtc_hsync_start -
7990 adjusted_mode->crtc_htotal / 2;
1caea6e9
VS
7991 if (vsyncshift < 0)
7992 vsyncshift += adjusted_mode->crtc_htotal;
b0e77b9c
PZ
7993 }
7994
6315b5d3 7995 if (INTEL_GEN(dev_priv) > 3)
fe2b8f9d 7996 I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);
b0e77b9c 7997
fe2b8f9d 7998 I915_WRITE(HTOTAL(cpu_transcoder),
b0e77b9c
PZ
7999 (adjusted_mode->crtc_hdisplay - 1) |
8000 ((adjusted_mode->crtc_htotal - 1) << 16));
fe2b8f9d 8001 I915_WRITE(HBLANK(cpu_transcoder),
b0e77b9c
PZ
8002 (adjusted_mode->crtc_hblank_start - 1) |
8003 ((adjusted_mode->crtc_hblank_end - 1) << 16));
fe2b8f9d 8004 I915_WRITE(HSYNC(cpu_transcoder),
b0e77b9c
PZ
8005 (adjusted_mode->crtc_hsync_start - 1) |
8006 ((adjusted_mode->crtc_hsync_end - 1) << 16));
8007
fe2b8f9d 8008 I915_WRITE(VTOTAL(cpu_transcoder),
b0e77b9c 8009 (adjusted_mode->crtc_vdisplay - 1) |
4d8a62ea 8010 ((crtc_vtotal - 1) << 16));
fe2b8f9d 8011 I915_WRITE(VBLANK(cpu_transcoder),
b0e77b9c 8012 (adjusted_mode->crtc_vblank_start - 1) |
4d8a62ea 8013 ((crtc_vblank_end - 1) << 16));
fe2b8f9d 8014 I915_WRITE(VSYNC(cpu_transcoder),
b0e77b9c
PZ
8015 (adjusted_mode->crtc_vsync_start - 1) |
8016 ((adjusted_mode->crtc_vsync_end - 1) << 16));
8017
b5e508d4
PZ
8018 /* Workaround: when the EDP input selection is B, the VTOTAL_B must be
8019 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
8020 * documented on the DDI_FUNC_CTL register description, EDP Input Select
8021 * bits. */
772c2a51 8022 if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
b5e508d4
PZ
8023 (pipe == PIPE_B || pipe == PIPE_C))
8024 I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));
8025
bc58be60
JN
8026}
8027
44fe7f35 8028static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
bc58be60 8029{
44fe7f35
ML
8030 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
8031 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8032 enum pipe pipe = crtc->pipe;
bc58be60 8033
b0e77b9c
PZ
8034 /* pipesrc controls the size that is scaled from, which should
8035 * always be the user's requested size.
8036 */
8037 I915_WRITE(PIPESRC(pipe),
44fe7f35
ML
8038 ((crtc_state->pipe_src_w - 1) << 16) |
8039 (crtc_state->pipe_src_h - 1));
b0e77b9c
PZ
8040}
8041
1bd1bd80 8042static void intel_get_pipe_timings(struct intel_crtc *crtc,
5cec258b 8043 struct intel_crtc_state *pipe_config)
1bd1bd80
DV
8044{
8045 struct drm_device *dev = crtc->base.dev;
fac5e23e 8046 struct drm_i915_private *dev_priv = to_i915(dev);
1bd1bd80 8047 enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
ba3f4d0a 8048 u32 tmp;
1bd1bd80
DV
8049
8050 tmp = I915_READ(HTOTAL(cpu_transcoder));
2d112de7
ACO
8051 pipe_config->base.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
8052 pipe_config->base.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
3c23ed13
VK
8053
8054 if (!transcoder_is_dsi(cpu_transcoder)) {
8055 tmp = I915_READ(HBLANK(cpu_transcoder));
8056 pipe_config->base.adjusted_mode.crtc_hblank_start =
8057 (tmp & 0xffff) + 1;
8058 pipe_config->base.adjusted_mode.crtc_hblank_end =
8059 ((tmp >> 16) & 0xffff) + 1;
8060 }
1bd1bd80 8061 tmp = I915_READ(HSYNC(cpu_transcoder));
2d112de7
ACO
8062 pipe_config->base.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
8063 pipe_config->base.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
1bd1bd80
DV
8064
8065 tmp = I915_READ(VTOTAL(cpu_transcoder));
2d112de7
ACO
8066 pipe_config->base.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
8067 pipe_config->base.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
3c23ed13
VK
8068
8069 if (!transcoder_is_dsi(cpu_transcoder)) {
8070 tmp = I915_READ(VBLANK(cpu_transcoder));
8071 pipe_config->base.adjusted_mode.crtc_vblank_start =
8072 (tmp & 0xffff) + 1;
8073 pipe_config->base.adjusted_mode.crtc_vblank_end =
8074 ((tmp >> 16) & 0xffff) + 1;
8075 }
1bd1bd80 8076 tmp = I915_READ(VSYNC(cpu_transcoder));
2d112de7
ACO
8077 pipe_config->base.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
8078 pipe_config->base.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
1bd1bd80
DV
8079
8080 if (I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK) {
2d112de7
ACO
8081 pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_INTERLACE;
8082 pipe_config->base.adjusted_mode.crtc_vtotal += 1;
8083 pipe_config->base.adjusted_mode.crtc_vblank_end += 1;
1bd1bd80 8084 }
bc58be60
JN
8085}
8086
8087static void intel_get_pipe_src_size(struct intel_crtc *crtc,
8088 struct intel_crtc_state *pipe_config)
8089{
8090 struct drm_device *dev = crtc->base.dev;
fac5e23e 8091 struct drm_i915_private *dev_priv = to_i915(dev);
bc58be60 8092 u32 tmp;
1bd1bd80
DV
8093
8094 tmp = I915_READ(PIPESRC(crtc->pipe));
37327abd
VS
8095 pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
8096 pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
8097
2d112de7
ACO
8098 pipe_config->base.mode.vdisplay = pipe_config->pipe_src_h;
8099 pipe_config->base.mode.hdisplay = pipe_config->pipe_src_w;
1bd1bd80
DV
8100}
8101
f6a83288 8102void intel_mode_from_pipe_config(struct drm_display_mode *mode,
5cec258b 8103 struct intel_crtc_state *pipe_config)
babea61d 8104{
2d112de7
ACO
8105 mode->hdisplay = pipe_config->base.adjusted_mode.crtc_hdisplay;
8106 mode->htotal = pipe_config->base.adjusted_mode.crtc_htotal;
8107 mode->hsync_start = pipe_config->base.adjusted_mode.crtc_hsync_start;
8108 mode->hsync_end = pipe_config->base.adjusted_mode.crtc_hsync_end;
babea61d 8109
2d112de7
ACO
8110 mode->vdisplay = pipe_config->base.adjusted_mode.crtc_vdisplay;
8111 mode->vtotal = pipe_config->base.adjusted_mode.crtc_vtotal;
8112 mode->vsync_start = pipe_config->base.adjusted_mode.crtc_vsync_start;
8113 mode->vsync_end = pipe_config->base.adjusted_mode.crtc_vsync_end;
babea61d 8114
2d112de7 8115 mode->flags = pipe_config->base.adjusted_mode.flags;
cd13f5ab 8116 mode->type = DRM_MODE_TYPE_DRIVER;
babea61d 8117
2d112de7 8118 mode->clock = pipe_config->base.adjusted_mode.crtc_clock;
cd13f5ab
ML
8119
8120 mode->hsync = drm_mode_hsync(mode);
8121 mode->vrefresh = drm_mode_vrefresh(mode);
8122 drm_mode_set_name(mode);
babea61d
JB
8123}
8124
fdf73510 8125static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
84b046f3 8126{
fdf73510
ML
8127 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
8128 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
ba3f4d0a 8129 u32 pipeconf;
84b046f3 8130
9f11a9e4 8131 pipeconf = 0;
84b046f3 8132
e56134bc
VS
8133 /* we keep both pipes enabled on 830 */
8134 if (IS_I830(dev_priv))
fdf73510 8135 pipeconf |= I915_READ(PIPECONF(crtc->pipe)) & PIPECONF_ENABLE;
67c72a12 8136
fdf73510 8137 if (crtc_state->double_wide)
cf532bb2 8138 pipeconf |= PIPECONF_DOUBLE_WIDE;
84b046f3 8139
ff9ce46e 8140 /* only g4x and later have fancy bpc/dither controls */
9beb5fea
TU
8141 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
8142 IS_CHERRYVIEW(dev_priv)) {
ff9ce46e 8143 /* Bspec claims that we can't use dithering for 30bpp pipes. */
fdf73510 8144 if (crtc_state->dither && crtc_state->pipe_bpp != 30)
ff9ce46e 8145 pipeconf |= PIPECONF_DITHER_EN |
84b046f3 8146 PIPECONF_DITHER_TYPE_SP;
84b046f3 8147
fdf73510 8148 switch (crtc_state->pipe_bpp) {
ff9ce46e
DV
8149 case 18:
8150 pipeconf |= PIPECONF_6BPC;
8151 break;
8152 case 24:
8153 pipeconf |= PIPECONF_8BPC;
8154 break;
8155 case 30:
8156 pipeconf |= PIPECONF_10BPC;
8157 break;
8158 default:
8159 /* Case prevented by intel_choose_pipe_bpp_dither. */
8160 BUG();
84b046f3
DV
8161 }
8162 }
8163
fdf73510 8164 if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
6315b5d3 8165 if (INTEL_GEN(dev_priv) < 4 ||
fdf73510 8166 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
efc2cfff
VS
8167 pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION;
8168 else
8169 pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT;
27b680f9 8170 } else {
84b046f3 8171 pipeconf |= PIPECONF_PROGRESSIVE;
27b680f9 8172 }
84b046f3 8173
920a14b2 8174 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
fdf73510 8175 crtc_state->limited_color_range)
9f11a9e4 8176 pipeconf |= PIPECONF_COLOR_RANGE_SELECT;
9c8e09b7 8177
9d5441de
VS
8178 pipeconf |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
8179
fdf73510
ML
8180 I915_WRITE(PIPECONF(crtc->pipe), pipeconf);
8181 POSTING_READ(PIPECONF(crtc->pipe));
84b046f3
DV
8182}
8183
81c97f52
ACO
8184static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
8185 struct intel_crtc_state *crtc_state)
8186{
8187 struct drm_device *dev = crtc->base.dev;
fac5e23e 8188 struct drm_i915_private *dev_priv = to_i915(dev);
1b6f4958 8189 const struct intel_limit *limit;
81c97f52
ACO
8190 int refclk = 48000;
8191
8192 memset(&crtc_state->dpll_hw_state, 0,
8193 sizeof(crtc_state->dpll_hw_state));
8194
2d84d2b3 8195 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
81c97f52
ACO
8196 if (intel_panel_use_ssc(dev_priv)) {
8197 refclk = dev_priv->vbt.lvds_ssc_freq;
8198 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
8199 }
8200
8201 limit = &intel_limits_i8xx_lvds;
2d84d2b3 8202 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DVO)) {
81c97f52
ACO
8203 limit = &intel_limits_i8xx_dvo;
8204 } else {
8205 limit = &intel_limits_i8xx_dac;
8206 }
8207
8208 if (!crtc_state->clock_set &&
8209 !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8210 refclk, NULL, &crtc_state->dpll)) {
8211 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8212 return -EINVAL;
8213 }
8214
8215 i8xx_compute_dpll(crtc, crtc_state, NULL);
8216
8217 return 0;
8218}
8219
19ec6693
ACO
8220static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
8221 struct intel_crtc_state *crtc_state)
8222{
d2daff2c 8223 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1b6f4958 8224 const struct intel_limit *limit;
19ec6693
ACO
8225 int refclk = 96000;
8226
8227 memset(&crtc_state->dpll_hw_state, 0,
8228 sizeof(crtc_state->dpll_hw_state));
8229
2d84d2b3 8230 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
19ec6693
ACO
8231 if (intel_panel_use_ssc(dev_priv)) {
8232 refclk = dev_priv->vbt.lvds_ssc_freq;
8233 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
8234 }
8235
d2daff2c 8236 if (intel_is_dual_link_lvds(dev_priv))
19ec6693
ACO
8237 limit = &intel_limits_g4x_dual_channel_lvds;
8238 else
8239 limit = &intel_limits_g4x_single_channel_lvds;
2d84d2b3
VS
8240 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
8241 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
19ec6693 8242 limit = &intel_limits_g4x_hdmi;
2d84d2b3 8243 } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) {
19ec6693
ACO
8244 limit = &intel_limits_g4x_sdvo;
8245 } else {
8246 /* The option is for other outputs */
8247 limit = &intel_limits_i9xx_sdvo;
8248 }
8249
8250 if (!crtc_state->clock_set &&
8251 !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8252 refclk, NULL, &crtc_state->dpll)) {
8253 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8254 return -EINVAL;
8255 }
8256
8257 i9xx_compute_dpll(crtc, crtc_state, NULL);
8258
8259 return 0;
8260}
8261
70e8aa21
ACO
8262static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
8263 struct intel_crtc_state *crtc_state)
8264{
8265 struct drm_device *dev = crtc->base.dev;
fac5e23e 8266 struct drm_i915_private *dev_priv = to_i915(dev);
1b6f4958 8267 const struct intel_limit *limit;
70e8aa21
ACO
8268 int refclk = 96000;
8269
8270 memset(&crtc_state->dpll_hw_state, 0,
8271 sizeof(crtc_state->dpll_hw_state));
8272
2d84d2b3 8273 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
70e8aa21
ACO
8274 if (intel_panel_use_ssc(dev_priv)) {
8275 refclk = dev_priv->vbt.lvds_ssc_freq;
8276 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
8277 }
8278
8279 limit = &intel_limits_pineview_lvds;
8280 } else {
8281 limit = &intel_limits_pineview_sdvo;
8282 }
8283
8284 if (!crtc_state->clock_set &&
8285 !pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8286 refclk, NULL, &crtc_state->dpll)) {
8287 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8288 return -EINVAL;
8289 }
8290
8291 i9xx_compute_dpll(crtc, crtc_state, NULL);
8292
8293 return 0;
8294}
8295
190f68c5
ACO
8296static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
8297 struct intel_crtc_state *crtc_state)
79e53945 8298{
c7653199 8299 struct drm_device *dev = crtc->base.dev;
fac5e23e 8300 struct drm_i915_private *dev_priv = to_i915(dev);
1b6f4958 8301 const struct intel_limit *limit;
81c97f52 8302 int refclk = 96000;
79e53945 8303
dd3cd74a
ACO
8304 memset(&crtc_state->dpll_hw_state, 0,
8305 sizeof(crtc_state->dpll_hw_state));
8306
2d84d2b3 8307 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
70e8aa21
ACO
8308 if (intel_panel_use_ssc(dev_priv)) {
8309 refclk = dev_priv->vbt.lvds_ssc_freq;
8310 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
8311 }
43565a06 8312
70e8aa21
ACO
8313 limit = &intel_limits_i9xx_lvds;
8314 } else {
8315 limit = &intel_limits_i9xx_sdvo;
81c97f52 8316 }
79e53945 8317
70e8aa21
ACO
8318 if (!crtc_state->clock_set &&
8319 !i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8320 refclk, NULL, &crtc_state->dpll)) {
8321 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8322 return -EINVAL;
f47709a9 8323 }
7026d4ac 8324
81c97f52 8325 i9xx_compute_dpll(crtc, crtc_state, NULL);
79e53945 8326
c8f7a0db 8327 return 0;
f564048e
EA
8328}
8329
65b3d6a9
ACO
8330static int chv_crtc_compute_clock(struct intel_crtc *crtc,
8331 struct intel_crtc_state *crtc_state)
8332{
8333 int refclk = 100000;
1b6f4958 8334 const struct intel_limit *limit = &intel_limits_chv;
65b3d6a9
ACO
8335
8336 memset(&crtc_state->dpll_hw_state, 0,
8337 sizeof(crtc_state->dpll_hw_state));
8338
65b3d6a9
ACO
8339 if (!crtc_state->clock_set &&
8340 !chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8341 refclk, NULL, &crtc_state->dpll)) {
8342 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8343 return -EINVAL;
8344 }
8345
8346 chv_compute_dpll(crtc, crtc_state);
8347
8348 return 0;
8349}
8350
8351static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
8352 struct intel_crtc_state *crtc_state)
8353{
8354 int refclk = 100000;
1b6f4958 8355 const struct intel_limit *limit = &intel_limits_vlv;
65b3d6a9
ACO
8356
8357 memset(&crtc_state->dpll_hw_state, 0,
8358 sizeof(crtc_state->dpll_hw_state));
8359
65b3d6a9
ACO
8360 if (!crtc_state->clock_set &&
8361 !vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
8362 refclk, NULL, &crtc_state->dpll)) {
8363 DRM_ERROR("Couldn't find PLL settings for mode!\n");
8364 return -EINVAL;
8365 }
8366
8367 vlv_compute_dpll(crtc, crtc_state);
8368
8369 return 0;
8370}
8371
b7c8093f
VS
8372static bool i9xx_has_pfit(struct drm_i915_private *dev_priv)
8373{
8374 if (IS_I830(dev_priv))
8375 return false;
8376
8377 return INTEL_GEN(dev_priv) >= 4 ||
8378 IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv);
8379}
8380
2fa2fe9a 8381static void i9xx_get_pfit_config(struct intel_crtc *crtc,
5cec258b 8382 struct intel_crtc_state *pipe_config)
2fa2fe9a 8383{
6315b5d3 8384 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
ba3f4d0a 8385 u32 tmp;
2fa2fe9a 8386
b7c8093f 8387 if (!i9xx_has_pfit(dev_priv))
dc9e7dec
VS
8388 return;
8389
2fa2fe9a 8390 tmp = I915_READ(PFIT_CONTROL);
06922821
DV
8391 if (!(tmp & PFIT_ENABLE))
8392 return;
2fa2fe9a 8393
06922821 8394 /* Check whether the pfit is attached to our pipe. */
6315b5d3 8395 if (INTEL_GEN(dev_priv) < 4) {
2fa2fe9a
DV
8396 if (crtc->pipe != PIPE_B)
8397 return;
2fa2fe9a
DV
8398 } else {
8399 if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
8400 return;
8401 }
8402
06922821 8403 pipe_config->gmch_pfit.control = tmp;
2fa2fe9a 8404 pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
2fa2fe9a
DV
8405}
8406
acbec814 8407static void vlv_crtc_clock_get(struct intel_crtc *crtc,
5cec258b 8408 struct intel_crtc_state *pipe_config)
acbec814
JB
8409{
8410 struct drm_device *dev = crtc->base.dev;
fac5e23e 8411 struct drm_i915_private *dev_priv = to_i915(dev);
acbec814 8412 int pipe = pipe_config->cpu_transcoder;
9e2c8475 8413 struct dpll clock;
acbec814 8414 u32 mdiv;
662c6ecb 8415 int refclk = 100000;
acbec814 8416
b521973b
VS
8417 /* In case of DSI, DPLL will not be used */
8418 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
f573de5a
SK
8419 return;
8420
221c7862 8421 vlv_dpio_get(dev_priv);
ab3c759a 8422 mdiv = vlv_dpio_read(dev_priv, pipe, VLV_PLL_DW3(pipe));
221c7862 8423 vlv_dpio_put(dev_priv);
acbec814
JB
8424
8425 clock.m1 = (mdiv >> DPIO_M1DIV_SHIFT) & 7;
8426 clock.m2 = mdiv & DPIO_M2DIV_MASK;
8427 clock.n = (mdiv >> DPIO_N_SHIFT) & 0xf;
8428 clock.p1 = (mdiv >> DPIO_P1_SHIFT) & 7;
8429 clock.p2 = (mdiv >> DPIO_P2_SHIFT) & 0x1f;
8430
dccbea3b 8431 pipe_config->port_clock = vlv_calc_dpll_params(refclk, &clock);
acbec814
JB
8432}
8433
5724dbd1
DL
8434static void
8435i9xx_get_initial_plane_config(struct intel_crtc *crtc,
8436 struct intel_initial_plane_config *plane_config)
1ad292b5
JB
8437{
8438 struct drm_device *dev = crtc->base.dev;
fac5e23e 8439 struct drm_i915_private *dev_priv = to_i915(dev);
282e83ef
VS
8440 struct intel_plane *plane = to_intel_plane(crtc->base.primary);
8441 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
eade6c89 8442 enum pipe pipe;
1ad292b5 8443 u32 val, base, offset;
1ad292b5 8444 int fourcc, pixel_format;
6761dd31 8445 unsigned int aligned_height;
b113d5ee 8446 struct drm_framebuffer *fb;
1b842c89 8447 struct intel_framebuffer *intel_fb;
1ad292b5 8448
eade6c89 8449 if (!plane->get_hw_state(plane, &pipe))
42a7b088
DL
8450 return;
8451
eade6c89
VS
8452 WARN_ON(pipe != crtc->pipe);
8453
d9806c9f 8454 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
1b842c89 8455 if (!intel_fb) {
1ad292b5
JB
8456 DRM_DEBUG_KMS("failed to alloc fb\n");
8457 return;
8458 }
8459
1b842c89
DL
8460 fb = &intel_fb->base;
8461
d2e9f5fc
VS
8462 fb->dev = dev;
8463
2924b8cc
VS
8464 val = I915_READ(DSPCNTR(i9xx_plane));
8465
6315b5d3 8466 if (INTEL_GEN(dev_priv) >= 4) {
18c5247e 8467 if (val & DISPPLANE_TILED) {
49af449b 8468 plane_config->tiling = I915_TILING_X;
bae781b2 8469 fb->modifier = I915_FORMAT_MOD_X_TILED;
18c5247e 8470 }
f43348a3
VS
8471
8472 if (val & DISPPLANE_ROTATE_180)
8473 plane_config->rotation = DRM_MODE_ROTATE_180;
18c5247e 8474 }
1ad292b5 8475
f43348a3
VS
8476 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B &&
8477 val & DISPPLANE_MIRROR)
8478 plane_config->rotation |= DRM_MODE_REFLECT_X;
8479
1ad292b5 8480 pixel_format = val & DISPPLANE_PIXFORMAT_MASK;
b35d63fa 8481 fourcc = i9xx_format_to_fourcc(pixel_format);
2f3f4763 8482 fb->format = drm_format_info(fourcc);
1ad292b5 8483
81894b2f
VS
8484 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
8485 offset = I915_READ(DSPOFFSET(i9xx_plane));
8486 base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000;
8487 } else if (INTEL_GEN(dev_priv) >= 4) {
49af449b 8488 if (plane_config->tiling)
282e83ef 8489 offset = I915_READ(DSPTILEOFF(i9xx_plane));
1ad292b5 8490 else
282e83ef
VS
8491 offset = I915_READ(DSPLINOFF(i9xx_plane));
8492 base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000;
1ad292b5 8493 } else {
282e83ef 8494 base = I915_READ(DSPADDR(i9xx_plane));
1ad292b5
JB
8495 }
8496 plane_config->base = base;
8497
8498 val = I915_READ(PIPESRC(pipe));
b113d5ee
DL
8499 fb->width = ((val >> 16) & 0xfff) + 1;
8500 fb->height = ((val >> 0) & 0xfff) + 1;
1ad292b5 8501
282e83ef 8502 val = I915_READ(DSPSTRIDE(i9xx_plane));
b113d5ee 8503 fb->pitches[0] = val & 0xffffffc0;
1ad292b5 8504
d88c4afd 8505 aligned_height = intel_fb_align_height(fb, 0, fb->height);
1ad292b5 8506
f37b5c2b 8507 plane_config->size = fb->pitches[0] * aligned_height;
1ad292b5 8508
282e83ef
VS
8509 DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
8510 crtc->base.name, plane->base.name, fb->width, fb->height,
272725c7 8511 fb->format->cpp[0] * 8, base, fb->pitches[0],
2844a921 8512 plane_config->size);
1ad292b5 8513
2d14030b 8514 plane_config->fb = intel_fb;
1ad292b5
JB
8515}
8516
70b23a98 8517static void chv_crtc_clock_get(struct intel_crtc *crtc,
5cec258b 8518 struct intel_crtc_state *pipe_config)
70b23a98
VS
8519{
8520 struct drm_device *dev = crtc->base.dev;
fac5e23e 8521 struct drm_i915_private *dev_priv = to_i915(dev);
70b23a98
VS
8522 int pipe = pipe_config->cpu_transcoder;
8523 enum dpio_channel port = vlv_pipe_to_channel(pipe);
9e2c8475 8524 struct dpll clock;
0d7b6b11 8525 u32 cmn_dw13, pll_dw0, pll_dw1, pll_dw2, pll_dw3;
70b23a98
VS
8526 int refclk = 100000;
8527
b521973b
VS
8528 /* In case of DSI, DPLL will not be used */
8529 if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
8530 return;
8531
221c7862 8532 vlv_dpio_get(dev_priv);
70b23a98
VS
8533 cmn_dw13 = vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW13(port));
8534 pll_dw0 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW0(port));
8535 pll_dw1 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW1(port));
8536 pll_dw2 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW2(port));
0d7b6b11 8537 pll_dw3 = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port));
221c7862 8538 vlv_dpio_put(dev_priv);
70b23a98
VS
8539
8540 clock.m1 = (pll_dw1 & 0x7) == DPIO_CHV_M1_DIV_BY_2 ? 2 : 0;
0d7b6b11
ID
8541 clock.m2 = (pll_dw0 & 0xff) << 22;
8542 if (pll_dw3 & DPIO_CHV_FRAC_DIV_EN)
8543 clock.m2 |= pll_dw2 & 0x3fffff;
70b23a98
VS
8544 clock.n = (pll_dw1 >> DPIO_CHV_N_DIV_SHIFT) & 0xf;
8545 clock.p1 = (cmn_dw13 >> DPIO_CHV_P1_DIV_SHIFT) & 0x7;
8546 clock.p2 = (cmn_dw13 >> DPIO_CHV_P2_DIV_SHIFT) & 0x1f;
8547
dccbea3b 8548 pipe_config->port_clock = chv_calc_dpll_params(refclk, &clock);
70b23a98
VS
8549}
8550
33b7f3ee
SS
8551static void intel_get_crtc_ycbcr_config(struct intel_crtc *crtc,
8552 struct intel_crtc_state *pipe_config)
8553{
8554 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8555 enum intel_output_format output = INTEL_OUTPUT_FORMAT_RGB;
8556
668b6c17
SS
8557 pipe_config->lspcon_downsampling = false;
8558
33b7f3ee
SS
8559 if (IS_BROADWELL(dev_priv) || INTEL_GEN(dev_priv) >= 9) {
8560 u32 tmp = I915_READ(PIPEMISC(crtc->pipe));
8561
8562 if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
8563 bool ycbcr420_enabled = tmp & PIPEMISC_YUV420_ENABLE;
8564 bool blend = tmp & PIPEMISC_YUV420_MODE_FULL_BLEND;
8565
8566 if (ycbcr420_enabled) {
8567 /* We support 4:2:0 in full blend mode only */
8568 if (!blend)
8569 output = INTEL_OUTPUT_FORMAT_INVALID;
8570 else if (!(IS_GEMINILAKE(dev_priv) ||
8571 INTEL_GEN(dev_priv) >= 10))
8572 output = INTEL_OUTPUT_FORMAT_INVALID;
8573 else
8574 output = INTEL_OUTPUT_FORMAT_YCBCR420;
8c79f844 8575 } else {
668b6c17
SS
8576 /*
8577 * Currently there is no interface defined to
8578 * check user preference between RGB/YCBCR444
8579 * or YCBCR420. So the only possible case for
8580 * YCBCR444 usage is driving YCBCR420 output
8581 * with LSPCON, when pipe is configured for
8582 * YCBCR444 output and LSPCON takes care of
8583 * downsampling it.
8584 */
8585 pipe_config->lspcon_downsampling = true;
8c79f844 8586 output = INTEL_OUTPUT_FORMAT_YCBCR444;
33b7f3ee
SS
8587 }
8588 }
8589 }
8590
8591 pipe_config->output_format = output;
8592}
8593
5f29ab23
VS
8594static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state)
8595{
8596 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
8597 struct intel_plane *plane = to_intel_plane(crtc->base.primary);
8598 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
8599 enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
8600 u32 tmp;
8601
8602 tmp = I915_READ(DSPCNTR(i9xx_plane));
8603
8604 if (tmp & DISPPLANE_GAMMA_ENABLE)
8605 crtc_state->gamma_enable = true;
8271b2ef
VS
8606
8607 if (!HAS_GMCH(dev_priv) &&
8608 tmp & DISPPLANE_PIPE_CSC_ENABLE)
8609 crtc_state->csc_enable = true;
5f29ab23
VS
8610}
8611
0e8ffe1b 8612static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
5cec258b 8613 struct intel_crtc_state *pipe_config)
0e8ffe1b 8614{
6315b5d3 8615 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1729050e 8616 enum intel_display_power_domain power_domain;
0e6e0be4 8617 intel_wakeref_t wakeref;
ba3f4d0a 8618 u32 tmp;
1729050e 8619 bool ret;
0e8ffe1b 8620
1729050e 8621 power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
0e6e0be4
CW
8622 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
8623 if (!wakeref)
b5482bd0
ID
8624 return false;
8625
d9facae6 8626 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
e143a21c 8627 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
8106ddbd 8628 pipe_config->shared_dpll = NULL;
eccb140b 8629
1729050e
ID
8630 ret = false;
8631
0e8ffe1b
DV
8632 tmp = I915_READ(PIPECONF(crtc->pipe));
8633 if (!(tmp & PIPECONF_ENABLE))
1729050e 8634 goto out;
0e8ffe1b 8635
9beb5fea
TU
8636 if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
8637 IS_CHERRYVIEW(dev_priv)) {
42571aef
VS
8638 switch (tmp & PIPECONF_BPC_MASK) {
8639 case PIPECONF_6BPC:
8640 pipe_config->pipe_bpp = 18;
8641 break;
8642 case PIPECONF_8BPC:
8643 pipe_config->pipe_bpp = 24;
8644 break;
8645 case PIPECONF_10BPC:
8646 pipe_config->pipe_bpp = 30;
8647 break;
8648 default:
8649 break;
8650 }
8651 }
8652
920a14b2 8653 if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
666a4537 8654 (tmp & PIPECONF_COLOR_RANGE_SELECT))
b5a9fa09
DV
8655 pipe_config->limited_color_range = true;
8656
9d5441de
VS
8657 pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_I9XX) >>
8658 PIPECONF_GAMMA_MODE_SHIFT;
8659
9fdfb8e7
VS
8660 if (IS_CHERRYVIEW(dev_priv))
8661 pipe_config->cgm_mode = I915_READ(CGM_PIPE_MODE(crtc->pipe));
8662
5f29ab23 8663 i9xx_get_pipe_color_config(pipe_config);
3633e511 8664 intel_color_get_config(pipe_config);
5f29ab23 8665
6315b5d3 8666 if (INTEL_GEN(dev_priv) < 4)
282740f7
VS
8667 pipe_config->double_wide = tmp & PIPECONF_DOUBLE_WIDE;
8668
1bd1bd80 8669 intel_get_pipe_timings(crtc, pipe_config);
bc58be60 8670 intel_get_pipe_src_size(crtc, pipe_config);
1bd1bd80 8671
2fa2fe9a
DV
8672 i9xx_get_pfit_config(crtc, pipe_config);
8673
6315b5d3 8674 if (INTEL_GEN(dev_priv) >= 4) {
c231775c 8675 /* No way to read it out on pipes B and C */
920a14b2 8676 if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
c231775c
VS
8677 tmp = dev_priv->chv_dpll_md[crtc->pipe];
8678 else
8679 tmp = I915_READ(DPLL_MD(crtc->pipe));
6c49f241
DV
8680 pipe_config->pixel_multiplier =
8681 ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
8682 >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
8bcc2795 8683 pipe_config->dpll_hw_state.dpll_md = tmp;
50a0bc90 8684 } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
73f67aa8 8685 IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
6c49f241
DV
8686 tmp = I915_READ(DPLL(crtc->pipe));
8687 pipe_config->pixel_multiplier =
8688 ((tmp & SDVO_MULTIPLIER_MASK)
8689 >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
8690 } else {
8691 /* Note that on i915G/GM the pixel multiplier is in the sdvo
8692 * port and will be fixed up in the encoder->get_config
8693 * function. */
8694 pipe_config->pixel_multiplier = 1;
8695 }
8bcc2795 8696 pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
920a14b2 8697 if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
8bcc2795
DV
8698 pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
8699 pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
165e901c
VS
8700 } else {
8701 /* Mask out read-only status bits. */
8702 pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
8703 DPLL_PORTC_READY_MASK |
8704 DPLL_PORTB_READY_MASK);
8bcc2795 8705 }
6c49f241 8706
920a14b2 8707 if (IS_CHERRYVIEW(dev_priv))
70b23a98 8708 chv_crtc_clock_get(crtc, pipe_config);
11a914c2 8709 else if (IS_VALLEYVIEW(dev_priv))
acbec814
JB
8710 vlv_crtc_clock_get(crtc, pipe_config);
8711 else
8712 i9xx_crtc_clock_get(crtc, pipe_config);
18442d08 8713
0f64614d
VS
8714 /*
8715 * Normally the dotclock is filled in by the encoder .get_config()
8716 * but in case the pipe is enabled w/o any ports we need a sane
8717 * default.
8718 */
8719 pipe_config->base.adjusted_mode.crtc_clock =
8720 pipe_config->port_clock / pipe_config->pixel_multiplier;
8721
1729050e
ID
8722 ret = true;
8723
8724out:
0e6e0be4 8725 intel_display_power_put(dev_priv, power_domain, wakeref);
1729050e
ID
8726
8727 return ret;
0e8ffe1b
DV
8728}
8729
c39055b0 8730static void ironlake_init_pch_refclk(struct drm_i915_private *dev_priv)
13d83a67 8731{
13d83a67 8732 struct intel_encoder *encoder;
1c1a24d2 8733 int i;
74cfd7ac 8734 u32 val, final;
13d83a67 8735 bool has_lvds = false;
199e5d79 8736 bool has_cpu_edp = false;
199e5d79 8737 bool has_panel = false;
99eb6a01
KP
8738 bool has_ck505 = false;
8739 bool can_ssc = false;
1c1a24d2 8740 bool using_ssc_source = false;
13d83a67
JB
8741
8742 /* We need to take the global config into account */
c39055b0 8743 for_each_intel_encoder(&dev_priv->drm, encoder) {
199e5d79
KP
8744 switch (encoder->type) {
8745 case INTEL_OUTPUT_LVDS:
8746 has_panel = true;
8747 has_lvds = true;
8748 break;
8749 case INTEL_OUTPUT_EDP:
8750 has_panel = true;
8f4f2797 8751 if (encoder->port == PORT_A)
199e5d79
KP
8752 has_cpu_edp = true;
8753 break;
6847d71b
PZ
8754 default:
8755 break;
13d83a67
JB
8756 }
8757 }
8758
6e266956 8759 if (HAS_PCH_IBX(dev_priv)) {
41aa3448 8760 has_ck505 = dev_priv->vbt.display_clock_mode;
99eb6a01
KP
8761 can_ssc = has_ck505;
8762 } else {
8763 has_ck505 = false;
8764 can_ssc = true;
8765 }
8766
1c1a24d2
L
8767 /* Check if any DPLLs are using the SSC source */
8768 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
8769 u32 temp = I915_READ(PCH_DPLL(i));
8770
8771 if (!(temp & DPLL_VCO_ENABLE))
8772 continue;
8773
8774 if ((temp & PLL_REF_INPUT_MASK) ==
8775 PLLB_REF_INPUT_SPREADSPECTRUMIN) {
8776 using_ssc_source = true;
8777 break;
8778 }
8779 }
8780
8781 DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
8782 has_panel, has_lvds, has_ck505, using_ssc_source);
13d83a67
JB
8783
8784 /* Ironlake: try to setup display ref clock before DPLL
8785 * enabling. This is only under driver's control after
8786 * PCH B stepping, previous chipset stepping should be
8787 * ignoring this setting.
8788 */
74cfd7ac
CW
8789 val = I915_READ(PCH_DREF_CONTROL);
8790
8791 /* As we must carefully and slowly disable/enable each source in turn,
8792 * compute the final state we want first and check if we need to
8793 * make any changes at all.
8794 */
8795 final = val;
8796 final &= ~DREF_NONSPREAD_SOURCE_MASK;
8797 if (has_ck505)
8798 final |= DREF_NONSPREAD_CK505_ENABLE;
8799 else
8800 final |= DREF_NONSPREAD_SOURCE_ENABLE;
8801
8c07eb68 8802 final &= ~DREF_SSC_SOURCE_MASK;
74cfd7ac 8803 final &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
8c07eb68 8804 final &= ~DREF_SSC1_ENABLE;
74cfd7ac
CW
8805
8806 if (has_panel) {
8807 final |= DREF_SSC_SOURCE_ENABLE;
8808
8809 if (intel_panel_use_ssc(dev_priv) && can_ssc)
8810 final |= DREF_SSC1_ENABLE;
8811
8812 if (has_cpu_edp) {
8813 if (intel_panel_use_ssc(dev_priv) && can_ssc)
8814 final |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
8815 else
8816 final |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
8817 } else
8818 final |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
1c1a24d2
L
8819 } else if (using_ssc_source) {
8820 final |= DREF_SSC_SOURCE_ENABLE;
8821 final |= DREF_SSC1_ENABLE;
74cfd7ac
CW
8822 }
8823
8824 if (final == val)
8825 return;
8826
13d83a67 8827 /* Always enable nonspread source */
74cfd7ac 8828 val &= ~DREF_NONSPREAD_SOURCE_MASK;
13d83a67 8829
99eb6a01 8830 if (has_ck505)
74cfd7ac 8831 val |= DREF_NONSPREAD_CK505_ENABLE;
99eb6a01 8832 else
74cfd7ac 8833 val |= DREF_NONSPREAD_SOURCE_ENABLE;
13d83a67 8834
199e5d79 8835 if (has_panel) {
74cfd7ac
CW
8836 val &= ~DREF_SSC_SOURCE_MASK;
8837 val |= DREF_SSC_SOURCE_ENABLE;
13d83a67 8838
199e5d79 8839 /* SSC must be turned on before enabling the CPU output */
99eb6a01 8840 if (intel_panel_use_ssc(dev_priv) && can_ssc) {
199e5d79 8841 DRM_DEBUG_KMS("Using SSC on panel\n");
74cfd7ac 8842 val |= DREF_SSC1_ENABLE;
e77166b5 8843 } else
74cfd7ac 8844 val &= ~DREF_SSC1_ENABLE;
199e5d79
KP
8845
8846 /* Get SSC going before enabling the outputs */
74cfd7ac 8847 I915_WRITE(PCH_DREF_CONTROL, val);
199e5d79
KP
8848 POSTING_READ(PCH_DREF_CONTROL);
8849 udelay(200);
8850
74cfd7ac 8851 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
13d83a67
JB
8852
8853 /* Enable CPU source on CPU attached eDP */
199e5d79 8854 if (has_cpu_edp) {
99eb6a01 8855 if (intel_panel_use_ssc(dev_priv) && can_ssc) {
199e5d79 8856 DRM_DEBUG_KMS("Using SSC on eDP\n");
74cfd7ac 8857 val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
eba905b2 8858 } else
74cfd7ac 8859 val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
199e5d79 8860 } else
74cfd7ac 8861 val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
199e5d79 8862
74cfd7ac 8863 I915_WRITE(PCH_DREF_CONTROL, val);
199e5d79
KP
8864 POSTING_READ(PCH_DREF_CONTROL);
8865 udelay(200);
8866 } else {
1c1a24d2 8867 DRM_DEBUG_KMS("Disabling CPU source output\n");
199e5d79 8868
74cfd7ac 8869 val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
199e5d79
KP
8870
8871 /* Turn off CPU output */
74cfd7ac 8872 val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
199e5d79 8873
74cfd7ac 8874 I915_WRITE(PCH_DREF_CONTROL, val);
199e5d79
KP
8875 POSTING_READ(PCH_DREF_CONTROL);
8876 udelay(200);
8877
1c1a24d2
L
8878 if (!using_ssc_source) {
8879 DRM_DEBUG_KMS("Disabling SSC source\n");
199e5d79 8880
1c1a24d2
L
8881 /* Turn off the SSC source */
8882 val &= ~DREF_SSC_SOURCE_MASK;
8883 val |= DREF_SSC_SOURCE_DISABLE;
f165d283 8884
1c1a24d2
L
8885 /* Turn off SSC1 */
8886 val &= ~DREF_SSC1_ENABLE;
8887
8888 I915_WRITE(PCH_DREF_CONTROL, val);
8889 POSTING_READ(PCH_DREF_CONTROL);
8890 udelay(200);
8891 }
13d83a67 8892 }
74cfd7ac
CW
8893
8894 BUG_ON(val != final);
13d83a67
JB
8895}
8896
f31f2d55 8897static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
dde86e2d 8898{
ba3f4d0a 8899 u32 tmp;
dde86e2d 8900
0ff066a9
PZ
8901 tmp = I915_READ(SOUTH_CHICKEN2);
8902 tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
8903 I915_WRITE(SOUTH_CHICKEN2, tmp);
dde86e2d 8904
cf3598c2
ID
8905 if (wait_for_us(I915_READ(SOUTH_CHICKEN2) &
8906 FDI_MPHY_IOSFSB_RESET_STATUS, 100))
0ff066a9 8907 DRM_ERROR("FDI mPHY reset assert timeout\n");
dde86e2d 8908
0ff066a9
PZ
8909 tmp = I915_READ(SOUTH_CHICKEN2);
8910 tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
8911 I915_WRITE(SOUTH_CHICKEN2, tmp);
dde86e2d 8912
cf3598c2
ID
8913 if (wait_for_us((I915_READ(SOUTH_CHICKEN2) &
8914 FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
0ff066a9 8915 DRM_ERROR("FDI mPHY reset de-assert timeout\n");
f31f2d55
PZ
8916}
8917
8918/* WaMPhyProgramming:hsw */
8919static void lpt_program_fdi_mphy(struct drm_i915_private *dev_priv)
8920{
ba3f4d0a 8921 u32 tmp;
dde86e2d
PZ
8922
8923 tmp = intel_sbi_read(dev_priv, 0x8008, SBI_MPHY);
8924 tmp &= ~(0xFF << 24);
8925 tmp |= (0x12 << 24);
8926 intel_sbi_write(dev_priv, 0x8008, tmp, SBI_MPHY);
8927
dde86e2d
PZ
8928 tmp = intel_sbi_read(dev_priv, 0x2008, SBI_MPHY);
8929 tmp |= (1 << 11);
8930 intel_sbi_write(dev_priv, 0x2008, tmp, SBI_MPHY);
8931
8932 tmp = intel_sbi_read(dev_priv, 0x2108, SBI_MPHY);
8933 tmp |= (1 << 11);
8934 intel_sbi_write(dev_priv, 0x2108, tmp, SBI_MPHY);
8935
dde86e2d
PZ
8936 tmp = intel_sbi_read(dev_priv, 0x206C, SBI_MPHY);
8937 tmp |= (1 << 24) | (1 << 21) | (1 << 18);
8938 intel_sbi_write(dev_priv, 0x206C, tmp, SBI_MPHY);
8939
8940 tmp = intel_sbi_read(dev_priv, 0x216C, SBI_MPHY);
8941 tmp |= (1 << 24) | (1 << 21) | (1 << 18);
8942 intel_sbi_write(dev_priv, 0x216C, tmp, SBI_MPHY);
8943
0ff066a9
PZ
8944 tmp = intel_sbi_read(dev_priv, 0x2080, SBI_MPHY);
8945 tmp &= ~(7 << 13);
8946 tmp |= (5 << 13);
8947 intel_sbi_write(dev_priv, 0x2080, tmp, SBI_MPHY);
dde86e2d 8948
0ff066a9
PZ
8949 tmp = intel_sbi_read(dev_priv, 0x2180, SBI_MPHY);
8950 tmp &= ~(7 << 13);
8951 tmp |= (5 << 13);
8952 intel_sbi_write(dev_priv, 0x2180, tmp, SBI_MPHY);
dde86e2d
PZ
8953
8954 tmp = intel_sbi_read(dev_priv, 0x208C, SBI_MPHY);
8955 tmp &= ~0xFF;
8956 tmp |= 0x1C;
8957 intel_sbi_write(dev_priv, 0x208C, tmp, SBI_MPHY);
8958
8959 tmp = intel_sbi_read(dev_priv, 0x218C, SBI_MPHY);
8960 tmp &= ~0xFF;
8961 tmp |= 0x1C;
8962 intel_sbi_write(dev_priv, 0x218C, tmp, SBI_MPHY);
8963
8964 tmp = intel_sbi_read(dev_priv, 0x2098, SBI_MPHY);
8965 tmp &= ~(0xFF << 16);
8966 tmp |= (0x1C << 16);
8967 intel_sbi_write(dev_priv, 0x2098, tmp, SBI_MPHY);
8968
8969 tmp = intel_sbi_read(dev_priv, 0x2198, SBI_MPHY);
8970 tmp &= ~(0xFF << 16);
8971 tmp |= (0x1C << 16);
8972 intel_sbi_write(dev_priv, 0x2198, tmp, SBI_MPHY);
8973
0ff066a9
PZ
8974 tmp = intel_sbi_read(dev_priv, 0x20C4, SBI_MPHY);
8975 tmp |= (1 << 27);
8976 intel_sbi_write(dev_priv, 0x20C4, tmp, SBI_MPHY);
dde86e2d 8977
0ff066a9
PZ
8978 tmp = intel_sbi_read(dev_priv, 0x21C4, SBI_MPHY);
8979 tmp |= (1 << 27);
8980 intel_sbi_write(dev_priv, 0x21C4, tmp, SBI_MPHY);
dde86e2d 8981
0ff066a9
PZ
8982 tmp = intel_sbi_read(dev_priv, 0x20EC, SBI_MPHY);
8983 tmp &= ~(0xF << 28);
8984 tmp |= (4 << 28);
8985 intel_sbi_write(dev_priv, 0x20EC, tmp, SBI_MPHY);
dde86e2d 8986
0ff066a9
PZ
8987 tmp = intel_sbi_read(dev_priv, 0x21EC, SBI_MPHY);
8988 tmp &= ~(0xF << 28);
8989 tmp |= (4 << 28);
8990 intel_sbi_write(dev_priv, 0x21EC, tmp, SBI_MPHY);
f31f2d55
PZ
8991}
8992
2fa86a1f
PZ
8993/* Implements 3 different sequences from BSpec chapter "Display iCLK
8994 * Programming" based on the parameters passed:
8995 * - Sequence to enable CLKOUT_DP
8996 * - Sequence to enable CLKOUT_DP without spread
8997 * - Sequence to enable CLKOUT_DP for FDI usage and configure PCH FDI I/O
8998 */
c39055b0
ACO
8999static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
9000 bool with_spread, bool with_fdi)
f31f2d55 9001{
ba3f4d0a 9002 u32 reg, tmp;
2fa86a1f
PZ
9003
9004 if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
9005 with_spread = true;
4f8036a2
TU
9006 if (WARN(HAS_PCH_LPT_LP(dev_priv) &&
9007 with_fdi, "LP PCH doesn't have FDI\n"))
2fa86a1f 9008 with_fdi = false;
f31f2d55 9009
a580516d 9010 mutex_lock(&dev_priv->sb_lock);
f31f2d55
PZ
9011
9012 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
9013 tmp &= ~SBI_SSCCTL_DISABLE;
9014 tmp |= SBI_SSCCTL_PATHALT;
9015 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
9016
9017 udelay(24);
9018
2fa86a1f
PZ
9019 if (with_spread) {
9020 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
9021 tmp &= ~SBI_SSCCTL_PATHALT;
9022 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
f31f2d55 9023
2fa86a1f
PZ
9024 if (with_fdi) {
9025 lpt_reset_fdi_mphy(dev_priv);
9026 lpt_program_fdi_mphy(dev_priv);
9027 }
9028 }
dde86e2d 9029
4f8036a2 9030 reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
2fa86a1f
PZ
9031 tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
9032 tmp |= SBI_GEN0_CFG_BUFFENABLE_DISABLE;
9033 intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
c00db246 9034
a580516d 9035 mutex_unlock(&dev_priv->sb_lock);
dde86e2d
PZ
9036}
9037
47701c3b 9038/* Sequence to disable CLKOUT_DP */
46034d2b 9039void lpt_disable_clkout_dp(struct drm_i915_private *dev_priv)
47701c3b 9040{
ba3f4d0a 9041 u32 reg, tmp;
47701c3b 9042
a580516d 9043 mutex_lock(&dev_priv->sb_lock);
47701c3b 9044
4f8036a2 9045 reg = HAS_PCH_LPT_LP(dev_priv) ? SBI_GEN0 : SBI_DBUFF0;
47701c3b
PZ
9046 tmp = intel_sbi_read(dev_priv, reg, SBI_ICLK);
9047 tmp &= ~SBI_GEN0_CFG_BUFFENABLE_DISABLE;
9048 intel_sbi_write(dev_priv, reg, tmp, SBI_ICLK);
9049
9050 tmp = intel_sbi_read(dev_priv, SBI_SSCCTL, SBI_ICLK);
9051 if (!(tmp & SBI_SSCCTL_DISABLE)) {
9052 if (!(tmp & SBI_SSCCTL_PATHALT)) {
9053 tmp |= SBI_SSCCTL_PATHALT;
9054 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
9055 udelay(32);
9056 }
9057 tmp |= SBI_SSCCTL_DISABLE;
9058 intel_sbi_write(dev_priv, SBI_SSCCTL, tmp, SBI_ICLK);
9059 }
9060
a580516d 9061 mutex_unlock(&dev_priv->sb_lock);
47701c3b
PZ
9062}
9063
f7be2c21
VS
9064#define BEND_IDX(steps) ((50 + (steps)) / 5)
9065
ba3f4d0a 9066static const u16 sscdivintphase[] = {
f7be2c21
VS
9067 [BEND_IDX( 50)] = 0x3B23,
9068 [BEND_IDX( 45)] = 0x3B23,
9069 [BEND_IDX( 40)] = 0x3C23,
9070 [BEND_IDX( 35)] = 0x3C23,
9071 [BEND_IDX( 30)] = 0x3D23,
9072 [BEND_IDX( 25)] = 0x3D23,
9073 [BEND_IDX( 20)] = 0x3E23,
9074 [BEND_IDX( 15)] = 0x3E23,
9075 [BEND_IDX( 10)] = 0x3F23,
9076 [BEND_IDX( 5)] = 0x3F23,
9077 [BEND_IDX( 0)] = 0x0025,
9078 [BEND_IDX( -5)] = 0x0025,
9079 [BEND_IDX(-10)] = 0x0125,
9080 [BEND_IDX(-15)] = 0x0125,
9081 [BEND_IDX(-20)] = 0x0225,
9082 [BEND_IDX(-25)] = 0x0225,
9083 [BEND_IDX(-30)] = 0x0325,
9084 [BEND_IDX(-35)] = 0x0325,
9085 [BEND_IDX(-40)] = 0x0425,
9086 [BEND_IDX(-45)] = 0x0425,
9087 [BEND_IDX(-50)] = 0x0525,
9088};
9089
9090/*
9091 * Bend CLKOUT_DP
9092 * steps -50 to 50 inclusive, in steps of 5
9093 * < 0 slow down the clock, > 0 speed up the clock, 0 == no bend (135MHz)
9094 * change in clock period = -(steps / 10) * 5.787 ps
9095 */
9096static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
9097{
ba3f4d0a 9098 u32 tmp;
f7be2c21
VS
9099 int idx = BEND_IDX(steps);
9100
9101 if (WARN_ON(steps % 5 != 0))
9102 return;
9103
9104 if (WARN_ON(idx >= ARRAY_SIZE(sscdivintphase)))
9105 return;
9106
9107 mutex_lock(&dev_priv->sb_lock);
9108
9109 if (steps % 10 != 0)
9110 tmp = 0xAAAAAAAB;
9111 else
9112 tmp = 0x00000000;
9113 intel_sbi_write(dev_priv, SBI_SSCDITHPHASE, tmp, SBI_ICLK);
9114
9115 tmp = intel_sbi_read(dev_priv, SBI_SSCDIVINTPHASE, SBI_ICLK);
9116 tmp &= 0xffff0000;
9117 tmp |= sscdivintphase[idx];
9118 intel_sbi_write(dev_priv, SBI_SSCDIVINTPHASE, tmp, SBI_ICLK);
9119
9120 mutex_unlock(&dev_priv->sb_lock);
9121}
9122
9123#undef BEND_IDX
9124
c39055b0 9125static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
bf8fa3d3 9126{
bf8fa3d3
PZ
9127 struct intel_encoder *encoder;
9128 bool has_vga = false;
9129
c39055b0 9130 for_each_intel_encoder(&dev_priv->drm, encoder) {
bf8fa3d3
PZ
9131 switch (encoder->type) {
9132 case INTEL_OUTPUT_ANALOG:
9133 has_vga = true;
9134 break;
6847d71b
PZ
9135 default:
9136 break;
bf8fa3d3
PZ
9137 }
9138 }
9139
f7be2c21 9140 if (has_vga) {
c39055b0
ACO
9141 lpt_bend_clkout_dp(dev_priv, 0);
9142 lpt_enable_clkout_dp(dev_priv, true, true);
f7be2c21 9143 } else {
c39055b0 9144 lpt_disable_clkout_dp(dev_priv);
f7be2c21 9145 }
bf8fa3d3
PZ
9146}
9147
dde86e2d
PZ
9148/*
9149 * Initialize reference clocks when the driver loads
9150 */
c39055b0 9151void intel_init_pch_refclk(struct drm_i915_private *dev_priv)
dde86e2d 9152{
6e266956 9153 if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv))
c39055b0 9154 ironlake_init_pch_refclk(dev_priv);
6e266956 9155 else if (HAS_PCH_LPT(dev_priv))
c39055b0 9156 lpt_init_pch_refclk(dev_priv);
dde86e2d
PZ
9157}
9158
fdf73510 9159static void ironlake_set_pipeconf(const struct intel_crtc_state *crtc_state)
79e53945 9160{
fdf73510
ML
9161 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
9162 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9163 enum pipe pipe = crtc->pipe;
ba3f4d0a 9164 u32 val;
c8203565 9165
78114071 9166 val = 0;
c8203565 9167
fdf73510 9168 switch (crtc_state->pipe_bpp) {
c8203565 9169 case 18:
dfd07d72 9170 val |= PIPECONF_6BPC;
c8203565
PZ
9171 break;
9172 case 24:
dfd07d72 9173 val |= PIPECONF_8BPC;
c8203565
PZ
9174 break;
9175 case 30:
dfd07d72 9176 val |= PIPECONF_10BPC;
c8203565
PZ
9177 break;
9178 case 36:
dfd07d72 9179 val |= PIPECONF_12BPC;
c8203565
PZ
9180 break;
9181 default:
cc769b62
PZ
9182 /* Case prevented by intel_choose_pipe_bpp_dither. */
9183 BUG();
c8203565
PZ
9184 }
9185
fdf73510 9186 if (crtc_state->dither)
c8203565
PZ
9187 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
9188
fdf73510 9189 if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
c8203565
PZ
9190 val |= PIPECONF_INTERLACED_ILK;
9191 else
9192 val |= PIPECONF_PROGRESSIVE;
9193
fdf73510 9194 if (crtc_state->limited_color_range)
3685a8f3 9195 val |= PIPECONF_COLOR_RANGE_SELECT;
3685a8f3 9196
9d5441de
VS
9197 val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
9198
c8203565
PZ
9199 I915_WRITE(PIPECONF(pipe), val);
9200 POSTING_READ(PIPECONF(pipe));
9201}
9202
fdf73510 9203static void haswell_set_pipeconf(const struct intel_crtc_state *crtc_state)
ee2b0b38 9204{
fdf73510
ML
9205 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
9206 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9207 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
391bf048 9208 u32 val = 0;
ee2b0b38 9209
fdf73510 9210 if (IS_HASWELL(dev_priv) && crtc_state->dither)
ee2b0b38
PZ
9211 val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP);
9212
fdf73510 9213 if (crtc_state->base.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE)
ee2b0b38
PZ
9214 val |= PIPECONF_INTERLACED_ILK;
9215 else
9216 val |= PIPECONF_PROGRESSIVE;
9217
702e7a56
PZ
9218 I915_WRITE(PIPECONF(cpu_transcoder), val);
9219 POSTING_READ(PIPECONF(cpu_transcoder));
391bf048
JN
9220}
9221
9b11215e 9222static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
391bf048 9223{
9b11215e
VS
9224 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
9225 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9226 u32 val = 0;
756f85cf 9227
9b11215e
VS
9228 switch (crtc_state->pipe_bpp) {
9229 case 18:
9230 val |= PIPEMISC_DITHER_6_BPC;
9231 break;
9232 case 24:
9233 val |= PIPEMISC_DITHER_8_BPC;
9234 break;
9235 case 30:
9236 val |= PIPEMISC_DITHER_10_BPC;
9237 break;
9238 case 36:
9239 val |= PIPEMISC_DITHER_12_BPC;
9240 break;
9241 default:
9242 MISSING_CASE(crtc_state->pipe_bpp);
9243 break;
9244 }
756f85cf 9245
9b11215e
VS
9246 if (crtc_state->dither)
9247 val |= PIPEMISC_DITHER_ENABLE | PIPEMISC_DITHER_TYPE_SP;
756f85cf 9248
9b11215e
VS
9249 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
9250 crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
9251 val |= PIPEMISC_OUTPUT_COLORSPACE_YUV;
8c79f844 9252
9b11215e
VS
9253 if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
9254 val |= PIPEMISC_YUV420_ENABLE |
9255 PIPEMISC_YUV420_MODE_FULL_BLEND;
b22ca995 9256
09b25812 9257 if (INTEL_GEN(dev_priv) >= 11 &&
b7ffc4a8
VS
9258 (crtc_state->active_planes & ~(icl_hdr_plane_mask() |
9259 BIT(PLANE_CURSOR))) == 0)
09b25812
VS
9260 val |= PIPEMISC_HDR_MODE_PRECISION;
9261
9b11215e 9262 I915_WRITE(PIPEMISC(crtc->pipe), val);
ee2b0b38
PZ
9263}
9264
8ae89743
VK
9265int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
9266{
9267 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
9268 u32 tmp;
9269
9270 tmp = I915_READ(PIPEMISC(crtc->pipe));
9271
9272 switch (tmp & PIPEMISC_DITHER_BPC_MASK) {
9273 case PIPEMISC_DITHER_6_BPC:
9274 return 18;
9275 case PIPEMISC_DITHER_8_BPC:
9276 return 24;
9277 case PIPEMISC_DITHER_10_BPC:
9278 return 30;
9279 case PIPEMISC_DITHER_12_BPC:
9280 return 36;
9281 default:
9282 MISSING_CASE(tmp);
9283 return 0;
9284 }
9285}
9286
d4b1931c
PZ
9287int ironlake_get_lanes_required(int target_clock, int link_bw, int bpp)
9288{
9289 /*
9290 * Account for spread spectrum to avoid
9291 * oversubscribing the link. Max center spread
9292 * is 2.5%; use 5% for safety's sake.
9293 */
9294 u32 bps = target_clock * bpp * 21 / 20;
619d4d04 9295 return DIV_ROUND_UP(bps, link_bw * 8);
d4b1931c
PZ
9296}
9297
7429e9d4 9298static bool ironlake_needs_fb_cb_tune(struct dpll *dpll, int factor)
6cf86a5e 9299{
7429e9d4 9300 return i9xx_dpll_compute_m(dpll) < factor * dpll->n;
f48d8f23
PZ
9301}
9302
d2daff2c 9303static void ironlake_compute_dpll(struct intel_crtc *crtc,
b75ca6f6 9304 struct intel_crtc_state *crtc_state,
9e2c8475 9305 struct dpll *reduced_clock)
79e53945 9306{
d2daff2c 9307 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
b75ca6f6 9308 u32 dpll, fp, fp2;
3d6e9ee0 9309 int factor;
79e53945 9310
c1858123 9311 /* Enable autotuning of the PLL clock (if permissible) */
8febb297 9312 factor = 21;
3d6e9ee0 9313 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
8febb297 9314 if ((intel_panel_use_ssc(dev_priv) &&
e91e941b 9315 dev_priv->vbt.lvds_ssc_freq == 100000) ||
d2daff2c
VS
9316 (HAS_PCH_IBX(dev_priv) &&
9317 intel_is_dual_link_lvds(dev_priv)))
8febb297 9318 factor = 25;
27b680f9 9319 } else if (crtc_state->sdvo_tv_clock) {
8febb297 9320 factor = 20;
27b680f9 9321 }
c1858123 9322
b75ca6f6
ACO
9323 fp = i9xx_dpll_compute_fp(&crtc_state->dpll);
9324
190f68c5 9325 if (ironlake_needs_fb_cb_tune(&crtc_state->dpll, factor))
b75ca6f6
ACO
9326 fp |= FP_CB_TUNE;
9327
9328 if (reduced_clock) {
9329 fp2 = i9xx_dpll_compute_fp(reduced_clock);
2c07245f 9330
b75ca6f6
ACO
9331 if (reduced_clock->m < factor * reduced_clock->n)
9332 fp2 |= FP_CB_TUNE;
9333 } else {
9334 fp2 = fp;
9335 }
9a7c7890 9336
5eddb70b 9337 dpll = 0;
2c07245f 9338
3d6e9ee0 9339 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS))
a07d6787
EA
9340 dpll |= DPLLB_MODE_LVDS;
9341 else
9342 dpll |= DPLLB_MODE_DAC_SERIAL;
198a037f 9343
190f68c5 9344 dpll |= (crtc_state->pixel_multiplier - 1)
ef1b460d 9345 << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
198a037f 9346
3d6e9ee0
VS
9347 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO) ||
9348 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
4a33e48d 9349 dpll |= DPLL_SDVO_HIGH_SPEED;
3d6e9ee0 9350
37a5650b 9351 if (intel_crtc_has_dp_encoder(crtc_state))
4a33e48d 9352 dpll |= DPLL_SDVO_HIGH_SPEED;
79e53945 9353
7d7f8633
VS
9354 /*
9355 * The high speed IO clock is only really required for
9356 * SDVO/HDMI/DP, but we also enable it for CRT to make it
9357 * possible to share the DPLL between CRT and HDMI. Enabling
9358 * the clock needlessly does no real harm, except use up a
9359 * bit of power potentially.
9360 *
9361 * We'll limit this to IVB with 3 pipes, since it has only two
9362 * DPLLs and so DPLL sharing is the only way to get three pipes
9363 * driving PCH ports at the same time. On SNB we could do this,
9364 * and potentially avoid enabling the second DPLL, but it's not
9365 * clear if it''s a win or loss power wise. No point in doing
9366 * this on ILK at all since it has a fixed DPLL<->pipe mapping.
9367 */
9368 if (INTEL_INFO(dev_priv)->num_pipes == 3 &&
9369 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
9370 dpll |= DPLL_SDVO_HIGH_SPEED;
9371
a07d6787 9372 /* compute bitmask from p1 value */
190f68c5 9373 dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
a07d6787 9374 /* also FPA1 */
190f68c5 9375 dpll |= (1 << (crtc_state->dpll.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
a07d6787 9376
190f68c5 9377 switch (crtc_state->dpll.p2) {
a07d6787
EA
9378 case 5:
9379 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
9380 break;
9381 case 7:
9382 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7;
9383 break;
9384 case 10:
9385 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10;
9386 break;
9387 case 14:
9388 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
9389 break;
79e53945
JB
9390 }
9391
3d6e9ee0
VS
9392 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
9393 intel_panel_use_ssc(dev_priv))
43565a06 9394 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
79e53945
JB
9395 else
9396 dpll |= PLL_REF_INPUT_DREFCLK;
9397
b75ca6f6
ACO
9398 dpll |= DPLL_VCO_ENABLE;
9399
9400 crtc_state->dpll_hw_state.dpll = dpll;
9401 crtc_state->dpll_hw_state.fp0 = fp;
9402 crtc_state->dpll_hw_state.fp1 = fp2;
de13a2e3
PZ
9403}
9404
190f68c5
ACO
9405static int ironlake_crtc_compute_clock(struct intel_crtc *crtc,
9406 struct intel_crtc_state *crtc_state)
de13a2e3 9407{
d2daff2c 9408 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
1b6f4958 9409 const struct intel_limit *limit;
997c030c 9410 int refclk = 120000;
de13a2e3 9411
dd3cd74a
ACO
9412 memset(&crtc_state->dpll_hw_state, 0,
9413 sizeof(crtc_state->dpll_hw_state));
9414
ded220e2
ACO
9415 /* CPU eDP is the only output that doesn't need a PCH PLL of its own. */
9416 if (!crtc_state->has_pch_encoder)
9417 return 0;
79e53945 9418
2d84d2b3 9419 if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
997c030c
ACO
9420 if (intel_panel_use_ssc(dev_priv)) {
9421 DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
9422 dev_priv->vbt.lvds_ssc_freq);
9423 refclk = dev_priv->vbt.lvds_ssc_freq;
9424 }
9425
d2daff2c 9426 if (intel_is_dual_link_lvds(dev_priv)) {
997c030c
ACO
9427 if (refclk == 100000)
9428 limit = &intel_limits_ironlake_dual_lvds_100m;
9429 else
9430 limit = &intel_limits_ironlake_dual_lvds;
9431 } else {
9432 if (refclk == 100000)
9433 limit = &intel_limits_ironlake_single_lvds_100m;
9434 else
9435 limit = &intel_limits_ironlake_single_lvds;
9436 }
9437 } else {
9438 limit = &intel_limits_ironlake_dac;
9439 }
9440
364ee29d 9441 if (!crtc_state->clock_set &&
997c030c
ACO
9442 !g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
9443 refclk, NULL, &crtc_state->dpll)) {
364ee29d
ACO
9444 DRM_ERROR("Couldn't find PLL settings for mode!\n");
9445 return -EINVAL;
f47709a9 9446 }
79e53945 9447
cbaa3315 9448 ironlake_compute_dpll(crtc, crtc_state, NULL);
66e985c0 9449
cc089e8a 9450 if (!intel_get_shared_dpll(crtc_state, NULL)) {
43031788
CW
9451 DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
9452 pipe_name(crtc->pipe));
ded220e2 9453 return -EINVAL;
3fb37703 9454 }
79e53945 9455
c8f7a0db 9456 return 0;
79e53945
JB
9457}
9458
eb14cb74
VS
9459static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
9460 struct intel_link_m_n *m_n)
9461{
9462 struct drm_device *dev = crtc->base.dev;
fac5e23e 9463 struct drm_i915_private *dev_priv = to_i915(dev);
eb14cb74
VS
9464 enum pipe pipe = crtc->pipe;
9465
9466 m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
9467 m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
9468 m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
9469 & ~TU_SIZE_MASK;
9470 m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
9471 m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
9472 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
9473}
9474
9475static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
9476 enum transcoder transcoder,
b95af8be
VK
9477 struct intel_link_m_n *m_n,
9478 struct intel_link_m_n *m2_n2)
72419203 9479{
6315b5d3 9480 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
eb14cb74 9481 enum pipe pipe = crtc->pipe;
72419203 9482
6315b5d3 9483 if (INTEL_GEN(dev_priv) >= 5) {
eb14cb74
VS
9484 m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
9485 m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
9486 m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
9487 & ~TU_SIZE_MASK;
9488 m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
9489 m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
9490 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
4207c8b9
ML
9491
9492 if (m2_n2 && transcoder_has_m2_n2(dev_priv, transcoder)) {
b95af8be
VK
9493 m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
9494 m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder));
9495 m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder))
9496 & ~TU_SIZE_MASK;
9497 m2_n2->gmch_n = I915_READ(PIPE_DATA_N2(transcoder));
9498 m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder))
9499 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
9500 }
eb14cb74
VS
9501 } else {
9502 m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
9503 m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
9504 m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
9505 & ~TU_SIZE_MASK;
9506 m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
9507 m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
9508 & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
9509 }
9510}
9511
9512void intel_dp_get_m_n(struct intel_crtc *crtc,
5cec258b 9513 struct intel_crtc_state *pipe_config)
eb14cb74 9514{
681a8504 9515 if (pipe_config->has_pch_encoder)
eb14cb74
VS
9516 intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n);
9517 else
9518 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
b95af8be
VK
9519 &pipe_config->dp_m_n,
9520 &pipe_config->dp_m2_n2);
eb14cb74 9521}
72419203 9522
eb14cb74 9523static void ironlake_get_fdi_m_n_config(struct intel_crtc *crtc,
5cec258b 9524 struct intel_crtc_state *pipe_config)
eb14cb74
VS
9525{
9526 intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder,
b95af8be 9527 &pipe_config->fdi_m_n, NULL);
72419203
DV
9528}
9529
bd2e244f 9530static void skylake_get_pfit_config(struct intel_crtc *crtc,
5cec258b 9531 struct intel_crtc_state *pipe_config)
bd2e244f
JB
9532{
9533 struct drm_device *dev = crtc->base.dev;
fac5e23e 9534 struct drm_i915_private *dev_priv = to_i915(dev);
a1b2278e 9535 struct intel_crtc_scaler_state *scaler_state = &pipe_config->scaler_state;
ba3f4d0a 9536 u32 ps_ctrl = 0;
a1b2278e
CK
9537 int id = -1;
9538 int i;
bd2e244f 9539
a1b2278e
CK
9540 /* find scaler attached to this pipe */
9541 for (i = 0; i < crtc->num_scalers; i++) {
9542 ps_ctrl = I915_READ(SKL_PS_CTRL(crtc->pipe, i));
9543 if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) {
9544 id = i;
9545 pipe_config->pch_pfit.enabled = true;
9546 pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i));
9547 pipe_config->pch_pfit.size = I915_READ(SKL_PS_WIN_SZ(crtc->pipe, i));
0cdc1d07 9548 scaler_state->scalers[i].in_use = true;
a1b2278e
CK
9549 break;
9550 }
9551 }
bd2e244f 9552
a1b2278e
CK
9553 scaler_state->scaler_id = id;
9554 if (id >= 0) {
9555 scaler_state->scaler_users |= (1 << SKL_CRTC_INDEX);
9556 } else {
9557 scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX);
bd2e244f
JB
9558 }
9559}
9560
5724dbd1
DL
9561static void
9562skylake_get_initial_plane_config(struct intel_crtc *crtc,
9563 struct intel_initial_plane_config *plane_config)
bc8d7dff
DL
9564{
9565 struct drm_device *dev = crtc->base.dev;
fac5e23e 9566 struct drm_i915_private *dev_priv = to_i915(dev);
282e83ef
VS
9567 struct intel_plane *plane = to_intel_plane(crtc->base.primary);
9568 enum plane_id plane_id = plane->id;
eade6c89 9569 enum pipe pipe;
4036c78c 9570 u32 val, base, offset, stride_mult, tiling, alpha;
bc8d7dff 9571 int fourcc, pixel_format;
6761dd31 9572 unsigned int aligned_height;
bc8d7dff 9573 struct drm_framebuffer *fb;
1b842c89 9574 struct intel_framebuffer *intel_fb;
bc8d7dff 9575
eade6c89 9576 if (!plane->get_hw_state(plane, &pipe))
2924b8cc
VS
9577 return;
9578
eade6c89
VS
9579 WARN_ON(pipe != crtc->pipe);
9580
d9806c9f 9581 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
1b842c89 9582 if (!intel_fb) {
bc8d7dff
DL
9583 DRM_DEBUG_KMS("failed to alloc fb\n");
9584 return;
9585 }
9586
1b842c89
DL
9587 fb = &intel_fb->base;
9588
d2e9f5fc
VS
9589 fb->dev = dev;
9590
282e83ef 9591 val = I915_READ(PLANE_CTL(pipe, plane_id));
42a7b088 9592
b5972776
JA
9593 if (INTEL_GEN(dev_priv) >= 11)
9594 pixel_format = val & ICL_PLANE_CTL_FORMAT_MASK;
9595 else
9596 pixel_format = val & PLANE_CTL_FORMAT_MASK;
4036c78c
JA
9597
9598 if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
282e83ef 9599 alpha = I915_READ(PLANE_COLOR_CTL(pipe, plane_id));
4036c78c
JA
9600 alpha &= PLANE_COLOR_ALPHA_MASK;
9601 } else {
9602 alpha = val & PLANE_CTL_ALPHA_MASK;
9603 }
9604
bc8d7dff 9605 fourcc = skl_format_to_fourcc(pixel_format,
4036c78c 9606 val & PLANE_CTL_ORDER_RGBX, alpha);
2f3f4763 9607 fb->format = drm_format_info(fourcc);
bc8d7dff 9608
40f46283
DL
9609 tiling = val & PLANE_CTL_TILED_MASK;
9610 switch (tiling) {
9611 case PLANE_CTL_TILED_LINEAR:
2f075565 9612 fb->modifier = DRM_FORMAT_MOD_LINEAR;
40f46283
DL
9613 break;
9614 case PLANE_CTL_TILED_X:
9615 plane_config->tiling = I915_TILING_X;
bae781b2 9616 fb->modifier = I915_FORMAT_MOD_X_TILED;
40f46283
DL
9617 break;
9618 case PLANE_CTL_TILED_Y:
914a4fd8 9619 plane_config->tiling = I915_TILING_Y;
53867b46 9620 if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
2e2adb05
VS
9621 fb->modifier = I915_FORMAT_MOD_Y_TILED_CCS;
9622 else
9623 fb->modifier = I915_FORMAT_MOD_Y_TILED;
40f46283
DL
9624 break;
9625 case PLANE_CTL_TILED_YF:
53867b46 9626 if (val & PLANE_CTL_RENDER_DECOMPRESSION_ENABLE)
2e2adb05
VS
9627 fb->modifier = I915_FORMAT_MOD_Yf_TILED_CCS;
9628 else
9629 fb->modifier = I915_FORMAT_MOD_Yf_TILED;
40f46283
DL
9630 break;
9631 default:
9632 MISSING_CASE(tiling);
9633 goto error;
9634 }
9635
f43348a3
VS
9636 /*
9637 * DRM_MODE_ROTATE_ is counter clockwise to stay compatible with Xrandr
9638 * while i915 HW rotation is clockwise, thats why this swapping.
9639 */
9640 switch (val & PLANE_CTL_ROTATE_MASK) {
9641 case PLANE_CTL_ROTATE_0:
9642 plane_config->rotation = DRM_MODE_ROTATE_0;
9643 break;
9644 case PLANE_CTL_ROTATE_90:
9645 plane_config->rotation = DRM_MODE_ROTATE_270;
9646 break;
9647 case PLANE_CTL_ROTATE_180:
9648 plane_config->rotation = DRM_MODE_ROTATE_180;
9649 break;
9650 case PLANE_CTL_ROTATE_270:
9651 plane_config->rotation = DRM_MODE_ROTATE_90;
9652 break;
9653 }
9654
9655 if (INTEL_GEN(dev_priv) >= 10 &&
9656 val & PLANE_CTL_FLIP_HORIZONTAL)
9657 plane_config->rotation |= DRM_MODE_REFLECT_X;
9658
282e83ef 9659 base = I915_READ(PLANE_SURF(pipe, plane_id)) & 0xfffff000;
bc8d7dff
DL
9660 plane_config->base = base;
9661
282e83ef 9662 offset = I915_READ(PLANE_OFFSET(pipe, plane_id));
bc8d7dff 9663
282e83ef 9664 val = I915_READ(PLANE_SIZE(pipe, plane_id));
bc8d7dff
DL
9665 fb->height = ((val >> 16) & 0xfff) + 1;
9666 fb->width = ((val >> 0) & 0x1fff) + 1;
9667
282e83ef 9668 val = I915_READ(PLANE_STRIDE(pipe, plane_id));
b3cf5c06 9669 stride_mult = skl_plane_stride_mult(fb, 0, DRM_MODE_ROTATE_0);
bc8d7dff
DL
9670 fb->pitches[0] = (val & 0x3ff) * stride_mult;
9671
d88c4afd 9672 aligned_height = intel_fb_align_height(fb, 0, fb->height);
bc8d7dff 9673
f37b5c2b 9674 plane_config->size = fb->pitches[0] * aligned_height;
bc8d7dff 9675
282e83ef
VS
9676 DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
9677 crtc->base.name, plane->base.name, fb->width, fb->height,
272725c7 9678 fb->format->cpp[0] * 8, base, fb->pitches[0],
bc8d7dff
DL
9679 plane_config->size);
9680
2d14030b 9681 plane_config->fb = intel_fb;
bc8d7dff
DL
9682 return;
9683
9684error:
d1a3a036 9685 kfree(intel_fb);
bc8d7dff
DL
9686}
9687
2fa2fe9a 9688static void ironlake_get_pfit_config(struct intel_crtc *crtc,
5cec258b 9689 struct intel_crtc_state *pipe_config)
2fa2fe9a
DV
9690{
9691 struct drm_device *dev = crtc->base.dev;
fac5e23e 9692 struct drm_i915_private *dev_priv = to_i915(dev);
ba3f4d0a 9693 u32 tmp;
2fa2fe9a
DV
9694
9695 tmp = I915_READ(PF_CTL(crtc->pipe));
9696
9697 if (tmp & PF_ENABLE) {
fd4daa9c 9698 pipe_config->pch_pfit.enabled = true;
2fa2fe9a
DV
9699 pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
9700 pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));
cb8b2a30
DV
9701
9702 /* We currently do not free assignements of panel fitters on
9703 * ivb/hsw (since we don't use the higher upscaling modes which
9704 * differentiates them) so just WARN about this case for now. */
cf819eff 9705 if (IS_GEN(dev_priv, 7)) {
cb8b2a30
DV
9706 WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
9707 PF_PIPE_SEL_IVB(crtc->pipe));
9708 }
2fa2fe9a 9709 }
79e53945
JB
9710}
9711
0e8ffe1b 9712static bool ironlake_get_pipe_config(struct intel_crtc *crtc,
5cec258b 9713 struct intel_crtc_state *pipe_config)
0e8ffe1b
DV
9714{
9715 struct drm_device *dev = crtc->base.dev;
fac5e23e 9716 struct drm_i915_private *dev_priv = to_i915(dev);
1729050e 9717 enum intel_display_power_domain power_domain;
0e6e0be4 9718 intel_wakeref_t wakeref;
ba3f4d0a 9719 u32 tmp;
1729050e 9720 bool ret;
0e8ffe1b 9721
1729050e 9722 power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
0e6e0be4
CW
9723 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
9724 if (!wakeref)
930e8c9e
PZ
9725 return false;
9726
d9facae6 9727 pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
e143a21c 9728 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
8106ddbd 9729 pipe_config->shared_dpll = NULL;
eccb140b 9730
1729050e 9731 ret = false;
0e8ffe1b
DV
9732 tmp = I915_READ(PIPECONF(crtc->pipe));
9733 if (!(tmp & PIPECONF_ENABLE))
1729050e 9734 goto out;
0e8ffe1b 9735
42571aef
VS
9736 switch (tmp & PIPECONF_BPC_MASK) {
9737 case PIPECONF_6BPC:
9738 pipe_config->pipe_bpp = 18;
9739 break;
9740 case PIPECONF_8BPC:
9741 pipe_config->pipe_bpp = 24;
9742 break;
9743 case PIPECONF_10BPC:
9744 pipe_config->pipe_bpp = 30;
9745 break;
9746 case PIPECONF_12BPC:
9747 pipe_config->pipe_bpp = 36;
9748 break;
9749 default:
9750 break;
9751 }
9752
b5a9fa09
DV
9753 if (tmp & PIPECONF_COLOR_RANGE_SELECT)
9754 pipe_config->limited_color_range = true;
9755
9d5441de
VS
9756 pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_ILK) >>
9757 PIPECONF_GAMMA_MODE_SHIFT;
9758
a1f1e61b
VS
9759 pipe_config->csc_mode = I915_READ(PIPE_CSC_MODE(crtc->pipe));
9760
5f29ab23 9761 i9xx_get_pipe_color_config(pipe_config);
3633e511 9762 intel_color_get_config(pipe_config);
5f29ab23 9763
ab9412ba 9764 if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
66e985c0 9765 struct intel_shared_dpll *pll;
8106ddbd 9766 enum intel_dpll_id pll_id;
66e985c0 9767
88adfff1
DV
9768 pipe_config->has_pch_encoder = true;
9769
627eb5a3
DV
9770 tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
9771 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
9772 FDI_DP_PORT_WIDTH_SHIFT) + 1;
72419203
DV
9773
9774 ironlake_get_fdi_m_n_config(crtc, pipe_config);
6c49f241 9775
2d1fe073 9776 if (HAS_PCH_IBX(dev_priv)) {
d9a7bc67
ID
9777 /*
9778 * The pipe->pch transcoder and pch transcoder->pll
9779 * mapping is fixed.
9780 */
8106ddbd 9781 pll_id = (enum intel_dpll_id) crtc->pipe;
c0d43d62
DV
9782 } else {
9783 tmp = I915_READ(PCH_DPLL_SEL);
9784 if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
8106ddbd 9785 pll_id = DPLL_ID_PCH_PLL_B;
c0d43d62 9786 else
8106ddbd 9787 pll_id= DPLL_ID_PCH_PLL_A;
c0d43d62 9788 }
66e985c0 9789
8106ddbd
ACO
9790 pipe_config->shared_dpll =
9791 intel_get_shared_dpll_by_id(dev_priv, pll_id);
9792 pll = pipe_config->shared_dpll;
66e985c0 9793
ee1398ba
LDM
9794 WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
9795 &pipe_config->dpll_hw_state));
c93f54cf
DV
9796
9797 tmp = pipe_config->dpll_hw_state.dpll;
9798 pipe_config->pixel_multiplier =
9799 ((tmp & PLL_REF_SDVO_HDMI_MULTIPLIER_MASK)
9800 >> PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT) + 1;
18442d08
VS
9801
9802 ironlake_pch_clock_get(crtc, pipe_config);
6c49f241
DV
9803 } else {
9804 pipe_config->pixel_multiplier = 1;
627eb5a3
DV
9805 }
9806
1bd1bd80 9807 intel_get_pipe_timings(crtc, pipe_config);
bc58be60 9808 intel_get_pipe_src_size(crtc, pipe_config);
1bd1bd80 9809
2fa2fe9a
DV
9810 ironlake_get_pfit_config(crtc, pipe_config);
9811
1729050e
ID
9812 ret = true;
9813
9814out:
0e6e0be4 9815 intel_display_power_put(dev_priv, power_domain, wakeref);
1729050e
ID
9816
9817 return ret;
0e8ffe1b 9818}
190f68c5
ACO
9819static int haswell_crtc_compute_clock(struct intel_crtc *crtc,
9820 struct intel_crtc_state *crtc_state)
09b4ddf9 9821{
70a057b7 9822 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
5a0b385e
VS
9823 struct intel_atomic_state *state =
9824 to_intel_atomic_state(crtc_state->base.state);
9825
70a057b7 9826 if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI) ||
2dd24a9c 9827 INTEL_GEN(dev_priv) >= 11) {
44a126ba 9828 struct intel_encoder *encoder =
5a0b385e 9829 intel_get_crtc_new_encoder(state, crtc_state);
44a126ba 9830
cc089e8a 9831 if (!intel_get_shared_dpll(crtc_state, encoder)) {
43031788
CW
9832 DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
9833 pipe_name(crtc->pipe));
af3997b5 9834 return -EINVAL;
44a126ba 9835 }
af3997b5 9836 }
716c2e55 9837
c8f7a0db 9838 return 0;
79e53945
JB
9839}
9840
8b0f7e06
KM
9841static void cannonlake_get_ddi_pll(struct drm_i915_private *dev_priv,
9842 enum port port,
9843 struct intel_crtc_state *pipe_config)
9844{
9845 enum intel_dpll_id id;
9846 u32 temp;
9847
9848 temp = I915_READ(DPCLKA_CFGCR0) & DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
dfbd4508 9849 id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);
8b0f7e06
KM
9850
9851 if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL2))
9852 return;
9853
9854 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
9855}
9856
970888e7
PZ
9857static void icelake_get_ddi_pll(struct drm_i915_private *dev_priv,
9858 enum port port,
9859 struct intel_crtc_state *pipe_config)
9860{
9861 enum intel_dpll_id id;
9862 u32 temp;
9863
9864 /* TODO: TBT pll not implemented. */
8ea59e67 9865 if (intel_port_is_combophy(dev_priv, port)) {
970888e7
PZ
9866 temp = I915_READ(DPCLKA_CFGCR0_ICL) &
9867 DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
9868 id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);
8ea59e67 9869 } else if (intel_port_is_tc(dev_priv, port)) {
584fca11 9870 id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv, port));
8ea59e67
VK
9871 } else {
9872 WARN(1, "Invalid port %x\n", port);
970888e7
PZ
9873 return;
9874 }
9875
9876 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
9877}
9878
3760b59c
S
9879static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
9880 enum port port,
9881 struct intel_crtc_state *pipe_config)
9882{
8106ddbd
ACO
9883 enum intel_dpll_id id;
9884
3760b59c
S
9885 switch (port) {
9886 case PORT_A:
08250c4b 9887 id = DPLL_ID_SKL_DPLL0;
3760b59c
S
9888 break;
9889 case PORT_B:
08250c4b 9890 id = DPLL_ID_SKL_DPLL1;
3760b59c
S
9891 break;
9892 case PORT_C:
08250c4b 9893 id = DPLL_ID_SKL_DPLL2;
3760b59c
S
9894 break;
9895 default:
9896 DRM_ERROR("Incorrect port type\n");
8106ddbd 9897 return;
3760b59c 9898 }
8106ddbd
ACO
9899
9900 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
3760b59c
S
9901}
9902
96b7dfb7
S
9903static void skylake_get_ddi_pll(struct drm_i915_private *dev_priv,
9904 enum port port,
5cec258b 9905 struct intel_crtc_state *pipe_config)
96b7dfb7 9906{
8106ddbd 9907 enum intel_dpll_id id;
a3c988ea 9908 u32 temp;
96b7dfb7
S
9909
9910 temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
c856052a 9911 id = temp >> (port * 3 + 1);
96b7dfb7 9912
c856052a 9913 if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL3))
8106ddbd 9914 return;
8106ddbd
ACO
9915
9916 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
96b7dfb7
S
9917}
9918
7d2c8175
DL
9919static void haswell_get_ddi_pll(struct drm_i915_private *dev_priv,
9920 enum port port,
5cec258b 9921 struct intel_crtc_state *pipe_config)
7d2c8175 9922{
8106ddbd 9923 enum intel_dpll_id id;
ba3f4d0a 9924 u32 ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
8106ddbd 9925
c856052a 9926 switch (ddi_pll_sel) {
7d2c8175 9927 case PORT_CLK_SEL_WRPLL1:
8106ddbd 9928 id = DPLL_ID_WRPLL1;
7d2c8175
DL
9929 break;
9930 case PORT_CLK_SEL_WRPLL2:
8106ddbd 9931 id = DPLL_ID_WRPLL2;
7d2c8175 9932 break;
00490c22 9933 case PORT_CLK_SEL_SPLL:
8106ddbd 9934 id = DPLL_ID_SPLL;
79bd23da 9935 break;
9d16da65
ACO
9936 case PORT_CLK_SEL_LCPLL_810:
9937 id = DPLL_ID_LCPLL_810;
9938 break;
9939 case PORT_CLK_SEL_LCPLL_1350:
9940 id = DPLL_ID_LCPLL_1350;
9941 break;
9942 case PORT_CLK_SEL_LCPLL_2700:
9943 id = DPLL_ID_LCPLL_2700;
9944 break;
8106ddbd 9945 default:
c856052a 9946 MISSING_CASE(ddi_pll_sel);
8106ddbd
ACO
9947 /* fall through */
9948 case PORT_CLK_SEL_NONE:
8106ddbd 9949 return;
7d2c8175 9950 }
8106ddbd
ACO
9951
9952 pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
7d2c8175
DL
9953}
9954
cf30429e
JN
9955static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
9956 struct intel_crtc_state *pipe_config,
513a4c55
CW
9957 u64 *power_domain_mask,
9958 intel_wakeref_t *wakerefs)
cf30429e
JN
9959{
9960 struct drm_device *dev = crtc->base.dev;
fac5e23e 9961 struct drm_i915_private *dev_priv = to_i915(dev);
cf30429e 9962 enum intel_display_power_domain power_domain;
bc7e3525 9963 unsigned long panel_transcoder_mask = 0;
0716931a
JN
9964 unsigned long enabled_panel_transcoders = 0;
9965 enum transcoder panel_transcoder;
513a4c55 9966 intel_wakeref_t wf;
cf30429e 9967 u32 tmp;
0716931a 9968
2dd24a9c 9969 if (INTEL_GEN(dev_priv) >= 11)
0716931a
JN
9970 panel_transcoder_mask |=
9971 BIT(TRANSCODER_DSI_0) | BIT(TRANSCODER_DSI_1);
cf30429e 9972
bc7e3525
LDM
9973 if (HAS_TRANSCODER_EDP(dev_priv))
9974 panel_transcoder_mask |= BIT(TRANSCODER_EDP);
9975
d9a7bc67
ID
9976 /*
9977 * The pipe->transcoder mapping is fixed with the exception of the eDP
0716931a 9978 * and DSI transcoders handled below.
d9a7bc67 9979 */
cf30429e
JN
9980 pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
9981
9982 /*
9983 * XXX: Do intel_display_power_get_if_enabled before reading this (for
9984 * consistency and less surprising code; it's in always on power).
9985 */
1b4bd5c4
CW
9986 for_each_set_bit(panel_transcoder,
9987 &panel_transcoder_mask,
9988 ARRAY_SIZE(INTEL_INFO(dev_priv)->trans_offsets)) {
dc0c0bfe 9989 bool force_thru = false;
0716931a 9990 enum pipe trans_pipe;
2ca711ca 9991
0716931a
JN
9992 tmp = I915_READ(TRANS_DDI_FUNC_CTL(panel_transcoder));
9993 if (!(tmp & TRANS_DDI_FUNC_ENABLE))
9994 continue;
2ca711ca 9995
0716931a
JN
9996 /*
9997 * Log all enabled ones, only use the first one.
9998 *
9999 * FIXME: This won't work for two separate DSI displays.
10000 */
10001 enabled_panel_transcoders |= BIT(panel_transcoder);
10002 if (enabled_panel_transcoders != BIT(panel_transcoder))
10003 continue;
2ca711ca 10004
cf30429e
JN
10005 switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
10006 default:
0716931a
JN
10007 WARN(1, "unknown pipe linked to transcoder %s\n",
10008 transcoder_name(panel_transcoder));
f0d759f0 10009 /* fall through */
cf30429e 10010 case TRANS_DDI_EDP_INPUT_A_ONOFF:
dc0c0bfe
VS
10011 force_thru = true;
10012 /* fall through */
cf30429e 10013 case TRANS_DDI_EDP_INPUT_A_ON:
2ca711ca 10014 trans_pipe = PIPE_A;
cf30429e
JN
10015 break;
10016 case TRANS_DDI_EDP_INPUT_B_ONOFF:
2ca711ca 10017 trans_pipe = PIPE_B;
cf30429e
JN
10018 break;
10019 case TRANS_DDI_EDP_INPUT_C_ONOFF:
2ca711ca 10020 trans_pipe = PIPE_C;
cf30429e
JN
10021 break;
10022 }
10023
dc0c0bfe 10024 if (trans_pipe == crtc->pipe) {
0716931a 10025 pipe_config->cpu_transcoder = panel_transcoder;
dc0c0bfe
VS
10026 pipe_config->pch_pfit.force_thru = force_thru;
10027 }
cf30429e
JN
10028 }
10029
0716931a
JN
10030 /*
10031 * Valid combos: none, eDP, DSI0, DSI1, DSI0+DSI1
10032 */
10033 WARN_ON((enabled_panel_transcoders & BIT(TRANSCODER_EDP)) &&
10034 enabled_panel_transcoders != BIT(TRANSCODER_EDP));
10035
cf30429e 10036 power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
513a4c55
CW
10037 WARN_ON(*power_domain_mask & BIT_ULL(power_domain));
10038
10039 wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
10040 if (!wf)
cf30429e 10041 return false;
04161d64 10042
513a4c55 10043 wakerefs[power_domain] = wf;
d8fc70b7 10044 *power_domain_mask |= BIT_ULL(power_domain);
cf30429e
JN
10045
10046 tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
10047
10048 return tmp & PIPECONF_ENABLE;
10049}
10050
4d1de975
JN
10051static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
10052 struct intel_crtc_state *pipe_config,
513a4c55
CW
10053 u64 *power_domain_mask,
10054 intel_wakeref_t *wakerefs)
4d1de975
JN
10055{
10056 struct drm_device *dev = crtc->base.dev;
fac5e23e 10057 struct drm_i915_private *dev_priv = to_i915(dev);
4d1de975 10058 enum intel_display_power_domain power_domain;
4d1de975 10059 enum transcoder cpu_transcoder;
513a4c55
CW
10060 intel_wakeref_t wf;
10061 enum port port;
4d1de975
JN
10062 u32 tmp;
10063
4d1de975
JN
10064 for_each_port_masked(port, BIT(PORT_A) | BIT(PORT_C)) {
10065 if (port == PORT_A)
10066 cpu_transcoder = TRANSCODER_DSI_A;
10067 else
10068 cpu_transcoder = TRANSCODER_DSI_C;
10069
10070 power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
513a4c55
CW
10071 WARN_ON(*power_domain_mask & BIT_ULL(power_domain));
10072
10073 wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
10074 if (!wf)
4d1de975 10075 continue;
04161d64 10076
513a4c55 10077 wakerefs[power_domain] = wf;
d8fc70b7 10078 *power_domain_mask |= BIT_ULL(power_domain);
4d1de975 10079
db18b6a6
ID
10080 /*
10081 * The PLL needs to be enabled with a valid divider
10082 * configuration, otherwise accessing DSI registers will hang
10083 * the machine. See BSpec North Display Engine
10084 * registers/MIPI[BXT]. We can break out here early, since we
10085 * need the same DSI PLL to be enabled for both DSI ports.
10086 */
e518634b 10087 if (!bxt_dsi_pll_is_enabled(dev_priv))
db18b6a6
ID
10088 break;
10089
4d1de975
JN
10090 /* XXX: this works for video mode only */
10091 tmp = I915_READ(BXT_MIPI_PORT_CTRL(port));
10092 if (!(tmp & DPI_ENABLE))
10093 continue;
10094
10095 tmp = I915_READ(MIPI_CTRL(port));
10096 if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
10097 continue;
10098
10099 pipe_config->cpu_transcoder = cpu_transcoder;
4d1de975
JN
10100 break;
10101 }
10102
d7edc4e5 10103 return transcoder_is_dsi(pipe_config->cpu_transcoder);
4d1de975
JN
10104}
10105
26804afd 10106static void haswell_get_ddi_port_state(struct intel_crtc *crtc,
5cec258b 10107 struct intel_crtc_state *pipe_config)
26804afd 10108{
6315b5d3 10109 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
d452c5b6 10110 struct intel_shared_dpll *pll;
26804afd 10111 enum port port;
ba3f4d0a 10112 u32 tmp;
26804afd
DV
10113
10114 tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe_config->cpu_transcoder));
10115
10116 port = (tmp & TRANS_DDI_PORT_MASK) >> TRANS_DDI_PORT_SHIFT;
10117
2dd24a9c 10118 if (INTEL_GEN(dev_priv) >= 11)
970888e7
PZ
10119 icelake_get_ddi_pll(dev_priv, port, pipe_config);
10120 else if (IS_CANNONLAKE(dev_priv))
8b0f7e06
KM
10121 cannonlake_get_ddi_pll(dev_priv, port, pipe_config);
10122 else if (IS_GEN9_BC(dev_priv))
96b7dfb7 10123 skylake_get_ddi_pll(dev_priv, port, pipe_config);
cc3f90f0 10124 else if (IS_GEN9_LP(dev_priv))
3760b59c 10125 bxt_get_ddi_pll(dev_priv, port, pipe_config);
96b7dfb7
S
10126 else
10127 haswell_get_ddi_pll(dev_priv, port, pipe_config);
9cd86933 10128
8106ddbd
ACO
10129 pll = pipe_config->shared_dpll;
10130 if (pll) {
ee1398ba
LDM
10131 WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
10132 &pipe_config->dpll_hw_state));
d452c5b6
DV
10133 }
10134
26804afd
DV
10135 /*
10136 * Haswell has only FDI/PCH transcoder A. It is which is connected to
10137 * DDI E. So just check whether this pipe is wired to DDI E and whether
10138 * the PCH transcoder is on.
10139 */
6315b5d3 10140 if (INTEL_GEN(dev_priv) < 9 &&
ca370455 10141 (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
26804afd
DV
10142 pipe_config->has_pch_encoder = true;
10143
10144 tmp = I915_READ(FDI_RX_CTL(PIPE_A));
10145 pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
10146 FDI_DP_PORT_WIDTH_SHIFT) + 1;
10147
10148 ironlake_get_fdi_m_n_config(crtc, pipe_config);
10149 }
10150}
10151
0e8ffe1b 10152static bool haswell_get_pipe_config(struct intel_crtc *crtc,
5cec258b 10153 struct intel_crtc_state *pipe_config)
0e8ffe1b 10154{
6315b5d3 10155 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
513a4c55 10156 intel_wakeref_t wakerefs[POWER_DOMAIN_NUM], wf;
1729050e 10157 enum intel_display_power_domain power_domain;
d8fc70b7 10158 u64 power_domain_mask;
cf30429e 10159 bool active;
0e8ffe1b 10160
e79dfb51 10161 intel_crtc_init_scalers(crtc, pipe_config);
5fb9dadf 10162
1729050e 10163 power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
513a4c55
CW
10164 wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
10165 if (!wf)
b5482bd0 10166 return false;
513a4c55
CW
10167
10168 wakerefs[power_domain] = wf;
d8fc70b7 10169 power_domain_mask = BIT_ULL(power_domain);
1729050e 10170
8106ddbd 10171 pipe_config->shared_dpll = NULL;
c0d43d62 10172
513a4c55
CW
10173 active = hsw_get_transcoder_state(crtc, pipe_config,
10174 &power_domain_mask, wakerefs);
eccb140b 10175
cc3f90f0 10176 if (IS_GEN9_LP(dev_priv) &&
513a4c55
CW
10177 bxt_get_dsi_transcoder_state(crtc, pipe_config,
10178 &power_domain_mask, wakerefs)) {
d7edc4e5
VS
10179 WARN_ON(active);
10180 active = true;
4d1de975
JN
10181 }
10182
cf30429e 10183 if (!active)
1729050e 10184 goto out;
0e8ffe1b 10185
2eae5d6b 10186 if (!transcoder_is_dsi(pipe_config->cpu_transcoder) ||
2dd24a9c 10187 INTEL_GEN(dev_priv) >= 11) {
4d1de975
JN
10188 haswell_get_ddi_port_state(crtc, pipe_config);
10189 intel_get_pipe_timings(crtc, pipe_config);
10190 }
627eb5a3 10191
bc58be60 10192 intel_get_pipe_src_size(crtc, pipe_config);
33b7f3ee 10193 intel_get_crtc_ycbcr_config(crtc, pipe_config);
1bd1bd80 10194
2a3902bd 10195 pipe_config->gamma_mode = I915_READ(GAMMA_MODE(crtc->pipe));
05dc698c 10196
a1f1e61b
VS
10197 pipe_config->csc_mode = I915_READ(PIPE_CSC_MODE(crtc->pipe));
10198
5f29ab23
VS
10199 if (INTEL_GEN(dev_priv) >= 9) {
10200 u32 tmp = I915_READ(SKL_BOTTOM_COLOR(crtc->pipe));
10201
10202 if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE)
10203 pipe_config->gamma_enable = true;
8271b2ef
VS
10204
10205 if (tmp & SKL_BOTTOM_COLOR_CSC_ENABLE)
10206 pipe_config->csc_enable = true;
5f29ab23
VS
10207 } else {
10208 i9xx_get_pipe_color_config(pipe_config);
10209 }
10210
3633e511
SS
10211 intel_color_get_config(pipe_config);
10212
1729050e 10213 power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
513a4c55
CW
10214 WARN_ON(power_domain_mask & BIT_ULL(power_domain));
10215
10216 wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
10217 if (wf) {
10218 wakerefs[power_domain] = wf;
d8fc70b7 10219 power_domain_mask |= BIT_ULL(power_domain);
04161d64 10220
6315b5d3 10221 if (INTEL_GEN(dev_priv) >= 9)
bd2e244f 10222 skylake_get_pfit_config(crtc, pipe_config);
ff6d9f55 10223 else
1c132b44 10224 ironlake_get_pfit_config(crtc, pipe_config);
bd2e244f 10225 }
88adfff1 10226
24f28450
ML
10227 if (hsw_crtc_supports_ips(crtc)) {
10228 if (IS_HASWELL(dev_priv))
10229 pipe_config->ips_enabled = I915_READ(IPS_CTL) & IPS_ENABLE;
10230 else {
10231 /*
10232 * We cannot readout IPS state on broadwell, set to
10233 * true so we can set it to a defined state on first
10234 * commit.
10235 */
10236 pipe_config->ips_enabled = true;
10237 }
10238 }
10239
4d1de975
JN
10240 if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
10241 !transcoder_is_dsi(pipe_config->cpu_transcoder)) {
ebb69c95
CT
10242 pipe_config->pixel_multiplier =
10243 I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
10244 } else {
10245 pipe_config->pixel_multiplier = 1;
10246 }
6c49f241 10247
1729050e
ID
10248out:
10249 for_each_power_domain(power_domain, power_domain_mask)
513a4c55
CW
10250 intel_display_power_put(dev_priv,
10251 power_domain, wakerefs[power_domain]);
1729050e 10252
cf30429e 10253 return active;
0e8ffe1b
DV
10254}
10255
cd5dcbf1 10256static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
1cecc830
VS
10257{
10258 struct drm_i915_private *dev_priv =
10259 to_i915(plane_state->base.plane->dev);
10260 const struct drm_framebuffer *fb = plane_state->base.fb;
10261 const struct drm_i915_gem_object *obj = intel_fb_obj(fb);
10262 u32 base;
10263
d53db442 10264 if (INTEL_INFO(dev_priv)->display.cursor_needs_physical)
1cecc830
VS
10265 base = obj->phys_handle->busaddr;
10266 else
10267 base = intel_plane_ggtt_offset(plane_state);
10268
c11ada07 10269 base += plane_state->color_plane[0].offset;
1e7b4fd8 10270
1cecc830 10271 /* ILK+ do this automagically */
b2ae318a 10272 if (HAS_GMCH(dev_priv) &&
a82256bc 10273 plane_state->base.rotation & DRM_MODE_ROTATE_180)
1cecc830
VS
10274 base += (plane_state->base.crtc_h *
10275 plane_state->base.crtc_w - 1) * fb->format->cpp[0];
10276
10277 return base;
10278}
10279
ed270223
VS
10280static u32 intel_cursor_position(const struct intel_plane_state *plane_state)
10281{
10282 int x = plane_state->base.crtc_x;
10283 int y = plane_state->base.crtc_y;
10284 u32 pos = 0;
10285
10286 if (x < 0) {
10287 pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
10288 x = -x;
10289 }
10290 pos |= x << CURSOR_X_SHIFT;
10291
10292 if (y < 0) {
10293 pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
10294 y = -y;
10295 }
10296 pos |= y << CURSOR_Y_SHIFT;
10297
10298 return pos;
10299}
10300
3637ecf0
VS
10301static bool intel_cursor_size_ok(const struct intel_plane_state *plane_state)
10302{
10303 const struct drm_mode_config *config =
10304 &plane_state->base.plane->dev->mode_config;
10305 int width = plane_state->base.crtc_w;
10306 int height = plane_state->base.crtc_h;
10307
10308 return width > 0 && width <= config->cursor_width &&
10309 height > 0 && height <= config->cursor_height;
10310}
10311
fce8d235 10312static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
659056f2 10313{
1e7b4fd8
VS
10314 int src_x, src_y;
10315 u32 offset;
fc3fed5d 10316 int ret;
fce8d235 10317
54d4d719 10318 ret = intel_plane_compute_gtt(plane_state);
fc3fed5d
VS
10319 if (ret)
10320 return ret;
10321
54d4d719
VS
10322 if (!plane_state->base.visible)
10323 return 0;
10324
fce8d235
VS
10325 src_x = plane_state->base.src_x >> 16;
10326 src_y = plane_state->base.src_y >> 16;
10327
10328 intel_add_fb_offsets(&src_x, &src_y, plane_state, 0);
10329 offset = intel_plane_compute_aligned_offset(&src_x, &src_y,
10330 plane_state, 0);
10331
10332 if (src_x != 0 || src_y != 0) {
10333 DRM_DEBUG_KMS("Arbitrary cursor panning not supported\n");
10334 return -EINVAL;
10335 }
10336
10337 plane_state->color_plane[0].offset = offset;
10338
10339 return 0;
10340}
10341
10342static int intel_check_cursor(struct intel_crtc_state *crtc_state,
10343 struct intel_plane_state *plane_state)
10344{
10345 const struct drm_framebuffer *fb = plane_state->base.fb;
659056f2
VS
10346 int ret;
10347
4e0b83a5
VS
10348 if (fb && fb->modifier != DRM_FORMAT_MOD_LINEAR) {
10349 DRM_DEBUG_KMS("cursor cannot be tiled\n");
10350 return -EINVAL;
10351 }
10352
a01cb8ba
VS
10353 ret = drm_atomic_helper_check_plane_state(&plane_state->base,
10354 &crtc_state->base,
a01cb8ba
VS
10355 DRM_PLANE_HELPER_NO_SCALING,
10356 DRM_PLANE_HELPER_NO_SCALING,
10357 true, true);
659056f2
VS
10358 if (ret)
10359 return ret;
10360
54d4d719
VS
10361 ret = intel_cursor_check_surface(plane_state);
10362 if (ret)
10363 return ret;
10364
4e0b83a5 10365 if (!plane_state->base.visible)
659056f2
VS
10366 return 0;
10367
4e0b83a5
VS
10368 ret = intel_plane_check_src_coordinates(plane_state);
10369 if (ret)
10370 return ret;
659056f2
VS
10371
10372 return 0;
10373}
10374
ddd5713d
VS
10375static unsigned int
10376i845_cursor_max_stride(struct intel_plane *plane,
10377 u32 pixel_format, u64 modifier,
10378 unsigned int rotation)
10379{
10380 return 2048;
10381}
10382
7eb31a0b
VS
10383static u32 i845_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
10384{
5f29ab23
VS
10385 u32 cntl = 0;
10386
10387 if (crtc_state->gamma_enable)
10388 cntl |= CURSOR_GAMMA_ENABLE;
10389
10390 return cntl;
7eb31a0b
VS
10391}
10392
292889e1
VS
10393static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state,
10394 const struct intel_plane_state *plane_state)
10395{
292889e1 10396 return CURSOR_ENABLE |
292889e1 10397 CURSOR_FORMAT_ARGB |
df79cf44 10398 CURSOR_STRIDE(plane_state->color_plane[0].stride);
292889e1
VS
10399}
10400
659056f2
VS
10401static bool i845_cursor_size_ok(const struct intel_plane_state *plane_state)
10402{
659056f2 10403 int width = plane_state->base.crtc_w;
659056f2
VS
10404
10405 /*
10406 * 845g/865g are only limited by the width of their cursors,
10407 * the height is arbitrary up to the precision of the register.
10408 */
3637ecf0 10409 return intel_cursor_size_ok(plane_state) && IS_ALIGNED(width, 64);
659056f2
VS
10410}
10411
eb0f5044 10412static int i845_check_cursor(struct intel_crtc_state *crtc_state,
659056f2
VS
10413 struct intel_plane_state *plane_state)
10414{
10415 const struct drm_framebuffer *fb = plane_state->base.fb;
659056f2
VS
10416 int ret;
10417
10418 ret = intel_check_cursor(crtc_state, plane_state);
10419 if (ret)
10420 return ret;
10421
10422 /* if we want to turn off the cursor ignore width and height */
1e1bb871 10423 if (!fb)
659056f2
VS
10424 return 0;
10425
10426 /* Check for which cursor types we support */
10427 if (!i845_cursor_size_ok(plane_state)) {
10428 DRM_DEBUG("Cursor dimension %dx%d not supported\n",
10429 plane_state->base.crtc_w,
10430 plane_state->base.crtc_h);
10431 return -EINVAL;
10432 }
10433
df79cf44
VS
10434 WARN_ON(plane_state->base.visible &&
10435 plane_state->color_plane[0].stride != fb->pitches[0]);
10436
1e1bb871 10437 switch (fb->pitches[0]) {
292889e1
VS
10438 case 256:
10439 case 512:
10440 case 1024:
10441 case 2048:
10442 break;
1e1bb871
VS
10443 default:
10444 DRM_DEBUG_KMS("Invalid cursor stride (%u)\n",
10445 fb->pitches[0]);
10446 return -EINVAL;
292889e1
VS
10447 }
10448
659056f2
VS
10449 plane_state->ctl = i845_cursor_ctl(crtc_state, plane_state);
10450
10451 return 0;
292889e1
VS
10452}
10453
b2d03b0d
VS
10454static void i845_update_cursor(struct intel_plane *plane,
10455 const struct intel_crtc_state *crtc_state,
55a08b3f 10456 const struct intel_plane_state *plane_state)
560b85bb 10457{
cd5dcbf1 10458 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
b2d03b0d
VS
10459 u32 cntl = 0, base = 0, pos = 0, size = 0;
10460 unsigned long irqflags;
560b85bb 10461
936e71e3 10462 if (plane_state && plane_state->base.visible) {
55a08b3f
ML
10463 unsigned int width = plane_state->base.crtc_w;
10464 unsigned int height = plane_state->base.crtc_h;
dc41c154 10465
7eb31a0b
VS
10466 cntl = plane_state->ctl |
10467 i845_cursor_ctl_crtc(crtc_state);
10468
dc41c154 10469 size = (height << 12) | width;
560b85bb 10470
b2d03b0d
VS
10471 base = intel_cursor_base(plane_state);
10472 pos = intel_cursor_position(plane_state);
4b0e333e 10473 }
560b85bb 10474
b2d03b0d 10475 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
4726e0b0 10476
e11ffddb
VS
10477 /* On these chipsets we can only modify the base/size/stride
10478 * whilst the cursor is disabled.
10479 */
10480 if (plane->cursor.base != base ||
10481 plane->cursor.size != size ||
10482 plane->cursor.cntl != cntl) {
dd584fc0 10483 I915_WRITE_FW(CURCNTR(PIPE_A), 0);
dd584fc0 10484 I915_WRITE_FW(CURBASE(PIPE_A), base);
dd584fc0 10485 I915_WRITE_FW(CURSIZE, size);
b2d03b0d 10486 I915_WRITE_FW(CURPOS(PIPE_A), pos);
dd584fc0 10487 I915_WRITE_FW(CURCNTR(PIPE_A), cntl);
75343a44 10488
e11ffddb
VS
10489 plane->cursor.base = base;
10490 plane->cursor.size = size;
10491 plane->cursor.cntl = cntl;
10492 } else {
10493 I915_WRITE_FW(CURPOS(PIPE_A), pos);
560b85bb 10494 }
e11ffddb 10495
b2d03b0d
VS
10496 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
10497}
10498
10499static void i845_disable_cursor(struct intel_plane *plane,
0dd14be3 10500 const struct intel_crtc_state *crtc_state)
b2d03b0d 10501{
0dd14be3 10502 i845_update_cursor(plane, crtc_state, NULL);
560b85bb
CW
10503}
10504
eade6c89
VS
10505static bool i845_cursor_get_hw_state(struct intel_plane *plane,
10506 enum pipe *pipe)
51f5a096
VS
10507{
10508 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
10509 enum intel_display_power_domain power_domain;
0e6e0be4 10510 intel_wakeref_t wakeref;
51f5a096
VS
10511 bool ret;
10512
10513 power_domain = POWER_DOMAIN_PIPE(PIPE_A);
0e6e0be4
CW
10514 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
10515 if (!wakeref)
51f5a096
VS
10516 return false;
10517
10518 ret = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
10519
eade6c89
VS
10520 *pipe = PIPE_A;
10521
0e6e0be4 10522 intel_display_power_put(dev_priv, power_domain, wakeref);
51f5a096
VS
10523
10524 return ret;
10525}
10526
ddd5713d
VS
10527static unsigned int
10528i9xx_cursor_max_stride(struct intel_plane *plane,
10529 u32 pixel_format, u64 modifier,
10530 unsigned int rotation)
10531{
10532 return plane->base.dev->mode_config.cursor_width * 4;
10533}
10534
7eb31a0b 10535static u32 i9xx_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state)
292889e1 10536{
292889e1 10537 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
7eb31a0b 10538 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
c894d63c 10539 u32 cntl = 0;
292889e1 10540
7eb31a0b
VS
10541 if (INTEL_GEN(dev_priv) >= 11)
10542 return cntl;
e876b78c 10543
5f29ab23
VS
10544 if (crtc_state->gamma_enable)
10545 cntl = MCURSOR_GAMMA_ENABLE;
292889e1 10546
8271b2ef 10547 if (crtc_state->csc_enable)
7eb31a0b 10548 cntl |= MCURSOR_PIPE_CSC_ENABLE;
292889e1 10549
32ea06b6
VS
10550 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
10551 cntl |= MCURSOR_PIPE_SELECT(crtc->pipe);
292889e1 10552
7eb31a0b
VS
10553 return cntl;
10554}
10555
10556static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state,
10557 const struct intel_plane_state *plane_state)
10558{
10559 struct drm_i915_private *dev_priv =
10560 to_i915(plane_state->base.plane->dev);
10561 u32 cntl = 0;
10562
10563 if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv))
10564 cntl |= MCURSOR_TRICKLE_FEED_DISABLE;
10565
292889e1
VS
10566 switch (plane_state->base.crtc_w) {
10567 case 64:
b99b9ec1 10568 cntl |= MCURSOR_MODE_64_ARGB_AX;
292889e1
VS
10569 break;
10570 case 128:
b99b9ec1 10571 cntl |= MCURSOR_MODE_128_ARGB_AX;
292889e1
VS
10572 break;
10573 case 256:
b99b9ec1 10574 cntl |= MCURSOR_MODE_256_ARGB_AX;
292889e1
VS
10575 break;
10576 default:
10577 MISSING_CASE(plane_state->base.crtc_w);
10578 return 0;
10579 }
10580
c2c446ad 10581 if (plane_state->base.rotation & DRM_MODE_ROTATE_180)
b99b9ec1 10582 cntl |= MCURSOR_ROTATE_180;
292889e1
VS
10583
10584 return cntl;
10585}
10586
659056f2 10587static bool i9xx_cursor_size_ok(const struct intel_plane_state *plane_state)
65a21cd6 10588{
024faac7
VS
10589 struct drm_i915_private *dev_priv =
10590 to_i915(plane_state->base.plane->dev);
659056f2
VS
10591 int width = plane_state->base.crtc_w;
10592 int height = plane_state->base.crtc_h;
4b0e333e 10593
3637ecf0 10594 if (!intel_cursor_size_ok(plane_state))
659056f2 10595 return false;
4398ad45 10596
024faac7
VS
10597 /* Cursor width is limited to a few power-of-two sizes */
10598 switch (width) {
659056f2
VS
10599 case 256:
10600 case 128:
659056f2
VS
10601 case 64:
10602 break;
10603 default:
10604 return false;
65a21cd6 10605 }
4b0e333e 10606
024faac7
VS
10607 /*
10608 * IVB+ have CUR_FBC_CTL which allows an arbitrary cursor
10609 * height from 8 lines up to the cursor width, when the
10610 * cursor is not rotated. Everything else requires square
10611 * cursors.
10612 */
10613 if (HAS_CUR_FBC(dev_priv) &&
a82256bc 10614 plane_state->base.rotation & DRM_MODE_ROTATE_0) {
024faac7
VS
10615 if (height < 8 || height > width)
10616 return false;
10617 } else {
10618 if (height != width)
10619 return false;
10620 }
99d1f387 10621
659056f2 10622 return true;
65a21cd6
JB
10623}
10624
eb0f5044 10625static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
659056f2 10626 struct intel_plane_state *plane_state)
cda4b7d3 10627{
eb0f5044 10628 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
659056f2
VS
10629 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
10630 const struct drm_framebuffer *fb = plane_state->base.fb;
659056f2 10631 enum pipe pipe = plane->pipe;
659056f2 10632 int ret;
cda4b7d3 10633
659056f2
VS
10634 ret = intel_check_cursor(crtc_state, plane_state);
10635 if (ret)
10636 return ret;
cda4b7d3 10637
659056f2 10638 /* if we want to turn off the cursor ignore width and height */
1e1bb871 10639 if (!fb)
659056f2 10640 return 0;
55a08b3f 10641
659056f2
VS
10642 /* Check for which cursor types we support */
10643 if (!i9xx_cursor_size_ok(plane_state)) {
10644 DRM_DEBUG("Cursor dimension %dx%d not supported\n",
10645 plane_state->base.crtc_w,
10646 plane_state->base.crtc_h);
10647 return -EINVAL;
cda4b7d3 10648 }
cda4b7d3 10649
df79cf44
VS
10650 WARN_ON(plane_state->base.visible &&
10651 plane_state->color_plane[0].stride != fb->pitches[0]);
10652
1e1bb871
VS
10653 if (fb->pitches[0] != plane_state->base.crtc_w * fb->format->cpp[0]) {
10654 DRM_DEBUG_KMS("Invalid cursor stride (%u) (cursor width %d)\n",
10655 fb->pitches[0], plane_state->base.crtc_w);
10656 return -EINVAL;
659056f2 10657 }
dd584fc0 10658
659056f2
VS
10659 /*
10660 * There's something wrong with the cursor on CHV pipe C.
10661 * If it straddles the left edge of the screen then
10662 * moving it away from the edge or disabling it often
10663 * results in a pipe underrun, and often that can lead to
10664 * dead pipe (constant underrun reported, and it scans
10665 * out just a solid color). To recover from that, the
10666 * display power well must be turned off and on again.
10667 * Refuse the put the cursor into that compromised position.
10668 */
10669 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C &&
10670 plane_state->base.visible && plane_state->base.crtc_x < 0) {
10671 DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
10672 return -EINVAL;
10673 }
5efb3e28 10674
659056f2 10675 plane_state->ctl = i9xx_cursor_ctl(crtc_state, plane_state);
dd584fc0 10676
659056f2 10677 return 0;
cda4b7d3
CW
10678}
10679
b2d03b0d
VS
10680static void i9xx_update_cursor(struct intel_plane *plane,
10681 const struct intel_crtc_state *crtc_state,
55a08b3f 10682 const struct intel_plane_state *plane_state)
dc41c154 10683{
cd5dcbf1
VS
10684 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
10685 enum pipe pipe = plane->pipe;
024faac7 10686 u32 cntl = 0, base = 0, pos = 0, fbc_ctl = 0;
b2d03b0d 10687 unsigned long irqflags;
dc41c154 10688
b2d03b0d 10689 if (plane_state && plane_state->base.visible) {
7eb31a0b
VS
10690 cntl = plane_state->ctl |
10691 i9xx_cursor_ctl_crtc(crtc_state);
dc41c154 10692
024faac7
VS
10693 if (plane_state->base.crtc_h != plane_state->base.crtc_w)
10694 fbc_ctl = CUR_FBC_CTL_EN | (plane_state->base.crtc_h - 1);
dc41c154 10695
b2d03b0d
VS
10696 base = intel_cursor_base(plane_state);
10697 pos = intel_cursor_position(plane_state);
10698 }
10699
10700 spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
10701
e11ffddb
VS
10702 /*
10703 * On some platforms writing CURCNTR first will also
10704 * cause CURPOS to be armed by the CURBASE write.
10705 * Without the CURCNTR write the CURPOS write would
83234d13
VS
10706 * arm itself. Thus we always update CURCNTR before
10707 * CURPOS.
8753d2bc
VS
10708 *
10709 * On other platforms CURPOS always requires the
10710 * CURBASE write to arm the update. Additonally
10711 * a write to any of the cursor register will cancel
10712 * an already armed cursor update. Thus leaving out
10713 * the CURBASE write after CURPOS could lead to a
10714 * cursor that doesn't appear to move, or even change
10715 * shape. Thus we always write CURBASE.
e11ffddb 10716 *
83234d13
VS
10717 * The other registers are armed by by the CURBASE write
10718 * except when the plane is getting enabled at which time
10719 * the CURCNTR write arms the update.
e11ffddb 10720 */
ff43bc37
VS
10721
10722 if (INTEL_GEN(dev_priv) >= 9)
10723 skl_write_cursor_wm(plane, crtc_state);
10724
e11ffddb
VS
10725 if (plane->cursor.base != base ||
10726 plane->cursor.size != fbc_ctl ||
10727 plane->cursor.cntl != cntl) {
e11ffddb
VS
10728 if (HAS_CUR_FBC(dev_priv))
10729 I915_WRITE_FW(CUR_FBC_CTL(pipe), fbc_ctl);
83234d13 10730 I915_WRITE_FW(CURCNTR(pipe), cntl);
b2d03b0d 10731 I915_WRITE_FW(CURPOS(pipe), pos);
75343a44
VS
10732 I915_WRITE_FW(CURBASE(pipe), base);
10733
e11ffddb
VS
10734 plane->cursor.base = base;
10735 plane->cursor.size = fbc_ctl;
10736 plane->cursor.cntl = cntl;
dc41c154 10737 } else {
e11ffddb 10738 I915_WRITE_FW(CURPOS(pipe), pos);
8753d2bc 10739 I915_WRITE_FW(CURBASE(pipe), base);
dc41c154
VS
10740 }
10741
b2d03b0d 10742 spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
65a21cd6
JB
10743}
10744
b2d03b0d 10745static void i9xx_disable_cursor(struct intel_plane *plane,
0dd14be3 10746 const struct intel_crtc_state *crtc_state)
cda4b7d3 10747{
0dd14be3 10748 i9xx_update_cursor(plane, crtc_state, NULL);
dc41c154
VS
10749}
10750
eade6c89
VS
10751static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
10752 enum pipe *pipe)
51f5a096
VS
10753{
10754 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
10755 enum intel_display_power_domain power_domain;
0e6e0be4 10756 intel_wakeref_t wakeref;
51f5a096 10757 bool ret;
eade6c89 10758 u32 val;
51f5a096
VS
10759
10760 /*
10761 * Not 100% correct for planes that can move between pipes,
10762 * but that's only the case for gen2-3 which don't have any
10763 * display power wells.
10764 */
eade6c89 10765 power_domain = POWER_DOMAIN_PIPE(plane->pipe);
0e6e0be4
CW
10766 wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
10767 if (!wakeref)
51f5a096
VS
10768 return false;
10769
eade6c89
VS
10770 val = I915_READ(CURCNTR(plane->pipe));
10771
b99b9ec1 10772 ret = val & MCURSOR_MODE;
eade6c89
VS
10773
10774 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
10775 *pipe = plane->pipe;
10776 else
10777 *pipe = (val & MCURSOR_PIPE_SELECT_MASK) >>
10778 MCURSOR_PIPE_SELECT_SHIFT;
51f5a096 10779
0e6e0be4 10780 intel_display_power_put(dev_priv, power_domain, wakeref);
51f5a096
VS
10781
10782 return ret;
10783}
dc41c154 10784
79e53945 10785/* VESA 640x480x72Hz mode to set on the pipe */
bacdcd55 10786static const struct drm_display_mode load_detect_mode = {
79e53945
JB
10787 DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664,
10788 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC),
10789};
10790
a8bb6818 10791struct drm_framebuffer *
24dbf51a
CW
10792intel_framebuffer_create(struct drm_i915_gem_object *obj,
10793 struct drm_mode_fb_cmd2 *mode_cmd)
d2dff872
CW
10794{
10795 struct intel_framebuffer *intel_fb;
10796 int ret;
10797
10798 intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
dcb1394e 10799 if (!intel_fb)
d2dff872 10800 return ERR_PTR(-ENOMEM);
d2dff872 10801
24dbf51a 10802 ret = intel_framebuffer_init(intel_fb, obj, mode_cmd);
dd4916c5
DV
10803 if (ret)
10804 goto err;
d2dff872
CW
10805
10806 return &intel_fb->base;
dcb1394e 10807
dd4916c5 10808err:
dd4916c5 10809 kfree(intel_fb);
dd4916c5 10810 return ERR_PTR(ret);
d2dff872
CW
10811}
10812
20bdc112
VS
10813static int intel_modeset_disable_planes(struct drm_atomic_state *state,
10814 struct drm_crtc *crtc)
d3a40d1b 10815{
20bdc112 10816 struct drm_plane *plane;
d3a40d1b 10817 struct drm_plane_state *plane_state;
20bdc112 10818 int ret, i;
d3a40d1b 10819
20bdc112 10820 ret = drm_atomic_add_affected_planes(state, crtc);
d3a40d1b
ACO
10821 if (ret)
10822 return ret;
20bdc112
VS
10823
10824 for_each_new_plane_in_state(state, plane, plane_state, i) {
10825 if (plane_state->crtc != crtc)
10826 continue;
10827
10828 ret = drm_atomic_set_crtc_for_plane(plane_state, NULL);
10829 if (ret)
10830 return ret;
10831
10832 drm_atomic_set_fb_for_plane(plane_state, NULL);
10833 }
d3a40d1b
ACO
10834
10835 return 0;
10836}
10837
6c5ed5ae 10838int intel_get_load_detect_pipe(struct drm_connector *connector,
bacdcd55 10839 const struct drm_display_mode *mode,
6c5ed5ae
ML
10840 struct intel_load_detect_pipe *old,
10841 struct drm_modeset_acquire_ctx *ctx)
79e53945
JB
10842{
10843 struct intel_crtc *intel_crtc;
d2434ab7
DV
10844 struct intel_encoder *intel_encoder =
10845 intel_attached_encoder(connector);
79e53945 10846 struct drm_crtc *possible_crtc;
4ef69c7a 10847 struct drm_encoder *encoder = &intel_encoder->base;
79e53945
JB
10848 struct drm_crtc *crtc = NULL;
10849 struct drm_device *dev = encoder->dev;
0f0f74bc 10850 struct drm_i915_private *dev_priv = to_i915(dev);
51fd371b 10851 struct drm_mode_config *config = &dev->mode_config;
edde3617 10852 struct drm_atomic_state *state = NULL, *restore_state = NULL;
944b0c76 10853 struct drm_connector_state *connector_state;
4be07317 10854 struct intel_crtc_state *crtc_state;
51fd371b 10855 int ret, i = -1;
79e53945 10856
d2dff872 10857 DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
c23cc417 10858 connector->base.id, connector->name,
8e329a03 10859 encoder->base.id, encoder->name);
d2dff872 10860
edde3617
ML
10861 old->restore_state = NULL;
10862
6c5ed5ae 10863 WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
6e9f798d 10864
79e53945
JB
10865 /*
10866 * Algorithm gets a little messy:
7a5e4805 10867 *
79e53945
JB
10868 * - if the connector already has an assigned crtc, use it (but make
10869 * sure it's on first)
7a5e4805 10870 *
79e53945
JB
10871 * - try to find the first unused crtc that can drive this connector,
10872 * and use that if we find one
79e53945
JB
10873 */
10874
10875 /* See if we already have a CRTC for this connector */
edde3617
ML
10876 if (connector->state->crtc) {
10877 crtc = connector->state->crtc;
8261b191 10878
51fd371b 10879 ret = drm_modeset_lock(&crtc->mutex, ctx);
4d02e2de 10880 if (ret)
ad3c558f 10881 goto fail;
8261b191
CW
10882
10883 /* Make sure the crtc and connector are running */
edde3617 10884 goto found;
79e53945
JB
10885 }
10886
10887 /* Find an unused one (if possible) */
70e1e0ec 10888 for_each_crtc(dev, possible_crtc) {
79e53945
JB
10889 i++;
10890 if (!(encoder->possible_crtcs & (1 << i)))
10891 continue;
edde3617
ML
10892
10893 ret = drm_modeset_lock(&possible_crtc->mutex, ctx);
10894 if (ret)
10895 goto fail;
10896
10897 if (possible_crtc->state->enable) {
10898 drm_modeset_unlock(&possible_crtc->mutex);
a459249c 10899 continue;
edde3617 10900 }
a459249c
VS
10901
10902 crtc = possible_crtc;
10903 break;
79e53945
JB
10904 }
10905
10906 /*
10907 * If we didn't find an unused CRTC, don't use any.
10908 */
10909 if (!crtc) {
7173188d 10910 DRM_DEBUG_KMS("no pipe available for load-detect\n");
f4bf77b4 10911 ret = -ENODEV;
ad3c558f 10912 goto fail;
79e53945
JB
10913 }
10914
edde3617
ML
10915found:
10916 intel_crtc = to_intel_crtc(crtc);
10917
83a57153 10918 state = drm_atomic_state_alloc(dev);
edde3617
ML
10919 restore_state = drm_atomic_state_alloc(dev);
10920 if (!state || !restore_state) {
10921 ret = -ENOMEM;
10922 goto fail;
10923 }
83a57153
ACO
10924
10925 state->acquire_ctx = ctx;
edde3617 10926 restore_state->acquire_ctx = ctx;
83a57153 10927
944b0c76
ACO
10928 connector_state = drm_atomic_get_connector_state(state, connector);
10929 if (IS_ERR(connector_state)) {
10930 ret = PTR_ERR(connector_state);
10931 goto fail;
10932 }
10933
edde3617
ML
10934 ret = drm_atomic_set_crtc_for_connector(connector_state, crtc);
10935 if (ret)
10936 goto fail;
944b0c76 10937
4be07317
ACO
10938 crtc_state = intel_atomic_get_crtc_state(state, intel_crtc);
10939 if (IS_ERR(crtc_state)) {
10940 ret = PTR_ERR(crtc_state);
10941 goto fail;
10942 }
10943
49d6fa21 10944 crtc_state->base.active = crtc_state->base.enable = true;
4be07317 10945
6492711d
CW
10946 if (!mode)
10947 mode = &load_detect_mode;
79e53945 10948
20bdc112 10949 ret = drm_atomic_set_mode_for_crtc(&crtc_state->base, mode);
d3a40d1b
ACO
10950 if (ret)
10951 goto fail;
10952
20bdc112 10953 ret = intel_modeset_disable_planes(state, crtc);
edde3617
ML
10954 if (ret)
10955 goto fail;
10956
10957 ret = PTR_ERR_OR_ZERO(drm_atomic_get_connector_state(restore_state, connector));
10958 if (!ret)
10959 ret = PTR_ERR_OR_ZERO(drm_atomic_get_crtc_state(restore_state, crtc));
be90cc31
VS
10960 if (!ret)
10961 ret = drm_atomic_add_affected_planes(restore_state, crtc);
edde3617
ML
10962 if (ret) {
10963 DRM_DEBUG_KMS("Failed to create a copy of old state to restore: %i\n", ret);
10964 goto fail;
10965 }
8c7b5ccb 10966
3ba86073
ML
10967 ret = drm_atomic_commit(state);
10968 if (ret) {
6492711d 10969 DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
412b61d8 10970 goto fail;
79e53945 10971 }
edde3617
ML
10972
10973 old->restore_state = restore_state;
7abbd11f 10974 drm_atomic_state_put(state);
7173188d 10975
79e53945 10976 /* let the connector get through one full cycle before testing */
0f0f74bc 10977 intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
7173188d 10978 return true;
412b61d8 10979
ad3c558f 10980fail:
7fb71c8f
CW
10981 if (state) {
10982 drm_atomic_state_put(state);
10983 state = NULL;
10984 }
10985 if (restore_state) {
10986 drm_atomic_state_put(restore_state);
10987 restore_state = NULL;
10988 }
83a57153 10989
6c5ed5ae
ML
10990 if (ret == -EDEADLK)
10991 return ret;
51fd371b 10992
412b61d8 10993 return false;
79e53945
JB
10994}
10995
d2434ab7 10996void intel_release_load_detect_pipe(struct drm_connector *connector,
49172fee
ACO
10997 struct intel_load_detect_pipe *old,
10998 struct drm_modeset_acquire_ctx *ctx)
79e53945 10999{
d2434ab7
DV
11000 struct intel_encoder *intel_encoder =
11001 intel_attached_encoder(connector);
4ef69c7a 11002 struct drm_encoder *encoder = &intel_encoder->base;
edde3617 11003 struct drm_atomic_state *state = old->restore_state;
d3a40d1b 11004 int ret;
79e53945 11005
d2dff872 11006 DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
c23cc417 11007 connector->base.id, connector->name,
8e329a03 11008 encoder->base.id, encoder->name);
d2dff872 11009
edde3617 11010 if (!state)
0622a53c 11011 return;
79e53945 11012
581e49fe 11013 ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
0853695c 11014 if (ret)
edde3617 11015 DRM_DEBUG_KMS("Couldn't release load detect pipe: %i\n", ret);
0853695c 11016 drm_atomic_state_put(state);
79e53945
JB
11017}
11018
da4a1efa 11019static int i9xx_pll_refclk(struct drm_device *dev,
5cec258b 11020 const struct intel_crtc_state *pipe_config)
da4a1efa 11021{
fac5e23e 11022 struct drm_i915_private *dev_priv = to_i915(dev);
da4a1efa
VS
11023 u32 dpll = pipe_config->dpll_hw_state.dpll;
11024
11025 if ((dpll & PLL_REF_INPUT_MASK) == PLLB_REF_INPUT_SPREADSPECTRUMIN)
e91e941b 11026 return dev_priv->vbt.lvds_ssc_freq;
6e266956 11027 else if (HAS_PCH_SPLIT(dev_priv))
da4a1efa 11028 return 120000;
cf819eff 11029 else if (!IS_GEN(dev_priv, 2))
da4a1efa
VS
11030 return 96000;
11031 else
11032 return 48000;
11033}
11034
79e53945 11035/* Returns the clock of the currently programmed mode of the given pipe. */
f1f644dc 11036static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
5cec258b 11037 struct intel_crtc_state *pipe_config)
79e53945 11038{
f1f644dc 11039 struct drm_device *dev = crtc->base.dev;
fac5e23e 11040 struct drm_i915_private *dev_priv = to_i915(dev);
f1f644dc 11041 int pipe = pipe_config->cpu_transcoder;
293623f7 11042 u32 dpll = pipe_config->dpll_hw_state.dpll;
79e53945 11043 u32 fp;
9e2c8475 11044 struct dpll clock;
dccbea3b 11045 int port_clock;
da4a1efa 11046 int refclk = i9xx_pll_refclk(dev, pipe_config);
79e53945
JB
11047
11048 if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
293623f7 11049 fp = pipe_config->dpll_hw_state.fp0;
79e53945 11050 else
293623f7 11051 fp = pipe_config->dpll_hw_state.fp1;
79e53945
JB
11052
11053 clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
9b1e14f4 11054 if (IS_PINEVIEW(dev_priv)) {
f2b115e6
AJ
11055 clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1;
11056 clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT;
2177832f
SL
11057 } else {
11058 clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT;
11059 clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT;
11060 }
11061
cf819eff 11062 if (!IS_GEN(dev_priv, 2)) {
9b1e14f4 11063 if (IS_PINEVIEW(dev_priv))
f2b115e6
AJ
11064 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >>
11065 DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW);
2177832f
SL
11066 else
11067 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >>
79e53945
JB
11068 DPLL_FPA01_P1_POST_DIV_SHIFT);
11069
11070 switch (dpll & DPLL_MODE_MASK) {
11071 case DPLLB_MODE_DAC_SERIAL:
11072 clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ?
11073 5 : 10;
11074 break;
11075 case DPLLB_MODE_LVDS:
11076 clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ?
11077 7 : 14;
11078 break;
11079 default:
28c97730 11080 DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
79e53945 11081 "mode\n", (int)(dpll & DPLL_MODE_MASK));
f1f644dc 11082 return;
79e53945
JB
11083 }
11084
9b1e14f4 11085 if (IS_PINEVIEW(dev_priv))
dccbea3b 11086 port_clock = pnv_calc_dpll_params(refclk, &clock);
ac58c3f0 11087 else
dccbea3b 11088 port_clock = i9xx_calc_dpll_params(refclk, &clock);
79e53945 11089 } else {
50a0bc90 11090 u32 lvds = IS_I830(dev_priv) ? 0 : I915_READ(LVDS);
b1c560d1 11091 bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);
79e53945
JB
11092
11093 if (is_lvds) {
11094 clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >>
11095 DPLL_FPA01_P1_POST_DIV_SHIFT);
b1c560d1
VS
11096
11097 if (lvds & LVDS_CLKB_POWER_UP)
11098 clock.p2 = 7;
11099 else
11100 clock.p2 = 14;
79e53945
JB
11101 } else {
11102 if (dpll & PLL_P1_DIVIDE_BY_TWO)
11103 clock.p1 = 2;
11104 else {
11105 clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >>
11106 DPLL_FPA01_P1_POST_DIV_SHIFT) + 2;
11107 }
11108 if (dpll & PLL_P2_DIVIDE_BY_4)
11109 clock.p2 = 4;
11110 else
11111 clock.p2 = 2;
79e53945 11112 }
da4a1efa 11113
dccbea3b 11114 port_clock = i9xx_calc_dpll_params(refclk, &clock);
79e53945
JB
11115 }
11116
18442d08
VS
11117 /*
11118 * This value includes pixel_multiplier. We will use
241bfc38 11119 * port_clock to compute adjusted_mode.crtc_clock in the
18442d08
VS
11120 * encoder's get_config() function.
11121 */
dccbea3b 11122 pipe_config->port_clock = port_clock;
f1f644dc
JB
11123}
11124
6878da05
VS
11125int intel_dotclock_calculate(int link_freq,
11126 const struct intel_link_m_n *m_n)
f1f644dc 11127{
f1f644dc
JB
11128 /*
11129 * The calculation for the data clock is:
1041a02f 11130 * pixel_clock = ((m/n)*(link_clock * nr_lanes))/bpp
f1f644dc 11131 * But we want to avoid losing precison if possible, so:
1041a02f 11132 * pixel_clock = ((m * link_clock * nr_lanes)/(n*bpp))
f1f644dc
JB
11133 *
11134 * and the link clock is simpler:
1041a02f 11135 * link_clock = (m * link_clock) / n
f1f644dc
JB
11136 */
11137
6878da05
VS
11138 if (!m_n->link_n)
11139 return 0;
f1f644dc 11140
3123698f 11141 return div_u64(mul_u32_u32(m_n->link_m, link_freq), m_n->link_n);
6878da05 11142}
f1f644dc 11143
18442d08 11144static void ironlake_pch_clock_get(struct intel_crtc *crtc,
5cec258b 11145 struct intel_crtc_state *pipe_config)
6878da05 11146{
e3b247da 11147 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
79e53945 11148
18442d08
VS
11149 /* read out port_clock from the DPLL */
11150 i9xx_crtc_clock_get(crtc, pipe_config);
f1f644dc 11151
f1f644dc 11152 /*
e3b247da
VS
11153 * In case there is an active pipe without active ports,
11154 * we may need some idea for the dotclock anyway.
11155 * Calculate one based on the FDI configuration.
79e53945 11156 */
2d112de7 11157 pipe_config->base.adjusted_mode.crtc_clock =
21a727b3 11158 intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
18442d08 11159 &pipe_config->fdi_m_n);
79e53945
JB
11160}
11161
de330815
VS
11162/* Returns the currently programmed mode of the given encoder. */
11163struct drm_display_mode *
11164intel_encoder_current_mode(struct intel_encoder *encoder)
79e53945 11165{
de330815
VS
11166 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
11167 struct intel_crtc_state *crtc_state;
79e53945 11168 struct drm_display_mode *mode;
de330815
VS
11169 struct intel_crtc *crtc;
11170 enum pipe pipe;
11171
11172 if (!encoder->get_hw_state(encoder, &pipe))
11173 return NULL;
11174
11175 crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
79e53945
JB
11176
11177 mode = kzalloc(sizeof(*mode), GFP_KERNEL);
11178 if (!mode)
11179 return NULL;
11180
de330815
VS
11181 crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
11182 if (!crtc_state) {
3f36b937
TU
11183 kfree(mode);
11184 return NULL;
11185 }
11186
de330815 11187 crtc_state->base.crtc = &crtc->base;
3f36b937 11188
de330815
VS
11189 if (!dev_priv->display.get_pipe_config(crtc, crtc_state)) {
11190 kfree(crtc_state);
11191 kfree(mode);
11192 return NULL;
11193 }
e30a154b 11194
de330815 11195 encoder->get_config(encoder, crtc_state);
79e53945 11196
de330815 11197 intel_mode_from_pipe_config(mode, crtc_state);
79e53945 11198
de330815 11199 kfree(crtc_state);
3f36b937 11200
79e53945
JB
11201 return mode;
11202}
11203
11204static void intel_crtc_destroy(struct drm_crtc *crtc)
11205{
11206 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
11207
11208 drm_crtc_cleanup(crtc);
11209 kfree(intel_crtc);
11210}
11211
5a21b665
DV
11212/**
11213 * intel_wm_need_update - Check whether watermarks need updating
6bf19817
CW
11214 * @cur: current plane state
11215 * @new: new plane state
5a21b665
DV
11216 *
11217 * Check current plane state versus the new one to determine whether
11218 * watermarks need to be recalculated.
11219 *
11220 * Returns true or false.
11221 */
cd1d3ee9
MR
11222static bool intel_wm_need_update(struct intel_plane_state *cur,
11223 struct intel_plane_state *new)
5a21b665 11224{
5a21b665 11225 /* Update watermarks on tiling or size changes. */
936e71e3 11226 if (new->base.visible != cur->base.visible)
5a21b665
DV
11227 return true;
11228
11229 if (!cur->base.fb || !new->base.fb)
11230 return false;
11231
bae781b2 11232 if (cur->base.fb->modifier != new->base.fb->modifier ||
5a21b665 11233 cur->base.rotation != new->base.rotation ||
936e71e3
VS
11234 drm_rect_width(&new->base.src) != drm_rect_width(&cur->base.src) ||
11235 drm_rect_height(&new->base.src) != drm_rect_height(&cur->base.src) ||
11236 drm_rect_width(&new->base.dst) != drm_rect_width(&cur->base.dst) ||
11237 drm_rect_height(&new->base.dst) != drm_rect_height(&cur->base.dst))
5a21b665
DV
11238 return true;
11239
11240 return false;
11241}
11242
b2b55502 11243static bool needs_scaling(const struct intel_plane_state *state)
5a21b665 11244{
936e71e3
VS
11245 int src_w = drm_rect_width(&state->base.src) >> 16;
11246 int src_h = drm_rect_height(&state->base.src) >> 16;
11247 int dst_w = drm_rect_width(&state->base.dst);
11248 int dst_h = drm_rect_height(&state->base.dst);
5a21b665
DV
11249
11250 return (src_w != dst_w || src_h != dst_h);
11251}
d21fbe87 11252
b2b55502
VS
11253int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state,
11254 struct drm_crtc_state *crtc_state,
11255 const struct intel_plane_state *old_plane_state,
da20eabd
ML
11256 struct drm_plane_state *plane_state)
11257{
ab1d3a0e 11258 struct intel_crtc_state *pipe_config = to_intel_crtc_state(crtc_state);
da20eabd
ML
11259 struct drm_crtc *crtc = crtc_state->crtc;
11260 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
e9728bd8 11261 struct intel_plane *plane = to_intel_plane(plane_state->plane);
da20eabd 11262 struct drm_device *dev = crtc->dev;
ed4a6a7c 11263 struct drm_i915_private *dev_priv = to_i915(dev);
da20eabd 11264 bool mode_changed = needs_modeset(crtc_state);
b2b55502 11265 bool was_crtc_enabled = old_crtc_state->base.active;
da20eabd 11266 bool is_crtc_enabled = crtc_state->active;
da20eabd
ML
11267 bool turn_off, turn_on, visible, was_visible;
11268 struct drm_framebuffer *fb = plane_state->fb;
78108b7c 11269 int ret;
da20eabd 11270
e9728bd8 11271 if (INTEL_GEN(dev_priv) >= 9 && plane->id != PLANE_CURSOR) {
da20eabd
ML
11272 ret = skl_update_scaler_plane(
11273 to_intel_crtc_state(crtc_state),
11274 to_intel_plane_state(plane_state));
11275 if (ret)
11276 return ret;
11277 }
11278
936e71e3 11279 was_visible = old_plane_state->base.visible;
1d4258db 11280 visible = plane_state->visible;
da20eabd
ML
11281
11282 if (!was_crtc_enabled && WARN_ON(was_visible))
11283 was_visible = false;
11284
35c08f43
ML
11285 /*
11286 * Visibility is calculated as if the crtc was on, but
11287 * after scaler setup everything depends on it being off
11288 * when the crtc isn't active.
f818ffea
VS
11289 *
11290 * FIXME this is wrong for watermarks. Watermarks should also
11291 * be computed as if the pipe would be active. Perhaps move
11292 * per-plane wm computation to the .check_plane() hook, and
11293 * only combine the results from all planes in the current place?
35c08f43 11294 */
e9728bd8 11295 if (!is_crtc_enabled) {
1d4258db 11296 plane_state->visible = visible = false;
e9728bd8 11297 to_intel_crtc_state(crtc_state)->active_planes &= ~BIT(plane->id);
c457d9cf 11298 to_intel_crtc_state(crtc_state)->data_rate[plane->id] = 0;
e9728bd8 11299 }
da20eabd
ML
11300
11301 if (!was_visible && !visible)
11302 return 0;
11303
e8861675
ML
11304 if (fb != old_plane_state->base.fb)
11305 pipe_config->fb_changed = true;
11306
da20eabd
ML
11307 turn_off = was_visible && (!visible || mode_changed);
11308 turn_on = visible && (!was_visible || mode_changed);
11309
72660ce0 11310 DRM_DEBUG_ATOMIC("[CRTC:%d:%s] has [PLANE:%d:%s] with fb %i\n",
e9728bd8
VS
11311 intel_crtc->base.base.id, intel_crtc->base.name,
11312 plane->base.base.id, plane->base.name,
72660ce0 11313 fb ? fb->base.id : -1);
da20eabd 11314
72660ce0 11315 DRM_DEBUG_ATOMIC("[PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
e9728bd8 11316 plane->base.base.id, plane->base.name,
72660ce0 11317 was_visible, visible,
da20eabd
ML
11318 turn_off, turn_on, mode_changed);
11319
caed361d 11320 if (turn_on) {
04548cba 11321 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
b4ede6df 11322 pipe_config->update_wm_pre = true;
caed361d
VS
11323
11324 /* must disable cxsr around plane enable/disable */
e9728bd8 11325 if (plane->id != PLANE_CURSOR)
caed361d
VS
11326 pipe_config->disable_cxsr = true;
11327 } else if (turn_off) {
04548cba 11328 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
b4ede6df 11329 pipe_config->update_wm_post = true;
92826fcd 11330
852eb00d 11331 /* must disable cxsr around plane enable/disable */
e9728bd8 11332 if (plane->id != PLANE_CURSOR)
ab1d3a0e 11333 pipe_config->disable_cxsr = true;
cd1d3ee9
MR
11334 } else if (intel_wm_need_update(to_intel_plane_state(plane->base.state),
11335 to_intel_plane_state(plane_state))) {
04548cba 11336 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv)) {
b4ede6df
VS
11337 /* FIXME bollocks */
11338 pipe_config->update_wm_pre = true;
11339 pipe_config->update_wm_post = true;
11340 }
852eb00d 11341 }
da20eabd 11342
8be6ca85 11343 if (visible || was_visible)
e9728bd8 11344 pipe_config->fb_bits |= plane->frontbuffer_bit;
a9ff8714 11345
31ae71fc 11346 /*
8e7a4424
VS
11347 * ILK/SNB DVSACNTR/Sprite Enable
11348 * IVB SPR_CTL/Sprite Enable
11349 * "When in Self Refresh Big FIFO mode, a write to enable the
11350 * plane will be internally buffered and delayed while Big FIFO
11351 * mode is exiting."
11352 *
11353 * Which means that enabling the sprite can take an extra frame
11354 * when we start in big FIFO mode (LP1+). Thus we need to drop
11355 * down to LP0 and wait for vblank in order to make sure the
11356 * sprite gets enabled on the next vblank after the register write.
11357 * Doing otherwise would risk enabling the sprite one frame after
11358 * we've already signalled flip completion. We can resume LP1+
11359 * once the sprite has been enabled.
11360 *
11361 *
31ae71fc 11362 * WaCxSRDisabledForSpriteScaling:ivb
8e7a4424
VS
11363 * IVB SPR_SCALE/Scaling Enable
11364 * "Low Power watermarks must be disabled for at least one
11365 * frame before enabling sprite scaling, and kept disabled
11366 * until sprite scaling is disabled."
11367 *
11368 * ILK/SNB DVSASCALE/Scaling Enable
11369 * "When in Self Refresh Big FIFO mode, scaling enable will be
11370 * masked off while Big FIFO mode is exiting."
31ae71fc 11371 *
8e7a4424
VS
11372 * Despite the w/a only being listed for IVB we assume that
11373 * the ILK/SNB note has similar ramifications, hence we apply
11374 * the w/a on all three platforms.
d8af3270
JPH
11375 *
11376 * With experimental results seems this is needed also for primary
11377 * plane, not only sprite plane.
31ae71fc 11378 */
d8af3270 11379 if (plane->id != PLANE_CURSOR &&
f3ce44a0 11380 (IS_GEN_RANGE(dev_priv, 5, 6) ||
8e7a4424
VS
11381 IS_IVYBRIDGE(dev_priv)) &&
11382 (turn_on || (!needs_scaling(old_plane_state) &&
11383 needs_scaling(to_intel_plane_state(plane_state)))))
31ae71fc 11384 pipe_config->disable_lp_wm = true;
d21fbe87 11385
da20eabd
ML
11386 return 0;
11387}
11388
6d3a1ce7
ML
11389static bool encoders_cloneable(const struct intel_encoder *a,
11390 const struct intel_encoder *b)
11391{
11392 /* masks could be asymmetric, so check both ways */
11393 return a == b || (a->cloneable & (1 << b->type) &&
11394 b->cloneable & (1 << a->type));
11395}
11396
11397static bool check_single_encoder_cloning(struct drm_atomic_state *state,
11398 struct intel_crtc *crtc,
11399 struct intel_encoder *encoder)
11400{
11401 struct intel_encoder *source_encoder;
11402 struct drm_connector *connector;
11403 struct drm_connector_state *connector_state;
11404 int i;
11405
aa5e9b47 11406 for_each_new_connector_in_state(state, connector, connector_state, i) {
6d3a1ce7
ML
11407 if (connector_state->crtc != &crtc->base)
11408 continue;
11409
11410 source_encoder =
11411 to_intel_encoder(connector_state->best_encoder);
11412 if (!encoders_cloneable(encoder, source_encoder))
11413 return false;
11414 }
11415
11416 return true;
11417}
11418
1ab554b0
ML
11419static int icl_add_linked_planes(struct intel_atomic_state *state)
11420{
11421 struct intel_plane *plane, *linked;
11422 struct intel_plane_state *plane_state, *linked_plane_state;
11423 int i;
11424
11425 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
11426 linked = plane_state->linked_plane;
11427
11428 if (!linked)
11429 continue;
11430
11431 linked_plane_state = intel_atomic_get_plane_state(state, linked);
11432 if (IS_ERR(linked_plane_state))
11433 return PTR_ERR(linked_plane_state);
11434
11435 WARN_ON(linked_plane_state->linked_plane != plane);
11436 WARN_ON(linked_plane_state->slave == plane_state->slave);
11437 }
11438
11439 return 0;
11440}
11441
11442static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
11443{
11444 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
11445 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
11446 struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->base.state);
11447 struct intel_plane *plane, *linked;
11448 struct intel_plane_state *plane_state;
11449 int i;
11450
11451 if (INTEL_GEN(dev_priv) < 11)
11452 return 0;
11453
11454 /*
11455 * Destroy all old plane links and make the slave plane invisible
11456 * in the crtc_state->active_planes mask.
11457 */
11458 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
11459 if (plane->pipe != crtc->pipe || !plane_state->linked_plane)
11460 continue;
11461
11462 plane_state->linked_plane = NULL;
afbd8a72 11463 if (plane_state->slave && !plane_state->base.visible) {
1ab554b0 11464 crtc_state->active_planes &= ~BIT(plane->id);
afbd8a72
VS
11465 crtc_state->update_planes |= BIT(plane->id);
11466 }
1ab554b0
ML
11467
11468 plane_state->slave = false;
11469 }
11470
11471 if (!crtc_state->nv12_planes)
11472 return 0;
11473
11474 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
11475 struct intel_plane_state *linked_state = NULL;
11476
11477 if (plane->pipe != crtc->pipe ||
11478 !(crtc_state->nv12_planes & BIT(plane->id)))
11479 continue;
11480
11481 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, linked) {
11482 if (!icl_is_nv12_y_plane(linked->id))
11483 continue;
11484
11485 if (crtc_state->active_planes & BIT(linked->id))
11486 continue;
11487
11488 linked_state = intel_atomic_get_plane_state(state, linked);
11489 if (IS_ERR(linked_state))
11490 return PTR_ERR(linked_state);
11491
11492 break;
11493 }
11494
11495 if (!linked_state) {
df7d4156 11496 DRM_DEBUG_KMS("Need %d free Y planes for planar YUV\n",
1ab554b0
ML
11497 hweight8(crtc_state->nv12_planes));
11498
11499 return -EINVAL;
11500 }
11501
11502 plane_state->linked_plane = linked;
11503
11504 linked_state->slave = true;
11505 linked_state->linked_plane = plane;
11506 crtc_state->active_planes |= BIT(linked->id);
afbd8a72 11507 crtc_state->update_planes |= BIT(linked->id);
1ab554b0
ML
11508 DRM_DEBUG_KMS("Using %s as Y plane for %s\n", linked->base.name, plane->base.name);
11509 }
11510
11511 return 0;
11512}
11513
638d87c4
VS
11514static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state)
11515{
11516 struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->base.crtc);
11517 struct intel_atomic_state *state =
11518 to_intel_atomic_state(new_crtc_state->base.state);
11519 const struct intel_crtc_state *old_crtc_state =
11520 intel_atomic_get_old_crtc_state(state, crtc);
11521
11522 return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes;
11523}
11524
6d3a1ce7
ML
11525static int intel_crtc_atomic_check(struct drm_crtc *crtc,
11526 struct drm_crtc_state *crtc_state)
11527{
cd1d3ee9 11528 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
6d3a1ce7 11529 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
cf5a15be
ML
11530 struct intel_crtc_state *pipe_config =
11531 to_intel_crtc_state(crtc_state);
4d20cd86 11532 int ret;
6d3a1ce7
ML
11533 bool mode_changed = needs_modeset(crtc_state);
11534
440e84a5
VS
11535 if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv) &&
11536 mode_changed && !crtc_state->active)
caed361d 11537 pipe_config->update_wm_post = true;
eddfcbcd 11538
ad421372
ML
11539 if (mode_changed && crtc_state->enable &&
11540 dev_priv->display.crtc_compute_clock &&
8106ddbd 11541 !WARN_ON(pipe_config->shared_dpll)) {
ad421372
ML
11542 ret = dev_priv->display.crtc_compute_clock(intel_crtc,
11543 pipe_config);
11544 if (ret)
11545 return ret;
11546 }
11547
638d87c4
VS
11548 /*
11549 * May need to update pipe gamma enable bits
11550 * when C8 planes are getting enabled/disabled.
11551 */
11552 if (c8_planes_changed(pipe_config))
11553 crtc_state->color_mgmt_changed = true;
11554
d168da8c
JRS
11555 if (mode_changed || pipe_config->update_pipe ||
11556 crtc_state->color_mgmt_changed) {
302da0cd 11557 ret = intel_color_check(pipe_config);
82cf435b
LL
11558 if (ret)
11559 return ret;
11560 }
11561
e435d6e5 11562 ret = 0;
86c8bbbe 11563 if (dev_priv->display.compute_pipe_wm) {
e3bddded 11564 ret = dev_priv->display.compute_pipe_wm(pipe_config);
ed4a6a7c
MR
11565 if (ret) {
11566 DRM_DEBUG_KMS("Target pipe watermarks are invalid\n");
11567 return ret;
11568 }
11569 }
11570
f255c624 11571 if (dev_priv->display.compute_intermediate_wm) {
ed4a6a7c
MR
11572 if (WARN_ON(!dev_priv->display.compute_pipe_wm))
11573 return 0;
11574
11575 /*
11576 * Calculate 'intermediate' watermarks that satisfy both the
11577 * old state and the new state. We can program these
11578 * immediately.
11579 */
cd1d3ee9 11580 ret = dev_priv->display.compute_intermediate_wm(pipe_config);
ed4a6a7c
MR
11581 if (ret) {
11582 DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n");
86c8bbbe 11583 return ret;
ed4a6a7c 11584 }
86c8bbbe
MR
11585 }
11586
6315b5d3 11587 if (INTEL_GEN(dev_priv) >= 9) {
2c5c415c 11588 if (mode_changed || pipe_config->update_pipe)
e435d6e5
ML
11589 ret = skl_update_scaler_crtc(pipe_config);
11590
1ab554b0
ML
11591 if (!ret)
11592 ret = icl_check_nv12_planes(pipe_config);
73b0ca8e
MK
11593 if (!ret)
11594 ret = skl_check_pipe_max_pixel_rate(intel_crtc,
11595 pipe_config);
e435d6e5 11596 if (!ret)
6ebc6923 11597 ret = intel_atomic_setup_scalers(dev_priv, intel_crtc,
e435d6e5
ML
11598 pipe_config);
11599 }
11600
24f28450
ML
11601 if (HAS_IPS(dev_priv))
11602 pipe_config->ips_enabled = hsw_compute_ips_config(pipe_config);
11603
e435d6e5 11604 return ret;
6d3a1ce7
ML
11605}
11606
65b38e0d 11607static const struct drm_crtc_helper_funcs intel_helper_funcs = {
6d3a1ce7 11608 .atomic_check = intel_crtc_atomic_check,
f6e5b160
CW
11609};
11610
d29b2f9d
ACO
11611static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
11612{
11613 struct intel_connector *connector;
f9e905ca 11614 struct drm_connector_list_iter conn_iter;
d29b2f9d 11615
f9e905ca
DV
11616 drm_connector_list_iter_begin(dev, &conn_iter);
11617 for_each_intel_connector_iter(connector, &conn_iter) {
8863dc7f 11618 if (connector->base.state->crtc)
ef196b5c 11619 drm_connector_put(&connector->base);
8863dc7f 11620
d29b2f9d
ACO
11621 if (connector->base.encoder) {
11622 connector->base.state->best_encoder =
11623 connector->base.encoder;
11624 connector->base.state->crtc =
11625 connector->base.encoder->crtc;
8863dc7f 11626
ef196b5c 11627 drm_connector_get(&connector->base);
d29b2f9d
ACO
11628 } else {
11629 connector->base.state->best_encoder = NULL;
11630 connector->base.state->crtc = NULL;
11631 }
11632 }
f9e905ca 11633 drm_connector_list_iter_end(&conn_iter);
d29b2f9d
ACO
11634}
11635
f1a12172 11636static int
bcce8d86
VS
11637compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
11638 struct intel_crtc_state *pipe_config)
050f7aeb 11639{
bcce8d86
VS
11640 struct drm_connector *connector = conn_state->connector;
11641 const struct drm_display_info *info = &connector->display_info;
f1a12172 11642 int bpp;
050f7aeb 11643
f1a12172
RS
11644 switch (conn_state->max_bpc) {
11645 case 6 ... 7:
11646 bpp = 6 * 3;
11647 break;
11648 case 8 ... 9:
11649 bpp = 8 * 3;
11650 break;
11651 case 10 ... 11:
11652 bpp = 10 * 3;
11653 break;
11654 case 12:
11655 bpp = 12 * 3;
11656 break;
11657 default:
11658 return -EINVAL;
050f7aeb
DV
11659 }
11660
f1a12172 11661 if (bpp < pipe_config->pipe_bpp) {
bcce8d86
VS
11662 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Limiting display bpp to %d instead of "
11663 "EDID bpp %d, requested bpp %d, max platform bpp %d\n",
11664 connector->base.id, connector->name,
11665 bpp, 3 * info->bpc, 3 * conn_state->max_requested_bpc,
f1a12172 11666 pipe_config->pipe_bpp);
bcce8d86 11667
f1a12172 11668 pipe_config->pipe_bpp = bpp;
050f7aeb 11669 }
bcce8d86 11670
f1a12172 11671 return 0;
050f7aeb
DV
11672}
11673
4e53c2e0 11674static int
050f7aeb 11675compute_baseline_pipe_bpp(struct intel_crtc *crtc,
5cec258b 11676 struct intel_crtc_state *pipe_config)
4e53c2e0 11677{
9beb5fea 11678 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
bcce8d86 11679 struct drm_atomic_state *state = pipe_config->base.state;
da3ced29
ACO
11680 struct drm_connector *connector;
11681 struct drm_connector_state *connector_state;
1486017f 11682 int bpp, i;
4e53c2e0 11683
9beb5fea
TU
11684 if ((IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
11685 IS_CHERRYVIEW(dev_priv)))
4e53c2e0 11686 bpp = 10*3;
9beb5fea 11687 else if (INTEL_GEN(dev_priv) >= 5)
d328c9d7
DV
11688 bpp = 12*3;
11689 else
11690 bpp = 8*3;
11691
4e53c2e0
DV
11692 pipe_config->pipe_bpp = bpp;
11693
bcce8d86 11694 /* Clamp display bpp to connector max bpp */
aa5e9b47 11695 for_each_new_connector_in_state(state, connector, connector_state, i) {
bcce8d86
VS
11696 int ret;
11697
da3ced29 11698 if (connector_state->crtc != &crtc->base)
4e53c2e0
DV
11699 continue;
11700
bcce8d86
VS
11701 ret = compute_sink_pipe_bpp(connector_state, pipe_config);
11702 if (ret)
11703 return ret;
4e53c2e0
DV
11704 }
11705
bcce8d86 11706 return 0;
4e53c2e0
DV
11707}
11708
644db711
DV
11709static void intel_dump_crtc_timings(const struct drm_display_mode *mode)
11710{
11711 DRM_DEBUG_KMS("crtc timings: %d %d %d %d %d %d %d %d %d, "
926878fb
VS
11712 "type: 0x%x flags: 0x%x\n",
11713 mode->crtc_clock,
11714 mode->crtc_hdisplay, mode->crtc_hsync_start,
11715 mode->crtc_hsync_end, mode->crtc_htotal,
11716 mode->crtc_vdisplay, mode->crtc_vsync_start,
11717 mode->crtc_vsync_end, mode->crtc_vtotal,
11718 mode->type, mode->flags);
644db711
DV
11719}
11720
f6982332 11721static inline void
926878fb
VS
11722intel_dump_m_n_config(const struct intel_crtc_state *pipe_config,
11723 const char *id, unsigned int lane_count,
11724 const struct intel_link_m_n *m_n)
f6982332 11725{
a4309657
TU
11726 DRM_DEBUG_KMS("%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
11727 id, lane_count,
f6982332
TU
11728 m_n->gmch_m, m_n->gmch_n,
11729 m_n->link_m, m_n->link_n, m_n->tu);
11730}
11731
69e89032
VS
11732static void
11733intel_dump_infoframe(struct drm_i915_private *dev_priv,
11734 const union hdmi_infoframe *frame)
11735{
11736 if ((drm_debug & DRM_UT_KMS) == 0)
11737 return;
11738
11739 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, frame);
11740}
11741
40b2be41
VS
11742#define OUTPUT_TYPE(x) [INTEL_OUTPUT_ ## x] = #x
11743
11744static const char * const output_type_str[] = {
11745 OUTPUT_TYPE(UNUSED),
11746 OUTPUT_TYPE(ANALOG),
11747 OUTPUT_TYPE(DVO),
11748 OUTPUT_TYPE(SDVO),
11749 OUTPUT_TYPE(LVDS),
11750 OUTPUT_TYPE(TVOUT),
11751 OUTPUT_TYPE(HDMI),
11752 OUTPUT_TYPE(DP),
11753 OUTPUT_TYPE(EDP),
11754 OUTPUT_TYPE(DSI),
7e732cac 11755 OUTPUT_TYPE(DDI),
40b2be41
VS
11756 OUTPUT_TYPE(DP_MST),
11757};
11758
11759#undef OUTPUT_TYPE
11760
11761static void snprintf_output_types(char *buf, size_t len,
11762 unsigned int output_types)
11763{
11764 char *str = buf;
11765 int i;
11766
11767 str[0] = '\0';
11768
11769 for (i = 0; i < ARRAY_SIZE(output_type_str); i++) {
11770 int r;
11771
11772 if ((output_types & BIT(i)) == 0)
11773 continue;
11774
11775 r = snprintf(str, len, "%s%s",
11776 str != buf ? "," : "", output_type_str[i]);
11777 if (r >= len)
11778 break;
11779 str += r;
11780 len -= r;
11781
11782 output_types &= ~BIT(i);
11783 }
11784
11785 WARN_ON_ONCE(output_types != 0);
11786}
11787
d9facae6
SS
11788static const char * const output_format_str[] = {
11789 [INTEL_OUTPUT_FORMAT_INVALID] = "Invalid",
11790 [INTEL_OUTPUT_FORMAT_RGB] = "RGB",
33b7f3ee 11791 [INTEL_OUTPUT_FORMAT_YCBCR420] = "YCBCR4:2:0",
8c79f844 11792 [INTEL_OUTPUT_FORMAT_YCBCR444] = "YCBCR4:4:4",
d9facae6
SS
11793};
11794
11795static const char *output_formats(enum intel_output_format format)
11796{
33b7f3ee 11797 if (format >= ARRAY_SIZE(output_format_str))
d9facae6
SS
11798 format = INTEL_OUTPUT_FORMAT_INVALID;
11799 return output_format_str[format];
11800}
11801
10d75f54
VS
11802static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
11803{
11804 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
11805 const struct drm_framebuffer *fb = plane_state->base.fb;
11806 struct drm_format_name_buf format_name;
11807
11808 if (!fb) {
11809 DRM_DEBUG_KMS("[PLANE:%d:%s] fb: [NOFB], visible: %s\n",
11810 plane->base.base.id, plane->base.name,
11811 yesno(plane_state->base.visible));
11812 return;
11813 }
11814
11815 DRM_DEBUG_KMS("[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %s, visible: %s\n",
11816 plane->base.base.id, plane->base.name,
11817 fb->base.id, fb->width, fb->height,
11818 drm_get_format_name(fb->format->format, &format_name),
11819 yesno(plane_state->base.visible));
11820 DRM_DEBUG_KMS("\trotation: 0x%x, scaler: %d\n",
11821 plane_state->base.rotation, plane_state->scaler_id);
11822 if (plane_state->base.visible)
11823 DRM_DEBUG_KMS("\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n",
11824 DRM_RECT_FP_ARG(&plane_state->base.src),
11825 DRM_RECT_ARG(&plane_state->base.dst));
11826}
11827
926878fb 11828static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
10d75f54 11829 struct intel_atomic_state *state,
c0b03411
DV
11830 const char *context)
11831{
1b9994c7 11832 struct intel_crtc *crtc = to_intel_crtc(pipe_config->base.crtc);
10d75f54
VS
11833 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
11834 const struct intel_plane_state *plane_state;
11835 struct intel_plane *plane;
40b2be41 11836 char buf[64];
10d75f54 11837 int i;
6a60cd87 11838
a0e70104
VS
11839 DRM_DEBUG_KMS("[CRTC:%d:%s] enable: %s %s\n",
11840 crtc->base.base.id, crtc->base.name,
11841 yesno(pipe_config->base.enable), context);
c0b03411 11842
10d75f54
VS
11843 if (!pipe_config->base.enable)
11844 goto dump_planes;
11845
40b2be41 11846 snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
64f6dbab
VS
11847 DRM_DEBUG_KMS("active: %s, output_types: %s (0x%x), output format: %s\n",
11848 yesno(pipe_config->base.active),
11849 buf, pipe_config->output_types,
d9facae6
SS
11850 output_formats(pipe_config->output_format));
11851
2c89429e
TU
11852 DRM_DEBUG_KMS("cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
11853 transcoder_name(pipe_config->cpu_transcoder),
c0b03411 11854 pipe_config->pipe_bpp, pipe_config->dither);
a4309657
TU
11855
11856 if (pipe_config->has_pch_encoder)
11857 intel_dump_m_n_config(pipe_config, "fdi",
11858 pipe_config->fdi_lanes,
11859 &pipe_config->fdi_m_n);
f6982332
TU
11860
11861 if (intel_crtc_has_dp_encoder(pipe_config)) {
a4309657
TU
11862 intel_dump_m_n_config(pipe_config, "dp m_n",
11863 pipe_config->lane_count, &pipe_config->dp_m_n);
d806e682
TU
11864 if (pipe_config->has_drrs)
11865 intel_dump_m_n_config(pipe_config, "dp m2_n2",
11866 pipe_config->lane_count,
11867 &pipe_config->dp_m2_n2);
f6982332 11868 }
b95af8be 11869
64f6dbab
VS
11870 DRM_DEBUG_KMS("audio: %i, infoframes: %i, infoframes enabled: 0x%x\n",
11871 pipe_config->has_audio, pipe_config->has_infoframe,
69e89032
VS
11872 pipe_config->infoframes.enable);
11873
11874 if (pipe_config->infoframes.enable &
11875 intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL))
11876 DRM_DEBUG_KMS("GCP: 0x%x\n", pipe_config->infoframes.gcp);
11877 if (pipe_config->infoframes.enable &
11878 intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI))
11879 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.avi);
11880 if (pipe_config->infoframes.enable &
11881 intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD))
11882 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.spd);
11883 if (pipe_config->infoframes.enable &
11884 intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR))
11885 intel_dump_infoframe(dev_priv, &pipe_config->infoframes.hdmi);
11886
c0b03411 11887 DRM_DEBUG_KMS("requested mode:\n");
2d112de7 11888 drm_mode_debug_printmodeline(&pipe_config->base.mode);
c0b03411 11889 DRM_DEBUG_KMS("adjusted mode:\n");
2d112de7
ACO
11890 drm_mode_debug_printmodeline(&pipe_config->base.adjusted_mode);
11891 intel_dump_crtc_timings(&pipe_config->base.adjusted_mode);
a7d1b3f4 11892 DRM_DEBUG_KMS("port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
2c89429e 11893 pipe_config->port_clock,
a7d1b3f4
VS
11894 pipe_config->pipe_src_w, pipe_config->pipe_src_h,
11895 pipe_config->pixel_rate);
dd2f616d
TU
11896
11897 if (INTEL_GEN(dev_priv) >= 9)
11898 DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
11899 crtc->num_scalers,
11900 pipe_config->scaler_state.scaler_users,
11901 pipe_config->scaler_state.scaler_id);
a74f8375 11902
b2ae318a 11903 if (HAS_GMCH(dev_priv))
a74f8375
TU
11904 DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
11905 pipe_config->gmch_pfit.control,
11906 pipe_config->gmch_pfit.pgm_ratios,
11907 pipe_config->gmch_pfit.lvds_border_bits);
11908 else
dc0c0bfe 11909 DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s, force thru: %s\n",
a74f8375
TU
11910 pipe_config->pch_pfit.pos,
11911 pipe_config->pch_pfit.size,
dc0c0bfe
VS
11912 enableddisabled(pipe_config->pch_pfit.enabled),
11913 yesno(pipe_config->pch_pfit.force_thru));
a74f8375 11914
2c89429e
TU
11915 DRM_DEBUG_KMS("ips: %i, double wide: %i\n",
11916 pipe_config->ips_enabled, pipe_config->double_wide);
6a60cd87 11917
f50b79f0 11918 intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state);
415ff0f6 11919
10d75f54
VS
11920dump_planes:
11921 if (!state)
11922 return;
6a60cd87 11923
10d75f54
VS
11924 for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
11925 if (plane->pipe == crtc->pipe)
11926 intel_dump_plane_state(plane_state);
6a60cd87 11927 }
c0b03411
DV
11928}
11929
85829eb5 11930static bool check_digital_port_conflicts(struct intel_atomic_state *state)
00f0b378 11931{
85829eb5 11932 struct drm_device *dev = state->base.dev;
da3ced29 11933 struct drm_connector *connector;
2fd96b41 11934 struct drm_connector_list_iter conn_iter;
00f0b378 11935 unsigned int used_ports = 0;
477321e0 11936 unsigned int used_mst_ports = 0;
bd67a8c1 11937 bool ret = true;
00f0b378
VS
11938
11939 /*
11940 * Walk the connector list instead of the encoder
11941 * list to detect the problem on ddi platforms
11942 * where there's just one encoder per digital port.
11943 */
2fd96b41
GP
11944 drm_connector_list_iter_begin(dev, &conn_iter);
11945 drm_for_each_connector_iter(connector, &conn_iter) {
0bff4858
VS
11946 struct drm_connector_state *connector_state;
11947 struct intel_encoder *encoder;
11948
85829eb5
VS
11949 connector_state =
11950 drm_atomic_get_new_connector_state(&state->base,
11951 connector);
0bff4858
VS
11952 if (!connector_state)
11953 connector_state = connector->state;
11954
5448a00d 11955 if (!connector_state->best_encoder)
00f0b378
VS
11956 continue;
11957
5448a00d
ACO
11958 encoder = to_intel_encoder(connector_state->best_encoder);
11959
11960 WARN_ON(!connector_state->crtc);
00f0b378
VS
11961
11962 switch (encoder->type) {
11963 unsigned int port_mask;
7e732cac 11964 case INTEL_OUTPUT_DDI:
4f8036a2 11965 if (WARN_ON(!HAS_DDI(to_i915(dev))))
00f0b378 11966 break;
f0d759f0 11967 /* else: fall through */
cca0502b 11968 case INTEL_OUTPUT_DP:
00f0b378
VS
11969 case INTEL_OUTPUT_HDMI:
11970 case INTEL_OUTPUT_EDP:
8f4f2797 11971 port_mask = 1 << encoder->port;
00f0b378
VS
11972
11973 /* the same port mustn't appear more than once */
11974 if (used_ports & port_mask)
bd67a8c1 11975 ret = false;
00f0b378
VS
11976
11977 used_ports |= port_mask;
477321e0
VS
11978 break;
11979 case INTEL_OUTPUT_DP_MST:
11980 used_mst_ports |=
8f4f2797 11981 1 << encoder->port;
477321e0 11982 break;
00f0b378
VS
11983 default:
11984 break;
11985 }
11986 }
2fd96b41 11987 drm_connector_list_iter_end(&conn_iter);
00f0b378 11988
477321e0
VS
11989 /* can't mix MST and SST/HDMI on the same port */
11990 if (used_ports & used_mst_ports)
11991 return false;
11992
bd67a8c1 11993 return ret;
00f0b378
VS
11994}
11995
f81b845f 11996static int
83a57153
ACO
11997clear_intel_crtc_state(struct intel_crtc_state *crtc_state)
11998{
ff32c54e
VS
11999 struct drm_i915_private *dev_priv =
12000 to_i915(crtc_state->base.crtc->dev);
f81b845f
CW
12001 struct intel_crtc_state *saved_state;
12002
12003 saved_state = kzalloc(sizeof(*saved_state), GFP_KERNEL);
12004 if (!saved_state)
12005 return -ENOMEM;
83a57153 12006
7546a384
ACO
12007 /* FIXME: before the switch to atomic started, a new pipe_config was
12008 * kzalloc'd. Code that depends on any field being zero should be
12009 * fixed, so that the crtc_state can be safely duplicated. For now,
12010 * only fields that are know to not cause problems are preserved. */
12011
f81b845f
CW
12012 saved_state->scaler_state = crtc_state->scaler_state;
12013 saved_state->shared_dpll = crtc_state->shared_dpll;
12014 saved_state->dpll_hw_state = crtc_state->dpll_hw_state;
a8ebf607 12015 saved_state->crc_enabled = crtc_state->crc_enabled;
04548cba
VS
12016 if (IS_G4X(dev_priv) ||
12017 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
f81b845f 12018 saved_state->wm = crtc_state->wm;
4978cc93 12019
d2fa80a5
CW
12020 /* Keep base drm_crtc_state intact, only clear our extended struct */
12021 BUILD_BUG_ON(offsetof(struct intel_crtc_state, base));
f81b845f 12022 memcpy(&crtc_state->base + 1, &saved_state->base + 1,
d2fa80a5 12023 sizeof(*crtc_state) - sizeof(crtc_state->base));
4978cc93 12024
f81b845f
CW
12025 kfree(saved_state);
12026 return 0;
83a57153
ACO
12027}
12028
548ee15b 12029static int
f239b799 12030intel_modeset_pipe_config(struct intel_crtc_state *pipe_config)
ee7b9f93 12031{
f239b799 12032 struct drm_crtc *crtc = pipe_config->base.crtc;
b359283a 12033 struct drm_atomic_state *state = pipe_config->base.state;
7758a113 12034 struct intel_encoder *encoder;
da3ced29 12035 struct drm_connector *connector;
0b901879 12036 struct drm_connector_state *connector_state;
d26592c6 12037 int base_bpp, ret;
0b901879 12038 int i;
e29c22c0 12039 bool retry = true;
ee7b9f93 12040
f81b845f
CW
12041 ret = clear_intel_crtc_state(pipe_config);
12042 if (ret)
12043 return ret;
7758a113 12044
e143a21c
DV
12045 pipe_config->cpu_transcoder =
12046 (enum transcoder) to_intel_crtc(crtc)->pipe;
b8cecdf5 12047
2960bc9c
ID
12048 /*
12049 * Sanitize sync polarity flags based on requested ones. If neither
12050 * positive or negative polarity is requested, treat this as meaning
12051 * negative polarity.
12052 */
2d112de7 12053 if (!(pipe_config->base.adjusted_mode.flags &
2960bc9c 12054 (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NHSYNC)))
2d112de7 12055 pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NHSYNC;
2960bc9c 12056
2d112de7 12057 if (!(pipe_config->base.adjusted_mode.flags &
2960bc9c 12058 (DRM_MODE_FLAG_PVSYNC | DRM_MODE_FLAG_NVSYNC)))
2d112de7 12059 pipe_config->base.adjusted_mode.flags |= DRM_MODE_FLAG_NVSYNC;
2960bc9c 12060
bcce8d86
VS
12061 ret = compute_baseline_pipe_bpp(to_intel_crtc(crtc),
12062 pipe_config);
12063 if (ret)
12064 return ret;
12065
12066 base_bpp = pipe_config->pipe_bpp;
4e53c2e0 12067
e41a56be
VS
12068 /*
12069 * Determine the real pipe dimensions. Note that stereo modes can
12070 * increase the actual pipe size due to the frame doubling and
12071 * insertion of additional space for blanks between the frame. This
12072 * is stored in the crtc timings. We use the requested mode to do this
12073 * computation to clearly distinguish it from the adjusted mode, which
12074 * can be changed by the connectors in the below retry loop.
12075 */
196cd5d3 12076 drm_mode_get_hv_timing(&pipe_config->base.mode,
ecb7e16b
GP
12077 &pipe_config->pipe_src_w,
12078 &pipe_config->pipe_src_h);
e41a56be 12079
aa5e9b47 12080 for_each_new_connector_in_state(state, connector, connector_state, i) {
253c84c8
VS
12081 if (connector_state->crtc != crtc)
12082 continue;
12083
12084 encoder = to_intel_encoder(connector_state->best_encoder);
12085
e25148d0
VS
12086 if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
12087 DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
d26592c6 12088 return -EINVAL;
e25148d0
VS
12089 }
12090
253c84c8
VS
12091 /*
12092 * Determine output_types before calling the .compute_config()
12093 * hooks so that the hooks can use this information safely.
12094 */
7e732cac
VS
12095 if (encoder->compute_output_type)
12096 pipe_config->output_types |=
12097 BIT(encoder->compute_output_type(encoder, pipe_config,
12098 connector_state));
12099 else
12100 pipe_config->output_types |= BIT(encoder->type);
253c84c8
VS
12101 }
12102
e29c22c0 12103encoder_retry:
ef1b460d 12104 /* Ensure the port clock defaults are reset when retrying. */
ff9a6750 12105 pipe_config->port_clock = 0;
ef1b460d 12106 pipe_config->pixel_multiplier = 1;
ff9a6750 12107
135c81b8 12108 /* Fill in default crtc timings, allow encoders to overwrite them. */
2d112de7
ACO
12109 drm_mode_set_crtcinfo(&pipe_config->base.adjusted_mode,
12110 CRTC_STEREO_DOUBLE);
135c81b8 12111
7758a113
DV
12112 /* Pass our mode to the connectors and the CRTC to give them a chance to
12113 * adjust it according to limitations or connector properties, and also
12114 * a chance to reject the mode entirely.
47f1c6c9 12115 */
aa5e9b47 12116 for_each_new_connector_in_state(state, connector, connector_state, i) {
0b901879 12117 if (connector_state->crtc != crtc)
7758a113 12118 continue;
7ae89233 12119
0b901879 12120 encoder = to_intel_encoder(connector_state->best_encoder);
204474a6
LP
12121 ret = encoder->compute_config(encoder, pipe_config,
12122 connector_state);
12123 if (ret < 0) {
12124 if (ret != -EDEADLK)
12125 DRM_DEBUG_KMS("Encoder config failure: %d\n",
12126 ret);
12127 return ret;
7758a113 12128 }
ee7b9f93 12129 }
47f1c6c9 12130
ff9a6750
DV
12131 /* Set default port clock if not overwritten by the encoder. Needs to be
12132 * done afterwards in case the encoder adjusts the mode. */
12133 if (!pipe_config->port_clock)
2d112de7 12134 pipe_config->port_clock = pipe_config->base.adjusted_mode.crtc_clock
241bfc38 12135 * pipe_config->pixel_multiplier;
ff9a6750 12136
a43f6e0f 12137 ret = intel_crtc_compute_config(to_intel_crtc(crtc), pipe_config);
8e2b4dff 12138 if (ret == -EDEADLK)
d26592c6 12139 return ret;
e29c22c0 12140 if (ret < 0) {
7758a113 12141 DRM_DEBUG_KMS("CRTC fixup failed\n");
d26592c6 12142 return ret;
ee7b9f93 12143 }
e29c22c0
DV
12144
12145 if (ret == RETRY) {
d26592c6
VS
12146 if (WARN(!retry, "loop in pipe configuration computation\n"))
12147 return -EINVAL;
e29c22c0
DV
12148
12149 DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
12150 retry = false;
12151 goto encoder_retry;
12152 }
12153
e8fa4270 12154 /* Dithering seems to not pass-through bits correctly when it should, so
611032bf
MN
12155 * only enable it on 6bpc panels and when its not a compliance
12156 * test requesting 6bpc video pattern.
12157 */
12158 pipe_config->dither = (pipe_config->pipe_bpp == 6*3) &&
12159 !pipe_config->dither_force_disable;
62f0ace5 12160 DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
d328c9d7 12161 base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
4e53c2e0 12162
d26592c6 12163 return 0;
ee7b9f93 12164}
47f1c6c9 12165
2c1c5525 12166bool intel_fuzzy_clock_check(int clock1, int clock2)
f1f644dc 12167{
3bd26263 12168 int diff;
f1f644dc
JB
12169
12170 if (clock1 == clock2)
12171 return true;
12172
12173 if (!clock1 || !clock2)
12174 return false;
12175
12176 diff = abs(clock1 - clock2);
12177
12178 if (((((diff + clock1 + clock2) * 100)) / (clock1 + clock2)) < 105)
12179 return true;
12180
12181 return false;
12182}
12183
cfb23ed6
ML
12184static bool
12185intel_compare_m_n(unsigned int m, unsigned int n,
12186 unsigned int m2, unsigned int n2,
12187 bool exact)
12188{
12189 if (m == m2 && n == n2)
12190 return true;
12191
12192 if (exact || !m || !n || !m2 || !n2)
12193 return false;
12194
12195 BUILD_BUG_ON(DATA_LINK_M_N_MASK > INT_MAX);
12196
31d10b57
ML
12197 if (n > n2) {
12198 while (n > n2) {
cfb23ed6
ML
12199 m2 <<= 1;
12200 n2 <<= 1;
12201 }
31d10b57
ML
12202 } else if (n < n2) {
12203 while (n < n2) {
cfb23ed6
ML
12204 m <<= 1;
12205 n <<= 1;
12206 }
12207 }
12208
31d10b57
ML
12209 if (n != n2)
12210 return false;
12211
12212 return intel_fuzzy_clock_check(m, m2);
cfb23ed6
ML
12213}
12214
12215static bool
12216intel_compare_link_m_n(const struct intel_link_m_n *m_n,
12217 struct intel_link_m_n *m2_n2,
12218 bool adjust)
12219{
12220 if (m_n->tu == m2_n2->tu &&
12221 intel_compare_m_n(m_n->gmch_m, m_n->gmch_n,
12222 m2_n2->gmch_m, m2_n2->gmch_n, !adjust) &&
12223 intel_compare_m_n(m_n->link_m, m_n->link_n,
12224 m2_n2->link_m, m2_n2->link_n, !adjust)) {
12225 if (adjust)
12226 *m2_n2 = *m_n;
12227
12228 return true;
12229 }
12230
12231 return false;
12232}
12233
6454cb9f
VS
12234static bool
12235intel_compare_infoframe(const union hdmi_infoframe *a,
12236 const union hdmi_infoframe *b)
12237{
12238 return memcmp(a, b, sizeof(*a)) == 0;
12239}
12240
12241static void
12242pipe_config_infoframe_err(struct drm_i915_private *dev_priv,
12243 bool adjust, const char *name,
12244 const union hdmi_infoframe *a,
12245 const union hdmi_infoframe *b)
12246{
12247 if (adjust) {
12248 if ((drm_debug & DRM_UT_KMS) == 0)
12249 return;
12250
12251 drm_dbg(DRM_UT_KMS, "mismatch in %s infoframe", name);
12252 drm_dbg(DRM_UT_KMS, "expected:");
12253 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a);
12254 drm_dbg(DRM_UT_KMS, "found");
12255 hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b);
12256 } else {
12257 drm_err("mismatch in %s infoframe", name);
12258 drm_err("expected:");
12259 hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a);
12260 drm_err("found");
12261 hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b);
12262 }
12263}
12264
4e8048f8
TU
12265static void __printf(3, 4)
12266pipe_config_err(bool adjust, const char *name, const char *format, ...)
12267{
4e8048f8
TU
12268 struct va_format vaf;
12269 va_list args;
12270
4e8048f8
TU
12271 va_start(args, format);
12272 vaf.fmt = format;
12273 vaf.va = &args;
12274
99a95487
JP
12275 if (adjust)
12276 drm_dbg(DRM_UT_KMS, "mismatch in %s %pV", name, &vaf);
12277 else
12278 drm_err("mismatch in %s %pV", name, &vaf);
4e8048f8
TU
12279
12280 va_end(args);
12281}
12282
3d6535cb
HG
12283static bool fastboot_enabled(struct drm_i915_private *dev_priv)
12284{
12285 if (i915_modparams.fastboot != -1)
12286 return i915_modparams.fastboot;
12287
12288 /* Enable fastboot by default on Skylake and newer */
7360c9f6
HG
12289 if (INTEL_GEN(dev_priv) >= 9)
12290 return true;
12291
12292 /* Enable fastboot by default on VLV and CHV */
12293 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
12294 return true;
12295
12296 /* Disabled by default on all others */
12297 return false;
3d6535cb
HG
12298}
12299
0e8ffe1b 12300static bool
6315b5d3 12301intel_pipe_config_compare(struct drm_i915_private *dev_priv,
5cec258b 12302 struct intel_crtc_state *current_config,
cfb23ed6
ML
12303 struct intel_crtc_state *pipe_config,
12304 bool adjust)
0e8ffe1b 12305{
cfb23ed6 12306 bool ret = true;
4493e098
ML
12307 bool fixup_inherited = adjust &&
12308 (current_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED) &&
12309 !(pipe_config->base.mode.private_flags & I915_MODE_FLAG_INHERITED);
cfb23ed6 12310
3d6535cb 12311 if (fixup_inherited && !fastboot_enabled(dev_priv)) {
d19f958d
ML
12312 DRM_DEBUG_KMS("initial modeset and fastboot not set\n");
12313 ret = false;
12314 }
12315
eadd2721 12316#define PIPE_CONF_CHECK_X(name) do { \
66e985c0 12317 if (current_config->name != pipe_config->name) { \
4e8048f8 12318 pipe_config_err(adjust, __stringify(name), \
66e985c0
DV
12319 "(expected 0x%08x, found 0x%08x)\n", \
12320 current_config->name, \
12321 pipe_config->name); \
cfb23ed6 12322 ret = false; \
eadd2721
VS
12323 } \
12324} while (0)
66e985c0 12325
eadd2721 12326#define PIPE_CONF_CHECK_I(name) do { \
08a24034 12327 if (current_config->name != pipe_config->name) { \
4e8048f8 12328 pipe_config_err(adjust, __stringify(name), \
08a24034
DV
12329 "(expected %i, found %i)\n", \
12330 current_config->name, \
12331 pipe_config->name); \
cfb23ed6 12332 ret = false; \
eadd2721
VS
12333 } \
12334} while (0)
cfb23ed6 12335
eadd2721 12336#define PIPE_CONF_CHECK_BOOL(name) do { \
d640bf79
ML
12337 if (current_config->name != pipe_config->name) { \
12338 pipe_config_err(adjust, __stringify(name), \
12339 "(expected %s, found %s)\n", \
12340 yesno(current_config->name), \
12341 yesno(pipe_config->name)); \
12342 ret = false; \
eadd2721
VS
12343 } \
12344} while (0)
d640bf79 12345
4493e098
ML
12346/*
12347 * Checks state where we only read out the enabling, but not the entire
12348 * state itself (like full infoframes or ELD for audio). These states
12349 * require a full modeset on bootup to fix up.
12350 */
eadd2721 12351#define PIPE_CONF_CHECK_BOOL_INCOMPLETE(name) do { \
4493e098
ML
12352 if (!fixup_inherited || (!current_config->name && !pipe_config->name)) { \
12353 PIPE_CONF_CHECK_BOOL(name); \
12354 } else { \
12355 pipe_config_err(adjust, __stringify(name), \
12356 "unable to verify whether state matches exactly, forcing modeset (expected %s, found %s)\n", \
12357 yesno(current_config->name), \
12358 yesno(pipe_config->name)); \
12359 ret = false; \
eadd2721
VS
12360 } \
12361} while (0)
4493e098 12362
eadd2721 12363#define PIPE_CONF_CHECK_P(name) do { \
8106ddbd 12364 if (current_config->name != pipe_config->name) { \
4e8048f8 12365 pipe_config_err(adjust, __stringify(name), \
8106ddbd
ACO
12366 "(expected %p, found %p)\n", \
12367 current_config->name, \
12368 pipe_config->name); \
12369 ret = false; \
eadd2721
VS
12370 } \
12371} while (0)
8106ddbd 12372
eadd2721 12373#define PIPE_CONF_CHECK_M_N(name) do { \
cfb23ed6
ML
12374 if (!intel_compare_link_m_n(&current_config->name, \
12375 &pipe_config->name,\
12376 adjust)) { \
4e8048f8 12377 pipe_config_err(adjust, __stringify(name), \
cfb23ed6
ML
12378 "(expected tu %i gmch %i/%i link %i/%i, " \
12379 "found tu %i, gmch %i/%i link %i/%i)\n", \
12380 current_config->name.tu, \
12381 current_config->name.gmch_m, \
12382 current_config->name.gmch_n, \
12383 current_config->name.link_m, \
12384 current_config->name.link_n, \
12385 pipe_config->name.tu, \
12386 pipe_config->name.gmch_m, \
12387 pipe_config->name.gmch_n, \
12388 pipe_config->name.link_m, \
12389 pipe_config->name.link_n); \
12390 ret = false; \
eadd2721
VS
12391 } \
12392} while (0)
cfb23ed6 12393
55c561a7
DV
12394/* This is required for BDW+ where there is only one set of registers for
12395 * switching between high and low RR.
12396 * This macro can be used whenever a comparison has to be made between one
12397 * hw state and multiple sw state variables.
12398 */
eadd2721 12399#define PIPE_CONF_CHECK_M_N_ALT(name, alt_name) do { \
cfb23ed6
ML
12400 if (!intel_compare_link_m_n(&current_config->name, \
12401 &pipe_config->name, adjust) && \
12402 !intel_compare_link_m_n(&current_config->alt_name, \
12403 &pipe_config->name, adjust)) { \
4e8048f8 12404 pipe_config_err(adjust, __stringify(name), \
cfb23ed6
ML
12405 "(expected tu %i gmch %i/%i link %i/%i, " \
12406 "or tu %i gmch %i/%i link %i/%i, " \
12407 "found tu %i, gmch %i/%i link %i/%i)\n", \
12408 current_config->name.tu, \
12409 current_config->name.gmch_m, \
12410 current_config->name.gmch_n, \
12411 current_config->name.link_m, \
12412 current_config->name.link_n, \
12413 current_config->alt_name.tu, \
12414 current_config->alt_name.gmch_m, \
12415 current_config->alt_name.gmch_n, \
12416 current_config->alt_name.link_m, \
12417 current_config->alt_name.link_n, \
12418 pipe_config->name.tu, \
12419 pipe_config->name.gmch_m, \
12420 pipe_config->name.gmch_n, \
12421 pipe_config->name.link_m, \
12422 pipe_config->name.link_n); \
12423 ret = false; \
eadd2721
VS
12424 } \
12425} while (0)
88adfff1 12426
eadd2721 12427#define PIPE_CONF_CHECK_FLAGS(name, mask) do { \
1bd1bd80 12428 if ((current_config->name ^ pipe_config->name) & (mask)) { \
4e8048f8
TU
12429 pipe_config_err(adjust, __stringify(name), \
12430 "(%x) (expected %i, found %i)\n", \
12431 (mask), \
1bd1bd80
DV
12432 current_config->name & (mask), \
12433 pipe_config->name & (mask)); \
cfb23ed6 12434 ret = false; \
eadd2721
VS
12435 } \
12436} while (0)
1bd1bd80 12437
eadd2721 12438#define PIPE_CONF_CHECK_CLOCK_FUZZY(name) do { \
5e550656 12439 if (!intel_fuzzy_clock_check(current_config->name, pipe_config->name)) { \
4e8048f8 12440 pipe_config_err(adjust, __stringify(name), \
5e550656
VS
12441 "(expected %i, found %i)\n", \
12442 current_config->name, \
12443 pipe_config->name); \
cfb23ed6 12444 ret = false; \
eadd2721
VS
12445 } \
12446} while (0)
5e550656 12447
6454cb9f
VS
12448#define PIPE_CONF_CHECK_INFOFRAME(name) do { \
12449 if (!intel_compare_infoframe(&current_config->infoframes.name, \
12450 &pipe_config->infoframes.name)) { \
12451 pipe_config_infoframe_err(dev_priv, adjust, __stringify(name), \
12452 &current_config->infoframes.name, \
12453 &pipe_config->infoframes.name); \
12454 ret = false; \
12455 } \
12456} while (0)
12457
12458#define PIPE_CONF_QUIRK(quirk) \
bb760063
DV
12459 ((current_config->quirks | pipe_config->quirks) & (quirk))
12460
eccb140b
DV
12461 PIPE_CONF_CHECK_I(cpu_transcoder);
12462
d640bf79 12463 PIPE_CONF_CHECK_BOOL(has_pch_encoder);
08a24034 12464 PIPE_CONF_CHECK_I(fdi_lanes);
cfb23ed6 12465 PIPE_CONF_CHECK_M_N(fdi_m_n);
08a24034 12466
90a6b7b0 12467 PIPE_CONF_CHECK_I(lane_count);
95a7a2ae 12468 PIPE_CONF_CHECK_X(lane_lat_optim_mask);
b95af8be 12469
6315b5d3 12470 if (INTEL_GEN(dev_priv) < 8) {
cfb23ed6
ML
12471 PIPE_CONF_CHECK_M_N(dp_m_n);
12472
cfb23ed6
ML
12473 if (current_config->has_drrs)
12474 PIPE_CONF_CHECK_M_N(dp_m2_n2);
12475 } else
12476 PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2);
eb14cb74 12477
253c84c8 12478 PIPE_CONF_CHECK_X(output_types);
a65347ba 12479
2d112de7
ACO
12480 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hdisplay);
12481 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_htotal);
12482 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_start);
12483 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hblank_end);
12484 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_start);
12485 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_hsync_end);
1bd1bd80 12486
2d112de7
ACO
12487 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vdisplay);
12488 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vtotal);
12489 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_start);
12490 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vblank_end);
12491 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_start);
12492 PIPE_CONF_CHECK_I(base.adjusted_mode.crtc_vsync_end);
1bd1bd80 12493
c93f54cf 12494 PIPE_CONF_CHECK_I(pixel_multiplier);
d9facae6 12495 PIPE_CONF_CHECK_I(output_format);
d640bf79 12496 PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
772c2a51 12497 if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
920a14b2 12498 IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
d640bf79 12499 PIPE_CONF_CHECK_BOOL(limited_color_range);
15953637 12500
d640bf79
ML
12501 PIPE_CONF_CHECK_BOOL(hdmi_scrambling);
12502 PIPE_CONF_CHECK_BOOL(hdmi_high_tmds_clock_ratio);
4493e098 12503 PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_infoframe);
6c49f241 12504
4493e098 12505 PIPE_CONF_CHECK_BOOL_INCOMPLETE(has_audio);
9ed109a7 12506
2d112de7 12507 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
1bd1bd80
DV
12508 DRM_MODE_FLAG_INTERLACE);
12509
bb760063 12510 if (!PIPE_CONF_QUIRK(PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS)) {
2d112de7 12511 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
bb760063 12512 DRM_MODE_FLAG_PHSYNC);
2d112de7 12513 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
bb760063 12514 DRM_MODE_FLAG_NHSYNC);
2d112de7 12515 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
bb760063 12516 DRM_MODE_FLAG_PVSYNC);
2d112de7 12517 PIPE_CONF_CHECK_FLAGS(base.adjusted_mode.flags,
bb760063
DV
12518 DRM_MODE_FLAG_NVSYNC);
12519 }
045ac3b5 12520
333b8ca8 12521 PIPE_CONF_CHECK_X(gmch_pfit.control);
e2ff2d4a 12522 /* pfit ratios are autocomputed by the hw on gen4+ */
6315b5d3 12523 if (INTEL_GEN(dev_priv) < 4)
7f7d8dd6 12524 PIPE_CONF_CHECK_X(gmch_pfit.pgm_ratios);
333b8ca8 12525 PIPE_CONF_CHECK_X(gmch_pfit.lvds_border_bits);
9953599b 12526
13b7648b
VS
12527 /*
12528 * Changing the EDP transcoder input mux
12529 * (A_ONOFF vs. A_ON) requires a full modeset.
12530 */
dc0c0bfe 12531 PIPE_CONF_CHECK_BOOL(pch_pfit.force_thru);
13b7648b 12532
bfd16b2a
ML
12533 if (!adjust) {
12534 PIPE_CONF_CHECK_I(pipe_src_w);
12535 PIPE_CONF_CHECK_I(pipe_src_h);
12536
d640bf79 12537 PIPE_CONF_CHECK_BOOL(pch_pfit.enabled);
bfd16b2a
ML
12538 if (current_config->pch_pfit.enabled) {
12539 PIPE_CONF_CHECK_X(pch_pfit.pos);
12540 PIPE_CONF_CHECK_X(pch_pfit.size);
12541 }
2fa2fe9a 12542
7aefe2b5 12543 PIPE_CONF_CHECK_I(scaler_state.scaler_id);
a7d1b3f4 12544 PIPE_CONF_CHECK_CLOCK_FUZZY(pixel_rate);
9d5441de
VS
12545
12546 PIPE_CONF_CHECK_X(gamma_mode);
9fdfb8e7
VS
12547 if (IS_CHERRYVIEW(dev_priv))
12548 PIPE_CONF_CHECK_X(cgm_mode);
12549 else
12550 PIPE_CONF_CHECK_X(csc_mode);
5f29ab23 12551 PIPE_CONF_CHECK_BOOL(gamma_enable);
8271b2ef 12552 PIPE_CONF_CHECK_BOOL(csc_enable);
7aefe2b5 12553 }
a1b2278e 12554
d640bf79 12555 PIPE_CONF_CHECK_BOOL(double_wide);
282740f7 12556
8106ddbd 12557 PIPE_CONF_CHECK_P(shared_dpll);
66e985c0 12558 PIPE_CONF_CHECK_X(dpll_hw_state.dpll);
8bcc2795 12559 PIPE_CONF_CHECK_X(dpll_hw_state.dpll_md);
66e985c0
DV
12560 PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
12561 PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
d452c5b6 12562 PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
00490c22 12563 PIPE_CONF_CHECK_X(dpll_hw_state.spll);
3f4cd19f
DL
12564 PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
12565 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
12566 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
2de38138
PZ
12567 PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr0);
12568 PIPE_CONF_CHECK_X(dpll_hw_state.ebb0);
12569 PIPE_CONF_CHECK_X(dpll_hw_state.ebb4);
12570 PIPE_CONF_CHECK_X(dpll_hw_state.pll0);
12571 PIPE_CONF_CHECK_X(dpll_hw_state.pll1);
12572 PIPE_CONF_CHECK_X(dpll_hw_state.pll2);
12573 PIPE_CONF_CHECK_X(dpll_hw_state.pll3);
12574 PIPE_CONF_CHECK_X(dpll_hw_state.pll6);
12575 PIPE_CONF_CHECK_X(dpll_hw_state.pll8);
12576 PIPE_CONF_CHECK_X(dpll_hw_state.pll9);
12577 PIPE_CONF_CHECK_X(dpll_hw_state.pll10);
12578 PIPE_CONF_CHECK_X(dpll_hw_state.pcsdw12);
c27e917e
PZ
12579 PIPE_CONF_CHECK_X(dpll_hw_state.mg_refclkin_ctl);
12580 PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_coreclkctl1);
12581 PIPE_CONF_CHECK_X(dpll_hw_state.mg_clktop2_hsclkctl);
12582 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div0);
12583 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_div1);
12584 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_lf);
12585 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_frac_lock);
12586 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_ssc);
12587 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_bias);
12588 PIPE_CONF_CHECK_X(dpll_hw_state.mg_pll_tdc_coldst_bias);
c0d43d62 12589
47eacbab
VS
12590 PIPE_CONF_CHECK_X(dsi_pll.ctrl);
12591 PIPE_CONF_CHECK_X(dsi_pll.div);
12592
9beb5fea 12593 if (IS_G4X(dev_priv) || INTEL_GEN(dev_priv) >= 5)
42571aef
VS
12594 PIPE_CONF_CHECK_I(pipe_bpp);
12595
2d112de7 12596 PIPE_CONF_CHECK_CLOCK_FUZZY(base.adjusted_mode.crtc_clock);
a9a7e98a 12597 PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
5e550656 12598
53e9bf5e
VS
12599 PIPE_CONF_CHECK_I(min_voltage_level);
12600
6454cb9f
VS
12601 PIPE_CONF_CHECK_X(infoframes.enable);
12602 PIPE_CONF_CHECK_X(infoframes.gcp);
12603 PIPE_CONF_CHECK_INFOFRAME(avi);
12604 PIPE_CONF_CHECK_INFOFRAME(spd);
12605 PIPE_CONF_CHECK_INFOFRAME(hdmi);
b37f588e 12606 PIPE_CONF_CHECK_INFOFRAME(drm);
6454cb9f 12607
66e985c0 12608#undef PIPE_CONF_CHECK_X
08a24034 12609#undef PIPE_CONF_CHECK_I
d640bf79 12610#undef PIPE_CONF_CHECK_BOOL
4493e098 12611#undef PIPE_CONF_CHECK_BOOL_INCOMPLETE
8106ddbd 12612#undef PIPE_CONF_CHECK_P
1bd1bd80 12613#undef PIPE_CONF_CHECK_FLAGS
5e550656 12614#undef PIPE_CONF_CHECK_CLOCK_FUZZY
bb760063 12615#undef PIPE_CONF_QUIRK
88adfff1 12616
cfb23ed6 12617 return ret;
0e8ffe1b
DV
12618}
12619
e3b247da
VS
12620static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
12621 const struct intel_crtc_state *pipe_config)
12622{
12623 if (pipe_config->has_pch_encoder) {
21a727b3 12624 int fdi_dotclock = intel_dotclock_calculate(intel_fdi_link_freq(dev_priv, pipe_config),
e3b247da
VS
12625 &pipe_config->fdi_m_n);
12626 int dotclock = pipe_config->base.adjusted_mode.crtc_clock;
12627
12628 /*
12629 * FDI already provided one idea for the dotclock.
12630 * Yell if the encoder disagrees.
12631 */
12632 WARN(!intel_fuzzy_clock_check(fdi_dotclock, dotclock),
12633 "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
12634 fdi_dotclock, dotclock);
12635 }
12636}
12637
c0ead703
ML
12638static void verify_wm_state(struct drm_crtc *crtc,
12639 struct drm_crtc_state *new_state)
08db6652 12640{
6315b5d3 12641 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
370d757d
CW
12642 struct skl_hw_state {
12643 struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
12644 struct skl_ddb_entry ddb_uv[I915_MAX_PLANES];
12645 struct skl_ddb_allocation ddb;
12646 struct skl_pipe_wm wm;
12647 } *hw;
12648 struct skl_ddb_allocation *sw_ddb;
12649 struct skl_pipe_wm *sw_wm;
3de8a14c 12650 struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
e7c84544
ML
12651 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
12652 const enum pipe pipe = intel_crtc->pipe;
3de8a14c 12653 int plane, level, max_level = ilk_wm_max_level(dev_priv);
08db6652 12654
6315b5d3 12655 if (INTEL_GEN(dev_priv) < 9 || !new_state->active)
08db6652
DL
12656 return;
12657
370d757d
CW
12658 hw = kzalloc(sizeof(*hw), GFP_KERNEL);
12659 if (!hw)
12660 return;
12661
12662 skl_pipe_wm_get_hw_state(intel_crtc, &hw->wm);
03af79e0 12663 sw_wm = &to_intel_crtc_state(new_state)->wm.skl.optimal;
3de8a14c 12664
370d757d 12665 skl_pipe_ddb_get_hw_state(intel_crtc, hw->ddb_y, hw->ddb_uv);
ff43bc37 12666
370d757d 12667 skl_ddb_get_hw_state(dev_priv, &hw->ddb);
08db6652
DL
12668 sw_ddb = &dev_priv->wm.skl_hw.ddb;
12669
370d757d
CW
12670 if (INTEL_GEN(dev_priv) >= 11 &&
12671 hw->ddb.enabled_slices != sw_ddb->enabled_slices)
12672 DRM_ERROR("mismatch in DBUF Slices (expected %u, got %u)\n",
12673 sw_ddb->enabled_slices,
12674 hw->ddb.enabled_slices);
12675
e7c84544 12676 /* planes */
8b364b41 12677 for_each_universal_plane(dev_priv, pipe, plane) {
370d757d
CW
12678 struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
12679
12680 hw_plane_wm = &hw->wm.planes[plane];
3de8a14c 12681 sw_plane_wm = &sw_wm->planes[plane];
08db6652 12682
3de8a14c 12683 /* Watermarks */
12684 for (level = 0; level <= max_level; level++) {
12685 if (skl_wm_level_equals(&hw_plane_wm->wm[level],
12686 &sw_plane_wm->wm[level]))
12687 continue;
12688
12689 DRM_ERROR("mismatch in WM pipe %c plane %d level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
12690 pipe_name(pipe), plane + 1, level,
12691 sw_plane_wm->wm[level].plane_en,
12692 sw_plane_wm->wm[level].plane_res_b,
12693 sw_plane_wm->wm[level].plane_res_l,
12694 hw_plane_wm->wm[level].plane_en,
12695 hw_plane_wm->wm[level].plane_res_b,
12696 hw_plane_wm->wm[level].plane_res_l);
12697 }
08db6652 12698
3de8a14c 12699 if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
12700 &sw_plane_wm->trans_wm)) {
12701 DRM_ERROR("mismatch in trans WM pipe %c plane %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
12702 pipe_name(pipe), plane + 1,
12703 sw_plane_wm->trans_wm.plane_en,
12704 sw_plane_wm->trans_wm.plane_res_b,
12705 sw_plane_wm->trans_wm.plane_res_l,
12706 hw_plane_wm->trans_wm.plane_en,
12707 hw_plane_wm->trans_wm.plane_res_b,
12708 hw_plane_wm->trans_wm.plane_res_l);
12709 }
12710
12711 /* DDB */
370d757d 12712 hw_ddb_entry = &hw->ddb_y[plane];
ff43bc37 12713 sw_ddb_entry = &to_intel_crtc_state(new_state)->wm.skl.plane_ddb_y[plane];
3de8a14c 12714
12715 if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
faccd994 12716 DRM_ERROR("mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n",
3de8a14c 12717 pipe_name(pipe), plane + 1,
12718 sw_ddb_entry->start, sw_ddb_entry->end,
12719 hw_ddb_entry->start, hw_ddb_entry->end);
12720 }
e7c84544 12721 }
08db6652 12722
27082493
L
12723 /*
12724 * cursor
12725 * If the cursor plane isn't active, we may not have updated it's ddb
12726 * allocation. In that case since the ddb allocation will be updated
12727 * once the plane becomes visible, we can skip this check
12728 */
cd5dcbf1 12729 if (1) {
370d757d
CW
12730 struct skl_plane_wm *hw_plane_wm, *sw_plane_wm;
12731
12732 hw_plane_wm = &hw->wm.planes[PLANE_CURSOR];
3de8a14c 12733 sw_plane_wm = &sw_wm->planes[PLANE_CURSOR];
12734
12735 /* Watermarks */
12736 for (level = 0; level <= max_level; level++) {
12737 if (skl_wm_level_equals(&hw_plane_wm->wm[level],
12738 &sw_plane_wm->wm[level]))
12739 continue;
12740
12741 DRM_ERROR("mismatch in WM pipe %c cursor level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
12742 pipe_name(pipe), level,
12743 sw_plane_wm->wm[level].plane_en,
12744 sw_plane_wm->wm[level].plane_res_b,
12745 sw_plane_wm->wm[level].plane_res_l,
12746 hw_plane_wm->wm[level].plane_en,
12747 hw_plane_wm->wm[level].plane_res_b,
12748 hw_plane_wm->wm[level].plane_res_l);
12749 }
12750
12751 if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
12752 &sw_plane_wm->trans_wm)) {
12753 DRM_ERROR("mismatch in trans WM pipe %c cursor (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
12754 pipe_name(pipe),
12755 sw_plane_wm->trans_wm.plane_en,
12756 sw_plane_wm->trans_wm.plane_res_b,
12757 sw_plane_wm->trans_wm.plane_res_l,
12758 hw_plane_wm->trans_wm.plane_en,
12759 hw_plane_wm->trans_wm.plane_res_b,
12760 hw_plane_wm->trans_wm.plane_res_l);
12761 }
12762
12763 /* DDB */
370d757d 12764 hw_ddb_entry = &hw->ddb_y[PLANE_CURSOR];
ff43bc37 12765 sw_ddb_entry = &to_intel_crtc_state(new_state)->wm.skl.plane_ddb_y[PLANE_CURSOR];
27082493 12766
3de8a14c 12767 if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
faccd994 12768 DRM_ERROR("mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n",
27082493 12769 pipe_name(pipe),
3de8a14c 12770 sw_ddb_entry->start, sw_ddb_entry->end,
12771 hw_ddb_entry->start, hw_ddb_entry->end);
27082493 12772 }
08db6652 12773 }
370d757d
CW
12774
12775 kfree(hw);
08db6652
DL
12776}
12777
91d1b4bd 12778static void
677100ce
ML
12779verify_connector_state(struct drm_device *dev,
12780 struct drm_atomic_state *state,
12781 struct drm_crtc *crtc)
8af6cf88 12782{
35dd3c64 12783 struct drm_connector *connector;
aa5e9b47 12784 struct drm_connector_state *new_conn_state;
677100ce 12785 int i;
8af6cf88 12786
aa5e9b47 12787 for_each_new_connector_in_state(state, connector, new_conn_state, i) {
35dd3c64 12788 struct drm_encoder *encoder = connector->encoder;
749d98b8 12789 struct drm_crtc_state *crtc_state = NULL;
ad3c558f 12790
aa5e9b47 12791 if (new_conn_state->crtc != crtc)
e7c84544
ML
12792 continue;
12793
749d98b8
ML
12794 if (crtc)
12795 crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
12796
12797 intel_connector_verify_state(crtc_state, new_conn_state);
8af6cf88 12798
aa5e9b47 12799 I915_STATE_WARN(new_conn_state->best_encoder != encoder,
35dd3c64 12800 "connector's atomic encoder doesn't match legacy encoder\n");
8af6cf88 12801 }
91d1b4bd
DV
12802}
12803
12804static void
86b04268 12805verify_encoder_state(struct drm_device *dev, struct drm_atomic_state *state)
91d1b4bd
DV
12806{
12807 struct intel_encoder *encoder;
86b04268
DV
12808 struct drm_connector *connector;
12809 struct drm_connector_state *old_conn_state, *new_conn_state;
12810 int i;
8af6cf88 12811
b2784e15 12812 for_each_intel_encoder(dev, encoder) {
86b04268 12813 bool enabled = false, found = false;
4d20cd86 12814 enum pipe pipe;
8af6cf88
DV
12815
12816 DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
12817 encoder->base.base.id,
8e329a03 12818 encoder->base.name);
8af6cf88 12819
86b04268
DV
12820 for_each_oldnew_connector_in_state(state, connector, old_conn_state,
12821 new_conn_state, i) {
12822 if (old_conn_state->best_encoder == &encoder->base)
12823 found = true;
12824
12825 if (new_conn_state->best_encoder != &encoder->base)
8af6cf88 12826 continue;
86b04268 12827 found = enabled = true;
ad3c558f 12828
86b04268 12829 I915_STATE_WARN(new_conn_state->crtc !=
ad3c558f
ML
12830 encoder->base.crtc,
12831 "connector's crtc doesn't match encoder crtc\n");
8af6cf88 12832 }
86b04268
DV
12833
12834 if (!found)
12835 continue;
0e32b39c 12836
e2c719b7 12837 I915_STATE_WARN(!!encoder->base.crtc != enabled,
8af6cf88
DV
12838 "encoder's enabled state mismatch "
12839 "(expected %i, found %i)\n",
12840 !!encoder->base.crtc, enabled);
7c60d198
ML
12841
12842 if (!encoder->base.crtc) {
4d20cd86 12843 bool active;
7c60d198 12844
4d20cd86
ML
12845 active = encoder->get_hw_state(encoder, &pipe);
12846 I915_STATE_WARN(active,
12847 "encoder detached but still enabled on pipe %c.\n",
12848 pipe_name(pipe));
7c60d198 12849 }
8af6cf88 12850 }
91d1b4bd
DV
12851}
12852
12853static void
c0ead703
ML
12854verify_crtc_state(struct drm_crtc *crtc,
12855 struct drm_crtc_state *old_crtc_state,
12856 struct drm_crtc_state *new_crtc_state)
91d1b4bd 12857{
e7c84544 12858 struct drm_device *dev = crtc->dev;
fac5e23e 12859 struct drm_i915_private *dev_priv = to_i915(dev);
91d1b4bd 12860 struct intel_encoder *encoder;
e7c84544
ML
12861 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
12862 struct intel_crtc_state *pipe_config, *sw_config;
12863 struct drm_atomic_state *old_state;
12864 bool active;
045ac3b5 12865
e7c84544 12866 old_state = old_crtc_state->state;
ec2dc6a0 12867 __drm_atomic_helper_crtc_destroy_state(old_crtc_state);
e7c84544
ML
12868 pipe_config = to_intel_crtc_state(old_crtc_state);
12869 memset(pipe_config, 0, sizeof(*pipe_config));
12870 pipe_config->base.crtc = crtc;
12871 pipe_config->base.state = old_state;
8af6cf88 12872
78108b7c 12873 DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
8af6cf88 12874
e7c84544 12875 active = dev_priv->display.get_pipe_config(intel_crtc, pipe_config);
d62cf62a 12876
e56134bc
VS
12877 /* we keep both pipes enabled on 830 */
12878 if (IS_I830(dev_priv))
e7c84544 12879 active = new_crtc_state->active;
6c49f241 12880
e7c84544
ML
12881 I915_STATE_WARN(new_crtc_state->active != active,
12882 "crtc active state doesn't match with hw state "
12883 "(expected %i, found %i)\n", new_crtc_state->active, active);
0e8ffe1b 12884
e7c84544
ML
12885 I915_STATE_WARN(intel_crtc->active != new_crtc_state->active,
12886 "transitional active state does not match atomic hw state "
12887 "(expected %i, found %i)\n", new_crtc_state->active, intel_crtc->active);
4d20cd86 12888
e7c84544
ML
12889 for_each_encoder_on_crtc(dev, crtc, encoder) {
12890 enum pipe pipe;
4d20cd86 12891
e7c84544
ML
12892 active = encoder->get_hw_state(encoder, &pipe);
12893 I915_STATE_WARN(active != new_crtc_state->active,
12894 "[ENCODER:%i] active %i with crtc active %i\n",
12895 encoder->base.base.id, active, new_crtc_state->active);
4d20cd86 12896
e7c84544
ML
12897 I915_STATE_WARN(active && intel_crtc->pipe != pipe,
12898 "Encoder connected to wrong pipe %c\n",
12899 pipe_name(pipe));
4d20cd86 12900
e1214b95 12901 if (active)
e7c84544
ML
12902 encoder->get_config(encoder, pipe_config);
12903 }
53d9f4e9 12904
a7d1b3f4
VS
12905 intel_crtc_compute_pixel_rate(pipe_config);
12906
e7c84544
ML
12907 if (!new_crtc_state->active)
12908 return;
cfb23ed6 12909
e7c84544 12910 intel_pipe_config_sanity_check(dev_priv, pipe_config);
e3b247da 12911
749d98b8 12912 sw_config = to_intel_crtc_state(new_crtc_state);
6315b5d3 12913 if (!intel_pipe_config_compare(dev_priv, sw_config,
e7c84544
ML
12914 pipe_config, false)) {
12915 I915_STATE_WARN(1, "pipe state doesn't match!\n");
10d75f54
VS
12916 intel_dump_pipe_config(pipe_config, NULL, "[hw state]");
12917 intel_dump_pipe_config(sw_config, NULL, "[sw state]");
8af6cf88
DV
12918 }
12919}
12920
cff109f0
VS
12921static void
12922intel_verify_planes(struct intel_atomic_state *state)
12923{
12924 struct intel_plane *plane;
12925 const struct intel_plane_state *plane_state;
12926 int i;
12927
12928 for_each_new_intel_plane_in_state(state, plane,
12929 plane_state, i)
3e1d87dd
VS
12930 assert_plane(plane, plane_state->slave ||
12931 plane_state->base.visible);
cff109f0
VS
12932}
12933
91d1b4bd 12934static void
c0ead703
ML
12935verify_single_dpll_state(struct drm_i915_private *dev_priv,
12936 struct intel_shared_dpll *pll,
12937 struct drm_crtc *crtc,
12938 struct drm_crtc_state *new_state)
91d1b4bd 12939{
91d1b4bd 12940 struct intel_dpll_hw_state dpll_hw_state;
40560e26 12941 unsigned int crtc_mask;
e7c84544 12942 bool active;
5358901f 12943
e7c84544 12944 memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
5358901f 12945
72f775fa 12946 DRM_DEBUG_KMS("%s\n", pll->info->name);
5358901f 12947
ee1398ba 12948 active = pll->info->funcs->get_hw_state(dev_priv, pll, &dpll_hw_state);
5358901f 12949
5cd281f6 12950 if (!(pll->info->flags & INTEL_DPLL_ALWAYS_ON)) {
e7c84544
ML
12951 I915_STATE_WARN(!pll->on && pll->active_mask,
12952 "pll in active use but not on in sw tracking\n");
12953 I915_STATE_WARN(pll->on && !pll->active_mask,
12954 "pll is on but not used by any active crtc\n");
12955 I915_STATE_WARN(pll->on != active,
12956 "pll on state mismatch (expected %i, found %i)\n",
12957 pll->on, active);
12958 }
5358901f 12959
e7c84544 12960 if (!crtc) {
2c42e535 12961 I915_STATE_WARN(pll->active_mask & ~pll->state.crtc_mask,
e7c84544 12962 "more active pll users than references: %x vs %x\n",
2c42e535 12963 pll->active_mask, pll->state.crtc_mask);
5358901f 12964
e7c84544
ML
12965 return;
12966 }
12967
40560e26 12968 crtc_mask = drm_crtc_mask(crtc);
e7c84544
ML
12969
12970 if (new_state->active)
12971 I915_STATE_WARN(!(pll->active_mask & crtc_mask),
12972 "pll active mismatch (expected pipe %c in active mask 0x%02x)\n",
12973 pipe_name(drm_crtc_index(crtc)), pll->active_mask);
12974 else
12975 I915_STATE_WARN(pll->active_mask & crtc_mask,
12976 "pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n",
12977 pipe_name(drm_crtc_index(crtc)), pll->active_mask);
2dd66ebd 12978
2c42e535 12979 I915_STATE_WARN(!(pll->state.crtc_mask & crtc_mask),
e7c84544 12980 "pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n",
2c42e535 12981 crtc_mask, pll->state.crtc_mask);
66e985c0 12982
2c42e535 12983 I915_STATE_WARN(pll->on && memcmp(&pll->state.hw_state,
e7c84544
ML
12984 &dpll_hw_state,
12985 sizeof(dpll_hw_state)),
12986 "pll hw state mismatch\n");
12987}
12988
12989static void
c0ead703
ML
12990verify_shared_dpll_state(struct drm_device *dev, struct drm_crtc *crtc,
12991 struct drm_crtc_state *old_crtc_state,
12992 struct drm_crtc_state *new_crtc_state)
e7c84544 12993{
fac5e23e 12994 struct drm_i915_private *dev_priv = to_i915(dev);
e7c84544
ML
12995 struct intel_crtc_state *old_state = to_intel_crtc_state(old_crtc_state);
12996 struct intel_crtc_state *new_state = to_intel_crtc_state(new_crtc_state);
12997
12998 if (new_state->shared_dpll)
c0ead703 12999 verify_single_dpll_state(dev_priv, new_state->shared_dpll, crtc, new_crtc_state);
e7c84544
ML
13000
13001 if (old_state->shared_dpll &&
13002 old_state->shared_dpll != new_state->shared_dpll) {
40560e26 13003 unsigned int crtc_mask = drm_crtc_mask(crtc);
e7c84544
ML
13004 struct intel_shared_dpll *pll = old_state->shared_dpll;
13005
13006 I915_STATE_WARN(pll->active_mask & crtc_mask,
13007 "pll active mismatch (didn't expect pipe %c in active mask)\n",
13008 pipe_name(drm_crtc_index(crtc)));
2c42e535 13009 I915_STATE_WARN(pll->state.crtc_mask & crtc_mask,
e7c84544
ML
13010 "pll enabled crtcs mismatch (found %x in enabled mask)\n",
13011 pipe_name(drm_crtc_index(crtc)));
5358901f 13012 }
8af6cf88
DV
13013}
13014
e7c84544 13015static void
c0ead703 13016intel_modeset_verify_crtc(struct drm_crtc *crtc,
677100ce
ML
13017 struct drm_atomic_state *state,
13018 struct drm_crtc_state *old_state,
13019 struct drm_crtc_state *new_state)
e7c84544 13020{
5a21b665
DV
13021 if (!needs_modeset(new_state) &&
13022 !to_intel_crtc_state(new_state)->update_pipe)
13023 return;
13024
c0ead703 13025 verify_wm_state(crtc, new_state);
677100ce 13026 verify_connector_state(crtc->dev, state, crtc);
c0ead703
ML
13027 verify_crtc_state(crtc, old_state, new_state);
13028 verify_shared_dpll_state(crtc->dev, crtc, old_state, new_state);
e7c84544
ML
13029}
13030
13031static void
c0ead703 13032verify_disabled_dpll_state(struct drm_device *dev)
e7c84544 13033{
fac5e23e 13034 struct drm_i915_private *dev_priv = to_i915(dev);
e7c84544
ML
13035 int i;
13036
13037 for (i = 0; i < dev_priv->num_shared_dpll; i++)
c0ead703 13038 verify_single_dpll_state(dev_priv, &dev_priv->shared_dplls[i], NULL, NULL);
e7c84544
ML
13039}
13040
13041static void
677100ce
ML
13042intel_modeset_verify_disabled(struct drm_device *dev,
13043 struct drm_atomic_state *state)
e7c84544 13044{
86b04268 13045 verify_encoder_state(dev, state);
677100ce 13046 verify_connector_state(dev, state, NULL);
c0ead703 13047 verify_disabled_dpll_state(dev);
e7c84544
ML
13048}
13049
f2bdd112 13050static void update_scanline_offset(const struct intel_crtc_state *crtc_state)
80715b2f 13051{
f2bdd112 13052 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
4f8036a2 13053 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
80715b2f
VS
13054
13055 /*
13056 * The scanline counter increments at the leading edge of hsync.
13057 *
13058 * On most platforms it starts counting from vtotal-1 on the
13059 * first active line. That means the scanline counter value is
13060 * always one less than what we would expect. Ie. just after
13061 * start of vblank, which also occurs at start of hsync (on the
13062 * last active line), the scanline counter will read vblank_start-1.
13063 *
13064 * On gen2 the scanline counter starts counting from 1 instead
13065 * of vtotal-1, so we have to subtract one (or rather add vtotal-1
13066 * to keep the value positive), instead of adding one.
13067 *
13068 * On HSW+ the behaviour of the scanline counter depends on the output
13069 * type. For DP ports it behaves like most other platforms, but on HDMI
13070 * there's an extra 1 line difference. So we need to add two instead of
13071 * one to the value.
ec1b4ee2
VS
13072 *
13073 * On VLV/CHV DSI the scanline counter would appear to increment
13074 * approx. 1/3 of a scanline before start of vblank. Unfortunately
13075 * that means we can't tell whether we're in vblank or not while
13076 * we're on that particular line. We must still set scanline_offset
13077 * to 1 so that the vblank timestamps come out correct when we query
13078 * the scanline counter from within the vblank interrupt handler.
13079 * However if queried just before the start of vblank we'll get an
13080 * answer that's slightly in the future.
80715b2f 13081 */
cf819eff 13082 if (IS_GEN(dev_priv, 2)) {
f2bdd112 13083 const struct drm_display_mode *adjusted_mode = &crtc_state->base.adjusted_mode;
80715b2f
VS
13084 int vtotal;
13085
124abe07
VS
13086 vtotal = adjusted_mode->crtc_vtotal;
13087 if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
80715b2f
VS
13088 vtotal /= 2;
13089
13090 crtc->scanline_offset = vtotal - 1;
4f8036a2 13091 } else if (HAS_DDI(dev_priv) &&
f2bdd112 13092 intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
80715b2f
VS
13093 crtc->scanline_offset = 2;
13094 } else
13095 crtc->scanline_offset = 1;
13096}
13097
c3b1e6c6 13098static void intel_modeset_clear_plls(struct intel_atomic_state *state)
ed6739ef 13099{
c3b1e6c6
VS
13100 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
13101 struct intel_crtc_state *old_crtc_state, *new_crtc_state;
13102 struct intel_crtc *crtc;
0a9ab303 13103 int i;
ed6739ef
ACO
13104
13105 if (!dev_priv->display.crtc_compute_clock)
ad421372 13106 return;
ed6739ef 13107
c3b1e6c6
VS
13108 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
13109 new_crtc_state, i) {
8106ddbd 13110 struct intel_shared_dpll *old_dpll =
c3b1e6c6 13111 old_crtc_state->shared_dpll;
0a9ab303 13112
c3b1e6c6 13113 if (!needs_modeset(&new_crtc_state->base))
225da59b
ACO
13114 continue;
13115
c3b1e6c6 13116 new_crtc_state->shared_dpll = NULL;
fb1a38a9 13117
8106ddbd 13118 if (!old_dpll)
fb1a38a9 13119 continue;
0a9ab303 13120
c3b1e6c6 13121 intel_release_shared_dpll(old_dpll, crtc, &state->base);
ad421372 13122 }
ed6739ef
ACO
13123}
13124
99d736a2
ML
13125/*
13126 * This implements the workaround described in the "notes" section of the mode
13127 * set sequence documentation. When going from no pipes or single pipe to
13128 * multiple pipes, and planes are enabled after the pipe, we need to wait at
13129 * least 2 vblanks on the first pipe before enabling planes on the second pipe.
13130 */
bca0bfa3 13131static int haswell_mode_set_planes_workaround(struct intel_atomic_state *state)
99d736a2 13132{
bca0bfa3
VS
13133 struct intel_crtc_state *crtc_state;
13134 struct intel_crtc *crtc;
99d736a2
ML
13135 struct intel_crtc_state *first_crtc_state = NULL;
13136 struct intel_crtc_state *other_crtc_state = NULL;
13137 enum pipe first_pipe = INVALID_PIPE, enabled_pipe = INVALID_PIPE;
13138 int i;
13139
13140 /* look at all crtc's that are going to be enabled in during modeset */
bca0bfa3
VS
13141 for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
13142 if (!crtc_state->base.active ||
13143 !needs_modeset(&crtc_state->base))
99d736a2
ML
13144 continue;
13145
13146 if (first_crtc_state) {
bca0bfa3 13147 other_crtc_state = crtc_state;
99d736a2
ML
13148 break;
13149 } else {
bca0bfa3
VS
13150 first_crtc_state = crtc_state;
13151 first_pipe = crtc->pipe;
99d736a2
ML
13152 }
13153 }
13154
13155 /* No workaround needed? */
13156 if (!first_crtc_state)
13157 return 0;
13158
13159 /* w/a possibly needed, check how many crtc's are already enabled. */
bca0bfa3
VS
13160 for_each_intel_crtc(state->base.dev, crtc) {
13161 crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
13162 if (IS_ERR(crtc_state))
13163 return PTR_ERR(crtc_state);
99d736a2 13164
bca0bfa3 13165 crtc_state->hsw_workaround_pipe = INVALID_PIPE;
99d736a2 13166
bca0bfa3
VS
13167 if (!crtc_state->base.active ||
13168 needs_modeset(&crtc_state->base))
99d736a2
ML
13169 continue;
13170
13171 /* 2 or more enabled crtcs means no need for w/a */
13172 if (enabled_pipe != INVALID_PIPE)
13173 return 0;
13174
bca0bfa3 13175 enabled_pipe = crtc->pipe;
99d736a2
ML
13176 }
13177
13178 if (enabled_pipe != INVALID_PIPE)
13179 first_crtc_state->hsw_workaround_pipe = enabled_pipe;
13180 else if (other_crtc_state)
13181 other_crtc_state->hsw_workaround_pipe = first_pipe;
13182
13183 return 0;
13184}
13185
8d96561a
VS
13186static int intel_lock_all_pipes(struct drm_atomic_state *state)
13187{
13188 struct drm_crtc *crtc;
13189
13190 /* Add all pipes to the state */
13191 for_each_crtc(state->dev, crtc) {
13192 struct drm_crtc_state *crtc_state;
13193
13194 crtc_state = drm_atomic_get_crtc_state(state, crtc);
13195 if (IS_ERR(crtc_state))
13196 return PTR_ERR(crtc_state);
13197 }
13198
13199 return 0;
13200}
13201
27c329ed
ML
13202static int intel_modeset_all_pipes(struct drm_atomic_state *state)
13203{
13204 struct drm_crtc *crtc;
27c329ed 13205
8d96561a
VS
13206 /*
13207 * Add all pipes to the state, and force
13208 * a modeset on all the active ones.
13209 */
27c329ed 13210 for_each_crtc(state->dev, crtc) {
9780aad5
VS
13211 struct drm_crtc_state *crtc_state;
13212 int ret;
13213
27c329ed
ML
13214 crtc_state = drm_atomic_get_crtc_state(state, crtc);
13215 if (IS_ERR(crtc_state))
13216 return PTR_ERR(crtc_state);
13217
13218 if (!crtc_state->active || needs_modeset(crtc_state))
13219 continue;
13220
13221 crtc_state->mode_changed = true;
13222
13223 ret = drm_atomic_add_affected_connectors(state, crtc);
13224 if (ret)
9780aad5 13225 return ret;
27c329ed
ML
13226
13227 ret = drm_atomic_add_affected_planes(state, crtc);
13228 if (ret)
9780aad5 13229 return ret;
27c329ed
ML
13230 }
13231
9780aad5 13232 return 0;
27c329ed
ML
13233}
13234
5643dd9c 13235static int intel_modeset_checks(struct intel_atomic_state *state)
054518dd 13236{
5643dd9c
VS
13237 struct drm_i915_private *dev_priv = to_i915(state->base.dev);
13238 struct intel_crtc_state *old_crtc_state, *new_crtc_state;
13239 struct intel_crtc *crtc;
565602d7 13240 int ret = 0, i;
054518dd 13241
5643dd9c 13242 if (!check_digital_port_conflicts(state)) {
b359283a
ML
13243 DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
13244 return -EINVAL;
13245 }
13246
905801fe 13247 /* keep the current setting */
5643dd9c
VS
13248 if (!state->cdclk.force_min_cdclk_changed)
13249 state->cdclk.force_min_cdclk = dev_priv->cdclk.force_min_cdclk;
13250
13251 state->modeset = true;
13252 state->active_crtcs = dev_priv->active_crtcs;
13253 state->cdclk.logical = dev_priv->cdclk.logical;
13254 state->cdclk.actual = dev_priv->cdclk.actual;
13255 state->cdclk.pipe = INVALID_PIPE;
13256
13257 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
13258 new_crtc_state, i) {
13259 if (new_crtc_state->base.active)
13260 state->active_crtcs |= 1 << i;
565602d7 13261 else
5643dd9c 13262 state->active_crtcs &= ~(1 << i);
8b4a7d05 13263
5643dd9c
VS
13264 if (old_crtc_state->base.active != new_crtc_state->base.active)
13265 state->active_pipe_changes |= drm_crtc_mask(&crtc->base);
565602d7
ML
13266 }
13267
054518dd
ACO
13268 /*
13269 * See if the config requires any additional preparation, e.g.
13270 * to adjust global state with pipes off. We need to do this
13271 * here so we can get the modeset_pipe updated config for the new
13272 * mode set on this crtc. For other crtcs we need to use the
13273 * adjusted_mode bits in the crtc directly.
13274 */
27c329ed 13275 if (dev_priv->display.modeset_calc_cdclk) {
59f9e9ca
VS
13276 enum pipe pipe;
13277
5643dd9c 13278 ret = dev_priv->display.modeset_calc_cdclk(state);
c89e39f3
CT
13279 if (ret < 0)
13280 return ret;
27c329ed 13281
8d96561a 13282 /*
bb0f4aab 13283 * Writes to dev_priv->cdclk.logical must protected by
8d96561a
VS
13284 * holding all the crtc locks, even if we don't end up
13285 * touching the hardware
13286 */
64600bd5 13287 if (intel_cdclk_changed(&dev_priv->cdclk.logical,
5643dd9c
VS
13288 &state->cdclk.logical)) {
13289 ret = intel_lock_all_pipes(&state->base);
8d96561a
VS
13290 if (ret < 0)
13291 return ret;
13292 }
13293
5643dd9c 13294 if (is_power_of_2(state->active_crtcs)) {
59f9e9ca
VS
13295 struct drm_crtc *crtc;
13296 struct drm_crtc_state *crtc_state;
13297
5643dd9c 13298 pipe = ilog2(state->active_crtcs);
59f9e9ca 13299 crtc = &intel_get_crtc_for_pipe(dev_priv, pipe)->base;
5643dd9c 13300 crtc_state = drm_atomic_get_new_crtc_state(&state->base, crtc);
59f9e9ca
VS
13301 if (crtc_state && needs_modeset(crtc_state))
13302 pipe = INVALID_PIPE;
13303 } else {
13304 pipe = INVALID_PIPE;
13305 }
13306
8d96561a 13307 /* All pipes must be switched off while we change the cdclk. */
59f9e9ca
VS
13308 if (pipe != INVALID_PIPE &&
13309 intel_cdclk_needs_cd2x_update(dev_priv,
13310 &dev_priv->cdclk.actual,
5643dd9c
VS
13311 &state->cdclk.actual)) {
13312 ret = intel_lock_all_pipes(&state->base);
59f9e9ca
VS
13313 if (ret < 0)
13314 return ret;
13315
5643dd9c 13316 state->cdclk.pipe = pipe;
59f9e9ca 13317 } else if (intel_cdclk_needs_modeset(&dev_priv->cdclk.actual,
5643dd9c
VS
13318 &state->cdclk.actual)) {
13319 ret = intel_modeset_all_pipes(&state->base);
8d96561a
VS
13320 if (ret < 0)
13321 return ret;
59f9e9ca 13322
5643dd9c 13323 state->cdclk.pipe = INVALID_PIPE;
8d96561a 13324 }
e8788cbc 13325
bb0f4aab 13326 DRM_DEBUG_KMS("New cdclk calculated to be logical %u kHz, actual %u kHz\n",
5643dd9c
VS
13327 state->cdclk.logical.cdclk,
13328 state->cdclk.actual.cdclk);
53e9bf5e 13329 DRM_DEBUG_KMS("New voltage level calculated to be logical %u, actual %u\n",
5643dd9c
VS
13330 state->cdclk.logical.voltage_level,
13331 state->cdclk.actual.voltage_level);
e0ca7a6b 13332 }
054518dd 13333
5643dd9c 13334 intel_modeset_clear_plls(state);
054518dd 13335
565602d7 13336 if (IS_HASWELL(dev_priv))
5643dd9c 13337 return haswell_mode_set_planes_workaround(state);
99d736a2 13338
ad421372 13339 return 0;
c347a676
ACO
13340}
13341
aa363136
MR
13342/*
13343 * Handle calculation of various watermark data at the end of the atomic check
13344 * phase. The code here should be run after the per-crtc and per-plane 'check'
13345 * handlers to ensure that all derived state has been updated.
13346 */
cd1d3ee9 13347static int calc_watermark_data(struct intel_atomic_state *state)
aa363136 13348{
cd1d3ee9 13349 struct drm_device *dev = state->base.dev;
98d39494 13350 struct drm_i915_private *dev_priv = to_i915(dev);
98d39494
MR
13351
13352 /* Is there platform-specific watermark information to calculate? */
13353 if (dev_priv->display.compute_global_watermarks)
55994c2c
MR
13354 return dev_priv->display.compute_global_watermarks(state);
13355
13356 return 0;
aa363136
MR
13357}
13358
74c090b1
ML
13359/**
13360 * intel_atomic_check - validate state object
13361 * @dev: drm device
13362 * @state: state to validate
13363 */
13364static int intel_atomic_check(struct drm_device *dev,
9a86a07c 13365 struct drm_atomic_state *_state)
c347a676 13366{
dd8b3bdb 13367 struct drm_i915_private *dev_priv = to_i915(dev);
9a86a07c
VS
13368 struct intel_atomic_state *state = to_intel_atomic_state(_state);
13369 struct intel_crtc_state *old_crtc_state, *new_crtc_state;
13370 struct intel_crtc *crtc;
c347a676 13371 int ret, i;
9a86a07c 13372 bool any_ms = state->cdclk.force_min_cdclk_changed;
c347a676 13373
8c58f73c 13374 /* Catch I915_MODE_FLAG_INHERITED */
9a86a07c
VS
13375 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
13376 new_crtc_state, i) {
13377 if (new_crtc_state->base.mode.private_flags !=
13378 old_crtc_state->base.mode.private_flags)
13379 new_crtc_state->base.mode_changed = true;
8c58f73c
ML
13380 }
13381
9a86a07c 13382 ret = drm_atomic_helper_check_modeset(dev, &state->base);
054518dd 13383 if (ret)
2833920d 13384 goto fail;
054518dd 13385
9a86a07c
VS
13386 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
13387 new_crtc_state, i) {
13388 if (!needs_modeset(&new_crtc_state->base))
c347a676
ACO
13389 continue;
13390
9a86a07c 13391 if (!new_crtc_state->base.enable) {
af4a879e 13392 any_ms = true;
cfb23ed6 13393 continue;
af4a879e 13394 }
cfb23ed6 13395
9a86a07c 13396 ret = intel_modeset_pipe_config(new_crtc_state);
2833920d
VS
13397 if (ret)
13398 goto fail;
c347a676 13399
9a86a07c
VS
13400 if (intel_pipe_config_compare(dev_priv, old_crtc_state,
13401 new_crtc_state, true)) {
13402 new_crtc_state->base.mode_changed = false;
13403 new_crtc_state->update_pipe = true;
26495481
DV
13404 }
13405
9a86a07c 13406 if (needs_modeset(&new_crtc_state->base))
26495481 13407 any_ms = true;
c347a676
ACO
13408 }
13409
9a86a07c 13410 ret = drm_dp_mst_atomic_check(&state->base);
eceae147 13411 if (ret)
2833920d 13412 goto fail;
eceae147 13413
61333b60 13414 if (any_ms) {
9a86a07c 13415 ret = intel_modeset_checks(state);
61333b60 13416 if (ret)
2833920d 13417 goto fail;
e0ca7a6b 13418 } else {
9a86a07c 13419 state->cdclk.logical = dev_priv->cdclk.logical;
e0ca7a6b 13420 }
76305b1a 13421
9a86a07c 13422 ret = icl_add_linked_planes(state);
1ab554b0 13423 if (ret)
2833920d 13424 goto fail;
1ab554b0 13425
9a86a07c 13426 ret = drm_atomic_helper_check_planes(dev, &state->base);
aa363136 13427 if (ret)
2833920d 13428 goto fail;
aa363136 13429
9a86a07c
VS
13430 intel_fbc_choose_crtc(dev_priv, state);
13431 ret = calc_watermark_data(state);
c457d9cf 13432 if (ret)
2833920d 13433 goto fail;
c457d9cf 13434
9a86a07c 13435 ret = intel_bw_atomic_check(state);
c457d9cf 13436 if (ret)
2833920d 13437 goto fail;
c457d9cf 13438
a0e70104
VS
13439 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
13440 new_crtc_state, i) {
13441 if (!needs_modeset(&new_crtc_state->base) &&
13442 !new_crtc_state->update_pipe)
13443 continue;
13444
10d75f54 13445 intel_dump_pipe_config(new_crtc_state, state,
a0e70104
VS
13446 needs_modeset(&new_crtc_state->base) ?
13447 "[modeset]" : "[fastset]");
13448 }
13449
c457d9cf 13450 return 0;
2833920d
VS
13451
13452 fail:
13453 if (ret == -EDEADLK)
13454 return ret;
13455
13456 /*
13457 * FIXME would probably be nice to know which crtc specifically
13458 * caused the failure, in cases where we can pinpoint it.
13459 */
13460 for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
13461 new_crtc_state, i)
10d75f54 13462 intel_dump_pipe_config(new_crtc_state, state, "[failed]");
2833920d
VS
13463
13464 return ret;
054518dd
ACO
13465}
13466
5008e874 13467static int intel_atomic_prepare_commit(struct drm_device *dev,
d07f0e59 13468 struct drm_atomic_state *state)
5008e874 13469{
fd70075f 13470 return drm_atomic_helper_prepare_planes(dev, state);
5008e874
ML
13471}
13472
a2991414
ML
13473u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc)
13474{
13475 struct drm_device *dev = crtc->base.dev;
32db0b65 13476 struct drm_vblank_crtc *vblank = &dev->vblank[drm_crtc_index(&crtc->base)];
a2991414 13477
32db0b65 13478 if (!vblank->max_vblank_count)
734cbbf3 13479 return (u32)drm_crtc_accurate_vblank_count(&crtc->base);
a2991414
ML
13480
13481 return dev->driver->get_vblank_counter(dev, crtc->pipe);
13482}
13483
896e5bb0
L
13484static void intel_update_crtc(struct drm_crtc *crtc,
13485 struct drm_atomic_state *state,
13486 struct drm_crtc_state *old_crtc_state,
b44d5c0c 13487 struct drm_crtc_state *new_crtc_state)
896e5bb0
L
13488{
13489 struct drm_device *dev = crtc->dev;
13490 struct drm_i915_private *dev_priv = to_i915(dev);
13491 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
aa5e9b47
ML
13492 struct intel_crtc_state *pipe_config = to_intel_crtc_state(new_crtc_state);
13493 bool modeset = needs_modeset(new_crtc_state);
8b69449d
ML
13494 struct intel_plane_state *new_plane_state =
13495 intel_atomic_get_new_plane_state(to_intel_atomic_state(state),
13496 to_intel_plane(crtc->primary));
896e5bb0
L
13497
13498 if (modeset) {
f2bdd112 13499 update_scanline_offset(pipe_config);
896e5bb0 13500 dev_priv->display.crtc_enable(pipe_config, state);
033b7a23
ML
13501
13502 /* vblanks work again, re-enable pipe CRC. */
13503 intel_crtc_enable_pipe_crc(intel_crtc);
896e5bb0 13504 } else {
aa5e9b47
ML
13505 intel_pre_plane_update(to_intel_crtc_state(old_crtc_state),
13506 pipe_config);
608ed4ab
HG
13507
13508 if (pipe_config->update_pipe)
13509 intel_encoders_update_pipe(crtc, pipe_config, state);
896e5bb0
L
13510 }
13511
50c42fc9
ML
13512 if (pipe_config->update_pipe && !pipe_config->enable_fbc)
13513 intel_fbc_disable(intel_crtc);
13514 else if (new_plane_state)
8b69449d 13515 intel_fbc_enable(intel_crtc, pipe_config, new_plane_state);
896e5bb0 13516
c856dbc8 13517 intel_begin_crtc_commit(to_intel_atomic_state(state), intel_crtc);
6c246b81 13518
5f2e5112
VS
13519 if (INTEL_GEN(dev_priv) >= 9)
13520 skl_update_planes_on_crtc(to_intel_atomic_state(state), intel_crtc);
13521 else
13522 i9xx_update_planes_on_crtc(to_intel_atomic_state(state), intel_crtc);
6c246b81 13523
c856dbc8 13524 intel_finish_crtc_commit(to_intel_atomic_state(state), intel_crtc);
896e5bb0
L
13525}
13526
b44d5c0c 13527static void intel_update_crtcs(struct drm_atomic_state *state)
896e5bb0
L
13528{
13529 struct drm_crtc *crtc;
aa5e9b47 13530 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
896e5bb0
L
13531 int i;
13532
aa5e9b47
ML
13533 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
13534 if (!new_crtc_state->active)
896e5bb0
L
13535 continue;
13536
13537 intel_update_crtc(crtc, state, old_crtc_state,
b44d5c0c 13538 new_crtc_state);
896e5bb0
L
13539 }
13540}
13541
b44d5c0c 13542static void skl_update_crtcs(struct drm_atomic_state *state)
27082493 13543{
0f0f74bc 13544 struct drm_i915_private *dev_priv = to_i915(state->dev);
27082493
L
13545 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
13546 struct drm_crtc *crtc;
ce0ba283 13547 struct intel_crtc *intel_crtc;
aa5e9b47 13548 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
ce0ba283 13549 struct intel_crtc_state *cstate;
27082493
L
13550 unsigned int updated = 0;
13551 bool progress;
13552 enum pipe pipe;
5eff503b 13553 int i;
aa9664ff
MK
13554 u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
13555 u8 required_slices = intel_state->wm_results.ddb.enabled_slices;
53cc6880 13556 struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
5eff503b 13557
aa5e9b47 13558 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i)
5eff503b 13559 /* ignore allocations for crtc's that have been turned off. */
aa5e9b47 13560 if (new_crtc_state->active)
53cc6880 13561 entries[i] = to_intel_crtc_state(old_crtc_state)->wm.skl.ddb;
27082493 13562
aa9664ff
MK
13563 /* If 2nd DBuf slice required, enable it here */
13564 if (INTEL_GEN(dev_priv) >= 11 && required_slices > hw_enabled_slices)
13565 icl_dbuf_slices_update(dev_priv, required_slices);
13566
27082493
L
13567 /*
13568 * Whenever the number of active pipes changes, we need to make sure we
13569 * update the pipes in the right order so that their ddb allocations
13570 * never overlap with eachother inbetween CRTC updates. Otherwise we'll
13571 * cause pipe underruns and other bad stuff.
13572 */
13573 do {
27082493
L
13574 progress = false;
13575
aa5e9b47 13576 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
27082493
L
13577 bool vbl_wait = false;
13578 unsigned int cmask = drm_crtc_mask(crtc);
ce0ba283
L
13579
13580 intel_crtc = to_intel_crtc(crtc);
21794813 13581 cstate = to_intel_crtc_state(new_crtc_state);
ce0ba283 13582 pipe = intel_crtc->pipe;
27082493 13583
5eff503b 13584 if (updated & cmask || !cstate->base.active)
27082493 13585 continue;
5eff503b 13586
53cc6880 13587 if (skl_ddb_allocation_overlaps(&cstate->wm.skl.ddb,
2b68504b 13588 entries,
53cc6880 13589 INTEL_INFO(dev_priv)->num_pipes, i))
27082493
L
13590 continue;
13591
13592 updated |= cmask;
53cc6880 13593 entries[i] = cstate->wm.skl.ddb;
27082493
L
13594
13595 /*
13596 * If this is an already active pipe, it's DDB changed,
13597 * and this isn't the last pipe that needs updating
13598 * then we need to wait for a vblank to pass for the
13599 * new ddb allocation to take effect.
13600 */
ce0ba283 13601 if (!skl_ddb_entry_equal(&cstate->wm.skl.ddb,
512b5527 13602 &to_intel_crtc_state(old_crtc_state)->wm.skl.ddb) &&
aa5e9b47 13603 !new_crtc_state->active_changed &&
27082493
L
13604 intel_state->wm_results.dirty_pipes != updated)
13605 vbl_wait = true;
13606
13607 intel_update_crtc(crtc, state, old_crtc_state,
b44d5c0c 13608 new_crtc_state);
27082493
L
13609
13610 if (vbl_wait)
0f0f74bc 13611 intel_wait_for_vblank(dev_priv, pipe);
27082493
L
13612
13613 progress = true;
13614 }
13615 } while (progress);
aa9664ff
MK
13616
13617 /* If 2nd DBuf slice is no more required disable it */
13618 if (INTEL_GEN(dev_priv) >= 11 && required_slices < hw_enabled_slices)
13619 icl_dbuf_slices_update(dev_priv, required_slices);
27082493
L
13620}
13621
ba318c61
CW
13622static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
13623{
13624 struct intel_atomic_state *state, *next;
13625 struct llist_node *freed;
13626
13627 freed = llist_del_all(&dev_priv->atomic_helper.free_list);
13628 llist_for_each_entry_safe(state, next, freed, freed)
13629 drm_atomic_state_put(&state->base);
13630}
13631
13632static void intel_atomic_helper_free_state_worker(struct work_struct *work)
13633{
13634 struct drm_i915_private *dev_priv =
13635 container_of(work, typeof(*dev_priv), atomic_helper.free_work);
13636
13637 intel_atomic_helper_free_state(dev_priv);
13638}
13639
9db529aa
DV
13640static void intel_atomic_commit_fence_wait(struct intel_atomic_state *intel_state)
13641{
13642 struct wait_queue_entry wait_fence, wait_reset;
13643 struct drm_i915_private *dev_priv = to_i915(intel_state->base.dev);
13644
13645 init_wait_entry(&wait_fence, 0);
13646 init_wait_entry(&wait_reset, 0);
13647 for (;;) {
13648 prepare_to_wait(&intel_state->commit_ready.wait,
13649 &wait_fence, TASK_UNINTERRUPTIBLE);
13650 prepare_to_wait(&dev_priv->gpu_error.wait_queue,
13651 &wait_reset, TASK_UNINTERRUPTIBLE);
13652
13653
13654 if (i915_sw_fence_done(&intel_state->commit_ready)
13655 || test_bit(I915_RESET_MODESET, &dev_priv->gpu_error.flags))
13656 break;
13657
13658 schedule();
13659 }
13660 finish_wait(&intel_state->commit_ready.wait, &wait_fence);
13661 finish_wait(&dev_priv->gpu_error.wait_queue, &wait_reset);
13662}
13663
8d52e447
CW
13664static void intel_atomic_cleanup_work(struct work_struct *work)
13665{
13666 struct drm_atomic_state *state =
13667 container_of(work, struct drm_atomic_state, commit_work);
13668 struct drm_i915_private *i915 = to_i915(state->dev);
13669
13670 drm_atomic_helper_cleanup_planes(&i915->drm, state);
13671 drm_atomic_helper_commit_cleanup_done(state);
13672 drm_atomic_state_put(state);
13673
13674 intel_atomic_helper_free_state(i915);
13675}
13676
94f05024 13677static void intel_atomic_commit_tail(struct drm_atomic_state *state)
a6778b3c 13678{
94f05024 13679 struct drm_device *dev = state->dev;
565602d7 13680 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
fac5e23e 13681 struct drm_i915_private *dev_priv = to_i915(dev);
aa5e9b47 13682 struct drm_crtc_state *old_crtc_state, *new_crtc_state;
a1cccdcf 13683 struct intel_crtc_state *new_intel_crtc_state, *old_intel_crtc_state;
7580d774 13684 struct drm_crtc *crtc;
a1cccdcf 13685 struct intel_crtc *intel_crtc;
d8fc70b7 13686 u64 put_domains[I915_MAX_PIPES] = {};
0e6e0be4 13687 intel_wakeref_t wakeref = 0;
e95433c7 13688 int i;
a6778b3c 13689
9db529aa 13690 intel_atomic_commit_fence_wait(intel_state);
42b062b0 13691
ea0000f0
DV
13692 drm_atomic_helper_wait_for_dependencies(state);
13693
c3b32658 13694 if (intel_state->modeset)
0e6e0be4 13695 wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_MODESET);
565602d7 13696
aa5e9b47 13697 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
a1cccdcf
ML
13698 old_intel_crtc_state = to_intel_crtc_state(old_crtc_state);
13699 new_intel_crtc_state = to_intel_crtc_state(new_crtc_state);
13700 intel_crtc = to_intel_crtc(crtc);
a539205a 13701
aa5e9b47
ML
13702 if (needs_modeset(new_crtc_state) ||
13703 to_intel_crtc_state(new_crtc_state)->update_pipe) {
5a21b665 13704
a1cccdcf 13705 put_domains[intel_crtc->pipe] =
5a21b665 13706 modeset_get_crtc_power_domains(crtc,
a1cccdcf 13707 new_intel_crtc_state);
5a21b665
DV
13708 }
13709
aa5e9b47 13710 if (!needs_modeset(new_crtc_state))
61333b60
ML
13711 continue;
13712
a1cccdcf 13713 intel_pre_plane_update(old_intel_crtc_state, new_intel_crtc_state);
460da916 13714
29ceb0e6 13715 if (old_crtc_state->active) {
0dd14be3 13716 intel_crtc_disable_planes(intel_state, intel_crtc);
033b7a23
ML
13717
13718 /*
13719 * We need to disable pipe CRC before disabling the pipe,
13720 * or we race against vblank off.
13721 */
13722 intel_crtc_disable_pipe_crc(intel_crtc);
13723
a1cccdcf 13724 dev_priv->display.crtc_disable(old_intel_crtc_state, state);
eddfcbcd 13725 intel_crtc->active = false;
58f9c0bc 13726 intel_fbc_disable(intel_crtc);
65c307fd 13727 intel_disable_shared_dpll(old_intel_crtc_state);
9bbc8258
VS
13728
13729 /*
13730 * Underruns don't always raise
13731 * interrupts, so check manually.
13732 */
13733 intel_check_cpu_fifo_underruns(dev_priv);
13734 intel_check_pch_fifo_underruns(dev_priv);
b9001114 13735
a748faea
VS
13736 /* FIXME unify this for all platforms */
13737 if (!new_crtc_state->active &&
b2ae318a 13738 !HAS_GMCH(dev_priv) &&
a748faea
VS
13739 dev_priv->display.initial_watermarks)
13740 dev_priv->display.initial_watermarks(intel_state,
13741 new_intel_crtc_state);
a539205a 13742 }
b8cecdf5 13743 }
7758a113 13744
7a1530d7
DV
13745 /* FIXME: Eventually get rid of our intel_crtc->config pointer */
13746 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i)
13747 to_intel_crtc(crtc)->config = to_intel_crtc_state(new_crtc_state);
f6e5b160 13748
565602d7 13749 if (intel_state->modeset) {
4740b0f2 13750 drm_atomic_helper_update_legacy_modeset_state(state->dev, state);
33c8df89 13751
59f9e9ca
VS
13752 intel_set_cdclk_pre_plane_update(dev_priv,
13753 &intel_state->cdclk.actual,
13754 &dev_priv->cdclk.actual,
13755 intel_state->cdclk.pipe);
f6d1973d 13756
656d1b89
L
13757 /*
13758 * SKL workaround: bspec recommends we disable the SAGV when we
13759 * have more then one pipe enabled
13760 */
56feca91 13761 if (!intel_can_enable_sagv(state))
16dcdc4e 13762 intel_disable_sagv(dev_priv);
656d1b89 13763
677100ce 13764 intel_modeset_verify_disabled(dev, state);
4740b0f2 13765 }
47fab737 13766
896e5bb0 13767 /* Complete the events for pipes that have now been disabled */
aa5e9b47
ML
13768 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
13769 bool modeset = needs_modeset(new_crtc_state);
80715b2f 13770
1f7528c4 13771 /* Complete events for now disable pipes here. */
aa5e9b47 13772 if (modeset && !new_crtc_state->active && new_crtc_state->event) {
1f7528c4 13773 spin_lock_irq(&dev->event_lock);
aa5e9b47 13774 drm_crtc_send_vblank_event(crtc, new_crtc_state->event);
1f7528c4
DV
13775 spin_unlock_irq(&dev->event_lock);
13776
aa5e9b47 13777 new_crtc_state->event = NULL;
1f7528c4 13778 }
177246a8
MR
13779 }
13780
896e5bb0 13781 /* Now enable the clocks, plane, pipe, and connectors that we set up. */
b44d5c0c 13782 dev_priv->display.update_crtcs(state);
896e5bb0 13783
59f9e9ca
VS
13784 if (intel_state->modeset)
13785 intel_set_cdclk_post_plane_update(dev_priv,
13786 &intel_state->cdclk.actual,
13787 &dev_priv->cdclk.actual,
13788 intel_state->cdclk.pipe);
13789
94f05024
DV
13790 /* FIXME: We should call drm_atomic_helper_commit_hw_done() here
13791 * already, but still need the state for the delayed optimization. To
13792 * fix this:
13793 * - wrap the optimization/post_plane_update stuff into a per-crtc work.
13794 * - schedule that vblank worker _before_ calling hw_done
13795 * - at the start of commit_tail, cancel it _synchrously
13796 * - switch over to the vblank wait helper in the core after that since
13797 * we don't need out special handling any more.
13798 */
b44d5c0c 13799 drm_atomic_helper_wait_for_flip_done(dev, state);
5a21b665 13800
051a6d8d
VS
13801 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
13802 new_intel_crtc_state = to_intel_crtc_state(new_crtc_state);
13803
13804 if (new_crtc_state->active &&
13805 !needs_modeset(new_crtc_state) &&
13806 (new_intel_crtc_state->base.color_mgmt_changed ||
13807 new_intel_crtc_state->update_pipe))
13808 intel_color_load_luts(new_intel_crtc_state);
13809 }
13810
5a21b665
DV
13811 /*
13812 * Now that the vblank has passed, we can go ahead and program the
13813 * optimal watermarks on platforms that need two-step watermark
13814 * programming.
13815 *
13816 * TODO: Move this (and other cleanup) to an async worker eventually.
13817 */
aa5e9b47 13818 for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
a1cccdcf 13819 new_intel_crtc_state = to_intel_crtc_state(new_crtc_state);
5a21b665
DV
13820
13821 if (dev_priv->display.optimize_watermarks)
ccf010fb 13822 dev_priv->display.optimize_watermarks(intel_state,
a1cccdcf 13823 new_intel_crtc_state);
5a21b665
DV
13824 }
13825
aa5e9b47 13826 for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
5a21b665
DV
13827 intel_post_plane_update(to_intel_crtc_state(old_crtc_state));
13828
13829 if (put_domains[i])
13830 modeset_put_power_domains(dev_priv, put_domains[i]);
13831
aa5e9b47 13832 intel_modeset_verify_crtc(crtc, state, old_crtc_state, new_crtc_state);
5a21b665
DV
13833 }
13834
cff109f0
VS
13835 if (intel_state->modeset)
13836 intel_verify_planes(intel_state);
13837
56feca91 13838 if (intel_state->modeset && intel_can_enable_sagv(state))
16dcdc4e 13839 intel_enable_sagv(dev_priv);
656d1b89 13840
94f05024
DV
13841 drm_atomic_helper_commit_hw_done(state);
13842
d5553c09
CW
13843 if (intel_state->modeset) {
13844 /* As one of the primary mmio accessors, KMS has a high
13845 * likelihood of triggering bugs in unclaimed access. After we
13846 * finish modesetting, see if an error has been flagged, and if
13847 * so enable debugging for the next modeset - and hope we catch
13848 * the culprit.
13849 */
2cf7bf6f 13850 intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore);
0e6e0be4 13851 intel_display_power_put(dev_priv, POWER_DOMAIN_MODESET, wakeref);
d5553c09 13852 }
2e2f08d0 13853 intel_runtime_pm_put(dev_priv, intel_state->wakeref);
5a21b665 13854
8d52e447
CW
13855 /*
13856 * Defer the cleanup of the old state to a separate worker to not
13857 * impede the current task (userspace for blocking modesets) that
13858 * are executed inline. For out-of-line asynchronous modesets/flips,
13859 * deferring to a new worker seems overkill, but we would place a
13860 * schedule point (cond_resched()) here anyway to keep latencies
13861 * down.
13862 */
13863 INIT_WORK(&state->commit_work, intel_atomic_cleanup_work);
41db645a 13864 queue_work(system_highpri_wq, &state->commit_work);
94f05024
DV
13865}
13866
13867static void intel_atomic_commit_work(struct work_struct *work)
13868{
c004a90b
CW
13869 struct drm_atomic_state *state =
13870 container_of(work, struct drm_atomic_state, commit_work);
13871
94f05024
DV
13872 intel_atomic_commit_tail(state);
13873}
13874
c004a90b
CW
13875static int __i915_sw_fence_call
13876intel_atomic_commit_ready(struct i915_sw_fence *fence,
13877 enum i915_sw_fence_notify notify)
13878{
13879 struct intel_atomic_state *state =
13880 container_of(fence, struct intel_atomic_state, commit_ready);
13881
13882 switch (notify) {
13883 case FENCE_COMPLETE:
42b062b0 13884 /* we do blocking waits in the worker, nothing to do here */
c004a90b 13885 break;
c004a90b 13886 case FENCE_FREE:
eb955eee
CW
13887 {
13888 struct intel_atomic_helper *helper =
13889 &to_i915(state->base.dev)->atomic_helper;
13890
13891 if (llist_add(&state->freed, &helper->free_list))
13892 schedule_work(&helper->free_work);
13893 break;
13894 }
c004a90b
CW
13895 }
13896
13897 return NOTIFY_DONE;
13898}
13899
6c9c1b38
DV
13900static void intel_atomic_track_fbs(struct drm_atomic_state *state)
13901{
aa5e9b47 13902 struct drm_plane_state *old_plane_state, *new_plane_state;
6c9c1b38 13903 struct drm_plane *plane;
6c9c1b38
DV
13904 int i;
13905
aa5e9b47 13906 for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i)
faf5bf0a 13907 i915_gem_track_fb(intel_fb_obj(old_plane_state->fb),
aa5e9b47 13908 intel_fb_obj(new_plane_state->fb),
faf5bf0a 13909 to_intel_plane(plane)->frontbuffer_bit);
6c9c1b38
DV
13910}
13911
94f05024
DV
13912/**
13913 * intel_atomic_commit - commit validated state object
13914 * @dev: DRM device
13915 * @state: the top-level driver state object
13916 * @nonblock: nonblocking commit
13917 *
13918 * This function commits a top-level state object that has been validated
13919 * with drm_atomic_helper_check().
13920 *
94f05024
DV
13921 * RETURNS
13922 * Zero for success or -errno.
13923 */
13924static int intel_atomic_commit(struct drm_device *dev,
13925 struct drm_atomic_state *state,
13926 bool nonblock)
13927{
13928 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
fac5e23e 13929 struct drm_i915_private *dev_priv = to_i915(dev);
94f05024
DV
13930 int ret = 0;
13931
2e2f08d0
CW
13932 intel_state->wakeref = intel_runtime_pm_get(dev_priv);
13933
c004a90b
CW
13934 drm_atomic_state_get(state);
13935 i915_sw_fence_init(&intel_state->commit_ready,
13936 intel_atomic_commit_ready);
94f05024 13937
440df938
VS
13938 /*
13939 * The intel_legacy_cursor_update() fast path takes care
13940 * of avoiding the vblank waits for simple cursor
13941 * movement and flips. For cursor on/off and size changes,
13942 * we want to perform the vblank waits so that watermark
13943 * updates happen during the correct frames. Gen9+ have
13944 * double buffered watermarks and so shouldn't need this.
13945 *
3cf50c63
ML
13946 * Unset state->legacy_cursor_update before the call to
13947 * drm_atomic_helper_setup_commit() because otherwise
13948 * drm_atomic_helper_wait_for_flip_done() is a noop and
13949 * we get FIFO underruns because we didn't wait
13950 * for vblank.
440df938
VS
13951 *
13952 * FIXME doing watermarks and fb cleanup from a vblank worker
13953 * (assuming we had any) would solve these problems.
13954 */
213f1bd0
ML
13955 if (INTEL_GEN(dev_priv) < 9 && state->legacy_cursor_update) {
13956 struct intel_crtc_state *new_crtc_state;
13957 struct intel_crtc *crtc;
13958 int i;
13959
13960 for_each_new_intel_crtc_in_state(intel_state, crtc, new_crtc_state, i)
13961 if (new_crtc_state->wm.need_postvbl_update ||
13962 new_crtc_state->update_wm_post)
13963 state->legacy_cursor_update = false;
13964 }
440df938 13965
3cf50c63
ML
13966 ret = intel_atomic_prepare_commit(dev, state);
13967 if (ret) {
13968 DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
13969 i915_sw_fence_commit(&intel_state->commit_ready);
2e2f08d0 13970 intel_runtime_pm_put(dev_priv, intel_state->wakeref);
3cf50c63
ML
13971 return ret;
13972 }
13973
13974 ret = drm_atomic_helper_setup_commit(state, nonblock);
13975 if (!ret)
13976 ret = drm_atomic_helper_swap_state(state, true);
13977
0806f4ee
ML
13978 if (ret) {
13979 i915_sw_fence_commit(&intel_state->commit_ready);
13980
0806f4ee 13981 drm_atomic_helper_cleanup_planes(dev, state);
2e2f08d0 13982 intel_runtime_pm_put(dev_priv, intel_state->wakeref);
0806f4ee
ML
13983 return ret;
13984 }
94f05024 13985 dev_priv->wm.distrust_bios_wm = false;
3c0fb588 13986 intel_shared_dpll_swap_state(state);
6c9c1b38 13987 intel_atomic_track_fbs(state);
94f05024 13988
c3b32658 13989 if (intel_state->modeset) {
d305e061
VS
13990 memcpy(dev_priv->min_cdclk, intel_state->min_cdclk,
13991 sizeof(intel_state->min_cdclk));
53e9bf5e
VS
13992 memcpy(dev_priv->min_voltage_level,
13993 intel_state->min_voltage_level,
13994 sizeof(intel_state->min_voltage_level));
c3b32658 13995 dev_priv->active_crtcs = intel_state->active_crtcs;
905801fe
VS
13996 dev_priv->cdclk.force_min_cdclk =
13997 intel_state->cdclk.force_min_cdclk;
48d9f87d
ID
13998
13999 intel_cdclk_swap_state(intel_state);
c3b32658
ML
14000 }
14001
0853695c 14002 drm_atomic_state_get(state);
42b062b0 14003 INIT_WORK(&state->commit_work, intel_atomic_commit_work);
c004a90b
CW
14004
14005 i915_sw_fence_commit(&intel_state->commit_ready);
757fffcf
VS
14006 if (nonblock && intel_state->modeset) {
14007 queue_work(dev_priv->modeset_wq, &state->commit_work);
14008 } else if (nonblock) {
42b062b0 14009 queue_work(system_unbound_wq, &state->commit_work);
757fffcf
VS
14010 } else {
14011 if (intel_state->modeset)
14012 flush_workqueue(dev_priv->modeset_wq);
94f05024 14013 intel_atomic_commit_tail(state);
757fffcf 14014 }
75714940 14015
74c090b1 14016 return 0;
7f27126e
JB
14017}
14018
f6e5b160 14019static const struct drm_crtc_funcs intel_crtc_funcs = {
3fab2f09 14020 .gamma_set = drm_atomic_helper_legacy_gamma_set,
74c090b1 14021 .set_config = drm_atomic_helper_set_config,
f6e5b160 14022 .destroy = intel_crtc_destroy,
4c01ded5 14023 .page_flip = drm_atomic_helper_page_flip,
1356837e
MR
14024 .atomic_duplicate_state = intel_crtc_duplicate_state,
14025 .atomic_destroy_state = intel_crtc_destroy_state,
8c6b709d 14026 .set_crc_source = intel_crtc_set_crc_source,
a8c20833 14027 .verify_crc_source = intel_crtc_verify_crc_source,
260bc551 14028 .get_crc_sources = intel_crtc_get_crc_sources,
f6e5b160
CW
14029};
14030
74d290f8
CW
14031struct wait_rps_boost {
14032 struct wait_queue_entry wait;
14033
14034 struct drm_crtc *crtc;
e61e0f51 14035 struct i915_request *request;
74d290f8
CW
14036};
14037
14038static int do_rps_boost(struct wait_queue_entry *_wait,
14039 unsigned mode, int sync, void *key)
14040{
14041 struct wait_rps_boost *wait = container_of(_wait, typeof(*wait), wait);
e61e0f51 14042 struct i915_request *rq = wait->request;
74d290f8 14043
e9af4ea2
CW
14044 /*
14045 * If we missed the vblank, but the request is already running it
14046 * is reasonable to assume that it will complete before the next
14047 * vblank without our intervention, so leave RPS alone.
14048 */
e61e0f51 14049 if (!i915_request_started(rq))
62eb3c24 14050 gen6_rps_boost(rq);
e61e0f51 14051 i915_request_put(rq);
74d290f8
CW
14052
14053 drm_crtc_vblank_put(wait->crtc);
14054
14055 list_del(&wait->wait.entry);
14056 kfree(wait);
14057 return 1;
14058}
14059
14060static void add_rps_boost_after_vblank(struct drm_crtc *crtc,
14061 struct dma_fence *fence)
14062{
14063 struct wait_rps_boost *wait;
14064
14065 if (!dma_fence_is_i915(fence))
14066 return;
14067
14068 if (INTEL_GEN(to_i915(crtc->dev)) < 6)
14069 return;
14070
14071 if (drm_crtc_vblank_get(crtc))
14072 return;
14073
14074 wait = kmalloc(sizeof(*wait), GFP_KERNEL);
14075 if (!wait) {
14076 drm_crtc_vblank_put(crtc);
14077 return;
14078 }
14079
14080 wait->request = to_request(dma_fence_get(fence));
14081 wait->crtc = crtc;
14082
14083 wait->wait.func = do_rps_boost;
14084 wait->wait.flags = 0;
14085
14086 add_wait_queue(drm_crtc_vblank_waitqueue(crtc), &wait->wait);
14087}
14088
ef1a1914
VS
14089static int intel_plane_pin_fb(struct intel_plane_state *plane_state)
14090{
14091 struct intel_plane *plane = to_intel_plane(plane_state->base.plane);
14092 struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
14093 struct drm_framebuffer *fb = plane_state->base.fb;
14094 struct i915_vma *vma;
14095
14096 if (plane->id == PLANE_CURSOR &&
d53db442 14097 INTEL_INFO(dev_priv)->display.cursor_needs_physical) {
ef1a1914
VS
14098 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
14099 const int align = intel_cursor_alignment(dev_priv);
4a477651 14100 int err;
ef1a1914 14101
4a477651
CW
14102 err = i915_gem_object_attach_phys(obj, align);
14103 if (err)
14104 return err;
ef1a1914
VS
14105 }
14106
14107 vma = intel_pin_and_fence_fb_obj(fb,
f5929c53 14108 &plane_state->view,
ef1a1914
VS
14109 intel_plane_uses_fence(plane_state),
14110 &plane_state->flags);
14111 if (IS_ERR(vma))
14112 return PTR_ERR(vma);
14113
14114 plane_state->vma = vma;
14115
14116 return 0;
14117}
14118
14119static void intel_plane_unpin_fb(struct intel_plane_state *old_plane_state)
14120{
14121 struct i915_vma *vma;
14122
14123 vma = fetch_and_zero(&old_plane_state->vma);
14124 if (vma)
14125 intel_unpin_fb_vma(vma, old_plane_state->flags);
14126}
14127
b7268c5e
CW
14128static void fb_obj_bump_render_priority(struct drm_i915_gem_object *obj)
14129{
14130 struct i915_sched_attr attr = {
14131 .priority = I915_PRIORITY_DISPLAY,
14132 };
14133
14134 i915_gem_object_wait_priority(obj, 0, &attr);
14135}
14136
6beb8c23
MR
14137/**
14138 * intel_prepare_plane_fb - Prepare fb for usage on plane
14139 * @plane: drm plane to prepare for
c38c1455 14140 * @new_state: the plane state being prepared
6beb8c23
MR
14141 *
14142 * Prepares a framebuffer for usage on a display plane. Generally this
14143 * involves pinning the underlying object and updating the frontbuffer tracking
14144 * bits. Some older platforms need special physical address handling for
14145 * cursor planes.
14146 *
f935675f
ML
14147 * Must be called with struct_mutex held.
14148 *
6beb8c23
MR
14149 * Returns 0 on success, negative error code on failure.
14150 */
14151int
14152intel_prepare_plane_fb(struct drm_plane *plane,
1832040d 14153 struct drm_plane_state *new_state)
465c120c 14154{
c004a90b
CW
14155 struct intel_atomic_state *intel_state =
14156 to_intel_atomic_state(new_state->state);
b7f05d4a 14157 struct drm_i915_private *dev_priv = to_i915(plane->dev);
844f9111 14158 struct drm_framebuffer *fb = new_state->fb;
6beb8c23 14159 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
1ee49399 14160 struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
c004a90b 14161 int ret;
465c120c 14162
5008e874
ML
14163 if (old_obj) {
14164 struct drm_crtc_state *crtc_state =
8b69449d
ML
14165 drm_atomic_get_new_crtc_state(new_state->state,
14166 plane->state->crtc);
5008e874
ML
14167
14168 /* Big Hammer, we also need to ensure that any pending
14169 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
14170 * current scanout is retired before unpinning the old
14171 * framebuffer. Note that we rely on userspace rendering
14172 * into the buffer attached to the pipe they are waiting
14173 * on. If not, userspace generates a GPU hang with IPEHR
14174 * point to the MI_WAIT_FOR_EVENT.
14175 *
14176 * This should only fail upon a hung GPU, in which case we
14177 * can safely continue.
14178 */
c004a90b
CW
14179 if (needs_modeset(crtc_state)) {
14180 ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
14181 old_obj->resv, NULL,
14182 false, 0,
14183 GFP_KERNEL);
14184 if (ret < 0)
14185 return ret;
f4457ae7 14186 }
5008e874
ML
14187 }
14188
c004a90b
CW
14189 if (new_state->fence) { /* explicit fencing */
14190 ret = i915_sw_fence_await_dma_fence(&intel_state->commit_ready,
14191 new_state->fence,
14192 I915_FENCE_TIMEOUT,
14193 GFP_KERNEL);
14194 if (ret < 0)
14195 return ret;
14196 }
14197
c37efb99
CW
14198 if (!obj)
14199 return 0;
14200
4d3088c7 14201 ret = i915_gem_object_pin_pages(obj);
fd70075f
CW
14202 if (ret)
14203 return ret;
14204
4d3088c7
CW
14205 ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
14206 if (ret) {
14207 i915_gem_object_unpin_pages(obj);
14208 return ret;
14209 }
14210
ef1a1914 14211 ret = intel_plane_pin_fb(to_intel_plane_state(new_state));
fd70075f 14212
fd70075f 14213 mutex_unlock(&dev_priv->drm.struct_mutex);
4d3088c7 14214 i915_gem_object_unpin_pages(obj);
fd70075f
CW
14215 if (ret)
14216 return ret;
14217
e2f3496e 14218 fb_obj_bump_render_priority(obj);
07bcd99b
DP
14219 intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
14220
c004a90b 14221 if (!new_state->fence) { /* implicit fencing */
74d290f8
CW
14222 struct dma_fence *fence;
14223
c004a90b
CW
14224 ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
14225 obj->resv, NULL,
14226 false, I915_FENCE_TIMEOUT,
14227 GFP_KERNEL);
14228 if (ret < 0)
14229 return ret;
74d290f8
CW
14230
14231 fence = reservation_object_get_excl_rcu(obj->resv);
14232 if (fence) {
14233 add_rps_boost_after_vblank(new_state->crtc, fence);
14234 dma_fence_put(fence);
14235 }
14236 } else {
14237 add_rps_boost_after_vblank(new_state->crtc, new_state->fence);
c004a90b 14238 }
5a21b665 14239
60548c55
CW
14240 /*
14241 * We declare pageflips to be interactive and so merit a small bias
14242 * towards upclocking to deliver the frame on time. By only changing
14243 * the RPS thresholds to sample more regularly and aim for higher
14244 * clocks we can hopefully deliver low power workloads (like kodi)
14245 * that are not quite steady state without resorting to forcing
14246 * maximum clocks following a vblank miss (see do_rps_boost()).
14247 */
14248 if (!intel_state->rps_interactive) {
14249 intel_rps_mark_interactive(dev_priv, true);
14250 intel_state->rps_interactive = true;
14251 }
14252
d07f0e59 14253 return 0;
6beb8c23
MR
14254}
14255
38f3ce3a
MR
14256/**
14257 * intel_cleanup_plane_fb - Cleans up an fb after plane use
14258 * @plane: drm plane to clean up for
c38c1455 14259 * @old_state: the state from the previous modeset
38f3ce3a
MR
14260 *
14261 * Cleans up a framebuffer that has just been removed from a plane.
f935675f
ML
14262 *
14263 * Must be called with struct_mutex held.
38f3ce3a
MR
14264 */
14265void
14266intel_cleanup_plane_fb(struct drm_plane *plane,
1832040d 14267 struct drm_plane_state *old_state)
38f3ce3a 14268{
60548c55
CW
14269 struct intel_atomic_state *intel_state =
14270 to_intel_atomic_state(old_state->state);
ef1a1914 14271 struct drm_i915_private *dev_priv = to_i915(plane->dev);
38f3ce3a 14272
60548c55
CW
14273 if (intel_state->rps_interactive) {
14274 intel_rps_mark_interactive(dev_priv, false);
14275 intel_state->rps_interactive = false;
14276 }
14277
be1e3415 14278 /* Should only be called after a successful intel_prepare_plane_fb()! */
ef1a1914
VS
14279 mutex_lock(&dev_priv->drm.struct_mutex);
14280 intel_plane_unpin_fb(to_intel_plane_state(old_state));
14281 mutex_unlock(&dev_priv->drm.struct_mutex);
465c120c
MR
14282}
14283
6156a456 14284int
4e0b83a5
VS
14285skl_max_scale(const struct intel_crtc_state *crtc_state,
14286 u32 pixel_format)
6156a456 14287{
4e0b83a5
VS
14288 struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
14289 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
77224cd5
CK
14290 int max_scale, mult;
14291 int crtc_clock, max_dotclk, tmpclk1, tmpclk2;
6156a456 14292
4e0b83a5 14293 if (!crtc_state->base.enable)
6156a456
CK
14294 return DRM_PLANE_HELPER_NO_SCALING;
14295
6156a456 14296 crtc_clock = crtc_state->base.adjusted_mode.crtc_clock;
5b7280f0
ACO
14297 max_dotclk = to_intel_atomic_state(crtc_state->base.state)->cdclk.logical.cdclk;
14298
43037c86 14299 if (IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10)
5b7280f0 14300 max_dotclk *= 2;
6156a456 14301
5b7280f0 14302 if (WARN_ON_ONCE(!crtc_clock || max_dotclk < crtc_clock))
6156a456
CK
14303 return DRM_PLANE_HELPER_NO_SCALING;
14304
14305 /*
14306 * skl max scale is lower of:
14307 * close to 3 but not 3, -1 is for that purpose
14308 * or
14309 * cdclk/crtc_clock
14310 */
df7d4156 14311 mult = is_planar_yuv_format(pixel_format) ? 2 : 3;
77224cd5
CK
14312 tmpclk1 = (1 << 16) * mult - 1;
14313 tmpclk2 = (1 << 8) * ((max_dotclk << 8) / crtc_clock);
14314 max_scale = min(tmpclk1, tmpclk2);
6156a456
CK
14315
14316 return max_scale;
14317}
14318
c856dbc8
MN
14319static void intel_begin_crtc_commit(struct intel_atomic_state *state,
14320 struct intel_crtc *crtc)
5a21b665 14321{
c856dbc8
MN
14322 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
14323 struct intel_crtc_state *old_crtc_state =
14324 intel_atomic_get_old_crtc_state(state, crtc);
14325 struct intel_crtc_state *new_crtc_state =
14326 intel_atomic_get_new_crtc_state(state, crtc);
14327 bool modeset = needs_modeset(&new_crtc_state->base);
5a21b665
DV
14328
14329 /* Perform vblank evasion around commit operation */
c856dbc8 14330 intel_pipe_update_start(new_crtc_state);
5a21b665
DV
14331
14332 if (modeset)
e62929b3 14333 goto out;
5a21b665 14334
c856dbc8
MN
14335 if (new_crtc_state->base.color_mgmt_changed ||
14336 new_crtc_state->update_pipe)
14337 intel_color_commit(new_crtc_state);
4d8ed54c 14338
c856dbc8
MN
14339 if (new_crtc_state->update_pipe)
14340 intel_update_pipe_config(old_crtc_state, new_crtc_state);
ccf010fb 14341 else if (INTEL_GEN(dev_priv) >= 9)
c856dbc8 14342 skl_detach_scalers(new_crtc_state);
62e0fb88 14343
a832d357
VS
14344 if (INTEL_GEN(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
14345 bdw_set_pipemisc(new_crtc_state);
14346
e62929b3 14347out:
ccf010fb 14348 if (dev_priv->display.atomic_update_watermarks)
c856dbc8
MN
14349 dev_priv->display.atomic_update_watermarks(state,
14350 new_crtc_state);
5a21b665
DV
14351}
14352
d52ad9cb
ML
14353void intel_crtc_arm_fifo_underrun(struct intel_crtc *crtc,
14354 struct intel_crtc_state *crtc_state)
14355{
14356 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
14357
cf819eff 14358 if (!IS_GEN(dev_priv, 2))
d52ad9cb
ML
14359 intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, true);
14360
14361 if (crtc_state->has_pch_encoder) {
14362 enum pipe pch_transcoder =
14363 intel_crtc_pch_transcoder(crtc);
14364
14365 intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder, true);
14366 }
14367}
14368
c856dbc8
MN
14369static void intel_finish_crtc_commit(struct intel_atomic_state *state,
14370 struct intel_crtc *crtc)
5a21b665 14371{
c856dbc8
MN
14372 struct intel_crtc_state *old_crtc_state =
14373 intel_atomic_get_old_crtc_state(state, crtc);
d3a8fb32 14374 struct intel_crtc_state *new_crtc_state =
c856dbc8 14375 intel_atomic_get_new_crtc_state(state, crtc);
5a21b665 14376
d3a8fb32 14377 intel_pipe_update_end(new_crtc_state);
33a49868
ML
14378
14379 if (new_crtc_state->update_pipe &&
14380 !needs_modeset(&new_crtc_state->base) &&
c856dbc8
MN
14381 old_crtc_state->base.mode.private_flags & I915_MODE_FLAG_INHERITED)
14382 intel_crtc_arm_fifo_underrun(crtc, new_crtc_state);
5a21b665
DV
14383}
14384
cf4c7c12 14385/**
4a3b8769
MR
14386 * intel_plane_destroy - destroy a plane
14387 * @plane: plane to destroy
cf4c7c12 14388 *
4a3b8769
MR
14389 * Common destruction function for all types of planes (primary, cursor,
14390 * sprite).
cf4c7c12 14391 */
4a3b8769 14392void intel_plane_destroy(struct drm_plane *plane)
465c120c 14393{
465c120c 14394 drm_plane_cleanup(plane);
69ae561f 14395 kfree(to_intel_plane(plane));
465c120c
MR
14396}
14397
a38189c5
VS
14398static bool i8xx_plane_format_mod_supported(struct drm_plane *_plane,
14399 u32 format, u64 modifier)
714244e2 14400{
a38189c5
VS
14401 switch (modifier) {
14402 case DRM_FORMAT_MOD_LINEAR:
14403 case I915_FORMAT_MOD_X_TILED:
14404 break;
14405 default:
14406 return false;
14407 }
14408
714244e2
BW
14409 switch (format) {
14410 case DRM_FORMAT_C8:
14411 case DRM_FORMAT_RGB565:
14412 case DRM_FORMAT_XRGB1555:
14413 case DRM_FORMAT_XRGB8888:
14414 return modifier == DRM_FORMAT_MOD_LINEAR ||
14415 modifier == I915_FORMAT_MOD_X_TILED;
14416 default:
14417 return false;
14418 }
14419}
14420
a38189c5
VS
14421static bool i965_plane_format_mod_supported(struct drm_plane *_plane,
14422 u32 format, u64 modifier)
714244e2 14423{
a38189c5
VS
14424 switch (modifier) {
14425 case DRM_FORMAT_MOD_LINEAR:
14426 case I915_FORMAT_MOD_X_TILED:
14427 break;
14428 default:
14429 return false;
14430 }
14431
714244e2
BW
14432 switch (format) {
14433 case DRM_FORMAT_C8:
14434 case DRM_FORMAT_RGB565:
14435 case DRM_FORMAT_XRGB8888:
14436 case DRM_FORMAT_XBGR8888:
14437 case DRM_FORMAT_XRGB2101010:
14438 case DRM_FORMAT_XBGR2101010:
14439 return modifier == DRM_FORMAT_MOD_LINEAR ||
14440 modifier == I915_FORMAT_MOD_X_TILED;
14441 default:
14442 return false;
14443 }
14444}
14445
a38189c5
VS
14446static bool intel_cursor_format_mod_supported(struct drm_plane *_plane,
14447 u32 format, u64 modifier)
714244e2 14448{
a38189c5
VS
14449 return modifier == DRM_FORMAT_MOD_LINEAR &&
14450 format == DRM_FORMAT_ARGB8888;
714244e2
BW
14451}
14452
679bfe84 14453static const struct drm_plane_funcs i965_plane_funcs = {
a38189c5
VS
14454 .update_plane = drm_atomic_helper_update_plane,
14455 .disable_plane = drm_atomic_helper_disable_plane,
14456 .destroy = intel_plane_destroy,
14457 .atomic_get_property = intel_plane_atomic_get_property,
14458 .atomic_set_property = intel_plane_atomic_set_property,
14459 .atomic_duplicate_state = intel_plane_duplicate_state,
14460 .atomic_destroy_state = intel_plane_destroy_state,
14461 .format_mod_supported = i965_plane_format_mod_supported,
14462};
714244e2 14463
679bfe84 14464static const struct drm_plane_funcs i8xx_plane_funcs = {
70a101f8
MR
14465 .update_plane = drm_atomic_helper_update_plane,
14466 .disable_plane = drm_atomic_helper_disable_plane,
3d7d6510 14467 .destroy = intel_plane_destroy,
a98b3431
MR
14468 .atomic_get_property = intel_plane_atomic_get_property,
14469 .atomic_set_property = intel_plane_atomic_set_property,
ea2c67bb
MR
14470 .atomic_duplicate_state = intel_plane_duplicate_state,
14471 .atomic_destroy_state = intel_plane_destroy_state,
a38189c5 14472 .format_mod_supported = i8xx_plane_format_mod_supported,
465c120c
MR
14473};
14474
f79f2692
ML
14475static int
14476intel_legacy_cursor_update(struct drm_plane *plane,
14477 struct drm_crtc *crtc,
14478 struct drm_framebuffer *fb,
14479 int crtc_x, int crtc_y,
14480 unsigned int crtc_w, unsigned int crtc_h,
ba3f4d0a
JN
14481 u32 src_x, u32 src_y,
14482 u32 src_w, u32 src_h,
34a2ab5e 14483 struct drm_modeset_acquire_ctx *ctx)
f79f2692
ML
14484{
14485 struct drm_i915_private *dev_priv = to_i915(crtc->dev);
14486 int ret;
14487 struct drm_plane_state *old_plane_state, *new_plane_state;
14488 struct intel_plane *intel_plane = to_intel_plane(plane);
14489 struct drm_framebuffer *old_fb;
c249c5f6
ML
14490 struct intel_crtc_state *crtc_state =
14491 to_intel_crtc_state(crtc->state);
14492 struct intel_crtc_state *new_crtc_state;
f79f2692
ML
14493
14494 /*
14495 * When crtc is inactive or there is a modeset pending,
14496 * wait for it to complete in the slowpath
14497 */
c249c5f6
ML
14498 if (!crtc_state->base.active || needs_modeset(&crtc_state->base) ||
14499 crtc_state->update_pipe)
f79f2692
ML
14500 goto slow;
14501
14502 old_plane_state = plane->state;
669c9215
ML
14503 /*
14504 * Don't do an async update if there is an outstanding commit modifying
14505 * the plane. This prevents our async update's changes from getting
14506 * overridden by a previous synchronous update's state.
14507 */
14508 if (old_plane_state->commit &&
14509 !try_wait_for_completion(&old_plane_state->commit->hw_done))
14510 goto slow;
f79f2692
ML
14511
14512 /*
14513 * If any parameters change that may affect watermarks,
14514 * take the slowpath. Only changing fb or position should be
14515 * in the fastpath.
14516 */
14517 if (old_plane_state->crtc != crtc ||
14518 old_plane_state->src_w != src_w ||
14519 old_plane_state->src_h != src_h ||
14520 old_plane_state->crtc_w != crtc_w ||
14521 old_plane_state->crtc_h != crtc_h ||
a5509abd 14522 !old_plane_state->fb != !fb)
f79f2692
ML
14523 goto slow;
14524
14525 new_plane_state = intel_plane_duplicate_state(plane);
14526 if (!new_plane_state)
14527 return -ENOMEM;
14528
c249c5f6
ML
14529 new_crtc_state = to_intel_crtc_state(intel_crtc_duplicate_state(crtc));
14530 if (!new_crtc_state) {
14531 ret = -ENOMEM;
14532 goto out_free;
14533 }
14534
f79f2692
ML
14535 drm_atomic_set_fb_for_plane(new_plane_state, fb);
14536
14537 new_plane_state->src_x = src_x;
14538 new_plane_state->src_y = src_y;
14539 new_plane_state->src_w = src_w;
14540 new_plane_state->src_h = src_h;
14541 new_plane_state->crtc_x = crtc_x;
14542 new_plane_state->crtc_y = crtc_y;
14543 new_plane_state->crtc_w = crtc_w;
14544 new_plane_state->crtc_h = crtc_h;
14545
c249c5f6
ML
14546 ret = intel_plane_atomic_check_with_state(crtc_state, new_crtc_state,
14547 to_intel_plane_state(old_plane_state),
f79f2692
ML
14548 to_intel_plane_state(new_plane_state));
14549 if (ret)
14550 goto out_free;
14551
f79f2692
ML
14552 ret = mutex_lock_interruptible(&dev_priv->drm.struct_mutex);
14553 if (ret)
14554 goto out_free;
14555
ef1a1914
VS
14556 ret = intel_plane_pin_fb(to_intel_plane_state(new_plane_state));
14557 if (ret)
14558 goto out_unlock;
f79f2692 14559
a694e226 14560 intel_fb_obj_flush(intel_fb_obj(fb), ORIGIN_FLIP);
f79f2692 14561
07bcd99b 14562 old_fb = old_plane_state->fb;
f79f2692
ML
14563 i915_gem_track_fb(intel_fb_obj(old_fb), intel_fb_obj(fb),
14564 intel_plane->frontbuffer_bit);
14565
14566 /* Swap plane state */
669c9215 14567 plane->state = new_plane_state;
f79f2692 14568
c249c5f6
ML
14569 /*
14570 * We cannot swap crtc_state as it may be in use by an atomic commit or
14571 * page flip that's running simultaneously. If we swap crtc_state and
14572 * destroy the old state, we will cause a use-after-free there.
14573 *
14574 * Only update active_planes, which is needed for our internal
14575 * bookkeeping. Either value will do the right thing when updating
14576 * planes atomically. If the cursor was part of the atomic update then
14577 * we would have taken the slowpath.
14578 */
14579 crtc_state->active_planes = new_crtc_state->active_planes;
14580
c48b86f9
VS
14581 if (plane->state->visible)
14582 intel_update_plane(intel_plane, crtc_state,
14583 to_intel_plane_state(plane->state));
14584 else
14585 intel_disable_plane(intel_plane, crtc_state);
f79f2692 14586
ef1a1914 14587 intel_plane_unpin_fb(to_intel_plane_state(old_plane_state));
f79f2692
ML
14588
14589out_unlock:
14590 mutex_unlock(&dev_priv->drm.struct_mutex);
14591out_free:
c249c5f6
ML
14592 if (new_crtc_state)
14593 intel_crtc_destroy_state(crtc, &new_crtc_state->base);
669c9215
ML
14594 if (ret)
14595 intel_plane_destroy_state(plane, new_plane_state);
14596 else
14597 intel_plane_destroy_state(plane, old_plane_state);
f79f2692
ML
14598 return ret;
14599
f79f2692
ML
14600slow:
14601 return drm_atomic_helper_update_plane(plane, crtc, fb,
14602 crtc_x, crtc_y, crtc_w, crtc_h,
34a2ab5e 14603 src_x, src_y, src_w, src_h, ctx);
f79f2692
ML
14604}
14605
14606static const struct drm_plane_funcs intel_cursor_plane_funcs = {
14607 .update_plane = intel_legacy_cursor_update,
14608 .disable_plane = drm_atomic_helper_disable_plane,
14609 .destroy = intel_plane_destroy,
f79f2692
ML
14610 .atomic_get_property = intel_plane_atomic_get_property,
14611 .atomic_set_property = intel_plane_atomic_set_property,
14612 .atomic_duplicate_state = intel_plane_duplicate_state,
14613 .atomic_destroy_state = intel_plane_destroy_state,
a38189c5 14614 .format_mod_supported = intel_cursor_format_mod_supported,
f79f2692
ML
14615};
14616
cf1805e6
VS
14617static bool i9xx_plane_has_fbc(struct drm_i915_private *dev_priv,
14618 enum i9xx_plane_id i9xx_plane)
14619{
14620 if (!HAS_FBC(dev_priv))
14621 return false;
14622
14623 if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
14624 return i9xx_plane == PLANE_A; /* tied to pipe A */
14625 else if (IS_IVYBRIDGE(dev_priv))
14626 return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B ||
14627 i9xx_plane == PLANE_C;
14628 else if (INTEL_GEN(dev_priv) >= 4)
14629 return i9xx_plane == PLANE_A || i9xx_plane == PLANE_B;
14630 else
14631 return i9xx_plane == PLANE_A;
14632}
14633
b079bd17 14634static struct intel_plane *
580503c7 14635intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
465c120c 14636{
881440a8 14637 struct intel_plane *plane;
a38189c5 14638 const struct drm_plane_funcs *plane_funcs;
93ca7e00 14639 unsigned int supported_rotations;
deb19689 14640 unsigned int possible_crtcs;
881440a8
VS
14641 const u64 *modifiers;
14642 const u32 *formats;
14643 int num_formats;
fca0ce2a 14644 int ret;
465c120c 14645
b7c80600
VS
14646 if (INTEL_GEN(dev_priv) >= 9)
14647 return skl_universal_plane_create(dev_priv, pipe,
14648 PLANE_PRIMARY);
14649
881440a8
VS
14650 plane = intel_plane_alloc();
14651 if (IS_ERR(plane))
14652 return plane;
ea2c67bb 14653
881440a8 14654 plane->pipe = pipe;
e3c566df
VS
14655 /*
14656 * On gen2/3 only plane A can do FBC, but the panel fitter and LVDS
14657 * port is hooked to pipe B. Hence we want plane A feeding pipe B.
14658 */
14659 if (HAS_FBC(dev_priv) && INTEL_GEN(dev_priv) < 4)
881440a8 14660 plane->i9xx_plane = (enum i9xx_plane_id) !pipe;
e3c566df 14661 else
881440a8
VS
14662 plane->i9xx_plane = (enum i9xx_plane_id) pipe;
14663 plane->id = PLANE_PRIMARY;
14664 plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id);
cf1805e6 14665
881440a8
VS
14666 plane->has_fbc = i9xx_plane_has_fbc(dev_priv, plane->i9xx_plane);
14667 if (plane->has_fbc) {
cf1805e6
VS
14668 struct intel_fbc *fbc = &dev_priv->fbc;
14669
881440a8 14670 fbc->possible_framebuffer_bits |= plane->frontbuffer_bit;
cf1805e6
VS
14671 }
14672
b7c80600 14673 if (INTEL_GEN(dev_priv) >= 4) {
881440a8 14674 formats = i965_primary_formats;
568db4f2 14675 num_formats = ARRAY_SIZE(i965_primary_formats);
714244e2 14676 modifiers = i9xx_format_modifiers;
a8d201af 14677
881440a8
VS
14678 plane->max_stride = i9xx_plane_max_stride;
14679 plane->update_plane = i9xx_update_plane;
14680 plane->disable_plane = i9xx_disable_plane;
14681 plane->get_hw_state = i9xx_plane_get_hw_state;
14682 plane->check_plane = i9xx_plane_check;
a38189c5
VS
14683
14684 plane_funcs = &i965_plane_funcs;
6c0fd451 14685 } else {
881440a8 14686 formats = i8xx_primary_formats;
6c0fd451 14687 num_formats = ARRAY_SIZE(i8xx_primary_formats);
714244e2 14688 modifiers = i9xx_format_modifiers;
a8d201af 14689
881440a8
VS
14690 plane->max_stride = i9xx_plane_max_stride;
14691 plane->update_plane = i9xx_update_plane;
14692 plane->disable_plane = i9xx_disable_plane;
14693 plane->get_hw_state = i9xx_plane_get_hw_state;
14694 plane->check_plane = i9xx_plane_check;
a38189c5
VS
14695
14696 plane_funcs = &i8xx_plane_funcs;
465c120c
MR
14697 }
14698
deb19689
VS
14699 possible_crtcs = BIT(pipe);
14700
b7c80600 14701 if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
881440a8 14702 ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
deb19689 14703 possible_crtcs, plane_funcs,
881440a8 14704 formats, num_formats, modifiers,
38573dc1
VS
14705 DRM_PLANE_TYPE_PRIMARY,
14706 "primary %c", pipe_name(pipe));
14707 else
881440a8 14708 ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
deb19689 14709 possible_crtcs, plane_funcs,
881440a8 14710 formats, num_formats, modifiers,
38573dc1 14711 DRM_PLANE_TYPE_PRIMARY,
ed15030d 14712 "plane %c",
881440a8 14713 plane_name(plane->i9xx_plane));
fca0ce2a
VS
14714 if (ret)
14715 goto fail;
48404c1e 14716
b7c80600 14717 if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
4ea7be2b 14718 supported_rotations =
c2c446ad
RF
14719 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180 |
14720 DRM_MODE_REFLECT_X;
5481e27f 14721 } else if (INTEL_GEN(dev_priv) >= 4) {
93ca7e00 14722 supported_rotations =
c2c446ad 14723 DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180;
93ca7e00 14724 } else {
c2c446ad 14725 supported_rotations = DRM_MODE_ROTATE_0;
93ca7e00
VS
14726 }
14727
5481e27f 14728 if (INTEL_GEN(dev_priv) >= 4)
881440a8 14729 drm_plane_create_rotation_property(&plane->base,
c2c446ad 14730 DRM_MODE_ROTATE_0,
93ca7e00 14731 supported_rotations);
48404c1e 14732
881440a8 14733 drm_plane_helper_add(&plane->base, &intel_plane_helper_funcs);
ea2c67bb 14734
881440a8 14735 return plane;
fca0ce2a
VS
14736
14737fail:
881440a8 14738 intel_plane_free(plane);
fca0ce2a 14739
b079bd17 14740 return ERR_PTR(ret);
465c120c
MR
14741}
14742
b079bd17 14743static struct intel_plane *
b2d03b0d
VS
14744intel_cursor_plane_create(struct drm_i915_private *dev_priv,
14745 enum pipe pipe)
3d7d6510 14746{
deb19689 14747 unsigned int possible_crtcs;
c539b579 14748 struct intel_plane *cursor;
fca0ce2a 14749 int ret;
3d7d6510 14750
c539b579
VS
14751 cursor = intel_plane_alloc();
14752 if (IS_ERR(cursor))
14753 return cursor;
ea2c67bb 14754
3d7d6510 14755 cursor->pipe = pipe;
ed15030d 14756 cursor->i9xx_plane = (enum i9xx_plane_id) pipe;
b14e5848 14757 cursor->id = PLANE_CURSOR;
c19e1124 14758 cursor->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, cursor->id);
b2d03b0d
VS
14759
14760 if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
ddd5713d 14761 cursor->max_stride = i845_cursor_max_stride;
b2d03b0d
VS
14762 cursor->update_plane = i845_update_cursor;
14763 cursor->disable_plane = i845_disable_cursor;
51f5a096 14764 cursor->get_hw_state = i845_cursor_get_hw_state;
659056f2 14765 cursor->check_plane = i845_check_cursor;
b2d03b0d 14766 } else {
ddd5713d 14767 cursor->max_stride = i9xx_cursor_max_stride;
b2d03b0d
VS
14768 cursor->update_plane = i9xx_update_cursor;
14769 cursor->disable_plane = i9xx_disable_cursor;
51f5a096 14770 cursor->get_hw_state = i9xx_cursor_get_hw_state;
659056f2 14771 cursor->check_plane = i9xx_check_cursor;
b2d03b0d 14772 }
3d7d6510 14773
cd5dcbf1
VS
14774 cursor->cursor.base = ~0;
14775 cursor->cursor.cntl = ~0;
024faac7
VS
14776
14777 if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv))
14778 cursor->cursor.size = ~0;
3d7d6510 14779
deb19689
VS
14780 possible_crtcs = BIT(pipe);
14781
580503c7 14782 ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base,
deb19689 14783 possible_crtcs, &intel_cursor_plane_funcs,
fca0ce2a
VS
14784 intel_cursor_formats,
14785 ARRAY_SIZE(intel_cursor_formats),
714244e2
BW
14786 cursor_format_modifiers,
14787 DRM_PLANE_TYPE_CURSOR,
38573dc1 14788 "cursor %c", pipe_name(pipe));
fca0ce2a
VS
14789 if (ret)
14790 goto fail;
4398ad45 14791
5481e27f 14792 if (INTEL_GEN(dev_priv) >= 4)
93ca7e00 14793 drm_plane_create_rotation_property(&cursor->base,
c2c446ad
RF
14794 DRM_MODE_ROTATE_0,
14795 DRM_MODE_ROTATE_0 |
14796 DRM_MODE_ROTATE_180);
4398ad45 14797
ea2c67bb
MR
14798 drm_plane_helper_add(&cursor->base, &intel_plane_helper_funcs);
14799
b079bd17 14800 return cursor;
fca0ce2a
VS
14801
14802fail:
c539b579 14803 intel_plane_free(cursor);
fca0ce2a 14804
b079bd17 14805 return ERR_PTR(ret);
3d7d6510
MR
14806}
14807
1c74eeaf
NM
14808static void intel_crtc_init_scalers(struct intel_crtc *crtc,
14809 struct intel_crtc_state *crtc_state)
549e2bfb 14810{
65edccce
VS
14811 struct intel_crtc_scaler_state *scaler_state =
14812 &crtc_state->scaler_state;
1c74eeaf 14813 struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
549e2bfb 14814 int i;
549e2bfb 14815
0258404f 14816 crtc->num_scalers = RUNTIME_INFO(dev_priv)->num_scalers[crtc->pipe];
1c74eeaf
NM
14817 if (!crtc->num_scalers)
14818 return;
14819
65edccce
VS
14820 for (i = 0; i < crtc->num_scalers; i++) {
14821 struct intel_scaler *scaler = &scaler_state->scalers[i];
14822
14823 scaler->in_use = 0;
0aaf29b3 14824 scaler->mode = 0;
549e2bfb
CK
14825 }
14826
14827 scaler_state->scaler_id = -1;
14828}
14829
5ab0d85b 14830static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
79e53945
JB
14831{
14832 struct intel_crtc *intel_crtc;
f5de6e07 14833 struct intel_crtc_state *crtc_state = NULL;
b079bd17
VS
14834 struct intel_plane *primary = NULL;
14835 struct intel_plane *cursor = NULL;
a81d6fa0 14836 int sprite, ret;
79e53945 14837
955382f3 14838 intel_crtc = kzalloc(sizeof(*intel_crtc), GFP_KERNEL);
b079bd17
VS
14839 if (!intel_crtc)
14840 return -ENOMEM;
79e53945 14841
f5de6e07 14842 crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL);
b079bd17
VS
14843 if (!crtc_state) {
14844 ret = -ENOMEM;
f5de6e07 14845 goto fail;
b079bd17 14846 }
842a07a7 14847 __drm_atomic_helper_crtc_reset(&intel_crtc->base, &crtc_state->base);
550acefd 14848 intel_crtc->config = crtc_state;
f5de6e07 14849
580503c7 14850 primary = intel_primary_plane_create(dev_priv, pipe);
b079bd17
VS
14851 if (IS_ERR(primary)) {
14852 ret = PTR_ERR(primary);
3d7d6510 14853 goto fail;
b079bd17 14854 }
d97d7b48 14855 intel_crtc->plane_ids_mask |= BIT(primary->id);
3d7d6510 14856
a81d6fa0 14857 for_each_sprite(dev_priv, pipe, sprite) {
b079bd17
VS
14858 struct intel_plane *plane;
14859
580503c7 14860 plane = intel_sprite_plane_create(dev_priv, pipe, sprite);
d2b2cbce 14861 if (IS_ERR(plane)) {
b079bd17
VS
14862 ret = PTR_ERR(plane);
14863 goto fail;
14864 }
d97d7b48 14865 intel_crtc->plane_ids_mask |= BIT(plane->id);
a81d6fa0
VS
14866 }
14867
580503c7 14868 cursor = intel_cursor_plane_create(dev_priv, pipe);
d2b2cbce 14869 if (IS_ERR(cursor)) {
b079bd17 14870 ret = PTR_ERR(cursor);
3d7d6510 14871 goto fail;
b079bd17 14872 }
d97d7b48 14873 intel_crtc->plane_ids_mask |= BIT(cursor->id);
3d7d6510 14874
5ab0d85b 14875 ret = drm_crtc_init_with_planes(&dev_priv->drm, &intel_crtc->base,
b079bd17
VS
14876 &primary->base, &cursor->base,
14877 &intel_crtc_funcs,
4d5d72b7 14878 "pipe %c", pipe_name(pipe));
3d7d6510
MR
14879 if (ret)
14880 goto fail;
79e53945 14881
80824003 14882 intel_crtc->pipe = pipe;
80824003 14883
1c74eeaf
NM
14884 /* initialize shared scalers */
14885 intel_crtc_init_scalers(intel_crtc, crtc_state);
14886
1947fd13
VS
14887 BUG_ON(pipe >= ARRAY_SIZE(dev_priv->pipe_to_crtc_mapping) ||
14888 dev_priv->pipe_to_crtc_mapping[pipe] != NULL);
14889 dev_priv->pipe_to_crtc_mapping[pipe] = intel_crtc;
14890
14891 if (INTEL_GEN(dev_priv) < 9) {
14892 enum i9xx_plane_id i9xx_plane = primary->i9xx_plane;
14893
14894 BUG_ON(i9xx_plane >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) ||
14895 dev_priv->plane_to_crtc_mapping[i9xx_plane] != NULL);
14896 dev_priv->plane_to_crtc_mapping[i9xx_plane] = intel_crtc;
14897 }
22fd0fab 14898
79e53945 14899 drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs);
87b6b101 14900
302da0cd 14901 intel_color_init(intel_crtc);
8563b1e8 14902
87b6b101 14903 WARN_ON(drm_crtc_index(&intel_crtc->base) != intel_crtc->pipe);
b079bd17
VS
14904
14905 return 0;
3d7d6510
MR
14906
14907fail:
b079bd17
VS
14908 /*
14909 * drm_mode_config_cleanup() will free up any
14910 * crtcs/planes already initialized.
14911 */
f5de6e07 14912 kfree(crtc_state);
3d7d6510 14913 kfree(intel_crtc);
b079bd17
VS
14914
14915 return ret;
79e53945
JB
14916}
14917
6a20fe7b
VS
14918int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
14919 struct drm_file *file)
08d7b3d1 14920{
08d7b3d1 14921 struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
7707e653 14922 struct drm_crtc *drmmode_crtc;
c05422d5 14923 struct intel_crtc *crtc;
08d7b3d1 14924
418da172 14925 drmmode_crtc = drm_crtc_find(dev, file, pipe_from_crtc_id->crtc_id);
71240ed2 14926 if (!drmmode_crtc)
3f2c2057 14927 return -ENOENT;
08d7b3d1 14928
7707e653 14929 crtc = to_intel_crtc(drmmode_crtc);
c05422d5 14930 pipe_from_crtc_id->pipe = crtc->pipe;
08d7b3d1 14931
c05422d5 14932 return 0;
08d7b3d1
CW
14933}
14934
66a9278e 14935static int intel_encoder_clones(struct intel_encoder *encoder)
79e53945 14936{
66a9278e
DV
14937 struct drm_device *dev = encoder->base.dev;
14938 struct intel_encoder *source_encoder;
79e53945 14939 int index_mask = 0;
79e53945
JB
14940 int entry = 0;
14941
b2784e15 14942 for_each_intel_encoder(dev, source_encoder) {
bc079e8b 14943 if (encoders_cloneable(encoder, source_encoder))
66a9278e
DV
14944 index_mask |= (1 << entry);
14945
79e53945
JB
14946 entry++;
14947 }
4ef69c7a 14948
79e53945
JB
14949 return index_mask;
14950}
14951
a5916fd7 14952static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
4d302442 14953{
646d5772 14954 if (!IS_MOBILE(dev_priv))
4d302442
CW
14955 return false;
14956
14957 if ((I915_READ(DP_A) & DP_DETECTED) == 0)
14958 return false;
14959
cf819eff 14960 if (IS_GEN(dev_priv, 5) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
4d302442
CW
14961 return false;
14962
14963 return true;
14964}
14965
63cb4e64 14966static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
84b4e042 14967{
6315b5d3 14968 if (INTEL_GEN(dev_priv) >= 9)
884497ed
DL
14969 return false;
14970
50a0bc90 14971 if (IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv))
84b4e042
JB
14972 return false;
14973
4f8036a2
TU
14974 if (HAS_PCH_LPT_H(dev_priv) &&
14975 I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
65e472e4
VS
14976 return false;
14977
70ac54d0 14978 /* DDI E can't be used if DDI A requires 4 lanes */
63cb4e64 14979 if (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
70ac54d0
VS
14980 return false;
14981
e4abb733 14982 if (!dev_priv->vbt.int_crt_support)
84b4e042
JB
14983 return false;
14984
14985 return true;
14986}
14987
8090ba8c
ID
14988void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
14989{
14990 int pps_num;
14991 int pps_idx;
14992
14993 if (HAS_DDI(dev_priv))
14994 return;
14995 /*
14996 * This w/a is needed at least on CPT/PPT, but to be sure apply it
14997 * everywhere where registers can be write protected.
14998 */
14999 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
15000 pps_num = 2;
15001 else
15002 pps_num = 1;
15003
15004 for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
15005 u32 val = I915_READ(PP_CONTROL(pps_idx));
15006
15007 val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS;
15008 I915_WRITE(PP_CONTROL(pps_idx), val);
15009 }
15010}
15011
44cb734c
ID
15012static void intel_pps_init(struct drm_i915_private *dev_priv)
15013{
cc3f90f0 15014 if (HAS_PCH_SPLIT(dev_priv) || IS_GEN9_LP(dev_priv))
44cb734c
ID
15015 dev_priv->pps_mmio_base = PCH_PPS_BASE;
15016 else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
15017 dev_priv->pps_mmio_base = VLV_PPS_BASE;
15018 else
15019 dev_priv->pps_mmio_base = PPS_BASE;
8090ba8c
ID
15020
15021 intel_pps_unlock_regs_wa(dev_priv);
44cb734c
ID
15022}
15023
c39055b0 15024static void intel_setup_outputs(struct drm_i915_private *dev_priv)
79e53945 15025{
4ef69c7a 15026 struct intel_encoder *encoder;
cb0953d7 15027 bool dpd_is_edp = false;
79e53945 15028
44cb734c
ID
15029 intel_pps_init(dev_priv);
15030
e1bf094b 15031 if (!HAS_DISPLAY(dev_priv))
fc0c5a9d
CW
15032 return;
15033
759c9ab5
BP
15034 if (IS_ELKHARTLAKE(dev_priv)) {
15035 intel_ddi_init(dev_priv, PORT_A);
15036 intel_ddi_init(dev_priv, PORT_B);
15037 intel_ddi_init(dev_priv, PORT_C);
15038 icl_dsi_init(dev_priv);
15039 } else if (INTEL_GEN(dev_priv) >= 11) {
00c92d92
PZ
15040 intel_ddi_init(dev_priv, PORT_A);
15041 intel_ddi_init(dev_priv, PORT_B);
15042 intel_ddi_init(dev_priv, PORT_C);
15043 intel_ddi_init(dev_priv, PORT_D);
15044 intel_ddi_init(dev_priv, PORT_E);
3f2e9ed0
ID
15045 /*
15046 * On some ICL SKUs port F is not present. No strap bits for
15047 * this, so rely on VBT.
2b34e562 15048 * Work around broken VBTs on SKUs known to have no port F.
3f2e9ed0 15049 */
2b34e562
ID
15050 if (IS_ICL_WITH_PORT_F(dev_priv) &&
15051 intel_bios_is_port_present(dev_priv, PORT_F))
3f2e9ed0
ID
15052 intel_ddi_init(dev_priv, PORT_F);
15053
bf4d57ff 15054 icl_dsi_init(dev_priv);
00c92d92 15055 } else if (IS_GEN9_LP(dev_priv)) {
c776eb2e
VK
15056 /*
15057 * FIXME: Broxton doesn't support port detection via the
15058 * DDI_BUF_CTL_A or SFUSE_STRAP registers, find another way to
15059 * detect the ports.
15060 */
c39055b0
ACO
15061 intel_ddi_init(dev_priv, PORT_A);
15062 intel_ddi_init(dev_priv, PORT_B);
15063 intel_ddi_init(dev_priv, PORT_C);
c6c794a2 15064
e518634b 15065 vlv_dsi_init(dev_priv);
4f8036a2 15066 } else if (HAS_DDI(dev_priv)) {
0e72a5b5
ED
15067 int found;
15068
63cb4e64
JN
15069 if (intel_ddi_crt_present(dev_priv))
15070 intel_crt_init(dev_priv);
15071
de31facd
JB
15072 /*
15073 * Haswell uses DDI functions to detect digital outputs.
15074 * On SKL pre-D0 the strap isn't connected, so we assume
15075 * it's there.
15076 */
77179400 15077 found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
de31facd 15078 /* WaIgnoreDDIAStrap: skl */
b976dc53 15079 if (found || IS_GEN9_BC(dev_priv))
c39055b0 15080 intel_ddi_init(dev_priv, PORT_A);
0e72a5b5 15081
9787e835 15082 /* DDI B, C, D, and F detection is indicated by the SFUSE_STRAP
0e72a5b5
ED
15083 * register */
15084 found = I915_READ(SFUSE_STRAP);
15085
15086 if (found & SFUSE_STRAP_DDIB_DETECTED)
c39055b0 15087 intel_ddi_init(dev_priv, PORT_B);
0e72a5b5 15088 if (found & SFUSE_STRAP_DDIC_DETECTED)
c39055b0 15089 intel_ddi_init(dev_priv, PORT_C);
0e72a5b5 15090 if (found & SFUSE_STRAP_DDID_DETECTED)
c39055b0 15091 intel_ddi_init(dev_priv, PORT_D);
9787e835
RV
15092 if (found & SFUSE_STRAP_DDIF_DETECTED)
15093 intel_ddi_init(dev_priv, PORT_F);
2800e4c2
RV
15094 /*
15095 * On SKL we don't have a way to detect DDI-E so we rely on VBT.
15096 */
b976dc53 15097 if (IS_GEN9_BC(dev_priv) &&
e9d49bb7 15098 intel_bios_is_port_present(dev_priv, PORT_E))
c39055b0 15099 intel_ddi_init(dev_priv, PORT_E);
2800e4c2 15100
6e266956 15101 } else if (HAS_PCH_SPLIT(dev_priv)) {
cb0953d7 15102 int found;
63cb4e64 15103
0fafa226
JN
15104 /*
15105 * intel_edp_init_connector() depends on this completing first,
15106 * to prevent the registration of both eDP and LVDS and the
15107 * incorrect sharing of the PPS.
15108 */
15109 intel_lvds_init(dev_priv);
74d021ea 15110 intel_crt_init(dev_priv);
63cb4e64 15111
7b91bf7f 15112 dpd_is_edp = intel_dp_is_port_edp(dev_priv, PORT_D);
270b3042 15113
a5916fd7 15114 if (ilk_has_edp_a(dev_priv))
c39055b0 15115 intel_dp_init(dev_priv, DP_A, PORT_A);
cb0953d7 15116
dc0fa718 15117 if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
461ed3ca 15118 /* PCH SDVOB multiplex with HDMIB */
c39055b0 15119 found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
30ad48b7 15120 if (!found)
c39055b0 15121 intel_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
5eb08b69 15122 if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
c39055b0 15123 intel_dp_init(dev_priv, PCH_DP_B, PORT_B);
30ad48b7
ZW
15124 }
15125
dc0fa718 15126 if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
c39055b0 15127 intel_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);
30ad48b7 15128
dc0fa718 15129 if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
c39055b0 15130 intel_hdmi_init(dev_priv, PCH_HDMID, PORT_D);
30ad48b7 15131
5eb08b69 15132 if (I915_READ(PCH_DP_C) & DP_DETECTED)
c39055b0 15133 intel_dp_init(dev_priv, PCH_DP_C, PORT_C);
5eb08b69 15134
270b3042 15135 if (I915_READ(PCH_DP_D) & DP_DETECTED)
c39055b0 15136 intel_dp_init(dev_priv, PCH_DP_D, PORT_D);
920a14b2 15137 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
22f35042 15138 bool has_edp, has_port;
457c52d8 15139
63cb4e64
JN
15140 if (IS_VALLEYVIEW(dev_priv) && dev_priv->vbt.int_crt_support)
15141 intel_crt_init(dev_priv);
15142
e17ac6db
VS
15143 /*
15144 * The DP_DETECTED bit is the latched state of the DDC
15145 * SDA pin at boot. However since eDP doesn't require DDC
15146 * (no way to plug in a DP->HDMI dongle) the DDC pins for
15147 * eDP ports may have been muxed to an alternate function.
15148 * Thus we can't rely on the DP_DETECTED bit alone to detect
15149 * eDP ports. Consult the VBT as well as DP_DETECTED to
15150 * detect eDP ports.
22f35042
VS
15151 *
15152 * Sadly the straps seem to be missing sometimes even for HDMI
15153 * ports (eg. on Voyo V3 - CHT x7-Z8700), so check both strap
15154 * and VBT for the presence of the port. Additionally we can't
15155 * trust the port type the VBT declares as we've seen at least
15156 * HDMI ports that the VBT claim are DP or eDP.
e17ac6db 15157 */
7b91bf7f 15158 has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
22f35042
VS
15159 has_port = intel_bios_is_port_present(dev_priv, PORT_B);
15160 if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
c39055b0 15161 has_edp &= intel_dp_init(dev_priv, VLV_DP_B, PORT_B);
22f35042 15162 if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
c39055b0 15163 intel_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);
585a94b8 15164
7b91bf7f 15165 has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
22f35042
VS
15166 has_port = intel_bios_is_port_present(dev_priv, PORT_C);
15167 if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port)
c39055b0 15168 has_edp &= intel_dp_init(dev_priv, VLV_DP_C, PORT_C);
22f35042 15169 if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
c39055b0 15170 intel_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);
19c03924 15171
920a14b2 15172 if (IS_CHERRYVIEW(dev_priv)) {
22f35042
VS
15173 /*
15174 * eDP not supported on port D,
15175 * so no need to worry about it
15176 */
15177 has_port = intel_bios_is_port_present(dev_priv, PORT_D);
15178 if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port)
c39055b0 15179 intel_dp_init(dev_priv, CHV_DP_D, PORT_D);
22f35042 15180 if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port)
c39055b0 15181 intel_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
9418c1f1
VS
15182 }
15183
e518634b 15184 vlv_dsi_init(dev_priv);
63cb4e64 15185 } else if (IS_PINEVIEW(dev_priv)) {
0fafa226 15186 intel_lvds_init(dev_priv);
74d021ea 15187 intel_crt_init(dev_priv);
63cb4e64 15188 } else if (IS_GEN_RANGE(dev_priv, 3, 4)) {
27185ae1 15189 bool found = false;
7d57382e 15190
9bedc7ed
JN
15191 if (IS_MOBILE(dev_priv))
15192 intel_lvds_init(dev_priv);
0fafa226 15193
74d021ea 15194 intel_crt_init(dev_priv);
63cb4e64 15195
e2debe91 15196 if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
b01f2c3a 15197 DRM_DEBUG_KMS("probing SDVOB\n");
c39055b0 15198 found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
9beb5fea 15199 if (!found && IS_G4X(dev_priv)) {
b01f2c3a 15200 DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
c39055b0 15201 intel_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
b01f2c3a 15202 }
27185ae1 15203
9beb5fea 15204 if (!found && IS_G4X(dev_priv))
c39055b0 15205 intel_dp_init(dev_priv, DP_B, PORT_B);
725e30ad 15206 }
13520b05
KH
15207
15208 /* Before G4X SDVOC doesn't have its own detect register */
13520b05 15209
e2debe91 15210 if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
b01f2c3a 15211 DRM_DEBUG_KMS("probing SDVOC\n");
c39055b0 15212 found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
b01f2c3a 15213 }
27185ae1 15214
e2debe91 15215 if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {
27185ae1 15216
9beb5fea 15217 if (IS_G4X(dev_priv)) {
b01f2c3a 15218 DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
c39055b0 15219 intel_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
b01f2c3a 15220 }
9beb5fea 15221 if (IS_G4X(dev_priv))
c39055b0 15222 intel_dp_init(dev_priv, DP_C, PORT_C);
725e30ad 15223 }
27185ae1 15224
9beb5fea 15225 if (IS_G4X(dev_priv) && (I915_READ(DP_D) & DP_DETECTED))
c39055b0 15226 intel_dp_init(dev_priv, DP_D, PORT_D);
d6521463
JN
15227
15228 if (SUPPORTS_TV(dev_priv))
15229 intel_tv_init(dev_priv);
63cb4e64 15230 } else if (IS_GEN(dev_priv, 2)) {
346073ce 15231 if (IS_I85X(dev_priv))
9bedc7ed 15232 intel_lvds_init(dev_priv);
0fafa226 15233
74d021ea 15234 intel_crt_init(dev_priv);
c39055b0 15235 intel_dvo_init(dev_priv);
63cb4e64 15236 }
79e53945 15237
c39055b0 15238 intel_psr_init(dev_priv);
7c8f8a70 15239
c39055b0 15240 for_each_intel_encoder(&dev_priv->drm, encoder) {
4ef69c7a
CW
15241 encoder->base.possible_crtcs = encoder->crtc_mask;
15242 encoder->base.possible_clones =
66a9278e 15243 intel_encoder_clones(encoder);
79e53945 15244 }
47356eb6 15245
c39055b0 15246 intel_init_pch_refclk(dev_priv);
270b3042 15247
c39055b0 15248 drm_helper_move_panel_connectors_to_head(&dev_priv->drm);
79e53945
JB
15249}
15250
15251static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
15252{
15253 struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
a5ff7a45 15254 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
79e53945 15255
ef2d633e 15256 drm_framebuffer_cleanup(fb);
70001cd2 15257
a5ff7a45
DS
15258 i915_gem_object_lock(obj);
15259 WARN_ON(!obj->framebuffer_references--);
15260 i915_gem_object_unlock(obj);
dd689287 15261
a5ff7a45 15262 i915_gem_object_put(obj);
70001cd2 15263
79e53945
JB
15264 kfree(intel_fb);
15265}
15266
15267static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
05394f39 15268 struct drm_file *file,
79e53945
JB
15269 unsigned int *handle)
15270{
a5ff7a45 15271 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
79e53945 15272
cc917ab4
CW
15273 if (obj->userptr.mm) {
15274 DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n");
15275 return -EINVAL;
15276 }
15277
05394f39 15278 return drm_gem_handle_create(file, &obj->base, handle);
79e53945
JB
15279}
15280
86c98588
RV
15281static int intel_user_framebuffer_dirty(struct drm_framebuffer *fb,
15282 struct drm_file *file,
15283 unsigned flags, unsigned color,
15284 struct drm_clip_rect *clips,
15285 unsigned num_clips)
15286{
5a97bcc6 15287 struct drm_i915_gem_object *obj = intel_fb_obj(fb);
86c98588 15288
5a97bcc6 15289 i915_gem_object_flush_if_display(obj);
d59b21ec 15290 intel_fb_obj_flush(obj, ORIGIN_DIRTYFB);
86c98588
RV
15291
15292 return 0;
15293}
15294
79e53945
JB
15295static const struct drm_framebuffer_funcs intel_fb_funcs = {
15296 .destroy = intel_user_framebuffer_destroy,
15297 .create_handle = intel_user_framebuffer_create_handle,
86c98588 15298 .dirty = intel_user_framebuffer_dirty,
79e53945
JB
15299};
15300
24dbf51a
CW
15301static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
15302 struct drm_i915_gem_object *obj,
15303 struct drm_mode_fb_cmd2 *mode_cmd)
79e53945 15304{
24dbf51a 15305 struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
2e2adb05 15306 struct drm_framebuffer *fb = &intel_fb->base;
a88c40eb 15307 u32 max_stride;
dd689287 15308 unsigned int tiling, stride;
24dbf51a 15309 int ret = -EINVAL;
2e2adb05 15310 int i;
79e53945 15311
dd689287
CW
15312 i915_gem_object_lock(obj);
15313 obj->framebuffer_references++;
15314 tiling = i915_gem_object_get_tiling(obj);
15315 stride = i915_gem_object_get_stride(obj);
15316 i915_gem_object_unlock(obj);
dd4916c5 15317
2a80eada 15318 if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
c2ff7370
VS
15319 /*
15320 * If there's a fence, enforce that
15321 * the fb modifier and tiling mode match.
15322 */
15323 if (tiling != I915_TILING_NONE &&
15324 tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
144cc143 15325 DRM_DEBUG_KMS("tiling_mode doesn't match fb modifier\n");
24dbf51a 15326 goto err;
2a80eada
DV
15327 }
15328 } else {
c2ff7370 15329 if (tiling == I915_TILING_X) {
2a80eada 15330 mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
c2ff7370 15331 } else if (tiling == I915_TILING_Y) {
144cc143 15332 DRM_DEBUG_KMS("No Y tiling for legacy addfb\n");
24dbf51a 15333 goto err;
2a80eada
DV
15334 }
15335 }
15336
17e8fd11
VS
15337 if (!drm_any_plane_has_format(&dev_priv->drm,
15338 mode_cmd->pixel_format,
15339 mode_cmd->modifier[0])) {
15340 struct drm_format_name_buf format_name;
15341
15342 DRM_DEBUG_KMS("unsupported pixel format %s / modifier 0x%llx\n",
15343 drm_get_format_name(mode_cmd->pixel_format,
15344 &format_name),
144cc143 15345 mode_cmd->modifier[0]);
24dbf51a 15346 goto err;
c16ed4be 15347 }
57cd6508 15348
c2ff7370
VS
15349 /*
15350 * gen2/3 display engine uses the fence if present,
15351 * so the tiling mode must match the fb modifier exactly.
15352 */
c56b89f1 15353 if (INTEL_GEN(dev_priv) < 4 &&
c2ff7370 15354 tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
144cc143 15355 DRM_DEBUG_KMS("tiling_mode must match fb modifier exactly on gen2/3\n");
9aceb5c1 15356 goto err;
c2ff7370
VS
15357 }
15358
a88c40eb
VS
15359 max_stride = intel_fb_max_stride(dev_priv, mode_cmd->pixel_format,
15360 mode_cmd->modifier[0]);
15361 if (mode_cmd->pitches[0] > max_stride) {
144cc143 15362 DRM_DEBUG_KMS("%s pitch (%u) must be at most %d\n",
2f075565 15363 mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
144cc143 15364 "tiled" : "linear",
a88c40eb 15365 mode_cmd->pitches[0], max_stride);
24dbf51a 15366 goto err;
c16ed4be 15367 }
5d7bd705 15368
c2ff7370
VS
15369 /*
15370 * If there's a fence, enforce that
15371 * the fb pitch and fence stride match.
15372 */
144cc143
VS
15373 if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
15374 DRM_DEBUG_KMS("pitch (%d) must match tiling stride (%d)\n",
15375 mode_cmd->pitches[0], stride);
24dbf51a 15376 goto err;
c16ed4be 15377 }
5d7bd705 15378
90f9a336
VS
15379 /* FIXME need to adjust LINOFF/TILEOFF accordingly. */
15380 if (mode_cmd->offsets[0] != 0)
24dbf51a 15381 goto err;
90f9a336 15382
2e2adb05 15383 drm_helper_mode_fill_fb_struct(&dev_priv->drm, fb, mode_cmd);
d88c4afd 15384
2e2adb05
VS
15385 for (i = 0; i < fb->format->num_planes; i++) {
15386 u32 stride_alignment;
15387
15388 if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
15389 DRM_DEBUG_KMS("bad plane %d handle\n", i);
37875d6b 15390 goto err;
2e2adb05
VS
15391 }
15392
15393 stride_alignment = intel_fb_stride_alignment(fb, i);
15394
15395 /*
15396 * Display WA #0531: skl,bxt,kbl,glk
15397 *
15398 * Render decompression and plane width > 3840
15399 * combined with horizontal panning requires the
15400 * plane stride to be a multiple of 4. We'll just
15401 * require the entire fb to accommodate that to avoid
15402 * potential runtime errors at plane configuration time.
15403 */
cf819eff 15404 if (IS_GEN(dev_priv, 9) && i == 0 && fb->width > 3840 &&
63eaf9ac 15405 is_ccs_modifier(fb->modifier))
2e2adb05
VS
15406 stride_alignment *= 4;
15407
15408 if (fb->pitches[i] & (stride_alignment - 1)) {
15409 DRM_DEBUG_KMS("plane %d pitch (%d) must be at least %u byte aligned\n",
15410 i, fb->pitches[i], stride_alignment);
15411 goto err;
15412 }
d88c4afd 15413
a268bcd7
DS
15414 fb->obj[i] = &obj->base;
15415 }
c7d73f6a 15416
2e2adb05 15417 ret = intel_fill_fb_info(dev_priv, fb);
6687c906 15418 if (ret)
9aceb5c1 15419 goto err;
2d7a215f 15420
2e2adb05 15421 ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs);
79e53945
JB
15422 if (ret) {
15423 DRM_ERROR("framebuffer init failed %d\n", ret);
24dbf51a 15424 goto err;
79e53945
JB
15425 }
15426
79e53945 15427 return 0;
24dbf51a
CW
15428
15429err:
dd689287
CW
15430 i915_gem_object_lock(obj);
15431 obj->framebuffer_references--;
15432 i915_gem_object_unlock(obj);
24dbf51a 15433 return ret;
79e53945
JB
15434}
15435
79e53945
JB
15436static struct drm_framebuffer *
15437intel_user_framebuffer_create(struct drm_device *dev,
15438 struct drm_file *filp,
1eb83451 15439 const struct drm_mode_fb_cmd2 *user_mode_cmd)
79e53945 15440{
dcb1394e 15441 struct drm_framebuffer *fb;
05394f39 15442 struct drm_i915_gem_object *obj;
76dc3769 15443 struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
79e53945 15444
03ac0642
CW
15445 obj = i915_gem_object_lookup(filp, mode_cmd.handles[0]);
15446 if (!obj)
cce13ff7 15447 return ERR_PTR(-ENOENT);
79e53945 15448
24dbf51a 15449 fb = intel_framebuffer_create(obj, &mode_cmd);
dcb1394e 15450 if (IS_ERR(fb))
f0cd5182 15451 i915_gem_object_put(obj);
dcb1394e
LW
15452
15453 return fb;
79e53945
JB
15454}
15455
778e23a9
CW
15456static void intel_atomic_state_free(struct drm_atomic_state *state)
15457{
15458 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
15459
15460 drm_atomic_state_default_release(state);
15461
15462 i915_sw_fence_fini(&intel_state->commit_ready);
15463
15464 kfree(state);
15465}
15466
e995ca0b
VS
15467static enum drm_mode_status
15468intel_mode_valid(struct drm_device *dev,
15469 const struct drm_display_mode *mode)
15470{
ad77c537
VS
15471 struct drm_i915_private *dev_priv = to_i915(dev);
15472 int hdisplay_max, htotal_max;
15473 int vdisplay_max, vtotal_max;
15474
e4dd27aa
VS
15475 /*
15476 * Can't reject DBLSCAN here because Xorg ddxen can add piles
15477 * of DBLSCAN modes to the output's mode list when they detect
15478 * the scaling mode property on the connector. And they don't
15479 * ask the kernel to validate those modes in any way until
15480 * modeset time at which point the client gets a protocol error.
15481 * So in order to not upset those clients we silently ignore the
15482 * DBLSCAN flag on such connectors. For other connectors we will
15483 * reject modes with the DBLSCAN flag in encoder->compute_config().
15484 * And we always reject DBLSCAN modes in connector->mode_valid()
15485 * as we never want such modes on the connector's mode list.
15486 */
15487
e995ca0b
VS
15488 if (mode->vscan > 1)
15489 return MODE_NO_VSCAN;
15490
e995ca0b
VS
15491 if (mode->flags & DRM_MODE_FLAG_HSKEW)
15492 return MODE_H_ILLEGAL;
15493
15494 if (mode->flags & (DRM_MODE_FLAG_CSYNC |
15495 DRM_MODE_FLAG_NCSYNC |
15496 DRM_MODE_FLAG_PCSYNC))
15497 return MODE_HSYNC;
15498
15499 if (mode->flags & (DRM_MODE_FLAG_BCAST |
15500 DRM_MODE_FLAG_PIXMUX |
15501 DRM_MODE_FLAG_CLKDIV2))
15502 return MODE_BAD;
15503
ad77c537
VS
15504 if (INTEL_GEN(dev_priv) >= 9 ||
15505 IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
15506 hdisplay_max = 8192; /* FDI max 4096 handled elsewhere */
15507 vdisplay_max = 4096;
15508 htotal_max = 8192;
15509 vtotal_max = 8192;
15510 } else if (INTEL_GEN(dev_priv) >= 3) {
15511 hdisplay_max = 4096;
15512 vdisplay_max = 4096;
15513 htotal_max = 8192;
15514 vtotal_max = 8192;
15515 } else {
15516 hdisplay_max = 2048;
15517 vdisplay_max = 2048;
15518 htotal_max = 4096;
15519 vtotal_max = 4096;
15520 }
15521
15522 if (mode->hdisplay > hdisplay_max ||
15523 mode->hsync_start > htotal_max ||
15524 mode->hsync_end > htotal_max ||
15525 mode->htotal > htotal_max)
15526 return MODE_H_ILLEGAL;
15527
15528 if (mode->vdisplay > vdisplay_max ||
15529 mode->vsync_start > vtotal_max ||
15530 mode->vsync_end > vtotal_max ||
15531 mode->vtotal > vtotal_max)
15532 return MODE_V_ILLEGAL;
15533
e995ca0b
VS
15534 return MODE_OK;
15535}
15536
79e53945 15537static const struct drm_mode_config_funcs intel_mode_funcs = {
79e53945 15538 .fb_create = intel_user_framebuffer_create,
bbfb6ce8 15539 .get_format_info = intel_get_format_info,
0632fef6 15540 .output_poll_changed = intel_fbdev_output_poll_changed,
e995ca0b 15541 .mode_valid = intel_mode_valid,
5ee67f1c
MR
15542 .atomic_check = intel_atomic_check,
15543 .atomic_commit = intel_atomic_commit,
de419ab6
ML
15544 .atomic_state_alloc = intel_atomic_state_alloc,
15545 .atomic_state_clear = intel_atomic_state_clear,
778e23a9 15546 .atomic_state_free = intel_atomic_state_free,
79e53945
JB
15547};
15548
88212941
ID
15549/**
15550 * intel_init_display_hooks - initialize the display modesetting hooks
15551 * @dev_priv: device private
15552 */
15553void intel_init_display_hooks(struct drm_i915_private *dev_priv)
e70236a8 15554{
7ff89ca2
VS
15555 intel_init_cdclk_hooks(dev_priv);
15556
c56b89f1 15557 if (INTEL_GEN(dev_priv) >= 9) {
bc8d7dff 15558 dev_priv->display.get_pipe_config = haswell_get_pipe_config;
5724dbd1
DL
15559 dev_priv->display.get_initial_plane_config =
15560 skylake_get_initial_plane_config;
bc8d7dff
DL
15561 dev_priv->display.crtc_compute_clock =
15562 haswell_crtc_compute_clock;
15563 dev_priv->display.crtc_enable = haswell_crtc_enable;
15564 dev_priv->display.crtc_disable = haswell_crtc_disable;
88212941 15565 } else if (HAS_DDI(dev_priv)) {
0e8ffe1b 15566 dev_priv->display.get_pipe_config = haswell_get_pipe_config;
5724dbd1 15567 dev_priv->display.get_initial_plane_config =
81894b2f 15568 i9xx_get_initial_plane_config;
797d0259
ACO
15569 dev_priv->display.crtc_compute_clock =
15570 haswell_crtc_compute_clock;
4f771f10
PZ
15571 dev_priv->display.crtc_enable = haswell_crtc_enable;
15572 dev_priv->display.crtc_disable = haswell_crtc_disable;
88212941 15573 } else if (HAS_PCH_SPLIT(dev_priv)) {
0e8ffe1b 15574 dev_priv->display.get_pipe_config = ironlake_get_pipe_config;
5724dbd1 15575 dev_priv->display.get_initial_plane_config =
81894b2f 15576 i9xx_get_initial_plane_config;
3fb37703
ACO
15577 dev_priv->display.crtc_compute_clock =
15578 ironlake_crtc_compute_clock;
76e5a89c
DV
15579 dev_priv->display.crtc_enable = ironlake_crtc_enable;
15580 dev_priv->display.crtc_disable = ironlake_crtc_disable;
65b3d6a9 15581 } else if (IS_CHERRYVIEW(dev_priv)) {
89b667f8 15582 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
5724dbd1
DL
15583 dev_priv->display.get_initial_plane_config =
15584 i9xx_get_initial_plane_config;
65b3d6a9
ACO
15585 dev_priv->display.crtc_compute_clock = chv_crtc_compute_clock;
15586 dev_priv->display.crtc_enable = valleyview_crtc_enable;
15587 dev_priv->display.crtc_disable = i9xx_crtc_disable;
15588 } else if (IS_VALLEYVIEW(dev_priv)) {
15589 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
15590 dev_priv->display.get_initial_plane_config =
15591 i9xx_get_initial_plane_config;
15592 dev_priv->display.crtc_compute_clock = vlv_crtc_compute_clock;
89b667f8
JB
15593 dev_priv->display.crtc_enable = valleyview_crtc_enable;
15594 dev_priv->display.crtc_disable = i9xx_crtc_disable;
19ec6693
ACO
15595 } else if (IS_G4X(dev_priv)) {
15596 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
15597 dev_priv->display.get_initial_plane_config =
15598 i9xx_get_initial_plane_config;
15599 dev_priv->display.crtc_compute_clock = g4x_crtc_compute_clock;
15600 dev_priv->display.crtc_enable = i9xx_crtc_enable;
15601 dev_priv->display.crtc_disable = i9xx_crtc_disable;
70e8aa21
ACO
15602 } else if (IS_PINEVIEW(dev_priv)) {
15603 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
15604 dev_priv->display.get_initial_plane_config =
15605 i9xx_get_initial_plane_config;
15606 dev_priv->display.crtc_compute_clock = pnv_crtc_compute_clock;
15607 dev_priv->display.crtc_enable = i9xx_crtc_enable;
15608 dev_priv->display.crtc_disable = i9xx_crtc_disable;
cf819eff 15609 } else if (!IS_GEN(dev_priv, 2)) {
0e8ffe1b 15610 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
5724dbd1
DL
15611 dev_priv->display.get_initial_plane_config =
15612 i9xx_get_initial_plane_config;
d6dfee7a 15613 dev_priv->display.crtc_compute_clock = i9xx_crtc_compute_clock;
76e5a89c
DV
15614 dev_priv->display.crtc_enable = i9xx_crtc_enable;
15615 dev_priv->display.crtc_disable = i9xx_crtc_disable;
81c97f52
ACO
15616 } else {
15617 dev_priv->display.get_pipe_config = i9xx_get_pipe_config;
15618 dev_priv->display.get_initial_plane_config =
15619 i9xx_get_initial_plane_config;
15620 dev_priv->display.crtc_compute_clock = i8xx_crtc_compute_clock;
15621 dev_priv->display.crtc_enable = i9xx_crtc_enable;
15622 dev_priv->display.crtc_disable = i9xx_crtc_disable;
f564048e 15623 }
e70236a8 15624
cf819eff 15625 if (IS_GEN(dev_priv, 5)) {
3bb11b53 15626 dev_priv->display.fdi_link_train = ironlake_fdi_link_train;
cf819eff 15627 } else if (IS_GEN(dev_priv, 6)) {
3bb11b53 15628 dev_priv->display.fdi_link_train = gen6_fdi_link_train;
88212941 15629 } else if (IS_IVYBRIDGE(dev_priv)) {
3bb11b53
SJ
15630 /* FIXME: detect B0+ stepping and use auto training */
15631 dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train;
88212941 15632 } else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
3bb11b53 15633 dev_priv->display.fdi_link_train = hsw_fdi_link_train;
445e780b
VS
15634 }
15635
bd30ca2d 15636 if (INTEL_GEN(dev_priv) >= 9)
27082493
L
15637 dev_priv->display.update_crtcs = skl_update_crtcs;
15638 else
15639 dev_priv->display.update_crtcs = intel_update_crtcs;
e70236a8
JB
15640}
15641
fa03cc2e
JN
15642static i915_reg_t i915_vgacntrl_reg(struct drm_i915_private *dev_priv)
15643{
15644 if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
15645 return VLV_VGACNTRL;
15646 else if (INTEL_GEN(dev_priv) >= 5)
15647 return CPU_VGACNTRL;
15648 else
15649 return VGACNTRL;
15650}
15651
9cce37f4 15652/* Disable the VGA plane that we never use */
29b74b7f 15653static void i915_disable_vga(struct drm_i915_private *dev_priv)
9cce37f4 15654{
52a05c30 15655 struct pci_dev *pdev = dev_priv->drm.pdev;
9cce37f4 15656 u8 sr1;
920a14b2 15657 i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv);
9cce37f4 15658
2b37c616 15659 /* WaEnableVGAAccessThroughIOPort:ctg,elk,ilk,snb,ivb,vlv,hsw */
52a05c30 15660 vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO);
3fdcf431 15661 outb(SR01, VGA_SR_INDEX);
9cce37f4
JB
15662 sr1 = inb(VGA_SR_DATA);
15663 outb(sr1 | 1<<5, VGA_SR_DATA);
52a05c30 15664 vga_put(pdev, VGA_RSRC_LEGACY_IO);
9cce37f4
JB
15665 udelay(300);
15666
01f5a626 15667 I915_WRITE(vga_reg, VGA_DISP_DISABLE);
9cce37f4
JB
15668 POSTING_READ(vga_reg);
15669}
15670
f817586c
DV
15671void intel_modeset_init_hw(struct drm_device *dev)
15672{
fac5e23e 15673 struct drm_i915_private *dev_priv = to_i915(dev);
1a617b77 15674
4c75b940 15675 intel_update_cdclk(dev_priv);
cfddadc9 15676 intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
bb0f4aab 15677 dev_priv->cdclk.logical = dev_priv->cdclk.actual = dev_priv->cdclk.hw;
f817586c
DV
15678}
15679
d93c0372
MR
15680/*
15681 * Calculate what we think the watermarks should be for the state we've read
15682 * out of the hardware and then immediately program those watermarks so that
15683 * we ensure the hardware settings match our internal state.
15684 *
15685 * We can calculate what we think WM's should be by creating a duplicate of the
15686 * current state (which was constructed during hardware readout) and running it
15687 * through the atomic check code to calculate new watermark values in the
15688 * state object.
15689 */
15690static void sanitize_watermarks(struct drm_device *dev)
15691{
15692 struct drm_i915_private *dev_priv = to_i915(dev);
15693 struct drm_atomic_state *state;
ccf010fb 15694 struct intel_atomic_state *intel_state;
d93c0372
MR
15695 struct drm_crtc *crtc;
15696 struct drm_crtc_state *cstate;
15697 struct drm_modeset_acquire_ctx ctx;
15698 int ret;
15699 int i;
15700
15701 /* Only supported on platforms that use atomic watermark design */
ed4a6a7c 15702 if (!dev_priv->display.optimize_watermarks)
d93c0372
MR
15703 return;
15704
15705 /*
15706 * We need to hold connection_mutex before calling duplicate_state so
15707 * that the connector loop is protected.
15708 */
15709 drm_modeset_acquire_init(&ctx, 0);
15710retry:
0cd1262d 15711 ret = drm_modeset_lock_all_ctx(dev, &ctx);
d93c0372
MR
15712 if (ret == -EDEADLK) {
15713 drm_modeset_backoff(&ctx);
15714 goto retry;
15715 } else if (WARN_ON(ret)) {
0cd1262d 15716 goto fail;
d93c0372
MR
15717 }
15718
15719 state = drm_atomic_helper_duplicate_state(dev, &ctx);
15720 if (WARN_ON(IS_ERR(state)))
0cd1262d 15721 goto fail;
d93c0372 15722
ccf010fb
ML
15723 intel_state = to_intel_atomic_state(state);
15724
ed4a6a7c
MR
15725 /*
15726 * Hardware readout is the only time we don't want to calculate
15727 * intermediate watermarks (since we don't trust the current
15728 * watermarks).
15729 */
b2ae318a 15730 if (!HAS_GMCH(dev_priv))
602ae835 15731 intel_state->skip_intermediate_wm = true;
ed4a6a7c 15732
d93c0372
MR
15733 ret = intel_atomic_check(dev, state);
15734 if (ret) {
15735 /*
15736 * If we fail here, it means that the hardware appears to be
15737 * programmed in a way that shouldn't be possible, given our
15738 * understanding of watermark requirements. This might mean a
15739 * mistake in the hardware readout code or a mistake in the
15740 * watermark calculations for a given platform. Raise a WARN
15741 * so that this is noticeable.
15742 *
15743 * If this actually happens, we'll have to just leave the
15744 * BIOS-programmed watermarks untouched and hope for the best.
15745 */
15746 WARN(true, "Could not determine valid watermarks for inherited state\n");
b9a1b717 15747 goto put_state;
d93c0372
MR
15748 }
15749
15750 /* Write calculated watermark values back */
aa5e9b47 15751 for_each_new_crtc_in_state(state, crtc, cstate, i) {
d93c0372
MR
15752 struct intel_crtc_state *cs = to_intel_crtc_state(cstate);
15753
ed4a6a7c 15754 cs->wm.need_postvbl_update = true;
ccf010fb 15755 dev_priv->display.optimize_watermarks(intel_state, cs);
556fe36d
ML
15756
15757 to_intel_crtc_state(crtc->state)->wm = cs->wm;
d93c0372
MR
15758 }
15759
b9a1b717 15760put_state:
0853695c 15761 drm_atomic_state_put(state);
0cd1262d 15762fail:
d93c0372
MR
15763 drm_modeset_drop_locks(&ctx);
15764 drm_modeset_acquire_fini(&ctx);
15765}
15766
58ecd9d5
CW
15767static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv)
15768{
cf819eff 15769 if (IS_GEN(dev_priv, 5)) {
58ecd9d5
CW
15770 u32 fdi_pll_clk =
15771 I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
15772
15773 dev_priv->fdi_pll_freq = (fdi_pll_clk + 2) * 10000;
cf819eff 15774 } else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) {
58ecd9d5
CW
15775 dev_priv->fdi_pll_freq = 270000;
15776 } else {
15777 return;
15778 }
15779
15780 DRM_DEBUG_DRIVER("FDI PLL freq=%d\n", dev_priv->fdi_pll_freq);
15781}
15782
516a49cc
AS
15783static int intel_initial_commit(struct drm_device *dev)
15784{
15785 struct drm_atomic_state *state = NULL;
15786 struct drm_modeset_acquire_ctx ctx;
15787 struct drm_crtc *crtc;
15788 struct drm_crtc_state *crtc_state;
15789 int ret = 0;
15790
15791 state = drm_atomic_state_alloc(dev);
15792 if (!state)
15793 return -ENOMEM;
15794
15795 drm_modeset_acquire_init(&ctx, 0);
15796
15797retry:
15798 state->acquire_ctx = &ctx;
15799
15800 drm_for_each_crtc(crtc, dev) {
15801 crtc_state = drm_atomic_get_crtc_state(state, crtc);
15802 if (IS_ERR(crtc_state)) {
15803 ret = PTR_ERR(crtc_state);
15804 goto out;
15805 }
15806
15807 if (crtc_state->active) {
15808 ret = drm_atomic_add_affected_planes(state, crtc);
15809 if (ret)
15810 goto out;
fa6af514
VS
15811
15812 /*
15813 * FIXME hack to force a LUT update to avoid the
15814 * plane update forcing the pipe gamma on without
15815 * having a proper LUT loaded. Remove once we
15816 * have readout for pipe gamma enable.
15817 */
15818 crtc_state->color_mgmt_changed = true;
516a49cc
AS
15819 }
15820 }
15821
15822 ret = drm_atomic_commit(state);
15823
15824out:
15825 if (ret == -EDEADLK) {
15826 drm_atomic_state_clear(state);
15827 drm_modeset_backoff(&ctx);
15828 goto retry;
15829 }
15830
15831 drm_atomic_state_put(state);
15832
15833 drm_modeset_drop_locks(&ctx);
15834 drm_modeset_acquire_fini(&ctx);
15835
15836 return ret;
15837}
15838
b079bd17 15839int intel_modeset_init(struct drm_device *dev)
79e53945 15840{
72e96d64
JL
15841 struct drm_i915_private *dev_priv = to_i915(dev);
15842 struct i915_ggtt *ggtt = &dev_priv->ggtt;
8cc87b75 15843 enum pipe pipe;
46f297fb 15844 struct intel_crtc *crtc;
516a49cc 15845 int ret;
79e53945 15846
757fffcf
VS
15847 dev_priv->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
15848
79e53945
JB
15849 drm_mode_config_init(dev);
15850
c457d9cf
VS
15851 ret = intel_bw_init(dev_priv);
15852 if (ret)
15853 return ret;
15854
79e53945
JB
15855 dev->mode_config.min_width = 0;
15856 dev->mode_config.min_height = 0;
15857
019d96cb
DA
15858 dev->mode_config.preferred_depth = 24;
15859 dev->mode_config.prefer_shadow = 1;
15860
25bab385
TU
15861 dev->mode_config.allow_fb_modifiers = true;
15862
e6ecefaa 15863 dev->mode_config.funcs = &intel_mode_funcs;
79e53945 15864
400c19d9 15865 init_llist_head(&dev_priv->atomic_helper.free_list);
eb955eee 15866 INIT_WORK(&dev_priv->atomic_helper.free_work,
ba318c61 15867 intel_atomic_helper_free_state_worker);
eb955eee 15868
27a981b6 15869 intel_init_quirks(dev_priv);
b690e96c 15870
acde44b5
JRS
15871 intel_fbc_init(dev_priv);
15872
62d75df7 15873 intel_init_pm(dev_priv);
1fa61106 15874
69f92f67
LW
15875 /*
15876 * There may be no VBT; and if the BIOS enabled SSC we can
15877 * just keep using it to avoid unnecessary flicker. Whereas if the
15878 * BIOS isn't using it, don't assume it will work even if the VBT
15879 * indicates as much.
15880 */
6e266956 15881 if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
69f92f67
LW
15882 bool bios_lvds_use_ssc = !!(I915_READ(PCH_DREF_CONTROL) &
15883 DREF_SSC1_ENABLE);
15884
15885 if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
15886 DRM_DEBUG_KMS("SSC %sabled by BIOS, overriding VBT which says %sabled\n",
15887 bios_lvds_use_ssc ? "en" : "dis",
15888 dev_priv->vbt.lvds_use_ssc ? "en" : "dis");
15889 dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
15890 }
15891 }
15892
5c000fb3
VS
15893 /*
15894 * Maximum framebuffer dimensions, chosen to match
15895 * the maximum render engine surface size on gen4+.
15896 */
15897 if (INTEL_GEN(dev_priv) >= 7) {
15898 dev->mode_config.max_width = 16384;
15899 dev->mode_config.max_height = 16384;
15900 } else if (INTEL_GEN(dev_priv) >= 4) {
15901 dev->mode_config.max_width = 8192;
15902 dev->mode_config.max_height = 8192;
cf819eff 15903 } else if (IS_GEN(dev_priv, 3)) {
5e4d6fa7
KP
15904 dev->mode_config.max_width = 4096;
15905 dev->mode_config.max_height = 4096;
79e53945 15906 } else {
5c000fb3
VS
15907 dev->mode_config.max_width = 2048;
15908 dev->mode_config.max_height = 2048;
79e53945 15909 }
068be561 15910
2a307c2e
JN
15911 if (IS_I845G(dev_priv) || IS_I865G(dev_priv)) {
15912 dev->mode_config.cursor_width = IS_I845G(dev_priv) ? 64 : 512;
dc41c154 15913 dev->mode_config.cursor_height = 1023;
cf819eff 15914 } else if (IS_GEN(dev_priv, 2)) {
98fac1d5
VS
15915 dev->mode_config.cursor_width = 64;
15916 dev->mode_config.cursor_height = 64;
068be561 15917 } else {
98fac1d5
VS
15918 dev->mode_config.cursor_width = 256;
15919 dev->mode_config.cursor_height = 256;
068be561
DL
15920 }
15921
73ebd503 15922 dev->mode_config.fb_base = ggtt->gmadr.start;
79e53945 15923
28c97730 15924 DRM_DEBUG_KMS("%d display pipe%s available.\n",
b7f05d4a
TU
15925 INTEL_INFO(dev_priv)->num_pipes,
15926 INTEL_INFO(dev_priv)->num_pipes > 1 ? "s" : "");
79e53945 15927
055e393f 15928 for_each_pipe(dev_priv, pipe) {
5ab0d85b 15929 ret = intel_crtc_init(dev_priv, pipe);
b079bd17
VS
15930 if (ret) {
15931 drm_mode_config_cleanup(dev);
15932 return ret;
15933 }
79e53945
JB
15934 }
15935
e72f9fbf 15936 intel_shared_dpll_init(dev);
58ecd9d5 15937 intel_update_fdi_pll_freq(dev_priv);
ee7b9f93 15938
5be6e334
VS
15939 intel_update_czclk(dev_priv);
15940 intel_modeset_init_hw(dev);
15941
9055aac7
R
15942 intel_hdcp_component_init(dev_priv);
15943
b2045352 15944 if (dev_priv->max_cdclk_freq == 0)
4c75b940 15945 intel_update_max_cdclk(dev_priv);
b2045352 15946
9cce37f4 15947 /* Just disable it once at startup */
29b74b7f 15948 i915_disable_vga(dev_priv);
c39055b0 15949 intel_setup_outputs(dev_priv);
11be49eb 15950
6e9f798d 15951 drm_modeset_lock_all(dev);
aecd36b8 15952 intel_modeset_setup_hw_state(dev, dev->mode_config.acquire_ctx);
6e9f798d 15953 drm_modeset_unlock_all(dev);
46f297fb 15954
d3fcc808 15955 for_each_intel_crtc(dev, crtc) {
eeebeac5
ML
15956 struct intel_initial_plane_config plane_config = {};
15957
46f297fb
JB
15958 if (!crtc->active)
15959 continue;
15960
46f297fb 15961 /*
46f297fb
JB
15962 * Note that reserving the BIOS fb up front prevents us
15963 * from stuffing other stolen allocations like the ring
15964 * on top. This prevents some ugliness at boot time, and
15965 * can even allow for smooth boot transitions if the BIOS
15966 * fb is large enough for the active pipe configuration.
15967 */
eeebeac5
ML
15968 dev_priv->display.get_initial_plane_config(crtc,
15969 &plane_config);
15970
15971 /*
15972 * If the fb is shared between multiple heads, we'll
15973 * just get the first one.
15974 */
15975 intel_find_initial_plane_obj(crtc, &plane_config);
46f297fb 15976 }
d93c0372
MR
15977
15978 /*
15979 * Make sure hardware watermarks really match the state we read out.
15980 * Note that we need to do this after reconstructing the BIOS fb's
15981 * since the watermark calculation done here will use pstate->fb.
15982 */
b2ae318a 15983 if (!HAS_GMCH(dev_priv))
602ae835 15984 sanitize_watermarks(dev);
b079bd17 15985
516a49cc
AS
15986 /*
15987 * Force all active planes to recompute their states. So that on
15988 * mode_setcrtc after probe, all the intel_plane_state variables
15989 * are already calculated and there is no assert_plane warnings
15990 * during bootup.
15991 */
15992 ret = intel_initial_commit(dev);
15993 if (ret)
15994 DRM_DEBUG_KMS("Initial commit in probe failed.\n");
15995
b079bd17 15996 return 0;
2c7111db
CW
15997}
15998
2ee0da16
VS
15999void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
16000{
d5fb43cb 16001 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
2ee0da16
VS
16002 /* 640x480@60Hz, ~25175 kHz */
16003 struct dpll clock = {
16004 .m1 = 18,
16005 .m2 = 7,
16006 .p1 = 13,
16007 .p2 = 4,
16008 .n = 2,
16009 };
16010 u32 dpll, fp;
16011 int i;
16012
16013 WARN_ON(i9xx_calc_dpll_params(48000, &clock) != 25154);
16014
16015 DRM_DEBUG_KMS("enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
16016 pipe_name(pipe), clock.vco, clock.dot);
16017
16018 fp = i9xx_dpll_compute_fp(&clock);
171d1562 16019 dpll = DPLL_DVO_2X_MODE |
2ee0da16
VS
16020 DPLL_VGA_MODE_DIS |
16021 ((clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT) |
16022 PLL_P2_DIVIDE_BY_4 |
16023 PLL_REF_INPUT_DREFCLK |
16024 DPLL_VCO_ENABLE;
16025
16026 I915_WRITE(FP0(pipe), fp);
16027 I915_WRITE(FP1(pipe), fp);
16028
16029 I915_WRITE(HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
16030 I915_WRITE(HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
16031 I915_WRITE(HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
16032 I915_WRITE(VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
16033 I915_WRITE(VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
16034 I915_WRITE(VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
16035 I915_WRITE(PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));
16036
16037 /*
16038 * Apparently we need to have VGA mode enabled prior to changing
16039 * the P1/P2 dividers. Otherwise the DPLL will keep using the old
16040 * dividers, even though the register value does change.
16041 */
16042 I915_WRITE(DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
16043 I915_WRITE(DPLL(pipe), dpll);
16044
16045 /* Wait for the clocks to stabilize. */
16046 POSTING_READ(DPLL(pipe));
16047 udelay(150);
16048
16049 /* The pixel multiplier can only be updated once the
16050 * DPLL is enabled and the clocks are stable.
16051 *
16052 * So write it again.
16053 */
16054 I915_WRITE(DPLL(pipe), dpll);
16055
16056 /* We do this three times for luck */
16057 for (i = 0; i < 3 ; i++) {
16058 I915_WRITE(DPLL(pipe), dpll);
16059 POSTING_READ(DPLL(pipe));
16060 udelay(150); /* wait for warmup */
16061 }
16062
16063 I915_WRITE(PIPECONF(pipe), PIPECONF_ENABLE | PIPECONF_PROGRESSIVE);
16064 POSTING_READ(PIPECONF(pipe));
d5fb43cb
VS
16065
16066 intel_wait_for_pipe_scanline_moving(crtc);
2ee0da16
VS
16067}
16068
16069void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
16070{
8fedd64d
VS
16071 struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
16072
2ee0da16
VS
16073 DRM_DEBUG_KMS("disabling pipe %c due to force quirk\n",
16074 pipe_name(pipe));
16075
5816d9cb
VS
16076 WARN_ON(I915_READ(DSPCNTR(PLANE_A)) & DISPLAY_PLANE_ENABLE);
16077 WARN_ON(I915_READ(DSPCNTR(PLANE_B)) & DISPLAY_PLANE_ENABLE);
16078 WARN_ON(I915_READ(DSPCNTR(PLANE_C)) & DISPLAY_PLANE_ENABLE);
b99b9ec1
VS
16079 WARN_ON(I915_READ(CURCNTR(PIPE_A)) & MCURSOR_MODE);
16080 WARN_ON(I915_READ(CURCNTR(PIPE_B)) & MCURSOR_MODE);
2ee0da16
VS
16081
16082 I915_WRITE(PIPECONF(pipe), 0);
16083 POSTING_READ(PIPECONF(pipe));
16084
8fedd64d 16085 intel_wait_for_pipe_scanline_stopped(crtc);
2ee0da16
VS
16086
16087 I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
16088 POSTING_READ(DPLL(pipe));
16089}
16090
b1e01595
VS
16091static void
16092intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
16093{
16094 struct intel_crtc *crtc;
fa555837 16095
b1e01595
VS
16096 if (INTEL_GEN(dev_priv) >= 4)
16097 return;
fa555837 16098
b1e01595
VS
16099 for_each_intel_crtc(&dev_priv->drm, crtc) {
16100 struct intel_plane *plane =
16101 to_intel_plane(crtc->base.primary);
62358aa4
VS
16102 struct intel_crtc *plane_crtc;
16103 enum pipe pipe;
b1e01595 16104
62358aa4
VS
16105 if (!plane->get_hw_state(plane, &pipe))
16106 continue;
16107
16108 if (pipe == crtc->pipe)
b1e01595
VS
16109 continue;
16110
7a4a2a46
VS
16111 DRM_DEBUG_KMS("[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
16112 plane->base.base.id, plane->base.name);
62358aa4
VS
16113
16114 plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
16115 intel_plane_disable_noatomic(plane_crtc, plane);
b1e01595 16116 }
fa555837
DV
16117}
16118
02e93c35
VS
16119static bool intel_crtc_has_encoders(struct intel_crtc *crtc)
16120{
16121 struct drm_device *dev = crtc->base.dev;
16122 struct intel_encoder *encoder;
16123
16124 for_each_encoder_on_crtc(dev, &crtc->base, encoder)
16125 return true;
16126
16127 return false;
16128}
16129
496b0fc3
ML
16130static struct intel_connector *intel_encoder_find_connector(struct intel_encoder *encoder)
16131{
16132 struct drm_device *dev = encoder->base.dev;
16133 struct intel_connector *connector;
16134
16135 for_each_connector_on_encoder(dev, &encoder->base, connector)
16136 return connector;
16137
16138 return NULL;
16139}
16140
a168f5b3 16141static bool has_pch_trancoder(struct drm_i915_private *dev_priv,
ecf837d9 16142 enum pipe pch_transcoder)
a168f5b3
VS
16143{
16144 return HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv) ||
ecf837d9 16145 (HAS_PCH_LPT_H(dev_priv) && pch_transcoder == PIPE_A);
a168f5b3
VS
16146}
16147
aecd36b8
VS
16148static void intel_sanitize_crtc(struct intel_crtc *crtc,
16149 struct drm_modeset_acquire_ctx *ctx)
24929352
DV
16150{
16151 struct drm_device *dev = crtc->base.dev;
fac5e23e 16152 struct drm_i915_private *dev_priv = to_i915(dev);
1b52ad46
ML
16153 struct intel_crtc_state *crtc_state = to_intel_crtc_state(crtc->base.state);
16154 enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
24929352 16155
24929352 16156 /* Clear any frame start delays used for debugging left by the BIOS */
738a8143 16157 if (crtc->active && !transcoder_is_dsi(cpu_transcoder)) {
4d1de975
JN
16158 i915_reg_t reg = PIPECONF(cpu_transcoder);
16159
16160 I915_WRITE(reg,
16161 I915_READ(reg) & ~PIPECONF_FRAME_START_DELAY_MASK);
16162 }
24929352 16163
1b52ad46 16164 if (crtc_state->base.active) {
f9cd7b88
VS
16165 struct intel_plane *plane;
16166
f9cd7b88
VS
16167 /* Disable everything but the primary plane */
16168 for_each_intel_plane_on_crtc(dev, crtc, plane) {
b1e01595
VS
16169 const struct intel_plane_state *plane_state =
16170 to_intel_plane_state(plane->base.state);
f9cd7b88 16171
b1e01595
VS
16172 if (plane_state->base.visible &&
16173 plane->base.type != DRM_PLANE_TYPE_PRIMARY)
16174 intel_plane_disable_noatomic(crtc, plane);
f9cd7b88 16175 }
c0550305
MR
16176
16177 /*
16178 * Disable any background color set by the BIOS, but enable the
16179 * gamma and CSC to match how we program our planes.
16180 */
16181 if (INTEL_GEN(dev_priv) >= 9)
16182 I915_WRITE(SKL_BOTTOM_COLOR(crtc->pipe),
16183 SKL_BOTTOM_COLOR_GAMMA_ENABLE |
16184 SKL_BOTTOM_COLOR_CSC_ENABLE);
9625604c 16185 }
d3eaf884 16186
24929352
DV
16187 /* Adjust the state of the output pipe according to whether we
16188 * have active connectors/encoders. */
1b52ad46 16189 if (crtc_state->base.active && !intel_crtc_has_encoders(crtc))
da1d0e26 16190 intel_crtc_disable_noatomic(&crtc->base, ctx);
24929352 16191
b2ae318a 16192 if (crtc_state->base.active || HAS_GMCH(dev_priv)) {
4cc31489
DV
16193 /*
16194 * We start out with underrun reporting disabled to avoid races.
16195 * For correct bookkeeping mark this on active crtcs.
16196 *
c5ab3bc0
DV
16197 * Also on gmch platforms we dont have any hardware bits to
16198 * disable the underrun reporting. Which means we need to start
16199 * out with underrun reporting disabled also on inactive pipes,
16200 * since otherwise we'll complain about the garbage we read when
16201 * e.g. coming up after runtime pm.
16202 *
4cc31489
DV
16203 * No protection against concurrent access is required - at
16204 * worst a fifo underrun happens which also sets this to false.
16205 */
16206 crtc->cpu_fifo_underrun_disabled = true;
a168f5b3
VS
16207 /*
16208 * We track the PCH trancoder underrun reporting state
16209 * within the crtc. With crtc for pipe A housing the underrun
16210 * reporting state for PCH transcoder A, crtc for pipe B housing
16211 * it for PCH transcoder B, etc. LPT-H has only PCH transcoder A,
16212 * and marking underrun reporting as disabled for the non-existing
16213 * PCH transcoders B and C would prevent enabling the south
16214 * error interrupt (see cpt_can_enable_serr_int()).
16215 */
ecf837d9 16216 if (has_pch_trancoder(dev_priv, crtc->pipe))
a168f5b3 16217 crtc->pch_fifo_underrun_disabled = true;
4cc31489 16218 }
24929352
DV
16219}
16220
7bed8adc
VS
16221static bool has_bogus_dpll_config(const struct intel_crtc_state *crtc_state)
16222{
16223 struct drm_i915_private *dev_priv = to_i915(crtc_state->base.crtc->dev);
16224
16225 /*
16226 * Some SNB BIOSen (eg. ASUS K53SV) are known to misprogram
16227 * the hardware when a high res displays plugged in. DPLL P
16228 * divider is zero, and the pipe timings are bonkers. We'll
16229 * try to disable everything in that case.
16230 *
16231 * FIXME would be nice to be able to sanitize this state
16232 * without several WARNs, but for now let's take the easy
16233 * road.
16234 */
16235 return IS_GEN(dev_priv, 6) &&
16236 crtc_state->base.active &&
16237 crtc_state->shared_dpll &&
16238 crtc_state->port_clock == 0;
16239}
16240
24929352
DV
16241static void intel_sanitize_encoder(struct intel_encoder *encoder)
16242{
70332ac5 16243 struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
24929352 16244 struct intel_connector *connector;
7bed8adc
VS
16245 struct intel_crtc *crtc = to_intel_crtc(encoder->base.crtc);
16246 struct intel_crtc_state *crtc_state = crtc ?
16247 to_intel_crtc_state(crtc->base.state) : NULL;
24929352
DV
16248
16249 /* We need to check both for a crtc link (meaning that the
16250 * encoder is active and trying to read from a pipe) and the
16251 * pipe itself being active. */
7bed8adc
VS
16252 bool has_active_crtc = crtc_state &&
16253 crtc_state->base.active;
16254
16255 if (crtc_state && has_bogus_dpll_config(crtc_state)) {
16256 DRM_DEBUG_KMS("BIOS has misprogrammed the hardware. Disabling pipe %c\n",
16257 pipe_name(crtc->pipe));
16258 has_active_crtc = false;
16259 }
24929352 16260
496b0fc3
ML
16261 connector = intel_encoder_find_connector(encoder);
16262 if (connector && !has_active_crtc) {
24929352
DV
16263 DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
16264 encoder->base.base.id,
8e329a03 16265 encoder->base.name);
24929352
DV
16266
16267 /* Connector is active, but has no active pipe. This is
16268 * fallout from our resume register restoring. Disable
16269 * the encoder manually again. */
7bed8adc
VS
16270 if (crtc_state) {
16271 struct drm_encoder *best_encoder;
fd6bbda9 16272
24929352
DV
16273 DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
16274 encoder->base.base.id,
8e329a03 16275 encoder->base.name);
7bed8adc
VS
16276
16277 /* avoid oopsing in case the hooks consult best_encoder */
16278 best_encoder = connector->base.state->best_encoder;
16279 connector->base.state->best_encoder = &encoder->base;
16280
c84c6fe3 16281 if (encoder->disable)
7bed8adc
VS
16282 encoder->disable(encoder, crtc_state,
16283 connector->base.state);
a62d1497 16284 if (encoder->post_disable)
7bed8adc
VS
16285 encoder->post_disable(encoder, crtc_state,
16286 connector->base.state);
16287
16288 connector->base.state->best_encoder = best_encoder;
24929352 16289 }
7f1950fb 16290 encoder->base.crtc = NULL;
24929352
DV
16291
16292 /* Inconsistent output/port/pipe state happens presumably due to
16293 * a bug in one of the get_hw_state functions. Or someplace else
16294 * in our code, like the register restore mess on resume. Clamp
16295 * things to off as a safer default. */
fd6bbda9
ML
16296
16297 connector->base.dpms = DRM_MODE_DPMS_OFF;
16298 connector->base.encoder = NULL;
24929352 16299 }
d6cae4aa
ML
16300
16301 /* notify opregion of the sanitized encoder state */
16302 intel_opregion_notify_encoder(encoder, connector && has_active_crtc);
70332ac5
ID
16303
16304 if (INTEL_GEN(dev_priv) >= 11)
16305 icl_sanitize_encoder_pll_mapping(encoder);
24929352
DV
16306}
16307
29b74b7f 16308void i915_redisable_vga_power_on(struct drm_i915_private *dev_priv)
0fde901f 16309{
920a14b2 16310 i915_reg_t vga_reg = i915_vgacntrl_reg(dev_priv);
0fde901f 16311
04098753
ID
16312 if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
16313 DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
29b74b7f 16314 i915_disable_vga(dev_priv);
04098753
ID
16315 }
16316}
16317
29b74b7f 16318void i915_redisable_vga(struct drm_i915_private *dev_priv)
04098753 16319{
0e6e0be4
CW
16320 intel_wakeref_t wakeref;
16321
16322 /*
16323 * This function can be called both from intel_modeset_setup_hw_state or
8dc8a27c
PZ
16324 * at a very early point in our resume sequence, where the power well
16325 * structures are not yet restored. Since this function is at a very
16326 * paranoid "someone might have enabled VGA while we were not looking"
16327 * level, just check if the power well is enabled instead of trying to
16328 * follow the "don't touch the power well if we don't need it" policy
0e6e0be4
CW
16329 * the rest of the driver uses.
16330 */
16331 wakeref = intel_display_power_get_if_enabled(dev_priv,
16332 POWER_DOMAIN_VGA);
16333 if (!wakeref)
8dc8a27c
PZ
16334 return;
16335
29b74b7f 16336 i915_redisable_vga_power_on(dev_priv);
6392f847 16337
0e6e0be4 16338 intel_display_power_put(dev_priv, POWER_DOMAIN_VGA, wakeref);
0fde901f
KM
16339}
16340
f9cd7b88 16341/* FIXME read out full plane state for all planes */
62358aa4 16342static void readout_plane_state(struct drm_i915_private *dev_priv)
d032ffa0 16343{
b1e01595 16344 struct intel_plane *plane;
62358aa4 16345 struct intel_crtc *crtc;
d032ffa0 16346
62358aa4 16347 for_each_intel_plane(&dev_priv->drm, plane) {
b1e01595
VS
16348 struct intel_plane_state *plane_state =
16349 to_intel_plane_state(plane->base.state);
62358aa4
VS
16350 struct intel_crtc_state *crtc_state;
16351 enum pipe pipe = PIPE_A;
eade6c89
VS
16352 bool visible;
16353
16354 visible = plane->get_hw_state(plane, &pipe);
b26d3ea3 16355
62358aa4
VS
16356 crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
16357 crtc_state = to_intel_crtc_state(crtc->base.state);
16358
b1e01595 16359 intel_set_plane_visible(crtc_state, plane_state, visible);
7a4a2a46
VS
16360
16361 DRM_DEBUG_KMS("[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
16362 plane->base.base.id, plane->base.name,
16363 enableddisabled(visible), pipe_name(pipe));
b1e01595 16364 }
62358aa4
VS
16365
16366 for_each_intel_crtc(&dev_priv->drm, crtc) {
16367 struct intel_crtc_state *crtc_state =
16368 to_intel_crtc_state(crtc->base.state);
16369
16370 fixup_active_planes(crtc_state);
16371 }
98ec7739
VS
16372}
16373
30e984df 16374static void intel_modeset_readout_hw_state(struct drm_device *dev)
24929352 16375{
fac5e23e 16376 struct drm_i915_private *dev_priv = to_i915(dev);
24929352 16377 enum pipe pipe;
24929352
DV
16378 struct intel_crtc *crtc;
16379 struct intel_encoder *encoder;
16380 struct intel_connector *connector;
f9e905ca 16381 struct drm_connector_list_iter conn_iter;
5358901f 16382 int i;
24929352 16383
565602d7
ML
16384 dev_priv->active_crtcs = 0;
16385
d3fcc808 16386 for_each_intel_crtc(dev, crtc) {
a8cd6da0
VS
16387 struct intel_crtc_state *crtc_state =
16388 to_intel_crtc_state(crtc->base.state);
3b117c8f 16389
ec2dc6a0 16390 __drm_atomic_helper_crtc_destroy_state(&crtc_state->base);
565602d7 16391 memset(crtc_state, 0, sizeof(*crtc_state));
842a07a7 16392 __drm_atomic_helper_crtc_reset(&crtc->base, &crtc_state->base);
24929352 16393
565602d7
ML
16394 crtc_state->base.active = crtc_state->base.enable =
16395 dev_priv->display.get_pipe_config(crtc, crtc_state);
16396
16397 crtc->base.enabled = crtc_state->base.enable;
16398 crtc->active = crtc_state->base.active;
16399
aca1ebf4 16400 if (crtc_state->base.active)
565602d7
ML
16401 dev_priv->active_crtcs |= 1 << crtc->pipe;
16402
78108b7c
VS
16403 DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n",
16404 crtc->base.base.id, crtc->base.name,
a8cd6da0 16405 enableddisabled(crtc_state->base.active));
24929352
DV
16406 }
16407
62358aa4
VS
16408 readout_plane_state(dev_priv);
16409
5358901f
DV
16410 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
16411 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
16412
ee1398ba
LDM
16413 pll->on = pll->info->funcs->get_hw_state(dev_priv, pll,
16414 &pll->state.hw_state);
2c42e535 16415 pll->state.crtc_mask = 0;
d3fcc808 16416 for_each_intel_crtc(dev, crtc) {
a8cd6da0
VS
16417 struct intel_crtc_state *crtc_state =
16418 to_intel_crtc_state(crtc->base.state);
16419
16420 if (crtc_state->base.active &&
16421 crtc_state->shared_dpll == pll)
2c42e535 16422 pll->state.crtc_mask |= 1 << crtc->pipe;
5358901f 16423 }
2c42e535 16424 pll->active_mask = pll->state.crtc_mask;
5358901f 16425
1e6f2ddc 16426 DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
72f775fa 16427 pll->info->name, pll->state.crtc_mask, pll->on);
5358901f
DV
16428 }
16429
b2784e15 16430 for_each_intel_encoder(dev, encoder) {
24929352
DV
16431 pipe = 0;
16432
16433 if (encoder->get_hw_state(encoder, &pipe)) {
a8cd6da0
VS
16434 struct intel_crtc_state *crtc_state;
16435
98187836 16436 crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
a8cd6da0 16437 crtc_state = to_intel_crtc_state(crtc->base.state);
e2af48c6 16438
045ac3b5 16439 encoder->base.crtc = &crtc->base;
a8cd6da0 16440 encoder->get_config(encoder, crtc_state);
24929352
DV
16441 } else {
16442 encoder->base.crtc = NULL;
16443 }
16444
6f2bcceb 16445 DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
08c4d7fc
TU
16446 encoder->base.base.id, encoder->base.name,
16447 enableddisabled(encoder->base.crtc),
6f2bcceb 16448 pipe_name(pipe));
24929352
DV
16449 }
16450
f9e905ca
DV
16451 drm_connector_list_iter_begin(dev, &conn_iter);
16452 for_each_intel_connector_iter(connector, &conn_iter) {
24929352
DV
16453 if (connector->get_hw_state(connector)) {
16454 connector->base.dpms = DRM_MODE_DPMS_ON;
2aa974c9
ML
16455
16456 encoder = connector->encoder;
16457 connector->base.encoder = &encoder->base;
16458
16459 if (encoder->base.crtc &&
16460 encoder->base.crtc->state->active) {
16461 /*
16462 * This has to be done during hardware readout
16463 * because anything calling .crtc_disable may
16464 * rely on the connector_mask being accurate.
16465 */
16466 encoder->base.crtc->state->connector_mask |=
40560e26 16467 drm_connector_mask(&connector->base);
e87a52b3 16468 encoder->base.crtc->state->encoder_mask |=
40560e26 16469 drm_encoder_mask(&encoder->base);
2aa974c9
ML
16470 }
16471
24929352
DV
16472 } else {
16473 connector->base.dpms = DRM_MODE_DPMS_OFF;
16474 connector->base.encoder = NULL;
16475 }
16476 DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
08c4d7fc
TU
16477 connector->base.base.id, connector->base.name,
16478 enableddisabled(connector->base.encoder));
24929352 16479 }
f9e905ca 16480 drm_connector_list_iter_end(&conn_iter);
7f4c6284
VS
16481
16482 for_each_intel_crtc(dev, crtc) {
c457d9cf
VS
16483 struct intel_bw_state *bw_state =
16484 to_intel_bw_state(dev_priv->bw_obj.state);
a8cd6da0
VS
16485 struct intel_crtc_state *crtc_state =
16486 to_intel_crtc_state(crtc->base.state);
c457d9cf 16487 struct intel_plane *plane;
d305e061 16488 int min_cdclk = 0;
aca1ebf4 16489
7f4c6284 16490 memset(&crtc->base.mode, 0, sizeof(crtc->base.mode));
a8cd6da0
VS
16491 if (crtc_state->base.active) {
16492 intel_mode_from_pipe_config(&crtc->base.mode, crtc_state);
bd4cd03c
VS
16493 crtc->base.mode.hdisplay = crtc_state->pipe_src_w;
16494 crtc->base.mode.vdisplay = crtc_state->pipe_src_h;
a8cd6da0 16495 intel_mode_from_pipe_config(&crtc_state->base.adjusted_mode, crtc_state);
7f4c6284
VS
16496 WARN_ON(drm_atomic_set_mode_for_crtc(crtc->base.state, &crtc->base.mode));
16497
16498 /*
16499 * The initial mode needs to be set in order to keep
16500 * the atomic core happy. It wants a valid mode if the
16501 * crtc's enabled, so we do the above call.
16502 *
7800fb69
DV
16503 * But we don't set all the derived state fully, hence
16504 * set a flag to indicate that a full recalculation is
16505 * needed on the next commit.
7f4c6284 16506 */
a8cd6da0 16507 crtc_state->base.mode.private_flags = I915_MODE_FLAG_INHERITED;
9eca6832 16508
a7d1b3f4
VS
16509 intel_crtc_compute_pixel_rate(crtc_state);
16510
9c61de4c 16511 if (dev_priv->display.modeset_calc_cdclk) {
d305e061 16512 min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
9c61de4c
VS
16513 if (WARN_ON(min_cdclk < 0))
16514 min_cdclk = 0;
16515 }
aca1ebf4 16516
5caa0fea
DV
16517 drm_calc_timestamping_constants(&crtc->base,
16518 &crtc_state->base.adjusted_mode);
f2bdd112 16519 update_scanline_offset(crtc_state);
7f4c6284 16520 }
e3b247da 16521
d305e061 16522 dev_priv->min_cdclk[crtc->pipe] = min_cdclk;
53e9bf5e
VS
16523 dev_priv->min_voltage_level[crtc->pipe] =
16524 crtc_state->min_voltage_level;
aca1ebf4 16525
c457d9cf
VS
16526 for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
16527 const struct intel_plane_state *plane_state =
16528 to_intel_plane_state(plane->base.state);
16529
16530 /*
16531 * FIXME don't have the fb yet, so can't
16532 * use intel_plane_data_rate() :(
16533 */
16534 if (plane_state->base.visible)
16535 crtc_state->data_rate[plane->id] =
16536 4 * crtc_state->pixel_rate;
16537 }
16538
16539 intel_bw_crtc_update(bw_state, crtc_state);
16540
a8cd6da0 16541 intel_pipe_config_sanity_check(dev_priv, crtc_state);
7f4c6284 16542 }
30e984df
DV
16543}
16544
62b69566
ACO
16545static void
16546get_encoder_power_domains(struct drm_i915_private *dev_priv)
16547{
16548 struct intel_encoder *encoder;
16549
16550 for_each_intel_encoder(&dev_priv->drm, encoder) {
52528055 16551 struct intel_crtc_state *crtc_state;
62b69566
ACO
16552
16553 if (!encoder->get_power_domains)
16554 continue;
16555
52528055 16556 /*
b79ebe74
ID
16557 * MST-primary and inactive encoders don't have a crtc state
16558 * and neither of these require any power domain references.
52528055 16559 */
b79ebe74
ID
16560 if (!encoder->base.crtc)
16561 continue;
52528055 16562
b79ebe74 16563 crtc_state = to_intel_crtc_state(encoder->base.crtc->state);
3a52fb7e 16564 encoder->get_power_domains(encoder, crtc_state);
62b69566
ACO
16565 }
16566}
16567
df49ec82
RV
16568static void intel_early_display_was(struct drm_i915_private *dev_priv)
16569{
16570 /* Display WA #1185 WaDisableDARBFClkGating:cnl,glk */
16571 if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
16572 I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
16573 DARBF_GATING_DIS);
16574
16575 if (IS_HASWELL(dev_priv)) {
16576 /*
16577 * WaRsPkgCStateDisplayPMReq:hsw
16578 * System hang if this isn't done before disabling all planes!
16579 */
16580 I915_WRITE(CHICKEN_PAR1_1,
16581 I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
16582 }
16583}
16584
3aefb67f
VS
16585static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
16586 enum port port, i915_reg_t hdmi_reg)
16587{
16588 u32 val = I915_READ(hdmi_reg);
16589
16590 if (val & SDVO_ENABLE ||
16591 (val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A))
16592 return;
16593
16594 DRM_DEBUG_KMS("Sanitizing transcoder select for HDMI %c\n",
16595 port_name(port));
16596
16597 val &= ~SDVO_PIPE_SEL_MASK;
16598 val |= SDVO_PIPE_SEL(PIPE_A);
16599
16600 I915_WRITE(hdmi_reg, val);
16601}
16602
16603static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv,
16604 enum port port, i915_reg_t dp_reg)
16605{
16606 u32 val = I915_READ(dp_reg);
16607
16608 if (val & DP_PORT_EN ||
16609 (val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A))
16610 return;
16611
16612 DRM_DEBUG_KMS("Sanitizing transcoder select for DP %c\n",
16613 port_name(port));
16614
16615 val &= ~DP_PIPE_SEL_MASK;
16616 val |= DP_PIPE_SEL(PIPE_A);
16617
16618 I915_WRITE(dp_reg, val);
16619}
16620
16621static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
16622{
16623 /*
16624 * The BIOS may select transcoder B on some of the PCH
16625 * ports even it doesn't enable the port. This would trip
16626 * assert_pch_dp_disabled() and assert_pch_hdmi_disabled().
16627 * Sanitize the transcoder select bits to prevent that. We
16628 * assume that the BIOS never actually enabled the port,
16629 * because if it did we'd actually have to toggle the port
16630 * on and back off to make the transcoder A select stick
16631 * (see. intel_dp_link_down(), intel_disable_hdmi(),
16632 * intel_disable_sdvo()).
16633 */
16634 ibx_sanitize_pch_dp_port(dev_priv, PORT_B, PCH_DP_B);
16635 ibx_sanitize_pch_dp_port(dev_priv, PORT_C, PCH_DP_C);
16636 ibx_sanitize_pch_dp_port(dev_priv, PORT_D, PCH_DP_D);
16637
16638 /* PCH SDVOB multiplex with HDMIB */
16639 ibx_sanitize_pch_hdmi_port(dev_priv, PORT_B, PCH_HDMIB);
16640 ibx_sanitize_pch_hdmi_port(dev_priv, PORT_C, PCH_HDMIC);
16641 ibx_sanitize_pch_hdmi_port(dev_priv, PORT_D, PCH_HDMID);
16642}
16643
043e9bda
ML
16644/* Scan out the current hw modeset state,
16645 * and sanitizes it to the current state
16646 */
16647static void
aecd36b8
VS
16648intel_modeset_setup_hw_state(struct drm_device *dev,
16649 struct drm_modeset_acquire_ctx *ctx)
30e984df 16650{
fac5e23e 16651 struct drm_i915_private *dev_priv = to_i915(dev);
91d78197 16652 struct intel_crtc_state *crtc_state;
30e984df 16653 struct intel_encoder *encoder;
0e6e0be4
CW
16654 struct intel_crtc *crtc;
16655 intel_wakeref_t wakeref;
35c95375 16656 int i;
30e984df 16657
0e6e0be4 16658 wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
2cd9a689 16659
df49ec82 16660 intel_early_display_was(dev_priv);
30e984df 16661 intel_modeset_readout_hw_state(dev);
24929352
DV
16662
16663 /* HW state is read out, now we need to sanitize this mess. */
62b69566 16664 get_encoder_power_domains(dev_priv);
3aefb67f
VS
16665
16666 if (HAS_PCH_IBX(dev_priv))
16667 ibx_sanitize_pch_ports(dev_priv);
62b69566 16668
68bc30de
VS
16669 /*
16670 * intel_sanitize_plane_mapping() may need to do vblank
16671 * waits, so we need vblank interrupts restored beforehand.
16672 */
16673 for_each_intel_crtc(&dev_priv->drm, crtc) {
32db0b65
VS
16674 crtc_state = to_intel_crtc_state(crtc->base.state);
16675
68bc30de 16676 drm_crtc_vblank_reset(&crtc->base);
b1e01595 16677
32db0b65
VS
16678 if (crtc_state->base.active)
16679 intel_crtc_vblank_on(crtc_state);
24929352
DV
16680 }
16681
68bc30de 16682 intel_sanitize_plane_mapping(dev_priv);
e2af48c6 16683
68bc30de
VS
16684 for_each_intel_encoder(dev, encoder)
16685 intel_sanitize_encoder(encoder);
16686
16687 for_each_intel_crtc(&dev_priv->drm, crtc) {
91d78197 16688 crtc_state = to_intel_crtc_state(crtc->base.state);
aecd36b8 16689 intel_sanitize_crtc(crtc, ctx);
10d75f54 16690 intel_dump_pipe_config(crtc_state, NULL, "[setup_hw_state]");
24929352 16691 }
9a935856 16692
d29b2f9d
ACO
16693 intel_modeset_update_connector_atomic_state(dev);
16694
35c95375
DV
16695 for (i = 0; i < dev_priv->num_shared_dpll; i++) {
16696 struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
16697
2dd66ebd 16698 if (!pll->on || pll->active_mask)
35c95375
DV
16699 continue;
16700
72f775fa
LDM
16701 DRM_DEBUG_KMS("%s enabled but not in use, disabling\n",
16702 pll->info->name);
35c95375 16703
ee1398ba 16704 pll->info->funcs->disable(dev_priv, pll);
35c95375
DV
16705 pll->on = false;
16706 }
16707
04548cba 16708 if (IS_G4X(dev_priv)) {
cd1d3ee9 16709 g4x_wm_get_hw_state(dev_priv);
04548cba
VS
16710 g4x_wm_sanitize(dev_priv);
16711 } else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
cd1d3ee9 16712 vlv_wm_get_hw_state(dev_priv);
602ae835 16713 vlv_wm_sanitize(dev_priv);
a029fa4d 16714 } else if (INTEL_GEN(dev_priv) >= 9) {
cd1d3ee9 16715 skl_wm_get_hw_state(dev_priv);
602ae835 16716 } else if (HAS_PCH_SPLIT(dev_priv)) {
cd1d3ee9 16717 ilk_wm_get_hw_state(dev_priv);
602ae835 16718 }
292b990e
ML
16719
16720 for_each_intel_crtc(dev, crtc) {
d8fc70b7 16721 u64 put_domains;
292b990e 16722
91d78197
ML
16723 crtc_state = to_intel_crtc_state(crtc->base.state);
16724 put_domains = modeset_get_crtc_power_domains(&crtc->base, crtc_state);
292b990e
ML
16725 if (WARN_ON(put_domains))
16726 modeset_put_power_domains(dev_priv, put_domains);
16727 }
2cd9a689 16728
0e6e0be4 16729 intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
010cf73d
PZ
16730
16731 intel_fbc_init_pipe_state(dev_priv);
043e9bda 16732}
7d0bc1ea 16733
043e9bda
ML
16734void intel_display_resume(struct drm_device *dev)
16735{
e2c8b870
ML
16736 struct drm_i915_private *dev_priv = to_i915(dev);
16737 struct drm_atomic_state *state = dev_priv->modeset_restore_state;
16738 struct drm_modeset_acquire_ctx ctx;
043e9bda 16739 int ret;
f30da187 16740
e2c8b870 16741 dev_priv->modeset_restore_state = NULL;
73974893
ML
16742 if (state)
16743 state->acquire_ctx = &ctx;
043e9bda 16744
e2c8b870 16745 drm_modeset_acquire_init(&ctx, 0);
043e9bda 16746
73974893
ML
16747 while (1) {
16748 ret = drm_modeset_lock_all_ctx(dev, &ctx);
16749 if (ret != -EDEADLK)
16750 break;
043e9bda 16751
e2c8b870 16752 drm_modeset_backoff(&ctx);
e2c8b870 16753 }
043e9bda 16754
73974893 16755 if (!ret)
581e49fe 16756 ret = __intel_display_resume(dev, state, &ctx);
73974893 16757
2503a0fe 16758 intel_enable_ipc(dev_priv);
e2c8b870
ML
16759 drm_modeset_drop_locks(&ctx);
16760 drm_modeset_acquire_fini(&ctx);
043e9bda 16761
0853695c 16762 if (ret)
e2c8b870 16763 DRM_ERROR("Restoring old state failed with %i\n", ret);
3c5e37f1
CW
16764 if (state)
16765 drm_atomic_state_put(state);
2c7111db
CW
16766}
16767
886c6b86
MN
16768static void intel_hpd_poll_fini(struct drm_device *dev)
16769{
16770 struct intel_connector *connector;
16771 struct drm_connector_list_iter conn_iter;
16772
448aa911 16773 /* Kill all the work that may have been queued by hpd. */
886c6b86
MN
16774 drm_connector_list_iter_begin(dev, &conn_iter);
16775 for_each_intel_connector_iter(connector, &conn_iter) {
16776 if (connector->modeset_retry_work.func)
16777 cancel_work_sync(&connector->modeset_retry_work);
d3dacc70
R
16778 if (connector->hdcp.shim) {
16779 cancel_delayed_work_sync(&connector->hdcp.check_work);
16780 cancel_work_sync(&connector->hdcp.prop_work);
ee5e5e7a 16781 }
886c6b86
MN
16782 }
16783 drm_connector_list_iter_end(&conn_iter);
16784}
16785
79e53945
JB
16786void intel_modeset_cleanup(struct drm_device *dev)
16787{
fac5e23e 16788 struct drm_i915_private *dev_priv = to_i915(dev);
652c393a 16789
8bcf9f70
CW
16790 flush_workqueue(dev_priv->modeset_wq);
16791
eb955eee
CW
16792 flush_work(&dev_priv->atomic_helper.free_work);
16793 WARN_ON(!llist_empty(&dev_priv->atomic_helper.free_list));
16794
fd0c0642
DV
16795 /*
16796 * Interrupts and polling as the first thing to avoid creating havoc.
2eb5252e 16797 * Too much stuff here (turning of connectors, ...) would
fd0c0642
DV
16798 * experience fancy races otherwise.
16799 */
2aeb7d3a 16800 intel_irq_uninstall(dev_priv);
eb21b92b 16801
fd0c0642
DV
16802 /*
16803 * Due to the hpd irq storm handling the hotplug work can re-arm the
16804 * poll handlers. Hence disable polling after hpd handling is shut down.
16805 */
886c6b86 16806 intel_hpd_poll_fini(dev);
fd0c0642 16807
4f256d82
DV
16808 /* poll work can call into fbdev, hence clean that up afterwards */
16809 intel_fbdev_fini(dev_priv);
16810
723bfd70
JB
16811 intel_unregister_dsm_handler();
16812
c937ab3e 16813 intel_fbc_global_disable(dev_priv);
69341a5e 16814
1630fe75
CW
16815 /* flush any delayed tasks or pending work */
16816 flush_scheduled_work();
16817
9055aac7
R
16818 intel_hdcp_component_fini(dev_priv);
16819
79e53945 16820 drm_mode_config_cleanup(dev);
4d7bb011 16821
58db08a7 16822 intel_overlay_cleanup(dev_priv);
ae48434c 16823
3ce2ea65 16824 intel_gmbus_teardown(dev_priv);
757fffcf
VS
16825
16826 destroy_workqueue(dev_priv->modeset_wq);
acde44b5
JRS
16827
16828 intel_fbc_cleanup_cfb(dev_priv);
79e53945
JB
16829}
16830
28d52043
DA
16831/*
16832 * set vga decode state - true == enable VGA decode
16833 */
6315b5d3 16834int intel_modeset_vga_set_state(struct drm_i915_private *dev_priv, bool state)
28d52043 16835{
6315b5d3 16836 unsigned reg = INTEL_GEN(dev_priv) >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
28d52043
DA
16837 u16 gmch_ctrl;
16838
75fa041d
CW
16839 if (pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl)) {
16840 DRM_ERROR("failed to read control word\n");
16841 return -EIO;
16842 }
16843
c0cc8a55
CW
16844 if (!!(gmch_ctrl & INTEL_GMCH_VGA_DISABLE) == !state)
16845 return 0;
16846
28d52043
DA
16847 if (state)
16848 gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
16849 else
16850 gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
75fa041d
CW
16851
16852 if (pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl)) {
16853 DRM_ERROR("failed to write control word\n");
16854 return -EIO;
16855 }
16856
28d52043
DA
16857 return 0;
16858}
c4a1d9e4 16859
98a2f411
CW
16860#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
16861
c4a1d9e4 16862struct intel_display_error_state {
ff57f1b0
PZ
16863
16864 u32 power_well_driver;
16865
c4a1d9e4
CW
16866 struct intel_cursor_error_state {
16867 u32 control;
16868 u32 position;
16869 u32 base;
16870 u32 size;
52331309 16871 } cursor[I915_MAX_PIPES];
c4a1d9e4
CW
16872
16873 struct intel_pipe_error_state {
ddf9c536 16874 bool power_domain_on;
c4a1d9e4 16875 u32 source;
f301b1e1 16876 u32 stat;
52331309 16877 } pipe[I915_MAX_PIPES];
c4a1d9e4
CW
16878
16879 struct intel_plane_error_state {
16880 u32 control;
16881 u32 stride;
16882 u32 size;
16883 u32 pos;
16884 u32 addr;
16885 u32 surface;
16886 u32 tile_offset;
52331309 16887 } plane[I915_MAX_PIPES];
63b66e5b
CW
16888
16889 struct intel_transcoder_error_state {
062de72b 16890 bool available;
ddf9c536 16891 bool power_domain_on;
63b66e5b
CW
16892 enum transcoder cpu_transcoder;
16893
16894 u32 conf;
16895
16896 u32 htotal;
16897 u32 hblank;
16898 u32 hsync;
16899 u32 vtotal;
16900 u32 vblank;
16901 u32 vsync;
16902 } transcoder[4];
c4a1d9e4
CW
16903};
16904
16905struct intel_display_error_state *
c033666a 16906intel_display_capture_error_state(struct drm_i915_private *dev_priv)
c4a1d9e4 16907{
c4a1d9e4 16908 struct intel_display_error_state *error;
63b66e5b
CW
16909 int transcoders[] = {
16910 TRANSCODER_A,
16911 TRANSCODER_B,
16912 TRANSCODER_C,
16913 TRANSCODER_EDP,
16914 };
c4a1d9e4
CW
16915 int i;
16916
062de72b
LDM
16917 BUILD_BUG_ON(ARRAY_SIZE(transcoders) != ARRAY_SIZE(error->transcoder));
16918
e1bf094b 16919 if (!HAS_DISPLAY(dev_priv))
63b66e5b
CW
16920 return NULL;
16921
9d1cb914 16922 error = kzalloc(sizeof(*error), GFP_ATOMIC);
c4a1d9e4
CW
16923 if (error == NULL)
16924 return NULL;
16925
c033666a 16926 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
75e39688 16927 error->power_well_driver = I915_READ(HSW_PWR_WELL_CTL2);
ff57f1b0 16928
055e393f 16929 for_each_pipe(dev_priv, i) {
ddf9c536 16930 error->pipe[i].power_domain_on =
f458ebbc
DV
16931 __intel_display_power_is_enabled(dev_priv,
16932 POWER_DOMAIN_PIPE(i));
ddf9c536 16933 if (!error->pipe[i].power_domain_on)
9d1cb914
PZ
16934 continue;
16935
5efb3e28
VS
16936 error->cursor[i].control = I915_READ(CURCNTR(i));
16937 error->cursor[i].position = I915_READ(CURPOS(i));
16938 error->cursor[i].base = I915_READ(CURBASE(i));
c4a1d9e4
CW
16939
16940 error->plane[i].control = I915_READ(DSPCNTR(i));
16941 error->plane[i].stride = I915_READ(DSPSTRIDE(i));
c033666a 16942 if (INTEL_GEN(dev_priv) <= 3) {
51889b35 16943 error->plane[i].size = I915_READ(DSPSIZE(i));
80ca378b
PZ
16944 error->plane[i].pos = I915_READ(DSPPOS(i));
16945 }
c033666a 16946 if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
ca291363 16947 error->plane[i].addr = I915_READ(DSPADDR(i));
c033666a 16948 if (INTEL_GEN(dev_priv) >= 4) {
c4a1d9e4
CW
16949 error->plane[i].surface = I915_READ(DSPSURF(i));
16950 error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
16951 }
16952
c4a1d9e4 16953 error->pipe[i].source = I915_READ(PIPESRC(i));
f301b1e1 16954
b2ae318a 16955 if (HAS_GMCH(dev_priv))
f301b1e1 16956 error->pipe[i].stat = I915_READ(PIPESTAT(i));
63b66e5b
CW
16957 }
16958
062de72b 16959 for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
63b66e5b
CW
16960 enum transcoder cpu_transcoder = transcoders[i];
16961
062de72b
LDM
16962 if (!INTEL_INFO(dev_priv)->trans_offsets[cpu_transcoder])
16963 continue;
16964
16965 error->transcoder[i].available = true;
ddf9c536 16966 error->transcoder[i].power_domain_on =
f458ebbc 16967 __intel_display_power_is_enabled(dev_priv,
38cc1daf 16968 POWER_DOMAIN_TRANSCODER(cpu_transcoder));
ddf9c536 16969 if (!error->transcoder[i].power_domain_on)
9d1cb914
PZ
16970 continue;
16971
63b66e5b
CW
16972 error->transcoder[i].cpu_transcoder = cpu_transcoder;
16973
16974 error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
16975 error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
16976 error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
16977 error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
16978 error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
16979 error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
16980 error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
c4a1d9e4
CW
16981 }
16982
16983 return error;
16984}
16985
edc3d884
MK
16986#define err_printf(e, ...) i915_error_printf(e, __VA_ARGS__)
16987
c4a1d9e4 16988void
edc3d884 16989intel_display_print_error_state(struct drm_i915_error_state_buf *m,
c4a1d9e4
CW
16990 struct intel_display_error_state *error)
16991{
5a4c6f1b 16992 struct drm_i915_private *dev_priv = m->i915;
c4a1d9e4
CW
16993 int i;
16994
63b66e5b
CW
16995 if (!error)
16996 return;
16997
b7f05d4a 16998 err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev_priv)->num_pipes);
8652744b 16999 if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
edc3d884 17000 err_printf(m, "PWR_WELL_CTL2: %08x\n",
ff57f1b0 17001 error->power_well_driver);
055e393f 17002 for_each_pipe(dev_priv, i) {
edc3d884 17003 err_printf(m, "Pipe [%d]:\n", i);
ddf9c536 17004 err_printf(m, " Power: %s\n",
87ad3212 17005 onoff(error->pipe[i].power_domain_on));
edc3d884 17006 err_printf(m, " SRC: %08x\n", error->pipe[i].source);
f301b1e1 17007 err_printf(m, " STAT: %08x\n", error->pipe[i].stat);
edc3d884
MK
17008
17009 err_printf(m, "Plane [%d]:\n", i);
17010 err_printf(m, " CNTR: %08x\n", error->plane[i].control);
17011 err_printf(m, " STRIDE: %08x\n", error->plane[i].stride);
5f56d5f9 17012 if (INTEL_GEN(dev_priv) <= 3) {
edc3d884
MK
17013 err_printf(m, " SIZE: %08x\n", error->plane[i].size);
17014 err_printf(m, " POS: %08x\n", error->plane[i].pos);
80ca378b 17015 }
772c2a51 17016 if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
edc3d884 17017 err_printf(m, " ADDR: %08x\n", error->plane[i].addr);
5f56d5f9 17018 if (INTEL_GEN(dev_priv) >= 4) {
edc3d884
MK
17019 err_printf(m, " SURF: %08x\n", error->plane[i].surface);
17020 err_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset);
c4a1d9e4
CW
17021 }
17022
edc3d884
MK
17023 err_printf(m, "Cursor [%d]:\n", i);
17024 err_printf(m, " CNTR: %08x\n", error->cursor[i].control);
17025 err_printf(m, " POS: %08x\n", error->cursor[i].position);
17026 err_printf(m, " BASE: %08x\n", error->cursor[i].base);
c4a1d9e4 17027 }
63b66e5b 17028
062de72b
LDM
17029 for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
17030 if (!error->transcoder[i].available)
17031 continue;
17032
da205630 17033 err_printf(m, "CPU transcoder: %s\n",
63b66e5b 17034 transcoder_name(error->transcoder[i].cpu_transcoder));
ddf9c536 17035 err_printf(m, " Power: %s\n",
87ad3212 17036 onoff(error->transcoder[i].power_domain_on));
63b66e5b
CW
17037 err_printf(m, " CONF: %08x\n", error->transcoder[i].conf);
17038 err_printf(m, " HTOTAL: %08x\n", error->transcoder[i].htotal);
17039 err_printf(m, " HBLANK: %08x\n", error->transcoder[i].hblank);
17040 err_printf(m, " HSYNC: %08x\n", error->transcoder[i].hsync);
17041 err_printf(m, " VTOTAL: %08x\n", error->transcoder[i].vtotal);
17042 err_printf(m, " VBLANK: %08x\n", error->transcoder[i].vblank);
17043 err_printf(m, " VSYNC: %08x\n", error->transcoder[i].vsync);
17044 }
c4a1d9e4 17045}
98a2f411
CW
17046
17047#endif