]>
Commit | Line | Data |
---|---|---|
79e53945 JB |
1 | /* |
2 | * Copyright © 2006-2007 Intel Corporation | |
3 | * | |
4 | * Permission is hereby granted, free of charge, to any person obtaining a | |
5 | * copy of this software and associated documentation files (the "Software"), | |
6 | * to deal in the Software without restriction, including without limitation | |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | |
8 | * and/or sell copies of the Software, and to permit persons to whom the | |
9 | * Software is furnished to do so, subject to the following conditions: | |
10 | * | |
11 | * The above copyright notice and this permission notice (including the next | |
12 | * paragraph) shall be included in all copies or substantial portions of the | |
13 | * Software. | |
14 | * | |
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | |
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | |
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | |
21 | * DEALINGS IN THE SOFTWARE. | |
22 | * | |
23 | * Authors: | |
24 | * Eric Anholt <eric@anholt.net> | |
25 | */ | |
26 | ||
23b2f8bb | 27 | #include <linux/cpufreq.h> |
c1c7af60 JB |
28 | #include <linux/module.h> |
29 | #include <linux/input.h> | |
79e53945 | 30 | #include <linux/i2c.h> |
7662c8bd | 31 | #include <linux/kernel.h> |
5a0e3ad6 | 32 | #include <linux/slab.h> |
9cce37f4 | 33 | #include <linux/vgaarb.h> |
e0dac65e | 34 | #include <drm/drm_edid.h> |
79e53945 JB |
35 | #include "drmP.h" |
36 | #include "intel_drv.h" | |
37 | #include "i915_drm.h" | |
38 | #include "i915_drv.h" | |
e5510fac | 39 | #include "i915_trace.h" |
ab2c0672 | 40 | #include "drm_dp_helper.h" |
79e53945 | 41 | #include "drm_crtc_helper.h" |
c0f372b3 | 42 | #include <linux/dma_remapping.h> |
79e53945 | 43 | |
32f9d658 ZW |
44 | #define HAS_eDP (intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP)) |
45 | ||
0206e353 | 46 | bool intel_pipe_has_type(struct drm_crtc *crtc, int type); |
7662c8bd | 47 | static void intel_update_watermarks(struct drm_device *dev); |
3dec0095 | 48 | static void intel_increase_pllclock(struct drm_crtc *crtc); |
6b383a7f | 49 | static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on); |
79e53945 JB |
50 | |
51 | typedef struct { | |
0206e353 AJ |
52 | /* given values */ |
53 | int n; | |
54 | int m1, m2; | |
55 | int p1, p2; | |
56 | /* derived values */ | |
57 | int dot; | |
58 | int vco; | |
59 | int m; | |
60 | int p; | |
79e53945 JB |
61 | } intel_clock_t; |
62 | ||
63 | typedef struct { | |
0206e353 | 64 | int min, max; |
79e53945 JB |
65 | } intel_range_t; |
66 | ||
67 | typedef struct { | |
0206e353 AJ |
68 | int dot_limit; |
69 | int p2_slow, p2_fast; | |
79e53945 JB |
70 | } intel_p2_t; |
71 | ||
72 | #define INTEL_P2_NUM 2 | |
d4906093 ML |
73 | typedef struct intel_limit intel_limit_t; |
74 | struct intel_limit { | |
0206e353 AJ |
75 | intel_range_t dot, vco, n, m, m1, m2, p, p1; |
76 | intel_p2_t p2; | |
77 | bool (* find_pll)(const intel_limit_t *, struct drm_crtc *, | |
78 | int, int, intel_clock_t *); | |
d4906093 | 79 | }; |
79e53945 | 80 | |
2377b741 JB |
81 | /* FDI */ |
82 | #define IRONLAKE_FDI_FREQ 2700000 /* in kHz for mode->clock */ | |
83 | ||
d4906093 ML |
84 | static bool |
85 | intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, | |
86 | int target, int refclk, intel_clock_t *best_clock); | |
87 | static bool | |
88 | intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, | |
89 | int target, int refclk, intel_clock_t *best_clock); | |
79e53945 | 90 | |
a4fc5ed6 KP |
91 | static bool |
92 | intel_find_pll_g4x_dp(const intel_limit_t *, struct drm_crtc *crtc, | |
93 | int target, int refclk, intel_clock_t *best_clock); | |
5eb08b69 | 94 | static bool |
f2b115e6 AJ |
95 | intel_find_pll_ironlake_dp(const intel_limit_t *, struct drm_crtc *crtc, |
96 | int target, int refclk, intel_clock_t *best_clock); | |
a4fc5ed6 | 97 | |
021357ac CW |
98 | static inline u32 /* units of 100MHz */ |
99 | intel_fdi_link_freq(struct drm_device *dev) | |
100 | { | |
8b99e68c CW |
101 | if (IS_GEN5(dev)) { |
102 | struct drm_i915_private *dev_priv = dev->dev_private; | |
103 | return (I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK) + 2; | |
104 | } else | |
105 | return 27; | |
021357ac CW |
106 | } |
107 | ||
e4b36699 | 108 | static const intel_limit_t intel_limits_i8xx_dvo = { |
0206e353 AJ |
109 | .dot = { .min = 25000, .max = 350000 }, |
110 | .vco = { .min = 930000, .max = 1400000 }, | |
111 | .n = { .min = 3, .max = 16 }, | |
112 | .m = { .min = 96, .max = 140 }, | |
113 | .m1 = { .min = 18, .max = 26 }, | |
114 | .m2 = { .min = 6, .max = 16 }, | |
115 | .p = { .min = 4, .max = 128 }, | |
116 | .p1 = { .min = 2, .max = 33 }, | |
273e27ca EA |
117 | .p2 = { .dot_limit = 165000, |
118 | .p2_slow = 4, .p2_fast = 2 }, | |
d4906093 | 119 | .find_pll = intel_find_best_PLL, |
e4b36699 KP |
120 | }; |
121 | ||
122 | static const intel_limit_t intel_limits_i8xx_lvds = { | |
0206e353 AJ |
123 | .dot = { .min = 25000, .max = 350000 }, |
124 | .vco = { .min = 930000, .max = 1400000 }, | |
125 | .n = { .min = 3, .max = 16 }, | |
126 | .m = { .min = 96, .max = 140 }, | |
127 | .m1 = { .min = 18, .max = 26 }, | |
128 | .m2 = { .min = 6, .max = 16 }, | |
129 | .p = { .min = 4, .max = 128 }, | |
130 | .p1 = { .min = 1, .max = 6 }, | |
273e27ca EA |
131 | .p2 = { .dot_limit = 165000, |
132 | .p2_slow = 14, .p2_fast = 7 }, | |
d4906093 | 133 | .find_pll = intel_find_best_PLL, |
e4b36699 | 134 | }; |
273e27ca | 135 | |
e4b36699 | 136 | static const intel_limit_t intel_limits_i9xx_sdvo = { |
0206e353 AJ |
137 | .dot = { .min = 20000, .max = 400000 }, |
138 | .vco = { .min = 1400000, .max = 2800000 }, | |
139 | .n = { .min = 1, .max = 6 }, | |
140 | .m = { .min = 70, .max = 120 }, | |
141 | .m1 = { .min = 10, .max = 22 }, | |
142 | .m2 = { .min = 5, .max = 9 }, | |
143 | .p = { .min = 5, .max = 80 }, | |
144 | .p1 = { .min = 1, .max = 8 }, | |
273e27ca EA |
145 | .p2 = { .dot_limit = 200000, |
146 | .p2_slow = 10, .p2_fast = 5 }, | |
d4906093 | 147 | .find_pll = intel_find_best_PLL, |
e4b36699 KP |
148 | }; |
149 | ||
150 | static const intel_limit_t intel_limits_i9xx_lvds = { | |
0206e353 AJ |
151 | .dot = { .min = 20000, .max = 400000 }, |
152 | .vco = { .min = 1400000, .max = 2800000 }, | |
153 | .n = { .min = 1, .max = 6 }, | |
154 | .m = { .min = 70, .max = 120 }, | |
155 | .m1 = { .min = 10, .max = 22 }, | |
156 | .m2 = { .min = 5, .max = 9 }, | |
157 | .p = { .min = 7, .max = 98 }, | |
158 | .p1 = { .min = 1, .max = 8 }, | |
273e27ca EA |
159 | .p2 = { .dot_limit = 112000, |
160 | .p2_slow = 14, .p2_fast = 7 }, | |
d4906093 | 161 | .find_pll = intel_find_best_PLL, |
e4b36699 KP |
162 | }; |
163 | ||
273e27ca | 164 | |
e4b36699 | 165 | static const intel_limit_t intel_limits_g4x_sdvo = { |
273e27ca EA |
166 | .dot = { .min = 25000, .max = 270000 }, |
167 | .vco = { .min = 1750000, .max = 3500000}, | |
168 | .n = { .min = 1, .max = 4 }, | |
169 | .m = { .min = 104, .max = 138 }, | |
170 | .m1 = { .min = 17, .max = 23 }, | |
171 | .m2 = { .min = 5, .max = 11 }, | |
172 | .p = { .min = 10, .max = 30 }, | |
173 | .p1 = { .min = 1, .max = 3}, | |
174 | .p2 = { .dot_limit = 270000, | |
175 | .p2_slow = 10, | |
176 | .p2_fast = 10 | |
044c7c41 | 177 | }, |
d4906093 | 178 | .find_pll = intel_g4x_find_best_PLL, |
e4b36699 KP |
179 | }; |
180 | ||
181 | static const intel_limit_t intel_limits_g4x_hdmi = { | |
273e27ca EA |
182 | .dot = { .min = 22000, .max = 400000 }, |
183 | .vco = { .min = 1750000, .max = 3500000}, | |
184 | .n = { .min = 1, .max = 4 }, | |
185 | .m = { .min = 104, .max = 138 }, | |
186 | .m1 = { .min = 16, .max = 23 }, | |
187 | .m2 = { .min = 5, .max = 11 }, | |
188 | .p = { .min = 5, .max = 80 }, | |
189 | .p1 = { .min = 1, .max = 8}, | |
190 | .p2 = { .dot_limit = 165000, | |
191 | .p2_slow = 10, .p2_fast = 5 }, | |
d4906093 | 192 | .find_pll = intel_g4x_find_best_PLL, |
e4b36699 KP |
193 | }; |
194 | ||
195 | static const intel_limit_t intel_limits_g4x_single_channel_lvds = { | |
273e27ca EA |
196 | .dot = { .min = 20000, .max = 115000 }, |
197 | .vco = { .min = 1750000, .max = 3500000 }, | |
198 | .n = { .min = 1, .max = 3 }, | |
199 | .m = { .min = 104, .max = 138 }, | |
200 | .m1 = { .min = 17, .max = 23 }, | |
201 | .m2 = { .min = 5, .max = 11 }, | |
202 | .p = { .min = 28, .max = 112 }, | |
203 | .p1 = { .min = 2, .max = 8 }, | |
204 | .p2 = { .dot_limit = 0, | |
205 | .p2_slow = 14, .p2_fast = 14 | |
044c7c41 | 206 | }, |
d4906093 | 207 | .find_pll = intel_g4x_find_best_PLL, |
e4b36699 KP |
208 | }; |
209 | ||
210 | static const intel_limit_t intel_limits_g4x_dual_channel_lvds = { | |
273e27ca EA |
211 | .dot = { .min = 80000, .max = 224000 }, |
212 | .vco = { .min = 1750000, .max = 3500000 }, | |
213 | .n = { .min = 1, .max = 3 }, | |
214 | .m = { .min = 104, .max = 138 }, | |
215 | .m1 = { .min = 17, .max = 23 }, | |
216 | .m2 = { .min = 5, .max = 11 }, | |
217 | .p = { .min = 14, .max = 42 }, | |
218 | .p1 = { .min = 2, .max = 6 }, | |
219 | .p2 = { .dot_limit = 0, | |
220 | .p2_slow = 7, .p2_fast = 7 | |
044c7c41 | 221 | }, |
d4906093 | 222 | .find_pll = intel_g4x_find_best_PLL, |
e4b36699 KP |
223 | }; |
224 | ||
225 | static const intel_limit_t intel_limits_g4x_display_port = { | |
0206e353 AJ |
226 | .dot = { .min = 161670, .max = 227000 }, |
227 | .vco = { .min = 1750000, .max = 3500000}, | |
228 | .n = { .min = 1, .max = 2 }, | |
229 | .m = { .min = 97, .max = 108 }, | |
230 | .m1 = { .min = 0x10, .max = 0x12 }, | |
231 | .m2 = { .min = 0x05, .max = 0x06 }, | |
232 | .p = { .min = 10, .max = 20 }, | |
233 | .p1 = { .min = 1, .max = 2}, | |
234 | .p2 = { .dot_limit = 0, | |
273e27ca | 235 | .p2_slow = 10, .p2_fast = 10 }, |
0206e353 | 236 | .find_pll = intel_find_pll_g4x_dp, |
e4b36699 KP |
237 | }; |
238 | ||
f2b115e6 | 239 | static const intel_limit_t intel_limits_pineview_sdvo = { |
0206e353 AJ |
240 | .dot = { .min = 20000, .max = 400000}, |
241 | .vco = { .min = 1700000, .max = 3500000 }, | |
273e27ca | 242 | /* Pineview's Ncounter is a ring counter */ |
0206e353 AJ |
243 | .n = { .min = 3, .max = 6 }, |
244 | .m = { .min = 2, .max = 256 }, | |
273e27ca | 245 | /* Pineview only has one combined m divider, which we treat as m2. */ |
0206e353 AJ |
246 | .m1 = { .min = 0, .max = 0 }, |
247 | .m2 = { .min = 0, .max = 254 }, | |
248 | .p = { .min = 5, .max = 80 }, | |
249 | .p1 = { .min = 1, .max = 8 }, | |
273e27ca EA |
250 | .p2 = { .dot_limit = 200000, |
251 | .p2_slow = 10, .p2_fast = 5 }, | |
6115707b | 252 | .find_pll = intel_find_best_PLL, |
e4b36699 KP |
253 | }; |
254 | ||
f2b115e6 | 255 | static const intel_limit_t intel_limits_pineview_lvds = { |
0206e353 AJ |
256 | .dot = { .min = 20000, .max = 400000 }, |
257 | .vco = { .min = 1700000, .max = 3500000 }, | |
258 | .n = { .min = 3, .max = 6 }, | |
259 | .m = { .min = 2, .max = 256 }, | |
260 | .m1 = { .min = 0, .max = 0 }, | |
261 | .m2 = { .min = 0, .max = 254 }, | |
262 | .p = { .min = 7, .max = 112 }, | |
263 | .p1 = { .min = 1, .max = 8 }, | |
273e27ca EA |
264 | .p2 = { .dot_limit = 112000, |
265 | .p2_slow = 14, .p2_fast = 14 }, | |
6115707b | 266 | .find_pll = intel_find_best_PLL, |
e4b36699 KP |
267 | }; |
268 | ||
273e27ca EA |
269 | /* Ironlake / Sandybridge |
270 | * | |
271 | * We calculate clock using (register_value + 2) for N/M1/M2, so here | |
272 | * the range value for them is (actual_value - 2). | |
273 | */ | |
b91ad0ec | 274 | static const intel_limit_t intel_limits_ironlake_dac = { |
273e27ca EA |
275 | .dot = { .min = 25000, .max = 350000 }, |
276 | .vco = { .min = 1760000, .max = 3510000 }, | |
277 | .n = { .min = 1, .max = 5 }, | |
278 | .m = { .min = 79, .max = 127 }, | |
279 | .m1 = { .min = 12, .max = 22 }, | |
280 | .m2 = { .min = 5, .max = 9 }, | |
281 | .p = { .min = 5, .max = 80 }, | |
282 | .p1 = { .min = 1, .max = 8 }, | |
283 | .p2 = { .dot_limit = 225000, | |
284 | .p2_slow = 10, .p2_fast = 5 }, | |
4547668a | 285 | .find_pll = intel_g4x_find_best_PLL, |
e4b36699 KP |
286 | }; |
287 | ||
b91ad0ec | 288 | static const intel_limit_t intel_limits_ironlake_single_lvds = { |
273e27ca EA |
289 | .dot = { .min = 25000, .max = 350000 }, |
290 | .vco = { .min = 1760000, .max = 3510000 }, | |
291 | .n = { .min = 1, .max = 3 }, | |
292 | .m = { .min = 79, .max = 118 }, | |
293 | .m1 = { .min = 12, .max = 22 }, | |
294 | .m2 = { .min = 5, .max = 9 }, | |
295 | .p = { .min = 28, .max = 112 }, | |
296 | .p1 = { .min = 2, .max = 8 }, | |
297 | .p2 = { .dot_limit = 225000, | |
298 | .p2_slow = 14, .p2_fast = 14 }, | |
b91ad0ec ZW |
299 | .find_pll = intel_g4x_find_best_PLL, |
300 | }; | |
301 | ||
302 | static const intel_limit_t intel_limits_ironlake_dual_lvds = { | |
273e27ca EA |
303 | .dot = { .min = 25000, .max = 350000 }, |
304 | .vco = { .min = 1760000, .max = 3510000 }, | |
305 | .n = { .min = 1, .max = 3 }, | |
306 | .m = { .min = 79, .max = 127 }, | |
307 | .m1 = { .min = 12, .max = 22 }, | |
308 | .m2 = { .min = 5, .max = 9 }, | |
309 | .p = { .min = 14, .max = 56 }, | |
310 | .p1 = { .min = 2, .max = 8 }, | |
311 | .p2 = { .dot_limit = 225000, | |
312 | .p2_slow = 7, .p2_fast = 7 }, | |
b91ad0ec ZW |
313 | .find_pll = intel_g4x_find_best_PLL, |
314 | }; | |
315 | ||
273e27ca | 316 | /* LVDS 100mhz refclk limits. */ |
b91ad0ec | 317 | static const intel_limit_t intel_limits_ironlake_single_lvds_100m = { |
273e27ca EA |
318 | .dot = { .min = 25000, .max = 350000 }, |
319 | .vco = { .min = 1760000, .max = 3510000 }, | |
320 | .n = { .min = 1, .max = 2 }, | |
321 | .m = { .min = 79, .max = 126 }, | |
322 | .m1 = { .min = 12, .max = 22 }, | |
323 | .m2 = { .min = 5, .max = 9 }, | |
324 | .p = { .min = 28, .max = 112 }, | |
0206e353 | 325 | .p1 = { .min = 2, .max = 8 }, |
273e27ca EA |
326 | .p2 = { .dot_limit = 225000, |
327 | .p2_slow = 14, .p2_fast = 14 }, | |
b91ad0ec ZW |
328 | .find_pll = intel_g4x_find_best_PLL, |
329 | }; | |
330 | ||
331 | static const intel_limit_t intel_limits_ironlake_dual_lvds_100m = { | |
273e27ca EA |
332 | .dot = { .min = 25000, .max = 350000 }, |
333 | .vco = { .min = 1760000, .max = 3510000 }, | |
334 | .n = { .min = 1, .max = 3 }, | |
335 | .m = { .min = 79, .max = 126 }, | |
336 | .m1 = { .min = 12, .max = 22 }, | |
337 | .m2 = { .min = 5, .max = 9 }, | |
338 | .p = { .min = 14, .max = 42 }, | |
0206e353 | 339 | .p1 = { .min = 2, .max = 6 }, |
273e27ca EA |
340 | .p2 = { .dot_limit = 225000, |
341 | .p2_slow = 7, .p2_fast = 7 }, | |
4547668a ZY |
342 | .find_pll = intel_g4x_find_best_PLL, |
343 | }; | |
344 | ||
345 | static const intel_limit_t intel_limits_ironlake_display_port = { | |
0206e353 AJ |
346 | .dot = { .min = 25000, .max = 350000 }, |
347 | .vco = { .min = 1760000, .max = 3510000}, | |
348 | .n = { .min = 1, .max = 2 }, | |
349 | .m = { .min = 81, .max = 90 }, | |
350 | .m1 = { .min = 12, .max = 22 }, | |
351 | .m2 = { .min = 5, .max = 9 }, | |
352 | .p = { .min = 10, .max = 20 }, | |
353 | .p1 = { .min = 1, .max = 2}, | |
354 | .p2 = { .dot_limit = 0, | |
273e27ca | 355 | .p2_slow = 10, .p2_fast = 10 }, |
0206e353 | 356 | .find_pll = intel_find_pll_ironlake_dp, |
79e53945 JB |
357 | }; |
358 | ||
1b894b59 CW |
359 | static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc, |
360 | int refclk) | |
2c07245f | 361 | { |
b91ad0ec ZW |
362 | struct drm_device *dev = crtc->dev; |
363 | struct drm_i915_private *dev_priv = dev->dev_private; | |
2c07245f | 364 | const intel_limit_t *limit; |
b91ad0ec ZW |
365 | |
366 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { | |
b91ad0ec ZW |
367 | if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == |
368 | LVDS_CLKB_POWER_UP) { | |
369 | /* LVDS dual channel */ | |
1b894b59 | 370 | if (refclk == 100000) |
b91ad0ec ZW |
371 | limit = &intel_limits_ironlake_dual_lvds_100m; |
372 | else | |
373 | limit = &intel_limits_ironlake_dual_lvds; | |
374 | } else { | |
1b894b59 | 375 | if (refclk == 100000) |
b91ad0ec ZW |
376 | limit = &intel_limits_ironlake_single_lvds_100m; |
377 | else | |
378 | limit = &intel_limits_ironlake_single_lvds; | |
379 | } | |
380 | } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) || | |
4547668a ZY |
381 | HAS_eDP) |
382 | limit = &intel_limits_ironlake_display_port; | |
2c07245f | 383 | else |
b91ad0ec | 384 | limit = &intel_limits_ironlake_dac; |
2c07245f ZW |
385 | |
386 | return limit; | |
387 | } | |
388 | ||
044c7c41 ML |
389 | static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc) |
390 | { | |
391 | struct drm_device *dev = crtc->dev; | |
392 | struct drm_i915_private *dev_priv = dev->dev_private; | |
393 | const intel_limit_t *limit; | |
394 | ||
395 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { | |
396 | if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) == | |
397 | LVDS_CLKB_POWER_UP) | |
398 | /* LVDS with dual channel */ | |
e4b36699 | 399 | limit = &intel_limits_g4x_dual_channel_lvds; |
044c7c41 ML |
400 | else |
401 | /* LVDS with dual channel */ | |
e4b36699 | 402 | limit = &intel_limits_g4x_single_channel_lvds; |
044c7c41 ML |
403 | } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_HDMI) || |
404 | intel_pipe_has_type(crtc, INTEL_OUTPUT_ANALOG)) { | |
e4b36699 | 405 | limit = &intel_limits_g4x_hdmi; |
044c7c41 | 406 | } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_SDVO)) { |
e4b36699 | 407 | limit = &intel_limits_g4x_sdvo; |
0206e353 | 408 | } else if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) { |
e4b36699 | 409 | limit = &intel_limits_g4x_display_port; |
044c7c41 | 410 | } else /* The option is for other outputs */ |
e4b36699 | 411 | limit = &intel_limits_i9xx_sdvo; |
044c7c41 ML |
412 | |
413 | return limit; | |
414 | } | |
415 | ||
1b894b59 | 416 | static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk) |
79e53945 JB |
417 | { |
418 | struct drm_device *dev = crtc->dev; | |
419 | const intel_limit_t *limit; | |
420 | ||
bad720ff | 421 | if (HAS_PCH_SPLIT(dev)) |
1b894b59 | 422 | limit = intel_ironlake_limit(crtc, refclk); |
2c07245f | 423 | else if (IS_G4X(dev)) { |
044c7c41 | 424 | limit = intel_g4x_limit(crtc); |
f2b115e6 | 425 | } else if (IS_PINEVIEW(dev)) { |
2177832f | 426 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) |
f2b115e6 | 427 | limit = &intel_limits_pineview_lvds; |
2177832f | 428 | else |
f2b115e6 | 429 | limit = &intel_limits_pineview_sdvo; |
a6c45cf0 CW |
430 | } else if (!IS_GEN2(dev)) { |
431 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) | |
432 | limit = &intel_limits_i9xx_lvds; | |
433 | else | |
434 | limit = &intel_limits_i9xx_sdvo; | |
79e53945 JB |
435 | } else { |
436 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) | |
e4b36699 | 437 | limit = &intel_limits_i8xx_lvds; |
79e53945 | 438 | else |
e4b36699 | 439 | limit = &intel_limits_i8xx_dvo; |
79e53945 JB |
440 | } |
441 | return limit; | |
442 | } | |
443 | ||
f2b115e6 AJ |
444 | /* m1 is reserved as 0 in Pineview, n is a ring counter */ |
445 | static void pineview_clock(int refclk, intel_clock_t *clock) | |
79e53945 | 446 | { |
2177832f SL |
447 | clock->m = clock->m2 + 2; |
448 | clock->p = clock->p1 * clock->p2; | |
449 | clock->vco = refclk * clock->m / clock->n; | |
450 | clock->dot = clock->vco / clock->p; | |
451 | } | |
452 | ||
453 | static void intel_clock(struct drm_device *dev, int refclk, intel_clock_t *clock) | |
454 | { | |
f2b115e6 AJ |
455 | if (IS_PINEVIEW(dev)) { |
456 | pineview_clock(refclk, clock); | |
2177832f SL |
457 | return; |
458 | } | |
79e53945 JB |
459 | clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2); |
460 | clock->p = clock->p1 * clock->p2; | |
461 | clock->vco = refclk * clock->m / (clock->n + 2); | |
462 | clock->dot = clock->vco / clock->p; | |
463 | } | |
464 | ||
79e53945 JB |
465 | /** |
466 | * Returns whether any output on the specified pipe is of the specified type | |
467 | */ | |
4ef69c7a | 468 | bool intel_pipe_has_type(struct drm_crtc *crtc, int type) |
79e53945 | 469 | { |
4ef69c7a CW |
470 | struct drm_device *dev = crtc->dev; |
471 | struct drm_mode_config *mode_config = &dev->mode_config; | |
472 | struct intel_encoder *encoder; | |
473 | ||
474 | list_for_each_entry(encoder, &mode_config->encoder_list, base.head) | |
475 | if (encoder->base.crtc == crtc && encoder->type == type) | |
476 | return true; | |
477 | ||
478 | return false; | |
79e53945 JB |
479 | } |
480 | ||
7c04d1d9 | 481 | #define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0) |
79e53945 JB |
482 | /** |
483 | * Returns whether the given set of divisors are valid for a given refclk with | |
484 | * the given connectors. | |
485 | */ | |
486 | ||
1b894b59 CW |
487 | static bool intel_PLL_is_valid(struct drm_device *dev, |
488 | const intel_limit_t *limit, | |
489 | const intel_clock_t *clock) | |
79e53945 | 490 | { |
79e53945 | 491 | if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1) |
0206e353 | 492 | INTELPllInvalid("p1 out of range\n"); |
79e53945 | 493 | if (clock->p < limit->p.min || limit->p.max < clock->p) |
0206e353 | 494 | INTELPllInvalid("p out of range\n"); |
79e53945 | 495 | if (clock->m2 < limit->m2.min || limit->m2.max < clock->m2) |
0206e353 | 496 | INTELPllInvalid("m2 out of range\n"); |
79e53945 | 497 | if (clock->m1 < limit->m1.min || limit->m1.max < clock->m1) |
0206e353 | 498 | INTELPllInvalid("m1 out of range\n"); |
f2b115e6 | 499 | if (clock->m1 <= clock->m2 && !IS_PINEVIEW(dev)) |
0206e353 | 500 | INTELPllInvalid("m1 <= m2\n"); |
79e53945 | 501 | if (clock->m < limit->m.min || limit->m.max < clock->m) |
0206e353 | 502 | INTELPllInvalid("m out of range\n"); |
79e53945 | 503 | if (clock->n < limit->n.min || limit->n.max < clock->n) |
0206e353 | 504 | INTELPllInvalid("n out of range\n"); |
79e53945 | 505 | if (clock->vco < limit->vco.min || limit->vco.max < clock->vco) |
0206e353 | 506 | INTELPllInvalid("vco out of range\n"); |
79e53945 JB |
507 | /* XXX: We may need to be checking "Dot clock" depending on the multiplier, |
508 | * connector, etc., rather than just a single range. | |
509 | */ | |
510 | if (clock->dot < limit->dot.min || limit->dot.max < clock->dot) | |
0206e353 | 511 | INTELPllInvalid("dot out of range\n"); |
79e53945 JB |
512 | |
513 | return true; | |
514 | } | |
515 | ||
d4906093 ML |
516 | static bool |
517 | intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, | |
518 | int target, int refclk, intel_clock_t *best_clock) | |
519 | ||
79e53945 JB |
520 | { |
521 | struct drm_device *dev = crtc->dev; | |
522 | struct drm_i915_private *dev_priv = dev->dev_private; | |
523 | intel_clock_t clock; | |
79e53945 JB |
524 | int err = target; |
525 | ||
bc5e5718 | 526 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) && |
832cc28d | 527 | (I915_READ(LVDS)) != 0) { |
79e53945 JB |
528 | /* |
529 | * For LVDS, if the panel is on, just rely on its current | |
530 | * settings for dual-channel. We haven't figured out how to | |
531 | * reliably set up different single/dual channel state, if we | |
532 | * even can. | |
533 | */ | |
534 | if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) == | |
535 | LVDS_CLKB_POWER_UP) | |
536 | clock.p2 = limit->p2.p2_fast; | |
537 | else | |
538 | clock.p2 = limit->p2.p2_slow; | |
539 | } else { | |
540 | if (target < limit->p2.dot_limit) | |
541 | clock.p2 = limit->p2.p2_slow; | |
542 | else | |
543 | clock.p2 = limit->p2.p2_fast; | |
544 | } | |
545 | ||
0206e353 | 546 | memset(best_clock, 0, sizeof(*best_clock)); |
79e53945 | 547 | |
42158660 ZY |
548 | for (clock.m1 = limit->m1.min; clock.m1 <= limit->m1.max; |
549 | clock.m1++) { | |
550 | for (clock.m2 = limit->m2.min; | |
551 | clock.m2 <= limit->m2.max; clock.m2++) { | |
f2b115e6 AJ |
552 | /* m1 is always 0 in Pineview */ |
553 | if (clock.m2 >= clock.m1 && !IS_PINEVIEW(dev)) | |
42158660 ZY |
554 | break; |
555 | for (clock.n = limit->n.min; | |
556 | clock.n <= limit->n.max; clock.n++) { | |
557 | for (clock.p1 = limit->p1.min; | |
558 | clock.p1 <= limit->p1.max; clock.p1++) { | |
79e53945 JB |
559 | int this_err; |
560 | ||
2177832f | 561 | intel_clock(dev, refclk, &clock); |
1b894b59 CW |
562 | if (!intel_PLL_is_valid(dev, limit, |
563 | &clock)) | |
79e53945 JB |
564 | continue; |
565 | ||
566 | this_err = abs(clock.dot - target); | |
567 | if (this_err < err) { | |
568 | *best_clock = clock; | |
569 | err = this_err; | |
570 | } | |
571 | } | |
572 | } | |
573 | } | |
574 | } | |
575 | ||
576 | return (err != target); | |
577 | } | |
578 | ||
d4906093 ML |
579 | static bool |
580 | intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, | |
581 | int target, int refclk, intel_clock_t *best_clock) | |
582 | { | |
583 | struct drm_device *dev = crtc->dev; | |
584 | struct drm_i915_private *dev_priv = dev->dev_private; | |
585 | intel_clock_t clock; | |
586 | int max_n; | |
587 | bool found; | |
6ba770dc AJ |
588 | /* approximately equals target * 0.00585 */ |
589 | int err_most = (target >> 8) + (target >> 9); | |
d4906093 ML |
590 | found = false; |
591 | ||
592 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { | |
4547668a ZY |
593 | int lvds_reg; |
594 | ||
c619eed4 | 595 | if (HAS_PCH_SPLIT(dev)) |
4547668a ZY |
596 | lvds_reg = PCH_LVDS; |
597 | else | |
598 | lvds_reg = LVDS; | |
599 | if ((I915_READ(lvds_reg) & LVDS_CLKB_POWER_MASK) == | |
d4906093 ML |
600 | LVDS_CLKB_POWER_UP) |
601 | clock.p2 = limit->p2.p2_fast; | |
602 | else | |
603 | clock.p2 = limit->p2.p2_slow; | |
604 | } else { | |
605 | if (target < limit->p2.dot_limit) | |
606 | clock.p2 = limit->p2.p2_slow; | |
607 | else | |
608 | clock.p2 = limit->p2.p2_fast; | |
609 | } | |
610 | ||
611 | memset(best_clock, 0, sizeof(*best_clock)); | |
612 | max_n = limit->n.max; | |
f77f13e2 | 613 | /* based on hardware requirement, prefer smaller n to precision */ |
d4906093 | 614 | for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) { |
f77f13e2 | 615 | /* based on hardware requirement, prefere larger m1,m2 */ |
d4906093 ML |
616 | for (clock.m1 = limit->m1.max; |
617 | clock.m1 >= limit->m1.min; clock.m1--) { | |
618 | for (clock.m2 = limit->m2.max; | |
619 | clock.m2 >= limit->m2.min; clock.m2--) { | |
620 | for (clock.p1 = limit->p1.max; | |
621 | clock.p1 >= limit->p1.min; clock.p1--) { | |
622 | int this_err; | |
623 | ||
2177832f | 624 | intel_clock(dev, refclk, &clock); |
1b894b59 CW |
625 | if (!intel_PLL_is_valid(dev, limit, |
626 | &clock)) | |
d4906093 | 627 | continue; |
1b894b59 CW |
628 | |
629 | this_err = abs(clock.dot - target); | |
d4906093 ML |
630 | if (this_err < err_most) { |
631 | *best_clock = clock; | |
632 | err_most = this_err; | |
633 | max_n = clock.n; | |
634 | found = true; | |
635 | } | |
636 | } | |
637 | } | |
638 | } | |
639 | } | |
2c07245f ZW |
640 | return found; |
641 | } | |
642 | ||
5eb08b69 | 643 | static bool |
f2b115e6 AJ |
644 | intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc, |
645 | int target, int refclk, intel_clock_t *best_clock) | |
5eb08b69 ZW |
646 | { |
647 | struct drm_device *dev = crtc->dev; | |
648 | intel_clock_t clock; | |
4547668a | 649 | |
5eb08b69 ZW |
650 | if (target < 200000) { |
651 | clock.n = 1; | |
652 | clock.p1 = 2; | |
653 | clock.p2 = 10; | |
654 | clock.m1 = 12; | |
655 | clock.m2 = 9; | |
656 | } else { | |
657 | clock.n = 2; | |
658 | clock.p1 = 1; | |
659 | clock.p2 = 10; | |
660 | clock.m1 = 14; | |
661 | clock.m2 = 8; | |
662 | } | |
663 | intel_clock(dev, refclk, &clock); | |
664 | memcpy(best_clock, &clock, sizeof(intel_clock_t)); | |
665 | return true; | |
666 | } | |
667 | ||
a4fc5ed6 KP |
668 | /* DisplayPort has only two frequencies, 162MHz and 270MHz */ |
669 | static bool | |
670 | intel_find_pll_g4x_dp(const intel_limit_t *limit, struct drm_crtc *crtc, | |
671 | int target, int refclk, intel_clock_t *best_clock) | |
672 | { | |
5eddb70b CW |
673 | intel_clock_t clock; |
674 | if (target < 200000) { | |
675 | clock.p1 = 2; | |
676 | clock.p2 = 10; | |
677 | clock.n = 2; | |
678 | clock.m1 = 23; | |
679 | clock.m2 = 8; | |
680 | } else { | |
681 | clock.p1 = 1; | |
682 | clock.p2 = 10; | |
683 | clock.n = 1; | |
684 | clock.m1 = 14; | |
685 | clock.m2 = 2; | |
686 | } | |
687 | clock.m = 5 * (clock.m1 + 2) + (clock.m2 + 2); | |
688 | clock.p = (clock.p1 * clock.p2); | |
689 | clock.dot = 96000 * clock.m / (clock.n + 2) / clock.p; | |
690 | clock.vco = 0; | |
691 | memcpy(best_clock, &clock, sizeof(intel_clock_t)); | |
692 | return true; | |
a4fc5ed6 KP |
693 | } |
694 | ||
9d0498a2 JB |
695 | /** |
696 | * intel_wait_for_vblank - wait for vblank on a given pipe | |
697 | * @dev: drm device | |
698 | * @pipe: pipe to wait for | |
699 | * | |
700 | * Wait for vblank to occur on a given pipe. Needed for various bits of | |
701 | * mode setting code. | |
702 | */ | |
703 | void intel_wait_for_vblank(struct drm_device *dev, int pipe) | |
79e53945 | 704 | { |
9d0498a2 | 705 | struct drm_i915_private *dev_priv = dev->dev_private; |
9db4a9c7 | 706 | int pipestat_reg = PIPESTAT(pipe); |
9d0498a2 | 707 | |
300387c0 CW |
708 | /* Clear existing vblank status. Note this will clear any other |
709 | * sticky status fields as well. | |
710 | * | |
711 | * This races with i915_driver_irq_handler() with the result | |
712 | * that either function could miss a vblank event. Here it is not | |
713 | * fatal, as we will either wait upon the next vblank interrupt or | |
714 | * timeout. Generally speaking intel_wait_for_vblank() is only | |
715 | * called during modeset at which time the GPU should be idle and | |
716 | * should *not* be performing page flips and thus not waiting on | |
717 | * vblanks... | |
718 | * Currently, the result of us stealing a vblank from the irq | |
719 | * handler is that a single frame will be skipped during swapbuffers. | |
720 | */ | |
721 | I915_WRITE(pipestat_reg, | |
722 | I915_READ(pipestat_reg) | PIPE_VBLANK_INTERRUPT_STATUS); | |
723 | ||
9d0498a2 | 724 | /* Wait for vblank interrupt bit to set */ |
481b6af3 CW |
725 | if (wait_for(I915_READ(pipestat_reg) & |
726 | PIPE_VBLANK_INTERRUPT_STATUS, | |
727 | 50)) | |
9d0498a2 JB |
728 | DRM_DEBUG_KMS("vblank wait timed out\n"); |
729 | } | |
730 | ||
ab7ad7f6 KP |
731 | /* |
732 | * intel_wait_for_pipe_off - wait for pipe to turn off | |
9d0498a2 JB |
733 | * @dev: drm device |
734 | * @pipe: pipe to wait for | |
735 | * | |
736 | * After disabling a pipe, we can't wait for vblank in the usual way, | |
737 | * spinning on the vblank interrupt status bit, since we won't actually | |
738 | * see an interrupt when the pipe is disabled. | |
739 | * | |
ab7ad7f6 KP |
740 | * On Gen4 and above: |
741 | * wait for the pipe register state bit to turn off | |
742 | * | |
743 | * Otherwise: | |
744 | * wait for the display line value to settle (it usually | |
745 | * ends up stopping at the start of the next frame). | |
58e10eb9 | 746 | * |
9d0498a2 | 747 | */ |
58e10eb9 | 748 | void intel_wait_for_pipe_off(struct drm_device *dev, int pipe) |
9d0498a2 JB |
749 | { |
750 | struct drm_i915_private *dev_priv = dev->dev_private; | |
ab7ad7f6 KP |
751 | |
752 | if (INTEL_INFO(dev)->gen >= 4) { | |
58e10eb9 | 753 | int reg = PIPECONF(pipe); |
ab7ad7f6 KP |
754 | |
755 | /* Wait for the Pipe State to go off */ | |
58e10eb9 CW |
756 | if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0, |
757 | 100)) | |
ab7ad7f6 KP |
758 | DRM_DEBUG_KMS("pipe_off wait timed out\n"); |
759 | } else { | |
760 | u32 last_line; | |
58e10eb9 | 761 | int reg = PIPEDSL(pipe); |
ab7ad7f6 KP |
762 | unsigned long timeout = jiffies + msecs_to_jiffies(100); |
763 | ||
764 | /* Wait for the display line to settle */ | |
765 | do { | |
58e10eb9 | 766 | last_line = I915_READ(reg) & DSL_LINEMASK; |
ab7ad7f6 | 767 | mdelay(5); |
58e10eb9 | 768 | } while (((I915_READ(reg) & DSL_LINEMASK) != last_line) && |
ab7ad7f6 KP |
769 | time_after(timeout, jiffies)); |
770 | if (time_after(jiffies, timeout)) | |
771 | DRM_DEBUG_KMS("pipe_off wait timed out\n"); | |
772 | } | |
79e53945 JB |
773 | } |
774 | ||
b24e7179 JB |
775 | static const char *state_string(bool enabled) |
776 | { | |
777 | return enabled ? "on" : "off"; | |
778 | } | |
779 | ||
780 | /* Only for pre-ILK configs */ | |
781 | static void assert_pll(struct drm_i915_private *dev_priv, | |
782 | enum pipe pipe, bool state) | |
783 | { | |
784 | int reg; | |
785 | u32 val; | |
786 | bool cur_state; | |
787 | ||
788 | reg = DPLL(pipe); | |
789 | val = I915_READ(reg); | |
790 | cur_state = !!(val & DPLL_VCO_ENABLE); | |
791 | WARN(cur_state != state, | |
792 | "PLL state assertion failure (expected %s, current %s)\n", | |
793 | state_string(state), state_string(cur_state)); | |
794 | } | |
795 | #define assert_pll_enabled(d, p) assert_pll(d, p, true) | |
796 | #define assert_pll_disabled(d, p) assert_pll(d, p, false) | |
797 | ||
040484af JB |
798 | /* For ILK+ */ |
799 | static void assert_pch_pll(struct drm_i915_private *dev_priv, | |
800 | enum pipe pipe, bool state) | |
801 | { | |
802 | int reg; | |
803 | u32 val; | |
804 | bool cur_state; | |
805 | ||
d3ccbe86 JB |
806 | if (HAS_PCH_CPT(dev_priv->dev)) { |
807 | u32 pch_dpll; | |
808 | ||
809 | pch_dpll = I915_READ(PCH_DPLL_SEL); | |
810 | ||
811 | /* Make sure the selected PLL is enabled to the transcoder */ | |
812 | WARN(!((pch_dpll >> (4 * pipe)) & 8), | |
813 | "transcoder %d PLL not enabled\n", pipe); | |
814 | ||
815 | /* Convert the transcoder pipe number to a pll pipe number */ | |
816 | pipe = (pch_dpll >> (4 * pipe)) & 1; | |
817 | } | |
818 | ||
040484af JB |
819 | reg = PCH_DPLL(pipe); |
820 | val = I915_READ(reg); | |
821 | cur_state = !!(val & DPLL_VCO_ENABLE); | |
822 | WARN(cur_state != state, | |
823 | "PCH PLL state assertion failure (expected %s, current %s)\n", | |
824 | state_string(state), state_string(cur_state)); | |
825 | } | |
826 | #define assert_pch_pll_enabled(d, p) assert_pch_pll(d, p, true) | |
827 | #define assert_pch_pll_disabled(d, p) assert_pch_pll(d, p, false) | |
828 | ||
829 | static void assert_fdi_tx(struct drm_i915_private *dev_priv, | |
830 | enum pipe pipe, bool state) | |
831 | { | |
832 | int reg; | |
833 | u32 val; | |
834 | bool cur_state; | |
835 | ||
836 | reg = FDI_TX_CTL(pipe); | |
837 | val = I915_READ(reg); | |
838 | cur_state = !!(val & FDI_TX_ENABLE); | |
839 | WARN(cur_state != state, | |
840 | "FDI TX state assertion failure (expected %s, current %s)\n", | |
841 | state_string(state), state_string(cur_state)); | |
842 | } | |
843 | #define assert_fdi_tx_enabled(d, p) assert_fdi_tx(d, p, true) | |
844 | #define assert_fdi_tx_disabled(d, p) assert_fdi_tx(d, p, false) | |
845 | ||
846 | static void assert_fdi_rx(struct drm_i915_private *dev_priv, | |
847 | enum pipe pipe, bool state) | |
848 | { | |
849 | int reg; | |
850 | u32 val; | |
851 | bool cur_state; | |
852 | ||
853 | reg = FDI_RX_CTL(pipe); | |
854 | val = I915_READ(reg); | |
855 | cur_state = !!(val & FDI_RX_ENABLE); | |
856 | WARN(cur_state != state, | |
857 | "FDI RX state assertion failure (expected %s, current %s)\n", | |
858 | state_string(state), state_string(cur_state)); | |
859 | } | |
860 | #define assert_fdi_rx_enabled(d, p) assert_fdi_rx(d, p, true) | |
861 | #define assert_fdi_rx_disabled(d, p) assert_fdi_rx(d, p, false) | |
862 | ||
863 | static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv, | |
864 | enum pipe pipe) | |
865 | { | |
866 | int reg; | |
867 | u32 val; | |
868 | ||
869 | /* ILK FDI PLL is always enabled */ | |
870 | if (dev_priv->info->gen == 5) | |
871 | return; | |
872 | ||
873 | reg = FDI_TX_CTL(pipe); | |
874 | val = I915_READ(reg); | |
875 | WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n"); | |
876 | } | |
877 | ||
878 | static void assert_fdi_rx_pll_enabled(struct drm_i915_private *dev_priv, | |
879 | enum pipe pipe) | |
880 | { | |
881 | int reg; | |
882 | u32 val; | |
883 | ||
884 | reg = FDI_RX_CTL(pipe); | |
885 | val = I915_READ(reg); | |
886 | WARN(!(val & FDI_RX_PLL_ENABLE), "FDI RX PLL assertion failure, should be active but is disabled\n"); | |
887 | } | |
888 | ||
ea0760cf JB |
889 | static void assert_panel_unlocked(struct drm_i915_private *dev_priv, |
890 | enum pipe pipe) | |
891 | { | |
892 | int pp_reg, lvds_reg; | |
893 | u32 val; | |
894 | enum pipe panel_pipe = PIPE_A; | |
0de3b485 | 895 | bool locked = true; |
ea0760cf JB |
896 | |
897 | if (HAS_PCH_SPLIT(dev_priv->dev)) { | |
898 | pp_reg = PCH_PP_CONTROL; | |
899 | lvds_reg = PCH_LVDS; | |
900 | } else { | |
901 | pp_reg = PP_CONTROL; | |
902 | lvds_reg = LVDS; | |
903 | } | |
904 | ||
905 | val = I915_READ(pp_reg); | |
906 | if (!(val & PANEL_POWER_ON) || | |
907 | ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS)) | |
908 | locked = false; | |
909 | ||
910 | if (I915_READ(lvds_reg) & LVDS_PIPEB_SELECT) | |
911 | panel_pipe = PIPE_B; | |
912 | ||
913 | WARN(panel_pipe == pipe && locked, | |
914 | "panel assertion failure, pipe %c regs locked\n", | |
9db4a9c7 | 915 | pipe_name(pipe)); |
ea0760cf JB |
916 | } |
917 | ||
63d7bbe9 JB |
918 | static void assert_pipe(struct drm_i915_private *dev_priv, |
919 | enum pipe pipe, bool state) | |
b24e7179 JB |
920 | { |
921 | int reg; | |
922 | u32 val; | |
63d7bbe9 | 923 | bool cur_state; |
b24e7179 JB |
924 | |
925 | reg = PIPECONF(pipe); | |
926 | val = I915_READ(reg); | |
63d7bbe9 JB |
927 | cur_state = !!(val & PIPECONF_ENABLE); |
928 | WARN(cur_state != state, | |
929 | "pipe %c assertion failure (expected %s, current %s)\n", | |
9db4a9c7 | 930 | pipe_name(pipe), state_string(state), state_string(cur_state)); |
b24e7179 | 931 | } |
63d7bbe9 JB |
932 | #define assert_pipe_enabled(d, p) assert_pipe(d, p, true) |
933 | #define assert_pipe_disabled(d, p) assert_pipe(d, p, false) | |
b24e7179 JB |
934 | |
935 | static void assert_plane_enabled(struct drm_i915_private *dev_priv, | |
936 | enum plane plane) | |
937 | { | |
938 | int reg; | |
939 | u32 val; | |
940 | ||
941 | reg = DSPCNTR(plane); | |
942 | val = I915_READ(reg); | |
943 | WARN(!(val & DISPLAY_PLANE_ENABLE), | |
944 | "plane %c assertion failure, should be active but is disabled\n", | |
9db4a9c7 | 945 | plane_name(plane)); |
b24e7179 JB |
946 | } |
947 | ||
948 | static void assert_planes_disabled(struct drm_i915_private *dev_priv, | |
949 | enum pipe pipe) | |
950 | { | |
951 | int reg, i; | |
952 | u32 val; | |
953 | int cur_pipe; | |
954 | ||
19ec1358 JB |
955 | /* Planes are fixed to pipes on ILK+ */ |
956 | if (HAS_PCH_SPLIT(dev_priv->dev)) | |
957 | return; | |
958 | ||
b24e7179 JB |
959 | /* Need to check both planes against the pipe */ |
960 | for (i = 0; i < 2; i++) { | |
961 | reg = DSPCNTR(i); | |
962 | val = I915_READ(reg); | |
963 | cur_pipe = (val & DISPPLANE_SEL_PIPE_MASK) >> | |
964 | DISPPLANE_SEL_PIPE_SHIFT; | |
965 | WARN((val & DISPLAY_PLANE_ENABLE) && pipe == cur_pipe, | |
9db4a9c7 JB |
966 | "plane %c assertion failure, should be off on pipe %c but is still active\n", |
967 | plane_name(i), pipe_name(pipe)); | |
b24e7179 JB |
968 | } |
969 | } | |
970 | ||
92f2584a JB |
971 | static void assert_pch_refclk_enabled(struct drm_i915_private *dev_priv) |
972 | { | |
973 | u32 val; | |
974 | bool enabled; | |
975 | ||
976 | val = I915_READ(PCH_DREF_CONTROL); | |
977 | enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK | | |
978 | DREF_SUPERSPREAD_SOURCE_MASK)); | |
979 | WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n"); | |
980 | } | |
981 | ||
982 | static void assert_transcoder_disabled(struct drm_i915_private *dev_priv, | |
983 | enum pipe pipe) | |
984 | { | |
985 | int reg; | |
986 | u32 val; | |
987 | bool enabled; | |
988 | ||
989 | reg = TRANSCONF(pipe); | |
990 | val = I915_READ(reg); | |
991 | enabled = !!(val & TRANS_ENABLE); | |
9db4a9c7 JB |
992 | WARN(enabled, |
993 | "transcoder assertion failed, should be off on pipe %c but is still active\n", | |
994 | pipe_name(pipe)); | |
92f2584a JB |
995 | } |
996 | ||
4e634389 KP |
997 | static bool dp_pipe_enabled(struct drm_i915_private *dev_priv, |
998 | enum pipe pipe, u32 port_sel, u32 val) | |
f0575e92 KP |
999 | { |
1000 | if ((val & DP_PORT_EN) == 0) | |
1001 | return false; | |
1002 | ||
1003 | if (HAS_PCH_CPT(dev_priv->dev)) { | |
1004 | u32 trans_dp_ctl_reg = TRANS_DP_CTL(pipe); | |
1005 | u32 trans_dp_ctl = I915_READ(trans_dp_ctl_reg); | |
1006 | if ((trans_dp_ctl & TRANS_DP_PORT_SEL_MASK) != port_sel) | |
1007 | return false; | |
1008 | } else { | |
1009 | if ((val & DP_PIPE_MASK) != (pipe << 30)) | |
1010 | return false; | |
1011 | } | |
1012 | return true; | |
1013 | } | |
1014 | ||
1519b995 KP |
1015 | static bool hdmi_pipe_enabled(struct drm_i915_private *dev_priv, |
1016 | enum pipe pipe, u32 val) | |
1017 | { | |
1018 | if ((val & PORT_ENABLE) == 0) | |
1019 | return false; | |
1020 | ||
1021 | if (HAS_PCH_CPT(dev_priv->dev)) { | |
1022 | if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe)) | |
1023 | return false; | |
1024 | } else { | |
1025 | if ((val & TRANSCODER_MASK) != TRANSCODER(pipe)) | |
1026 | return false; | |
1027 | } | |
1028 | return true; | |
1029 | } | |
1030 | ||
1031 | static bool lvds_pipe_enabled(struct drm_i915_private *dev_priv, | |
1032 | enum pipe pipe, u32 val) | |
1033 | { | |
1034 | if ((val & LVDS_PORT_EN) == 0) | |
1035 | return false; | |
1036 | ||
1037 | if (HAS_PCH_CPT(dev_priv->dev)) { | |
1038 | if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe)) | |
1039 | return false; | |
1040 | } else { | |
1041 | if ((val & LVDS_PIPE_MASK) != LVDS_PIPE(pipe)) | |
1042 | return false; | |
1043 | } | |
1044 | return true; | |
1045 | } | |
1046 | ||
1047 | static bool adpa_pipe_enabled(struct drm_i915_private *dev_priv, | |
1048 | enum pipe pipe, u32 val) | |
1049 | { | |
1050 | if ((val & ADPA_DAC_ENABLE) == 0) | |
1051 | return false; | |
1052 | if (HAS_PCH_CPT(dev_priv->dev)) { | |
1053 | if ((val & PORT_TRANS_SEL_MASK) != PORT_TRANS_SEL_CPT(pipe)) | |
1054 | return false; | |
1055 | } else { | |
1056 | if ((val & ADPA_PIPE_SELECT_MASK) != ADPA_PIPE_SELECT(pipe)) | |
1057 | return false; | |
1058 | } | |
1059 | return true; | |
1060 | } | |
1061 | ||
291906f1 | 1062 | static void assert_pch_dp_disabled(struct drm_i915_private *dev_priv, |
f0575e92 | 1063 | enum pipe pipe, int reg, u32 port_sel) |
291906f1 | 1064 | { |
47a05eca | 1065 | u32 val = I915_READ(reg); |
4e634389 | 1066 | WARN(dp_pipe_enabled(dev_priv, pipe, port_sel, val), |
291906f1 | 1067 | "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n", |
9db4a9c7 | 1068 | reg, pipe_name(pipe)); |
291906f1 JB |
1069 | } |
1070 | ||
1071 | static void assert_pch_hdmi_disabled(struct drm_i915_private *dev_priv, | |
1072 | enum pipe pipe, int reg) | |
1073 | { | |
47a05eca | 1074 | u32 val = I915_READ(reg); |
1519b995 | 1075 | WARN(hdmi_pipe_enabled(dev_priv, val, pipe), |
291906f1 | 1076 | "PCH DP (0x%08x) enabled on transcoder %c, should be disabled\n", |
9db4a9c7 | 1077 | reg, pipe_name(pipe)); |
291906f1 JB |
1078 | } |
1079 | ||
1080 | static void assert_pch_ports_disabled(struct drm_i915_private *dev_priv, | |
1081 | enum pipe pipe) | |
1082 | { | |
1083 | int reg; | |
1084 | u32 val; | |
291906f1 | 1085 | |
f0575e92 KP |
1086 | assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B); |
1087 | assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C); | |
1088 | assert_pch_dp_disabled(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D); | |
291906f1 JB |
1089 | |
1090 | reg = PCH_ADPA; | |
1091 | val = I915_READ(reg); | |
1519b995 | 1092 | WARN(adpa_pipe_enabled(dev_priv, val, pipe), |
291906f1 | 1093 | "PCH VGA enabled on transcoder %c, should be disabled\n", |
9db4a9c7 | 1094 | pipe_name(pipe)); |
291906f1 JB |
1095 | |
1096 | reg = PCH_LVDS; | |
1097 | val = I915_READ(reg); | |
1519b995 | 1098 | WARN(lvds_pipe_enabled(dev_priv, val, pipe), |
291906f1 | 1099 | "PCH LVDS enabled on transcoder %c, should be disabled\n", |
9db4a9c7 | 1100 | pipe_name(pipe)); |
291906f1 JB |
1101 | |
1102 | assert_pch_hdmi_disabled(dev_priv, pipe, HDMIB); | |
1103 | assert_pch_hdmi_disabled(dev_priv, pipe, HDMIC); | |
1104 | assert_pch_hdmi_disabled(dev_priv, pipe, HDMID); | |
1105 | } | |
1106 | ||
63d7bbe9 JB |
1107 | /** |
1108 | * intel_enable_pll - enable a PLL | |
1109 | * @dev_priv: i915 private structure | |
1110 | * @pipe: pipe PLL to enable | |
1111 | * | |
1112 | * Enable @pipe's PLL so we can start pumping pixels from a plane. Check to | |
1113 | * make sure the PLL reg is writable first though, since the panel write | |
1114 | * protect mechanism may be enabled. | |
1115 | * | |
1116 | * Note! This is for pre-ILK only. | |
1117 | */ | |
1118 | static void intel_enable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) | |
1119 | { | |
1120 | int reg; | |
1121 | u32 val; | |
1122 | ||
1123 | /* No really, not for ILK+ */ | |
1124 | BUG_ON(dev_priv->info->gen >= 5); | |
1125 | ||
1126 | /* PLL is protected by panel, make sure we can write it */ | |
1127 | if (IS_MOBILE(dev_priv->dev) && !IS_I830(dev_priv->dev)) | |
1128 | assert_panel_unlocked(dev_priv, pipe); | |
1129 | ||
1130 | reg = DPLL(pipe); | |
1131 | val = I915_READ(reg); | |
1132 | val |= DPLL_VCO_ENABLE; | |
1133 | ||
1134 | /* We do this three times for luck */ | |
1135 | I915_WRITE(reg, val); | |
1136 | POSTING_READ(reg); | |
1137 | udelay(150); /* wait for warmup */ | |
1138 | I915_WRITE(reg, val); | |
1139 | POSTING_READ(reg); | |
1140 | udelay(150); /* wait for warmup */ | |
1141 | I915_WRITE(reg, val); | |
1142 | POSTING_READ(reg); | |
1143 | udelay(150); /* wait for warmup */ | |
1144 | } | |
1145 | ||
1146 | /** | |
1147 | * intel_disable_pll - disable a PLL | |
1148 | * @dev_priv: i915 private structure | |
1149 | * @pipe: pipe PLL to disable | |
1150 | * | |
1151 | * Disable the PLL for @pipe, making sure the pipe is off first. | |
1152 | * | |
1153 | * Note! This is for pre-ILK only. | |
1154 | */ | |
1155 | static void intel_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe) | |
1156 | { | |
1157 | int reg; | |
1158 | u32 val; | |
1159 | ||
1160 | /* Don't disable pipe A or pipe A PLLs if needed */ | |
1161 | if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE)) | |
1162 | return; | |
1163 | ||
1164 | /* Make sure the pipe isn't still relying on us */ | |
1165 | assert_pipe_disabled(dev_priv, pipe); | |
1166 | ||
1167 | reg = DPLL(pipe); | |
1168 | val = I915_READ(reg); | |
1169 | val &= ~DPLL_VCO_ENABLE; | |
1170 | I915_WRITE(reg, val); | |
1171 | POSTING_READ(reg); | |
1172 | } | |
1173 | ||
92f2584a JB |
1174 | /** |
1175 | * intel_enable_pch_pll - enable PCH PLL | |
1176 | * @dev_priv: i915 private structure | |
1177 | * @pipe: pipe PLL to enable | |
1178 | * | |
1179 | * The PCH PLL needs to be enabled before the PCH transcoder, since it | |
1180 | * drives the transcoder clock. | |
1181 | */ | |
1182 | static void intel_enable_pch_pll(struct drm_i915_private *dev_priv, | |
1183 | enum pipe pipe) | |
1184 | { | |
1185 | int reg; | |
1186 | u32 val; | |
1187 | ||
4c609cb8 JB |
1188 | if (pipe > 1) |
1189 | return; | |
1190 | ||
92f2584a JB |
1191 | /* PCH only available on ILK+ */ |
1192 | BUG_ON(dev_priv->info->gen < 5); | |
1193 | ||
1194 | /* PCH refclock must be enabled first */ | |
1195 | assert_pch_refclk_enabled(dev_priv); | |
1196 | ||
1197 | reg = PCH_DPLL(pipe); | |
1198 | val = I915_READ(reg); | |
1199 | val |= DPLL_VCO_ENABLE; | |
1200 | I915_WRITE(reg, val); | |
1201 | POSTING_READ(reg); | |
1202 | udelay(200); | |
1203 | } | |
1204 | ||
1205 | static void intel_disable_pch_pll(struct drm_i915_private *dev_priv, | |
1206 | enum pipe pipe) | |
1207 | { | |
1208 | int reg; | |
1209 | u32 val; | |
1210 | ||
4c609cb8 JB |
1211 | if (pipe > 1) |
1212 | return; | |
1213 | ||
92f2584a JB |
1214 | /* PCH only available on ILK+ */ |
1215 | BUG_ON(dev_priv->info->gen < 5); | |
1216 | ||
1217 | /* Make sure transcoder isn't still depending on us */ | |
1218 | assert_transcoder_disabled(dev_priv, pipe); | |
1219 | ||
1220 | reg = PCH_DPLL(pipe); | |
1221 | val = I915_READ(reg); | |
1222 | val &= ~DPLL_VCO_ENABLE; | |
1223 | I915_WRITE(reg, val); | |
1224 | POSTING_READ(reg); | |
1225 | udelay(200); | |
1226 | } | |
1227 | ||
040484af JB |
1228 | static void intel_enable_transcoder(struct drm_i915_private *dev_priv, |
1229 | enum pipe pipe) | |
1230 | { | |
1231 | int reg; | |
1232 | u32 val; | |
1233 | ||
1234 | /* PCH only available on ILK+ */ | |
1235 | BUG_ON(dev_priv->info->gen < 5); | |
1236 | ||
1237 | /* Make sure PCH DPLL is enabled */ | |
1238 | assert_pch_pll_enabled(dev_priv, pipe); | |
1239 | ||
1240 | /* FDI must be feeding us bits for PCH ports */ | |
1241 | assert_fdi_tx_enabled(dev_priv, pipe); | |
1242 | assert_fdi_rx_enabled(dev_priv, pipe); | |
1243 | ||
1244 | reg = TRANSCONF(pipe); | |
1245 | val = I915_READ(reg); | |
e9bcff5c JB |
1246 | |
1247 | if (HAS_PCH_IBX(dev_priv->dev)) { | |
1248 | /* | |
1249 | * make the BPC in transcoder be consistent with | |
1250 | * that in pipeconf reg. | |
1251 | */ | |
1252 | val &= ~PIPE_BPC_MASK; | |
1253 | val |= I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK; | |
1254 | } | |
040484af JB |
1255 | I915_WRITE(reg, val | TRANS_ENABLE); |
1256 | if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100)) | |
1257 | DRM_ERROR("failed to enable transcoder %d\n", pipe); | |
1258 | } | |
1259 | ||
1260 | static void intel_disable_transcoder(struct drm_i915_private *dev_priv, | |
1261 | enum pipe pipe) | |
1262 | { | |
1263 | int reg; | |
1264 | u32 val; | |
1265 | ||
1266 | /* FDI relies on the transcoder */ | |
1267 | assert_fdi_tx_disabled(dev_priv, pipe); | |
1268 | assert_fdi_rx_disabled(dev_priv, pipe); | |
1269 | ||
291906f1 JB |
1270 | /* Ports must be off as well */ |
1271 | assert_pch_ports_disabled(dev_priv, pipe); | |
1272 | ||
040484af JB |
1273 | reg = TRANSCONF(pipe); |
1274 | val = I915_READ(reg); | |
1275 | val &= ~TRANS_ENABLE; | |
1276 | I915_WRITE(reg, val); | |
1277 | /* wait for PCH transcoder off, transcoder state */ | |
1278 | if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50)) | |
4c9c18c2 | 1279 | DRM_ERROR("failed to disable transcoder %d\n", pipe); |
040484af JB |
1280 | } |
1281 | ||
b24e7179 | 1282 | /** |
309cfea8 | 1283 | * intel_enable_pipe - enable a pipe, asserting requirements |
b24e7179 JB |
1284 | * @dev_priv: i915 private structure |
1285 | * @pipe: pipe to enable | |
040484af | 1286 | * @pch_port: on ILK+, is this pipe driving a PCH port or not |
b24e7179 JB |
1287 | * |
1288 | * Enable @pipe, making sure that various hardware specific requirements | |
1289 | * are met, if applicable, e.g. PLL enabled, LVDS pairs enabled, etc. | |
1290 | * | |
1291 | * @pipe should be %PIPE_A or %PIPE_B. | |
1292 | * | |
1293 | * Will wait until the pipe is actually running (i.e. first vblank) before | |
1294 | * returning. | |
1295 | */ | |
040484af JB |
1296 | static void intel_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, |
1297 | bool pch_port) | |
b24e7179 JB |
1298 | { |
1299 | int reg; | |
1300 | u32 val; | |
1301 | ||
1302 | /* | |
1303 | * A pipe without a PLL won't actually be able to drive bits from | |
1304 | * a plane. On ILK+ the pipe PLLs are integrated, so we don't | |
1305 | * need the check. | |
1306 | */ | |
1307 | if (!HAS_PCH_SPLIT(dev_priv->dev)) | |
1308 | assert_pll_enabled(dev_priv, pipe); | |
040484af JB |
1309 | else { |
1310 | if (pch_port) { | |
1311 | /* if driving the PCH, we need FDI enabled */ | |
1312 | assert_fdi_rx_pll_enabled(dev_priv, pipe); | |
1313 | assert_fdi_tx_pll_enabled(dev_priv, pipe); | |
1314 | } | |
1315 | /* FIXME: assert CPU port conditions for SNB+ */ | |
1316 | } | |
b24e7179 JB |
1317 | |
1318 | reg = PIPECONF(pipe); | |
1319 | val = I915_READ(reg); | |
00d70b15 CW |
1320 | if (val & PIPECONF_ENABLE) |
1321 | return; | |
1322 | ||
1323 | I915_WRITE(reg, val | PIPECONF_ENABLE); | |
b24e7179 JB |
1324 | intel_wait_for_vblank(dev_priv->dev, pipe); |
1325 | } | |
1326 | ||
1327 | /** | |
309cfea8 | 1328 | * intel_disable_pipe - disable a pipe, asserting requirements |
b24e7179 JB |
1329 | * @dev_priv: i915 private structure |
1330 | * @pipe: pipe to disable | |
1331 | * | |
1332 | * Disable @pipe, making sure that various hardware specific requirements | |
1333 | * are met, if applicable, e.g. plane disabled, panel fitter off, etc. | |
1334 | * | |
1335 | * @pipe should be %PIPE_A or %PIPE_B. | |
1336 | * | |
1337 | * Will wait until the pipe has shut down before returning. | |
1338 | */ | |
1339 | static void intel_disable_pipe(struct drm_i915_private *dev_priv, | |
1340 | enum pipe pipe) | |
1341 | { | |
1342 | int reg; | |
1343 | u32 val; | |
1344 | ||
1345 | /* | |
1346 | * Make sure planes won't keep trying to pump pixels to us, | |
1347 | * or we might hang the display. | |
1348 | */ | |
1349 | assert_planes_disabled(dev_priv, pipe); | |
1350 | ||
1351 | /* Don't disable pipe A or pipe A PLLs if needed */ | |
1352 | if (pipe == PIPE_A && (dev_priv->quirks & QUIRK_PIPEA_FORCE)) | |
1353 | return; | |
1354 | ||
1355 | reg = PIPECONF(pipe); | |
1356 | val = I915_READ(reg); | |
00d70b15 CW |
1357 | if ((val & PIPECONF_ENABLE) == 0) |
1358 | return; | |
1359 | ||
1360 | I915_WRITE(reg, val & ~PIPECONF_ENABLE); | |
b24e7179 JB |
1361 | intel_wait_for_pipe_off(dev_priv->dev, pipe); |
1362 | } | |
1363 | ||
d74362c9 KP |
1364 | /* |
1365 | * Plane regs are double buffered, going from enabled->disabled needs a | |
1366 | * trigger in order to latch. The display address reg provides this. | |
1367 | */ | |
1368 | static void intel_flush_display_plane(struct drm_i915_private *dev_priv, | |
1369 | enum plane plane) | |
1370 | { | |
1371 | I915_WRITE(DSPADDR(plane), I915_READ(DSPADDR(plane))); | |
1372 | I915_WRITE(DSPSURF(plane), I915_READ(DSPSURF(plane))); | |
1373 | } | |
1374 | ||
b24e7179 JB |
1375 | /** |
1376 | * intel_enable_plane - enable a display plane on a given pipe | |
1377 | * @dev_priv: i915 private structure | |
1378 | * @plane: plane to enable | |
1379 | * @pipe: pipe being fed | |
1380 | * | |
1381 | * Enable @plane on @pipe, making sure that @pipe is running first. | |
1382 | */ | |
1383 | static void intel_enable_plane(struct drm_i915_private *dev_priv, | |
1384 | enum plane plane, enum pipe pipe) | |
1385 | { | |
1386 | int reg; | |
1387 | u32 val; | |
1388 | ||
1389 | /* If the pipe isn't enabled, we can't pump pixels and may hang */ | |
1390 | assert_pipe_enabled(dev_priv, pipe); | |
1391 | ||
1392 | reg = DSPCNTR(plane); | |
1393 | val = I915_READ(reg); | |
00d70b15 CW |
1394 | if (val & DISPLAY_PLANE_ENABLE) |
1395 | return; | |
1396 | ||
1397 | I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE); | |
d74362c9 | 1398 | intel_flush_display_plane(dev_priv, plane); |
b24e7179 JB |
1399 | intel_wait_for_vblank(dev_priv->dev, pipe); |
1400 | } | |
1401 | ||
b24e7179 JB |
1402 | /** |
1403 | * intel_disable_plane - disable a display plane | |
1404 | * @dev_priv: i915 private structure | |
1405 | * @plane: plane to disable | |
1406 | * @pipe: pipe consuming the data | |
1407 | * | |
1408 | * Disable @plane; should be an independent operation. | |
1409 | */ | |
1410 | static void intel_disable_plane(struct drm_i915_private *dev_priv, | |
1411 | enum plane plane, enum pipe pipe) | |
1412 | { | |
1413 | int reg; | |
1414 | u32 val; | |
1415 | ||
1416 | reg = DSPCNTR(plane); | |
1417 | val = I915_READ(reg); | |
00d70b15 CW |
1418 | if ((val & DISPLAY_PLANE_ENABLE) == 0) |
1419 | return; | |
1420 | ||
1421 | I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE); | |
b24e7179 JB |
1422 | intel_flush_display_plane(dev_priv, plane); |
1423 | intel_wait_for_vblank(dev_priv->dev, pipe); | |
1424 | } | |
1425 | ||
47a05eca | 1426 | static void disable_pch_dp(struct drm_i915_private *dev_priv, |
f0575e92 | 1427 | enum pipe pipe, int reg, u32 port_sel) |
47a05eca JB |
1428 | { |
1429 | u32 val = I915_READ(reg); | |
4e634389 | 1430 | if (dp_pipe_enabled(dev_priv, pipe, port_sel, val)) { |
f0575e92 | 1431 | DRM_DEBUG_KMS("Disabling pch dp %x on pipe %d\n", reg, pipe); |
47a05eca | 1432 | I915_WRITE(reg, val & ~DP_PORT_EN); |
f0575e92 | 1433 | } |
47a05eca JB |
1434 | } |
1435 | ||
1436 | static void disable_pch_hdmi(struct drm_i915_private *dev_priv, | |
1437 | enum pipe pipe, int reg) | |
1438 | { | |
1439 | u32 val = I915_READ(reg); | |
1519b995 | 1440 | if (hdmi_pipe_enabled(dev_priv, val, pipe)) { |
f0575e92 KP |
1441 | DRM_DEBUG_KMS("Disabling pch HDMI %x on pipe %d\n", |
1442 | reg, pipe); | |
47a05eca | 1443 | I915_WRITE(reg, val & ~PORT_ENABLE); |
f0575e92 | 1444 | } |
47a05eca JB |
1445 | } |
1446 | ||
1447 | /* Disable any ports connected to this transcoder */ | |
1448 | static void intel_disable_pch_ports(struct drm_i915_private *dev_priv, | |
1449 | enum pipe pipe) | |
1450 | { | |
1451 | u32 reg, val; | |
1452 | ||
1453 | val = I915_READ(PCH_PP_CONTROL); | |
1454 | I915_WRITE(PCH_PP_CONTROL, val | PANEL_UNLOCK_REGS); | |
1455 | ||
f0575e92 KP |
1456 | disable_pch_dp(dev_priv, pipe, PCH_DP_B, TRANS_DP_PORT_SEL_B); |
1457 | disable_pch_dp(dev_priv, pipe, PCH_DP_C, TRANS_DP_PORT_SEL_C); | |
1458 | disable_pch_dp(dev_priv, pipe, PCH_DP_D, TRANS_DP_PORT_SEL_D); | |
47a05eca JB |
1459 | |
1460 | reg = PCH_ADPA; | |
1461 | val = I915_READ(reg); | |
1519b995 | 1462 | if (adpa_pipe_enabled(dev_priv, val, pipe)) |
47a05eca JB |
1463 | I915_WRITE(reg, val & ~ADPA_DAC_ENABLE); |
1464 | ||
1465 | reg = PCH_LVDS; | |
1466 | val = I915_READ(reg); | |
1519b995 KP |
1467 | if (lvds_pipe_enabled(dev_priv, val, pipe)) { |
1468 | DRM_DEBUG_KMS("disable lvds on pipe %d val 0x%08x\n", pipe, val); | |
47a05eca JB |
1469 | I915_WRITE(reg, val & ~LVDS_PORT_EN); |
1470 | POSTING_READ(reg); | |
1471 | udelay(100); | |
1472 | } | |
1473 | ||
1474 | disable_pch_hdmi(dev_priv, pipe, HDMIB); | |
1475 | disable_pch_hdmi(dev_priv, pipe, HDMIC); | |
1476 | disable_pch_hdmi(dev_priv, pipe, HDMID); | |
1477 | } | |
1478 | ||
43a9539f CW |
1479 | static void i8xx_disable_fbc(struct drm_device *dev) |
1480 | { | |
1481 | struct drm_i915_private *dev_priv = dev->dev_private; | |
1482 | u32 fbc_ctl; | |
1483 | ||
1484 | /* Disable compression */ | |
1485 | fbc_ctl = I915_READ(FBC_CONTROL); | |
1486 | if ((fbc_ctl & FBC_CTL_EN) == 0) | |
1487 | return; | |
1488 | ||
1489 | fbc_ctl &= ~FBC_CTL_EN; | |
1490 | I915_WRITE(FBC_CONTROL, fbc_ctl); | |
1491 | ||
1492 | /* Wait for compressing bit to clear */ | |
1493 | if (wait_for((I915_READ(FBC_STATUS) & FBC_STAT_COMPRESSING) == 0, 10)) { | |
1494 | DRM_DEBUG_KMS("FBC idle timed out\n"); | |
1495 | return; | |
1496 | } | |
1497 | ||
1498 | DRM_DEBUG_KMS("disabled FBC\n"); | |
1499 | } | |
1500 | ||
80824003 JB |
1501 | static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval) |
1502 | { | |
1503 | struct drm_device *dev = crtc->dev; | |
1504 | struct drm_i915_private *dev_priv = dev->dev_private; | |
1505 | struct drm_framebuffer *fb = crtc->fb; | |
1506 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); | |
05394f39 | 1507 | struct drm_i915_gem_object *obj = intel_fb->obj; |
80824003 | 1508 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
016b9b61 | 1509 | int cfb_pitch; |
80824003 JB |
1510 | int plane, i; |
1511 | u32 fbc_ctl, fbc_ctl2; | |
1512 | ||
016b9b61 CW |
1513 | cfb_pitch = dev_priv->cfb_size / FBC_LL_SIZE; |
1514 | if (fb->pitch < cfb_pitch) | |
1515 | cfb_pitch = fb->pitch; | |
80824003 JB |
1516 | |
1517 | /* FBC_CTL wants 64B units */ | |
016b9b61 CW |
1518 | cfb_pitch = (cfb_pitch / 64) - 1; |
1519 | plane = intel_crtc->plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB; | |
80824003 JB |
1520 | |
1521 | /* Clear old tags */ | |
1522 | for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++) | |
1523 | I915_WRITE(FBC_TAG + (i * 4), 0); | |
1524 | ||
1525 | /* Set it up... */ | |
de568510 CW |
1526 | fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | FBC_CTL_CPU_FENCE; |
1527 | fbc_ctl2 |= plane; | |
80824003 JB |
1528 | I915_WRITE(FBC_CONTROL2, fbc_ctl2); |
1529 | I915_WRITE(FBC_FENCE_OFF, crtc->y); | |
1530 | ||
1531 | /* enable it... */ | |
1532 | fbc_ctl = FBC_CTL_EN | FBC_CTL_PERIODIC; | |
ee25df2b | 1533 | if (IS_I945GM(dev)) |
49677901 | 1534 | fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */ |
016b9b61 | 1535 | fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT; |
80824003 | 1536 | fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT; |
016b9b61 | 1537 | fbc_ctl |= obj->fence_reg; |
80824003 JB |
1538 | I915_WRITE(FBC_CONTROL, fbc_ctl); |
1539 | ||
016b9b61 CW |
1540 | DRM_DEBUG_KMS("enabled FBC, pitch %d, yoff %d, plane %d, ", |
1541 | cfb_pitch, crtc->y, intel_crtc->plane); | |
80824003 JB |
1542 | } |
1543 | ||
ee5382ae | 1544 | static bool i8xx_fbc_enabled(struct drm_device *dev) |
80824003 | 1545 | { |
80824003 JB |
1546 | struct drm_i915_private *dev_priv = dev->dev_private; |
1547 | ||
1548 | return I915_READ(FBC_CONTROL) & FBC_CTL_EN; | |
1549 | } | |
1550 | ||
74dff282 JB |
1551 | static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval) |
1552 | { | |
1553 | struct drm_device *dev = crtc->dev; | |
1554 | struct drm_i915_private *dev_priv = dev->dev_private; | |
1555 | struct drm_framebuffer *fb = crtc->fb; | |
1556 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); | |
05394f39 | 1557 | struct drm_i915_gem_object *obj = intel_fb->obj; |
74dff282 | 1558 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
5eddb70b | 1559 | int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB; |
74dff282 JB |
1560 | unsigned long stall_watermark = 200; |
1561 | u32 dpfc_ctl; | |
1562 | ||
74dff282 | 1563 | dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X; |
016b9b61 | 1564 | dpfc_ctl |= DPFC_CTL_FENCE_EN | obj->fence_reg; |
de568510 | 1565 | I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY); |
74dff282 | 1566 | |
74dff282 JB |
1567 | I915_WRITE(DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN | |
1568 | (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) | | |
1569 | (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT)); | |
1570 | I915_WRITE(DPFC_FENCE_YOFF, crtc->y); | |
1571 | ||
1572 | /* enable it... */ | |
1573 | I915_WRITE(DPFC_CONTROL, I915_READ(DPFC_CONTROL) | DPFC_CTL_EN); | |
1574 | ||
28c97730 | 1575 | DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane); |
74dff282 JB |
1576 | } |
1577 | ||
43a9539f | 1578 | static void g4x_disable_fbc(struct drm_device *dev) |
74dff282 JB |
1579 | { |
1580 | struct drm_i915_private *dev_priv = dev->dev_private; | |
1581 | u32 dpfc_ctl; | |
1582 | ||
1583 | /* Disable compression */ | |
1584 | dpfc_ctl = I915_READ(DPFC_CONTROL); | |
bed4a673 CW |
1585 | if (dpfc_ctl & DPFC_CTL_EN) { |
1586 | dpfc_ctl &= ~DPFC_CTL_EN; | |
1587 | I915_WRITE(DPFC_CONTROL, dpfc_ctl); | |
74dff282 | 1588 | |
bed4a673 CW |
1589 | DRM_DEBUG_KMS("disabled FBC\n"); |
1590 | } | |
74dff282 JB |
1591 | } |
1592 | ||
ee5382ae | 1593 | static bool g4x_fbc_enabled(struct drm_device *dev) |
74dff282 | 1594 | { |
74dff282 JB |
1595 | struct drm_i915_private *dev_priv = dev->dev_private; |
1596 | ||
1597 | return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN; | |
1598 | } | |
1599 | ||
4efe0708 JB |
1600 | static void sandybridge_blit_fbc_update(struct drm_device *dev) |
1601 | { | |
1602 | struct drm_i915_private *dev_priv = dev->dev_private; | |
1603 | u32 blt_ecoskpd; | |
1604 | ||
1605 | /* Make sure blitter notifies FBC of writes */ | |
fcca7926 | 1606 | gen6_gt_force_wake_get(dev_priv); |
4efe0708 JB |
1607 | blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD); |
1608 | blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY << | |
1609 | GEN6_BLITTER_LOCK_SHIFT; | |
1610 | I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd); | |
1611 | blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY; | |
1612 | I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd); | |
1613 | blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY << | |
1614 | GEN6_BLITTER_LOCK_SHIFT); | |
1615 | I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd); | |
1616 | POSTING_READ(GEN6_BLITTER_ECOSKPD); | |
fcca7926 | 1617 | gen6_gt_force_wake_put(dev_priv); |
4efe0708 JB |
1618 | } |
1619 | ||
b52eb4dc ZY |
1620 | static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval) |
1621 | { | |
1622 | struct drm_device *dev = crtc->dev; | |
1623 | struct drm_i915_private *dev_priv = dev->dev_private; | |
1624 | struct drm_framebuffer *fb = crtc->fb; | |
1625 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); | |
05394f39 | 1626 | struct drm_i915_gem_object *obj = intel_fb->obj; |
b52eb4dc | 1627 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
5eddb70b | 1628 | int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB; |
b52eb4dc ZY |
1629 | unsigned long stall_watermark = 200; |
1630 | u32 dpfc_ctl; | |
1631 | ||
bed4a673 | 1632 | dpfc_ctl = I915_READ(ILK_DPFC_CONTROL); |
b52eb4dc ZY |
1633 | dpfc_ctl &= DPFC_RESERVED; |
1634 | dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X); | |
9ce9d069 CW |
1635 | /* Set persistent mode for front-buffer rendering, ala X. */ |
1636 | dpfc_ctl |= DPFC_CTL_PERSISTENT_MODE; | |
016b9b61 | 1637 | dpfc_ctl |= (DPFC_CTL_FENCE_EN | obj->fence_reg); |
de568510 | 1638 | I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY); |
b52eb4dc | 1639 | |
b52eb4dc ZY |
1640 | I915_WRITE(ILK_DPFC_RECOMP_CTL, DPFC_RECOMP_STALL_EN | |
1641 | (stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) | | |
1642 | (interval << DPFC_RECOMP_TIMER_COUNT_SHIFT)); | |
1643 | I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y); | |
05394f39 | 1644 | I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID); |
b52eb4dc | 1645 | /* enable it... */ |
bed4a673 | 1646 | I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN); |
b52eb4dc | 1647 | |
9c04f015 YL |
1648 | if (IS_GEN6(dev)) { |
1649 | I915_WRITE(SNB_DPFC_CTL_SA, | |
016b9b61 | 1650 | SNB_CPU_FENCE_ENABLE | obj->fence_reg); |
9c04f015 | 1651 | I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y); |
4efe0708 | 1652 | sandybridge_blit_fbc_update(dev); |
9c04f015 YL |
1653 | } |
1654 | ||
b52eb4dc ZY |
1655 | DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane); |
1656 | } | |
1657 | ||
43a9539f | 1658 | static void ironlake_disable_fbc(struct drm_device *dev) |
b52eb4dc ZY |
1659 | { |
1660 | struct drm_i915_private *dev_priv = dev->dev_private; | |
1661 | u32 dpfc_ctl; | |
1662 | ||
1663 | /* Disable compression */ | |
1664 | dpfc_ctl = I915_READ(ILK_DPFC_CONTROL); | |
bed4a673 CW |
1665 | if (dpfc_ctl & DPFC_CTL_EN) { |
1666 | dpfc_ctl &= ~DPFC_CTL_EN; | |
1667 | I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl); | |
b52eb4dc | 1668 | |
bed4a673 CW |
1669 | DRM_DEBUG_KMS("disabled FBC\n"); |
1670 | } | |
b52eb4dc ZY |
1671 | } |
1672 | ||
1673 | static bool ironlake_fbc_enabled(struct drm_device *dev) | |
1674 | { | |
1675 | struct drm_i915_private *dev_priv = dev->dev_private; | |
1676 | ||
1677 | return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN; | |
1678 | } | |
1679 | ||
ee5382ae AJ |
1680 | bool intel_fbc_enabled(struct drm_device *dev) |
1681 | { | |
1682 | struct drm_i915_private *dev_priv = dev->dev_private; | |
1683 | ||
1684 | if (!dev_priv->display.fbc_enabled) | |
1685 | return false; | |
1686 | ||
1687 | return dev_priv->display.fbc_enabled(dev); | |
1688 | } | |
1689 | ||
1630fe75 CW |
1690 | static void intel_fbc_work_fn(struct work_struct *__work) |
1691 | { | |
1692 | struct intel_fbc_work *work = | |
1693 | container_of(to_delayed_work(__work), | |
1694 | struct intel_fbc_work, work); | |
1695 | struct drm_device *dev = work->crtc->dev; | |
1696 | struct drm_i915_private *dev_priv = dev->dev_private; | |
1697 | ||
1698 | mutex_lock(&dev->struct_mutex); | |
1699 | if (work == dev_priv->fbc_work) { | |
1700 | /* Double check that we haven't switched fb without cancelling | |
1701 | * the prior work. | |
1702 | */ | |
016b9b61 | 1703 | if (work->crtc->fb == work->fb) { |
1630fe75 CW |
1704 | dev_priv->display.enable_fbc(work->crtc, |
1705 | work->interval); | |
1706 | ||
016b9b61 CW |
1707 | dev_priv->cfb_plane = to_intel_crtc(work->crtc)->plane; |
1708 | dev_priv->cfb_fb = work->crtc->fb->base.id; | |
1709 | dev_priv->cfb_y = work->crtc->y; | |
1710 | } | |
1711 | ||
1630fe75 CW |
1712 | dev_priv->fbc_work = NULL; |
1713 | } | |
1714 | mutex_unlock(&dev->struct_mutex); | |
1715 | ||
1716 | kfree(work); | |
1717 | } | |
1718 | ||
1719 | static void intel_cancel_fbc_work(struct drm_i915_private *dev_priv) | |
1720 | { | |
1721 | if (dev_priv->fbc_work == NULL) | |
1722 | return; | |
1723 | ||
1724 | DRM_DEBUG_KMS("cancelling pending FBC enable\n"); | |
1725 | ||
1726 | /* Synchronisation is provided by struct_mutex and checking of | |
1727 | * dev_priv->fbc_work, so we can perform the cancellation | |
1728 | * entirely asynchronously. | |
1729 | */ | |
1730 | if (cancel_delayed_work(&dev_priv->fbc_work->work)) | |
1731 | /* tasklet was killed before being run, clean up */ | |
1732 | kfree(dev_priv->fbc_work); | |
1733 | ||
1734 | /* Mark the work as no longer wanted so that if it does | |
1735 | * wake-up (because the work was already running and waiting | |
1736 | * for our mutex), it will discover that is no longer | |
1737 | * necessary to run. | |
1738 | */ | |
1739 | dev_priv->fbc_work = NULL; | |
1740 | } | |
1741 | ||
43a9539f | 1742 | static void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval) |
ee5382ae | 1743 | { |
1630fe75 CW |
1744 | struct intel_fbc_work *work; |
1745 | struct drm_device *dev = crtc->dev; | |
1746 | struct drm_i915_private *dev_priv = dev->dev_private; | |
ee5382ae AJ |
1747 | |
1748 | if (!dev_priv->display.enable_fbc) | |
1749 | return; | |
1750 | ||
1630fe75 CW |
1751 | intel_cancel_fbc_work(dev_priv); |
1752 | ||
1753 | work = kzalloc(sizeof *work, GFP_KERNEL); | |
1754 | if (work == NULL) { | |
1755 | dev_priv->display.enable_fbc(crtc, interval); | |
1756 | return; | |
1757 | } | |
1758 | ||
1759 | work->crtc = crtc; | |
1760 | work->fb = crtc->fb; | |
1761 | work->interval = interval; | |
1762 | INIT_DELAYED_WORK(&work->work, intel_fbc_work_fn); | |
1763 | ||
1764 | dev_priv->fbc_work = work; | |
1765 | ||
1766 | DRM_DEBUG_KMS("scheduling delayed FBC enable\n"); | |
1767 | ||
1768 | /* Delay the actual enabling to let pageflipping cease and the | |
016b9b61 CW |
1769 | * display to settle before starting the compression. Note that |
1770 | * this delay also serves a second purpose: it allows for a | |
1771 | * vblank to pass after disabling the FBC before we attempt | |
1772 | * to modify the control registers. | |
1630fe75 CW |
1773 | * |
1774 | * A more complicated solution would involve tracking vblanks | |
1775 | * following the termination of the page-flipping sequence | |
1776 | * and indeed performing the enable as a co-routine and not | |
1777 | * waiting synchronously upon the vblank. | |
1778 | */ | |
1779 | schedule_delayed_work(&work->work, msecs_to_jiffies(50)); | |
ee5382ae AJ |
1780 | } |
1781 | ||
1782 | void intel_disable_fbc(struct drm_device *dev) | |
1783 | { | |
1784 | struct drm_i915_private *dev_priv = dev->dev_private; | |
1785 | ||
1630fe75 CW |
1786 | intel_cancel_fbc_work(dev_priv); |
1787 | ||
ee5382ae AJ |
1788 | if (!dev_priv->display.disable_fbc) |
1789 | return; | |
1790 | ||
1791 | dev_priv->display.disable_fbc(dev); | |
016b9b61 | 1792 | dev_priv->cfb_plane = -1; |
ee5382ae AJ |
1793 | } |
1794 | ||
80824003 JB |
1795 | /** |
1796 | * intel_update_fbc - enable/disable FBC as needed | |
bed4a673 | 1797 | * @dev: the drm_device |
80824003 JB |
1798 | * |
1799 | * Set up the framebuffer compression hardware at mode set time. We | |
1800 | * enable it if possible: | |
1801 | * - plane A only (on pre-965) | |
1802 | * - no pixel mulitply/line duplication | |
1803 | * - no alpha buffer discard | |
1804 | * - no dual wide | |
1805 | * - framebuffer <= 2048 in width, 1536 in height | |
1806 | * | |
1807 | * We can't assume that any compression will take place (worst case), | |
1808 | * so the compressed buffer has to be the same size as the uncompressed | |
1809 | * one. It also must reside (along with the line length buffer) in | |
1810 | * stolen memory. | |
1811 | * | |
1812 | * We need to enable/disable FBC on a global basis. | |
1813 | */ | |
bed4a673 | 1814 | static void intel_update_fbc(struct drm_device *dev) |
80824003 | 1815 | { |
80824003 | 1816 | struct drm_i915_private *dev_priv = dev->dev_private; |
bed4a673 CW |
1817 | struct drm_crtc *crtc = NULL, *tmp_crtc; |
1818 | struct intel_crtc *intel_crtc; | |
1819 | struct drm_framebuffer *fb; | |
80824003 | 1820 | struct intel_framebuffer *intel_fb; |
05394f39 | 1821 | struct drm_i915_gem_object *obj; |
cd0de039 | 1822 | int enable_fbc; |
9c928d16 JB |
1823 | |
1824 | DRM_DEBUG_KMS("\n"); | |
80824003 JB |
1825 | |
1826 | if (!i915_powersave) | |
1827 | return; | |
1828 | ||
ee5382ae | 1829 | if (!I915_HAS_FBC(dev)) |
e70236a8 JB |
1830 | return; |
1831 | ||
80824003 JB |
1832 | /* |
1833 | * If FBC is already on, we just have to verify that we can | |
1834 | * keep it that way... | |
1835 | * Need to disable if: | |
9c928d16 | 1836 | * - more than one pipe is active |
80824003 JB |
1837 | * - changing FBC params (stride, fence, mode) |
1838 | * - new fb is too large to fit in compressed buffer | |
1839 | * - going to an unsupported config (interlace, pixel multiply, etc.) | |
1840 | */ | |
9c928d16 | 1841 | list_for_each_entry(tmp_crtc, &dev->mode_config.crtc_list, head) { |
d210246a | 1842 | if (tmp_crtc->enabled && tmp_crtc->fb) { |
bed4a673 CW |
1843 | if (crtc) { |
1844 | DRM_DEBUG_KMS("more than one pipe active, disabling compression\n"); | |
1845 | dev_priv->no_fbc_reason = FBC_MULTIPLE_PIPES; | |
1846 | goto out_disable; | |
1847 | } | |
1848 | crtc = tmp_crtc; | |
1849 | } | |
9c928d16 | 1850 | } |
bed4a673 CW |
1851 | |
1852 | if (!crtc || crtc->fb == NULL) { | |
1853 | DRM_DEBUG_KMS("no output, disabling\n"); | |
1854 | dev_priv->no_fbc_reason = FBC_NO_OUTPUT; | |
9c928d16 JB |
1855 | goto out_disable; |
1856 | } | |
bed4a673 CW |
1857 | |
1858 | intel_crtc = to_intel_crtc(crtc); | |
1859 | fb = crtc->fb; | |
1860 | intel_fb = to_intel_framebuffer(fb); | |
05394f39 | 1861 | obj = intel_fb->obj; |
bed4a673 | 1862 | |
cd0de039 KP |
1863 | enable_fbc = i915_enable_fbc; |
1864 | if (enable_fbc < 0) { | |
1865 | DRM_DEBUG_KMS("fbc set to per-chip default\n"); | |
1866 | enable_fbc = 1; | |
1867 | if (INTEL_INFO(dev)->gen <= 5) | |
1868 | enable_fbc = 0; | |
1869 | } | |
1870 | if (!enable_fbc) { | |
1871 | DRM_DEBUG_KMS("fbc disabled per module param\n"); | |
c1a9f047 JB |
1872 | dev_priv->no_fbc_reason = FBC_MODULE_PARAM; |
1873 | goto out_disable; | |
1874 | } | |
05394f39 | 1875 | if (intel_fb->obj->base.size > dev_priv->cfb_size) { |
28c97730 | 1876 | DRM_DEBUG_KMS("framebuffer too large, disabling " |
5eddb70b | 1877 | "compression\n"); |
b5e50c3f | 1878 | dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL; |
80824003 JB |
1879 | goto out_disable; |
1880 | } | |
bed4a673 CW |
1881 | if ((crtc->mode.flags & DRM_MODE_FLAG_INTERLACE) || |
1882 | (crtc->mode.flags & DRM_MODE_FLAG_DBLSCAN)) { | |
28c97730 | 1883 | DRM_DEBUG_KMS("mode incompatible with compression, " |
5eddb70b | 1884 | "disabling\n"); |
b5e50c3f | 1885 | dev_priv->no_fbc_reason = FBC_UNSUPPORTED_MODE; |
80824003 JB |
1886 | goto out_disable; |
1887 | } | |
bed4a673 CW |
1888 | if ((crtc->mode.hdisplay > 2048) || |
1889 | (crtc->mode.vdisplay > 1536)) { | |
28c97730 | 1890 | DRM_DEBUG_KMS("mode too large for compression, disabling\n"); |
b5e50c3f | 1891 | dev_priv->no_fbc_reason = FBC_MODE_TOO_LARGE; |
80824003 JB |
1892 | goto out_disable; |
1893 | } | |
bed4a673 | 1894 | if ((IS_I915GM(dev) || IS_I945GM(dev)) && intel_crtc->plane != 0) { |
28c97730 | 1895 | DRM_DEBUG_KMS("plane not 0, disabling compression\n"); |
b5e50c3f | 1896 | dev_priv->no_fbc_reason = FBC_BAD_PLANE; |
80824003 JB |
1897 | goto out_disable; |
1898 | } | |
de568510 CW |
1899 | |
1900 | /* The use of a CPU fence is mandatory in order to detect writes | |
1901 | * by the CPU to the scanout and trigger updates to the FBC. | |
1902 | */ | |
1903 | if (obj->tiling_mode != I915_TILING_X || | |
1904 | obj->fence_reg == I915_FENCE_REG_NONE) { | |
1905 | DRM_DEBUG_KMS("framebuffer not tiled or fenced, disabling compression\n"); | |
b5e50c3f | 1906 | dev_priv->no_fbc_reason = FBC_NOT_TILED; |
80824003 JB |
1907 | goto out_disable; |
1908 | } | |
1909 | ||
c924b934 JW |
1910 | /* If the kernel debugger is active, always disable compression */ |
1911 | if (in_dbg_master()) | |
1912 | goto out_disable; | |
1913 | ||
016b9b61 CW |
1914 | /* If the scanout has not changed, don't modify the FBC settings. |
1915 | * Note that we make the fundamental assumption that the fb->obj | |
1916 | * cannot be unpinned (and have its GTT offset and fence revoked) | |
1917 | * without first being decoupled from the scanout and FBC disabled. | |
1918 | */ | |
1919 | if (dev_priv->cfb_plane == intel_crtc->plane && | |
1920 | dev_priv->cfb_fb == fb->base.id && | |
1921 | dev_priv->cfb_y == crtc->y) | |
1922 | return; | |
1923 | ||
1924 | if (intel_fbc_enabled(dev)) { | |
1925 | /* We update FBC along two paths, after changing fb/crtc | |
1926 | * configuration (modeswitching) and after page-flipping | |
1927 | * finishes. For the latter, we know that not only did | |
1928 | * we disable the FBC at the start of the page-flip | |
1929 | * sequence, but also more than one vblank has passed. | |
1930 | * | |
1931 | * For the former case of modeswitching, it is possible | |
1932 | * to switch between two FBC valid configurations | |
1933 | * instantaneously so we do need to disable the FBC | |
1934 | * before we can modify its control registers. We also | |
1935 | * have to wait for the next vblank for that to take | |
1936 | * effect. However, since we delay enabling FBC we can | |
1937 | * assume that a vblank has passed since disabling and | |
1938 | * that we can safely alter the registers in the deferred | |
1939 | * callback. | |
1940 | * | |
1941 | * In the scenario that we go from a valid to invalid | |
1942 | * and then back to valid FBC configuration we have | |
1943 | * no strict enforcement that a vblank occurred since | |
1944 | * disabling the FBC. However, along all current pipe | |
1945 | * disabling paths we do need to wait for a vblank at | |
1946 | * some point. And we wait before enabling FBC anyway. | |
1947 | */ | |
1948 | DRM_DEBUG_KMS("disabling active FBC for update\n"); | |
1949 | intel_disable_fbc(dev); | |
1950 | } | |
1951 | ||
bed4a673 | 1952 | intel_enable_fbc(crtc, 500); |
80824003 JB |
1953 | return; |
1954 | ||
1955 | out_disable: | |
80824003 | 1956 | /* Multiple disables should be harmless */ |
a939406f CW |
1957 | if (intel_fbc_enabled(dev)) { |
1958 | DRM_DEBUG_KMS("unsupported config, disabling FBC\n"); | |
ee5382ae | 1959 | intel_disable_fbc(dev); |
a939406f | 1960 | } |
80824003 JB |
1961 | } |
1962 | ||
127bd2ac | 1963 | int |
48b956c5 | 1964 | intel_pin_and_fence_fb_obj(struct drm_device *dev, |
05394f39 | 1965 | struct drm_i915_gem_object *obj, |
919926ae | 1966 | struct intel_ring_buffer *pipelined) |
6b95a207 | 1967 | { |
ce453d81 | 1968 | struct drm_i915_private *dev_priv = dev->dev_private; |
6b95a207 KH |
1969 | u32 alignment; |
1970 | int ret; | |
1971 | ||
05394f39 | 1972 | switch (obj->tiling_mode) { |
6b95a207 | 1973 | case I915_TILING_NONE: |
534843da CW |
1974 | if (IS_BROADWATER(dev) || IS_CRESTLINE(dev)) |
1975 | alignment = 128 * 1024; | |
a6c45cf0 | 1976 | else if (INTEL_INFO(dev)->gen >= 4) |
534843da CW |
1977 | alignment = 4 * 1024; |
1978 | else | |
1979 | alignment = 64 * 1024; | |
6b95a207 KH |
1980 | break; |
1981 | case I915_TILING_X: | |
1982 | /* pin() will align the object as required by fence */ | |
1983 | alignment = 0; | |
1984 | break; | |
1985 | case I915_TILING_Y: | |
1986 | /* FIXME: Is this true? */ | |
1987 | DRM_ERROR("Y tiled not allowed for scan out buffers\n"); | |
1988 | return -EINVAL; | |
1989 | default: | |
1990 | BUG(); | |
1991 | } | |
1992 | ||
ce453d81 | 1993 | dev_priv->mm.interruptible = false; |
2da3b9b9 | 1994 | ret = i915_gem_object_pin_to_display_plane(obj, alignment, pipelined); |
48b956c5 | 1995 | if (ret) |
ce453d81 | 1996 | goto err_interruptible; |
6b95a207 KH |
1997 | |
1998 | /* Install a fence for tiled scan-out. Pre-i965 always needs a | |
1999 | * fence, whereas 965+ only requires a fence if using | |
2000 | * framebuffer compression. For simplicity, we always install | |
2001 | * a fence as the cost is not that onerous. | |
2002 | */ | |
05394f39 | 2003 | if (obj->tiling_mode != I915_TILING_NONE) { |
ce453d81 | 2004 | ret = i915_gem_object_get_fence(obj, pipelined); |
48b956c5 CW |
2005 | if (ret) |
2006 | goto err_unpin; | |
6b95a207 KH |
2007 | } |
2008 | ||
ce453d81 | 2009 | dev_priv->mm.interruptible = true; |
6b95a207 | 2010 | return 0; |
48b956c5 CW |
2011 | |
2012 | err_unpin: | |
2013 | i915_gem_object_unpin(obj); | |
ce453d81 CW |
2014 | err_interruptible: |
2015 | dev_priv->mm.interruptible = true; | |
48b956c5 | 2016 | return ret; |
6b95a207 KH |
2017 | } |
2018 | ||
17638cd6 JB |
2019 | static int i9xx_update_plane(struct drm_crtc *crtc, struct drm_framebuffer *fb, |
2020 | int x, int y) | |
81255565 JB |
2021 | { |
2022 | struct drm_device *dev = crtc->dev; | |
2023 | struct drm_i915_private *dev_priv = dev->dev_private; | |
2024 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | |
2025 | struct intel_framebuffer *intel_fb; | |
05394f39 | 2026 | struct drm_i915_gem_object *obj; |
81255565 JB |
2027 | int plane = intel_crtc->plane; |
2028 | unsigned long Start, Offset; | |
81255565 | 2029 | u32 dspcntr; |
5eddb70b | 2030 | u32 reg; |
81255565 JB |
2031 | |
2032 | switch (plane) { | |
2033 | case 0: | |
2034 | case 1: | |
2035 | break; | |
2036 | default: | |
2037 | DRM_ERROR("Can't update plane %d in SAREA\n", plane); | |
2038 | return -EINVAL; | |
2039 | } | |
2040 | ||
2041 | intel_fb = to_intel_framebuffer(fb); | |
2042 | obj = intel_fb->obj; | |
81255565 | 2043 | |
5eddb70b CW |
2044 | reg = DSPCNTR(plane); |
2045 | dspcntr = I915_READ(reg); | |
81255565 JB |
2046 | /* Mask out pixel format bits in case we change it */ |
2047 | dspcntr &= ~DISPPLANE_PIXFORMAT_MASK; | |
2048 | switch (fb->bits_per_pixel) { | |
2049 | case 8: | |
2050 | dspcntr |= DISPPLANE_8BPP; | |
2051 | break; | |
2052 | case 16: | |
2053 | if (fb->depth == 15) | |
2054 | dspcntr |= DISPPLANE_15_16BPP; | |
2055 | else | |
2056 | dspcntr |= DISPPLANE_16BPP; | |
2057 | break; | |
2058 | case 24: | |
2059 | case 32: | |
2060 | dspcntr |= DISPPLANE_32BPP_NO_ALPHA; | |
2061 | break; | |
2062 | default: | |
17638cd6 | 2063 | DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel); |
81255565 JB |
2064 | return -EINVAL; |
2065 | } | |
a6c45cf0 | 2066 | if (INTEL_INFO(dev)->gen >= 4) { |
05394f39 | 2067 | if (obj->tiling_mode != I915_TILING_NONE) |
81255565 JB |
2068 | dspcntr |= DISPPLANE_TILED; |
2069 | else | |
2070 | dspcntr &= ~DISPPLANE_TILED; | |
2071 | } | |
2072 | ||
5eddb70b | 2073 | I915_WRITE(reg, dspcntr); |
81255565 | 2074 | |
05394f39 | 2075 | Start = obj->gtt_offset; |
81255565 JB |
2076 | Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8); |
2077 | ||
4e6cfefc CW |
2078 | DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n", |
2079 | Start, Offset, x, y, fb->pitch); | |
5eddb70b | 2080 | I915_WRITE(DSPSTRIDE(plane), fb->pitch); |
a6c45cf0 | 2081 | if (INTEL_INFO(dev)->gen >= 4) { |
5eddb70b CW |
2082 | I915_WRITE(DSPSURF(plane), Start); |
2083 | I915_WRITE(DSPTILEOFF(plane), (y << 16) | x); | |
2084 | I915_WRITE(DSPADDR(plane), Offset); | |
2085 | } else | |
2086 | I915_WRITE(DSPADDR(plane), Start + Offset); | |
2087 | POSTING_READ(reg); | |
81255565 | 2088 | |
17638cd6 JB |
2089 | return 0; |
2090 | } | |
2091 | ||
2092 | static int ironlake_update_plane(struct drm_crtc *crtc, | |
2093 | struct drm_framebuffer *fb, int x, int y) | |
2094 | { | |
2095 | struct drm_device *dev = crtc->dev; | |
2096 | struct drm_i915_private *dev_priv = dev->dev_private; | |
2097 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | |
2098 | struct intel_framebuffer *intel_fb; | |
2099 | struct drm_i915_gem_object *obj; | |
2100 | int plane = intel_crtc->plane; | |
2101 | unsigned long Start, Offset; | |
2102 | u32 dspcntr; | |
2103 | u32 reg; | |
2104 | ||
2105 | switch (plane) { | |
2106 | case 0: | |
2107 | case 1: | |
27f8227b | 2108 | case 2: |
17638cd6 JB |
2109 | break; |
2110 | default: | |
2111 | DRM_ERROR("Can't update plane %d in SAREA\n", plane); | |
2112 | return -EINVAL; | |
2113 | } | |
2114 | ||
2115 | intel_fb = to_intel_framebuffer(fb); | |
2116 | obj = intel_fb->obj; | |
2117 | ||
2118 | reg = DSPCNTR(plane); | |
2119 | dspcntr = I915_READ(reg); | |
2120 | /* Mask out pixel format bits in case we change it */ | |
2121 | dspcntr &= ~DISPPLANE_PIXFORMAT_MASK; | |
2122 | switch (fb->bits_per_pixel) { | |
2123 | case 8: | |
2124 | dspcntr |= DISPPLANE_8BPP; | |
2125 | break; | |
2126 | case 16: | |
2127 | if (fb->depth != 16) | |
2128 | return -EINVAL; | |
2129 | ||
2130 | dspcntr |= DISPPLANE_16BPP; | |
2131 | break; | |
2132 | case 24: | |
2133 | case 32: | |
2134 | if (fb->depth == 24) | |
2135 | dspcntr |= DISPPLANE_32BPP_NO_ALPHA; | |
2136 | else if (fb->depth == 30) | |
2137 | dspcntr |= DISPPLANE_32BPP_30BIT_NO_ALPHA; | |
2138 | else | |
2139 | return -EINVAL; | |
2140 | break; | |
2141 | default: | |
2142 | DRM_ERROR("Unknown color depth %d\n", fb->bits_per_pixel); | |
2143 | return -EINVAL; | |
2144 | } | |
2145 | ||
2146 | if (obj->tiling_mode != I915_TILING_NONE) | |
2147 | dspcntr |= DISPPLANE_TILED; | |
2148 | else | |
2149 | dspcntr &= ~DISPPLANE_TILED; | |
2150 | ||
2151 | /* must disable */ | |
2152 | dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; | |
2153 | ||
2154 | I915_WRITE(reg, dspcntr); | |
2155 | ||
2156 | Start = obj->gtt_offset; | |
2157 | Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8); | |
2158 | ||
2159 | DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n", | |
2160 | Start, Offset, x, y, fb->pitch); | |
2161 | I915_WRITE(DSPSTRIDE(plane), fb->pitch); | |
2162 | I915_WRITE(DSPSURF(plane), Start); | |
2163 | I915_WRITE(DSPTILEOFF(plane), (y << 16) | x); | |
2164 | I915_WRITE(DSPADDR(plane), Offset); | |
2165 | POSTING_READ(reg); | |
2166 | ||
2167 | return 0; | |
2168 | } | |
2169 | ||
2170 | /* Assume fb object is pinned & idle & fenced and just update base pointers */ | |
2171 | static int | |
2172 | intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb, | |
2173 | int x, int y, enum mode_set_atomic state) | |
2174 | { | |
2175 | struct drm_device *dev = crtc->dev; | |
2176 | struct drm_i915_private *dev_priv = dev->dev_private; | |
2177 | int ret; | |
2178 | ||
2179 | ret = dev_priv->display.update_plane(crtc, fb, x, y); | |
2180 | if (ret) | |
2181 | return ret; | |
2182 | ||
bed4a673 | 2183 | intel_update_fbc(dev); |
3dec0095 | 2184 | intel_increase_pllclock(crtc); |
81255565 JB |
2185 | |
2186 | return 0; | |
2187 | } | |
2188 | ||
5c3b82e2 | 2189 | static int |
3c4fdcfb KH |
2190 | intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, |
2191 | struct drm_framebuffer *old_fb) | |
79e53945 JB |
2192 | { |
2193 | struct drm_device *dev = crtc->dev; | |
79e53945 JB |
2194 | struct drm_i915_master_private *master_priv; |
2195 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | |
5c3b82e2 | 2196 | int ret; |
79e53945 JB |
2197 | |
2198 | /* no fb bound */ | |
2199 | if (!crtc->fb) { | |
a5071c2f | 2200 | DRM_ERROR("No FB bound\n"); |
5c3b82e2 CW |
2201 | return 0; |
2202 | } | |
2203 | ||
265db958 | 2204 | switch (intel_crtc->plane) { |
5c3b82e2 CW |
2205 | case 0: |
2206 | case 1: | |
2207 | break; | |
27f8227b JB |
2208 | case 2: |
2209 | if (IS_IVYBRIDGE(dev)) | |
2210 | break; | |
2211 | /* fall through otherwise */ | |
5c3b82e2 | 2212 | default: |
a5071c2f | 2213 | DRM_ERROR("no plane for crtc\n"); |
5c3b82e2 | 2214 | return -EINVAL; |
79e53945 JB |
2215 | } |
2216 | ||
5c3b82e2 | 2217 | mutex_lock(&dev->struct_mutex); |
265db958 CW |
2218 | ret = intel_pin_and_fence_fb_obj(dev, |
2219 | to_intel_framebuffer(crtc->fb)->obj, | |
919926ae | 2220 | NULL); |
5c3b82e2 CW |
2221 | if (ret != 0) { |
2222 | mutex_unlock(&dev->struct_mutex); | |
a5071c2f | 2223 | DRM_ERROR("pin & fence failed\n"); |
5c3b82e2 CW |
2224 | return ret; |
2225 | } | |
79e53945 | 2226 | |
265db958 | 2227 | if (old_fb) { |
e6c3a2a6 | 2228 | struct drm_i915_private *dev_priv = dev->dev_private; |
05394f39 | 2229 | struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj; |
265db958 | 2230 | |
e6c3a2a6 | 2231 | wait_event(dev_priv->pending_flip_queue, |
01eec727 | 2232 | atomic_read(&dev_priv->mm.wedged) || |
05394f39 | 2233 | atomic_read(&obj->pending_flip) == 0); |
85345517 CW |
2234 | |
2235 | /* Big Hammer, we also need to ensure that any pending | |
2236 | * MI_WAIT_FOR_EVENT inside a user batch buffer on the | |
2237 | * current scanout is retired before unpinning the old | |
2238 | * framebuffer. | |
01eec727 CW |
2239 | * |
2240 | * This should only fail upon a hung GPU, in which case we | |
2241 | * can safely continue. | |
85345517 | 2242 | */ |
a8198eea | 2243 | ret = i915_gem_object_finish_gpu(obj); |
01eec727 | 2244 | (void) ret; |
265db958 CW |
2245 | } |
2246 | ||
21c74a8e JW |
2247 | ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y, |
2248 | LEAVE_ATOMIC_MODE_SET); | |
4e6cfefc | 2249 | if (ret) { |
265db958 | 2250 | i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj); |
5c3b82e2 | 2251 | mutex_unlock(&dev->struct_mutex); |
a5071c2f | 2252 | DRM_ERROR("failed to update base address\n"); |
4e6cfefc | 2253 | return ret; |
79e53945 | 2254 | } |
3c4fdcfb | 2255 | |
b7f1de28 CW |
2256 | if (old_fb) { |
2257 | intel_wait_for_vblank(dev, intel_crtc->pipe); | |
265db958 | 2258 | i915_gem_object_unpin(to_intel_framebuffer(old_fb)->obj); |
b7f1de28 | 2259 | } |
652c393a | 2260 | |
5c3b82e2 | 2261 | mutex_unlock(&dev->struct_mutex); |
79e53945 JB |
2262 | |
2263 | if (!dev->primary->master) | |
5c3b82e2 | 2264 | return 0; |
79e53945 JB |
2265 | |
2266 | master_priv = dev->primary->master->driver_priv; | |
2267 | if (!master_priv->sarea_priv) | |
5c3b82e2 | 2268 | return 0; |
79e53945 | 2269 | |
265db958 | 2270 | if (intel_crtc->pipe) { |
79e53945 JB |
2271 | master_priv->sarea_priv->pipeB_x = x; |
2272 | master_priv->sarea_priv->pipeB_y = y; | |
5c3b82e2 CW |
2273 | } else { |
2274 | master_priv->sarea_priv->pipeA_x = x; | |
2275 | master_priv->sarea_priv->pipeA_y = y; | |
79e53945 | 2276 | } |
5c3b82e2 CW |
2277 | |
2278 | return 0; | |
79e53945 JB |
2279 | } |
2280 | ||
5eddb70b | 2281 | static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock) |
32f9d658 ZW |
2282 | { |
2283 | struct drm_device *dev = crtc->dev; | |
2284 | struct drm_i915_private *dev_priv = dev->dev_private; | |
2285 | u32 dpa_ctl; | |
2286 | ||
28c97730 | 2287 | DRM_DEBUG_KMS("eDP PLL enable for clock %d\n", clock); |
32f9d658 ZW |
2288 | dpa_ctl = I915_READ(DP_A); |
2289 | dpa_ctl &= ~DP_PLL_FREQ_MASK; | |
2290 | ||
2291 | if (clock < 200000) { | |
2292 | u32 temp; | |
2293 | dpa_ctl |= DP_PLL_FREQ_160MHZ; | |
2294 | /* workaround for 160Mhz: | |
2295 | 1) program 0x4600c bits 15:0 = 0x8124 | |
2296 | 2) program 0x46010 bit 0 = 1 | |
2297 | 3) program 0x46034 bit 24 = 1 | |
2298 | 4) program 0x64000 bit 14 = 1 | |
2299 | */ | |
2300 | temp = I915_READ(0x4600c); | |
2301 | temp &= 0xffff0000; | |
2302 | I915_WRITE(0x4600c, temp | 0x8124); | |
2303 | ||
2304 | temp = I915_READ(0x46010); | |
2305 | I915_WRITE(0x46010, temp | 1); | |
2306 | ||
2307 | temp = I915_READ(0x46034); | |
2308 | I915_WRITE(0x46034, temp | (1 << 24)); | |
2309 | } else { | |
2310 | dpa_ctl |= DP_PLL_FREQ_270MHZ; | |
2311 | } | |
2312 | I915_WRITE(DP_A, dpa_ctl); | |
2313 | ||
5eddb70b | 2314 | POSTING_READ(DP_A); |
32f9d658 ZW |
2315 | udelay(500); |
2316 | } | |
2317 | ||
5e84e1a4 ZW |
2318 | static void intel_fdi_normal_train(struct drm_crtc *crtc) |
2319 | { | |
2320 | struct drm_device *dev = crtc->dev; | |
2321 | struct drm_i915_private *dev_priv = dev->dev_private; | |
2322 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | |
2323 | int pipe = intel_crtc->pipe; | |
2324 | u32 reg, temp; | |
2325 | ||
2326 | /* enable normal train */ | |
2327 | reg = FDI_TX_CTL(pipe); | |
2328 | temp = I915_READ(reg); | |
61e499bf | 2329 | if (IS_IVYBRIDGE(dev)) { |
357555c0 JB |
2330 | temp &= ~FDI_LINK_TRAIN_NONE_IVB; |
2331 | temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE; | |
61e499bf KP |
2332 | } else { |
2333 | temp &= ~FDI_LINK_TRAIN_NONE; | |
2334 | temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE; | |
357555c0 | 2335 | } |
5e84e1a4 ZW |
2336 | I915_WRITE(reg, temp); |
2337 | ||
2338 | reg = FDI_RX_CTL(pipe); | |
2339 | temp = I915_READ(reg); | |
2340 | if (HAS_PCH_CPT(dev)) { | |
2341 | temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; | |
2342 | temp |= FDI_LINK_TRAIN_NORMAL_CPT; | |
2343 | } else { | |
2344 | temp &= ~FDI_LINK_TRAIN_NONE; | |
2345 | temp |= FDI_LINK_TRAIN_NONE; | |
2346 | } | |
2347 | I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE); | |
2348 | ||
2349 | /* wait one idle pattern time */ | |
2350 | POSTING_READ(reg); | |
2351 | udelay(1000); | |
357555c0 JB |
2352 | |
2353 | /* IVB wants error correction enabled */ | |
2354 | if (IS_IVYBRIDGE(dev)) | |
2355 | I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE | | |
2356 | FDI_FE_ERRC_ENABLE); | |
5e84e1a4 ZW |
2357 | } |
2358 | ||
291427f5 JB |
2359 | static void cpt_phase_pointer_enable(struct drm_device *dev, int pipe) |
2360 | { | |
2361 | struct drm_i915_private *dev_priv = dev->dev_private; | |
2362 | u32 flags = I915_READ(SOUTH_CHICKEN1); | |
2363 | ||
2364 | flags |= FDI_PHASE_SYNC_OVR(pipe); | |
2365 | I915_WRITE(SOUTH_CHICKEN1, flags); /* once to unlock... */ | |
2366 | flags |= FDI_PHASE_SYNC_EN(pipe); | |
2367 | I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to enable */ | |
2368 | POSTING_READ(SOUTH_CHICKEN1); | |
2369 | } | |
2370 | ||
8db9d77b ZW |
2371 | /* The FDI link training functions for ILK/Ibexpeak. */ |
2372 | static void ironlake_fdi_link_train(struct drm_crtc *crtc) | |
2373 | { | |
2374 | struct drm_device *dev = crtc->dev; | |
2375 | struct drm_i915_private *dev_priv = dev->dev_private; | |
2376 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | |
2377 | int pipe = intel_crtc->pipe; | |
0fc932b8 | 2378 | int plane = intel_crtc->plane; |
5eddb70b | 2379 | u32 reg, temp, tries; |
8db9d77b | 2380 | |
0fc932b8 JB |
2381 | /* FDI needs bits from pipe & plane first */ |
2382 | assert_pipe_enabled(dev_priv, pipe); | |
2383 | assert_plane_enabled(dev_priv, plane); | |
2384 | ||
e1a44743 AJ |
2385 | /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit |
2386 | for train result */ | |
5eddb70b CW |
2387 | reg = FDI_RX_IMR(pipe); |
2388 | temp = I915_READ(reg); | |
e1a44743 AJ |
2389 | temp &= ~FDI_RX_SYMBOL_LOCK; |
2390 | temp &= ~FDI_RX_BIT_LOCK; | |
5eddb70b CW |
2391 | I915_WRITE(reg, temp); |
2392 | I915_READ(reg); | |
e1a44743 AJ |
2393 | udelay(150); |
2394 | ||
8db9d77b | 2395 | /* enable CPU FDI TX and PCH FDI RX */ |
5eddb70b CW |
2396 | reg = FDI_TX_CTL(pipe); |
2397 | temp = I915_READ(reg); | |
77ffb597 AJ |
2398 | temp &= ~(7 << 19); |
2399 | temp |= (intel_crtc->fdi_lanes - 1) << 19; | |
8db9d77b ZW |
2400 | temp &= ~FDI_LINK_TRAIN_NONE; |
2401 | temp |= FDI_LINK_TRAIN_PATTERN_1; | |
5eddb70b | 2402 | I915_WRITE(reg, temp | FDI_TX_ENABLE); |
8db9d77b | 2403 | |
5eddb70b CW |
2404 | reg = FDI_RX_CTL(pipe); |
2405 | temp = I915_READ(reg); | |
8db9d77b ZW |
2406 | temp &= ~FDI_LINK_TRAIN_NONE; |
2407 | temp |= FDI_LINK_TRAIN_PATTERN_1; | |
5eddb70b CW |
2408 | I915_WRITE(reg, temp | FDI_RX_ENABLE); |
2409 | ||
2410 | POSTING_READ(reg); | |
8db9d77b ZW |
2411 | udelay(150); |
2412 | ||
5b2adf89 | 2413 | /* Ironlake workaround, enable clock pointer after FDI enable*/ |
6f06ce18 JB |
2414 | if (HAS_PCH_IBX(dev)) { |
2415 | I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR); | |
2416 | I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR | | |
2417 | FDI_RX_PHASE_SYNC_POINTER_EN); | |
2418 | } | |
5b2adf89 | 2419 | |
5eddb70b | 2420 | reg = FDI_RX_IIR(pipe); |
e1a44743 | 2421 | for (tries = 0; tries < 5; tries++) { |
5eddb70b | 2422 | temp = I915_READ(reg); |
8db9d77b ZW |
2423 | DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); |
2424 | ||
2425 | if ((temp & FDI_RX_BIT_LOCK)) { | |
2426 | DRM_DEBUG_KMS("FDI train 1 done.\n"); | |
5eddb70b | 2427 | I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); |
8db9d77b ZW |
2428 | break; |
2429 | } | |
8db9d77b | 2430 | } |
e1a44743 | 2431 | if (tries == 5) |
5eddb70b | 2432 | DRM_ERROR("FDI train 1 fail!\n"); |
8db9d77b ZW |
2433 | |
2434 | /* Train 2 */ | |
5eddb70b CW |
2435 | reg = FDI_TX_CTL(pipe); |
2436 | temp = I915_READ(reg); | |
8db9d77b ZW |
2437 | temp &= ~FDI_LINK_TRAIN_NONE; |
2438 | temp |= FDI_LINK_TRAIN_PATTERN_2; | |
5eddb70b | 2439 | I915_WRITE(reg, temp); |
8db9d77b | 2440 | |
5eddb70b CW |
2441 | reg = FDI_RX_CTL(pipe); |
2442 | temp = I915_READ(reg); | |
8db9d77b ZW |
2443 | temp &= ~FDI_LINK_TRAIN_NONE; |
2444 | temp |= FDI_LINK_TRAIN_PATTERN_2; | |
5eddb70b | 2445 | I915_WRITE(reg, temp); |
8db9d77b | 2446 | |
5eddb70b CW |
2447 | POSTING_READ(reg); |
2448 | udelay(150); | |
8db9d77b | 2449 | |
5eddb70b | 2450 | reg = FDI_RX_IIR(pipe); |
e1a44743 | 2451 | for (tries = 0; tries < 5; tries++) { |
5eddb70b | 2452 | temp = I915_READ(reg); |
8db9d77b ZW |
2453 | DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); |
2454 | ||
2455 | if (temp & FDI_RX_SYMBOL_LOCK) { | |
5eddb70b | 2456 | I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); |
8db9d77b ZW |
2457 | DRM_DEBUG_KMS("FDI train 2 done.\n"); |
2458 | break; | |
2459 | } | |
8db9d77b | 2460 | } |
e1a44743 | 2461 | if (tries == 5) |
5eddb70b | 2462 | DRM_ERROR("FDI train 2 fail!\n"); |
8db9d77b ZW |
2463 | |
2464 | DRM_DEBUG_KMS("FDI train done\n"); | |
5c5313c8 | 2465 | |
8db9d77b ZW |
2466 | } |
2467 | ||
0206e353 | 2468 | static const int snb_b_fdi_train_param[] = { |
8db9d77b ZW |
2469 | FDI_LINK_TRAIN_400MV_0DB_SNB_B, |
2470 | FDI_LINK_TRAIN_400MV_6DB_SNB_B, | |
2471 | FDI_LINK_TRAIN_600MV_3_5DB_SNB_B, | |
2472 | FDI_LINK_TRAIN_800MV_0DB_SNB_B, | |
2473 | }; | |
2474 | ||
2475 | /* The FDI link training functions for SNB/Cougarpoint. */ | |
2476 | static void gen6_fdi_link_train(struct drm_crtc *crtc) | |
2477 | { | |
2478 | struct drm_device *dev = crtc->dev; | |
2479 | struct drm_i915_private *dev_priv = dev->dev_private; | |
2480 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | |
2481 | int pipe = intel_crtc->pipe; | |
5eddb70b | 2482 | u32 reg, temp, i; |
8db9d77b | 2483 | |
e1a44743 AJ |
2484 | /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit |
2485 | for train result */ | |
5eddb70b CW |
2486 | reg = FDI_RX_IMR(pipe); |
2487 | temp = I915_READ(reg); | |
e1a44743 AJ |
2488 | temp &= ~FDI_RX_SYMBOL_LOCK; |
2489 | temp &= ~FDI_RX_BIT_LOCK; | |
5eddb70b CW |
2490 | I915_WRITE(reg, temp); |
2491 | ||
2492 | POSTING_READ(reg); | |
e1a44743 AJ |
2493 | udelay(150); |
2494 | ||
8db9d77b | 2495 | /* enable CPU FDI TX and PCH FDI RX */ |
5eddb70b CW |
2496 | reg = FDI_TX_CTL(pipe); |
2497 | temp = I915_READ(reg); | |
77ffb597 AJ |
2498 | temp &= ~(7 << 19); |
2499 | temp |= (intel_crtc->fdi_lanes - 1) << 19; | |
8db9d77b ZW |
2500 | temp &= ~FDI_LINK_TRAIN_NONE; |
2501 | temp |= FDI_LINK_TRAIN_PATTERN_1; | |
2502 | temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; | |
2503 | /* SNB-B */ | |
2504 | temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; | |
5eddb70b | 2505 | I915_WRITE(reg, temp | FDI_TX_ENABLE); |
8db9d77b | 2506 | |
5eddb70b CW |
2507 | reg = FDI_RX_CTL(pipe); |
2508 | temp = I915_READ(reg); | |
8db9d77b ZW |
2509 | if (HAS_PCH_CPT(dev)) { |
2510 | temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; | |
2511 | temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; | |
2512 | } else { | |
2513 | temp &= ~FDI_LINK_TRAIN_NONE; | |
2514 | temp |= FDI_LINK_TRAIN_PATTERN_1; | |
2515 | } | |
5eddb70b CW |
2516 | I915_WRITE(reg, temp | FDI_RX_ENABLE); |
2517 | ||
2518 | POSTING_READ(reg); | |
8db9d77b ZW |
2519 | udelay(150); |
2520 | ||
291427f5 JB |
2521 | if (HAS_PCH_CPT(dev)) |
2522 | cpt_phase_pointer_enable(dev, pipe); | |
2523 | ||
0206e353 | 2524 | for (i = 0; i < 4; i++) { |
5eddb70b CW |
2525 | reg = FDI_TX_CTL(pipe); |
2526 | temp = I915_READ(reg); | |
8db9d77b ZW |
2527 | temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; |
2528 | temp |= snb_b_fdi_train_param[i]; | |
5eddb70b CW |
2529 | I915_WRITE(reg, temp); |
2530 | ||
2531 | POSTING_READ(reg); | |
8db9d77b ZW |
2532 | udelay(500); |
2533 | ||
5eddb70b CW |
2534 | reg = FDI_RX_IIR(pipe); |
2535 | temp = I915_READ(reg); | |
8db9d77b ZW |
2536 | DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); |
2537 | ||
2538 | if (temp & FDI_RX_BIT_LOCK) { | |
5eddb70b | 2539 | I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); |
8db9d77b ZW |
2540 | DRM_DEBUG_KMS("FDI train 1 done.\n"); |
2541 | break; | |
2542 | } | |
2543 | } | |
2544 | if (i == 4) | |
5eddb70b | 2545 | DRM_ERROR("FDI train 1 fail!\n"); |
8db9d77b ZW |
2546 | |
2547 | /* Train 2 */ | |
5eddb70b CW |
2548 | reg = FDI_TX_CTL(pipe); |
2549 | temp = I915_READ(reg); | |
8db9d77b ZW |
2550 | temp &= ~FDI_LINK_TRAIN_NONE; |
2551 | temp |= FDI_LINK_TRAIN_PATTERN_2; | |
2552 | if (IS_GEN6(dev)) { | |
2553 | temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; | |
2554 | /* SNB-B */ | |
2555 | temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; | |
2556 | } | |
5eddb70b | 2557 | I915_WRITE(reg, temp); |
8db9d77b | 2558 | |
5eddb70b CW |
2559 | reg = FDI_RX_CTL(pipe); |
2560 | temp = I915_READ(reg); | |
8db9d77b ZW |
2561 | if (HAS_PCH_CPT(dev)) { |
2562 | temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; | |
2563 | temp |= FDI_LINK_TRAIN_PATTERN_2_CPT; | |
2564 | } else { | |
2565 | temp &= ~FDI_LINK_TRAIN_NONE; | |
2566 | temp |= FDI_LINK_TRAIN_PATTERN_2; | |
2567 | } | |
5eddb70b CW |
2568 | I915_WRITE(reg, temp); |
2569 | ||
2570 | POSTING_READ(reg); | |
8db9d77b ZW |
2571 | udelay(150); |
2572 | ||
0206e353 | 2573 | for (i = 0; i < 4; i++) { |
5eddb70b CW |
2574 | reg = FDI_TX_CTL(pipe); |
2575 | temp = I915_READ(reg); | |
8db9d77b ZW |
2576 | temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; |
2577 | temp |= snb_b_fdi_train_param[i]; | |
5eddb70b CW |
2578 | I915_WRITE(reg, temp); |
2579 | ||
2580 | POSTING_READ(reg); | |
8db9d77b ZW |
2581 | udelay(500); |
2582 | ||
5eddb70b CW |
2583 | reg = FDI_RX_IIR(pipe); |
2584 | temp = I915_READ(reg); | |
8db9d77b ZW |
2585 | DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); |
2586 | ||
2587 | if (temp & FDI_RX_SYMBOL_LOCK) { | |
5eddb70b | 2588 | I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); |
8db9d77b ZW |
2589 | DRM_DEBUG_KMS("FDI train 2 done.\n"); |
2590 | break; | |
2591 | } | |
2592 | } | |
2593 | if (i == 4) | |
5eddb70b | 2594 | DRM_ERROR("FDI train 2 fail!\n"); |
8db9d77b ZW |
2595 | |
2596 | DRM_DEBUG_KMS("FDI train done.\n"); | |
2597 | } | |
2598 | ||
357555c0 JB |
2599 | /* Manual link training for Ivy Bridge A0 parts */ |
2600 | static void ivb_manual_fdi_link_train(struct drm_crtc *crtc) | |
2601 | { | |
2602 | struct drm_device *dev = crtc->dev; | |
2603 | struct drm_i915_private *dev_priv = dev->dev_private; | |
2604 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | |
2605 | int pipe = intel_crtc->pipe; | |
2606 | u32 reg, temp, i; | |
2607 | ||
2608 | /* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit | |
2609 | for train result */ | |
2610 | reg = FDI_RX_IMR(pipe); | |
2611 | temp = I915_READ(reg); | |
2612 | temp &= ~FDI_RX_SYMBOL_LOCK; | |
2613 | temp &= ~FDI_RX_BIT_LOCK; | |
2614 | I915_WRITE(reg, temp); | |
2615 | ||
2616 | POSTING_READ(reg); | |
2617 | udelay(150); | |
2618 | ||
2619 | /* enable CPU FDI TX and PCH FDI RX */ | |
2620 | reg = FDI_TX_CTL(pipe); | |
2621 | temp = I915_READ(reg); | |
2622 | temp &= ~(7 << 19); | |
2623 | temp |= (intel_crtc->fdi_lanes - 1) << 19; | |
2624 | temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB); | |
2625 | temp |= FDI_LINK_TRAIN_PATTERN_1_IVB; | |
2626 | temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; | |
2627 | temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; | |
c4f9c4c2 | 2628 | temp |= FDI_COMPOSITE_SYNC; |
357555c0 JB |
2629 | I915_WRITE(reg, temp | FDI_TX_ENABLE); |
2630 | ||
2631 | reg = FDI_RX_CTL(pipe); | |
2632 | temp = I915_READ(reg); | |
2633 | temp &= ~FDI_LINK_TRAIN_AUTO; | |
2634 | temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; | |
2635 | temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; | |
c4f9c4c2 | 2636 | temp |= FDI_COMPOSITE_SYNC; |
357555c0 JB |
2637 | I915_WRITE(reg, temp | FDI_RX_ENABLE); |
2638 | ||
2639 | POSTING_READ(reg); | |
2640 | udelay(150); | |
2641 | ||
291427f5 JB |
2642 | if (HAS_PCH_CPT(dev)) |
2643 | cpt_phase_pointer_enable(dev, pipe); | |
2644 | ||
0206e353 | 2645 | for (i = 0; i < 4; i++) { |
357555c0 JB |
2646 | reg = FDI_TX_CTL(pipe); |
2647 | temp = I915_READ(reg); | |
2648 | temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; | |
2649 | temp |= snb_b_fdi_train_param[i]; | |
2650 | I915_WRITE(reg, temp); | |
2651 | ||
2652 | POSTING_READ(reg); | |
2653 | udelay(500); | |
2654 | ||
2655 | reg = FDI_RX_IIR(pipe); | |
2656 | temp = I915_READ(reg); | |
2657 | DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); | |
2658 | ||
2659 | if (temp & FDI_RX_BIT_LOCK || | |
2660 | (I915_READ(reg) & FDI_RX_BIT_LOCK)) { | |
2661 | I915_WRITE(reg, temp | FDI_RX_BIT_LOCK); | |
2662 | DRM_DEBUG_KMS("FDI train 1 done.\n"); | |
2663 | break; | |
2664 | } | |
2665 | } | |
2666 | if (i == 4) | |
2667 | DRM_ERROR("FDI train 1 fail!\n"); | |
2668 | ||
2669 | /* Train 2 */ | |
2670 | reg = FDI_TX_CTL(pipe); | |
2671 | temp = I915_READ(reg); | |
2672 | temp &= ~FDI_LINK_TRAIN_NONE_IVB; | |
2673 | temp |= FDI_LINK_TRAIN_PATTERN_2_IVB; | |
2674 | temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; | |
2675 | temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; | |
2676 | I915_WRITE(reg, temp); | |
2677 | ||
2678 | reg = FDI_RX_CTL(pipe); | |
2679 | temp = I915_READ(reg); | |
2680 | temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; | |
2681 | temp |= FDI_LINK_TRAIN_PATTERN_2_CPT; | |
2682 | I915_WRITE(reg, temp); | |
2683 | ||
2684 | POSTING_READ(reg); | |
2685 | udelay(150); | |
2686 | ||
0206e353 | 2687 | for (i = 0; i < 4; i++) { |
357555c0 JB |
2688 | reg = FDI_TX_CTL(pipe); |
2689 | temp = I915_READ(reg); | |
2690 | temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; | |
2691 | temp |= snb_b_fdi_train_param[i]; | |
2692 | I915_WRITE(reg, temp); | |
2693 | ||
2694 | POSTING_READ(reg); | |
2695 | udelay(500); | |
2696 | ||
2697 | reg = FDI_RX_IIR(pipe); | |
2698 | temp = I915_READ(reg); | |
2699 | DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp); | |
2700 | ||
2701 | if (temp & FDI_RX_SYMBOL_LOCK) { | |
2702 | I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK); | |
2703 | DRM_DEBUG_KMS("FDI train 2 done.\n"); | |
2704 | break; | |
2705 | } | |
2706 | } | |
2707 | if (i == 4) | |
2708 | DRM_ERROR("FDI train 2 fail!\n"); | |
2709 | ||
2710 | DRM_DEBUG_KMS("FDI train done.\n"); | |
2711 | } | |
2712 | ||
2713 | static void ironlake_fdi_pll_enable(struct drm_crtc *crtc) | |
2c07245f ZW |
2714 | { |
2715 | struct drm_device *dev = crtc->dev; | |
2716 | struct drm_i915_private *dev_priv = dev->dev_private; | |
2717 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | |
2718 | int pipe = intel_crtc->pipe; | |
5eddb70b | 2719 | u32 reg, temp; |
79e53945 | 2720 | |
c64e311e | 2721 | /* Write the TU size bits so error detection works */ |
5eddb70b CW |
2722 | I915_WRITE(FDI_RX_TUSIZE1(pipe), |
2723 | I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK); | |
c64e311e | 2724 | |
c98e9dcf | 2725 | /* enable PCH FDI RX PLL, wait warmup plus DMI latency */ |
5eddb70b CW |
2726 | reg = FDI_RX_CTL(pipe); |
2727 | temp = I915_READ(reg); | |
2728 | temp &= ~((0x7 << 19) | (0x7 << 16)); | |
c98e9dcf | 2729 | temp |= (intel_crtc->fdi_lanes - 1) << 19; |
5eddb70b CW |
2730 | temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11; |
2731 | I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE); | |
2732 | ||
2733 | POSTING_READ(reg); | |
c98e9dcf JB |
2734 | udelay(200); |
2735 | ||
2736 | /* Switch from Rawclk to PCDclk */ | |
5eddb70b CW |
2737 | temp = I915_READ(reg); |
2738 | I915_WRITE(reg, temp | FDI_PCDCLK); | |
2739 | ||
2740 | POSTING_READ(reg); | |
c98e9dcf JB |
2741 | udelay(200); |
2742 | ||
2743 | /* Enable CPU FDI TX PLL, always on for Ironlake */ | |
5eddb70b CW |
2744 | reg = FDI_TX_CTL(pipe); |
2745 | temp = I915_READ(reg); | |
c98e9dcf | 2746 | if ((temp & FDI_TX_PLL_ENABLE) == 0) { |
5eddb70b CW |
2747 | I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE); |
2748 | ||
2749 | POSTING_READ(reg); | |
c98e9dcf | 2750 | udelay(100); |
6be4a607 | 2751 | } |
0e23b99d JB |
2752 | } |
2753 | ||
291427f5 JB |
2754 | static void cpt_phase_pointer_disable(struct drm_device *dev, int pipe) |
2755 | { | |
2756 | struct drm_i915_private *dev_priv = dev->dev_private; | |
2757 | u32 flags = I915_READ(SOUTH_CHICKEN1); | |
2758 | ||
2759 | flags &= ~(FDI_PHASE_SYNC_EN(pipe)); | |
2760 | I915_WRITE(SOUTH_CHICKEN1, flags); /* once to disable... */ | |
2761 | flags &= ~(FDI_PHASE_SYNC_OVR(pipe)); | |
2762 | I915_WRITE(SOUTH_CHICKEN1, flags); /* then again to lock */ | |
2763 | POSTING_READ(SOUTH_CHICKEN1); | |
2764 | } | |
0fc932b8 JB |
2765 | static void ironlake_fdi_disable(struct drm_crtc *crtc) |
2766 | { | |
2767 | struct drm_device *dev = crtc->dev; | |
2768 | struct drm_i915_private *dev_priv = dev->dev_private; | |
2769 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | |
2770 | int pipe = intel_crtc->pipe; | |
2771 | u32 reg, temp; | |
2772 | ||
2773 | /* disable CPU FDI tx and PCH FDI rx */ | |
2774 | reg = FDI_TX_CTL(pipe); | |
2775 | temp = I915_READ(reg); | |
2776 | I915_WRITE(reg, temp & ~FDI_TX_ENABLE); | |
2777 | POSTING_READ(reg); | |
2778 | ||
2779 | reg = FDI_RX_CTL(pipe); | |
2780 | temp = I915_READ(reg); | |
2781 | temp &= ~(0x7 << 16); | |
2782 | temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11; | |
2783 | I915_WRITE(reg, temp & ~FDI_RX_ENABLE); | |
2784 | ||
2785 | POSTING_READ(reg); | |
2786 | udelay(100); | |
2787 | ||
2788 | /* Ironlake workaround, disable clock pointer after downing FDI */ | |
6f06ce18 JB |
2789 | if (HAS_PCH_IBX(dev)) { |
2790 | I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR); | |
0fc932b8 JB |
2791 | I915_WRITE(FDI_RX_CHICKEN(pipe), |
2792 | I915_READ(FDI_RX_CHICKEN(pipe) & | |
6f06ce18 | 2793 | ~FDI_RX_PHASE_SYNC_POINTER_EN)); |
291427f5 JB |
2794 | } else if (HAS_PCH_CPT(dev)) { |
2795 | cpt_phase_pointer_disable(dev, pipe); | |
6f06ce18 | 2796 | } |
0fc932b8 JB |
2797 | |
2798 | /* still set train pattern 1 */ | |
2799 | reg = FDI_TX_CTL(pipe); | |
2800 | temp = I915_READ(reg); | |
2801 | temp &= ~FDI_LINK_TRAIN_NONE; | |
2802 | temp |= FDI_LINK_TRAIN_PATTERN_1; | |
2803 | I915_WRITE(reg, temp); | |
2804 | ||
2805 | reg = FDI_RX_CTL(pipe); | |
2806 | temp = I915_READ(reg); | |
2807 | if (HAS_PCH_CPT(dev)) { | |
2808 | temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; | |
2809 | temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; | |
2810 | } else { | |
2811 | temp &= ~FDI_LINK_TRAIN_NONE; | |
2812 | temp |= FDI_LINK_TRAIN_PATTERN_1; | |
2813 | } | |
2814 | /* BPC in FDI rx is consistent with that in PIPECONF */ | |
2815 | temp &= ~(0x07 << 16); | |
2816 | temp |= (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) << 11; | |
2817 | I915_WRITE(reg, temp); | |
2818 | ||
2819 | POSTING_READ(reg); | |
2820 | udelay(100); | |
2821 | } | |
2822 | ||
6b383a7f CW |
2823 | /* |
2824 | * When we disable a pipe, we need to clear any pending scanline wait events | |
2825 | * to avoid hanging the ring, which we assume we are waiting on. | |
2826 | */ | |
2827 | static void intel_clear_scanline_wait(struct drm_device *dev) | |
2828 | { | |
2829 | struct drm_i915_private *dev_priv = dev->dev_private; | |
8168bd48 | 2830 | struct intel_ring_buffer *ring; |
6b383a7f CW |
2831 | u32 tmp; |
2832 | ||
2833 | if (IS_GEN2(dev)) | |
2834 | /* Can't break the hang on i8xx */ | |
2835 | return; | |
2836 | ||
1ec14ad3 | 2837 | ring = LP_RING(dev_priv); |
8168bd48 CW |
2838 | tmp = I915_READ_CTL(ring); |
2839 | if (tmp & RING_WAIT) | |
2840 | I915_WRITE_CTL(ring, tmp); | |
6b383a7f CW |
2841 | } |
2842 | ||
e6c3a2a6 CW |
2843 | static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc) |
2844 | { | |
05394f39 | 2845 | struct drm_i915_gem_object *obj; |
e6c3a2a6 CW |
2846 | struct drm_i915_private *dev_priv; |
2847 | ||
2848 | if (crtc->fb == NULL) | |
2849 | return; | |
2850 | ||
05394f39 | 2851 | obj = to_intel_framebuffer(crtc->fb)->obj; |
e6c3a2a6 CW |
2852 | dev_priv = crtc->dev->dev_private; |
2853 | wait_event(dev_priv->pending_flip_queue, | |
05394f39 | 2854 | atomic_read(&obj->pending_flip) == 0); |
e6c3a2a6 CW |
2855 | } |
2856 | ||
040484af JB |
2857 | static bool intel_crtc_driving_pch(struct drm_crtc *crtc) |
2858 | { | |
2859 | struct drm_device *dev = crtc->dev; | |
2860 | struct drm_mode_config *mode_config = &dev->mode_config; | |
2861 | struct intel_encoder *encoder; | |
2862 | ||
2863 | /* | |
2864 | * If there's a non-PCH eDP on this crtc, it must be DP_A, and that | |
2865 | * must be driven by its own crtc; no sharing is possible. | |
2866 | */ | |
2867 | list_for_each_entry(encoder, &mode_config->encoder_list, base.head) { | |
2868 | if (encoder->base.crtc != crtc) | |
2869 | continue; | |
2870 | ||
2871 | switch (encoder->type) { | |
2872 | case INTEL_OUTPUT_EDP: | |
2873 | if (!intel_encoder_is_pch_edp(&encoder->base)) | |
2874 | return false; | |
2875 | continue; | |
2876 | } | |
2877 | } | |
2878 | ||
2879 | return true; | |
2880 | } | |
2881 | ||
f67a559d JB |
2882 | /* |
2883 | * Enable PCH resources required for PCH ports: | |
2884 | * - PCH PLLs | |
2885 | * - FDI training & RX/TX | |
2886 | * - update transcoder timings | |
2887 | * - DP transcoding bits | |
2888 | * - transcoder | |
2889 | */ | |
2890 | static void ironlake_pch_enable(struct drm_crtc *crtc) | |
0e23b99d JB |
2891 | { |
2892 | struct drm_device *dev = crtc->dev; | |
2893 | struct drm_i915_private *dev_priv = dev->dev_private; | |
2894 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | |
2895 | int pipe = intel_crtc->pipe; | |
4b645f14 | 2896 | u32 reg, temp, transc_sel; |
2c07245f | 2897 | |
c98e9dcf | 2898 | /* For PCH output, training FDI link */ |
674cf967 | 2899 | dev_priv->display.fdi_link_train(crtc); |
2c07245f | 2900 | |
92f2584a | 2901 | intel_enable_pch_pll(dev_priv, pipe); |
8db9d77b | 2902 | |
c98e9dcf | 2903 | if (HAS_PCH_CPT(dev)) { |
4b645f14 JB |
2904 | transc_sel = intel_crtc->use_pll_a ? TRANSC_DPLLA_SEL : |
2905 | TRANSC_DPLLB_SEL; | |
2906 | ||
c98e9dcf JB |
2907 | /* Be sure PCH DPLL SEL is set */ |
2908 | temp = I915_READ(PCH_DPLL_SEL); | |
d64311ab JB |
2909 | if (pipe == 0) { |
2910 | temp &= ~(TRANSA_DPLLB_SEL); | |
c98e9dcf | 2911 | temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL); |
d64311ab JB |
2912 | } else if (pipe == 1) { |
2913 | temp &= ~(TRANSB_DPLLB_SEL); | |
c98e9dcf | 2914 | temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL); |
d64311ab JB |
2915 | } else if (pipe == 2) { |
2916 | temp &= ~(TRANSC_DPLLB_SEL); | |
4b645f14 | 2917 | temp |= (TRANSC_DPLL_ENABLE | transc_sel); |
d64311ab | 2918 | } |
c98e9dcf | 2919 | I915_WRITE(PCH_DPLL_SEL, temp); |
c98e9dcf | 2920 | } |
5eddb70b | 2921 | |
d9b6cb56 JB |
2922 | /* set transcoder timing, panel must allow it */ |
2923 | assert_panel_unlocked(dev_priv, pipe); | |
5eddb70b CW |
2924 | I915_WRITE(TRANS_HTOTAL(pipe), I915_READ(HTOTAL(pipe))); |
2925 | I915_WRITE(TRANS_HBLANK(pipe), I915_READ(HBLANK(pipe))); | |
2926 | I915_WRITE(TRANS_HSYNC(pipe), I915_READ(HSYNC(pipe))); | |
8db9d77b | 2927 | |
5eddb70b CW |
2928 | I915_WRITE(TRANS_VTOTAL(pipe), I915_READ(VTOTAL(pipe))); |
2929 | I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe))); | |
2930 | I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe))); | |
8db9d77b | 2931 | |
5e84e1a4 ZW |
2932 | intel_fdi_normal_train(crtc); |
2933 | ||
c98e9dcf JB |
2934 | /* For PCH DP, enable TRANS_DP_CTL */ |
2935 | if (HAS_PCH_CPT(dev) && | |
417e822d KP |
2936 | (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) || |
2937 | intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) { | |
9325c9f0 | 2938 | u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) >> 5; |
5eddb70b CW |
2939 | reg = TRANS_DP_CTL(pipe); |
2940 | temp = I915_READ(reg); | |
2941 | temp &= ~(TRANS_DP_PORT_SEL_MASK | | |
220cad3c EA |
2942 | TRANS_DP_SYNC_MASK | |
2943 | TRANS_DP_BPC_MASK); | |
5eddb70b CW |
2944 | temp |= (TRANS_DP_OUTPUT_ENABLE | |
2945 | TRANS_DP_ENH_FRAMING); | |
9325c9f0 | 2946 | temp |= bpc << 9; /* same format but at 11:9 */ |
c98e9dcf JB |
2947 | |
2948 | if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC) | |
5eddb70b | 2949 | temp |= TRANS_DP_HSYNC_ACTIVE_HIGH; |
c98e9dcf | 2950 | if (crtc->mode.flags & DRM_MODE_FLAG_PVSYNC) |
5eddb70b | 2951 | temp |= TRANS_DP_VSYNC_ACTIVE_HIGH; |
c98e9dcf JB |
2952 | |
2953 | switch (intel_trans_dp_port_sel(crtc)) { | |
2954 | case PCH_DP_B: | |
5eddb70b | 2955 | temp |= TRANS_DP_PORT_SEL_B; |
c98e9dcf JB |
2956 | break; |
2957 | case PCH_DP_C: | |
5eddb70b | 2958 | temp |= TRANS_DP_PORT_SEL_C; |
c98e9dcf JB |
2959 | break; |
2960 | case PCH_DP_D: | |
5eddb70b | 2961 | temp |= TRANS_DP_PORT_SEL_D; |
c98e9dcf JB |
2962 | break; |
2963 | default: | |
2964 | DRM_DEBUG_KMS("Wrong PCH DP port return. Guess port B\n"); | |
5eddb70b | 2965 | temp |= TRANS_DP_PORT_SEL_B; |
c98e9dcf | 2966 | break; |
32f9d658 | 2967 | } |
2c07245f | 2968 | |
5eddb70b | 2969 | I915_WRITE(reg, temp); |
6be4a607 | 2970 | } |
b52eb4dc | 2971 | |
040484af | 2972 | intel_enable_transcoder(dev_priv, pipe); |
f67a559d JB |
2973 | } |
2974 | ||
d4270e57 JB |
2975 | void intel_cpt_verify_modeset(struct drm_device *dev, int pipe) |
2976 | { | |
2977 | struct drm_i915_private *dev_priv = dev->dev_private; | |
2978 | int dslreg = PIPEDSL(pipe), tc2reg = TRANS_CHICKEN2(pipe); | |
2979 | u32 temp; | |
2980 | ||
2981 | temp = I915_READ(dslreg); | |
2982 | udelay(500); | |
2983 | if (wait_for(I915_READ(dslreg) != temp, 5)) { | |
2984 | /* Without this, mode sets may fail silently on FDI */ | |
2985 | I915_WRITE(tc2reg, TRANS_AUTOTRAIN_GEN_STALL_DIS); | |
2986 | udelay(250); | |
2987 | I915_WRITE(tc2reg, 0); | |
2988 | if (wait_for(I915_READ(dslreg) != temp, 5)) | |
2989 | DRM_ERROR("mode set failed: pipe %d stuck\n", pipe); | |
2990 | } | |
2991 | } | |
2992 | ||
f67a559d JB |
2993 | static void ironlake_crtc_enable(struct drm_crtc *crtc) |
2994 | { | |
2995 | struct drm_device *dev = crtc->dev; | |
2996 | struct drm_i915_private *dev_priv = dev->dev_private; | |
2997 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | |
2998 | int pipe = intel_crtc->pipe; | |
2999 | int plane = intel_crtc->plane; | |
3000 | u32 temp; | |
3001 | bool is_pch_port; | |
3002 | ||
3003 | if (intel_crtc->active) | |
3004 | return; | |
3005 | ||
3006 | intel_crtc->active = true; | |
3007 | intel_update_watermarks(dev); | |
3008 | ||
3009 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { | |
3010 | temp = I915_READ(PCH_LVDS); | |
3011 | if ((temp & LVDS_PORT_EN) == 0) | |
3012 | I915_WRITE(PCH_LVDS, temp | LVDS_PORT_EN); | |
3013 | } | |
3014 | ||
3015 | is_pch_port = intel_crtc_driving_pch(crtc); | |
3016 | ||
3017 | if (is_pch_port) | |
357555c0 | 3018 | ironlake_fdi_pll_enable(crtc); |
f67a559d JB |
3019 | else |
3020 | ironlake_fdi_disable(crtc); | |
3021 | ||
3022 | /* Enable panel fitting for LVDS */ | |
3023 | if (dev_priv->pch_pf_size && | |
3024 | (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || HAS_eDP)) { | |
3025 | /* Force use of hard-coded filter coefficients | |
3026 | * as some pre-programmed values are broken, | |
3027 | * e.g. x201. | |
3028 | */ | |
9db4a9c7 JB |
3029 | I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3); |
3030 | I915_WRITE(PF_WIN_POS(pipe), dev_priv->pch_pf_pos); | |
3031 | I915_WRITE(PF_WIN_SZ(pipe), dev_priv->pch_pf_size); | |
f67a559d JB |
3032 | } |
3033 | ||
9c54c0dd JB |
3034 | /* |
3035 | * On ILK+ LUT must be loaded before the pipe is running but with | |
3036 | * clocks enabled | |
3037 | */ | |
3038 | intel_crtc_load_lut(crtc); | |
3039 | ||
f67a559d JB |
3040 | intel_enable_pipe(dev_priv, pipe, is_pch_port); |
3041 | intel_enable_plane(dev_priv, plane, pipe); | |
3042 | ||
3043 | if (is_pch_port) | |
3044 | ironlake_pch_enable(crtc); | |
c98e9dcf | 3045 | |
d1ebd816 | 3046 | mutex_lock(&dev->struct_mutex); |
bed4a673 | 3047 | intel_update_fbc(dev); |
d1ebd816 BW |
3048 | mutex_unlock(&dev->struct_mutex); |
3049 | ||
6b383a7f | 3050 | intel_crtc_update_cursor(crtc, true); |
6be4a607 JB |
3051 | } |
3052 | ||
3053 | static void ironlake_crtc_disable(struct drm_crtc *crtc) | |
3054 | { | |
3055 | struct drm_device *dev = crtc->dev; | |
3056 | struct drm_i915_private *dev_priv = dev->dev_private; | |
3057 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | |
3058 | int pipe = intel_crtc->pipe; | |
3059 | int plane = intel_crtc->plane; | |
5eddb70b | 3060 | u32 reg, temp; |
b52eb4dc | 3061 | |
f7abfe8b CW |
3062 | if (!intel_crtc->active) |
3063 | return; | |
3064 | ||
e6c3a2a6 | 3065 | intel_crtc_wait_for_pending_flips(crtc); |
6be4a607 | 3066 | drm_vblank_off(dev, pipe); |
6b383a7f | 3067 | intel_crtc_update_cursor(crtc, false); |
5eddb70b | 3068 | |
b24e7179 | 3069 | intel_disable_plane(dev_priv, plane, pipe); |
913d8d11 | 3070 | |
973d04f9 CW |
3071 | if (dev_priv->cfb_plane == plane) |
3072 | intel_disable_fbc(dev); | |
2c07245f | 3073 | |
b24e7179 | 3074 | intel_disable_pipe(dev_priv, pipe); |
32f9d658 | 3075 | |
6be4a607 | 3076 | /* Disable PF */ |
9db4a9c7 JB |
3077 | I915_WRITE(PF_CTL(pipe), 0); |
3078 | I915_WRITE(PF_WIN_SZ(pipe), 0); | |
2c07245f | 3079 | |
0fc932b8 | 3080 | ironlake_fdi_disable(crtc); |
2c07245f | 3081 | |
47a05eca JB |
3082 | /* This is a horrible layering violation; we should be doing this in |
3083 | * the connector/encoder ->prepare instead, but we don't always have | |
3084 | * enough information there about the config to know whether it will | |
3085 | * actually be necessary or just cause undesired flicker. | |
3086 | */ | |
3087 | intel_disable_pch_ports(dev_priv, pipe); | |
249c0e64 | 3088 | |
040484af | 3089 | intel_disable_transcoder(dev_priv, pipe); |
913d8d11 | 3090 | |
6be4a607 JB |
3091 | if (HAS_PCH_CPT(dev)) { |
3092 | /* disable TRANS_DP_CTL */ | |
5eddb70b CW |
3093 | reg = TRANS_DP_CTL(pipe); |
3094 | temp = I915_READ(reg); | |
3095 | temp &= ~(TRANS_DP_OUTPUT_ENABLE | TRANS_DP_PORT_SEL_MASK); | |
cb3543c6 | 3096 | temp |= TRANS_DP_PORT_SEL_NONE; |
5eddb70b | 3097 | I915_WRITE(reg, temp); |
6be4a607 JB |
3098 | |
3099 | /* disable DPLL_SEL */ | |
3100 | temp = I915_READ(PCH_DPLL_SEL); | |
9db4a9c7 JB |
3101 | switch (pipe) { |
3102 | case 0: | |
d64311ab | 3103 | temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL); |
9db4a9c7 JB |
3104 | break; |
3105 | case 1: | |
6be4a607 | 3106 | temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL); |
9db4a9c7 JB |
3107 | break; |
3108 | case 2: | |
4b645f14 | 3109 | /* C shares PLL A or B */ |
d64311ab | 3110 | temp &= ~(TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL); |
9db4a9c7 JB |
3111 | break; |
3112 | default: | |
3113 | BUG(); /* wtf */ | |
3114 | } | |
6be4a607 | 3115 | I915_WRITE(PCH_DPLL_SEL, temp); |
6be4a607 | 3116 | } |
e3421a18 | 3117 | |
6be4a607 | 3118 | /* disable PCH DPLL */ |
4b645f14 JB |
3119 | if (!intel_crtc->no_pll) |
3120 | intel_disable_pch_pll(dev_priv, pipe); | |
8db9d77b | 3121 | |
6be4a607 | 3122 | /* Switch from PCDclk to Rawclk */ |
5eddb70b CW |
3123 | reg = FDI_RX_CTL(pipe); |
3124 | temp = I915_READ(reg); | |
3125 | I915_WRITE(reg, temp & ~FDI_PCDCLK); | |
8db9d77b | 3126 | |
6be4a607 | 3127 | /* Disable CPU FDI TX PLL */ |
5eddb70b CW |
3128 | reg = FDI_TX_CTL(pipe); |
3129 | temp = I915_READ(reg); | |
3130 | I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE); | |
3131 | ||
3132 | POSTING_READ(reg); | |
6be4a607 | 3133 | udelay(100); |
8db9d77b | 3134 | |
5eddb70b CW |
3135 | reg = FDI_RX_CTL(pipe); |
3136 | temp = I915_READ(reg); | |
3137 | I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE); | |
2c07245f | 3138 | |
6be4a607 | 3139 | /* Wait for the clocks to turn off. */ |
5eddb70b | 3140 | POSTING_READ(reg); |
6be4a607 | 3141 | udelay(100); |
6b383a7f | 3142 | |
f7abfe8b | 3143 | intel_crtc->active = false; |
6b383a7f | 3144 | intel_update_watermarks(dev); |
d1ebd816 BW |
3145 | |
3146 | mutex_lock(&dev->struct_mutex); | |
6b383a7f CW |
3147 | intel_update_fbc(dev); |
3148 | intel_clear_scanline_wait(dev); | |
d1ebd816 | 3149 | mutex_unlock(&dev->struct_mutex); |
6be4a607 | 3150 | } |
1b3c7a47 | 3151 | |
6be4a607 JB |
3152 | static void ironlake_crtc_dpms(struct drm_crtc *crtc, int mode) |
3153 | { | |
3154 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | |
3155 | int pipe = intel_crtc->pipe; | |
3156 | int plane = intel_crtc->plane; | |
8db9d77b | 3157 | |
6be4a607 JB |
3158 | /* XXX: When our outputs are all unaware of DPMS modes other than off |
3159 | * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC. | |
3160 | */ | |
3161 | switch (mode) { | |
3162 | case DRM_MODE_DPMS_ON: | |
3163 | case DRM_MODE_DPMS_STANDBY: | |
3164 | case DRM_MODE_DPMS_SUSPEND: | |
3165 | DRM_DEBUG_KMS("crtc %d/%d dpms on\n", pipe, plane); | |
3166 | ironlake_crtc_enable(crtc); | |
3167 | break; | |
1b3c7a47 | 3168 | |
6be4a607 JB |
3169 | case DRM_MODE_DPMS_OFF: |
3170 | DRM_DEBUG_KMS("crtc %d/%d dpms off\n", pipe, plane); | |
3171 | ironlake_crtc_disable(crtc); | |
2c07245f ZW |
3172 | break; |
3173 | } | |
3174 | } | |
3175 | ||
02e792fb DV |
3176 | static void intel_crtc_dpms_overlay(struct intel_crtc *intel_crtc, bool enable) |
3177 | { | |
02e792fb | 3178 | if (!enable && intel_crtc->overlay) { |
23f09ce3 | 3179 | struct drm_device *dev = intel_crtc->base.dev; |
ce453d81 | 3180 | struct drm_i915_private *dev_priv = dev->dev_private; |
03f77ea5 | 3181 | |
23f09ce3 | 3182 | mutex_lock(&dev->struct_mutex); |
ce453d81 CW |
3183 | dev_priv->mm.interruptible = false; |
3184 | (void) intel_overlay_switch_off(intel_crtc->overlay); | |
3185 | dev_priv->mm.interruptible = true; | |
23f09ce3 | 3186 | mutex_unlock(&dev->struct_mutex); |
02e792fb | 3187 | } |
02e792fb | 3188 | |
5dcdbcb0 CW |
3189 | /* Let userspace switch the overlay on again. In most cases userspace |
3190 | * has to recompute where to put it anyway. | |
3191 | */ | |
02e792fb DV |
3192 | } |
3193 | ||
0b8765c6 | 3194 | static void i9xx_crtc_enable(struct drm_crtc *crtc) |
79e53945 JB |
3195 | { |
3196 | struct drm_device *dev = crtc->dev; | |
79e53945 JB |
3197 | struct drm_i915_private *dev_priv = dev->dev_private; |
3198 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | |
3199 | int pipe = intel_crtc->pipe; | |
80824003 | 3200 | int plane = intel_crtc->plane; |
79e53945 | 3201 | |
f7abfe8b CW |
3202 | if (intel_crtc->active) |
3203 | return; | |
3204 | ||
3205 | intel_crtc->active = true; | |
6b383a7f CW |
3206 | intel_update_watermarks(dev); |
3207 | ||
63d7bbe9 | 3208 | intel_enable_pll(dev_priv, pipe); |
040484af | 3209 | intel_enable_pipe(dev_priv, pipe, false); |
b24e7179 | 3210 | intel_enable_plane(dev_priv, plane, pipe); |
79e53945 | 3211 | |
0b8765c6 | 3212 | intel_crtc_load_lut(crtc); |
bed4a673 | 3213 | intel_update_fbc(dev); |
79e53945 | 3214 | |
0b8765c6 JB |
3215 | /* Give the overlay scaler a chance to enable if it's on this pipe */ |
3216 | intel_crtc_dpms_overlay(intel_crtc, true); | |
6b383a7f | 3217 | intel_crtc_update_cursor(crtc, true); |
0b8765c6 | 3218 | } |
79e53945 | 3219 | |
0b8765c6 JB |
3220 | static void i9xx_crtc_disable(struct drm_crtc *crtc) |
3221 | { | |
3222 | struct drm_device *dev = crtc->dev; | |
3223 | struct drm_i915_private *dev_priv = dev->dev_private; | |
3224 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | |
3225 | int pipe = intel_crtc->pipe; | |
3226 | int plane = intel_crtc->plane; | |
b690e96c | 3227 | |
f7abfe8b CW |
3228 | if (!intel_crtc->active) |
3229 | return; | |
3230 | ||
0b8765c6 | 3231 | /* Give the overlay scaler a chance to disable if it's on this pipe */ |
e6c3a2a6 CW |
3232 | intel_crtc_wait_for_pending_flips(crtc); |
3233 | drm_vblank_off(dev, pipe); | |
0b8765c6 | 3234 | intel_crtc_dpms_overlay(intel_crtc, false); |
6b383a7f | 3235 | intel_crtc_update_cursor(crtc, false); |
0b8765c6 | 3236 | |
973d04f9 CW |
3237 | if (dev_priv->cfb_plane == plane) |
3238 | intel_disable_fbc(dev); | |
79e53945 | 3239 | |
b24e7179 | 3240 | intel_disable_plane(dev_priv, plane, pipe); |
b24e7179 | 3241 | intel_disable_pipe(dev_priv, pipe); |
63d7bbe9 | 3242 | intel_disable_pll(dev_priv, pipe); |
0b8765c6 | 3243 | |
f7abfe8b | 3244 | intel_crtc->active = false; |
6b383a7f CW |
3245 | intel_update_fbc(dev); |
3246 | intel_update_watermarks(dev); | |
3247 | intel_clear_scanline_wait(dev); | |
0b8765c6 JB |
3248 | } |
3249 | ||
3250 | static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode) | |
3251 | { | |
3252 | /* XXX: When our outputs are all unaware of DPMS modes other than off | |
3253 | * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC. | |
3254 | */ | |
3255 | switch (mode) { | |
3256 | case DRM_MODE_DPMS_ON: | |
3257 | case DRM_MODE_DPMS_STANDBY: | |
3258 | case DRM_MODE_DPMS_SUSPEND: | |
3259 | i9xx_crtc_enable(crtc); | |
3260 | break; | |
3261 | case DRM_MODE_DPMS_OFF: | |
3262 | i9xx_crtc_disable(crtc); | |
79e53945 JB |
3263 | break; |
3264 | } | |
2c07245f ZW |
3265 | } |
3266 | ||
3267 | /** | |
3268 | * Sets the power management mode of the pipe and plane. | |
2c07245f ZW |
3269 | */ |
3270 | static void intel_crtc_dpms(struct drm_crtc *crtc, int mode) | |
3271 | { | |
3272 | struct drm_device *dev = crtc->dev; | |
e70236a8 | 3273 | struct drm_i915_private *dev_priv = dev->dev_private; |
2c07245f ZW |
3274 | struct drm_i915_master_private *master_priv; |
3275 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | |
3276 | int pipe = intel_crtc->pipe; | |
3277 | bool enabled; | |
3278 | ||
032d2a0d CW |
3279 | if (intel_crtc->dpms_mode == mode) |
3280 | return; | |
3281 | ||
65655d4a | 3282 | intel_crtc->dpms_mode = mode; |
debcaddc | 3283 | |
e70236a8 | 3284 | dev_priv->display.dpms(crtc, mode); |
79e53945 JB |
3285 | |
3286 | if (!dev->primary->master) | |
3287 | return; | |
3288 | ||
3289 | master_priv = dev->primary->master->driver_priv; | |
3290 | if (!master_priv->sarea_priv) | |
3291 | return; | |
3292 | ||
3293 | enabled = crtc->enabled && mode != DRM_MODE_DPMS_OFF; | |
3294 | ||
3295 | switch (pipe) { | |
3296 | case 0: | |
3297 | master_priv->sarea_priv->pipeA_w = enabled ? crtc->mode.hdisplay : 0; | |
3298 | master_priv->sarea_priv->pipeA_h = enabled ? crtc->mode.vdisplay : 0; | |
3299 | break; | |
3300 | case 1: | |
3301 | master_priv->sarea_priv->pipeB_w = enabled ? crtc->mode.hdisplay : 0; | |
3302 | master_priv->sarea_priv->pipeB_h = enabled ? crtc->mode.vdisplay : 0; | |
3303 | break; | |
3304 | default: | |
9db4a9c7 | 3305 | DRM_ERROR("Can't update pipe %c in SAREA\n", pipe_name(pipe)); |
79e53945 JB |
3306 | break; |
3307 | } | |
79e53945 JB |
3308 | } |
3309 | ||
cdd59983 CW |
3310 | static void intel_crtc_disable(struct drm_crtc *crtc) |
3311 | { | |
3312 | struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; | |
3313 | struct drm_device *dev = crtc->dev; | |
3314 | ||
3315 | crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF); | |
3316 | ||
3317 | if (crtc->fb) { | |
3318 | mutex_lock(&dev->struct_mutex); | |
3319 | i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj); | |
3320 | mutex_unlock(&dev->struct_mutex); | |
3321 | } | |
3322 | } | |
3323 | ||
7e7d76c3 JB |
3324 | /* Prepare for a mode set. |
3325 | * | |
3326 | * Note we could be a lot smarter here. We need to figure out which outputs | |
3327 | * will be enabled, which disabled (in short, how the config will changes) | |
3328 | * and perform the minimum necessary steps to accomplish that, e.g. updating | |
3329 | * watermarks, FBC configuration, making sure PLLs are programmed correctly, | |
3330 | * panel fitting is in the proper state, etc. | |
3331 | */ | |
3332 | static void i9xx_crtc_prepare(struct drm_crtc *crtc) | |
79e53945 | 3333 | { |
7e7d76c3 | 3334 | i9xx_crtc_disable(crtc); |
79e53945 JB |
3335 | } |
3336 | ||
7e7d76c3 | 3337 | static void i9xx_crtc_commit(struct drm_crtc *crtc) |
79e53945 | 3338 | { |
7e7d76c3 | 3339 | i9xx_crtc_enable(crtc); |
7e7d76c3 JB |
3340 | } |
3341 | ||
3342 | static void ironlake_crtc_prepare(struct drm_crtc *crtc) | |
3343 | { | |
7e7d76c3 | 3344 | ironlake_crtc_disable(crtc); |
7e7d76c3 JB |
3345 | } |
3346 | ||
3347 | static void ironlake_crtc_commit(struct drm_crtc *crtc) | |
3348 | { | |
7e7d76c3 | 3349 | ironlake_crtc_enable(crtc); |
79e53945 JB |
3350 | } |
3351 | ||
0206e353 | 3352 | void intel_encoder_prepare(struct drm_encoder *encoder) |
79e53945 JB |
3353 | { |
3354 | struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; | |
3355 | /* lvds has its own version of prepare see intel_lvds_prepare */ | |
3356 | encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF); | |
3357 | } | |
3358 | ||
0206e353 | 3359 | void intel_encoder_commit(struct drm_encoder *encoder) |
79e53945 JB |
3360 | { |
3361 | struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; | |
d4270e57 JB |
3362 | struct drm_device *dev = encoder->dev; |
3363 | struct intel_encoder *intel_encoder = to_intel_encoder(encoder); | |
3364 | struct intel_crtc *intel_crtc = to_intel_crtc(intel_encoder->base.crtc); | |
3365 | ||
79e53945 JB |
3366 | /* lvds has its own version of commit see intel_lvds_commit */ |
3367 | encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON); | |
d4270e57 JB |
3368 | |
3369 | if (HAS_PCH_CPT(dev)) | |
3370 | intel_cpt_verify_modeset(dev, intel_crtc->pipe); | |
79e53945 JB |
3371 | } |
3372 | ||
ea5b213a CW |
3373 | void intel_encoder_destroy(struct drm_encoder *encoder) |
3374 | { | |
4ef69c7a | 3375 | struct intel_encoder *intel_encoder = to_intel_encoder(encoder); |
ea5b213a | 3376 | |
ea5b213a CW |
3377 | drm_encoder_cleanup(encoder); |
3378 | kfree(intel_encoder); | |
3379 | } | |
3380 | ||
79e53945 JB |
3381 | static bool intel_crtc_mode_fixup(struct drm_crtc *crtc, |
3382 | struct drm_display_mode *mode, | |
3383 | struct drm_display_mode *adjusted_mode) | |
3384 | { | |
2c07245f | 3385 | struct drm_device *dev = crtc->dev; |
89749350 | 3386 | |
bad720ff | 3387 | if (HAS_PCH_SPLIT(dev)) { |
2c07245f | 3388 | /* FDI link clock is fixed at 2.7G */ |
2377b741 JB |
3389 | if (mode->clock * 3 > IRONLAKE_FDI_FREQ * 4) |
3390 | return false; | |
2c07245f | 3391 | } |
89749350 CW |
3392 | |
3393 | /* XXX some encoders set the crtcinfo, others don't. | |
3394 | * Obviously we need some form of conflict resolution here... | |
3395 | */ | |
3396 | if (adjusted_mode->crtc_htotal == 0) | |
3397 | drm_mode_set_crtcinfo(adjusted_mode, 0); | |
3398 | ||
79e53945 JB |
3399 | return true; |
3400 | } | |
3401 | ||
e70236a8 JB |
3402 | static int i945_get_display_clock_speed(struct drm_device *dev) |
3403 | { | |
3404 | return 400000; | |
3405 | } | |
79e53945 | 3406 | |
e70236a8 | 3407 | static int i915_get_display_clock_speed(struct drm_device *dev) |
79e53945 | 3408 | { |
e70236a8 JB |
3409 | return 333000; |
3410 | } | |
79e53945 | 3411 | |
e70236a8 JB |
3412 | static int i9xx_misc_get_display_clock_speed(struct drm_device *dev) |
3413 | { | |
3414 | return 200000; | |
3415 | } | |
79e53945 | 3416 | |
e70236a8 JB |
3417 | static int i915gm_get_display_clock_speed(struct drm_device *dev) |
3418 | { | |
3419 | u16 gcfgc = 0; | |
79e53945 | 3420 | |
e70236a8 JB |
3421 | pci_read_config_word(dev->pdev, GCFGC, &gcfgc); |
3422 | ||
3423 | if (gcfgc & GC_LOW_FREQUENCY_ENABLE) | |
3424 | return 133000; | |
3425 | else { | |
3426 | switch (gcfgc & GC_DISPLAY_CLOCK_MASK) { | |
3427 | case GC_DISPLAY_CLOCK_333_MHZ: | |
3428 | return 333000; | |
3429 | default: | |
3430 | case GC_DISPLAY_CLOCK_190_200_MHZ: | |
3431 | return 190000; | |
79e53945 | 3432 | } |
e70236a8 JB |
3433 | } |
3434 | } | |
3435 | ||
3436 | static int i865_get_display_clock_speed(struct drm_device *dev) | |
3437 | { | |
3438 | return 266000; | |
3439 | } | |
3440 | ||
3441 | static int i855_get_display_clock_speed(struct drm_device *dev) | |
3442 | { | |
3443 | u16 hpllcc = 0; | |
3444 | /* Assume that the hardware is in the high speed state. This | |
3445 | * should be the default. | |
3446 | */ | |
3447 | switch (hpllcc & GC_CLOCK_CONTROL_MASK) { | |
3448 | case GC_CLOCK_133_200: | |
3449 | case GC_CLOCK_100_200: | |
3450 | return 200000; | |
3451 | case GC_CLOCK_166_250: | |
3452 | return 250000; | |
3453 | case GC_CLOCK_100_133: | |
79e53945 | 3454 | return 133000; |
e70236a8 | 3455 | } |
79e53945 | 3456 | |
e70236a8 JB |
3457 | /* Shouldn't happen */ |
3458 | return 0; | |
3459 | } | |
79e53945 | 3460 | |
e70236a8 JB |
3461 | static int i830_get_display_clock_speed(struct drm_device *dev) |
3462 | { | |
3463 | return 133000; | |
79e53945 JB |
3464 | } |
3465 | ||
2c07245f ZW |
3466 | struct fdi_m_n { |
3467 | u32 tu; | |
3468 | u32 gmch_m; | |
3469 | u32 gmch_n; | |
3470 | u32 link_m; | |
3471 | u32 link_n; | |
3472 | }; | |
3473 | ||
3474 | static void | |
3475 | fdi_reduce_ratio(u32 *num, u32 *den) | |
3476 | { | |
3477 | while (*num > 0xffffff || *den > 0xffffff) { | |
3478 | *num >>= 1; | |
3479 | *den >>= 1; | |
3480 | } | |
3481 | } | |
3482 | ||
2c07245f | 3483 | static void |
f2b115e6 AJ |
3484 | ironlake_compute_m_n(int bits_per_pixel, int nlanes, int pixel_clock, |
3485 | int link_clock, struct fdi_m_n *m_n) | |
2c07245f | 3486 | { |
2c07245f ZW |
3487 | m_n->tu = 64; /* default size */ |
3488 | ||
22ed1113 CW |
3489 | /* BUG_ON(pixel_clock > INT_MAX / 36); */ |
3490 | m_n->gmch_m = bits_per_pixel * pixel_clock; | |
3491 | m_n->gmch_n = link_clock * nlanes * 8; | |
2c07245f ZW |
3492 | fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n); |
3493 | ||
22ed1113 CW |
3494 | m_n->link_m = pixel_clock; |
3495 | m_n->link_n = link_clock; | |
2c07245f ZW |
3496 | fdi_reduce_ratio(&m_n->link_m, &m_n->link_n); |
3497 | } | |
3498 | ||
3499 | ||
7662c8bd SL |
3500 | struct intel_watermark_params { |
3501 | unsigned long fifo_size; | |
3502 | unsigned long max_wm; | |
3503 | unsigned long default_wm; | |
3504 | unsigned long guard_size; | |
3505 | unsigned long cacheline_size; | |
3506 | }; | |
3507 | ||
f2b115e6 | 3508 | /* Pineview has different values for various configs */ |
d210246a | 3509 | static const struct intel_watermark_params pineview_display_wm = { |
f2b115e6 AJ |
3510 | PINEVIEW_DISPLAY_FIFO, |
3511 | PINEVIEW_MAX_WM, | |
3512 | PINEVIEW_DFT_WM, | |
3513 | PINEVIEW_GUARD_WM, | |
3514 | PINEVIEW_FIFO_LINE_SIZE | |
7662c8bd | 3515 | }; |
d210246a | 3516 | static const struct intel_watermark_params pineview_display_hplloff_wm = { |
f2b115e6 AJ |
3517 | PINEVIEW_DISPLAY_FIFO, |
3518 | PINEVIEW_MAX_WM, | |
3519 | PINEVIEW_DFT_HPLLOFF_WM, | |
3520 | PINEVIEW_GUARD_WM, | |
3521 | PINEVIEW_FIFO_LINE_SIZE | |
7662c8bd | 3522 | }; |
d210246a | 3523 | static const struct intel_watermark_params pineview_cursor_wm = { |
f2b115e6 AJ |
3524 | PINEVIEW_CURSOR_FIFO, |
3525 | PINEVIEW_CURSOR_MAX_WM, | |
3526 | PINEVIEW_CURSOR_DFT_WM, | |
3527 | PINEVIEW_CURSOR_GUARD_WM, | |
3528 | PINEVIEW_FIFO_LINE_SIZE, | |
7662c8bd | 3529 | }; |
d210246a | 3530 | static const struct intel_watermark_params pineview_cursor_hplloff_wm = { |
f2b115e6 AJ |
3531 | PINEVIEW_CURSOR_FIFO, |
3532 | PINEVIEW_CURSOR_MAX_WM, | |
3533 | PINEVIEW_CURSOR_DFT_WM, | |
3534 | PINEVIEW_CURSOR_GUARD_WM, | |
3535 | PINEVIEW_FIFO_LINE_SIZE | |
7662c8bd | 3536 | }; |
d210246a | 3537 | static const struct intel_watermark_params g4x_wm_info = { |
0e442c60 JB |
3538 | G4X_FIFO_SIZE, |
3539 | G4X_MAX_WM, | |
3540 | G4X_MAX_WM, | |
3541 | 2, | |
3542 | G4X_FIFO_LINE_SIZE, | |
3543 | }; | |
d210246a | 3544 | static const struct intel_watermark_params g4x_cursor_wm_info = { |
4fe5e611 ZY |
3545 | I965_CURSOR_FIFO, |
3546 | I965_CURSOR_MAX_WM, | |
3547 | I965_CURSOR_DFT_WM, | |
3548 | 2, | |
3549 | G4X_FIFO_LINE_SIZE, | |
3550 | }; | |
d210246a | 3551 | static const struct intel_watermark_params i965_cursor_wm_info = { |
4fe5e611 ZY |
3552 | I965_CURSOR_FIFO, |
3553 | I965_CURSOR_MAX_WM, | |
3554 | I965_CURSOR_DFT_WM, | |
3555 | 2, | |
3556 | I915_FIFO_LINE_SIZE, | |
3557 | }; | |
d210246a | 3558 | static const struct intel_watermark_params i945_wm_info = { |
dff33cfc | 3559 | I945_FIFO_SIZE, |
7662c8bd SL |
3560 | I915_MAX_WM, |
3561 | 1, | |
dff33cfc JB |
3562 | 2, |
3563 | I915_FIFO_LINE_SIZE | |
7662c8bd | 3564 | }; |
d210246a | 3565 | static const struct intel_watermark_params i915_wm_info = { |
dff33cfc | 3566 | I915_FIFO_SIZE, |
7662c8bd SL |
3567 | I915_MAX_WM, |
3568 | 1, | |
dff33cfc | 3569 | 2, |
7662c8bd SL |
3570 | I915_FIFO_LINE_SIZE |
3571 | }; | |
d210246a | 3572 | static const struct intel_watermark_params i855_wm_info = { |
7662c8bd SL |
3573 | I855GM_FIFO_SIZE, |
3574 | I915_MAX_WM, | |
3575 | 1, | |
dff33cfc | 3576 | 2, |
7662c8bd SL |
3577 | I830_FIFO_LINE_SIZE |
3578 | }; | |
d210246a | 3579 | static const struct intel_watermark_params i830_wm_info = { |
7662c8bd SL |
3580 | I830_FIFO_SIZE, |
3581 | I915_MAX_WM, | |
3582 | 1, | |
dff33cfc | 3583 | 2, |
7662c8bd SL |
3584 | I830_FIFO_LINE_SIZE |
3585 | }; | |
3586 | ||
d210246a | 3587 | static const struct intel_watermark_params ironlake_display_wm_info = { |
7f8a8569 ZW |
3588 | ILK_DISPLAY_FIFO, |
3589 | ILK_DISPLAY_MAXWM, | |
3590 | ILK_DISPLAY_DFTWM, | |
3591 | 2, | |
3592 | ILK_FIFO_LINE_SIZE | |
3593 | }; | |
d210246a | 3594 | static const struct intel_watermark_params ironlake_cursor_wm_info = { |
c936f44d ZY |
3595 | ILK_CURSOR_FIFO, |
3596 | ILK_CURSOR_MAXWM, | |
3597 | ILK_CURSOR_DFTWM, | |
3598 | 2, | |
3599 | ILK_FIFO_LINE_SIZE | |
3600 | }; | |
d210246a | 3601 | static const struct intel_watermark_params ironlake_display_srwm_info = { |
7f8a8569 ZW |
3602 | ILK_DISPLAY_SR_FIFO, |
3603 | ILK_DISPLAY_MAX_SRWM, | |
3604 | ILK_DISPLAY_DFT_SRWM, | |
3605 | 2, | |
3606 | ILK_FIFO_LINE_SIZE | |
3607 | }; | |
d210246a | 3608 | static const struct intel_watermark_params ironlake_cursor_srwm_info = { |
7f8a8569 ZW |
3609 | ILK_CURSOR_SR_FIFO, |
3610 | ILK_CURSOR_MAX_SRWM, | |
3611 | ILK_CURSOR_DFT_SRWM, | |
3612 | 2, | |
3613 | ILK_FIFO_LINE_SIZE | |
3614 | }; | |
3615 | ||
d210246a | 3616 | static const struct intel_watermark_params sandybridge_display_wm_info = { |
1398261a YL |
3617 | SNB_DISPLAY_FIFO, |
3618 | SNB_DISPLAY_MAXWM, | |
3619 | SNB_DISPLAY_DFTWM, | |
3620 | 2, | |
3621 | SNB_FIFO_LINE_SIZE | |
3622 | }; | |
d210246a | 3623 | static const struct intel_watermark_params sandybridge_cursor_wm_info = { |
1398261a YL |
3624 | SNB_CURSOR_FIFO, |
3625 | SNB_CURSOR_MAXWM, | |
3626 | SNB_CURSOR_DFTWM, | |
3627 | 2, | |
3628 | SNB_FIFO_LINE_SIZE | |
3629 | }; | |
d210246a | 3630 | static const struct intel_watermark_params sandybridge_display_srwm_info = { |
1398261a YL |
3631 | SNB_DISPLAY_SR_FIFO, |
3632 | SNB_DISPLAY_MAX_SRWM, | |
3633 | SNB_DISPLAY_DFT_SRWM, | |
3634 | 2, | |
3635 | SNB_FIFO_LINE_SIZE | |
3636 | }; | |
d210246a | 3637 | static const struct intel_watermark_params sandybridge_cursor_srwm_info = { |
1398261a YL |
3638 | SNB_CURSOR_SR_FIFO, |
3639 | SNB_CURSOR_MAX_SRWM, | |
3640 | SNB_CURSOR_DFT_SRWM, | |
3641 | 2, | |
3642 | SNB_FIFO_LINE_SIZE | |
3643 | }; | |
3644 | ||
3645 | ||
dff33cfc JB |
3646 | /** |
3647 | * intel_calculate_wm - calculate watermark level | |
3648 | * @clock_in_khz: pixel clock | |
3649 | * @wm: chip FIFO params | |
3650 | * @pixel_size: display pixel size | |
3651 | * @latency_ns: memory latency for the platform | |
3652 | * | |
3653 | * Calculate the watermark level (the level at which the display plane will | |
3654 | * start fetching from memory again). Each chip has a different display | |
3655 | * FIFO size and allocation, so the caller needs to figure that out and pass | |
3656 | * in the correct intel_watermark_params structure. | |
3657 | * | |
3658 | * As the pixel clock runs, the FIFO will be drained at a rate that depends | |
3659 | * on the pixel size. When it reaches the watermark level, it'll start | |
3660 | * fetching FIFO line sized based chunks from memory until the FIFO fills | |
3661 | * past the watermark point. If the FIFO drains completely, a FIFO underrun | |
3662 | * will occur, and a display engine hang could result. | |
3663 | */ | |
7662c8bd | 3664 | static unsigned long intel_calculate_wm(unsigned long clock_in_khz, |
d210246a CW |
3665 | const struct intel_watermark_params *wm, |
3666 | int fifo_size, | |
7662c8bd SL |
3667 | int pixel_size, |
3668 | unsigned long latency_ns) | |
3669 | { | |
390c4dd4 | 3670 | long entries_required, wm_size; |
dff33cfc | 3671 | |
d660467c JB |
3672 | /* |
3673 | * Note: we need to make sure we don't overflow for various clock & | |
3674 | * latency values. | |
3675 | * clocks go from a few thousand to several hundred thousand. | |
3676 | * latency is usually a few thousand | |
3677 | */ | |
3678 | entries_required = ((clock_in_khz / 1000) * pixel_size * latency_ns) / | |
3679 | 1000; | |
8de9b311 | 3680 | entries_required = DIV_ROUND_UP(entries_required, wm->cacheline_size); |
7662c8bd | 3681 | |
bbb0aef5 | 3682 | DRM_DEBUG_KMS("FIFO entries required for mode: %ld\n", entries_required); |
dff33cfc | 3683 | |
d210246a | 3684 | wm_size = fifo_size - (entries_required + wm->guard_size); |
dff33cfc | 3685 | |
bbb0aef5 | 3686 | DRM_DEBUG_KMS("FIFO watermark level: %ld\n", wm_size); |
7662c8bd | 3687 | |
390c4dd4 JB |
3688 | /* Don't promote wm_size to unsigned... */ |
3689 | if (wm_size > (long)wm->max_wm) | |
7662c8bd | 3690 | wm_size = wm->max_wm; |
c3add4b6 | 3691 | if (wm_size <= 0) |
7662c8bd SL |
3692 | wm_size = wm->default_wm; |
3693 | return wm_size; | |
3694 | } | |
3695 | ||
3696 | struct cxsr_latency { | |
3697 | int is_desktop; | |
95534263 | 3698 | int is_ddr3; |
7662c8bd SL |
3699 | unsigned long fsb_freq; |
3700 | unsigned long mem_freq; | |
3701 | unsigned long display_sr; | |
3702 | unsigned long display_hpll_disable; | |
3703 | unsigned long cursor_sr; | |
3704 | unsigned long cursor_hpll_disable; | |
3705 | }; | |
3706 | ||
403c89ff | 3707 | static const struct cxsr_latency cxsr_latency_table[] = { |
95534263 LP |
3708 | {1, 0, 800, 400, 3382, 33382, 3983, 33983}, /* DDR2-400 SC */ |
3709 | {1, 0, 800, 667, 3354, 33354, 3807, 33807}, /* DDR2-667 SC */ | |
3710 | {1, 0, 800, 800, 3347, 33347, 3763, 33763}, /* DDR2-800 SC */ | |
3711 | {1, 1, 800, 667, 6420, 36420, 6873, 36873}, /* DDR3-667 SC */ | |
3712 | {1, 1, 800, 800, 5902, 35902, 6318, 36318}, /* DDR3-800 SC */ | |
3713 | ||
3714 | {1, 0, 667, 400, 3400, 33400, 4021, 34021}, /* DDR2-400 SC */ | |
3715 | {1, 0, 667, 667, 3372, 33372, 3845, 33845}, /* DDR2-667 SC */ | |
3716 | {1, 0, 667, 800, 3386, 33386, 3822, 33822}, /* DDR2-800 SC */ | |
3717 | {1, 1, 667, 667, 6438, 36438, 6911, 36911}, /* DDR3-667 SC */ | |
3718 | {1, 1, 667, 800, 5941, 35941, 6377, 36377}, /* DDR3-800 SC */ | |
3719 | ||
3720 | {1, 0, 400, 400, 3472, 33472, 4173, 34173}, /* DDR2-400 SC */ | |
3721 | {1, 0, 400, 667, 3443, 33443, 3996, 33996}, /* DDR2-667 SC */ | |
3722 | {1, 0, 400, 800, 3430, 33430, 3946, 33946}, /* DDR2-800 SC */ | |
3723 | {1, 1, 400, 667, 6509, 36509, 7062, 37062}, /* DDR3-667 SC */ | |
3724 | {1, 1, 400, 800, 5985, 35985, 6501, 36501}, /* DDR3-800 SC */ | |
3725 | ||
3726 | {0, 0, 800, 400, 3438, 33438, 4065, 34065}, /* DDR2-400 SC */ | |
3727 | {0, 0, 800, 667, 3410, 33410, 3889, 33889}, /* DDR2-667 SC */ | |
3728 | {0, 0, 800, 800, 3403, 33403, 3845, 33845}, /* DDR2-800 SC */ | |
3729 | {0, 1, 800, 667, 6476, 36476, 6955, 36955}, /* DDR3-667 SC */ | |
3730 | {0, 1, 800, 800, 5958, 35958, 6400, 36400}, /* DDR3-800 SC */ | |
3731 | ||
3732 | {0, 0, 667, 400, 3456, 33456, 4103, 34106}, /* DDR2-400 SC */ | |
3733 | {0, 0, 667, 667, 3428, 33428, 3927, 33927}, /* DDR2-667 SC */ | |
3734 | {0, 0, 667, 800, 3443, 33443, 3905, 33905}, /* DDR2-800 SC */ | |
3735 | {0, 1, 667, 667, 6494, 36494, 6993, 36993}, /* DDR3-667 SC */ | |
3736 | {0, 1, 667, 800, 5998, 35998, 6460, 36460}, /* DDR3-800 SC */ | |
3737 | ||
3738 | {0, 0, 400, 400, 3528, 33528, 4255, 34255}, /* DDR2-400 SC */ | |
3739 | {0, 0, 400, 667, 3500, 33500, 4079, 34079}, /* DDR2-667 SC */ | |
3740 | {0, 0, 400, 800, 3487, 33487, 4029, 34029}, /* DDR2-800 SC */ | |
3741 | {0, 1, 400, 667, 6566, 36566, 7145, 37145}, /* DDR3-667 SC */ | |
3742 | {0, 1, 400, 800, 6042, 36042, 6584, 36584}, /* DDR3-800 SC */ | |
7662c8bd SL |
3743 | }; |
3744 | ||
403c89ff CW |
3745 | static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, |
3746 | int is_ddr3, | |
3747 | int fsb, | |
3748 | int mem) | |
7662c8bd | 3749 | { |
403c89ff | 3750 | const struct cxsr_latency *latency; |
7662c8bd | 3751 | int i; |
7662c8bd SL |
3752 | |
3753 | if (fsb == 0 || mem == 0) | |
3754 | return NULL; | |
3755 | ||
3756 | for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) { | |
3757 | latency = &cxsr_latency_table[i]; | |
3758 | if (is_desktop == latency->is_desktop && | |
95534263 | 3759 | is_ddr3 == latency->is_ddr3 && |
decbbcda JSR |
3760 | fsb == latency->fsb_freq && mem == latency->mem_freq) |
3761 | return latency; | |
7662c8bd | 3762 | } |
decbbcda | 3763 | |
28c97730 | 3764 | DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n"); |
decbbcda JSR |
3765 | |
3766 | return NULL; | |
7662c8bd SL |
3767 | } |
3768 | ||
f2b115e6 | 3769 | static void pineview_disable_cxsr(struct drm_device *dev) |
7662c8bd SL |
3770 | { |
3771 | struct drm_i915_private *dev_priv = dev->dev_private; | |
7662c8bd SL |
3772 | |
3773 | /* deactivate cxsr */ | |
3e33d94d | 3774 | I915_WRITE(DSPFW3, I915_READ(DSPFW3) & ~PINEVIEW_SELF_REFRESH_EN); |
7662c8bd SL |
3775 | } |
3776 | ||
bcc24fb4 JB |
3777 | /* |
3778 | * Latency for FIFO fetches is dependent on several factors: | |
3779 | * - memory configuration (speed, channels) | |
3780 | * - chipset | |
3781 | * - current MCH state | |
3782 | * It can be fairly high in some situations, so here we assume a fairly | |
3783 | * pessimal value. It's a tradeoff between extra memory fetches (if we | |
3784 | * set this value too high, the FIFO will fetch frequently to stay full) | |
3785 | * and power consumption (set it too low to save power and we might see | |
3786 | * FIFO underruns and display "flicker"). | |
3787 | * | |
3788 | * A value of 5us seems to be a good balance; safe for very low end | |
3789 | * platforms but not overly aggressive on lower latency configs. | |
3790 | */ | |
69e302a9 | 3791 | static const int latency_ns = 5000; |
7662c8bd | 3792 | |
e70236a8 | 3793 | static int i9xx_get_fifo_size(struct drm_device *dev, int plane) |
dff33cfc JB |
3794 | { |
3795 | struct drm_i915_private *dev_priv = dev->dev_private; | |
3796 | uint32_t dsparb = I915_READ(DSPARB); | |
3797 | int size; | |
3798 | ||
8de9b311 CW |
3799 | size = dsparb & 0x7f; |
3800 | if (plane) | |
3801 | size = ((dsparb >> DSPARB_CSTART_SHIFT) & 0x7f) - size; | |
dff33cfc | 3802 | |
28c97730 | 3803 | DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, |
5eddb70b | 3804 | plane ? "B" : "A", size); |
dff33cfc JB |
3805 | |
3806 | return size; | |
3807 | } | |
7662c8bd | 3808 | |
e70236a8 JB |
3809 | static int i85x_get_fifo_size(struct drm_device *dev, int plane) |
3810 | { | |
3811 | struct drm_i915_private *dev_priv = dev->dev_private; | |
3812 | uint32_t dsparb = I915_READ(DSPARB); | |
3813 | int size; | |
3814 | ||
8de9b311 CW |
3815 | size = dsparb & 0x1ff; |
3816 | if (plane) | |
3817 | size = ((dsparb >> DSPARB_BEND_SHIFT) & 0x1ff) - size; | |
e70236a8 | 3818 | size >>= 1; /* Convert to cachelines */ |
dff33cfc | 3819 | |
28c97730 | 3820 | DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, |
5eddb70b | 3821 | plane ? "B" : "A", size); |
dff33cfc JB |
3822 | |
3823 | return size; | |
3824 | } | |
7662c8bd | 3825 | |
e70236a8 JB |
3826 | static int i845_get_fifo_size(struct drm_device *dev, int plane) |
3827 | { | |
3828 | struct drm_i915_private *dev_priv = dev->dev_private; | |
3829 | uint32_t dsparb = I915_READ(DSPARB); | |
3830 | int size; | |
3831 | ||
3832 | size = dsparb & 0x7f; | |
3833 | size >>= 2; /* Convert to cachelines */ | |
3834 | ||
28c97730 | 3835 | DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, |
5eddb70b CW |
3836 | plane ? "B" : "A", |
3837 | size); | |
e70236a8 JB |
3838 | |
3839 | return size; | |
3840 | } | |
3841 | ||
3842 | static int i830_get_fifo_size(struct drm_device *dev, int plane) | |
3843 | { | |
3844 | struct drm_i915_private *dev_priv = dev->dev_private; | |
3845 | uint32_t dsparb = I915_READ(DSPARB); | |
3846 | int size; | |
3847 | ||
3848 | size = dsparb & 0x7f; | |
3849 | size >>= 1; /* Convert to cachelines */ | |
3850 | ||
28c97730 | 3851 | DRM_DEBUG_KMS("FIFO size - (0x%08x) %s: %d\n", dsparb, |
5eddb70b | 3852 | plane ? "B" : "A", size); |
e70236a8 JB |
3853 | |
3854 | return size; | |
3855 | } | |
3856 | ||
d210246a CW |
3857 | static struct drm_crtc *single_enabled_crtc(struct drm_device *dev) |
3858 | { | |
3859 | struct drm_crtc *crtc, *enabled = NULL; | |
3860 | ||
3861 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | |
3862 | if (crtc->enabled && crtc->fb) { | |
3863 | if (enabled) | |
3864 | return NULL; | |
3865 | enabled = crtc; | |
3866 | } | |
3867 | } | |
3868 | ||
3869 | return enabled; | |
3870 | } | |
3871 | ||
3872 | static void pineview_update_wm(struct drm_device *dev) | |
d4294342 ZY |
3873 | { |
3874 | struct drm_i915_private *dev_priv = dev->dev_private; | |
d210246a | 3875 | struct drm_crtc *crtc; |
403c89ff | 3876 | const struct cxsr_latency *latency; |
d4294342 ZY |
3877 | u32 reg; |
3878 | unsigned long wm; | |
d4294342 | 3879 | |
403c89ff | 3880 | latency = intel_get_cxsr_latency(IS_PINEVIEW_G(dev), dev_priv->is_ddr3, |
95534263 | 3881 | dev_priv->fsb_freq, dev_priv->mem_freq); |
d4294342 ZY |
3882 | if (!latency) { |
3883 | DRM_DEBUG_KMS("Unknown FSB/MEM found, disable CxSR\n"); | |
3884 | pineview_disable_cxsr(dev); | |
3885 | return; | |
3886 | } | |
3887 | ||
d210246a CW |
3888 | crtc = single_enabled_crtc(dev); |
3889 | if (crtc) { | |
3890 | int clock = crtc->mode.clock; | |
3891 | int pixel_size = crtc->fb->bits_per_pixel / 8; | |
d4294342 ZY |
3892 | |
3893 | /* Display SR */ | |
d210246a CW |
3894 | wm = intel_calculate_wm(clock, &pineview_display_wm, |
3895 | pineview_display_wm.fifo_size, | |
d4294342 ZY |
3896 | pixel_size, latency->display_sr); |
3897 | reg = I915_READ(DSPFW1); | |
3898 | reg &= ~DSPFW_SR_MASK; | |
3899 | reg |= wm << DSPFW_SR_SHIFT; | |
3900 | I915_WRITE(DSPFW1, reg); | |
3901 | DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg); | |
3902 | ||
3903 | /* cursor SR */ | |
d210246a CW |
3904 | wm = intel_calculate_wm(clock, &pineview_cursor_wm, |
3905 | pineview_display_wm.fifo_size, | |
d4294342 ZY |
3906 | pixel_size, latency->cursor_sr); |
3907 | reg = I915_READ(DSPFW3); | |
3908 | reg &= ~DSPFW_CURSOR_SR_MASK; | |
3909 | reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT; | |
3910 | I915_WRITE(DSPFW3, reg); | |
3911 | ||
3912 | /* Display HPLL off SR */ | |
d210246a CW |
3913 | wm = intel_calculate_wm(clock, &pineview_display_hplloff_wm, |
3914 | pineview_display_hplloff_wm.fifo_size, | |
d4294342 ZY |
3915 | pixel_size, latency->display_hpll_disable); |
3916 | reg = I915_READ(DSPFW3); | |
3917 | reg &= ~DSPFW_HPLL_SR_MASK; | |
3918 | reg |= wm & DSPFW_HPLL_SR_MASK; | |
3919 | I915_WRITE(DSPFW3, reg); | |
3920 | ||
3921 | /* cursor HPLL off SR */ | |
d210246a CW |
3922 | wm = intel_calculate_wm(clock, &pineview_cursor_hplloff_wm, |
3923 | pineview_display_hplloff_wm.fifo_size, | |
d4294342 ZY |
3924 | pixel_size, latency->cursor_hpll_disable); |
3925 | reg = I915_READ(DSPFW3); | |
3926 | reg &= ~DSPFW_HPLL_CURSOR_MASK; | |
3927 | reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT; | |
3928 | I915_WRITE(DSPFW3, reg); | |
3929 | DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg); | |
3930 | ||
3931 | /* activate cxsr */ | |
3e33d94d CW |
3932 | I915_WRITE(DSPFW3, |
3933 | I915_READ(DSPFW3) | PINEVIEW_SELF_REFRESH_EN); | |
d4294342 ZY |
3934 | DRM_DEBUG_KMS("Self-refresh is enabled\n"); |
3935 | } else { | |
3936 | pineview_disable_cxsr(dev); | |
3937 | DRM_DEBUG_KMS("Self-refresh is disabled\n"); | |
3938 | } | |
3939 | } | |
3940 | ||
417ae147 CW |
3941 | static bool g4x_compute_wm0(struct drm_device *dev, |
3942 | int plane, | |
3943 | const struct intel_watermark_params *display, | |
3944 | int display_latency_ns, | |
3945 | const struct intel_watermark_params *cursor, | |
3946 | int cursor_latency_ns, | |
3947 | int *plane_wm, | |
3948 | int *cursor_wm) | |
3949 | { | |
3950 | struct drm_crtc *crtc; | |
3951 | int htotal, hdisplay, clock, pixel_size; | |
3952 | int line_time_us, line_count; | |
3953 | int entries, tlb_miss; | |
3954 | ||
3955 | crtc = intel_get_crtc_for_plane(dev, plane); | |
5c72d064 CW |
3956 | if (crtc->fb == NULL || !crtc->enabled) { |
3957 | *cursor_wm = cursor->guard_size; | |
3958 | *plane_wm = display->guard_size; | |
417ae147 | 3959 | return false; |
5c72d064 | 3960 | } |
417ae147 CW |
3961 | |
3962 | htotal = crtc->mode.htotal; | |
3963 | hdisplay = crtc->mode.hdisplay; | |
3964 | clock = crtc->mode.clock; | |
3965 | pixel_size = crtc->fb->bits_per_pixel / 8; | |
3966 | ||
3967 | /* Use the small buffer method to calculate plane watermark */ | |
3968 | entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000; | |
3969 | tlb_miss = display->fifo_size*display->cacheline_size - hdisplay * 8; | |
3970 | if (tlb_miss > 0) | |
3971 | entries += tlb_miss; | |
3972 | entries = DIV_ROUND_UP(entries, display->cacheline_size); | |
3973 | *plane_wm = entries + display->guard_size; | |
3974 | if (*plane_wm > (int)display->max_wm) | |
3975 | *plane_wm = display->max_wm; | |
3976 | ||
3977 | /* Use the large buffer method to calculate cursor watermark */ | |
3978 | line_time_us = ((htotal * 1000) / clock); | |
3979 | line_count = (cursor_latency_ns / line_time_us + 1000) / 1000; | |
3980 | entries = line_count * 64 * pixel_size; | |
3981 | tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8; | |
3982 | if (tlb_miss > 0) | |
3983 | entries += tlb_miss; | |
3984 | entries = DIV_ROUND_UP(entries, cursor->cacheline_size); | |
3985 | *cursor_wm = entries + cursor->guard_size; | |
3986 | if (*cursor_wm > (int)cursor->max_wm) | |
3987 | *cursor_wm = (int)cursor->max_wm; | |
3988 | ||
3989 | return true; | |
3990 | } | |
3991 | ||
3992 | /* | |
3993 | * Check the wm result. | |
3994 | * | |
3995 | * If any calculated watermark values is larger than the maximum value that | |
3996 | * can be programmed into the associated watermark register, that watermark | |
3997 | * must be disabled. | |
3998 | */ | |
3999 | static bool g4x_check_srwm(struct drm_device *dev, | |
4000 | int display_wm, int cursor_wm, | |
4001 | const struct intel_watermark_params *display, | |
4002 | const struct intel_watermark_params *cursor) | |
652c393a | 4003 | { |
417ae147 CW |
4004 | DRM_DEBUG_KMS("SR watermark: display plane %d, cursor %d\n", |
4005 | display_wm, cursor_wm); | |
652c393a | 4006 | |
417ae147 | 4007 | if (display_wm > display->max_wm) { |
bbb0aef5 | 4008 | DRM_DEBUG_KMS("display watermark is too large(%d/%ld), disabling\n", |
417ae147 CW |
4009 | display_wm, display->max_wm); |
4010 | return false; | |
4011 | } | |
0e442c60 | 4012 | |
417ae147 | 4013 | if (cursor_wm > cursor->max_wm) { |
bbb0aef5 | 4014 | DRM_DEBUG_KMS("cursor watermark is too large(%d/%ld), disabling\n", |
417ae147 CW |
4015 | cursor_wm, cursor->max_wm); |
4016 | return false; | |
4017 | } | |
0e442c60 | 4018 | |
417ae147 CW |
4019 | if (!(display_wm || cursor_wm)) { |
4020 | DRM_DEBUG_KMS("SR latency is 0, disabling\n"); | |
4021 | return false; | |
4022 | } | |
0e442c60 | 4023 | |
417ae147 CW |
4024 | return true; |
4025 | } | |
0e442c60 | 4026 | |
417ae147 | 4027 | static bool g4x_compute_srwm(struct drm_device *dev, |
d210246a CW |
4028 | int plane, |
4029 | int latency_ns, | |
417ae147 CW |
4030 | const struct intel_watermark_params *display, |
4031 | const struct intel_watermark_params *cursor, | |
4032 | int *display_wm, int *cursor_wm) | |
4033 | { | |
d210246a CW |
4034 | struct drm_crtc *crtc; |
4035 | int hdisplay, htotal, pixel_size, clock; | |
417ae147 CW |
4036 | unsigned long line_time_us; |
4037 | int line_count, line_size; | |
4038 | int small, large; | |
4039 | int entries; | |
0e442c60 | 4040 | |
417ae147 CW |
4041 | if (!latency_ns) { |
4042 | *display_wm = *cursor_wm = 0; | |
4043 | return false; | |
4044 | } | |
0e442c60 | 4045 | |
d210246a CW |
4046 | crtc = intel_get_crtc_for_plane(dev, plane); |
4047 | hdisplay = crtc->mode.hdisplay; | |
4048 | htotal = crtc->mode.htotal; | |
4049 | clock = crtc->mode.clock; | |
4050 | pixel_size = crtc->fb->bits_per_pixel / 8; | |
4051 | ||
417ae147 CW |
4052 | line_time_us = (htotal * 1000) / clock; |
4053 | line_count = (latency_ns / line_time_us + 1000) / 1000; | |
4054 | line_size = hdisplay * pixel_size; | |
0e442c60 | 4055 | |
417ae147 CW |
4056 | /* Use the minimum of the small and large buffer method for primary */ |
4057 | small = ((clock * pixel_size / 1000) * latency_ns) / 1000; | |
4058 | large = line_count * line_size; | |
0e442c60 | 4059 | |
417ae147 CW |
4060 | entries = DIV_ROUND_UP(min(small, large), display->cacheline_size); |
4061 | *display_wm = entries + display->guard_size; | |
4fe5e611 | 4062 | |
417ae147 CW |
4063 | /* calculate the self-refresh watermark for display cursor */ |
4064 | entries = line_count * pixel_size * 64; | |
4065 | entries = DIV_ROUND_UP(entries, cursor->cacheline_size); | |
4066 | *cursor_wm = entries + cursor->guard_size; | |
4fe5e611 | 4067 | |
417ae147 CW |
4068 | return g4x_check_srwm(dev, |
4069 | *display_wm, *cursor_wm, | |
4070 | display, cursor); | |
4071 | } | |
4fe5e611 | 4072 | |
7ccb4a53 | 4073 | #define single_plane_enabled(mask) is_power_of_2(mask) |
d210246a CW |
4074 | |
4075 | static void g4x_update_wm(struct drm_device *dev) | |
417ae147 CW |
4076 | { |
4077 | static const int sr_latency_ns = 12000; | |
4078 | struct drm_i915_private *dev_priv = dev->dev_private; | |
4079 | int planea_wm, planeb_wm, cursora_wm, cursorb_wm; | |
d210246a CW |
4080 | int plane_sr, cursor_sr; |
4081 | unsigned int enabled = 0; | |
417ae147 CW |
4082 | |
4083 | if (g4x_compute_wm0(dev, 0, | |
4084 | &g4x_wm_info, latency_ns, | |
4085 | &g4x_cursor_wm_info, latency_ns, | |
4086 | &planea_wm, &cursora_wm)) | |
d210246a | 4087 | enabled |= 1; |
417ae147 CW |
4088 | |
4089 | if (g4x_compute_wm0(dev, 1, | |
4090 | &g4x_wm_info, latency_ns, | |
4091 | &g4x_cursor_wm_info, latency_ns, | |
4092 | &planeb_wm, &cursorb_wm)) | |
d210246a | 4093 | enabled |= 2; |
417ae147 CW |
4094 | |
4095 | plane_sr = cursor_sr = 0; | |
d210246a CW |
4096 | if (single_plane_enabled(enabled) && |
4097 | g4x_compute_srwm(dev, ffs(enabled) - 1, | |
4098 | sr_latency_ns, | |
417ae147 CW |
4099 | &g4x_wm_info, |
4100 | &g4x_cursor_wm_info, | |
4101 | &plane_sr, &cursor_sr)) | |
0e442c60 | 4102 | I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); |
417ae147 CW |
4103 | else |
4104 | I915_WRITE(FW_BLC_SELF, | |
4105 | I915_READ(FW_BLC_SELF) & ~FW_BLC_SELF_EN); | |
0e442c60 | 4106 | |
308977ac CW |
4107 | DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n", |
4108 | planea_wm, cursora_wm, | |
4109 | planeb_wm, cursorb_wm, | |
4110 | plane_sr, cursor_sr); | |
0e442c60 | 4111 | |
417ae147 CW |
4112 | I915_WRITE(DSPFW1, |
4113 | (plane_sr << DSPFW_SR_SHIFT) | | |
0e442c60 | 4114 | (cursorb_wm << DSPFW_CURSORB_SHIFT) | |
417ae147 CW |
4115 | (planeb_wm << DSPFW_PLANEB_SHIFT) | |
4116 | planea_wm); | |
4117 | I915_WRITE(DSPFW2, | |
4118 | (I915_READ(DSPFW2) & DSPFW_CURSORA_MASK) | | |
0e442c60 JB |
4119 | (cursora_wm << DSPFW_CURSORA_SHIFT)); |
4120 | /* HPLL off in SR has some issues on G4x... disable it */ | |
417ae147 CW |
4121 | I915_WRITE(DSPFW3, |
4122 | (I915_READ(DSPFW3) & ~DSPFW_HPLL_SR_EN) | | |
0e442c60 | 4123 | (cursor_sr << DSPFW_CURSOR_SR_SHIFT)); |
652c393a JB |
4124 | } |
4125 | ||
d210246a | 4126 | static void i965_update_wm(struct drm_device *dev) |
7662c8bd SL |
4127 | { |
4128 | struct drm_i915_private *dev_priv = dev->dev_private; | |
d210246a CW |
4129 | struct drm_crtc *crtc; |
4130 | int srwm = 1; | |
4fe5e611 | 4131 | int cursor_sr = 16; |
1dc7546d JB |
4132 | |
4133 | /* Calc sr entries for one plane configs */ | |
d210246a CW |
4134 | crtc = single_enabled_crtc(dev); |
4135 | if (crtc) { | |
1dc7546d | 4136 | /* self-refresh has much higher latency */ |
69e302a9 | 4137 | static const int sr_latency_ns = 12000; |
d210246a CW |
4138 | int clock = crtc->mode.clock; |
4139 | int htotal = crtc->mode.htotal; | |
4140 | int hdisplay = crtc->mode.hdisplay; | |
4141 | int pixel_size = crtc->fb->bits_per_pixel / 8; | |
4142 | unsigned long line_time_us; | |
4143 | int entries; | |
1dc7546d | 4144 | |
d210246a | 4145 | line_time_us = ((htotal * 1000) / clock); |
1dc7546d JB |
4146 | |
4147 | /* Use ns/us then divide to preserve precision */ | |
d210246a CW |
4148 | entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * |
4149 | pixel_size * hdisplay; | |
4150 | entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE); | |
d210246a | 4151 | srwm = I965_FIFO_SIZE - entries; |
1dc7546d JB |
4152 | if (srwm < 0) |
4153 | srwm = 1; | |
1b07e04e | 4154 | srwm &= 0x1ff; |
308977ac CW |
4155 | DRM_DEBUG_KMS("self-refresh entries: %d, wm: %d\n", |
4156 | entries, srwm); | |
4fe5e611 | 4157 | |
d210246a | 4158 | entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * |
5eddb70b | 4159 | pixel_size * 64; |
d210246a | 4160 | entries = DIV_ROUND_UP(entries, |
8de9b311 | 4161 | i965_cursor_wm_info.cacheline_size); |
4fe5e611 | 4162 | cursor_sr = i965_cursor_wm_info.fifo_size - |
d210246a | 4163 | (entries + i965_cursor_wm_info.guard_size); |
4fe5e611 ZY |
4164 | |
4165 | if (cursor_sr > i965_cursor_wm_info.max_wm) | |
4166 | cursor_sr = i965_cursor_wm_info.max_wm; | |
4167 | ||
4168 | DRM_DEBUG_KMS("self-refresh watermark: display plane %d " | |
4169 | "cursor %d\n", srwm, cursor_sr); | |
4170 | ||
a6c45cf0 | 4171 | if (IS_CRESTLINE(dev)) |
adcdbc66 | 4172 | I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN); |
33c5fd12 DJ |
4173 | } else { |
4174 | /* Turn off self refresh if both pipes are enabled */ | |
a6c45cf0 | 4175 | if (IS_CRESTLINE(dev)) |
adcdbc66 JB |
4176 | I915_WRITE(FW_BLC_SELF, I915_READ(FW_BLC_SELF) |
4177 | & ~FW_BLC_SELF_EN); | |
1dc7546d | 4178 | } |
7662c8bd | 4179 | |
1dc7546d JB |
4180 | DRM_DEBUG_KMS("Setting FIFO watermarks - A: 8, B: 8, C: 8, SR %d\n", |
4181 | srwm); | |
7662c8bd SL |
4182 | |
4183 | /* 965 has limitations... */ | |
417ae147 CW |
4184 | I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) | |
4185 | (8 << 16) | (8 << 8) | (8 << 0)); | |
7662c8bd | 4186 | I915_WRITE(DSPFW2, (8 << 8) | (8 << 0)); |
4fe5e611 ZY |
4187 | /* update cursor SR watermark */ |
4188 | I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT)); | |
7662c8bd SL |
4189 | } |
4190 | ||
d210246a | 4191 | static void i9xx_update_wm(struct drm_device *dev) |
7662c8bd SL |
4192 | { |
4193 | struct drm_i915_private *dev_priv = dev->dev_private; | |
d210246a | 4194 | const struct intel_watermark_params *wm_info; |
dff33cfc JB |
4195 | uint32_t fwater_lo; |
4196 | uint32_t fwater_hi; | |
d210246a CW |
4197 | int cwm, srwm = 1; |
4198 | int fifo_size; | |
dff33cfc | 4199 | int planea_wm, planeb_wm; |
d210246a | 4200 | struct drm_crtc *crtc, *enabled = NULL; |
7662c8bd | 4201 | |
72557b4f | 4202 | if (IS_I945GM(dev)) |
d210246a | 4203 | wm_info = &i945_wm_info; |
a6c45cf0 | 4204 | else if (!IS_GEN2(dev)) |
d210246a | 4205 | wm_info = &i915_wm_info; |
7662c8bd | 4206 | else |
d210246a CW |
4207 | wm_info = &i855_wm_info; |
4208 | ||
4209 | fifo_size = dev_priv->display.get_fifo_size(dev, 0); | |
4210 | crtc = intel_get_crtc_for_plane(dev, 0); | |
4211 | if (crtc->enabled && crtc->fb) { | |
4212 | planea_wm = intel_calculate_wm(crtc->mode.clock, | |
4213 | wm_info, fifo_size, | |
4214 | crtc->fb->bits_per_pixel / 8, | |
4215 | latency_ns); | |
4216 | enabled = crtc; | |
4217 | } else | |
4218 | planea_wm = fifo_size - wm_info->guard_size; | |
4219 | ||
4220 | fifo_size = dev_priv->display.get_fifo_size(dev, 1); | |
4221 | crtc = intel_get_crtc_for_plane(dev, 1); | |
4222 | if (crtc->enabled && crtc->fb) { | |
4223 | planeb_wm = intel_calculate_wm(crtc->mode.clock, | |
4224 | wm_info, fifo_size, | |
4225 | crtc->fb->bits_per_pixel / 8, | |
4226 | latency_ns); | |
4227 | if (enabled == NULL) | |
4228 | enabled = crtc; | |
4229 | else | |
4230 | enabled = NULL; | |
4231 | } else | |
4232 | planeb_wm = fifo_size - wm_info->guard_size; | |
7662c8bd | 4233 | |
28c97730 | 4234 | DRM_DEBUG_KMS("FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm); |
7662c8bd SL |
4235 | |
4236 | /* | |
4237 | * Overlay gets an aggressive default since video jitter is bad. | |
4238 | */ | |
4239 | cwm = 2; | |
4240 | ||
18b2190c AL |
4241 | /* Play safe and disable self-refresh before adjusting watermarks. */ |
4242 | if (IS_I945G(dev) || IS_I945GM(dev)) | |
4243 | I915_WRITE(FW_BLC_SELF, FW_BLC_SELF_EN_MASK | 0); | |
4244 | else if (IS_I915GM(dev)) | |
4245 | I915_WRITE(INSTPM, I915_READ(INSTPM) & ~INSTPM_SELF_EN); | |
4246 | ||
dff33cfc | 4247 | /* Calc sr entries for one plane configs */ |
d210246a | 4248 | if (HAS_FW_BLC(dev) && enabled) { |
dff33cfc | 4249 | /* self-refresh has much higher latency */ |
69e302a9 | 4250 | static const int sr_latency_ns = 6000; |
d210246a CW |
4251 | int clock = enabled->mode.clock; |
4252 | int htotal = enabled->mode.htotal; | |
4253 | int hdisplay = enabled->mode.hdisplay; | |
4254 | int pixel_size = enabled->fb->bits_per_pixel / 8; | |
4255 | unsigned long line_time_us; | |
4256 | int entries; | |
dff33cfc | 4257 | |
d210246a | 4258 | line_time_us = (htotal * 1000) / clock; |
dff33cfc JB |
4259 | |
4260 | /* Use ns/us then divide to preserve precision */ | |
d210246a CW |
4261 | entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * |
4262 | pixel_size * hdisplay; | |
4263 | entries = DIV_ROUND_UP(entries, wm_info->cacheline_size); | |
4264 | DRM_DEBUG_KMS("self-refresh entries: %d\n", entries); | |
4265 | srwm = wm_info->fifo_size - entries; | |
dff33cfc JB |
4266 | if (srwm < 0) |
4267 | srwm = 1; | |
ee980b80 LP |
4268 | |
4269 | if (IS_I945G(dev) || IS_I945GM(dev)) | |
18b2190c AL |
4270 | I915_WRITE(FW_BLC_SELF, |
4271 | FW_BLC_SELF_FIFO_MASK | (srwm & 0xff)); | |
4272 | else if (IS_I915GM(dev)) | |
ee980b80 | 4273 | I915_WRITE(FW_BLC_SELF, srwm & 0x3f); |
7662c8bd SL |
4274 | } |
4275 | ||
28c97730 | 4276 | DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d, B: %d, C: %d, SR %d\n", |
5eddb70b | 4277 | planea_wm, planeb_wm, cwm, srwm); |
7662c8bd | 4278 | |
dff33cfc JB |
4279 | fwater_lo = ((planeb_wm & 0x3f) << 16) | (planea_wm & 0x3f); |
4280 | fwater_hi = (cwm & 0x1f); | |
4281 | ||
4282 | /* Set request length to 8 cachelines per fetch */ | |
4283 | fwater_lo = fwater_lo | (1 << 24) | (1 << 8); | |
4284 | fwater_hi = fwater_hi | (1 << 8); | |
7662c8bd SL |
4285 | |
4286 | I915_WRITE(FW_BLC, fwater_lo); | |
4287 | I915_WRITE(FW_BLC2, fwater_hi); | |
18b2190c | 4288 | |
d210246a CW |
4289 | if (HAS_FW_BLC(dev)) { |
4290 | if (enabled) { | |
4291 | if (IS_I945G(dev) || IS_I945GM(dev)) | |
4292 | I915_WRITE(FW_BLC_SELF, | |
4293 | FW_BLC_SELF_EN_MASK | FW_BLC_SELF_EN); | |
4294 | else if (IS_I915GM(dev)) | |
4295 | I915_WRITE(INSTPM, I915_READ(INSTPM) | INSTPM_SELF_EN); | |
4296 | DRM_DEBUG_KMS("memory self refresh enabled\n"); | |
4297 | } else | |
4298 | DRM_DEBUG_KMS("memory self refresh disabled\n"); | |
4299 | } | |
7662c8bd SL |
4300 | } |
4301 | ||
d210246a | 4302 | static void i830_update_wm(struct drm_device *dev) |
7662c8bd SL |
4303 | { |
4304 | struct drm_i915_private *dev_priv = dev->dev_private; | |
d210246a CW |
4305 | struct drm_crtc *crtc; |
4306 | uint32_t fwater_lo; | |
dff33cfc | 4307 | int planea_wm; |
7662c8bd | 4308 | |
d210246a CW |
4309 | crtc = single_enabled_crtc(dev); |
4310 | if (crtc == NULL) | |
4311 | return; | |
7662c8bd | 4312 | |
d210246a CW |
4313 | planea_wm = intel_calculate_wm(crtc->mode.clock, &i830_wm_info, |
4314 | dev_priv->display.get_fifo_size(dev, 0), | |
4315 | crtc->fb->bits_per_pixel / 8, | |
4316 | latency_ns); | |
4317 | fwater_lo = I915_READ(FW_BLC) & ~0xfff; | |
f3601326 JB |
4318 | fwater_lo |= (3<<8) | planea_wm; |
4319 | ||
28c97730 | 4320 | DRM_DEBUG_KMS("Setting FIFO watermarks - A: %d\n", planea_wm); |
7662c8bd SL |
4321 | |
4322 | I915_WRITE(FW_BLC, fwater_lo); | |
4323 | } | |
4324 | ||
7f8a8569 | 4325 | #define ILK_LP0_PLANE_LATENCY 700 |
c936f44d | 4326 | #define ILK_LP0_CURSOR_LATENCY 1300 |
7f8a8569 | 4327 | |
1398261a YL |
4328 | /* |
4329 | * Check the wm result. | |
4330 | * | |
4331 | * If any calculated watermark values is larger than the maximum value that | |
4332 | * can be programmed into the associated watermark register, that watermark | |
4333 | * must be disabled. | |
1398261a | 4334 | */ |
b79d4990 JB |
4335 | static bool ironlake_check_srwm(struct drm_device *dev, int level, |
4336 | int fbc_wm, int display_wm, int cursor_wm, | |
4337 | const struct intel_watermark_params *display, | |
4338 | const struct intel_watermark_params *cursor) | |
1398261a YL |
4339 | { |
4340 | struct drm_i915_private *dev_priv = dev->dev_private; | |
4341 | ||
4342 | DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d," | |
4343 | " cursor %d\n", level, display_wm, fbc_wm, cursor_wm); | |
4344 | ||
4345 | if (fbc_wm > SNB_FBC_MAX_SRWM) { | |
4346 | DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n", | |
b79d4990 | 4347 | fbc_wm, SNB_FBC_MAX_SRWM, level); |
1398261a YL |
4348 | |
4349 | /* fbc has it's own way to disable FBC WM */ | |
4350 | I915_WRITE(DISP_ARB_CTL, | |
4351 | I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS); | |
4352 | return false; | |
4353 | } | |
4354 | ||
b79d4990 | 4355 | if (display_wm > display->max_wm) { |
1398261a | 4356 | DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n", |
b79d4990 | 4357 | display_wm, SNB_DISPLAY_MAX_SRWM, level); |
1398261a YL |
4358 | return false; |
4359 | } | |
4360 | ||
b79d4990 | 4361 | if (cursor_wm > cursor->max_wm) { |
1398261a | 4362 | DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n", |
b79d4990 | 4363 | cursor_wm, SNB_CURSOR_MAX_SRWM, level); |
1398261a YL |
4364 | return false; |
4365 | } | |
4366 | ||
4367 | if (!(fbc_wm || display_wm || cursor_wm)) { | |
4368 | DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level); | |
4369 | return false; | |
4370 | } | |
4371 | ||
4372 | return true; | |
4373 | } | |
4374 | ||
4375 | /* | |
4376 | * Compute watermark values of WM[1-3], | |
4377 | */ | |
d210246a CW |
4378 | static bool ironlake_compute_srwm(struct drm_device *dev, int level, int plane, |
4379 | int latency_ns, | |
b79d4990 JB |
4380 | const struct intel_watermark_params *display, |
4381 | const struct intel_watermark_params *cursor, | |
4382 | int *fbc_wm, int *display_wm, int *cursor_wm) | |
1398261a | 4383 | { |
d210246a | 4384 | struct drm_crtc *crtc; |
1398261a | 4385 | unsigned long line_time_us; |
d210246a | 4386 | int hdisplay, htotal, pixel_size, clock; |
b79d4990 | 4387 | int line_count, line_size; |
1398261a YL |
4388 | int small, large; |
4389 | int entries; | |
1398261a YL |
4390 | |
4391 | if (!latency_ns) { | |
4392 | *fbc_wm = *display_wm = *cursor_wm = 0; | |
4393 | return false; | |
4394 | } | |
4395 | ||
d210246a CW |
4396 | crtc = intel_get_crtc_for_plane(dev, plane); |
4397 | hdisplay = crtc->mode.hdisplay; | |
4398 | htotal = crtc->mode.htotal; | |
4399 | clock = crtc->mode.clock; | |
4400 | pixel_size = crtc->fb->bits_per_pixel / 8; | |
4401 | ||
1398261a YL |
4402 | line_time_us = (htotal * 1000) / clock; |
4403 | line_count = (latency_ns / line_time_us + 1000) / 1000; | |
4404 | line_size = hdisplay * pixel_size; | |
4405 | ||
4406 | /* Use the minimum of the small and large buffer method for primary */ | |
4407 | small = ((clock * pixel_size / 1000) * latency_ns) / 1000; | |
4408 | large = line_count * line_size; | |
4409 | ||
b79d4990 JB |
4410 | entries = DIV_ROUND_UP(min(small, large), display->cacheline_size); |
4411 | *display_wm = entries + display->guard_size; | |
1398261a YL |
4412 | |
4413 | /* | |
b79d4990 | 4414 | * Spec says: |
1398261a YL |
4415 | * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2 |
4416 | */ | |
4417 | *fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2; | |
4418 | ||
4419 | /* calculate the self-refresh watermark for display cursor */ | |
4420 | entries = line_count * pixel_size * 64; | |
b79d4990 JB |
4421 | entries = DIV_ROUND_UP(entries, cursor->cacheline_size); |
4422 | *cursor_wm = entries + cursor->guard_size; | |
1398261a | 4423 | |
b79d4990 JB |
4424 | return ironlake_check_srwm(dev, level, |
4425 | *fbc_wm, *display_wm, *cursor_wm, | |
4426 | display, cursor); | |
4427 | } | |
4428 | ||
d210246a | 4429 | static void ironlake_update_wm(struct drm_device *dev) |
b79d4990 JB |
4430 | { |
4431 | struct drm_i915_private *dev_priv = dev->dev_private; | |
d210246a CW |
4432 | int fbc_wm, plane_wm, cursor_wm; |
4433 | unsigned int enabled; | |
b79d4990 JB |
4434 | |
4435 | enabled = 0; | |
9f405100 CW |
4436 | if (g4x_compute_wm0(dev, 0, |
4437 | &ironlake_display_wm_info, | |
4438 | ILK_LP0_PLANE_LATENCY, | |
4439 | &ironlake_cursor_wm_info, | |
4440 | ILK_LP0_CURSOR_LATENCY, | |
4441 | &plane_wm, &cursor_wm)) { | |
b79d4990 JB |
4442 | I915_WRITE(WM0_PIPEA_ILK, |
4443 | (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); | |
4444 | DRM_DEBUG_KMS("FIFO watermarks For pipe A -" | |
4445 | " plane %d, " "cursor: %d\n", | |
4446 | plane_wm, cursor_wm); | |
d210246a | 4447 | enabled |= 1; |
b79d4990 JB |
4448 | } |
4449 | ||
9f405100 CW |
4450 | if (g4x_compute_wm0(dev, 1, |
4451 | &ironlake_display_wm_info, | |
4452 | ILK_LP0_PLANE_LATENCY, | |
4453 | &ironlake_cursor_wm_info, | |
4454 | ILK_LP0_CURSOR_LATENCY, | |
4455 | &plane_wm, &cursor_wm)) { | |
b79d4990 JB |
4456 | I915_WRITE(WM0_PIPEB_ILK, |
4457 | (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); | |
4458 | DRM_DEBUG_KMS("FIFO watermarks For pipe B -" | |
4459 | " plane %d, cursor: %d\n", | |
4460 | plane_wm, cursor_wm); | |
d210246a | 4461 | enabled |= 2; |
b79d4990 JB |
4462 | } |
4463 | ||
4464 | /* | |
4465 | * Calculate and update the self-refresh watermark only when one | |
4466 | * display plane is used. | |
4467 | */ | |
4468 | I915_WRITE(WM3_LP_ILK, 0); | |
4469 | I915_WRITE(WM2_LP_ILK, 0); | |
4470 | I915_WRITE(WM1_LP_ILK, 0); | |
4471 | ||
d210246a | 4472 | if (!single_plane_enabled(enabled)) |
b79d4990 | 4473 | return; |
d210246a | 4474 | enabled = ffs(enabled) - 1; |
b79d4990 JB |
4475 | |
4476 | /* WM1 */ | |
d210246a CW |
4477 | if (!ironlake_compute_srwm(dev, 1, enabled, |
4478 | ILK_READ_WM1_LATENCY() * 500, | |
b79d4990 JB |
4479 | &ironlake_display_srwm_info, |
4480 | &ironlake_cursor_srwm_info, | |
4481 | &fbc_wm, &plane_wm, &cursor_wm)) | |
4482 | return; | |
4483 | ||
4484 | I915_WRITE(WM1_LP_ILK, | |
4485 | WM1_LP_SR_EN | | |
4486 | (ILK_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) | | |
4487 | (fbc_wm << WM1_LP_FBC_SHIFT) | | |
4488 | (plane_wm << WM1_LP_SR_SHIFT) | | |
4489 | cursor_wm); | |
4490 | ||
4491 | /* WM2 */ | |
d210246a CW |
4492 | if (!ironlake_compute_srwm(dev, 2, enabled, |
4493 | ILK_READ_WM2_LATENCY() * 500, | |
b79d4990 JB |
4494 | &ironlake_display_srwm_info, |
4495 | &ironlake_cursor_srwm_info, | |
4496 | &fbc_wm, &plane_wm, &cursor_wm)) | |
4497 | return; | |
4498 | ||
4499 | I915_WRITE(WM2_LP_ILK, | |
4500 | WM2_LP_EN | | |
4501 | (ILK_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) | | |
4502 | (fbc_wm << WM1_LP_FBC_SHIFT) | | |
4503 | (plane_wm << WM1_LP_SR_SHIFT) | | |
4504 | cursor_wm); | |
4505 | ||
4506 | /* | |
4507 | * WM3 is unsupported on ILK, probably because we don't have latency | |
4508 | * data for that power state | |
4509 | */ | |
1398261a YL |
4510 | } |
4511 | ||
d210246a | 4512 | static void sandybridge_update_wm(struct drm_device *dev) |
1398261a YL |
4513 | { |
4514 | struct drm_i915_private *dev_priv = dev->dev_private; | |
a0fa62d3 | 4515 | int latency = SNB_READ_WM0_LATENCY() * 100; /* In unit 0.1us */ |
d210246a CW |
4516 | int fbc_wm, plane_wm, cursor_wm; |
4517 | unsigned int enabled; | |
1398261a YL |
4518 | |
4519 | enabled = 0; | |
9f405100 CW |
4520 | if (g4x_compute_wm0(dev, 0, |
4521 | &sandybridge_display_wm_info, latency, | |
4522 | &sandybridge_cursor_wm_info, latency, | |
4523 | &plane_wm, &cursor_wm)) { | |
1398261a YL |
4524 | I915_WRITE(WM0_PIPEA_ILK, |
4525 | (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); | |
4526 | DRM_DEBUG_KMS("FIFO watermarks For pipe A -" | |
4527 | " plane %d, " "cursor: %d\n", | |
4528 | plane_wm, cursor_wm); | |
d210246a | 4529 | enabled |= 1; |
1398261a YL |
4530 | } |
4531 | ||
9f405100 CW |
4532 | if (g4x_compute_wm0(dev, 1, |
4533 | &sandybridge_display_wm_info, latency, | |
4534 | &sandybridge_cursor_wm_info, latency, | |
4535 | &plane_wm, &cursor_wm)) { | |
1398261a YL |
4536 | I915_WRITE(WM0_PIPEB_ILK, |
4537 | (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); | |
4538 | DRM_DEBUG_KMS("FIFO watermarks For pipe B -" | |
4539 | " plane %d, cursor: %d\n", | |
4540 | plane_wm, cursor_wm); | |
d210246a | 4541 | enabled |= 2; |
1398261a YL |
4542 | } |
4543 | ||
d6c892df JB |
4544 | /* IVB has 3 pipes */ |
4545 | if (IS_IVYBRIDGE(dev) && | |
4546 | g4x_compute_wm0(dev, 2, | |
4547 | &sandybridge_display_wm_info, latency, | |
4548 | &sandybridge_cursor_wm_info, latency, | |
4549 | &plane_wm, &cursor_wm)) { | |
4550 | I915_WRITE(WM0_PIPEC_IVB, | |
4551 | (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); | |
4552 | DRM_DEBUG_KMS("FIFO watermarks For pipe C -" | |
4553 | " plane %d, cursor: %d\n", | |
4554 | plane_wm, cursor_wm); | |
4555 | enabled |= 3; | |
4556 | } | |
4557 | ||
1398261a YL |
4558 | /* |
4559 | * Calculate and update the self-refresh watermark only when one | |
4560 | * display plane is used. | |
4561 | * | |
4562 | * SNB support 3 levels of watermark. | |
4563 | * | |
4564 | * WM1/WM2/WM2 watermarks have to be enabled in the ascending order, | |
4565 | * and disabled in the descending order | |
4566 | * | |
4567 | */ | |
4568 | I915_WRITE(WM3_LP_ILK, 0); | |
4569 | I915_WRITE(WM2_LP_ILK, 0); | |
4570 | I915_WRITE(WM1_LP_ILK, 0); | |
4571 | ||
d210246a | 4572 | if (!single_plane_enabled(enabled)) |
1398261a | 4573 | return; |
d210246a | 4574 | enabled = ffs(enabled) - 1; |
1398261a YL |
4575 | |
4576 | /* WM1 */ | |
d210246a CW |
4577 | if (!ironlake_compute_srwm(dev, 1, enabled, |
4578 | SNB_READ_WM1_LATENCY() * 500, | |
b79d4990 JB |
4579 | &sandybridge_display_srwm_info, |
4580 | &sandybridge_cursor_srwm_info, | |
4581 | &fbc_wm, &plane_wm, &cursor_wm)) | |
1398261a YL |
4582 | return; |
4583 | ||
4584 | I915_WRITE(WM1_LP_ILK, | |
4585 | WM1_LP_SR_EN | | |
4586 | (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) | | |
4587 | (fbc_wm << WM1_LP_FBC_SHIFT) | | |
4588 | (plane_wm << WM1_LP_SR_SHIFT) | | |
4589 | cursor_wm); | |
4590 | ||
4591 | /* WM2 */ | |
d210246a CW |
4592 | if (!ironlake_compute_srwm(dev, 2, enabled, |
4593 | SNB_READ_WM2_LATENCY() * 500, | |
b79d4990 JB |
4594 | &sandybridge_display_srwm_info, |
4595 | &sandybridge_cursor_srwm_info, | |
4596 | &fbc_wm, &plane_wm, &cursor_wm)) | |
1398261a YL |
4597 | return; |
4598 | ||
4599 | I915_WRITE(WM2_LP_ILK, | |
4600 | WM2_LP_EN | | |
4601 | (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) | | |
4602 | (fbc_wm << WM1_LP_FBC_SHIFT) | | |
4603 | (plane_wm << WM1_LP_SR_SHIFT) | | |
4604 | cursor_wm); | |
4605 | ||
4606 | /* WM3 */ | |
d210246a CW |
4607 | if (!ironlake_compute_srwm(dev, 3, enabled, |
4608 | SNB_READ_WM3_LATENCY() * 500, | |
b79d4990 JB |
4609 | &sandybridge_display_srwm_info, |
4610 | &sandybridge_cursor_srwm_info, | |
4611 | &fbc_wm, &plane_wm, &cursor_wm)) | |
1398261a YL |
4612 | return; |
4613 | ||
4614 | I915_WRITE(WM3_LP_ILK, | |
4615 | WM3_LP_EN | | |
4616 | (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) | | |
4617 | (fbc_wm << WM1_LP_FBC_SHIFT) | | |
4618 | (plane_wm << WM1_LP_SR_SHIFT) | | |
4619 | cursor_wm); | |
4620 | } | |
4621 | ||
7662c8bd SL |
4622 | /** |
4623 | * intel_update_watermarks - update FIFO watermark values based on current modes | |
4624 | * | |
4625 | * Calculate watermark values for the various WM regs based on current mode | |
4626 | * and plane configuration. | |
4627 | * | |
4628 | * There are several cases to deal with here: | |
4629 | * - normal (i.e. non-self-refresh) | |
4630 | * - self-refresh (SR) mode | |
4631 | * - lines are large relative to FIFO size (buffer can hold up to 2) | |
4632 | * - lines are small relative to FIFO size (buffer can hold more than 2 | |
4633 | * lines), so need to account for TLB latency | |
4634 | * | |
4635 | * The normal calculation is: | |
4636 | * watermark = dotclock * bytes per pixel * latency | |
4637 | * where latency is platform & configuration dependent (we assume pessimal | |
4638 | * values here). | |
4639 | * | |
4640 | * The SR calculation is: | |
4641 | * watermark = (trunc(latency/line time)+1) * surface width * | |
4642 | * bytes per pixel | |
4643 | * where | |
4644 | * line time = htotal / dotclock | |
fa143215 | 4645 | * surface width = hdisplay for normal plane and 64 for cursor |
7662c8bd SL |
4646 | * and latency is assumed to be high, as above. |
4647 | * | |
4648 | * The final value programmed to the register should always be rounded up, | |
4649 | * and include an extra 2 entries to account for clock crossings. | |
4650 | * | |
4651 | * We don't use the sprite, so we can ignore that. And on Crestline we have | |
4652 | * to set the non-SR watermarks to 8. | |
5eddb70b | 4653 | */ |
7662c8bd SL |
4654 | static void intel_update_watermarks(struct drm_device *dev) |
4655 | { | |
e70236a8 | 4656 | struct drm_i915_private *dev_priv = dev->dev_private; |
7662c8bd | 4657 | |
d210246a CW |
4658 | if (dev_priv->display.update_wm) |
4659 | dev_priv->display.update_wm(dev); | |
7662c8bd SL |
4660 | } |
4661 | ||
a7615030 CW |
4662 | static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv) |
4663 | { | |
72bbe58c KP |
4664 | if (i915_panel_use_ssc >= 0) |
4665 | return i915_panel_use_ssc != 0; | |
4666 | return dev_priv->lvds_use_ssc | |
435793df | 4667 | && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE); |
a7615030 CW |
4668 | } |
4669 | ||
5a354204 JB |
4670 | /** |
4671 | * intel_choose_pipe_bpp_dither - figure out what color depth the pipe should send | |
4672 | * @crtc: CRTC structure | |
3b5c78a3 | 4673 | * @mode: requested mode |
5a354204 JB |
4674 | * |
4675 | * A pipe may be connected to one or more outputs. Based on the depth of the | |
4676 | * attached framebuffer, choose a good color depth to use on the pipe. | |
4677 | * | |
4678 | * If possible, match the pipe depth to the fb depth. In some cases, this | |
4679 | * isn't ideal, because the connected output supports a lesser or restricted | |
4680 | * set of depths. Resolve that here: | |
4681 | * LVDS typically supports only 6bpc, so clamp down in that case | |
4682 | * HDMI supports only 8bpc or 12bpc, so clamp to 8bpc with dither for 10bpc | |
4683 | * Displays may support a restricted set as well, check EDID and clamp as | |
4684 | * appropriate. | |
3b5c78a3 | 4685 | * DP may want to dither down to 6bpc to fit larger modes |
5a354204 JB |
4686 | * |
4687 | * RETURNS: | |
4688 | * Dithering requirement (i.e. false if display bpc and pipe bpc match, | |
4689 | * true if they don't match). | |
4690 | */ | |
4691 | static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc, | |
3b5c78a3 AJ |
4692 | unsigned int *pipe_bpp, |
4693 | struct drm_display_mode *mode) | |
5a354204 JB |
4694 | { |
4695 | struct drm_device *dev = crtc->dev; | |
4696 | struct drm_i915_private *dev_priv = dev->dev_private; | |
4697 | struct drm_encoder *encoder; | |
4698 | struct drm_connector *connector; | |
4699 | unsigned int display_bpc = UINT_MAX, bpc; | |
4700 | ||
4701 | /* Walk the encoders & connectors on this crtc, get min bpc */ | |
4702 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { | |
4703 | struct intel_encoder *intel_encoder = to_intel_encoder(encoder); | |
4704 | ||
4705 | if (encoder->crtc != crtc) | |
4706 | continue; | |
4707 | ||
4708 | if (intel_encoder->type == INTEL_OUTPUT_LVDS) { | |
4709 | unsigned int lvds_bpc; | |
4710 | ||
4711 | if ((I915_READ(PCH_LVDS) & LVDS_A3_POWER_MASK) == | |
4712 | LVDS_A3_POWER_UP) | |
4713 | lvds_bpc = 8; | |
4714 | else | |
4715 | lvds_bpc = 6; | |
4716 | ||
4717 | if (lvds_bpc < display_bpc) { | |
82820490 | 4718 | DRM_DEBUG_KMS("clamping display bpc (was %d) to LVDS (%d)\n", display_bpc, lvds_bpc); |
5a354204 JB |
4719 | display_bpc = lvds_bpc; |
4720 | } | |
4721 | continue; | |
4722 | } | |
4723 | ||
4724 | if (intel_encoder->type == INTEL_OUTPUT_EDP) { | |
4725 | /* Use VBT settings if we have an eDP panel */ | |
4726 | unsigned int edp_bpc = dev_priv->edp.bpp / 3; | |
4727 | ||
4728 | if (edp_bpc < display_bpc) { | |
82820490 | 4729 | DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc); |
5a354204 JB |
4730 | display_bpc = edp_bpc; |
4731 | } | |
4732 | continue; | |
4733 | } | |
4734 | ||
4735 | /* Not one of the known troublemakers, check the EDID */ | |
4736 | list_for_each_entry(connector, &dev->mode_config.connector_list, | |
4737 | head) { | |
4738 | if (connector->encoder != encoder) | |
4739 | continue; | |
4740 | ||
62ac41a6 JB |
4741 | /* Don't use an invalid EDID bpc value */ |
4742 | if (connector->display_info.bpc && | |
4743 | connector->display_info.bpc < display_bpc) { | |
82820490 | 4744 | DRM_DEBUG_KMS("clamping display bpc (was %d) to EDID reported max of %d\n", display_bpc, connector->display_info.bpc); |
5a354204 JB |
4745 | display_bpc = connector->display_info.bpc; |
4746 | } | |
4747 | } | |
4748 | ||
4749 | /* | |
4750 | * HDMI is either 12 or 8, so if the display lets 10bpc sneak | |
4751 | * through, clamp it down. (Note: >12bpc will be caught below.) | |
4752 | */ | |
4753 | if (intel_encoder->type == INTEL_OUTPUT_HDMI) { | |
4754 | if (display_bpc > 8 && display_bpc < 12) { | |
82820490 | 4755 | DRM_DEBUG_KMS("forcing bpc to 12 for HDMI\n"); |
5a354204 JB |
4756 | display_bpc = 12; |
4757 | } else { | |
82820490 | 4758 | DRM_DEBUG_KMS("forcing bpc to 8 for HDMI\n"); |
5a354204 JB |
4759 | display_bpc = 8; |
4760 | } | |
4761 | } | |
4762 | } | |
4763 | ||
3b5c78a3 AJ |
4764 | if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) { |
4765 | DRM_DEBUG_KMS("Dithering DP to 6bpc\n"); | |
4766 | display_bpc = 6; | |
4767 | } | |
4768 | ||
5a354204 JB |
4769 | /* |
4770 | * We could just drive the pipe at the highest bpc all the time and | |
4771 | * enable dithering as needed, but that costs bandwidth. So choose | |
4772 | * the minimum value that expresses the full color range of the fb but | |
4773 | * also stays within the max display bpc discovered above. | |
4774 | */ | |
4775 | ||
4776 | switch (crtc->fb->depth) { | |
4777 | case 8: | |
4778 | bpc = 8; /* since we go through a colormap */ | |
4779 | break; | |
4780 | case 15: | |
4781 | case 16: | |
4782 | bpc = 6; /* min is 18bpp */ | |
4783 | break; | |
4784 | case 24: | |
578393cd | 4785 | bpc = 8; |
5a354204 JB |
4786 | break; |
4787 | case 30: | |
578393cd | 4788 | bpc = 10; |
5a354204 JB |
4789 | break; |
4790 | case 48: | |
578393cd | 4791 | bpc = 12; |
5a354204 JB |
4792 | break; |
4793 | default: | |
4794 | DRM_DEBUG("unsupported depth, assuming 24 bits\n"); | |
4795 | bpc = min((unsigned int)8, display_bpc); | |
4796 | break; | |
4797 | } | |
4798 | ||
578393cd KP |
4799 | display_bpc = min(display_bpc, bpc); |
4800 | ||
82820490 AJ |
4801 | DRM_DEBUG_KMS("setting pipe bpc to %d (max display bpc %d)\n", |
4802 | bpc, display_bpc); | |
5a354204 | 4803 | |
578393cd | 4804 | *pipe_bpp = display_bpc * 3; |
5a354204 JB |
4805 | |
4806 | return display_bpc != bpc; | |
4807 | } | |
4808 | ||
f564048e EA |
4809 | static int i9xx_crtc_mode_set(struct drm_crtc *crtc, |
4810 | struct drm_display_mode *mode, | |
4811 | struct drm_display_mode *adjusted_mode, | |
4812 | int x, int y, | |
4813 | struct drm_framebuffer *old_fb) | |
79e53945 JB |
4814 | { |
4815 | struct drm_device *dev = crtc->dev; | |
4816 | struct drm_i915_private *dev_priv = dev->dev_private; | |
4817 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | |
4818 | int pipe = intel_crtc->pipe; | |
80824003 | 4819 | int plane = intel_crtc->plane; |
c751ce4f | 4820 | int refclk, num_connectors = 0; |
652c393a | 4821 | intel_clock_t clock, reduced_clock; |
5eddb70b | 4822 | u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf; |
652c393a | 4823 | bool ok, has_reduced_clock = false, is_sdvo = false, is_dvo = false; |
a4fc5ed6 | 4824 | bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false; |
79e53945 | 4825 | struct drm_mode_config *mode_config = &dev->mode_config; |
5eddb70b | 4826 | struct intel_encoder *encoder; |
d4906093 | 4827 | const intel_limit_t *limit; |
5c3b82e2 | 4828 | int ret; |
fae14981 | 4829 | u32 temp; |
aa9b500d | 4830 | u32 lvds_sync = 0; |
79e53945 | 4831 | |
5eddb70b CW |
4832 | list_for_each_entry(encoder, &mode_config->encoder_list, base.head) { |
4833 | if (encoder->base.crtc != crtc) | |
79e53945 JB |
4834 | continue; |
4835 | ||
5eddb70b | 4836 | switch (encoder->type) { |
79e53945 JB |
4837 | case INTEL_OUTPUT_LVDS: |
4838 | is_lvds = true; | |
4839 | break; | |
4840 | case INTEL_OUTPUT_SDVO: | |
7d57382e | 4841 | case INTEL_OUTPUT_HDMI: |
79e53945 | 4842 | is_sdvo = true; |
5eddb70b | 4843 | if (encoder->needs_tv_clock) |
e2f0ba97 | 4844 | is_tv = true; |
79e53945 JB |
4845 | break; |
4846 | case INTEL_OUTPUT_DVO: | |
4847 | is_dvo = true; | |
4848 | break; | |
4849 | case INTEL_OUTPUT_TVOUT: | |
4850 | is_tv = true; | |
4851 | break; | |
4852 | case INTEL_OUTPUT_ANALOG: | |
4853 | is_crt = true; | |
4854 | break; | |
a4fc5ed6 KP |
4855 | case INTEL_OUTPUT_DISPLAYPORT: |
4856 | is_dp = true; | |
4857 | break; | |
79e53945 | 4858 | } |
43565a06 | 4859 | |
c751ce4f | 4860 | num_connectors++; |
79e53945 JB |
4861 | } |
4862 | ||
a7615030 | 4863 | if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) { |
43565a06 | 4864 | refclk = dev_priv->lvds_ssc_freq * 1000; |
28c97730 | 4865 | DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n", |
5eddb70b | 4866 | refclk / 1000); |
a6c45cf0 | 4867 | } else if (!IS_GEN2(dev)) { |
79e53945 JB |
4868 | refclk = 96000; |
4869 | } else { | |
4870 | refclk = 48000; | |
4871 | } | |
4872 | ||
d4906093 ML |
4873 | /* |
4874 | * Returns a set of divisors for the desired target clock with the given | |
4875 | * refclk, or FALSE. The returned values represent the clock equation: | |
4876 | * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. | |
4877 | */ | |
1b894b59 | 4878 | limit = intel_limit(crtc, refclk); |
d4906093 | 4879 | ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock); |
79e53945 JB |
4880 | if (!ok) { |
4881 | DRM_ERROR("Couldn't find PLL settings for mode!\n"); | |
5c3b82e2 | 4882 | return -EINVAL; |
79e53945 JB |
4883 | } |
4884 | ||
cda4b7d3 | 4885 | /* Ensure that the cursor is valid for the new mode before changing... */ |
6b383a7f | 4886 | intel_crtc_update_cursor(crtc, true); |
cda4b7d3 | 4887 | |
ddc9003c ZY |
4888 | if (is_lvds && dev_priv->lvds_downclock_avail) { |
4889 | has_reduced_clock = limit->find_pll(limit, crtc, | |
5eddb70b CW |
4890 | dev_priv->lvds_downclock, |
4891 | refclk, | |
4892 | &reduced_clock); | |
18f9ed12 ZY |
4893 | if (has_reduced_clock && (clock.p != reduced_clock.p)) { |
4894 | /* | |
4895 | * If the different P is found, it means that we can't | |
4896 | * switch the display clock by using the FP0/FP1. | |
4897 | * In such case we will disable the LVDS downclock | |
4898 | * feature. | |
4899 | */ | |
4900 | DRM_DEBUG_KMS("Different P is found for " | |
5eddb70b | 4901 | "LVDS clock/downclock\n"); |
18f9ed12 ZY |
4902 | has_reduced_clock = 0; |
4903 | } | |
652c393a | 4904 | } |
7026d4ac ZW |
4905 | /* SDVO TV has fixed PLL values depend on its clock range, |
4906 | this mirrors vbios setting. */ | |
4907 | if (is_sdvo && is_tv) { | |
4908 | if (adjusted_mode->clock >= 100000 | |
5eddb70b | 4909 | && adjusted_mode->clock < 140500) { |
7026d4ac ZW |
4910 | clock.p1 = 2; |
4911 | clock.p2 = 10; | |
4912 | clock.n = 3; | |
4913 | clock.m1 = 16; | |
4914 | clock.m2 = 8; | |
4915 | } else if (adjusted_mode->clock >= 140500 | |
5eddb70b | 4916 | && adjusted_mode->clock <= 200000) { |
7026d4ac ZW |
4917 | clock.p1 = 1; |
4918 | clock.p2 = 10; | |
4919 | clock.n = 6; | |
4920 | clock.m1 = 12; | |
4921 | clock.m2 = 8; | |
4922 | } | |
4923 | } | |
4924 | ||
f2b115e6 | 4925 | if (IS_PINEVIEW(dev)) { |
2177832f | 4926 | fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2; |
652c393a JB |
4927 | if (has_reduced_clock) |
4928 | fp2 = (1 << reduced_clock.n) << 16 | | |
4929 | reduced_clock.m1 << 8 | reduced_clock.m2; | |
4930 | } else { | |
2177832f | 4931 | fp = clock.n << 16 | clock.m1 << 8 | clock.m2; |
652c393a JB |
4932 | if (has_reduced_clock) |
4933 | fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 | | |
4934 | reduced_clock.m2; | |
4935 | } | |
79e53945 | 4936 | |
929c77fb | 4937 | dpll = DPLL_VGA_MODE_DIS; |
2c07245f | 4938 | |
a6c45cf0 | 4939 | if (!IS_GEN2(dev)) { |
79e53945 JB |
4940 | if (is_lvds) |
4941 | dpll |= DPLLB_MODE_LVDS; | |
4942 | else | |
4943 | dpll |= DPLLB_MODE_DAC_SERIAL; | |
4944 | if (is_sdvo) { | |
6c9547ff CW |
4945 | int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode); |
4946 | if (pixel_multiplier > 1) { | |
4947 | if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) | |
4948 | dpll |= (pixel_multiplier - 1) << SDVO_MULTIPLIER_SHIFT_HIRES; | |
6c9547ff | 4949 | } |
79e53945 | 4950 | dpll |= DPLL_DVO_HIGH_SPEED; |
79e53945 | 4951 | } |
929c77fb | 4952 | if (is_dp) |
a4fc5ed6 | 4953 | dpll |= DPLL_DVO_HIGH_SPEED; |
79e53945 JB |
4954 | |
4955 | /* compute bitmask from p1 value */ | |
f2b115e6 AJ |
4956 | if (IS_PINEVIEW(dev)) |
4957 | dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW; | |
2c07245f | 4958 | else { |
2177832f | 4959 | dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; |
652c393a JB |
4960 | if (IS_G4X(dev) && has_reduced_clock) |
4961 | dpll |= (1 << (reduced_clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; | |
2c07245f | 4962 | } |
79e53945 JB |
4963 | switch (clock.p2) { |
4964 | case 5: | |
4965 | dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5; | |
4966 | break; | |
4967 | case 7: | |
4968 | dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7; | |
4969 | break; | |
4970 | case 10: | |
4971 | dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10; | |
4972 | break; | |
4973 | case 14: | |
4974 | dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; | |
4975 | break; | |
4976 | } | |
929c77fb | 4977 | if (INTEL_INFO(dev)->gen >= 4) |
79e53945 JB |
4978 | dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT); |
4979 | } else { | |
4980 | if (is_lvds) { | |
4981 | dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; | |
4982 | } else { | |
4983 | if (clock.p1 == 2) | |
4984 | dpll |= PLL_P1_DIVIDE_BY_TWO; | |
4985 | else | |
4986 | dpll |= (clock.p1 - 2) << DPLL_FPA01_P1_POST_DIV_SHIFT; | |
4987 | if (clock.p2 == 4) | |
4988 | dpll |= PLL_P2_DIVIDE_BY_4; | |
4989 | } | |
4990 | } | |
4991 | ||
43565a06 KH |
4992 | if (is_sdvo && is_tv) |
4993 | dpll |= PLL_REF_INPUT_TVCLKINBC; | |
4994 | else if (is_tv) | |
79e53945 | 4995 | /* XXX: just matching BIOS for now */ |
43565a06 | 4996 | /* dpll |= PLL_REF_INPUT_TVCLKINBC; */ |
79e53945 | 4997 | dpll |= 3; |
a7615030 | 4998 | else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) |
43565a06 | 4999 | dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; |
79e53945 JB |
5000 | else |
5001 | dpll |= PLL_REF_INPUT_DREFCLK; | |
5002 | ||
5003 | /* setup pipeconf */ | |
5eddb70b | 5004 | pipeconf = I915_READ(PIPECONF(pipe)); |
79e53945 JB |
5005 | |
5006 | /* Set up the display plane register */ | |
5007 | dspcntr = DISPPLANE_GAMMA_ENABLE; | |
5008 | ||
f2b115e6 | 5009 | /* Ironlake's plane is forced to pipe, bit 24 is to |
2c07245f | 5010 | enable color space conversion */ |
929c77fb EA |
5011 | if (pipe == 0) |
5012 | dspcntr &= ~DISPPLANE_SEL_PIPE_MASK; | |
5013 | else | |
5014 | dspcntr |= DISPPLANE_SEL_PIPE_B; | |
79e53945 | 5015 | |
a6c45cf0 | 5016 | if (pipe == 0 && INTEL_INFO(dev)->gen < 4) { |
79e53945 JB |
5017 | /* Enable pixel doubling when the dot clock is > 90% of the (display) |
5018 | * core speed. | |
5019 | * | |
5020 | * XXX: No double-wide on 915GM pipe B. Is that the only reason for the | |
5021 | * pipe == 0 check? | |
5022 | */ | |
e70236a8 JB |
5023 | if (mode->clock > |
5024 | dev_priv->display.get_display_clock_speed(dev) * 9 / 10) | |
5eddb70b | 5025 | pipeconf |= PIPECONF_DOUBLE_WIDE; |
79e53945 | 5026 | else |
5eddb70b | 5027 | pipeconf &= ~PIPECONF_DOUBLE_WIDE; |
79e53945 JB |
5028 | } |
5029 | ||
3b5c78a3 AJ |
5030 | /* default to 8bpc */ |
5031 | pipeconf &= ~(PIPECONF_BPP_MASK | PIPECONF_DITHER_EN); | |
5032 | if (is_dp) { | |
5033 | if (mode->private_flags & INTEL_MODE_DP_FORCE_6BPC) { | |
5034 | pipeconf |= PIPECONF_BPP_6 | | |
5035 | PIPECONF_DITHER_EN | | |
5036 | PIPECONF_DITHER_TYPE_SP; | |
5037 | } | |
5038 | } | |
5039 | ||
929c77fb | 5040 | dpll |= DPLL_VCO_ENABLE; |
8d86dc6a | 5041 | |
28c97730 | 5042 | DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B'); |
79e53945 JB |
5043 | drm_mode_debug_printmodeline(mode); |
5044 | ||
fae14981 EA |
5045 | I915_WRITE(FP0(pipe), fp); |
5046 | I915_WRITE(DPLL(pipe), dpll & ~DPLL_VCO_ENABLE); | |
5eddb70b | 5047 | |
fae14981 | 5048 | POSTING_READ(DPLL(pipe)); |
c713bb08 | 5049 | udelay(150); |
8db9d77b | 5050 | |
79e53945 JB |
5051 | /* The LVDS pin pair needs to be on before the DPLLs are enabled. |
5052 | * This is an exception to the general rule that mode_set doesn't turn | |
5053 | * things on. | |
5054 | */ | |
5055 | if (is_lvds) { | |
fae14981 | 5056 | temp = I915_READ(LVDS); |
5eddb70b | 5057 | temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP; |
b3b095b3 | 5058 | if (pipe == 1) { |
929c77fb | 5059 | temp |= LVDS_PIPEB_SELECT; |
b3b095b3 | 5060 | } else { |
929c77fb | 5061 | temp &= ~LVDS_PIPEB_SELECT; |
b3b095b3 | 5062 | } |
a3e17eb8 | 5063 | /* set the corresponsding LVDS_BORDER bit */ |
5eddb70b | 5064 | temp |= dev_priv->lvds_border_bits; |
79e53945 JB |
5065 | /* Set the B0-B3 data pairs corresponding to whether we're going to |
5066 | * set the DPLLs for dual-channel mode or not. | |
5067 | */ | |
5068 | if (clock.p2 == 7) | |
5eddb70b | 5069 | temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP; |
79e53945 | 5070 | else |
5eddb70b | 5071 | temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP); |
79e53945 JB |
5072 | |
5073 | /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP) | |
5074 | * appropriately here, but we need to look more thoroughly into how | |
5075 | * panels behave in the two modes. | |
5076 | */ | |
929c77fb EA |
5077 | /* set the dithering flag on LVDS as needed */ |
5078 | if (INTEL_INFO(dev)->gen >= 4) { | |
434ed097 | 5079 | if (dev_priv->lvds_dither) |
5eddb70b | 5080 | temp |= LVDS_ENABLE_DITHER; |
434ed097 | 5081 | else |
5eddb70b | 5082 | temp &= ~LVDS_ENABLE_DITHER; |
898822ce | 5083 | } |
aa9b500d BF |
5084 | if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) |
5085 | lvds_sync |= LVDS_HSYNC_POLARITY; | |
5086 | if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) | |
5087 | lvds_sync |= LVDS_VSYNC_POLARITY; | |
5088 | if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY)) | |
5089 | != lvds_sync) { | |
5090 | char flags[2] = "-+"; | |
5091 | DRM_INFO("Changing LVDS panel from " | |
5092 | "(%chsync, %cvsync) to (%chsync, %cvsync)\n", | |
5093 | flags[!(temp & LVDS_HSYNC_POLARITY)], | |
5094 | flags[!(temp & LVDS_VSYNC_POLARITY)], | |
5095 | flags[!(lvds_sync & LVDS_HSYNC_POLARITY)], | |
5096 | flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]); | |
5097 | temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY); | |
5098 | temp |= lvds_sync; | |
5099 | } | |
fae14981 | 5100 | I915_WRITE(LVDS, temp); |
79e53945 | 5101 | } |
434ed097 | 5102 | |
929c77fb | 5103 | if (is_dp) { |
a4fc5ed6 | 5104 | intel_dp_set_m_n(crtc, mode, adjusted_mode); |
434ed097 JB |
5105 | } |
5106 | ||
fae14981 | 5107 | I915_WRITE(DPLL(pipe), dpll); |
5eddb70b | 5108 | |
c713bb08 | 5109 | /* Wait for the clocks to stabilize. */ |
fae14981 | 5110 | POSTING_READ(DPLL(pipe)); |
c713bb08 | 5111 | udelay(150); |
32f9d658 | 5112 | |
c713bb08 EA |
5113 | if (INTEL_INFO(dev)->gen >= 4) { |
5114 | temp = 0; | |
5115 | if (is_sdvo) { | |
5116 | temp = intel_mode_get_pixel_multiplier(adjusted_mode); | |
5117 | if (temp > 1) | |
5118 | temp = (temp - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT; | |
5119 | else | |
5120 | temp = 0; | |
32f9d658 | 5121 | } |
c713bb08 EA |
5122 | I915_WRITE(DPLL_MD(pipe), temp); |
5123 | } else { | |
5124 | /* The pixel multiplier can only be updated once the | |
5125 | * DPLL is enabled and the clocks are stable. | |
5126 | * | |
5127 | * So write it again. | |
5128 | */ | |
fae14981 | 5129 | I915_WRITE(DPLL(pipe), dpll); |
79e53945 | 5130 | } |
79e53945 | 5131 | |
5eddb70b | 5132 | intel_crtc->lowfreq_avail = false; |
652c393a | 5133 | if (is_lvds && has_reduced_clock && i915_powersave) { |
fae14981 | 5134 | I915_WRITE(FP1(pipe), fp2); |
652c393a JB |
5135 | intel_crtc->lowfreq_avail = true; |
5136 | if (HAS_PIPE_CXSR(dev)) { | |
28c97730 | 5137 | DRM_DEBUG_KMS("enabling CxSR downclocking\n"); |
652c393a JB |
5138 | pipeconf |= PIPECONF_CXSR_DOWNCLOCK; |
5139 | } | |
5140 | } else { | |
fae14981 | 5141 | I915_WRITE(FP1(pipe), fp); |
652c393a | 5142 | if (HAS_PIPE_CXSR(dev)) { |
28c97730 | 5143 | DRM_DEBUG_KMS("disabling CxSR downclocking\n"); |
652c393a JB |
5144 | pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK; |
5145 | } | |
5146 | } | |
5147 | ||
734b4157 KH |
5148 | if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { |
5149 | pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION; | |
5150 | /* the chip adds 2 halflines automatically */ | |
5151 | adjusted_mode->crtc_vdisplay -= 1; | |
5152 | adjusted_mode->crtc_vtotal -= 1; | |
5153 | adjusted_mode->crtc_vblank_start -= 1; | |
5154 | adjusted_mode->crtc_vblank_end -= 1; | |
5155 | adjusted_mode->crtc_vsync_end -= 1; | |
5156 | adjusted_mode->crtc_vsync_start -= 1; | |
5157 | } else | |
5158 | pipeconf &= ~PIPECONF_INTERLACE_W_FIELD_INDICATION; /* progressive */ | |
5159 | ||
5eddb70b CW |
5160 | I915_WRITE(HTOTAL(pipe), |
5161 | (adjusted_mode->crtc_hdisplay - 1) | | |
79e53945 | 5162 | ((adjusted_mode->crtc_htotal - 1) << 16)); |
5eddb70b CW |
5163 | I915_WRITE(HBLANK(pipe), |
5164 | (adjusted_mode->crtc_hblank_start - 1) | | |
79e53945 | 5165 | ((adjusted_mode->crtc_hblank_end - 1) << 16)); |
5eddb70b CW |
5166 | I915_WRITE(HSYNC(pipe), |
5167 | (adjusted_mode->crtc_hsync_start - 1) | | |
79e53945 | 5168 | ((adjusted_mode->crtc_hsync_end - 1) << 16)); |
5eddb70b CW |
5169 | |
5170 | I915_WRITE(VTOTAL(pipe), | |
5171 | (adjusted_mode->crtc_vdisplay - 1) | | |
79e53945 | 5172 | ((adjusted_mode->crtc_vtotal - 1) << 16)); |
5eddb70b CW |
5173 | I915_WRITE(VBLANK(pipe), |
5174 | (adjusted_mode->crtc_vblank_start - 1) | | |
79e53945 | 5175 | ((adjusted_mode->crtc_vblank_end - 1) << 16)); |
5eddb70b CW |
5176 | I915_WRITE(VSYNC(pipe), |
5177 | (adjusted_mode->crtc_vsync_start - 1) | | |
79e53945 | 5178 | ((adjusted_mode->crtc_vsync_end - 1) << 16)); |
5eddb70b CW |
5179 | |
5180 | /* pipesrc and dspsize control the size that is scaled from, | |
5181 | * which should always be the user's requested size. | |
79e53945 | 5182 | */ |
929c77fb EA |
5183 | I915_WRITE(DSPSIZE(plane), |
5184 | ((mode->vdisplay - 1) << 16) | | |
5185 | (mode->hdisplay - 1)); | |
5186 | I915_WRITE(DSPPOS(plane), 0); | |
5eddb70b CW |
5187 | I915_WRITE(PIPESRC(pipe), |
5188 | ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1)); | |
2c07245f | 5189 | |
f564048e EA |
5190 | I915_WRITE(PIPECONF(pipe), pipeconf); |
5191 | POSTING_READ(PIPECONF(pipe)); | |
929c77fb | 5192 | intel_enable_pipe(dev_priv, pipe, false); |
f564048e EA |
5193 | |
5194 | intel_wait_for_vblank(dev, pipe); | |
5195 | ||
f564048e EA |
5196 | I915_WRITE(DSPCNTR(plane), dspcntr); |
5197 | POSTING_READ(DSPCNTR(plane)); | |
284d9529 | 5198 | intel_enable_plane(dev_priv, plane, pipe); |
f564048e EA |
5199 | |
5200 | ret = intel_pipe_set_base(crtc, x, y, old_fb); | |
5201 | ||
5202 | intel_update_watermarks(dev); | |
5203 | ||
f564048e EA |
5204 | return ret; |
5205 | } | |
5206 | ||
9fb526db KP |
5207 | /* |
5208 | * Initialize reference clocks when the driver loads | |
5209 | */ | |
5210 | void ironlake_init_pch_refclk(struct drm_device *dev) | |
13d83a67 JB |
5211 | { |
5212 | struct drm_i915_private *dev_priv = dev->dev_private; | |
5213 | struct drm_mode_config *mode_config = &dev->mode_config; | |
13d83a67 | 5214 | struct intel_encoder *encoder; |
13d83a67 JB |
5215 | u32 temp; |
5216 | bool has_lvds = false; | |
199e5d79 KP |
5217 | bool has_cpu_edp = false; |
5218 | bool has_pch_edp = false; | |
5219 | bool has_panel = false; | |
99eb6a01 KP |
5220 | bool has_ck505 = false; |
5221 | bool can_ssc = false; | |
13d83a67 JB |
5222 | |
5223 | /* We need to take the global config into account */ | |
199e5d79 KP |
5224 | list_for_each_entry(encoder, &mode_config->encoder_list, |
5225 | base.head) { | |
5226 | switch (encoder->type) { | |
5227 | case INTEL_OUTPUT_LVDS: | |
5228 | has_panel = true; | |
5229 | has_lvds = true; | |
5230 | break; | |
5231 | case INTEL_OUTPUT_EDP: | |
5232 | has_panel = true; | |
5233 | if (intel_encoder_is_pch_edp(&encoder->base)) | |
5234 | has_pch_edp = true; | |
5235 | else | |
5236 | has_cpu_edp = true; | |
5237 | break; | |
13d83a67 JB |
5238 | } |
5239 | } | |
5240 | ||
99eb6a01 KP |
5241 | if (HAS_PCH_IBX(dev)) { |
5242 | has_ck505 = dev_priv->display_clock_mode; | |
5243 | can_ssc = has_ck505; | |
5244 | } else { | |
5245 | has_ck505 = false; | |
5246 | can_ssc = true; | |
5247 | } | |
5248 | ||
5249 | DRM_DEBUG_KMS("has_panel %d has_lvds %d has_pch_edp %d has_cpu_edp %d has_ck505 %d\n", | |
5250 | has_panel, has_lvds, has_pch_edp, has_cpu_edp, | |
5251 | has_ck505); | |
13d83a67 JB |
5252 | |
5253 | /* Ironlake: try to setup display ref clock before DPLL | |
5254 | * enabling. This is only under driver's control after | |
5255 | * PCH B stepping, previous chipset stepping should be | |
5256 | * ignoring this setting. | |
5257 | */ | |
5258 | temp = I915_READ(PCH_DREF_CONTROL); | |
5259 | /* Always enable nonspread source */ | |
5260 | temp &= ~DREF_NONSPREAD_SOURCE_MASK; | |
13d83a67 | 5261 | |
99eb6a01 KP |
5262 | if (has_ck505) |
5263 | temp |= DREF_NONSPREAD_CK505_ENABLE; | |
5264 | else | |
5265 | temp |= DREF_NONSPREAD_SOURCE_ENABLE; | |
13d83a67 | 5266 | |
199e5d79 KP |
5267 | if (has_panel) { |
5268 | temp &= ~DREF_SSC_SOURCE_MASK; | |
5269 | temp |= DREF_SSC_SOURCE_ENABLE; | |
13d83a67 | 5270 | |
199e5d79 | 5271 | /* SSC must be turned on before enabling the CPU output */ |
99eb6a01 | 5272 | if (intel_panel_use_ssc(dev_priv) && can_ssc) { |
199e5d79 | 5273 | DRM_DEBUG_KMS("Using SSC on panel\n"); |
13d83a67 | 5274 | temp |= DREF_SSC1_ENABLE; |
13d83a67 | 5275 | } |
199e5d79 KP |
5276 | |
5277 | /* Get SSC going before enabling the outputs */ | |
5278 | I915_WRITE(PCH_DREF_CONTROL, temp); | |
5279 | POSTING_READ(PCH_DREF_CONTROL); | |
5280 | udelay(200); | |
5281 | ||
13d83a67 JB |
5282 | temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK; |
5283 | ||
5284 | /* Enable CPU source on CPU attached eDP */ | |
199e5d79 | 5285 | if (has_cpu_edp) { |
99eb6a01 | 5286 | if (intel_panel_use_ssc(dev_priv) && can_ssc) { |
199e5d79 | 5287 | DRM_DEBUG_KMS("Using SSC on eDP\n"); |
13d83a67 | 5288 | temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; |
199e5d79 | 5289 | } |
13d83a67 JB |
5290 | else |
5291 | temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; | |
199e5d79 KP |
5292 | } else |
5293 | temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE; | |
5294 | ||
5295 | I915_WRITE(PCH_DREF_CONTROL, temp); | |
5296 | POSTING_READ(PCH_DREF_CONTROL); | |
5297 | udelay(200); | |
5298 | } else { | |
5299 | DRM_DEBUG_KMS("Disabling SSC entirely\n"); | |
5300 | ||
5301 | temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK; | |
5302 | ||
5303 | /* Turn off CPU output */ | |
5304 | temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE; | |
5305 | ||
5306 | I915_WRITE(PCH_DREF_CONTROL, temp); | |
5307 | POSTING_READ(PCH_DREF_CONTROL); | |
5308 | udelay(200); | |
5309 | ||
5310 | /* Turn off the SSC source */ | |
5311 | temp &= ~DREF_SSC_SOURCE_MASK; | |
5312 | temp |= DREF_SSC_SOURCE_DISABLE; | |
5313 | ||
5314 | /* Turn off SSC1 */ | |
5315 | temp &= ~ DREF_SSC1_ENABLE; | |
5316 | ||
13d83a67 JB |
5317 | I915_WRITE(PCH_DREF_CONTROL, temp); |
5318 | POSTING_READ(PCH_DREF_CONTROL); | |
5319 | udelay(200); | |
5320 | } | |
5321 | } | |
5322 | ||
d9d444cb JB |
5323 | static int ironlake_get_refclk(struct drm_crtc *crtc) |
5324 | { | |
5325 | struct drm_device *dev = crtc->dev; | |
5326 | struct drm_i915_private *dev_priv = dev->dev_private; | |
5327 | struct intel_encoder *encoder; | |
5328 | struct drm_mode_config *mode_config = &dev->mode_config; | |
5329 | struct intel_encoder *edp_encoder = NULL; | |
5330 | int num_connectors = 0; | |
5331 | bool is_lvds = false; | |
5332 | ||
5333 | list_for_each_entry(encoder, &mode_config->encoder_list, base.head) { | |
5334 | if (encoder->base.crtc != crtc) | |
5335 | continue; | |
5336 | ||
5337 | switch (encoder->type) { | |
5338 | case INTEL_OUTPUT_LVDS: | |
5339 | is_lvds = true; | |
5340 | break; | |
5341 | case INTEL_OUTPUT_EDP: | |
5342 | edp_encoder = encoder; | |
5343 | break; | |
5344 | } | |
5345 | num_connectors++; | |
5346 | } | |
5347 | ||
5348 | if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) { | |
5349 | DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n", | |
5350 | dev_priv->lvds_ssc_freq); | |
5351 | return dev_priv->lvds_ssc_freq * 1000; | |
5352 | } | |
5353 | ||
5354 | return 120000; | |
5355 | } | |
5356 | ||
f564048e EA |
5357 | static int ironlake_crtc_mode_set(struct drm_crtc *crtc, |
5358 | struct drm_display_mode *mode, | |
5359 | struct drm_display_mode *adjusted_mode, | |
5360 | int x, int y, | |
5361 | struct drm_framebuffer *old_fb) | |
79e53945 JB |
5362 | { |
5363 | struct drm_device *dev = crtc->dev; | |
5364 | struct drm_i915_private *dev_priv = dev->dev_private; | |
5365 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | |
5366 | int pipe = intel_crtc->pipe; | |
80824003 | 5367 | int plane = intel_crtc->plane; |
c751ce4f | 5368 | int refclk, num_connectors = 0; |
652c393a | 5369 | intel_clock_t clock, reduced_clock; |
5eddb70b | 5370 | u32 dpll, fp = 0, fp2 = 0, dspcntr, pipeconf; |
a07d6787 | 5371 | bool ok, has_reduced_clock = false, is_sdvo = false; |
a4fc5ed6 | 5372 | bool is_crt = false, is_lvds = false, is_tv = false, is_dp = false; |
8e647a27 | 5373 | struct intel_encoder *has_edp_encoder = NULL; |
79e53945 | 5374 | struct drm_mode_config *mode_config = &dev->mode_config; |
5eddb70b | 5375 | struct intel_encoder *encoder; |
d4906093 | 5376 | const intel_limit_t *limit; |
5c3b82e2 | 5377 | int ret; |
2c07245f | 5378 | struct fdi_m_n m_n = {0}; |
fae14981 | 5379 | u32 temp; |
aa9b500d | 5380 | u32 lvds_sync = 0; |
5a354204 JB |
5381 | int target_clock, pixel_multiplier, lane, link_bw, factor; |
5382 | unsigned int pipe_bpp; | |
5383 | bool dither; | |
79e53945 | 5384 | |
5eddb70b CW |
5385 | list_for_each_entry(encoder, &mode_config->encoder_list, base.head) { |
5386 | if (encoder->base.crtc != crtc) | |
79e53945 JB |
5387 | continue; |
5388 | ||
5eddb70b | 5389 | switch (encoder->type) { |
79e53945 JB |
5390 | case INTEL_OUTPUT_LVDS: |
5391 | is_lvds = true; | |
5392 | break; | |
5393 | case INTEL_OUTPUT_SDVO: | |
7d57382e | 5394 | case INTEL_OUTPUT_HDMI: |
79e53945 | 5395 | is_sdvo = true; |
5eddb70b | 5396 | if (encoder->needs_tv_clock) |
e2f0ba97 | 5397 | is_tv = true; |
79e53945 | 5398 | break; |
79e53945 JB |
5399 | case INTEL_OUTPUT_TVOUT: |
5400 | is_tv = true; | |
5401 | break; | |
5402 | case INTEL_OUTPUT_ANALOG: | |
5403 | is_crt = true; | |
5404 | break; | |
a4fc5ed6 KP |
5405 | case INTEL_OUTPUT_DISPLAYPORT: |
5406 | is_dp = true; | |
5407 | break; | |
32f9d658 | 5408 | case INTEL_OUTPUT_EDP: |
5eddb70b | 5409 | has_edp_encoder = encoder; |
32f9d658 | 5410 | break; |
79e53945 | 5411 | } |
43565a06 | 5412 | |
c751ce4f | 5413 | num_connectors++; |
79e53945 JB |
5414 | } |
5415 | ||
d9d444cb | 5416 | refclk = ironlake_get_refclk(crtc); |
79e53945 | 5417 | |
d4906093 ML |
5418 | /* |
5419 | * Returns a set of divisors for the desired target clock with the given | |
5420 | * refclk, or FALSE. The returned values represent the clock equation: | |
5421 | * reflck * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2. | |
5422 | */ | |
1b894b59 | 5423 | limit = intel_limit(crtc, refclk); |
d4906093 | 5424 | ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock); |
79e53945 JB |
5425 | if (!ok) { |
5426 | DRM_ERROR("Couldn't find PLL settings for mode!\n"); | |
5c3b82e2 | 5427 | return -EINVAL; |
79e53945 JB |
5428 | } |
5429 | ||
cda4b7d3 | 5430 | /* Ensure that the cursor is valid for the new mode before changing... */ |
6b383a7f | 5431 | intel_crtc_update_cursor(crtc, true); |
cda4b7d3 | 5432 | |
ddc9003c ZY |
5433 | if (is_lvds && dev_priv->lvds_downclock_avail) { |
5434 | has_reduced_clock = limit->find_pll(limit, crtc, | |
5eddb70b CW |
5435 | dev_priv->lvds_downclock, |
5436 | refclk, | |
5437 | &reduced_clock); | |
18f9ed12 ZY |
5438 | if (has_reduced_clock && (clock.p != reduced_clock.p)) { |
5439 | /* | |
5440 | * If the different P is found, it means that we can't | |
5441 | * switch the display clock by using the FP0/FP1. | |
5442 | * In such case we will disable the LVDS downclock | |
5443 | * feature. | |
5444 | */ | |
5445 | DRM_DEBUG_KMS("Different P is found for " | |
5eddb70b | 5446 | "LVDS clock/downclock\n"); |
18f9ed12 ZY |
5447 | has_reduced_clock = 0; |
5448 | } | |
652c393a | 5449 | } |
7026d4ac ZW |
5450 | /* SDVO TV has fixed PLL values depend on its clock range, |
5451 | this mirrors vbios setting. */ | |
5452 | if (is_sdvo && is_tv) { | |
5453 | if (adjusted_mode->clock >= 100000 | |
5eddb70b | 5454 | && adjusted_mode->clock < 140500) { |
7026d4ac ZW |
5455 | clock.p1 = 2; |
5456 | clock.p2 = 10; | |
5457 | clock.n = 3; | |
5458 | clock.m1 = 16; | |
5459 | clock.m2 = 8; | |
5460 | } else if (adjusted_mode->clock >= 140500 | |
5eddb70b | 5461 | && adjusted_mode->clock <= 200000) { |
7026d4ac ZW |
5462 | clock.p1 = 1; |
5463 | clock.p2 = 10; | |
5464 | clock.n = 6; | |
5465 | clock.m1 = 12; | |
5466 | clock.m2 = 8; | |
5467 | } | |
5468 | } | |
5469 | ||
2c07245f | 5470 | /* FDI link */ |
8febb297 EA |
5471 | pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode); |
5472 | lane = 0; | |
5473 | /* CPU eDP doesn't require FDI link, so just set DP M/N | |
5474 | according to current link config */ | |
5475 | if (has_edp_encoder && | |
5476 | !intel_encoder_is_pch_edp(&has_edp_encoder->base)) { | |
5477 | target_clock = mode->clock; | |
5478 | intel_edp_link_config(has_edp_encoder, | |
5479 | &lane, &link_bw); | |
5480 | } else { | |
5481 | /* [e]DP over FDI requires target mode clock | |
5482 | instead of link clock */ | |
5483 | if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) | |
5eb08b69 | 5484 | target_clock = mode->clock; |
8febb297 EA |
5485 | else |
5486 | target_clock = adjusted_mode->clock; | |
5487 | ||
5488 | /* FDI is a binary signal running at ~2.7GHz, encoding | |
5489 | * each output octet as 10 bits. The actual frequency | |
5490 | * is stored as a divider into a 100MHz clock, and the | |
5491 | * mode pixel clock is stored in units of 1KHz. | |
5492 | * Hence the bw of each lane in terms of the mode signal | |
5493 | * is: | |
5494 | */ | |
5495 | link_bw = intel_fdi_link_freq(dev) * MHz(100)/KHz(1)/10; | |
5496 | } | |
58a27471 | 5497 | |
8febb297 EA |
5498 | /* determine panel color depth */ |
5499 | temp = I915_READ(PIPECONF(pipe)); | |
5500 | temp &= ~PIPE_BPC_MASK; | |
3b5c78a3 | 5501 | dither = intel_choose_pipe_bpp_dither(crtc, &pipe_bpp, mode); |
5a354204 JB |
5502 | switch (pipe_bpp) { |
5503 | case 18: | |
5504 | temp |= PIPE_6BPC; | |
8febb297 | 5505 | break; |
5a354204 JB |
5506 | case 24: |
5507 | temp |= PIPE_8BPC; | |
8febb297 | 5508 | break; |
5a354204 JB |
5509 | case 30: |
5510 | temp |= PIPE_10BPC; | |
8febb297 | 5511 | break; |
5a354204 JB |
5512 | case 36: |
5513 | temp |= PIPE_12BPC; | |
8febb297 EA |
5514 | break; |
5515 | default: | |
62ac41a6 JB |
5516 | WARN(1, "intel_choose_pipe_bpp returned invalid value %d\n", |
5517 | pipe_bpp); | |
5a354204 JB |
5518 | temp |= PIPE_8BPC; |
5519 | pipe_bpp = 24; | |
5520 | break; | |
8febb297 | 5521 | } |
77ffb597 | 5522 | |
5a354204 JB |
5523 | intel_crtc->bpp = pipe_bpp; |
5524 | I915_WRITE(PIPECONF(pipe), temp); | |
5525 | ||
8febb297 EA |
5526 | if (!lane) { |
5527 | /* | |
5528 | * Account for spread spectrum to avoid | |
5529 | * oversubscribing the link. Max center spread | |
5530 | * is 2.5%; use 5% for safety's sake. | |
5531 | */ | |
5a354204 | 5532 | u32 bps = target_clock * intel_crtc->bpp * 21 / 20; |
8febb297 | 5533 | lane = bps / (link_bw * 8) + 1; |
5eb08b69 | 5534 | } |
2c07245f | 5535 | |
8febb297 EA |
5536 | intel_crtc->fdi_lanes = lane; |
5537 | ||
5538 | if (pixel_multiplier > 1) | |
5539 | link_bw *= pixel_multiplier; | |
5a354204 JB |
5540 | ironlake_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw, |
5541 | &m_n); | |
8febb297 | 5542 | |
a07d6787 EA |
5543 | fp = clock.n << 16 | clock.m1 << 8 | clock.m2; |
5544 | if (has_reduced_clock) | |
5545 | fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 | | |
5546 | reduced_clock.m2; | |
79e53945 | 5547 | |
c1858123 | 5548 | /* Enable autotuning of the PLL clock (if permissible) */ |
8febb297 EA |
5549 | factor = 21; |
5550 | if (is_lvds) { | |
5551 | if ((intel_panel_use_ssc(dev_priv) && | |
5552 | dev_priv->lvds_ssc_freq == 100) || | |
5553 | (I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP) | |
5554 | factor = 25; | |
5555 | } else if (is_sdvo && is_tv) | |
5556 | factor = 20; | |
c1858123 | 5557 | |
cb0e0931 | 5558 | if (clock.m < factor * clock.n) |
8febb297 | 5559 | fp |= FP_CB_TUNE; |
2c07245f | 5560 | |
5eddb70b | 5561 | dpll = 0; |
2c07245f | 5562 | |
a07d6787 EA |
5563 | if (is_lvds) |
5564 | dpll |= DPLLB_MODE_LVDS; | |
5565 | else | |
5566 | dpll |= DPLLB_MODE_DAC_SERIAL; | |
5567 | if (is_sdvo) { | |
5568 | int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode); | |
5569 | if (pixel_multiplier > 1) { | |
5570 | dpll |= (pixel_multiplier - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT; | |
79e53945 | 5571 | } |
a07d6787 EA |
5572 | dpll |= DPLL_DVO_HIGH_SPEED; |
5573 | } | |
5574 | if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) | |
5575 | dpll |= DPLL_DVO_HIGH_SPEED; | |
79e53945 | 5576 | |
a07d6787 EA |
5577 | /* compute bitmask from p1 value */ |
5578 | dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; | |
5579 | /* also FPA1 */ | |
5580 | dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT; | |
5581 | ||
5582 | switch (clock.p2) { | |
5583 | case 5: | |
5584 | dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5; | |
5585 | break; | |
5586 | case 7: | |
5587 | dpll |= DPLLB_LVDS_P2_CLOCK_DIV_7; | |
5588 | break; | |
5589 | case 10: | |
5590 | dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_10; | |
5591 | break; | |
5592 | case 14: | |
5593 | dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; | |
5594 | break; | |
79e53945 JB |
5595 | } |
5596 | ||
43565a06 KH |
5597 | if (is_sdvo && is_tv) |
5598 | dpll |= PLL_REF_INPUT_TVCLKINBC; | |
5599 | else if (is_tv) | |
79e53945 | 5600 | /* XXX: just matching BIOS for now */ |
43565a06 | 5601 | /* dpll |= PLL_REF_INPUT_TVCLKINBC; */ |
79e53945 | 5602 | dpll |= 3; |
a7615030 | 5603 | else if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) |
43565a06 | 5604 | dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN; |
79e53945 JB |
5605 | else |
5606 | dpll |= PLL_REF_INPUT_DREFCLK; | |
5607 | ||
5608 | /* setup pipeconf */ | |
5eddb70b | 5609 | pipeconf = I915_READ(PIPECONF(pipe)); |
79e53945 JB |
5610 | |
5611 | /* Set up the display plane register */ | |
5612 | dspcntr = DISPPLANE_GAMMA_ENABLE; | |
5613 | ||
f7cb34d4 | 5614 | DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe); |
79e53945 JB |
5615 | drm_mode_debug_printmodeline(mode); |
5616 | ||
5c5313c8 | 5617 | /* PCH eDP needs FDI, but CPU eDP does not */ |
4b645f14 JB |
5618 | if (!intel_crtc->no_pll) { |
5619 | if (!has_edp_encoder || | |
5620 | intel_encoder_is_pch_edp(&has_edp_encoder->base)) { | |
5621 | I915_WRITE(PCH_FP0(pipe), fp); | |
5622 | I915_WRITE(PCH_DPLL(pipe), dpll & ~DPLL_VCO_ENABLE); | |
5623 | ||
5624 | POSTING_READ(PCH_DPLL(pipe)); | |
5625 | udelay(150); | |
5626 | } | |
5627 | } else { | |
5628 | if (dpll == (I915_READ(PCH_DPLL(0)) & 0x7fffffff) && | |
5629 | fp == I915_READ(PCH_FP0(0))) { | |
5630 | intel_crtc->use_pll_a = true; | |
5631 | DRM_DEBUG_KMS("using pipe a dpll\n"); | |
5632 | } else if (dpll == (I915_READ(PCH_DPLL(1)) & 0x7fffffff) && | |
5633 | fp == I915_READ(PCH_FP0(1))) { | |
5634 | intel_crtc->use_pll_a = false; | |
5635 | DRM_DEBUG_KMS("using pipe b dpll\n"); | |
5636 | } else { | |
5637 | DRM_DEBUG_KMS("no matching PLL configuration for pipe 2\n"); | |
5638 | return -EINVAL; | |
5639 | } | |
79e53945 JB |
5640 | } |
5641 | ||
5642 | /* The LVDS pin pair needs to be on before the DPLLs are enabled. | |
5643 | * This is an exception to the general rule that mode_set doesn't turn | |
5644 | * things on. | |
5645 | */ | |
5646 | if (is_lvds) { | |
fae14981 | 5647 | temp = I915_READ(PCH_LVDS); |
5eddb70b | 5648 | temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP; |
4b645f14 JB |
5649 | if (HAS_PCH_CPT(dev)) |
5650 | temp |= PORT_TRANS_SEL_CPT(pipe); | |
5651 | else if (pipe == 1) | |
5652 | temp |= LVDS_PIPEB_SELECT; | |
5653 | else | |
5654 | temp &= ~LVDS_PIPEB_SELECT; | |
5655 | ||
a3e17eb8 | 5656 | /* set the corresponsding LVDS_BORDER bit */ |
5eddb70b | 5657 | temp |= dev_priv->lvds_border_bits; |
79e53945 JB |
5658 | /* Set the B0-B3 data pairs corresponding to whether we're going to |
5659 | * set the DPLLs for dual-channel mode or not. | |
5660 | */ | |
5661 | if (clock.p2 == 7) | |
5eddb70b | 5662 | temp |= LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP; |
79e53945 | 5663 | else |
5eddb70b | 5664 | temp &= ~(LVDS_B0B3_POWER_UP | LVDS_CLKB_POWER_UP); |
79e53945 JB |
5665 | |
5666 | /* It would be nice to set 24 vs 18-bit mode (LVDS_A3_POWER_UP) | |
5667 | * appropriately here, but we need to look more thoroughly into how | |
5668 | * panels behave in the two modes. | |
5669 | */ | |
aa9b500d BF |
5670 | if (adjusted_mode->flags & DRM_MODE_FLAG_NHSYNC) |
5671 | lvds_sync |= LVDS_HSYNC_POLARITY; | |
5672 | if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC) | |
5673 | lvds_sync |= LVDS_VSYNC_POLARITY; | |
5674 | if ((temp & (LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY)) | |
5675 | != lvds_sync) { | |
5676 | char flags[2] = "-+"; | |
5677 | DRM_INFO("Changing LVDS panel from " | |
5678 | "(%chsync, %cvsync) to (%chsync, %cvsync)\n", | |
5679 | flags[!(temp & LVDS_HSYNC_POLARITY)], | |
5680 | flags[!(temp & LVDS_VSYNC_POLARITY)], | |
5681 | flags[!(lvds_sync & LVDS_HSYNC_POLARITY)], | |
5682 | flags[!(lvds_sync & LVDS_VSYNC_POLARITY)]); | |
5683 | temp &= ~(LVDS_HSYNC_POLARITY | LVDS_VSYNC_POLARITY); | |
5684 | temp |= lvds_sync; | |
5685 | } | |
fae14981 | 5686 | I915_WRITE(PCH_LVDS, temp); |
79e53945 | 5687 | } |
434ed097 | 5688 | |
8febb297 EA |
5689 | pipeconf &= ~PIPECONF_DITHER_EN; |
5690 | pipeconf &= ~PIPECONF_DITHER_TYPE_MASK; | |
5a354204 | 5691 | if ((is_lvds && dev_priv->lvds_dither) || dither) { |
8febb297 | 5692 | pipeconf |= PIPECONF_DITHER_EN; |
f74974c7 | 5693 | pipeconf |= PIPECONF_DITHER_TYPE_SP; |
434ed097 | 5694 | } |
5c5313c8 | 5695 | if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) { |
a4fc5ed6 | 5696 | intel_dp_set_m_n(crtc, mode, adjusted_mode); |
8febb297 | 5697 | } else { |
8db9d77b | 5698 | /* For non-DP output, clear any trans DP clock recovery setting.*/ |
9db4a9c7 JB |
5699 | I915_WRITE(TRANSDATA_M1(pipe), 0); |
5700 | I915_WRITE(TRANSDATA_N1(pipe), 0); | |
5701 | I915_WRITE(TRANSDPLINK_M1(pipe), 0); | |
5702 | I915_WRITE(TRANSDPLINK_N1(pipe), 0); | |
8db9d77b | 5703 | } |
79e53945 | 5704 | |
4b645f14 JB |
5705 | if (!intel_crtc->no_pll && |
5706 | (!has_edp_encoder || | |
5707 | intel_encoder_is_pch_edp(&has_edp_encoder->base))) { | |
fae14981 | 5708 | I915_WRITE(PCH_DPLL(pipe), dpll); |
5eddb70b | 5709 | |
32f9d658 | 5710 | /* Wait for the clocks to stabilize. */ |
fae14981 | 5711 | POSTING_READ(PCH_DPLL(pipe)); |
32f9d658 ZW |
5712 | udelay(150); |
5713 | ||
8febb297 EA |
5714 | /* The pixel multiplier can only be updated once the |
5715 | * DPLL is enabled and the clocks are stable. | |
5716 | * | |
5717 | * So write it again. | |
5718 | */ | |
fae14981 | 5719 | I915_WRITE(PCH_DPLL(pipe), dpll); |
79e53945 | 5720 | } |
79e53945 | 5721 | |
5eddb70b | 5722 | intel_crtc->lowfreq_avail = false; |
4b645f14 JB |
5723 | if (!intel_crtc->no_pll) { |
5724 | if (is_lvds && has_reduced_clock && i915_powersave) { | |
5725 | I915_WRITE(PCH_FP1(pipe), fp2); | |
5726 | intel_crtc->lowfreq_avail = true; | |
5727 | if (HAS_PIPE_CXSR(dev)) { | |
5728 | DRM_DEBUG_KMS("enabling CxSR downclocking\n"); | |
5729 | pipeconf |= PIPECONF_CXSR_DOWNCLOCK; | |
5730 | } | |
5731 | } else { | |
5732 | I915_WRITE(PCH_FP1(pipe), fp); | |
5733 | if (HAS_PIPE_CXSR(dev)) { | |
5734 | DRM_DEBUG_KMS("disabling CxSR downclocking\n"); | |
5735 | pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK; | |
5736 | } | |
652c393a JB |
5737 | } |
5738 | } | |
5739 | ||
734b4157 KH |
5740 | if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) { |
5741 | pipeconf |= PIPECONF_INTERLACE_W_FIELD_INDICATION; | |
5742 | /* the chip adds 2 halflines automatically */ | |
5743 | adjusted_mode->crtc_vdisplay -= 1; | |
5744 | adjusted_mode->crtc_vtotal -= 1; | |
5745 | adjusted_mode->crtc_vblank_start -= 1; | |
5746 | adjusted_mode->crtc_vblank_end -= 1; | |
5747 | adjusted_mode->crtc_vsync_end -= 1; | |
5748 | adjusted_mode->crtc_vsync_start -= 1; | |
5749 | } else | |
5750 | pipeconf &= ~PIPECONF_INTERLACE_W_FIELD_INDICATION; /* progressive */ | |
5751 | ||
5eddb70b CW |
5752 | I915_WRITE(HTOTAL(pipe), |
5753 | (adjusted_mode->crtc_hdisplay - 1) | | |
79e53945 | 5754 | ((adjusted_mode->crtc_htotal - 1) << 16)); |
5eddb70b CW |
5755 | I915_WRITE(HBLANK(pipe), |
5756 | (adjusted_mode->crtc_hblank_start - 1) | | |
79e53945 | 5757 | ((adjusted_mode->crtc_hblank_end - 1) << 16)); |
5eddb70b CW |
5758 | I915_WRITE(HSYNC(pipe), |
5759 | (adjusted_mode->crtc_hsync_start - 1) | | |
79e53945 | 5760 | ((adjusted_mode->crtc_hsync_end - 1) << 16)); |
5eddb70b CW |
5761 | |
5762 | I915_WRITE(VTOTAL(pipe), | |
5763 | (adjusted_mode->crtc_vdisplay - 1) | | |
79e53945 | 5764 | ((adjusted_mode->crtc_vtotal - 1) << 16)); |
5eddb70b CW |
5765 | I915_WRITE(VBLANK(pipe), |
5766 | (adjusted_mode->crtc_vblank_start - 1) | | |
79e53945 | 5767 | ((adjusted_mode->crtc_vblank_end - 1) << 16)); |
5eddb70b CW |
5768 | I915_WRITE(VSYNC(pipe), |
5769 | (adjusted_mode->crtc_vsync_start - 1) | | |
79e53945 | 5770 | ((adjusted_mode->crtc_vsync_end - 1) << 16)); |
5eddb70b | 5771 | |
8febb297 EA |
5772 | /* pipesrc controls the size that is scaled from, which should |
5773 | * always be the user's requested size. | |
79e53945 | 5774 | */ |
5eddb70b CW |
5775 | I915_WRITE(PIPESRC(pipe), |
5776 | ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1)); | |
2c07245f | 5777 | |
8febb297 EA |
5778 | I915_WRITE(PIPE_DATA_M1(pipe), TU_SIZE(m_n.tu) | m_n.gmch_m); |
5779 | I915_WRITE(PIPE_DATA_N1(pipe), m_n.gmch_n); | |
5780 | I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m); | |
5781 | I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n); | |
2c07245f | 5782 | |
8febb297 EA |
5783 | if (has_edp_encoder && |
5784 | !intel_encoder_is_pch_edp(&has_edp_encoder->base)) { | |
5785 | ironlake_set_pll_edp(crtc, adjusted_mode->clock); | |
2c07245f ZW |
5786 | } |
5787 | ||
5eddb70b CW |
5788 | I915_WRITE(PIPECONF(pipe), pipeconf); |
5789 | POSTING_READ(PIPECONF(pipe)); | |
79e53945 | 5790 | |
9d0498a2 | 5791 | intel_wait_for_vblank(dev, pipe); |
79e53945 | 5792 | |
f00a3ddf | 5793 | if (IS_GEN5(dev)) { |
553bd149 ZW |
5794 | /* enable address swizzle for tiling buffer */ |
5795 | temp = I915_READ(DISP_ARB_CTL); | |
5796 | I915_WRITE(DISP_ARB_CTL, temp | DISP_TILE_SURFACE_SWIZZLING); | |
5797 | } | |
5798 | ||
5eddb70b | 5799 | I915_WRITE(DSPCNTR(plane), dspcntr); |
b24e7179 | 5800 | POSTING_READ(DSPCNTR(plane)); |
79e53945 | 5801 | |
5c3b82e2 | 5802 | ret = intel_pipe_set_base(crtc, x, y, old_fb); |
7662c8bd SL |
5803 | |
5804 | intel_update_watermarks(dev); | |
5805 | ||
1f803ee5 | 5806 | return ret; |
79e53945 JB |
5807 | } |
5808 | ||
f564048e EA |
5809 | static int intel_crtc_mode_set(struct drm_crtc *crtc, |
5810 | struct drm_display_mode *mode, | |
5811 | struct drm_display_mode *adjusted_mode, | |
5812 | int x, int y, | |
5813 | struct drm_framebuffer *old_fb) | |
5814 | { | |
5815 | struct drm_device *dev = crtc->dev; | |
5816 | struct drm_i915_private *dev_priv = dev->dev_private; | |
0b701d27 EA |
5817 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
5818 | int pipe = intel_crtc->pipe; | |
f564048e EA |
5819 | int ret; |
5820 | ||
0b701d27 | 5821 | drm_vblank_pre_modeset(dev, pipe); |
7662c8bd | 5822 | |
f564048e EA |
5823 | ret = dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode, |
5824 | x, y, old_fb); | |
7662c8bd | 5825 | |
79e53945 | 5826 | drm_vblank_post_modeset(dev, pipe); |
5c3b82e2 | 5827 | |
120eced9 KP |
5828 | intel_crtc->dpms_mode = DRM_MODE_DPMS_ON; |
5829 | ||
1f803ee5 | 5830 | return ret; |
79e53945 JB |
5831 | } |
5832 | ||
e0dac65e WF |
5833 | static void g4x_write_eld(struct drm_connector *connector, |
5834 | struct drm_crtc *crtc) | |
5835 | { | |
5836 | struct drm_i915_private *dev_priv = connector->dev->dev_private; | |
5837 | uint8_t *eld = connector->eld; | |
5838 | uint32_t eldv; | |
5839 | uint32_t len; | |
5840 | uint32_t i; | |
5841 | ||
5842 | i = I915_READ(G4X_AUD_VID_DID); | |
5843 | ||
5844 | if (i == INTEL_AUDIO_DEVBLC || i == INTEL_AUDIO_DEVCL) | |
5845 | eldv = G4X_ELDV_DEVCL_DEVBLC; | |
5846 | else | |
5847 | eldv = G4X_ELDV_DEVCTG; | |
5848 | ||
5849 | i = I915_READ(G4X_AUD_CNTL_ST); | |
5850 | i &= ~(eldv | G4X_ELD_ADDR); | |
5851 | len = (i >> 9) & 0x1f; /* ELD buffer size */ | |
5852 | I915_WRITE(G4X_AUD_CNTL_ST, i); | |
5853 | ||
5854 | if (!eld[0]) | |
5855 | return; | |
5856 | ||
5857 | len = min_t(uint8_t, eld[2], len); | |
5858 | DRM_DEBUG_DRIVER("ELD size %d\n", len); | |
5859 | for (i = 0; i < len; i++) | |
5860 | I915_WRITE(G4X_HDMIW_HDMIEDID, *((uint32_t *)eld + i)); | |
5861 | ||
5862 | i = I915_READ(G4X_AUD_CNTL_ST); | |
5863 | i |= eldv; | |
5864 | I915_WRITE(G4X_AUD_CNTL_ST, i); | |
5865 | } | |
5866 | ||
5867 | static void ironlake_write_eld(struct drm_connector *connector, | |
5868 | struct drm_crtc *crtc) | |
5869 | { | |
5870 | struct drm_i915_private *dev_priv = connector->dev->dev_private; | |
5871 | uint8_t *eld = connector->eld; | |
5872 | uint32_t eldv; | |
5873 | uint32_t i; | |
5874 | int len; | |
5875 | int hdmiw_hdmiedid; | |
5876 | int aud_cntl_st; | |
5877 | int aud_cntrl_st2; | |
5878 | ||
5879 | if (IS_IVYBRIDGE(connector->dev)) { | |
5880 | hdmiw_hdmiedid = GEN7_HDMIW_HDMIEDID_A; | |
5881 | aud_cntl_st = GEN7_AUD_CNTRL_ST_A; | |
5882 | aud_cntrl_st2 = GEN7_AUD_CNTRL_ST2; | |
5883 | } else { | |
5884 | hdmiw_hdmiedid = GEN5_HDMIW_HDMIEDID_A; | |
5885 | aud_cntl_st = GEN5_AUD_CNTL_ST_A; | |
5886 | aud_cntrl_st2 = GEN5_AUD_CNTL_ST2; | |
5887 | } | |
5888 | ||
5889 | i = to_intel_crtc(crtc)->pipe; | |
5890 | hdmiw_hdmiedid += i * 0x100; | |
5891 | aud_cntl_st += i * 0x100; | |
5892 | ||
5893 | DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(i)); | |
5894 | ||
5895 | i = I915_READ(aud_cntl_st); | |
5896 | i = (i >> 29) & 0x3; /* DIP_Port_Select, 0x1 = PortB */ | |
5897 | if (!i) { | |
5898 | DRM_DEBUG_DRIVER("Audio directed to unknown port\n"); | |
5899 | /* operate blindly on all ports */ | |
5900 | eldv = GEN5_ELD_VALIDB; | |
5901 | eldv |= GEN5_ELD_VALIDB << 4; | |
5902 | eldv |= GEN5_ELD_VALIDB << 8; | |
5903 | } else { | |
5904 | DRM_DEBUG_DRIVER("ELD on port %c\n", 'A' + i); | |
5905 | eldv = GEN5_ELD_VALIDB << ((i - 1) * 4); | |
5906 | } | |
5907 | ||
5908 | i = I915_READ(aud_cntrl_st2); | |
5909 | i &= ~eldv; | |
5910 | I915_WRITE(aud_cntrl_st2, i); | |
5911 | ||
5912 | if (!eld[0]) | |
5913 | return; | |
5914 | ||
5915 | if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) { | |
5916 | DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n"); | |
5917 | eld[5] |= (1 << 2); /* Conn_Type, 0x1 = DisplayPort */ | |
5918 | } | |
5919 | ||
5920 | i = I915_READ(aud_cntl_st); | |
5921 | i &= ~GEN5_ELD_ADDRESS; | |
5922 | I915_WRITE(aud_cntl_st, i); | |
5923 | ||
5924 | len = min_t(uint8_t, eld[2], 21); /* 84 bytes of hw ELD buffer */ | |
5925 | DRM_DEBUG_DRIVER("ELD size %d\n", len); | |
5926 | for (i = 0; i < len; i++) | |
5927 | I915_WRITE(hdmiw_hdmiedid, *((uint32_t *)eld + i)); | |
5928 | ||
5929 | i = I915_READ(aud_cntrl_st2); | |
5930 | i |= eldv; | |
5931 | I915_WRITE(aud_cntrl_st2, i); | |
5932 | } | |
5933 | ||
5934 | void intel_write_eld(struct drm_encoder *encoder, | |
5935 | struct drm_display_mode *mode) | |
5936 | { | |
5937 | struct drm_crtc *crtc = encoder->crtc; | |
5938 | struct drm_connector *connector; | |
5939 | struct drm_device *dev = encoder->dev; | |
5940 | struct drm_i915_private *dev_priv = dev->dev_private; | |
5941 | ||
5942 | connector = drm_select_eld(encoder, mode); | |
5943 | if (!connector) | |
5944 | return; | |
5945 | ||
5946 | DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", | |
5947 | connector->base.id, | |
5948 | drm_get_connector_name(connector), | |
5949 | connector->encoder->base.id, | |
5950 | drm_get_encoder_name(connector->encoder)); | |
5951 | ||
5952 | connector->eld[6] = drm_av_sync_delay(connector, mode) / 2; | |
5953 | ||
5954 | if (dev_priv->display.write_eld) | |
5955 | dev_priv->display.write_eld(connector, crtc); | |
5956 | } | |
5957 | ||
79e53945 JB |
5958 | /** Loads the palette/gamma unit for the CRTC with the prepared values */ |
5959 | void intel_crtc_load_lut(struct drm_crtc *crtc) | |
5960 | { | |
5961 | struct drm_device *dev = crtc->dev; | |
5962 | struct drm_i915_private *dev_priv = dev->dev_private; | |
5963 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | |
9db4a9c7 | 5964 | int palreg = PALETTE(intel_crtc->pipe); |
79e53945 JB |
5965 | int i; |
5966 | ||
5967 | /* The clocks have to be on to load the palette. */ | |
5968 | if (!crtc->enabled) | |
5969 | return; | |
5970 | ||
f2b115e6 | 5971 | /* use legacy palette for Ironlake */ |
bad720ff | 5972 | if (HAS_PCH_SPLIT(dev)) |
9db4a9c7 | 5973 | palreg = LGC_PALETTE(intel_crtc->pipe); |
2c07245f | 5974 | |
79e53945 JB |
5975 | for (i = 0; i < 256; i++) { |
5976 | I915_WRITE(palreg + 4 * i, | |
5977 | (intel_crtc->lut_r[i] << 16) | | |
5978 | (intel_crtc->lut_g[i] << 8) | | |
5979 | intel_crtc->lut_b[i]); | |
5980 | } | |
5981 | } | |
5982 | ||
560b85bb CW |
5983 | static void i845_update_cursor(struct drm_crtc *crtc, u32 base) |
5984 | { | |
5985 | struct drm_device *dev = crtc->dev; | |
5986 | struct drm_i915_private *dev_priv = dev->dev_private; | |
5987 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | |
5988 | bool visible = base != 0; | |
5989 | u32 cntl; | |
5990 | ||
5991 | if (intel_crtc->cursor_visible == visible) | |
5992 | return; | |
5993 | ||
9db4a9c7 | 5994 | cntl = I915_READ(_CURACNTR); |
560b85bb CW |
5995 | if (visible) { |
5996 | /* On these chipsets we can only modify the base whilst | |
5997 | * the cursor is disabled. | |
5998 | */ | |
9db4a9c7 | 5999 | I915_WRITE(_CURABASE, base); |
560b85bb CW |
6000 | |
6001 | cntl &= ~(CURSOR_FORMAT_MASK); | |
6002 | /* XXX width must be 64, stride 256 => 0x00 << 28 */ | |
6003 | cntl |= CURSOR_ENABLE | | |
6004 | CURSOR_GAMMA_ENABLE | | |
6005 | CURSOR_FORMAT_ARGB; | |
6006 | } else | |
6007 | cntl &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE); | |
9db4a9c7 | 6008 | I915_WRITE(_CURACNTR, cntl); |
560b85bb CW |
6009 | |
6010 | intel_crtc->cursor_visible = visible; | |
6011 | } | |
6012 | ||
6013 | static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base) | |
6014 | { | |
6015 | struct drm_device *dev = crtc->dev; | |
6016 | struct drm_i915_private *dev_priv = dev->dev_private; | |
6017 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | |
6018 | int pipe = intel_crtc->pipe; | |
6019 | bool visible = base != 0; | |
6020 | ||
6021 | if (intel_crtc->cursor_visible != visible) { | |
548f245b | 6022 | uint32_t cntl = I915_READ(CURCNTR(pipe)); |
560b85bb CW |
6023 | if (base) { |
6024 | cntl &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT); | |
6025 | cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE; | |
6026 | cntl |= pipe << 28; /* Connect to correct pipe */ | |
6027 | } else { | |
6028 | cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE); | |
6029 | cntl |= CURSOR_MODE_DISABLE; | |
6030 | } | |
9db4a9c7 | 6031 | I915_WRITE(CURCNTR(pipe), cntl); |
560b85bb CW |
6032 | |
6033 | intel_crtc->cursor_visible = visible; | |
6034 | } | |
6035 | /* and commit changes on next vblank */ | |
9db4a9c7 | 6036 | I915_WRITE(CURBASE(pipe), base); |
560b85bb CW |
6037 | } |
6038 | ||
65a21cd6 JB |
6039 | static void ivb_update_cursor(struct drm_crtc *crtc, u32 base) |
6040 | { | |
6041 | struct drm_device *dev = crtc->dev; | |
6042 | struct drm_i915_private *dev_priv = dev->dev_private; | |
6043 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | |
6044 | int pipe = intel_crtc->pipe; | |
6045 | bool visible = base != 0; | |
6046 | ||
6047 | if (intel_crtc->cursor_visible != visible) { | |
6048 | uint32_t cntl = I915_READ(CURCNTR_IVB(pipe)); | |
6049 | if (base) { | |
6050 | cntl &= ~CURSOR_MODE; | |
6051 | cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE; | |
6052 | } else { | |
6053 | cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE); | |
6054 | cntl |= CURSOR_MODE_DISABLE; | |
6055 | } | |
6056 | I915_WRITE(CURCNTR_IVB(pipe), cntl); | |
6057 | ||
6058 | intel_crtc->cursor_visible = visible; | |
6059 | } | |
6060 | /* and commit changes on next vblank */ | |
6061 | I915_WRITE(CURBASE_IVB(pipe), base); | |
6062 | } | |
6063 | ||
cda4b7d3 | 6064 | /* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... */ |
6b383a7f CW |
6065 | static void intel_crtc_update_cursor(struct drm_crtc *crtc, |
6066 | bool on) | |
cda4b7d3 CW |
6067 | { |
6068 | struct drm_device *dev = crtc->dev; | |
6069 | struct drm_i915_private *dev_priv = dev->dev_private; | |
6070 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | |
6071 | int pipe = intel_crtc->pipe; | |
6072 | int x = intel_crtc->cursor_x; | |
6073 | int y = intel_crtc->cursor_y; | |
560b85bb | 6074 | u32 base, pos; |
cda4b7d3 CW |
6075 | bool visible; |
6076 | ||
6077 | pos = 0; | |
6078 | ||
6b383a7f | 6079 | if (on && crtc->enabled && crtc->fb) { |
cda4b7d3 CW |
6080 | base = intel_crtc->cursor_addr; |
6081 | if (x > (int) crtc->fb->width) | |
6082 | base = 0; | |
6083 | ||
6084 | if (y > (int) crtc->fb->height) | |
6085 | base = 0; | |
6086 | } else | |
6087 | base = 0; | |
6088 | ||
6089 | if (x < 0) { | |
6090 | if (x + intel_crtc->cursor_width < 0) | |
6091 | base = 0; | |
6092 | ||
6093 | pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT; | |
6094 | x = -x; | |
6095 | } | |
6096 | pos |= x << CURSOR_X_SHIFT; | |
6097 | ||
6098 | if (y < 0) { | |
6099 | if (y + intel_crtc->cursor_height < 0) | |
6100 | base = 0; | |
6101 | ||
6102 | pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT; | |
6103 | y = -y; | |
6104 | } | |
6105 | pos |= y << CURSOR_Y_SHIFT; | |
6106 | ||
6107 | visible = base != 0; | |
560b85bb | 6108 | if (!visible && !intel_crtc->cursor_visible) |
cda4b7d3 CW |
6109 | return; |
6110 | ||
65a21cd6 JB |
6111 | if (IS_IVYBRIDGE(dev)) { |
6112 | I915_WRITE(CURPOS_IVB(pipe), pos); | |
6113 | ivb_update_cursor(crtc, base); | |
6114 | } else { | |
6115 | I915_WRITE(CURPOS(pipe), pos); | |
6116 | if (IS_845G(dev) || IS_I865G(dev)) | |
6117 | i845_update_cursor(crtc, base); | |
6118 | else | |
6119 | i9xx_update_cursor(crtc, base); | |
6120 | } | |
cda4b7d3 CW |
6121 | |
6122 | if (visible) | |
6123 | intel_mark_busy(dev, to_intel_framebuffer(crtc->fb)->obj); | |
6124 | } | |
6125 | ||
79e53945 | 6126 | static int intel_crtc_cursor_set(struct drm_crtc *crtc, |
05394f39 | 6127 | struct drm_file *file, |
79e53945 JB |
6128 | uint32_t handle, |
6129 | uint32_t width, uint32_t height) | |
6130 | { | |
6131 | struct drm_device *dev = crtc->dev; | |
6132 | struct drm_i915_private *dev_priv = dev->dev_private; | |
6133 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | |
05394f39 | 6134 | struct drm_i915_gem_object *obj; |
cda4b7d3 | 6135 | uint32_t addr; |
3f8bc370 | 6136 | int ret; |
79e53945 | 6137 | |
28c97730 | 6138 | DRM_DEBUG_KMS("\n"); |
79e53945 JB |
6139 | |
6140 | /* if we want to turn off the cursor ignore width and height */ | |
6141 | if (!handle) { | |
28c97730 | 6142 | DRM_DEBUG_KMS("cursor off\n"); |
3f8bc370 | 6143 | addr = 0; |
05394f39 | 6144 | obj = NULL; |
5004417d | 6145 | mutex_lock(&dev->struct_mutex); |
3f8bc370 | 6146 | goto finish; |
79e53945 JB |
6147 | } |
6148 | ||
6149 | /* Currently we only support 64x64 cursors */ | |
6150 | if (width != 64 || height != 64) { | |
6151 | DRM_ERROR("we currently only support 64x64 cursors\n"); | |
6152 | return -EINVAL; | |
6153 | } | |
6154 | ||
05394f39 | 6155 | obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle)); |
c8725226 | 6156 | if (&obj->base == NULL) |
79e53945 JB |
6157 | return -ENOENT; |
6158 | ||
05394f39 | 6159 | if (obj->base.size < width * height * 4) { |
79e53945 | 6160 | DRM_ERROR("buffer is to small\n"); |
34b8686e DA |
6161 | ret = -ENOMEM; |
6162 | goto fail; | |
79e53945 JB |
6163 | } |
6164 | ||
71acb5eb | 6165 | /* we only need to pin inside GTT if cursor is non-phy */ |
7f9872e0 | 6166 | mutex_lock(&dev->struct_mutex); |
b295d1b6 | 6167 | if (!dev_priv->info->cursor_needs_physical) { |
d9e86c0e CW |
6168 | if (obj->tiling_mode) { |
6169 | DRM_ERROR("cursor cannot be tiled\n"); | |
6170 | ret = -EINVAL; | |
6171 | goto fail_locked; | |
6172 | } | |
6173 | ||
2da3b9b9 | 6174 | ret = i915_gem_object_pin_to_display_plane(obj, 0, NULL); |
e7b526bb CW |
6175 | if (ret) { |
6176 | DRM_ERROR("failed to move cursor bo into the GTT\n"); | |
2da3b9b9 | 6177 | goto fail_locked; |
e7b526bb CW |
6178 | } |
6179 | ||
d9e86c0e CW |
6180 | ret = i915_gem_object_put_fence(obj); |
6181 | if (ret) { | |
2da3b9b9 | 6182 | DRM_ERROR("failed to release fence for cursor"); |
d9e86c0e CW |
6183 | goto fail_unpin; |
6184 | } | |
6185 | ||
05394f39 | 6186 | addr = obj->gtt_offset; |
71acb5eb | 6187 | } else { |
6eeefaf3 | 6188 | int align = IS_I830(dev) ? 16 * 1024 : 256; |
05394f39 | 6189 | ret = i915_gem_attach_phys_object(dev, obj, |
6eeefaf3 CW |
6190 | (intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1, |
6191 | align); | |
71acb5eb DA |
6192 | if (ret) { |
6193 | DRM_ERROR("failed to attach phys object\n"); | |
7f9872e0 | 6194 | goto fail_locked; |
71acb5eb | 6195 | } |
05394f39 | 6196 | addr = obj->phys_obj->handle->busaddr; |
3f8bc370 KH |
6197 | } |
6198 | ||
a6c45cf0 | 6199 | if (IS_GEN2(dev)) |
14b60391 JB |
6200 | I915_WRITE(CURSIZE, (height << 12) | width); |
6201 | ||
3f8bc370 | 6202 | finish: |
3f8bc370 | 6203 | if (intel_crtc->cursor_bo) { |
b295d1b6 | 6204 | if (dev_priv->info->cursor_needs_physical) { |
05394f39 | 6205 | if (intel_crtc->cursor_bo != obj) |
71acb5eb DA |
6206 | i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo); |
6207 | } else | |
6208 | i915_gem_object_unpin(intel_crtc->cursor_bo); | |
05394f39 | 6209 | drm_gem_object_unreference(&intel_crtc->cursor_bo->base); |
3f8bc370 | 6210 | } |
80824003 | 6211 | |
7f9872e0 | 6212 | mutex_unlock(&dev->struct_mutex); |
3f8bc370 KH |
6213 | |
6214 | intel_crtc->cursor_addr = addr; | |
05394f39 | 6215 | intel_crtc->cursor_bo = obj; |
cda4b7d3 CW |
6216 | intel_crtc->cursor_width = width; |
6217 | intel_crtc->cursor_height = height; | |
6218 | ||
6b383a7f | 6219 | intel_crtc_update_cursor(crtc, true); |
3f8bc370 | 6220 | |
79e53945 | 6221 | return 0; |
e7b526bb | 6222 | fail_unpin: |
05394f39 | 6223 | i915_gem_object_unpin(obj); |
7f9872e0 | 6224 | fail_locked: |
34b8686e | 6225 | mutex_unlock(&dev->struct_mutex); |
bc9025bd | 6226 | fail: |
05394f39 | 6227 | drm_gem_object_unreference_unlocked(&obj->base); |
34b8686e | 6228 | return ret; |
79e53945 JB |
6229 | } |
6230 | ||
6231 | static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y) | |
6232 | { | |
79e53945 | 6233 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
79e53945 | 6234 | |
cda4b7d3 CW |
6235 | intel_crtc->cursor_x = x; |
6236 | intel_crtc->cursor_y = y; | |
652c393a | 6237 | |
6b383a7f | 6238 | intel_crtc_update_cursor(crtc, true); |
79e53945 JB |
6239 | |
6240 | return 0; | |
6241 | } | |
6242 | ||
6243 | /** Sets the color ramps on behalf of RandR */ | |
6244 | void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green, | |
6245 | u16 blue, int regno) | |
6246 | { | |
6247 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | |
6248 | ||
6249 | intel_crtc->lut_r[regno] = red >> 8; | |
6250 | intel_crtc->lut_g[regno] = green >> 8; | |
6251 | intel_crtc->lut_b[regno] = blue >> 8; | |
6252 | } | |
6253 | ||
b8c00ac5 DA |
6254 | void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green, |
6255 | u16 *blue, int regno) | |
6256 | { | |
6257 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | |
6258 | ||
6259 | *red = intel_crtc->lut_r[regno] << 8; | |
6260 | *green = intel_crtc->lut_g[regno] << 8; | |
6261 | *blue = intel_crtc->lut_b[regno] << 8; | |
6262 | } | |
6263 | ||
79e53945 | 6264 | static void intel_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green, |
7203425a | 6265 | u16 *blue, uint32_t start, uint32_t size) |
79e53945 | 6266 | { |
7203425a | 6267 | int end = (start + size > 256) ? 256 : start + size, i; |
79e53945 | 6268 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
79e53945 | 6269 | |
7203425a | 6270 | for (i = start; i < end; i++) { |
79e53945 JB |
6271 | intel_crtc->lut_r[i] = red[i] >> 8; |
6272 | intel_crtc->lut_g[i] = green[i] >> 8; | |
6273 | intel_crtc->lut_b[i] = blue[i] >> 8; | |
6274 | } | |
6275 | ||
6276 | intel_crtc_load_lut(crtc); | |
6277 | } | |
6278 | ||
6279 | /** | |
6280 | * Get a pipe with a simple mode set on it for doing load-based monitor | |
6281 | * detection. | |
6282 | * | |
6283 | * It will be up to the load-detect code to adjust the pipe as appropriate for | |
c751ce4f | 6284 | * its requirements. The pipe will be connected to no other encoders. |
79e53945 | 6285 | * |
c751ce4f | 6286 | * Currently this code will only succeed if there is a pipe with no encoders |
79e53945 JB |
6287 | * configured for it. In the future, it could choose to temporarily disable |
6288 | * some outputs to free up a pipe for its use. | |
6289 | * | |
6290 | * \return crtc, or NULL if no pipes are available. | |
6291 | */ | |
6292 | ||
6293 | /* VESA 640x480x72Hz mode to set on the pipe */ | |
6294 | static struct drm_display_mode load_detect_mode = { | |
6295 | DRM_MODE("640x480", DRM_MODE_TYPE_DEFAULT, 31500, 640, 664, | |
6296 | 704, 832, 0, 480, 489, 491, 520, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC), | |
6297 | }; | |
6298 | ||
d2dff872 CW |
6299 | static struct drm_framebuffer * |
6300 | intel_framebuffer_create(struct drm_device *dev, | |
6301 | struct drm_mode_fb_cmd *mode_cmd, | |
6302 | struct drm_i915_gem_object *obj) | |
6303 | { | |
6304 | struct intel_framebuffer *intel_fb; | |
6305 | int ret; | |
6306 | ||
6307 | intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL); | |
6308 | if (!intel_fb) { | |
6309 | drm_gem_object_unreference_unlocked(&obj->base); | |
6310 | return ERR_PTR(-ENOMEM); | |
6311 | } | |
6312 | ||
6313 | ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj); | |
6314 | if (ret) { | |
6315 | drm_gem_object_unreference_unlocked(&obj->base); | |
6316 | kfree(intel_fb); | |
6317 | return ERR_PTR(ret); | |
6318 | } | |
6319 | ||
6320 | return &intel_fb->base; | |
6321 | } | |
6322 | ||
6323 | static u32 | |
6324 | intel_framebuffer_pitch_for_width(int width, int bpp) | |
6325 | { | |
6326 | u32 pitch = DIV_ROUND_UP(width * bpp, 8); | |
6327 | return ALIGN(pitch, 64); | |
6328 | } | |
6329 | ||
6330 | static u32 | |
6331 | intel_framebuffer_size_for_mode(struct drm_display_mode *mode, int bpp) | |
6332 | { | |
6333 | u32 pitch = intel_framebuffer_pitch_for_width(mode->hdisplay, bpp); | |
6334 | return ALIGN(pitch * mode->vdisplay, PAGE_SIZE); | |
6335 | } | |
6336 | ||
6337 | static struct drm_framebuffer * | |
6338 | intel_framebuffer_create_for_mode(struct drm_device *dev, | |
6339 | struct drm_display_mode *mode, | |
6340 | int depth, int bpp) | |
6341 | { | |
6342 | struct drm_i915_gem_object *obj; | |
6343 | struct drm_mode_fb_cmd mode_cmd; | |
6344 | ||
6345 | obj = i915_gem_alloc_object(dev, | |
6346 | intel_framebuffer_size_for_mode(mode, bpp)); | |
6347 | if (obj == NULL) | |
6348 | return ERR_PTR(-ENOMEM); | |
6349 | ||
6350 | mode_cmd.width = mode->hdisplay; | |
6351 | mode_cmd.height = mode->vdisplay; | |
6352 | mode_cmd.depth = depth; | |
6353 | mode_cmd.bpp = bpp; | |
6354 | mode_cmd.pitch = intel_framebuffer_pitch_for_width(mode_cmd.width, bpp); | |
6355 | ||
6356 | return intel_framebuffer_create(dev, &mode_cmd, obj); | |
6357 | } | |
6358 | ||
6359 | static struct drm_framebuffer * | |
6360 | mode_fits_in_fbdev(struct drm_device *dev, | |
6361 | struct drm_display_mode *mode) | |
6362 | { | |
6363 | struct drm_i915_private *dev_priv = dev->dev_private; | |
6364 | struct drm_i915_gem_object *obj; | |
6365 | struct drm_framebuffer *fb; | |
6366 | ||
6367 | if (dev_priv->fbdev == NULL) | |
6368 | return NULL; | |
6369 | ||
6370 | obj = dev_priv->fbdev->ifb.obj; | |
6371 | if (obj == NULL) | |
6372 | return NULL; | |
6373 | ||
6374 | fb = &dev_priv->fbdev->ifb.base; | |
6375 | if (fb->pitch < intel_framebuffer_pitch_for_width(mode->hdisplay, | |
6376 | fb->bits_per_pixel)) | |
6377 | return NULL; | |
6378 | ||
6379 | if (obj->base.size < mode->vdisplay * fb->pitch) | |
6380 | return NULL; | |
6381 | ||
6382 | return fb; | |
6383 | } | |
6384 | ||
7173188d CW |
6385 | bool intel_get_load_detect_pipe(struct intel_encoder *intel_encoder, |
6386 | struct drm_connector *connector, | |
6387 | struct drm_display_mode *mode, | |
8261b191 | 6388 | struct intel_load_detect_pipe *old) |
79e53945 JB |
6389 | { |
6390 | struct intel_crtc *intel_crtc; | |
6391 | struct drm_crtc *possible_crtc; | |
4ef69c7a | 6392 | struct drm_encoder *encoder = &intel_encoder->base; |
79e53945 JB |
6393 | struct drm_crtc *crtc = NULL; |
6394 | struct drm_device *dev = encoder->dev; | |
d2dff872 | 6395 | struct drm_framebuffer *old_fb; |
79e53945 JB |
6396 | int i = -1; |
6397 | ||
d2dff872 CW |
6398 | DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", |
6399 | connector->base.id, drm_get_connector_name(connector), | |
6400 | encoder->base.id, drm_get_encoder_name(encoder)); | |
6401 | ||
79e53945 JB |
6402 | /* |
6403 | * Algorithm gets a little messy: | |
7a5e4805 | 6404 | * |
79e53945 JB |
6405 | * - if the connector already has an assigned crtc, use it (but make |
6406 | * sure it's on first) | |
7a5e4805 | 6407 | * |
79e53945 JB |
6408 | * - try to find the first unused crtc that can drive this connector, |
6409 | * and use that if we find one | |
79e53945 JB |
6410 | */ |
6411 | ||
6412 | /* See if we already have a CRTC for this connector */ | |
6413 | if (encoder->crtc) { | |
6414 | crtc = encoder->crtc; | |
8261b191 | 6415 | |
79e53945 | 6416 | intel_crtc = to_intel_crtc(crtc); |
8261b191 CW |
6417 | old->dpms_mode = intel_crtc->dpms_mode; |
6418 | old->load_detect_temp = false; | |
6419 | ||
6420 | /* Make sure the crtc and connector are running */ | |
79e53945 | 6421 | if (intel_crtc->dpms_mode != DRM_MODE_DPMS_ON) { |
6492711d CW |
6422 | struct drm_encoder_helper_funcs *encoder_funcs; |
6423 | struct drm_crtc_helper_funcs *crtc_funcs; | |
6424 | ||
79e53945 JB |
6425 | crtc_funcs = crtc->helper_private; |
6426 | crtc_funcs->dpms(crtc, DRM_MODE_DPMS_ON); | |
6492711d CW |
6427 | |
6428 | encoder_funcs = encoder->helper_private; | |
79e53945 JB |
6429 | encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON); |
6430 | } | |
8261b191 | 6431 | |
7173188d | 6432 | return true; |
79e53945 JB |
6433 | } |
6434 | ||
6435 | /* Find an unused one (if possible) */ | |
6436 | list_for_each_entry(possible_crtc, &dev->mode_config.crtc_list, head) { | |
6437 | i++; | |
6438 | if (!(encoder->possible_crtcs & (1 << i))) | |
6439 | continue; | |
6440 | if (!possible_crtc->enabled) { | |
6441 | crtc = possible_crtc; | |
6442 | break; | |
6443 | } | |
79e53945 JB |
6444 | } |
6445 | ||
6446 | /* | |
6447 | * If we didn't find an unused CRTC, don't use any. | |
6448 | */ | |
6449 | if (!crtc) { | |
7173188d CW |
6450 | DRM_DEBUG_KMS("no pipe available for load-detect\n"); |
6451 | return false; | |
79e53945 JB |
6452 | } |
6453 | ||
6454 | encoder->crtc = crtc; | |
c1c43977 | 6455 | connector->encoder = encoder; |
79e53945 JB |
6456 | |
6457 | intel_crtc = to_intel_crtc(crtc); | |
8261b191 CW |
6458 | old->dpms_mode = intel_crtc->dpms_mode; |
6459 | old->load_detect_temp = true; | |
d2dff872 | 6460 | old->release_fb = NULL; |
79e53945 | 6461 | |
6492711d CW |
6462 | if (!mode) |
6463 | mode = &load_detect_mode; | |
79e53945 | 6464 | |
d2dff872 CW |
6465 | old_fb = crtc->fb; |
6466 | ||
6467 | /* We need a framebuffer large enough to accommodate all accesses | |
6468 | * that the plane may generate whilst we perform load detection. | |
6469 | * We can not rely on the fbcon either being present (we get called | |
6470 | * during its initialisation to detect all boot displays, or it may | |
6471 | * not even exist) or that it is large enough to satisfy the | |
6472 | * requested mode. | |
6473 | */ | |
6474 | crtc->fb = mode_fits_in_fbdev(dev, mode); | |
6475 | if (crtc->fb == NULL) { | |
6476 | DRM_DEBUG_KMS("creating tmp fb for load-detection\n"); | |
6477 | crtc->fb = intel_framebuffer_create_for_mode(dev, mode, 24, 32); | |
6478 | old->release_fb = crtc->fb; | |
6479 | } else | |
6480 | DRM_DEBUG_KMS("reusing fbdev for load-detection framebuffer\n"); | |
6481 | if (IS_ERR(crtc->fb)) { | |
6482 | DRM_DEBUG_KMS("failed to allocate framebuffer for load-detection\n"); | |
6483 | crtc->fb = old_fb; | |
6484 | return false; | |
79e53945 | 6485 | } |
79e53945 | 6486 | |
d2dff872 | 6487 | if (!drm_crtc_helper_set_mode(crtc, mode, 0, 0, old_fb)) { |
6492711d | 6488 | DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n"); |
d2dff872 CW |
6489 | if (old->release_fb) |
6490 | old->release_fb->funcs->destroy(old->release_fb); | |
6491 | crtc->fb = old_fb; | |
6492711d | 6492 | return false; |
79e53945 | 6493 | } |
7173188d | 6494 | |
79e53945 | 6495 | /* let the connector get through one full cycle before testing */ |
9d0498a2 | 6496 | intel_wait_for_vblank(dev, intel_crtc->pipe); |
79e53945 | 6497 | |
7173188d | 6498 | return true; |
79e53945 JB |
6499 | } |
6500 | ||
c1c43977 | 6501 | void intel_release_load_detect_pipe(struct intel_encoder *intel_encoder, |
8261b191 CW |
6502 | struct drm_connector *connector, |
6503 | struct intel_load_detect_pipe *old) | |
79e53945 | 6504 | { |
4ef69c7a | 6505 | struct drm_encoder *encoder = &intel_encoder->base; |
79e53945 JB |
6506 | struct drm_device *dev = encoder->dev; |
6507 | struct drm_crtc *crtc = encoder->crtc; | |
6508 | struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; | |
6509 | struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private; | |
6510 | ||
d2dff872 CW |
6511 | DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n", |
6512 | connector->base.id, drm_get_connector_name(connector), | |
6513 | encoder->base.id, drm_get_encoder_name(encoder)); | |
6514 | ||
8261b191 | 6515 | if (old->load_detect_temp) { |
c1c43977 | 6516 | connector->encoder = NULL; |
79e53945 | 6517 | drm_helper_disable_unused_functions(dev); |
d2dff872 CW |
6518 | |
6519 | if (old->release_fb) | |
6520 | old->release_fb->funcs->destroy(old->release_fb); | |
6521 | ||
0622a53c | 6522 | return; |
79e53945 JB |
6523 | } |
6524 | ||
c751ce4f | 6525 | /* Switch crtc and encoder back off if necessary */ |
0622a53c CW |
6526 | if (old->dpms_mode != DRM_MODE_DPMS_ON) { |
6527 | encoder_funcs->dpms(encoder, old->dpms_mode); | |
8261b191 | 6528 | crtc_funcs->dpms(crtc, old->dpms_mode); |
79e53945 JB |
6529 | } |
6530 | } | |
6531 | ||
6532 | /* Returns the clock of the currently programmed mode of the given pipe. */ | |
6533 | static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc) | |
6534 | { | |
6535 | struct drm_i915_private *dev_priv = dev->dev_private; | |
6536 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | |
6537 | int pipe = intel_crtc->pipe; | |
548f245b | 6538 | u32 dpll = I915_READ(DPLL(pipe)); |
79e53945 JB |
6539 | u32 fp; |
6540 | intel_clock_t clock; | |
6541 | ||
6542 | if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0) | |
39adb7a5 | 6543 | fp = I915_READ(FP0(pipe)); |
79e53945 | 6544 | else |
39adb7a5 | 6545 | fp = I915_READ(FP1(pipe)); |
79e53945 JB |
6546 | |
6547 | clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT; | |
f2b115e6 AJ |
6548 | if (IS_PINEVIEW(dev)) { |
6549 | clock.n = ffs((fp & FP_N_PINEVIEW_DIV_MASK) >> FP_N_DIV_SHIFT) - 1; | |
6550 | clock.m2 = (fp & FP_M2_PINEVIEW_DIV_MASK) >> FP_M2_DIV_SHIFT; | |
2177832f SL |
6551 | } else { |
6552 | clock.n = (fp & FP_N_DIV_MASK) >> FP_N_DIV_SHIFT; | |
6553 | clock.m2 = (fp & FP_M2_DIV_MASK) >> FP_M2_DIV_SHIFT; | |
6554 | } | |
6555 | ||
a6c45cf0 | 6556 | if (!IS_GEN2(dev)) { |
f2b115e6 AJ |
6557 | if (IS_PINEVIEW(dev)) |
6558 | clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW) >> | |
6559 | DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW); | |
2177832f SL |
6560 | else |
6561 | clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK) >> | |
79e53945 JB |
6562 | DPLL_FPA01_P1_POST_DIV_SHIFT); |
6563 | ||
6564 | switch (dpll & DPLL_MODE_MASK) { | |
6565 | case DPLLB_MODE_DAC_SERIAL: | |
6566 | clock.p2 = dpll & DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 ? | |
6567 | 5 : 10; | |
6568 | break; | |
6569 | case DPLLB_MODE_LVDS: | |
6570 | clock.p2 = dpll & DPLLB_LVDS_P2_CLOCK_DIV_7 ? | |
6571 | 7 : 14; | |
6572 | break; | |
6573 | default: | |
28c97730 | 6574 | DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed " |
79e53945 JB |
6575 | "mode\n", (int)(dpll & DPLL_MODE_MASK)); |
6576 | return 0; | |
6577 | } | |
6578 | ||
6579 | /* XXX: Handle the 100Mhz refclk */ | |
2177832f | 6580 | intel_clock(dev, 96000, &clock); |
79e53945 JB |
6581 | } else { |
6582 | bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN); | |
6583 | ||
6584 | if (is_lvds) { | |
6585 | clock.p1 = ffs((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS) >> | |
6586 | DPLL_FPA01_P1_POST_DIV_SHIFT); | |
6587 | clock.p2 = 14; | |
6588 | ||
6589 | if ((dpll & PLL_REF_INPUT_MASK) == | |
6590 | PLLB_REF_INPUT_SPREADSPECTRUMIN) { | |
6591 | /* XXX: might not be 66MHz */ | |
2177832f | 6592 | intel_clock(dev, 66000, &clock); |
79e53945 | 6593 | } else |
2177832f | 6594 | intel_clock(dev, 48000, &clock); |
79e53945 JB |
6595 | } else { |
6596 | if (dpll & PLL_P1_DIVIDE_BY_TWO) | |
6597 | clock.p1 = 2; | |
6598 | else { | |
6599 | clock.p1 = ((dpll & DPLL_FPA01_P1_POST_DIV_MASK_I830) >> | |
6600 | DPLL_FPA01_P1_POST_DIV_SHIFT) + 2; | |
6601 | } | |
6602 | if (dpll & PLL_P2_DIVIDE_BY_4) | |
6603 | clock.p2 = 4; | |
6604 | else | |
6605 | clock.p2 = 2; | |
6606 | ||
2177832f | 6607 | intel_clock(dev, 48000, &clock); |
79e53945 JB |
6608 | } |
6609 | } | |
6610 | ||
6611 | /* XXX: It would be nice to validate the clocks, but we can't reuse | |
6612 | * i830PllIsValid() because it relies on the xf86_config connector | |
6613 | * configuration being accurate, which it isn't necessarily. | |
6614 | */ | |
6615 | ||
6616 | return clock.dot; | |
6617 | } | |
6618 | ||
6619 | /** Returns the currently programmed mode of the given pipe. */ | |
6620 | struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev, | |
6621 | struct drm_crtc *crtc) | |
6622 | { | |
548f245b | 6623 | struct drm_i915_private *dev_priv = dev->dev_private; |
79e53945 JB |
6624 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
6625 | int pipe = intel_crtc->pipe; | |
6626 | struct drm_display_mode *mode; | |
548f245b JB |
6627 | int htot = I915_READ(HTOTAL(pipe)); |
6628 | int hsync = I915_READ(HSYNC(pipe)); | |
6629 | int vtot = I915_READ(VTOTAL(pipe)); | |
6630 | int vsync = I915_READ(VSYNC(pipe)); | |
79e53945 JB |
6631 | |
6632 | mode = kzalloc(sizeof(*mode), GFP_KERNEL); | |
6633 | if (!mode) | |
6634 | return NULL; | |
6635 | ||
6636 | mode->clock = intel_crtc_clock_get(dev, crtc); | |
6637 | mode->hdisplay = (htot & 0xffff) + 1; | |
6638 | mode->htotal = ((htot & 0xffff0000) >> 16) + 1; | |
6639 | mode->hsync_start = (hsync & 0xffff) + 1; | |
6640 | mode->hsync_end = ((hsync & 0xffff0000) >> 16) + 1; | |
6641 | mode->vdisplay = (vtot & 0xffff) + 1; | |
6642 | mode->vtotal = ((vtot & 0xffff0000) >> 16) + 1; | |
6643 | mode->vsync_start = (vsync & 0xffff) + 1; | |
6644 | mode->vsync_end = ((vsync & 0xffff0000) >> 16) + 1; | |
6645 | ||
6646 | drm_mode_set_name(mode); | |
6647 | drm_mode_set_crtcinfo(mode, 0); | |
6648 | ||
6649 | return mode; | |
6650 | } | |
6651 | ||
652c393a JB |
6652 | #define GPU_IDLE_TIMEOUT 500 /* ms */ |
6653 | ||
6654 | /* When this timer fires, we've been idle for awhile */ | |
6655 | static void intel_gpu_idle_timer(unsigned long arg) | |
6656 | { | |
6657 | struct drm_device *dev = (struct drm_device *)arg; | |
6658 | drm_i915_private_t *dev_priv = dev->dev_private; | |
6659 | ||
ff7ea4c0 CW |
6660 | if (!list_empty(&dev_priv->mm.active_list)) { |
6661 | /* Still processing requests, so just re-arm the timer. */ | |
6662 | mod_timer(&dev_priv->idle_timer, jiffies + | |
6663 | msecs_to_jiffies(GPU_IDLE_TIMEOUT)); | |
6664 | return; | |
6665 | } | |
652c393a | 6666 | |
ff7ea4c0 | 6667 | dev_priv->busy = false; |
01dfba93 | 6668 | queue_work(dev_priv->wq, &dev_priv->idle_work); |
652c393a JB |
6669 | } |
6670 | ||
652c393a JB |
6671 | #define CRTC_IDLE_TIMEOUT 1000 /* ms */ |
6672 | ||
6673 | static void intel_crtc_idle_timer(unsigned long arg) | |
6674 | { | |
6675 | struct intel_crtc *intel_crtc = (struct intel_crtc *)arg; | |
6676 | struct drm_crtc *crtc = &intel_crtc->base; | |
6677 | drm_i915_private_t *dev_priv = crtc->dev->dev_private; | |
ff7ea4c0 | 6678 | struct intel_framebuffer *intel_fb; |
652c393a | 6679 | |
ff7ea4c0 CW |
6680 | intel_fb = to_intel_framebuffer(crtc->fb); |
6681 | if (intel_fb && intel_fb->obj->active) { | |
6682 | /* The framebuffer is still being accessed by the GPU. */ | |
6683 | mod_timer(&intel_crtc->idle_timer, jiffies + | |
6684 | msecs_to_jiffies(CRTC_IDLE_TIMEOUT)); | |
6685 | return; | |
6686 | } | |
652c393a | 6687 | |
ff7ea4c0 | 6688 | intel_crtc->busy = false; |
01dfba93 | 6689 | queue_work(dev_priv->wq, &dev_priv->idle_work); |
652c393a JB |
6690 | } |
6691 | ||
3dec0095 | 6692 | static void intel_increase_pllclock(struct drm_crtc *crtc) |
652c393a JB |
6693 | { |
6694 | struct drm_device *dev = crtc->dev; | |
6695 | drm_i915_private_t *dev_priv = dev->dev_private; | |
6696 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | |
6697 | int pipe = intel_crtc->pipe; | |
dbdc6479 JB |
6698 | int dpll_reg = DPLL(pipe); |
6699 | int dpll; | |
652c393a | 6700 | |
bad720ff | 6701 | if (HAS_PCH_SPLIT(dev)) |
652c393a JB |
6702 | return; |
6703 | ||
6704 | if (!dev_priv->lvds_downclock_avail) | |
6705 | return; | |
6706 | ||
dbdc6479 | 6707 | dpll = I915_READ(dpll_reg); |
652c393a | 6708 | if (!HAS_PIPE_CXSR(dev) && (dpll & DISPLAY_RATE_SELECT_FPA1)) { |
44d98a61 | 6709 | DRM_DEBUG_DRIVER("upclocking LVDS\n"); |
652c393a JB |
6710 | |
6711 | /* Unlock panel regs */ | |
dbdc6479 JB |
6712 | I915_WRITE(PP_CONTROL, |
6713 | I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS); | |
652c393a JB |
6714 | |
6715 | dpll &= ~DISPLAY_RATE_SELECT_FPA1; | |
6716 | I915_WRITE(dpll_reg, dpll); | |
9d0498a2 | 6717 | intel_wait_for_vblank(dev, pipe); |
dbdc6479 | 6718 | |
652c393a JB |
6719 | dpll = I915_READ(dpll_reg); |
6720 | if (dpll & DISPLAY_RATE_SELECT_FPA1) | |
44d98a61 | 6721 | DRM_DEBUG_DRIVER("failed to upclock LVDS!\n"); |
652c393a JB |
6722 | |
6723 | /* ...and lock them again */ | |
6724 | I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) & 0x3); | |
6725 | } | |
6726 | ||
6727 | /* Schedule downclock */ | |
3dec0095 DV |
6728 | mod_timer(&intel_crtc->idle_timer, jiffies + |
6729 | msecs_to_jiffies(CRTC_IDLE_TIMEOUT)); | |
652c393a JB |
6730 | } |
6731 | ||
6732 | static void intel_decrease_pllclock(struct drm_crtc *crtc) | |
6733 | { | |
6734 | struct drm_device *dev = crtc->dev; | |
6735 | drm_i915_private_t *dev_priv = dev->dev_private; | |
6736 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | |
6737 | int pipe = intel_crtc->pipe; | |
9db4a9c7 | 6738 | int dpll_reg = DPLL(pipe); |
652c393a JB |
6739 | int dpll = I915_READ(dpll_reg); |
6740 | ||
bad720ff | 6741 | if (HAS_PCH_SPLIT(dev)) |
652c393a JB |
6742 | return; |
6743 | ||
6744 | if (!dev_priv->lvds_downclock_avail) | |
6745 | return; | |
6746 | ||
6747 | /* | |
6748 | * Since this is called by a timer, we should never get here in | |
6749 | * the manual case. | |
6750 | */ | |
6751 | if (!HAS_PIPE_CXSR(dev) && intel_crtc->lowfreq_avail) { | |
44d98a61 | 6752 | DRM_DEBUG_DRIVER("downclocking LVDS\n"); |
652c393a JB |
6753 | |
6754 | /* Unlock panel regs */ | |
4a655f04 JB |
6755 | I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | |
6756 | PANEL_UNLOCK_REGS); | |
652c393a JB |
6757 | |
6758 | dpll |= DISPLAY_RATE_SELECT_FPA1; | |
6759 | I915_WRITE(dpll_reg, dpll); | |
9d0498a2 | 6760 | intel_wait_for_vblank(dev, pipe); |
652c393a JB |
6761 | dpll = I915_READ(dpll_reg); |
6762 | if (!(dpll & DISPLAY_RATE_SELECT_FPA1)) | |
44d98a61 | 6763 | DRM_DEBUG_DRIVER("failed to downclock LVDS!\n"); |
652c393a JB |
6764 | |
6765 | /* ...and lock them again */ | |
6766 | I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) & 0x3); | |
6767 | } | |
6768 | ||
6769 | } | |
6770 | ||
6771 | /** | |
6772 | * intel_idle_update - adjust clocks for idleness | |
6773 | * @work: work struct | |
6774 | * | |
6775 | * Either the GPU or display (or both) went idle. Check the busy status | |
6776 | * here and adjust the CRTC and GPU clocks as necessary. | |
6777 | */ | |
6778 | static void intel_idle_update(struct work_struct *work) | |
6779 | { | |
6780 | drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, | |
6781 | idle_work); | |
6782 | struct drm_device *dev = dev_priv->dev; | |
6783 | struct drm_crtc *crtc; | |
6784 | struct intel_crtc *intel_crtc; | |
6785 | ||
6786 | if (!i915_powersave) | |
6787 | return; | |
6788 | ||
6789 | mutex_lock(&dev->struct_mutex); | |
6790 | ||
7648fa99 JB |
6791 | i915_update_gfx_val(dev_priv); |
6792 | ||
652c393a JB |
6793 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
6794 | /* Skip inactive CRTCs */ | |
6795 | if (!crtc->fb) | |
6796 | continue; | |
6797 | ||
6798 | intel_crtc = to_intel_crtc(crtc); | |
6799 | if (!intel_crtc->busy) | |
6800 | intel_decrease_pllclock(crtc); | |
6801 | } | |
6802 | ||
45ac22c8 | 6803 | |
652c393a JB |
6804 | mutex_unlock(&dev->struct_mutex); |
6805 | } | |
6806 | ||
6807 | /** | |
6808 | * intel_mark_busy - mark the GPU and possibly the display busy | |
6809 | * @dev: drm device | |
6810 | * @obj: object we're operating on | |
6811 | * | |
6812 | * Callers can use this function to indicate that the GPU is busy processing | |
6813 | * commands. If @obj matches one of the CRTC objects (i.e. it's a scanout | |
6814 | * buffer), we'll also mark the display as busy, so we know to increase its | |
6815 | * clock frequency. | |
6816 | */ | |
05394f39 | 6817 | void intel_mark_busy(struct drm_device *dev, struct drm_i915_gem_object *obj) |
652c393a JB |
6818 | { |
6819 | drm_i915_private_t *dev_priv = dev->dev_private; | |
6820 | struct drm_crtc *crtc = NULL; | |
6821 | struct intel_framebuffer *intel_fb; | |
6822 | struct intel_crtc *intel_crtc; | |
6823 | ||
5e17ee74 ZW |
6824 | if (!drm_core_check_feature(dev, DRIVER_MODESET)) |
6825 | return; | |
6826 | ||
18b2190c | 6827 | if (!dev_priv->busy) |
28cf798f | 6828 | dev_priv->busy = true; |
18b2190c | 6829 | else |
28cf798f CW |
6830 | mod_timer(&dev_priv->idle_timer, jiffies + |
6831 | msecs_to_jiffies(GPU_IDLE_TIMEOUT)); | |
652c393a JB |
6832 | |
6833 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | |
6834 | if (!crtc->fb) | |
6835 | continue; | |
6836 | ||
6837 | intel_crtc = to_intel_crtc(crtc); | |
6838 | intel_fb = to_intel_framebuffer(crtc->fb); | |
6839 | if (intel_fb->obj == obj) { | |
6840 | if (!intel_crtc->busy) { | |
6841 | /* Non-busy -> busy, upclock */ | |
3dec0095 | 6842 | intel_increase_pllclock(crtc); |
652c393a JB |
6843 | intel_crtc->busy = true; |
6844 | } else { | |
6845 | /* Busy -> busy, put off timer */ | |
6846 | mod_timer(&intel_crtc->idle_timer, jiffies + | |
6847 | msecs_to_jiffies(CRTC_IDLE_TIMEOUT)); | |
6848 | } | |
6849 | } | |
6850 | } | |
6851 | } | |
6852 | ||
79e53945 JB |
6853 | static void intel_crtc_destroy(struct drm_crtc *crtc) |
6854 | { | |
6855 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | |
67e77c5a DV |
6856 | struct drm_device *dev = crtc->dev; |
6857 | struct intel_unpin_work *work; | |
6858 | unsigned long flags; | |
6859 | ||
6860 | spin_lock_irqsave(&dev->event_lock, flags); | |
6861 | work = intel_crtc->unpin_work; | |
6862 | intel_crtc->unpin_work = NULL; | |
6863 | spin_unlock_irqrestore(&dev->event_lock, flags); | |
6864 | ||
6865 | if (work) { | |
6866 | cancel_work_sync(&work->work); | |
6867 | kfree(work); | |
6868 | } | |
79e53945 JB |
6869 | |
6870 | drm_crtc_cleanup(crtc); | |
67e77c5a | 6871 | |
79e53945 JB |
6872 | kfree(intel_crtc); |
6873 | } | |
6874 | ||
6b95a207 KH |
6875 | static void intel_unpin_work_fn(struct work_struct *__work) |
6876 | { | |
6877 | struct intel_unpin_work *work = | |
6878 | container_of(__work, struct intel_unpin_work, work); | |
6879 | ||
6880 | mutex_lock(&work->dev->struct_mutex); | |
b1b87f6b | 6881 | i915_gem_object_unpin(work->old_fb_obj); |
05394f39 CW |
6882 | drm_gem_object_unreference(&work->pending_flip_obj->base); |
6883 | drm_gem_object_unreference(&work->old_fb_obj->base); | |
d9e86c0e | 6884 | |
7782de3b | 6885 | intel_update_fbc(work->dev); |
6b95a207 KH |
6886 | mutex_unlock(&work->dev->struct_mutex); |
6887 | kfree(work); | |
6888 | } | |
6889 | ||
1afe3e9d | 6890 | static void do_intel_finish_page_flip(struct drm_device *dev, |
49b14a5c | 6891 | struct drm_crtc *crtc) |
6b95a207 KH |
6892 | { |
6893 | drm_i915_private_t *dev_priv = dev->dev_private; | |
6b95a207 KH |
6894 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
6895 | struct intel_unpin_work *work; | |
05394f39 | 6896 | struct drm_i915_gem_object *obj; |
6b95a207 | 6897 | struct drm_pending_vblank_event *e; |
49b14a5c | 6898 | struct timeval tnow, tvbl; |
6b95a207 KH |
6899 | unsigned long flags; |
6900 | ||
6901 | /* Ignore early vblank irqs */ | |
6902 | if (intel_crtc == NULL) | |
6903 | return; | |
6904 | ||
49b14a5c MK |
6905 | do_gettimeofday(&tnow); |
6906 | ||
6b95a207 KH |
6907 | spin_lock_irqsave(&dev->event_lock, flags); |
6908 | work = intel_crtc->unpin_work; | |
6909 | if (work == NULL || !work->pending) { | |
6910 | spin_unlock_irqrestore(&dev->event_lock, flags); | |
6911 | return; | |
6912 | } | |
6913 | ||
6914 | intel_crtc->unpin_work = NULL; | |
6b95a207 KH |
6915 | |
6916 | if (work->event) { | |
6917 | e = work->event; | |
49b14a5c | 6918 | e->event.sequence = drm_vblank_count_and_time(dev, intel_crtc->pipe, &tvbl); |
0af7e4df MK |
6919 | |
6920 | /* Called before vblank count and timestamps have | |
6921 | * been updated for the vblank interval of flip | |
6922 | * completion? Need to increment vblank count and | |
6923 | * add one videorefresh duration to returned timestamp | |
49b14a5c MK |
6924 | * to account for this. We assume this happened if we |
6925 | * get called over 0.9 frame durations after the last | |
6926 | * timestamped vblank. | |
6927 | * | |
6928 | * This calculation can not be used with vrefresh rates | |
6929 | * below 5Hz (10Hz to be on the safe side) without | |
6930 | * promoting to 64 integers. | |
0af7e4df | 6931 | */ |
49b14a5c MK |
6932 | if (10 * (timeval_to_ns(&tnow) - timeval_to_ns(&tvbl)) > |
6933 | 9 * crtc->framedur_ns) { | |
0af7e4df | 6934 | e->event.sequence++; |
49b14a5c MK |
6935 | tvbl = ns_to_timeval(timeval_to_ns(&tvbl) + |
6936 | crtc->framedur_ns); | |
0af7e4df MK |
6937 | } |
6938 | ||
49b14a5c MK |
6939 | e->event.tv_sec = tvbl.tv_sec; |
6940 | e->event.tv_usec = tvbl.tv_usec; | |
0af7e4df | 6941 | |
6b95a207 KH |
6942 | list_add_tail(&e->base.link, |
6943 | &e->base.file_priv->event_list); | |
6944 | wake_up_interruptible(&e->base.file_priv->event_wait); | |
6945 | } | |
6946 | ||
0af7e4df MK |
6947 | drm_vblank_put(dev, intel_crtc->pipe); |
6948 | ||
6b95a207 KH |
6949 | spin_unlock_irqrestore(&dev->event_lock, flags); |
6950 | ||
05394f39 | 6951 | obj = work->old_fb_obj; |
d9e86c0e | 6952 | |
e59f2bac | 6953 | atomic_clear_mask(1 << intel_crtc->plane, |
05394f39 CW |
6954 | &obj->pending_flip.counter); |
6955 | if (atomic_read(&obj->pending_flip) == 0) | |
f787a5f5 | 6956 | wake_up(&dev_priv->pending_flip_queue); |
d9e86c0e | 6957 | |
6b95a207 | 6958 | schedule_work(&work->work); |
e5510fac JB |
6959 | |
6960 | trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj); | |
6b95a207 KH |
6961 | } |
6962 | ||
1afe3e9d JB |
6963 | void intel_finish_page_flip(struct drm_device *dev, int pipe) |
6964 | { | |
6965 | drm_i915_private_t *dev_priv = dev->dev_private; | |
6966 | struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; | |
6967 | ||
49b14a5c | 6968 | do_intel_finish_page_flip(dev, crtc); |
1afe3e9d JB |
6969 | } |
6970 | ||
6971 | void intel_finish_page_flip_plane(struct drm_device *dev, int plane) | |
6972 | { | |
6973 | drm_i915_private_t *dev_priv = dev->dev_private; | |
6974 | struct drm_crtc *crtc = dev_priv->plane_to_crtc_mapping[plane]; | |
6975 | ||
49b14a5c | 6976 | do_intel_finish_page_flip(dev, crtc); |
1afe3e9d JB |
6977 | } |
6978 | ||
6b95a207 KH |
6979 | void intel_prepare_page_flip(struct drm_device *dev, int plane) |
6980 | { | |
6981 | drm_i915_private_t *dev_priv = dev->dev_private; | |
6982 | struct intel_crtc *intel_crtc = | |
6983 | to_intel_crtc(dev_priv->plane_to_crtc_mapping[plane]); | |
6984 | unsigned long flags; | |
6985 | ||
6986 | spin_lock_irqsave(&dev->event_lock, flags); | |
de3f440f | 6987 | if (intel_crtc->unpin_work) { |
4e5359cd SF |
6988 | if ((++intel_crtc->unpin_work->pending) > 1) |
6989 | DRM_ERROR("Prepared flip multiple times\n"); | |
de3f440f JB |
6990 | } else { |
6991 | DRM_DEBUG_DRIVER("preparing flip with no unpin work?\n"); | |
6992 | } | |
6b95a207 KH |
6993 | spin_unlock_irqrestore(&dev->event_lock, flags); |
6994 | } | |
6995 | ||
8c9f3aaf JB |
6996 | static int intel_gen2_queue_flip(struct drm_device *dev, |
6997 | struct drm_crtc *crtc, | |
6998 | struct drm_framebuffer *fb, | |
6999 | struct drm_i915_gem_object *obj) | |
7000 | { | |
7001 | struct drm_i915_private *dev_priv = dev->dev_private; | |
7002 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | |
7003 | unsigned long offset; | |
7004 | u32 flip_mask; | |
7005 | int ret; | |
7006 | ||
7007 | ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv)); | |
7008 | if (ret) | |
7009 | goto out; | |
7010 | ||
7011 | /* Offset into the new buffer for cases of shared fbs between CRTCs */ | |
7012 | offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8; | |
7013 | ||
7014 | ret = BEGIN_LP_RING(6); | |
7015 | if (ret) | |
7016 | goto out; | |
7017 | ||
7018 | /* Can't queue multiple flips, so wait for the previous | |
7019 | * one to finish before executing the next. | |
7020 | */ | |
7021 | if (intel_crtc->plane) | |
7022 | flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; | |
7023 | else | |
7024 | flip_mask = MI_WAIT_FOR_PLANE_A_FLIP; | |
7025 | OUT_RING(MI_WAIT_FOR_EVENT | flip_mask); | |
7026 | OUT_RING(MI_NOOP); | |
7027 | OUT_RING(MI_DISPLAY_FLIP | | |
7028 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); | |
7029 | OUT_RING(fb->pitch); | |
7030 | OUT_RING(obj->gtt_offset + offset); | |
7031 | OUT_RING(MI_NOOP); | |
7032 | ADVANCE_LP_RING(); | |
7033 | out: | |
7034 | return ret; | |
7035 | } | |
7036 | ||
7037 | static int intel_gen3_queue_flip(struct drm_device *dev, | |
7038 | struct drm_crtc *crtc, | |
7039 | struct drm_framebuffer *fb, | |
7040 | struct drm_i915_gem_object *obj) | |
7041 | { | |
7042 | struct drm_i915_private *dev_priv = dev->dev_private; | |
7043 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | |
7044 | unsigned long offset; | |
7045 | u32 flip_mask; | |
7046 | int ret; | |
7047 | ||
7048 | ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv)); | |
7049 | if (ret) | |
7050 | goto out; | |
7051 | ||
7052 | /* Offset into the new buffer for cases of shared fbs between CRTCs */ | |
7053 | offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8; | |
7054 | ||
7055 | ret = BEGIN_LP_RING(6); | |
7056 | if (ret) | |
7057 | goto out; | |
7058 | ||
7059 | if (intel_crtc->plane) | |
7060 | flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; | |
7061 | else | |
7062 | flip_mask = MI_WAIT_FOR_PLANE_A_FLIP; | |
7063 | OUT_RING(MI_WAIT_FOR_EVENT | flip_mask); | |
7064 | OUT_RING(MI_NOOP); | |
7065 | OUT_RING(MI_DISPLAY_FLIP_I915 | | |
7066 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); | |
7067 | OUT_RING(fb->pitch); | |
7068 | OUT_RING(obj->gtt_offset + offset); | |
7069 | OUT_RING(MI_NOOP); | |
7070 | ||
7071 | ADVANCE_LP_RING(); | |
7072 | out: | |
7073 | return ret; | |
7074 | } | |
7075 | ||
7076 | static int intel_gen4_queue_flip(struct drm_device *dev, | |
7077 | struct drm_crtc *crtc, | |
7078 | struct drm_framebuffer *fb, | |
7079 | struct drm_i915_gem_object *obj) | |
7080 | { | |
7081 | struct drm_i915_private *dev_priv = dev->dev_private; | |
7082 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | |
7083 | uint32_t pf, pipesrc; | |
7084 | int ret; | |
7085 | ||
7086 | ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv)); | |
7087 | if (ret) | |
7088 | goto out; | |
7089 | ||
7090 | ret = BEGIN_LP_RING(4); | |
7091 | if (ret) | |
7092 | goto out; | |
7093 | ||
7094 | /* i965+ uses the linear or tiled offsets from the | |
7095 | * Display Registers (which do not change across a page-flip) | |
7096 | * so we need only reprogram the base address. | |
7097 | */ | |
7098 | OUT_RING(MI_DISPLAY_FLIP | | |
7099 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); | |
7100 | OUT_RING(fb->pitch); | |
7101 | OUT_RING(obj->gtt_offset | obj->tiling_mode); | |
7102 | ||
7103 | /* XXX Enabling the panel-fitter across page-flip is so far | |
7104 | * untested on non-native modes, so ignore it for now. | |
7105 | * pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE; | |
7106 | */ | |
7107 | pf = 0; | |
7108 | pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff; | |
7109 | OUT_RING(pf | pipesrc); | |
7110 | ADVANCE_LP_RING(); | |
7111 | out: | |
7112 | return ret; | |
7113 | } | |
7114 | ||
7115 | static int intel_gen6_queue_flip(struct drm_device *dev, | |
7116 | struct drm_crtc *crtc, | |
7117 | struct drm_framebuffer *fb, | |
7118 | struct drm_i915_gem_object *obj) | |
7119 | { | |
7120 | struct drm_i915_private *dev_priv = dev->dev_private; | |
7121 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | |
7122 | uint32_t pf, pipesrc; | |
7123 | int ret; | |
7124 | ||
7125 | ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv)); | |
7126 | if (ret) | |
7127 | goto out; | |
7128 | ||
7129 | ret = BEGIN_LP_RING(4); | |
7130 | if (ret) | |
7131 | goto out; | |
7132 | ||
7133 | OUT_RING(MI_DISPLAY_FLIP | | |
7134 | MI_DISPLAY_FLIP_PLANE(intel_crtc->plane)); | |
7135 | OUT_RING(fb->pitch | obj->tiling_mode); | |
7136 | OUT_RING(obj->gtt_offset); | |
7137 | ||
7138 | pf = I915_READ(PF_CTL(intel_crtc->pipe)) & PF_ENABLE; | |
7139 | pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff; | |
7140 | OUT_RING(pf | pipesrc); | |
7141 | ADVANCE_LP_RING(); | |
7142 | out: | |
7143 | return ret; | |
7144 | } | |
7145 | ||
7c9017e5 JB |
7146 | /* |
7147 | * On gen7 we currently use the blit ring because (in early silicon at least) | |
7148 | * the render ring doesn't give us interrpts for page flip completion, which | |
7149 | * means clients will hang after the first flip is queued. Fortunately the | |
7150 | * blit ring generates interrupts properly, so use it instead. | |
7151 | */ | |
7152 | static int intel_gen7_queue_flip(struct drm_device *dev, | |
7153 | struct drm_crtc *crtc, | |
7154 | struct drm_framebuffer *fb, | |
7155 | struct drm_i915_gem_object *obj) | |
7156 | { | |
7157 | struct drm_i915_private *dev_priv = dev->dev_private; | |
7158 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | |
7159 | struct intel_ring_buffer *ring = &dev_priv->ring[BCS]; | |
7160 | int ret; | |
7161 | ||
7162 | ret = intel_pin_and_fence_fb_obj(dev, obj, ring); | |
7163 | if (ret) | |
7164 | goto out; | |
7165 | ||
7166 | ret = intel_ring_begin(ring, 4); | |
7167 | if (ret) | |
7168 | goto out; | |
7169 | ||
7170 | intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | (intel_crtc->plane << 19)); | |
7171 | intel_ring_emit(ring, (fb->pitch | obj->tiling_mode)); | |
7172 | intel_ring_emit(ring, (obj->gtt_offset)); | |
7173 | intel_ring_emit(ring, (MI_NOOP)); | |
7174 | intel_ring_advance(ring); | |
7175 | out: | |
7176 | return ret; | |
7177 | } | |
7178 | ||
8c9f3aaf JB |
7179 | static int intel_default_queue_flip(struct drm_device *dev, |
7180 | struct drm_crtc *crtc, | |
7181 | struct drm_framebuffer *fb, | |
7182 | struct drm_i915_gem_object *obj) | |
7183 | { | |
7184 | return -ENODEV; | |
7185 | } | |
7186 | ||
6b95a207 KH |
7187 | static int intel_crtc_page_flip(struct drm_crtc *crtc, |
7188 | struct drm_framebuffer *fb, | |
7189 | struct drm_pending_vblank_event *event) | |
7190 | { | |
7191 | struct drm_device *dev = crtc->dev; | |
7192 | struct drm_i915_private *dev_priv = dev->dev_private; | |
7193 | struct intel_framebuffer *intel_fb; | |
05394f39 | 7194 | struct drm_i915_gem_object *obj; |
6b95a207 KH |
7195 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
7196 | struct intel_unpin_work *work; | |
8c9f3aaf | 7197 | unsigned long flags; |
52e68630 | 7198 | int ret; |
6b95a207 KH |
7199 | |
7200 | work = kzalloc(sizeof *work, GFP_KERNEL); | |
7201 | if (work == NULL) | |
7202 | return -ENOMEM; | |
7203 | ||
6b95a207 KH |
7204 | work->event = event; |
7205 | work->dev = crtc->dev; | |
7206 | intel_fb = to_intel_framebuffer(crtc->fb); | |
b1b87f6b | 7207 | work->old_fb_obj = intel_fb->obj; |
6b95a207 KH |
7208 | INIT_WORK(&work->work, intel_unpin_work_fn); |
7209 | ||
7317c75e JB |
7210 | ret = drm_vblank_get(dev, intel_crtc->pipe); |
7211 | if (ret) | |
7212 | goto free_work; | |
7213 | ||
6b95a207 KH |
7214 | /* We borrow the event spin lock for protecting unpin_work */ |
7215 | spin_lock_irqsave(&dev->event_lock, flags); | |
7216 | if (intel_crtc->unpin_work) { | |
7217 | spin_unlock_irqrestore(&dev->event_lock, flags); | |
7218 | kfree(work); | |
7317c75e | 7219 | drm_vblank_put(dev, intel_crtc->pipe); |
468f0b44 CW |
7220 | |
7221 | DRM_DEBUG_DRIVER("flip queue: crtc already busy\n"); | |
6b95a207 KH |
7222 | return -EBUSY; |
7223 | } | |
7224 | intel_crtc->unpin_work = work; | |
7225 | spin_unlock_irqrestore(&dev->event_lock, flags); | |
7226 | ||
7227 | intel_fb = to_intel_framebuffer(fb); | |
7228 | obj = intel_fb->obj; | |
7229 | ||
468f0b44 | 7230 | mutex_lock(&dev->struct_mutex); |
6b95a207 | 7231 | |
75dfca80 | 7232 | /* Reference the objects for the scheduled work. */ |
05394f39 CW |
7233 | drm_gem_object_reference(&work->old_fb_obj->base); |
7234 | drm_gem_object_reference(&obj->base); | |
6b95a207 KH |
7235 | |
7236 | crtc->fb = fb; | |
96b099fd | 7237 | |
e1f99ce6 | 7238 | work->pending_flip_obj = obj; |
e1f99ce6 | 7239 | |
4e5359cd SF |
7240 | work->enable_stall_check = true; |
7241 | ||
e1f99ce6 CW |
7242 | /* Block clients from rendering to the new back buffer until |
7243 | * the flip occurs and the object is no longer visible. | |
7244 | */ | |
05394f39 | 7245 | atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip); |
e1f99ce6 | 7246 | |
8c9f3aaf JB |
7247 | ret = dev_priv->display.queue_flip(dev, crtc, fb, obj); |
7248 | if (ret) | |
7249 | goto cleanup_pending; | |
6b95a207 | 7250 | |
7782de3b | 7251 | intel_disable_fbc(dev); |
6b95a207 KH |
7252 | mutex_unlock(&dev->struct_mutex); |
7253 | ||
e5510fac JB |
7254 | trace_i915_flip_request(intel_crtc->plane, obj); |
7255 | ||
6b95a207 | 7256 | return 0; |
96b099fd | 7257 | |
8c9f3aaf JB |
7258 | cleanup_pending: |
7259 | atomic_sub(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip); | |
05394f39 CW |
7260 | drm_gem_object_unreference(&work->old_fb_obj->base); |
7261 | drm_gem_object_unreference(&obj->base); | |
96b099fd CW |
7262 | mutex_unlock(&dev->struct_mutex); |
7263 | ||
7264 | spin_lock_irqsave(&dev->event_lock, flags); | |
7265 | intel_crtc->unpin_work = NULL; | |
7266 | spin_unlock_irqrestore(&dev->event_lock, flags); | |
7267 | ||
7317c75e JB |
7268 | drm_vblank_put(dev, intel_crtc->pipe); |
7269 | free_work: | |
96b099fd CW |
7270 | kfree(work); |
7271 | ||
7272 | return ret; | |
6b95a207 KH |
7273 | } |
7274 | ||
47f1c6c9 CW |
7275 | static void intel_sanitize_modesetting(struct drm_device *dev, |
7276 | int pipe, int plane) | |
7277 | { | |
7278 | struct drm_i915_private *dev_priv = dev->dev_private; | |
7279 | u32 reg, val; | |
7280 | ||
7281 | if (HAS_PCH_SPLIT(dev)) | |
7282 | return; | |
7283 | ||
7284 | /* Who knows what state these registers were left in by the BIOS or | |
7285 | * grub? | |
7286 | * | |
7287 | * If we leave the registers in a conflicting state (e.g. with the | |
7288 | * display plane reading from the other pipe than the one we intend | |
7289 | * to use) then when we attempt to teardown the active mode, we will | |
7290 | * not disable the pipes and planes in the correct order -- leaving | |
7291 | * a plane reading from a disabled pipe and possibly leading to | |
7292 | * undefined behaviour. | |
7293 | */ | |
7294 | ||
7295 | reg = DSPCNTR(plane); | |
7296 | val = I915_READ(reg); | |
7297 | ||
7298 | if ((val & DISPLAY_PLANE_ENABLE) == 0) | |
7299 | return; | |
7300 | if (!!(val & DISPPLANE_SEL_PIPE_MASK) == pipe) | |
7301 | return; | |
7302 | ||
7303 | /* This display plane is active and attached to the other CPU pipe. */ | |
7304 | pipe = !pipe; | |
7305 | ||
7306 | /* Disable the plane and wait for it to stop reading from the pipe. */ | |
b24e7179 JB |
7307 | intel_disable_plane(dev_priv, plane, pipe); |
7308 | intel_disable_pipe(dev_priv, pipe); | |
47f1c6c9 | 7309 | } |
79e53945 | 7310 | |
f6e5b160 CW |
7311 | static void intel_crtc_reset(struct drm_crtc *crtc) |
7312 | { | |
7313 | struct drm_device *dev = crtc->dev; | |
7314 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | |
7315 | ||
7316 | /* Reset flags back to the 'unknown' status so that they | |
7317 | * will be correctly set on the initial modeset. | |
7318 | */ | |
7319 | intel_crtc->dpms_mode = -1; | |
7320 | ||
7321 | /* We need to fix up any BIOS configuration that conflicts with | |
7322 | * our expectations. | |
7323 | */ | |
7324 | intel_sanitize_modesetting(dev, intel_crtc->pipe, intel_crtc->plane); | |
7325 | } | |
7326 | ||
7327 | static struct drm_crtc_helper_funcs intel_helper_funcs = { | |
7328 | .dpms = intel_crtc_dpms, | |
7329 | .mode_fixup = intel_crtc_mode_fixup, | |
7330 | .mode_set = intel_crtc_mode_set, | |
7331 | .mode_set_base = intel_pipe_set_base, | |
7332 | .mode_set_base_atomic = intel_pipe_set_base_atomic, | |
7333 | .load_lut = intel_crtc_load_lut, | |
7334 | .disable = intel_crtc_disable, | |
7335 | }; | |
7336 | ||
7337 | static const struct drm_crtc_funcs intel_crtc_funcs = { | |
7338 | .reset = intel_crtc_reset, | |
7339 | .cursor_set = intel_crtc_cursor_set, | |
7340 | .cursor_move = intel_crtc_cursor_move, | |
7341 | .gamma_set = intel_crtc_gamma_set, | |
7342 | .set_config = drm_crtc_helper_set_config, | |
7343 | .destroy = intel_crtc_destroy, | |
7344 | .page_flip = intel_crtc_page_flip, | |
7345 | }; | |
7346 | ||
b358d0a6 | 7347 | static void intel_crtc_init(struct drm_device *dev, int pipe) |
79e53945 | 7348 | { |
22fd0fab | 7349 | drm_i915_private_t *dev_priv = dev->dev_private; |
79e53945 JB |
7350 | struct intel_crtc *intel_crtc; |
7351 | int i; | |
7352 | ||
7353 | intel_crtc = kzalloc(sizeof(struct intel_crtc) + (INTELFB_CONN_LIMIT * sizeof(struct drm_connector *)), GFP_KERNEL); | |
7354 | if (intel_crtc == NULL) | |
7355 | return; | |
7356 | ||
7357 | drm_crtc_init(dev, &intel_crtc->base, &intel_crtc_funcs); | |
7358 | ||
7359 | drm_mode_crtc_set_gamma_size(&intel_crtc->base, 256); | |
79e53945 JB |
7360 | for (i = 0; i < 256; i++) { |
7361 | intel_crtc->lut_r[i] = i; | |
7362 | intel_crtc->lut_g[i] = i; | |
7363 | intel_crtc->lut_b[i] = i; | |
7364 | } | |
7365 | ||
80824003 JB |
7366 | /* Swap pipes & planes for FBC on pre-965 */ |
7367 | intel_crtc->pipe = pipe; | |
7368 | intel_crtc->plane = pipe; | |
e2e767ab | 7369 | if (IS_MOBILE(dev) && IS_GEN3(dev)) { |
28c97730 | 7370 | DRM_DEBUG_KMS("swapping pipes & planes for FBC\n"); |
e2e767ab | 7371 | intel_crtc->plane = !pipe; |
80824003 JB |
7372 | } |
7373 | ||
22fd0fab JB |
7374 | BUG_ON(pipe >= ARRAY_SIZE(dev_priv->plane_to_crtc_mapping) || |
7375 | dev_priv->plane_to_crtc_mapping[intel_crtc->plane] != NULL); | |
7376 | dev_priv->plane_to_crtc_mapping[intel_crtc->plane] = &intel_crtc->base; | |
7377 | dev_priv->pipe_to_crtc_mapping[intel_crtc->pipe] = &intel_crtc->base; | |
7378 | ||
5d1d0cc8 | 7379 | intel_crtc_reset(&intel_crtc->base); |
04dbff52 | 7380 | intel_crtc->active = true; /* force the pipe off on setup_init_config */ |
5a354204 | 7381 | intel_crtc->bpp = 24; /* default for pre-Ironlake */ |
7e7d76c3 JB |
7382 | |
7383 | if (HAS_PCH_SPLIT(dev)) { | |
4b645f14 JB |
7384 | if (pipe == 2 && IS_IVYBRIDGE(dev)) |
7385 | intel_crtc->no_pll = true; | |
7e7d76c3 JB |
7386 | intel_helper_funcs.prepare = ironlake_crtc_prepare; |
7387 | intel_helper_funcs.commit = ironlake_crtc_commit; | |
7388 | } else { | |
7389 | intel_helper_funcs.prepare = i9xx_crtc_prepare; | |
7390 | intel_helper_funcs.commit = i9xx_crtc_commit; | |
7391 | } | |
7392 | ||
79e53945 JB |
7393 | drm_crtc_helper_add(&intel_crtc->base, &intel_helper_funcs); |
7394 | ||
652c393a JB |
7395 | intel_crtc->busy = false; |
7396 | ||
7397 | setup_timer(&intel_crtc->idle_timer, intel_crtc_idle_timer, | |
7398 | (unsigned long)intel_crtc); | |
79e53945 JB |
7399 | } |
7400 | ||
08d7b3d1 | 7401 | int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, |
05394f39 | 7402 | struct drm_file *file) |
08d7b3d1 CW |
7403 | { |
7404 | drm_i915_private_t *dev_priv = dev->dev_private; | |
7405 | struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data; | |
c05422d5 DV |
7406 | struct drm_mode_object *drmmode_obj; |
7407 | struct intel_crtc *crtc; | |
08d7b3d1 CW |
7408 | |
7409 | if (!dev_priv) { | |
7410 | DRM_ERROR("called with no initialization\n"); | |
7411 | return -EINVAL; | |
7412 | } | |
7413 | ||
c05422d5 DV |
7414 | drmmode_obj = drm_mode_object_find(dev, pipe_from_crtc_id->crtc_id, |
7415 | DRM_MODE_OBJECT_CRTC); | |
08d7b3d1 | 7416 | |
c05422d5 | 7417 | if (!drmmode_obj) { |
08d7b3d1 CW |
7418 | DRM_ERROR("no such CRTC id\n"); |
7419 | return -EINVAL; | |
7420 | } | |
7421 | ||
c05422d5 DV |
7422 | crtc = to_intel_crtc(obj_to_crtc(drmmode_obj)); |
7423 | pipe_from_crtc_id->pipe = crtc->pipe; | |
08d7b3d1 | 7424 | |
c05422d5 | 7425 | return 0; |
08d7b3d1 CW |
7426 | } |
7427 | ||
c5e4df33 | 7428 | static int intel_encoder_clones(struct drm_device *dev, int type_mask) |
79e53945 | 7429 | { |
4ef69c7a | 7430 | struct intel_encoder *encoder; |
79e53945 | 7431 | int index_mask = 0; |
79e53945 JB |
7432 | int entry = 0; |
7433 | ||
4ef69c7a CW |
7434 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) { |
7435 | if (type_mask & encoder->clone_mask) | |
79e53945 JB |
7436 | index_mask |= (1 << entry); |
7437 | entry++; | |
7438 | } | |
4ef69c7a | 7439 | |
79e53945 JB |
7440 | return index_mask; |
7441 | } | |
7442 | ||
4d302442 CW |
7443 | static bool has_edp_a(struct drm_device *dev) |
7444 | { | |
7445 | struct drm_i915_private *dev_priv = dev->dev_private; | |
7446 | ||
7447 | if (!IS_MOBILE(dev)) | |
7448 | return false; | |
7449 | ||
7450 | if ((I915_READ(DP_A) & DP_DETECTED) == 0) | |
7451 | return false; | |
7452 | ||
7453 | if (IS_GEN5(dev) && | |
7454 | (I915_READ(ILK_DISPLAY_CHICKEN_FUSES) & ILK_eDP_A_DISABLE)) | |
7455 | return false; | |
7456 | ||
7457 | return true; | |
7458 | } | |
7459 | ||
79e53945 JB |
7460 | static void intel_setup_outputs(struct drm_device *dev) |
7461 | { | |
725e30ad | 7462 | struct drm_i915_private *dev_priv = dev->dev_private; |
4ef69c7a | 7463 | struct intel_encoder *encoder; |
cb0953d7 | 7464 | bool dpd_is_edp = false; |
c5d1b51d | 7465 | bool has_lvds = false; |
79e53945 | 7466 | |
541998a1 | 7467 | if (IS_MOBILE(dev) && !IS_I830(dev)) |
c5d1b51d CW |
7468 | has_lvds = intel_lvds_init(dev); |
7469 | if (!has_lvds && !HAS_PCH_SPLIT(dev)) { | |
7470 | /* disable the panel fitter on everything but LVDS */ | |
7471 | I915_WRITE(PFIT_CONTROL, 0); | |
7472 | } | |
79e53945 | 7473 | |
bad720ff | 7474 | if (HAS_PCH_SPLIT(dev)) { |
cb0953d7 | 7475 | dpd_is_edp = intel_dpd_is_edp(dev); |
30ad48b7 | 7476 | |
4d302442 | 7477 | if (has_edp_a(dev)) |
32f9d658 ZW |
7478 | intel_dp_init(dev, DP_A); |
7479 | ||
cb0953d7 AJ |
7480 | if (dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED)) |
7481 | intel_dp_init(dev, PCH_DP_D); | |
7482 | } | |
7483 | ||
7484 | intel_crt_init(dev); | |
7485 | ||
7486 | if (HAS_PCH_SPLIT(dev)) { | |
7487 | int found; | |
7488 | ||
30ad48b7 | 7489 | if (I915_READ(HDMIB) & PORT_DETECTED) { |
461ed3ca ZY |
7490 | /* PCH SDVOB multiplex with HDMIB */ |
7491 | found = intel_sdvo_init(dev, PCH_SDVOB); | |
30ad48b7 ZW |
7492 | if (!found) |
7493 | intel_hdmi_init(dev, HDMIB); | |
5eb08b69 ZW |
7494 | if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED)) |
7495 | intel_dp_init(dev, PCH_DP_B); | |
30ad48b7 ZW |
7496 | } |
7497 | ||
7498 | if (I915_READ(HDMIC) & PORT_DETECTED) | |
7499 | intel_hdmi_init(dev, HDMIC); | |
7500 | ||
7501 | if (I915_READ(HDMID) & PORT_DETECTED) | |
7502 | intel_hdmi_init(dev, HDMID); | |
7503 | ||
5eb08b69 ZW |
7504 | if (I915_READ(PCH_DP_C) & DP_DETECTED) |
7505 | intel_dp_init(dev, PCH_DP_C); | |
7506 | ||
cb0953d7 | 7507 | if (!dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED)) |
5eb08b69 ZW |
7508 | intel_dp_init(dev, PCH_DP_D); |
7509 | ||
103a196f | 7510 | } else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) { |
27185ae1 | 7511 | bool found = false; |
7d57382e | 7512 | |
725e30ad | 7513 | if (I915_READ(SDVOB) & SDVO_DETECTED) { |
b01f2c3a | 7514 | DRM_DEBUG_KMS("probing SDVOB\n"); |
725e30ad | 7515 | found = intel_sdvo_init(dev, SDVOB); |
b01f2c3a JB |
7516 | if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) { |
7517 | DRM_DEBUG_KMS("probing HDMI on SDVOB\n"); | |
725e30ad | 7518 | intel_hdmi_init(dev, SDVOB); |
b01f2c3a | 7519 | } |
27185ae1 | 7520 | |
b01f2c3a JB |
7521 | if (!found && SUPPORTS_INTEGRATED_DP(dev)) { |
7522 | DRM_DEBUG_KMS("probing DP_B\n"); | |
a4fc5ed6 | 7523 | intel_dp_init(dev, DP_B); |
b01f2c3a | 7524 | } |
725e30ad | 7525 | } |
13520b05 KH |
7526 | |
7527 | /* Before G4X SDVOC doesn't have its own detect register */ | |
13520b05 | 7528 | |
b01f2c3a JB |
7529 | if (I915_READ(SDVOB) & SDVO_DETECTED) { |
7530 | DRM_DEBUG_KMS("probing SDVOC\n"); | |
725e30ad | 7531 | found = intel_sdvo_init(dev, SDVOC); |
b01f2c3a | 7532 | } |
27185ae1 ML |
7533 | |
7534 | if (!found && (I915_READ(SDVOC) & SDVO_DETECTED)) { | |
7535 | ||
b01f2c3a JB |
7536 | if (SUPPORTS_INTEGRATED_HDMI(dev)) { |
7537 | DRM_DEBUG_KMS("probing HDMI on SDVOC\n"); | |
725e30ad | 7538 | intel_hdmi_init(dev, SDVOC); |
b01f2c3a JB |
7539 | } |
7540 | if (SUPPORTS_INTEGRATED_DP(dev)) { | |
7541 | DRM_DEBUG_KMS("probing DP_C\n"); | |
a4fc5ed6 | 7542 | intel_dp_init(dev, DP_C); |
b01f2c3a | 7543 | } |
725e30ad | 7544 | } |
27185ae1 | 7545 | |
b01f2c3a JB |
7546 | if (SUPPORTS_INTEGRATED_DP(dev) && |
7547 | (I915_READ(DP_D) & DP_DETECTED)) { | |
7548 | DRM_DEBUG_KMS("probing DP_D\n"); | |
a4fc5ed6 | 7549 | intel_dp_init(dev, DP_D); |
b01f2c3a | 7550 | } |
bad720ff | 7551 | } else if (IS_GEN2(dev)) |
79e53945 JB |
7552 | intel_dvo_init(dev); |
7553 | ||
103a196f | 7554 | if (SUPPORTS_TV(dev)) |
79e53945 JB |
7555 | intel_tv_init(dev); |
7556 | ||
4ef69c7a CW |
7557 | list_for_each_entry(encoder, &dev->mode_config.encoder_list, base.head) { |
7558 | encoder->base.possible_crtcs = encoder->crtc_mask; | |
7559 | encoder->base.possible_clones = | |
7560 | intel_encoder_clones(dev, encoder->clone_mask); | |
79e53945 | 7561 | } |
47356eb6 | 7562 | |
2c7111db CW |
7563 | /* disable all the possible outputs/crtcs before entering KMS mode */ |
7564 | drm_helper_disable_unused_functions(dev); | |
9fb526db KP |
7565 | |
7566 | if (HAS_PCH_SPLIT(dev)) | |
7567 | ironlake_init_pch_refclk(dev); | |
79e53945 JB |
7568 | } |
7569 | ||
7570 | static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) | |
7571 | { | |
7572 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); | |
79e53945 JB |
7573 | |
7574 | drm_framebuffer_cleanup(fb); | |
05394f39 | 7575 | drm_gem_object_unreference_unlocked(&intel_fb->obj->base); |
79e53945 JB |
7576 | |
7577 | kfree(intel_fb); | |
7578 | } | |
7579 | ||
7580 | static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb, | |
05394f39 | 7581 | struct drm_file *file, |
79e53945 JB |
7582 | unsigned int *handle) |
7583 | { | |
7584 | struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb); | |
05394f39 | 7585 | struct drm_i915_gem_object *obj = intel_fb->obj; |
79e53945 | 7586 | |
05394f39 | 7587 | return drm_gem_handle_create(file, &obj->base, handle); |
79e53945 JB |
7588 | } |
7589 | ||
7590 | static const struct drm_framebuffer_funcs intel_fb_funcs = { | |
7591 | .destroy = intel_user_framebuffer_destroy, | |
7592 | .create_handle = intel_user_framebuffer_create_handle, | |
7593 | }; | |
7594 | ||
38651674 DA |
7595 | int intel_framebuffer_init(struct drm_device *dev, |
7596 | struct intel_framebuffer *intel_fb, | |
7597 | struct drm_mode_fb_cmd *mode_cmd, | |
05394f39 | 7598 | struct drm_i915_gem_object *obj) |
79e53945 | 7599 | { |
79e53945 JB |
7600 | int ret; |
7601 | ||
05394f39 | 7602 | if (obj->tiling_mode == I915_TILING_Y) |
57cd6508 CW |
7603 | return -EINVAL; |
7604 | ||
7605 | if (mode_cmd->pitch & 63) | |
7606 | return -EINVAL; | |
7607 | ||
7608 | switch (mode_cmd->bpp) { | |
7609 | case 8: | |
7610 | case 16: | |
b5626747 JB |
7611 | /* Only pre-ILK can handle 5:5:5 */ |
7612 | if (mode_cmd->depth == 15 && !HAS_PCH_SPLIT(dev)) | |
7613 | return -EINVAL; | |
7614 | break; | |
7615 | ||
57cd6508 CW |
7616 | case 24: |
7617 | case 32: | |
7618 | break; | |
7619 | default: | |
7620 | return -EINVAL; | |
7621 | } | |
7622 | ||
79e53945 JB |
7623 | ret = drm_framebuffer_init(dev, &intel_fb->base, &intel_fb_funcs); |
7624 | if (ret) { | |
7625 | DRM_ERROR("framebuffer init failed %d\n", ret); | |
7626 | return ret; | |
7627 | } | |
7628 | ||
7629 | drm_helper_mode_fill_fb_struct(&intel_fb->base, mode_cmd); | |
79e53945 | 7630 | intel_fb->obj = obj; |
79e53945 JB |
7631 | return 0; |
7632 | } | |
7633 | ||
79e53945 JB |
7634 | static struct drm_framebuffer * |
7635 | intel_user_framebuffer_create(struct drm_device *dev, | |
7636 | struct drm_file *filp, | |
7637 | struct drm_mode_fb_cmd *mode_cmd) | |
7638 | { | |
05394f39 | 7639 | struct drm_i915_gem_object *obj; |
79e53945 | 7640 | |
05394f39 | 7641 | obj = to_intel_bo(drm_gem_object_lookup(dev, filp, mode_cmd->handle)); |
c8725226 | 7642 | if (&obj->base == NULL) |
cce13ff7 | 7643 | return ERR_PTR(-ENOENT); |
79e53945 | 7644 | |
d2dff872 | 7645 | return intel_framebuffer_create(dev, mode_cmd, obj); |
79e53945 JB |
7646 | } |
7647 | ||
79e53945 | 7648 | static const struct drm_mode_config_funcs intel_mode_funcs = { |
79e53945 | 7649 | .fb_create = intel_user_framebuffer_create, |
eb1f8e4f | 7650 | .output_poll_changed = intel_fb_output_poll_changed, |
79e53945 JB |
7651 | }; |
7652 | ||
05394f39 | 7653 | static struct drm_i915_gem_object * |
aa40d6bb | 7654 | intel_alloc_context_page(struct drm_device *dev) |
9ea8d059 | 7655 | { |
05394f39 | 7656 | struct drm_i915_gem_object *ctx; |
9ea8d059 CW |
7657 | int ret; |
7658 | ||
2c34b850 BW |
7659 | WARN_ON(!mutex_is_locked(&dev->struct_mutex)); |
7660 | ||
aa40d6bb ZN |
7661 | ctx = i915_gem_alloc_object(dev, 4096); |
7662 | if (!ctx) { | |
9ea8d059 CW |
7663 | DRM_DEBUG("failed to alloc power context, RC6 disabled\n"); |
7664 | return NULL; | |
7665 | } | |
7666 | ||
75e9e915 | 7667 | ret = i915_gem_object_pin(ctx, 4096, true); |
9ea8d059 CW |
7668 | if (ret) { |
7669 | DRM_ERROR("failed to pin power context: %d\n", ret); | |
7670 | goto err_unref; | |
7671 | } | |
7672 | ||
aa40d6bb | 7673 | ret = i915_gem_object_set_to_gtt_domain(ctx, 1); |
9ea8d059 CW |
7674 | if (ret) { |
7675 | DRM_ERROR("failed to set-domain on power context: %d\n", ret); | |
7676 | goto err_unpin; | |
7677 | } | |
9ea8d059 | 7678 | |
aa40d6bb | 7679 | return ctx; |
9ea8d059 CW |
7680 | |
7681 | err_unpin: | |
aa40d6bb | 7682 | i915_gem_object_unpin(ctx); |
9ea8d059 | 7683 | err_unref: |
05394f39 | 7684 | drm_gem_object_unreference(&ctx->base); |
9ea8d059 CW |
7685 | mutex_unlock(&dev->struct_mutex); |
7686 | return NULL; | |
7687 | } | |
7688 | ||
7648fa99 JB |
7689 | bool ironlake_set_drps(struct drm_device *dev, u8 val) |
7690 | { | |
7691 | struct drm_i915_private *dev_priv = dev->dev_private; | |
7692 | u16 rgvswctl; | |
7693 | ||
7694 | rgvswctl = I915_READ16(MEMSWCTL); | |
7695 | if (rgvswctl & MEMCTL_CMD_STS) { | |
7696 | DRM_DEBUG("gpu busy, RCS change rejected\n"); | |
7697 | return false; /* still busy with another command */ | |
7698 | } | |
7699 | ||
7700 | rgvswctl = (MEMCTL_CMD_CHFREQ << MEMCTL_CMD_SHIFT) | | |
7701 | (val << MEMCTL_FREQ_SHIFT) | MEMCTL_SFCAVM; | |
7702 | I915_WRITE16(MEMSWCTL, rgvswctl); | |
7703 | POSTING_READ16(MEMSWCTL); | |
7704 | ||
7705 | rgvswctl |= MEMCTL_CMD_STS; | |
7706 | I915_WRITE16(MEMSWCTL, rgvswctl); | |
7707 | ||
7708 | return true; | |
7709 | } | |
7710 | ||
f97108d1 JB |
7711 | void ironlake_enable_drps(struct drm_device *dev) |
7712 | { | |
7713 | struct drm_i915_private *dev_priv = dev->dev_private; | |
7648fa99 | 7714 | u32 rgvmodectl = I915_READ(MEMMODECTL); |
f97108d1 | 7715 | u8 fmax, fmin, fstart, vstart; |
f97108d1 | 7716 | |
ea056c14 JB |
7717 | /* Enable temp reporting */ |
7718 | I915_WRITE16(PMMISC, I915_READ(PMMISC) | MCPPCE_EN); | |
7719 | I915_WRITE16(TSC1, I915_READ(TSC1) | TSE); | |
7720 | ||
f97108d1 JB |
7721 | /* 100ms RC evaluation intervals */ |
7722 | I915_WRITE(RCUPEI, 100000); | |
7723 | I915_WRITE(RCDNEI, 100000); | |
7724 | ||
7725 | /* Set max/min thresholds to 90ms and 80ms respectively */ | |
7726 | I915_WRITE(RCBMAXAVG, 90000); | |
7727 | I915_WRITE(RCBMINAVG, 80000); | |
7728 | ||
7729 | I915_WRITE(MEMIHYST, 1); | |
7730 | ||
7731 | /* Set up min, max, and cur for interrupt handling */ | |
7732 | fmax = (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT; | |
7733 | fmin = (rgvmodectl & MEMMODE_FMIN_MASK); | |
7734 | fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >> | |
7735 | MEMMODE_FSTART_SHIFT; | |
7648fa99 | 7736 | |
f97108d1 JB |
7737 | vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >> |
7738 | PXVFREQ_PX_SHIFT; | |
7739 | ||
80dbf4b7 | 7740 | dev_priv->fmax = fmax; /* IPS callback will increase this */ |
7648fa99 JB |
7741 | dev_priv->fstart = fstart; |
7742 | ||
80dbf4b7 | 7743 | dev_priv->max_delay = fstart; |
f97108d1 JB |
7744 | dev_priv->min_delay = fmin; |
7745 | dev_priv->cur_delay = fstart; | |
7746 | ||
80dbf4b7 JB |
7747 | DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n", |
7748 | fmax, fmin, fstart); | |
7648fa99 | 7749 | |
f97108d1 JB |
7750 | I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN); |
7751 | ||
7752 | /* | |
7753 | * Interrupts will be enabled in ironlake_irq_postinstall | |
7754 | */ | |
7755 | ||
7756 | I915_WRITE(VIDSTART, vstart); | |
7757 | POSTING_READ(VIDSTART); | |
7758 | ||
7759 | rgvmodectl |= MEMMODE_SWMODE_EN; | |
7760 | I915_WRITE(MEMMODECTL, rgvmodectl); | |
7761 | ||
481b6af3 | 7762 | if (wait_for((I915_READ(MEMSWCTL) & MEMCTL_CMD_STS) == 0, 10)) |
913d8d11 | 7763 | DRM_ERROR("stuck trying to change perf mode\n"); |
f97108d1 JB |
7764 | msleep(1); |
7765 | ||
7648fa99 | 7766 | ironlake_set_drps(dev, fstart); |
f97108d1 | 7767 | |
7648fa99 JB |
7768 | dev_priv->last_count1 = I915_READ(0x112e4) + I915_READ(0x112e8) + |
7769 | I915_READ(0x112e0); | |
7770 | dev_priv->last_time1 = jiffies_to_msecs(jiffies); | |
7771 | dev_priv->last_count2 = I915_READ(0x112f4); | |
7772 | getrawmonotonic(&dev_priv->last_time2); | |
f97108d1 JB |
7773 | } |
7774 | ||
7775 | void ironlake_disable_drps(struct drm_device *dev) | |
7776 | { | |
7777 | struct drm_i915_private *dev_priv = dev->dev_private; | |
7648fa99 | 7778 | u16 rgvswctl = I915_READ16(MEMSWCTL); |
f97108d1 JB |
7779 | |
7780 | /* Ack interrupts, disable EFC interrupt */ | |
7781 | I915_WRITE(MEMINTREN, I915_READ(MEMINTREN) & ~MEMINT_EVAL_CHG_EN); | |
7782 | I915_WRITE(MEMINTRSTS, MEMINT_EVAL_CHG); | |
7783 | I915_WRITE(DEIER, I915_READ(DEIER) & ~DE_PCU_EVENT); | |
7784 | I915_WRITE(DEIIR, DE_PCU_EVENT); | |
7785 | I915_WRITE(DEIMR, I915_READ(DEIMR) | DE_PCU_EVENT); | |
7786 | ||
7787 | /* Go back to the starting frequency */ | |
7648fa99 | 7788 | ironlake_set_drps(dev, dev_priv->fstart); |
f97108d1 JB |
7789 | msleep(1); |
7790 | rgvswctl |= MEMCTL_CMD_STS; | |
7791 | I915_WRITE(MEMSWCTL, rgvswctl); | |
7792 | msleep(1); | |
7793 | ||
7794 | } | |
7795 | ||
3b8d8d91 JB |
7796 | void gen6_set_rps(struct drm_device *dev, u8 val) |
7797 | { | |
7798 | struct drm_i915_private *dev_priv = dev->dev_private; | |
7799 | u32 swreq; | |
7800 | ||
7801 | swreq = (val & 0x3ff) << 25; | |
7802 | I915_WRITE(GEN6_RPNSWREQ, swreq); | |
7803 | } | |
7804 | ||
7805 | void gen6_disable_rps(struct drm_device *dev) | |
7806 | { | |
7807 | struct drm_i915_private *dev_priv = dev->dev_private; | |
7808 | ||
7809 | I915_WRITE(GEN6_RPNSWREQ, 1 << 31); | |
7810 | I915_WRITE(GEN6_PMINTRMSK, 0xffffffff); | |
7811 | I915_WRITE(GEN6_PMIER, 0); | |
6fdd4d98 DV |
7812 | /* Complete PM interrupt masking here doesn't race with the rps work |
7813 | * item again unmasking PM interrupts because that is using a different | |
7814 | * register (PMIMR) to mask PM interrupts. The only risk is in leaving | |
7815 | * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */ | |
4912d041 BW |
7816 | |
7817 | spin_lock_irq(&dev_priv->rps_lock); | |
7818 | dev_priv->pm_iir = 0; | |
7819 | spin_unlock_irq(&dev_priv->rps_lock); | |
7820 | ||
3b8d8d91 JB |
7821 | I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR)); |
7822 | } | |
7823 | ||
7648fa99 JB |
7824 | static unsigned long intel_pxfreq(u32 vidfreq) |
7825 | { | |
7826 | unsigned long freq; | |
7827 | int div = (vidfreq & 0x3f0000) >> 16; | |
7828 | int post = (vidfreq & 0x3000) >> 12; | |
7829 | int pre = (vidfreq & 0x7); | |
7830 | ||
7831 | if (!pre) | |
7832 | return 0; | |
7833 | ||
7834 | freq = ((div * 133333) / ((1<<post) * pre)); | |
7835 | ||
7836 | return freq; | |
7837 | } | |
7838 | ||
7839 | void intel_init_emon(struct drm_device *dev) | |
7840 | { | |
7841 | struct drm_i915_private *dev_priv = dev->dev_private; | |
7842 | u32 lcfuse; | |
7843 | u8 pxw[16]; | |
7844 | int i; | |
7845 | ||
7846 | /* Disable to program */ | |
7847 | I915_WRITE(ECR, 0); | |
7848 | POSTING_READ(ECR); | |
7849 | ||
7850 | /* Program energy weights for various events */ | |
7851 | I915_WRITE(SDEW, 0x15040d00); | |
7852 | I915_WRITE(CSIEW0, 0x007f0000); | |
7853 | I915_WRITE(CSIEW1, 0x1e220004); | |
7854 | I915_WRITE(CSIEW2, 0x04000004); | |
7855 | ||
7856 | for (i = 0; i < 5; i++) | |
7857 | I915_WRITE(PEW + (i * 4), 0); | |
7858 | for (i = 0; i < 3; i++) | |
7859 | I915_WRITE(DEW + (i * 4), 0); | |
7860 | ||
7861 | /* Program P-state weights to account for frequency power adjustment */ | |
7862 | for (i = 0; i < 16; i++) { | |
7863 | u32 pxvidfreq = I915_READ(PXVFREQ_BASE + (i * 4)); | |
7864 | unsigned long freq = intel_pxfreq(pxvidfreq); | |
7865 | unsigned long vid = (pxvidfreq & PXVFREQ_PX_MASK) >> | |
7866 | PXVFREQ_PX_SHIFT; | |
7867 | unsigned long val; | |
7868 | ||
7869 | val = vid * vid; | |
7870 | val *= (freq / 1000); | |
7871 | val *= 255; | |
7872 | val /= (127*127*900); | |
7873 | if (val > 0xff) | |
7874 | DRM_ERROR("bad pxval: %ld\n", val); | |
7875 | pxw[i] = val; | |
7876 | } | |
7877 | /* Render standby states get 0 weight */ | |
7878 | pxw[14] = 0; | |
7879 | pxw[15] = 0; | |
7880 | ||
7881 | for (i = 0; i < 4; i++) { | |
7882 | u32 val = (pxw[i*4] << 24) | (pxw[(i*4)+1] << 16) | | |
7883 | (pxw[(i*4)+2] << 8) | (pxw[(i*4)+3]); | |
7884 | I915_WRITE(PXW + (i * 4), val); | |
7885 | } | |
7886 | ||
7887 | /* Adjust magic regs to magic values (more experimental results) */ | |
7888 | I915_WRITE(OGW0, 0); | |
7889 | I915_WRITE(OGW1, 0); | |
7890 | I915_WRITE(EG0, 0x00007f00); | |
7891 | I915_WRITE(EG1, 0x0000000e); | |
7892 | I915_WRITE(EG2, 0x000e0000); | |
7893 | I915_WRITE(EG3, 0x68000300); | |
7894 | I915_WRITE(EG4, 0x42000000); | |
7895 | I915_WRITE(EG5, 0x00140031); | |
7896 | I915_WRITE(EG6, 0); | |
7897 | I915_WRITE(EG7, 0); | |
7898 | ||
7899 | for (i = 0; i < 8; i++) | |
7900 | I915_WRITE(PXWL + (i * 4), 0); | |
7901 | ||
7902 | /* Enable PMON + select events */ | |
7903 | I915_WRITE(ECR, 0x80000019); | |
7904 | ||
7905 | lcfuse = I915_READ(LCFUSE02); | |
7906 | ||
7907 | dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK); | |
7908 | } | |
7909 | ||
c0f372b3 KP |
7910 | static bool intel_enable_rc6(struct drm_device *dev) |
7911 | { | |
7912 | /* | |
7913 | * Respect the kernel parameter if it is set | |
7914 | */ | |
7915 | if (i915_enable_rc6 >= 0) | |
7916 | return i915_enable_rc6; | |
7917 | ||
7918 | /* | |
7919 | * Disable RC6 on Ironlake | |
7920 | */ | |
7921 | if (INTEL_INFO(dev)->gen == 5) | |
7922 | return 0; | |
7923 | ||
7924 | /* | |
7925 | * Enable rc6 on Sandybridge if DMA remapping is disabled | |
7926 | */ | |
7927 | if (INTEL_INFO(dev)->gen == 6) { | |
7928 | DRM_DEBUG_DRIVER("Sandybridge: intel_iommu_enabled %s -- RC6 %sabled\n", | |
7929 | intel_iommu_enabled ? "true" : "false", | |
7930 | !intel_iommu_enabled ? "en" : "dis"); | |
7931 | return !intel_iommu_enabled; | |
7932 | } | |
7933 | DRM_DEBUG_DRIVER("RC6 enabled\n"); | |
7934 | return 1; | |
7935 | } | |
7936 | ||
3b8d8d91 | 7937 | void gen6_enable_rps(struct drm_i915_private *dev_priv) |
8fd26859 | 7938 | { |
a6044e23 JB |
7939 | u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); |
7940 | u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); | |
7df8721b | 7941 | u32 pcu_mbox, rc6_mask = 0; |
a6044e23 | 7942 | int cur_freq, min_freq, max_freq; |
8fd26859 CW |
7943 | int i; |
7944 | ||
7945 | /* Here begins a magic sequence of register writes to enable | |
7946 | * auto-downclocking. | |
7947 | * | |
7948 | * Perhaps there might be some value in exposing these to | |
7949 | * userspace... | |
7950 | */ | |
7951 | I915_WRITE(GEN6_RC_STATE, 0); | |
d1ebd816 | 7952 | mutex_lock(&dev_priv->dev->struct_mutex); |
fcca7926 | 7953 | gen6_gt_force_wake_get(dev_priv); |
8fd26859 | 7954 | |
3b8d8d91 | 7955 | /* disable the counters and set deterministic thresholds */ |
8fd26859 CW |
7956 | I915_WRITE(GEN6_RC_CONTROL, 0); |
7957 | ||
7958 | I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16); | |
7959 | I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30); | |
7960 | I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30); | |
7961 | I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000); | |
7962 | I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25); | |
7963 | ||
7964 | for (i = 0; i < I915_NUM_RINGS; i++) | |
7965 | I915_WRITE(RING_MAX_IDLE(dev_priv->ring[i].mmio_base), 10); | |
7966 | ||
7967 | I915_WRITE(GEN6_RC_SLEEP, 0); | |
7968 | I915_WRITE(GEN6_RC1e_THRESHOLD, 1000); | |
7969 | I915_WRITE(GEN6_RC6_THRESHOLD, 50000); | |
7970 | I915_WRITE(GEN6_RC6p_THRESHOLD, 100000); | |
7971 | I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */ | |
7972 | ||
c0f372b3 | 7973 | if (intel_enable_rc6(dev_priv->dev)) |
7df8721b JB |
7974 | rc6_mask = GEN6_RC_CTL_RC6p_ENABLE | |
7975 | GEN6_RC_CTL_RC6_ENABLE; | |
7976 | ||
8fd26859 | 7977 | I915_WRITE(GEN6_RC_CONTROL, |
7df8721b | 7978 | rc6_mask | |
9c3d2f7f | 7979 | GEN6_RC_CTL_EI_MODE(1) | |
8fd26859 CW |
7980 | GEN6_RC_CTL_HW_ENABLE); |
7981 | ||
3b8d8d91 | 7982 | I915_WRITE(GEN6_RPNSWREQ, |
8fd26859 CW |
7983 | GEN6_FREQUENCY(10) | |
7984 | GEN6_OFFSET(0) | | |
7985 | GEN6_AGGRESSIVE_TURBO); | |
7986 | I915_WRITE(GEN6_RC_VIDEO_FREQ, | |
7987 | GEN6_FREQUENCY(12)); | |
7988 | ||
7989 | I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000); | |
7990 | I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, | |
7991 | 18 << 24 | | |
7992 | 6 << 16); | |
ccab5c82 JB |
7993 | I915_WRITE(GEN6_RP_UP_THRESHOLD, 10000); |
7994 | I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 1000000); | |
8fd26859 | 7995 | I915_WRITE(GEN6_RP_UP_EI, 100000); |
ccab5c82 | 7996 | I915_WRITE(GEN6_RP_DOWN_EI, 5000000); |
8fd26859 CW |
7997 | I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10); |
7998 | I915_WRITE(GEN6_RP_CONTROL, | |
7999 | GEN6_RP_MEDIA_TURBO | | |
8000 | GEN6_RP_USE_NORMAL_FREQ | | |
8001 | GEN6_RP_MEDIA_IS_GFX | | |
8002 | GEN6_RP_ENABLE | | |
ccab5c82 JB |
8003 | GEN6_RP_UP_BUSY_AVG | |
8004 | GEN6_RP_DOWN_IDLE_CONT); | |
8fd26859 CW |
8005 | |
8006 | if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, | |
8007 | 500)) | |
8008 | DRM_ERROR("timeout waiting for pcode mailbox to become idle\n"); | |
8009 | ||
8010 | I915_WRITE(GEN6_PCODE_DATA, 0); | |
8011 | I915_WRITE(GEN6_PCODE_MAILBOX, | |
8012 | GEN6_PCODE_READY | | |
8013 | GEN6_PCODE_WRITE_MIN_FREQ_TABLE); | |
8014 | if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, | |
8015 | 500)) | |
8016 | DRM_ERROR("timeout waiting for pcode mailbox to finish\n"); | |
8017 | ||
a6044e23 JB |
8018 | min_freq = (rp_state_cap & 0xff0000) >> 16; |
8019 | max_freq = rp_state_cap & 0xff; | |
8020 | cur_freq = (gt_perf_status & 0xff00) >> 8; | |
8021 | ||
8022 | /* Check for overclock support */ | |
8023 | if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, | |
8024 | 500)) | |
8025 | DRM_ERROR("timeout waiting for pcode mailbox to become idle\n"); | |
8026 | I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_READ_OC_PARAMS); | |
8027 | pcu_mbox = I915_READ(GEN6_PCODE_DATA); | |
8028 | if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0, | |
8029 | 500)) | |
8030 | DRM_ERROR("timeout waiting for pcode mailbox to finish\n"); | |
8031 | if (pcu_mbox & (1<<31)) { /* OC supported */ | |
8032 | max_freq = pcu_mbox & 0xff; | |
e281fcaa | 8033 | DRM_DEBUG_DRIVER("overclocking supported, adjusting frequency max to %dMHz\n", pcu_mbox * 50); |
a6044e23 JB |
8034 | } |
8035 | ||
8036 | /* In units of 100MHz */ | |
8037 | dev_priv->max_delay = max_freq; | |
8038 | dev_priv->min_delay = min_freq; | |
8039 | dev_priv->cur_delay = cur_freq; | |
8040 | ||
8fd26859 CW |
8041 | /* requires MSI enabled */ |
8042 | I915_WRITE(GEN6_PMIER, | |
8043 | GEN6_PM_MBOX_EVENT | | |
8044 | GEN6_PM_THERMAL_EVENT | | |
8045 | GEN6_PM_RP_DOWN_TIMEOUT | | |
8046 | GEN6_PM_RP_UP_THRESHOLD | | |
8047 | GEN6_PM_RP_DOWN_THRESHOLD | | |
8048 | GEN6_PM_RP_UP_EI_EXPIRED | | |
8049 | GEN6_PM_RP_DOWN_EI_EXPIRED); | |
4912d041 BW |
8050 | spin_lock_irq(&dev_priv->rps_lock); |
8051 | WARN_ON(dev_priv->pm_iir != 0); | |
3b8d8d91 | 8052 | I915_WRITE(GEN6_PMIMR, 0); |
4912d041 | 8053 | spin_unlock_irq(&dev_priv->rps_lock); |
3b8d8d91 JB |
8054 | /* enable all PM interrupts */ |
8055 | I915_WRITE(GEN6_PMINTRMSK, 0); | |
8fd26859 | 8056 | |
fcca7926 | 8057 | gen6_gt_force_wake_put(dev_priv); |
d1ebd816 | 8058 | mutex_unlock(&dev_priv->dev->struct_mutex); |
8fd26859 CW |
8059 | } |
8060 | ||
23b2f8bb JB |
8061 | void gen6_update_ring_freq(struct drm_i915_private *dev_priv) |
8062 | { | |
8063 | int min_freq = 15; | |
8064 | int gpu_freq, ia_freq, max_ia_freq; | |
8065 | int scaling_factor = 180; | |
8066 | ||
8067 | max_ia_freq = cpufreq_quick_get_max(0); | |
8068 | /* | |
8069 | * Default to measured freq if none found, PCU will ensure we don't go | |
8070 | * over | |
8071 | */ | |
8072 | if (!max_ia_freq) | |
8073 | max_ia_freq = tsc_khz; | |
8074 | ||
8075 | /* Convert from kHz to MHz */ | |
8076 | max_ia_freq /= 1000; | |
8077 | ||
8078 | mutex_lock(&dev_priv->dev->struct_mutex); | |
8079 | ||
8080 | /* | |
8081 | * For each potential GPU frequency, load a ring frequency we'd like | |
8082 | * to use for memory access. We do this by specifying the IA frequency | |
8083 | * the PCU should use as a reference to determine the ring frequency. | |
8084 | */ | |
8085 | for (gpu_freq = dev_priv->max_delay; gpu_freq >= dev_priv->min_delay; | |
8086 | gpu_freq--) { | |
8087 | int diff = dev_priv->max_delay - gpu_freq; | |
8088 | ||
8089 | /* | |
8090 | * For GPU frequencies less than 750MHz, just use the lowest | |
8091 | * ring freq. | |
8092 | */ | |
8093 | if (gpu_freq < min_freq) | |
8094 | ia_freq = 800; | |
8095 | else | |
8096 | ia_freq = max_ia_freq - ((diff * scaling_factor) / 2); | |
8097 | ia_freq = DIV_ROUND_CLOSEST(ia_freq, 100); | |
8098 | ||
8099 | I915_WRITE(GEN6_PCODE_DATA, | |
8100 | (ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT) | | |
8101 | gpu_freq); | |
8102 | I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | | |
8103 | GEN6_PCODE_WRITE_MIN_FREQ_TABLE); | |
8104 | if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & | |
8105 | GEN6_PCODE_READY) == 0, 10)) { | |
8106 | DRM_ERROR("pcode write of freq table timed out\n"); | |
8107 | continue; | |
8108 | } | |
8109 | } | |
8110 | ||
8111 | mutex_unlock(&dev_priv->dev->struct_mutex); | |
8112 | } | |
8113 | ||
6067aaea JB |
8114 | static void ironlake_init_clock_gating(struct drm_device *dev) |
8115 | { | |
8116 | struct drm_i915_private *dev_priv = dev->dev_private; | |
8117 | uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE; | |
8118 | ||
8119 | /* Required for FBC */ | |
8120 | dspclk_gate |= DPFCUNIT_CLOCK_GATE_DISABLE | | |
8121 | DPFCRUNIT_CLOCK_GATE_DISABLE | | |
8122 | DPFDUNIT_CLOCK_GATE_DISABLE; | |
8123 | /* Required for CxSR */ | |
8124 | dspclk_gate |= DPARBUNIT_CLOCK_GATE_DISABLE; | |
8125 | ||
8126 | I915_WRITE(PCH_3DCGDIS0, | |
8127 | MARIUNIT_CLOCK_GATE_DISABLE | | |
8128 | SVSMUNIT_CLOCK_GATE_DISABLE); | |
8129 | I915_WRITE(PCH_3DCGDIS1, | |
8130 | VFMUNIT_CLOCK_GATE_DISABLE); | |
8131 | ||
8132 | I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate); | |
8133 | ||
6067aaea JB |
8134 | /* |
8135 | * According to the spec the following bits should be set in | |
8136 | * order to enable memory self-refresh | |
8137 | * The bit 22/21 of 0x42004 | |
8138 | * The bit 5 of 0x42020 | |
8139 | * The bit 15 of 0x45000 | |
8140 | */ | |
8141 | I915_WRITE(ILK_DISPLAY_CHICKEN2, | |
8142 | (I915_READ(ILK_DISPLAY_CHICKEN2) | | |
8143 | ILK_DPARB_GATE | ILK_VSDPFD_FULL)); | |
8144 | I915_WRITE(ILK_DSPCLK_GATE, | |
8145 | (I915_READ(ILK_DSPCLK_GATE) | | |
8146 | ILK_DPARB_CLK_GATE)); | |
8147 | I915_WRITE(DISP_ARB_CTL, | |
8148 | (I915_READ(DISP_ARB_CTL) | | |
8149 | DISP_FBC_WM_DIS)); | |
8150 | I915_WRITE(WM3_LP_ILK, 0); | |
8151 | I915_WRITE(WM2_LP_ILK, 0); | |
8152 | I915_WRITE(WM1_LP_ILK, 0); | |
8153 | ||
8154 | /* | |
8155 | * Based on the document from hardware guys the following bits | |
8156 | * should be set unconditionally in order to enable FBC. | |
8157 | * The bit 22 of 0x42000 | |
8158 | * The bit 22 of 0x42004 | |
8159 | * The bit 7,8,9 of 0x42020. | |
8160 | */ | |
8161 | if (IS_IRONLAKE_M(dev)) { | |
8162 | I915_WRITE(ILK_DISPLAY_CHICKEN1, | |
8163 | I915_READ(ILK_DISPLAY_CHICKEN1) | | |
8164 | ILK_FBCQ_DIS); | |
8165 | I915_WRITE(ILK_DISPLAY_CHICKEN2, | |
8166 | I915_READ(ILK_DISPLAY_CHICKEN2) | | |
8167 | ILK_DPARB_GATE); | |
8168 | I915_WRITE(ILK_DSPCLK_GATE, | |
8169 | I915_READ(ILK_DSPCLK_GATE) | | |
8170 | ILK_DPFC_DIS1 | | |
8171 | ILK_DPFC_DIS2 | | |
8172 | ILK_CLK_FBC); | |
8173 | } | |
8174 | ||
8175 | I915_WRITE(ILK_DISPLAY_CHICKEN2, | |
8176 | I915_READ(ILK_DISPLAY_CHICKEN2) | | |
8177 | ILK_ELPIN_409_SELECT); | |
8178 | I915_WRITE(_3D_CHICKEN2, | |
8179 | _3D_CHICKEN2_WM_READ_PIPELINED << 16 | | |
8180 | _3D_CHICKEN2_WM_READ_PIPELINED); | |
8fd26859 CW |
8181 | } |
8182 | ||
6067aaea | 8183 | static void gen6_init_clock_gating(struct drm_device *dev) |
652c393a JB |
8184 | { |
8185 | struct drm_i915_private *dev_priv = dev->dev_private; | |
9db4a9c7 | 8186 | int pipe; |
6067aaea JB |
8187 | uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE; |
8188 | ||
8189 | I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate); | |
652c393a | 8190 | |
6067aaea JB |
8191 | I915_WRITE(ILK_DISPLAY_CHICKEN2, |
8192 | I915_READ(ILK_DISPLAY_CHICKEN2) | | |
8193 | ILK_ELPIN_409_SELECT); | |
8956c8bb | 8194 | |
6067aaea JB |
8195 | I915_WRITE(WM3_LP_ILK, 0); |
8196 | I915_WRITE(WM2_LP_ILK, 0); | |
8197 | I915_WRITE(WM1_LP_ILK, 0); | |
652c393a | 8198 | |
406478dc EA |
8199 | /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock |
8200 | * gating disable must be set. Failure to set it results in | |
8201 | * flickering pixels due to Z write ordering failures after | |
8202 | * some amount of runtime in the Mesa "fire" demo, and Unigine | |
8203 | * Sanctuary and Tropics, and apparently anything else with | |
8204 | * alpha test or pixel discard. | |
9ca1d10d EA |
8205 | * |
8206 | * According to the spec, bit 11 (RCCUNIT) must also be set, | |
8207 | * but we didn't debug actual testcases to find it out. | |
406478dc | 8208 | */ |
9ca1d10d EA |
8209 | I915_WRITE(GEN6_UCGCTL2, |
8210 | GEN6_RCPBUNIT_CLOCK_GATE_DISABLE | | |
8211 | GEN6_RCCUNIT_CLOCK_GATE_DISABLE); | |
406478dc | 8212 | |
652c393a | 8213 | /* |
6067aaea JB |
8214 | * According to the spec the following bits should be |
8215 | * set in order to enable memory self-refresh and fbc: | |
8216 | * The bit21 and bit22 of 0x42000 | |
8217 | * The bit21 and bit22 of 0x42004 | |
8218 | * The bit5 and bit7 of 0x42020 | |
8219 | * The bit14 of 0x70180 | |
8220 | * The bit14 of 0x71180 | |
652c393a | 8221 | */ |
6067aaea JB |
8222 | I915_WRITE(ILK_DISPLAY_CHICKEN1, |
8223 | I915_READ(ILK_DISPLAY_CHICKEN1) | | |
8224 | ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS); | |
8225 | I915_WRITE(ILK_DISPLAY_CHICKEN2, | |
8226 | I915_READ(ILK_DISPLAY_CHICKEN2) | | |
8227 | ILK_DPARB_GATE | ILK_VSDPFD_FULL); | |
8228 | I915_WRITE(ILK_DSPCLK_GATE, | |
8229 | I915_READ(ILK_DSPCLK_GATE) | | |
8230 | ILK_DPARB_CLK_GATE | | |
8231 | ILK_DPFD_CLK_GATE); | |
8956c8bb | 8232 | |
d74362c9 | 8233 | for_each_pipe(pipe) { |
6067aaea JB |
8234 | I915_WRITE(DSPCNTR(pipe), |
8235 | I915_READ(DSPCNTR(pipe)) | | |
8236 | DISPPLANE_TRICKLE_FEED_DISABLE); | |
d74362c9 KP |
8237 | intel_flush_display_plane(dev_priv, pipe); |
8238 | } | |
6067aaea | 8239 | } |
8956c8bb | 8240 | |
28963a3e JB |
8241 | static void ivybridge_init_clock_gating(struct drm_device *dev) |
8242 | { | |
8243 | struct drm_i915_private *dev_priv = dev->dev_private; | |
8244 | int pipe; | |
8245 | uint32_t dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE; | |
7f8a8569 | 8246 | |
28963a3e | 8247 | I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate); |
382b0936 | 8248 | |
28963a3e JB |
8249 | I915_WRITE(WM3_LP_ILK, 0); |
8250 | I915_WRITE(WM2_LP_ILK, 0); | |
8251 | I915_WRITE(WM1_LP_ILK, 0); | |
de6e2eaf | 8252 | |
28963a3e | 8253 | I915_WRITE(ILK_DSPCLK_GATE, IVB_VRHUNIT_CLK_GATE); |
67e92af0 | 8254 | |
d74362c9 | 8255 | for_each_pipe(pipe) { |
28963a3e JB |
8256 | I915_WRITE(DSPCNTR(pipe), |
8257 | I915_READ(DSPCNTR(pipe)) | | |
8258 | DISPPLANE_TRICKLE_FEED_DISABLE); | |
d74362c9 KP |
8259 | intel_flush_display_plane(dev_priv, pipe); |
8260 | } | |
28963a3e JB |
8261 | } |
8262 | ||
6067aaea JB |
8263 | static void g4x_init_clock_gating(struct drm_device *dev) |
8264 | { | |
8265 | struct drm_i915_private *dev_priv = dev->dev_private; | |
8266 | uint32_t dspclk_gate; | |
8fd26859 | 8267 | |
6067aaea JB |
8268 | I915_WRITE(RENCLK_GATE_D1, 0); |
8269 | I915_WRITE(RENCLK_GATE_D2, VF_UNIT_CLOCK_GATE_DISABLE | | |
8270 | GS_UNIT_CLOCK_GATE_DISABLE | | |
8271 | CL_UNIT_CLOCK_GATE_DISABLE); | |
8272 | I915_WRITE(RAMCLK_GATE_D, 0); | |
8273 | dspclk_gate = VRHUNIT_CLOCK_GATE_DISABLE | | |
8274 | OVRUNIT_CLOCK_GATE_DISABLE | | |
8275 | OVCUNIT_CLOCK_GATE_DISABLE; | |
8276 | if (IS_GM45(dev)) | |
8277 | dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE; | |
8278 | I915_WRITE(DSPCLK_GATE_D, dspclk_gate); | |
8279 | } | |
1398261a | 8280 | |
6067aaea JB |
8281 | static void crestline_init_clock_gating(struct drm_device *dev) |
8282 | { | |
8283 | struct drm_i915_private *dev_priv = dev->dev_private; | |
652c393a | 8284 | |
6067aaea JB |
8285 | I915_WRITE(RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE); |
8286 | I915_WRITE(RENCLK_GATE_D2, 0); | |
8287 | I915_WRITE(DSPCLK_GATE_D, 0); | |
8288 | I915_WRITE(RAMCLK_GATE_D, 0); | |
8289 | I915_WRITE16(DEUC, 0); | |
8290 | } | |
652c393a | 8291 | |
6067aaea JB |
8292 | static void broadwater_init_clock_gating(struct drm_device *dev) |
8293 | { | |
8294 | struct drm_i915_private *dev_priv = dev->dev_private; | |
8295 | ||
8296 | I915_WRITE(RENCLK_GATE_D1, I965_RCZ_CLOCK_GATE_DISABLE | | |
8297 | I965_RCC_CLOCK_GATE_DISABLE | | |
8298 | I965_RCPB_CLOCK_GATE_DISABLE | | |
8299 | I965_ISC_CLOCK_GATE_DISABLE | | |
8300 | I965_FBC_CLOCK_GATE_DISABLE); | |
8301 | I915_WRITE(RENCLK_GATE_D2, 0); | |
8302 | } | |
8303 | ||
8304 | static void gen3_init_clock_gating(struct drm_device *dev) | |
8305 | { | |
8306 | struct drm_i915_private *dev_priv = dev->dev_private; | |
8307 | u32 dstate = I915_READ(D_STATE); | |
8308 | ||
8309 | dstate |= DSTATE_PLL_D3_OFF | DSTATE_GFX_CLOCK_GATING | | |
8310 | DSTATE_DOT_CLOCK_GATING; | |
8311 | I915_WRITE(D_STATE, dstate); | |
8312 | } | |
8313 | ||
8314 | static void i85x_init_clock_gating(struct drm_device *dev) | |
8315 | { | |
8316 | struct drm_i915_private *dev_priv = dev->dev_private; | |
8317 | ||
8318 | I915_WRITE(RENCLK_GATE_D1, SV_CLOCK_GATE_DISABLE); | |
8319 | } | |
8320 | ||
8321 | static void i830_init_clock_gating(struct drm_device *dev) | |
8322 | { | |
8323 | struct drm_i915_private *dev_priv = dev->dev_private; | |
8324 | ||
8325 | I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE); | |
652c393a JB |
8326 | } |
8327 | ||
645c62a5 JB |
8328 | static void ibx_init_clock_gating(struct drm_device *dev) |
8329 | { | |
8330 | struct drm_i915_private *dev_priv = dev->dev_private; | |
8331 | ||
8332 | /* | |
8333 | * On Ibex Peak and Cougar Point, we need to disable clock | |
8334 | * gating for the panel power sequencer or it will fail to | |
8335 | * start up when no ports are active. | |
8336 | */ | |
8337 | I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE); | |
8338 | } | |
8339 | ||
8340 | static void cpt_init_clock_gating(struct drm_device *dev) | |
8341 | { | |
8342 | struct drm_i915_private *dev_priv = dev->dev_private; | |
3bcf603f | 8343 | int pipe; |
645c62a5 JB |
8344 | |
8345 | /* | |
8346 | * On Ibex Peak and Cougar Point, we need to disable clock | |
8347 | * gating for the panel power sequencer or it will fail to | |
8348 | * start up when no ports are active. | |
8349 | */ | |
8350 | I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE); | |
8351 | I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) | | |
8352 | DPLS_EDP_PPS_FIX_DIS); | |
3bcf603f JB |
8353 | /* Without this, mode sets may fail silently on FDI */ |
8354 | for_each_pipe(pipe) | |
8355 | I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_AUTOTRAIN_GEN_STALL_DIS); | |
652c393a JB |
8356 | } |
8357 | ||
ac668088 | 8358 | static void ironlake_teardown_rc6(struct drm_device *dev) |
0cdab21f CW |
8359 | { |
8360 | struct drm_i915_private *dev_priv = dev->dev_private; | |
8361 | ||
8362 | if (dev_priv->renderctx) { | |
ac668088 CW |
8363 | i915_gem_object_unpin(dev_priv->renderctx); |
8364 | drm_gem_object_unreference(&dev_priv->renderctx->base); | |
0cdab21f CW |
8365 | dev_priv->renderctx = NULL; |
8366 | } | |
8367 | ||
8368 | if (dev_priv->pwrctx) { | |
ac668088 CW |
8369 | i915_gem_object_unpin(dev_priv->pwrctx); |
8370 | drm_gem_object_unreference(&dev_priv->pwrctx->base); | |
8371 | dev_priv->pwrctx = NULL; | |
8372 | } | |
8373 | } | |
8374 | ||
8375 | static void ironlake_disable_rc6(struct drm_device *dev) | |
8376 | { | |
8377 | struct drm_i915_private *dev_priv = dev->dev_private; | |
8378 | ||
8379 | if (I915_READ(PWRCTXA)) { | |
8380 | /* Wake the GPU, prevent RC6, then restore RSTDBYCTL */ | |
8381 | I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT); | |
8382 | wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON), | |
8383 | 50); | |
0cdab21f CW |
8384 | |
8385 | I915_WRITE(PWRCTXA, 0); | |
8386 | POSTING_READ(PWRCTXA); | |
8387 | ||
ac668088 CW |
8388 | I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT); |
8389 | POSTING_READ(RSTDBYCTL); | |
0cdab21f | 8390 | } |
ac668088 | 8391 | |
99507307 | 8392 | ironlake_teardown_rc6(dev); |
0cdab21f CW |
8393 | } |
8394 | ||
ac668088 | 8395 | static int ironlake_setup_rc6(struct drm_device *dev) |
d5bb081b JB |
8396 | { |
8397 | struct drm_i915_private *dev_priv = dev->dev_private; | |
8398 | ||
ac668088 CW |
8399 | if (dev_priv->renderctx == NULL) |
8400 | dev_priv->renderctx = intel_alloc_context_page(dev); | |
8401 | if (!dev_priv->renderctx) | |
8402 | return -ENOMEM; | |
8403 | ||
8404 | if (dev_priv->pwrctx == NULL) | |
8405 | dev_priv->pwrctx = intel_alloc_context_page(dev); | |
8406 | if (!dev_priv->pwrctx) { | |
8407 | ironlake_teardown_rc6(dev); | |
8408 | return -ENOMEM; | |
8409 | } | |
8410 | ||
8411 | return 0; | |
d5bb081b JB |
8412 | } |
8413 | ||
8414 | void ironlake_enable_rc6(struct drm_device *dev) | |
8415 | { | |
8416 | struct drm_i915_private *dev_priv = dev->dev_private; | |
8417 | int ret; | |
8418 | ||
ac668088 CW |
8419 | /* rc6 disabled by default due to repeated reports of hanging during |
8420 | * boot and resume. | |
8421 | */ | |
c0f372b3 | 8422 | if (!intel_enable_rc6(dev)) |
ac668088 CW |
8423 | return; |
8424 | ||
2c34b850 | 8425 | mutex_lock(&dev->struct_mutex); |
ac668088 | 8426 | ret = ironlake_setup_rc6(dev); |
2c34b850 BW |
8427 | if (ret) { |
8428 | mutex_unlock(&dev->struct_mutex); | |
ac668088 | 8429 | return; |
2c34b850 | 8430 | } |
ac668088 | 8431 | |
d5bb081b JB |
8432 | /* |
8433 | * GPU can automatically power down the render unit if given a page | |
8434 | * to save state. | |
8435 | */ | |
8436 | ret = BEGIN_LP_RING(6); | |
8437 | if (ret) { | |
ac668088 | 8438 | ironlake_teardown_rc6(dev); |
2c34b850 | 8439 | mutex_unlock(&dev->struct_mutex); |
d5bb081b JB |
8440 | return; |
8441 | } | |
ac668088 | 8442 | |
d5bb081b JB |
8443 | OUT_RING(MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN); |
8444 | OUT_RING(MI_SET_CONTEXT); | |
8445 | OUT_RING(dev_priv->renderctx->gtt_offset | | |
8446 | MI_MM_SPACE_GTT | | |
8447 | MI_SAVE_EXT_STATE_EN | | |
8448 | MI_RESTORE_EXT_STATE_EN | | |
8449 | MI_RESTORE_INHIBIT); | |
8450 | OUT_RING(MI_SUSPEND_FLUSH); | |
8451 | OUT_RING(MI_NOOP); | |
8452 | OUT_RING(MI_FLUSH); | |
8453 | ADVANCE_LP_RING(); | |
8454 | ||
4a246cfc BW |
8455 | /* |
8456 | * Wait for the command parser to advance past MI_SET_CONTEXT. The HW | |
8457 | * does an implicit flush, combined with MI_FLUSH above, it should be | |
8458 | * safe to assume that renderctx is valid | |
8459 | */ | |
8460 | ret = intel_wait_ring_idle(LP_RING(dev_priv)); | |
8461 | if (ret) { | |
8462 | DRM_ERROR("failed to enable ironlake power power savings\n"); | |
8463 | ironlake_teardown_rc6(dev); | |
8464 | mutex_unlock(&dev->struct_mutex); | |
8465 | return; | |
8466 | } | |
8467 | ||
d5bb081b JB |
8468 | I915_WRITE(PWRCTXA, dev_priv->pwrctx->gtt_offset | PWRCTX_EN); |
8469 | I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT); | |
2c34b850 | 8470 | mutex_unlock(&dev->struct_mutex); |
d5bb081b JB |
8471 | } |
8472 | ||
645c62a5 JB |
8473 | void intel_init_clock_gating(struct drm_device *dev) |
8474 | { | |
8475 | struct drm_i915_private *dev_priv = dev->dev_private; | |
8476 | ||
8477 | dev_priv->display.init_clock_gating(dev); | |
8478 | ||
8479 | if (dev_priv->display.init_pch_clock_gating) | |
8480 | dev_priv->display.init_pch_clock_gating(dev); | |
8481 | } | |
ac668088 | 8482 | |
e70236a8 JB |
8483 | /* Set up chip specific display functions */ |
8484 | static void intel_init_display(struct drm_device *dev) | |
8485 | { | |
8486 | struct drm_i915_private *dev_priv = dev->dev_private; | |
8487 | ||
8488 | /* We always want a DPMS function */ | |
f564048e | 8489 | if (HAS_PCH_SPLIT(dev)) { |
f2b115e6 | 8490 | dev_priv->display.dpms = ironlake_crtc_dpms; |
f564048e | 8491 | dev_priv->display.crtc_mode_set = ironlake_crtc_mode_set; |
17638cd6 | 8492 | dev_priv->display.update_plane = ironlake_update_plane; |
f564048e | 8493 | } else { |
e70236a8 | 8494 | dev_priv->display.dpms = i9xx_crtc_dpms; |
f564048e | 8495 | dev_priv->display.crtc_mode_set = i9xx_crtc_mode_set; |
17638cd6 | 8496 | dev_priv->display.update_plane = i9xx_update_plane; |
f564048e | 8497 | } |
e70236a8 | 8498 | |
ee5382ae | 8499 | if (I915_HAS_FBC(dev)) { |
9c04f015 | 8500 | if (HAS_PCH_SPLIT(dev)) { |
b52eb4dc ZY |
8501 | dev_priv->display.fbc_enabled = ironlake_fbc_enabled; |
8502 | dev_priv->display.enable_fbc = ironlake_enable_fbc; | |
8503 | dev_priv->display.disable_fbc = ironlake_disable_fbc; | |
8504 | } else if (IS_GM45(dev)) { | |
74dff282 JB |
8505 | dev_priv->display.fbc_enabled = g4x_fbc_enabled; |
8506 | dev_priv->display.enable_fbc = g4x_enable_fbc; | |
8507 | dev_priv->display.disable_fbc = g4x_disable_fbc; | |
a6c45cf0 | 8508 | } else if (IS_CRESTLINE(dev)) { |
e70236a8 JB |
8509 | dev_priv->display.fbc_enabled = i8xx_fbc_enabled; |
8510 | dev_priv->display.enable_fbc = i8xx_enable_fbc; | |
8511 | dev_priv->display.disable_fbc = i8xx_disable_fbc; | |
8512 | } | |
74dff282 | 8513 | /* 855GM needs testing */ |
e70236a8 JB |
8514 | } |
8515 | ||
8516 | /* Returns the core display clock speed */ | |
0206e353 | 8517 | if (IS_I945G(dev) || (IS_G33(dev) && !IS_PINEVIEW_M(dev))) |
e70236a8 JB |
8518 | dev_priv->display.get_display_clock_speed = |
8519 | i945_get_display_clock_speed; | |
8520 | else if (IS_I915G(dev)) | |
8521 | dev_priv->display.get_display_clock_speed = | |
8522 | i915_get_display_clock_speed; | |
f2b115e6 | 8523 | else if (IS_I945GM(dev) || IS_845G(dev) || IS_PINEVIEW_M(dev)) |
e70236a8 JB |
8524 | dev_priv->display.get_display_clock_speed = |
8525 | i9xx_misc_get_display_clock_speed; | |
8526 | else if (IS_I915GM(dev)) | |
8527 | dev_priv->display.get_display_clock_speed = | |
8528 | i915gm_get_display_clock_speed; | |
8529 | else if (IS_I865G(dev)) | |
8530 | dev_priv->display.get_display_clock_speed = | |
8531 | i865_get_display_clock_speed; | |
f0f8a9ce | 8532 | else if (IS_I85X(dev)) |
e70236a8 JB |
8533 | dev_priv->display.get_display_clock_speed = |
8534 | i855_get_display_clock_speed; | |
8535 | else /* 852, 830 */ | |
8536 | dev_priv->display.get_display_clock_speed = | |
8537 | i830_get_display_clock_speed; | |
8538 | ||
8539 | /* For FIFO watermark updates */ | |
7f8a8569 | 8540 | if (HAS_PCH_SPLIT(dev)) { |
8d715f00 KP |
8541 | dev_priv->display.force_wake_get = __gen6_gt_force_wake_get; |
8542 | dev_priv->display.force_wake_put = __gen6_gt_force_wake_put; | |
8543 | ||
8544 | /* IVB configs may use multi-threaded forcewake */ | |
8545 | if (IS_IVYBRIDGE(dev)) { | |
8546 | u32 ecobus; | |
8547 | ||
8548 | mutex_lock(&dev->struct_mutex); | |
8549 | __gen6_gt_force_wake_mt_get(dev_priv); | |
8550 | ecobus = I915_READ(ECOBUS); | |
8551 | __gen6_gt_force_wake_mt_put(dev_priv); | |
8552 | mutex_unlock(&dev->struct_mutex); | |
8553 | ||
8554 | if (ecobus & FORCEWAKE_MT_ENABLE) { | |
8555 | DRM_DEBUG_KMS("Using MT version of forcewake\n"); | |
8556 | dev_priv->display.force_wake_get = | |
8557 | __gen6_gt_force_wake_mt_get; | |
8558 | dev_priv->display.force_wake_put = | |
8559 | __gen6_gt_force_wake_mt_put; | |
8560 | } | |
8561 | } | |
8562 | ||
645c62a5 JB |
8563 | if (HAS_PCH_IBX(dev)) |
8564 | dev_priv->display.init_pch_clock_gating = ibx_init_clock_gating; | |
8565 | else if (HAS_PCH_CPT(dev)) | |
8566 | dev_priv->display.init_pch_clock_gating = cpt_init_clock_gating; | |
8567 | ||
f00a3ddf | 8568 | if (IS_GEN5(dev)) { |
7f8a8569 ZW |
8569 | if (I915_READ(MLTR_ILK) & ILK_SRLT_MASK) |
8570 | dev_priv->display.update_wm = ironlake_update_wm; | |
8571 | else { | |
8572 | DRM_DEBUG_KMS("Failed to get proper latency. " | |
8573 | "Disable CxSR\n"); | |
8574 | dev_priv->display.update_wm = NULL; | |
1398261a | 8575 | } |
674cf967 | 8576 | dev_priv->display.fdi_link_train = ironlake_fdi_link_train; |
6067aaea | 8577 | dev_priv->display.init_clock_gating = ironlake_init_clock_gating; |
e0dac65e | 8578 | dev_priv->display.write_eld = ironlake_write_eld; |
1398261a YL |
8579 | } else if (IS_GEN6(dev)) { |
8580 | if (SNB_READ_WM0_LATENCY()) { | |
8581 | dev_priv->display.update_wm = sandybridge_update_wm; | |
8582 | } else { | |
8583 | DRM_DEBUG_KMS("Failed to read display plane latency. " | |
8584 | "Disable CxSR\n"); | |
8585 | dev_priv->display.update_wm = NULL; | |
7f8a8569 | 8586 | } |
674cf967 | 8587 | dev_priv->display.fdi_link_train = gen6_fdi_link_train; |
6067aaea | 8588 | dev_priv->display.init_clock_gating = gen6_init_clock_gating; |
e0dac65e | 8589 | dev_priv->display.write_eld = ironlake_write_eld; |
357555c0 JB |
8590 | } else if (IS_IVYBRIDGE(dev)) { |
8591 | /* FIXME: detect B0+ stepping and use auto training */ | |
8592 | dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train; | |
fe100d4d JB |
8593 | if (SNB_READ_WM0_LATENCY()) { |
8594 | dev_priv->display.update_wm = sandybridge_update_wm; | |
8595 | } else { | |
8596 | DRM_DEBUG_KMS("Failed to read display plane latency. " | |
8597 | "Disable CxSR\n"); | |
8598 | dev_priv->display.update_wm = NULL; | |
8599 | } | |
28963a3e | 8600 | dev_priv->display.init_clock_gating = ivybridge_init_clock_gating; |
e0dac65e | 8601 | dev_priv->display.write_eld = ironlake_write_eld; |
7f8a8569 ZW |
8602 | } else |
8603 | dev_priv->display.update_wm = NULL; | |
8604 | } else if (IS_PINEVIEW(dev)) { | |
d4294342 | 8605 | if (!intel_get_cxsr_latency(IS_PINEVIEW_G(dev), |
95534263 | 8606 | dev_priv->is_ddr3, |
d4294342 ZY |
8607 | dev_priv->fsb_freq, |
8608 | dev_priv->mem_freq)) { | |
8609 | DRM_INFO("failed to find known CxSR latency " | |
95534263 | 8610 | "(found ddr%s fsb freq %d, mem freq %d), " |
d4294342 | 8611 | "disabling CxSR\n", |
0206e353 | 8612 | (dev_priv->is_ddr3 == 1) ? "3" : "2", |
d4294342 ZY |
8613 | dev_priv->fsb_freq, dev_priv->mem_freq); |
8614 | /* Disable CxSR and never update its watermark again */ | |
8615 | pineview_disable_cxsr(dev); | |
8616 | dev_priv->display.update_wm = NULL; | |
8617 | } else | |
8618 | dev_priv->display.update_wm = pineview_update_wm; | |
95e0ee92 | 8619 | dev_priv->display.init_clock_gating = gen3_init_clock_gating; |
6067aaea | 8620 | } else if (IS_G4X(dev)) { |
e0dac65e | 8621 | dev_priv->display.write_eld = g4x_write_eld; |
e70236a8 | 8622 | dev_priv->display.update_wm = g4x_update_wm; |
6067aaea JB |
8623 | dev_priv->display.init_clock_gating = g4x_init_clock_gating; |
8624 | } else if (IS_GEN4(dev)) { | |
e70236a8 | 8625 | dev_priv->display.update_wm = i965_update_wm; |
6067aaea JB |
8626 | if (IS_CRESTLINE(dev)) |
8627 | dev_priv->display.init_clock_gating = crestline_init_clock_gating; | |
8628 | else if (IS_BROADWATER(dev)) | |
8629 | dev_priv->display.init_clock_gating = broadwater_init_clock_gating; | |
8630 | } else if (IS_GEN3(dev)) { | |
e70236a8 JB |
8631 | dev_priv->display.update_wm = i9xx_update_wm; |
8632 | dev_priv->display.get_fifo_size = i9xx_get_fifo_size; | |
6067aaea JB |
8633 | dev_priv->display.init_clock_gating = gen3_init_clock_gating; |
8634 | } else if (IS_I865G(dev)) { | |
8635 | dev_priv->display.update_wm = i830_update_wm; | |
8636 | dev_priv->display.init_clock_gating = i85x_init_clock_gating; | |
8637 | dev_priv->display.get_fifo_size = i830_get_fifo_size; | |
8f4695ed AJ |
8638 | } else if (IS_I85X(dev)) { |
8639 | dev_priv->display.update_wm = i9xx_update_wm; | |
8640 | dev_priv->display.get_fifo_size = i85x_get_fifo_size; | |
6067aaea | 8641 | dev_priv->display.init_clock_gating = i85x_init_clock_gating; |
e70236a8 | 8642 | } else { |
8f4695ed | 8643 | dev_priv->display.update_wm = i830_update_wm; |
6067aaea | 8644 | dev_priv->display.init_clock_gating = i830_init_clock_gating; |
8f4695ed | 8645 | if (IS_845G(dev)) |
e70236a8 JB |
8646 | dev_priv->display.get_fifo_size = i845_get_fifo_size; |
8647 | else | |
8648 | dev_priv->display.get_fifo_size = i830_get_fifo_size; | |
e70236a8 | 8649 | } |
8c9f3aaf JB |
8650 | |
8651 | /* Default just returns -ENODEV to indicate unsupported */ | |
8652 | dev_priv->display.queue_flip = intel_default_queue_flip; | |
8653 | ||
8654 | switch (INTEL_INFO(dev)->gen) { | |
8655 | case 2: | |
8656 | dev_priv->display.queue_flip = intel_gen2_queue_flip; | |
8657 | break; | |
8658 | ||
8659 | case 3: | |
8660 | dev_priv->display.queue_flip = intel_gen3_queue_flip; | |
8661 | break; | |
8662 | ||
8663 | case 4: | |
8664 | case 5: | |
8665 | dev_priv->display.queue_flip = intel_gen4_queue_flip; | |
8666 | break; | |
8667 | ||
8668 | case 6: | |
8669 | dev_priv->display.queue_flip = intel_gen6_queue_flip; | |
8670 | break; | |
7c9017e5 JB |
8671 | case 7: |
8672 | dev_priv->display.queue_flip = intel_gen7_queue_flip; | |
8673 | break; | |
8c9f3aaf | 8674 | } |
e70236a8 JB |
8675 | } |
8676 | ||
b690e96c JB |
8677 | /* |
8678 | * Some BIOSes insist on assuming the GPU's pipe A is enabled at suspend, | |
8679 | * resume, or other times. This quirk makes sure that's the case for | |
8680 | * affected systems. | |
8681 | */ | |
0206e353 | 8682 | static void quirk_pipea_force(struct drm_device *dev) |
b690e96c JB |
8683 | { |
8684 | struct drm_i915_private *dev_priv = dev->dev_private; | |
8685 | ||
8686 | dev_priv->quirks |= QUIRK_PIPEA_FORCE; | |
8687 | DRM_DEBUG_DRIVER("applying pipe a force quirk\n"); | |
8688 | } | |
8689 | ||
435793df KP |
8690 | /* |
8691 | * Some machines (Lenovo U160) do not work with SSC on LVDS for some reason | |
8692 | */ | |
8693 | static void quirk_ssc_force_disable(struct drm_device *dev) | |
8694 | { | |
8695 | struct drm_i915_private *dev_priv = dev->dev_private; | |
8696 | dev_priv->quirks |= QUIRK_LVDS_SSC_DISABLE; | |
8697 | } | |
8698 | ||
b690e96c JB |
8699 | struct intel_quirk { |
8700 | int device; | |
8701 | int subsystem_vendor; | |
8702 | int subsystem_device; | |
8703 | void (*hook)(struct drm_device *dev); | |
8704 | }; | |
8705 | ||
8706 | struct intel_quirk intel_quirks[] = { | |
8707 | /* HP Compaq 2730p needs pipe A force quirk (LP: #291555) */ | |
8708 | { 0x2a42, 0x103c, 0x30eb, quirk_pipea_force }, | |
8709 | /* HP Mini needs pipe A force quirk (LP: #322104) */ | |
0206e353 | 8710 | { 0x27ae, 0x103c, 0x361a, quirk_pipea_force }, |
b690e96c JB |
8711 | |
8712 | /* Thinkpad R31 needs pipe A force quirk */ | |
8713 | { 0x3577, 0x1014, 0x0505, quirk_pipea_force }, | |
8714 | /* Toshiba Protege R-205, S-209 needs pipe A force quirk */ | |
8715 | { 0x2592, 0x1179, 0x0001, quirk_pipea_force }, | |
8716 | ||
8717 | /* ThinkPad X30 needs pipe A force quirk (LP: #304614) */ | |
8718 | { 0x3577, 0x1014, 0x0513, quirk_pipea_force }, | |
8719 | /* ThinkPad X40 needs pipe A force quirk */ | |
8720 | ||
8721 | /* ThinkPad T60 needs pipe A force quirk (bug #16494) */ | |
8722 | { 0x2782, 0x17aa, 0x201a, quirk_pipea_force }, | |
8723 | ||
8724 | /* 855 & before need to leave pipe A & dpll A up */ | |
8725 | { 0x3582, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force }, | |
8726 | { 0x2562, PCI_ANY_ID, PCI_ANY_ID, quirk_pipea_force }, | |
435793df KP |
8727 | |
8728 | /* Lenovo U160 cannot use SSC on LVDS */ | |
8729 | { 0x0046, 0x17aa, 0x3920, quirk_ssc_force_disable }, | |
070d329a MAS |
8730 | |
8731 | /* Sony Vaio Y cannot use SSC on LVDS */ | |
8732 | { 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable }, | |
b690e96c JB |
8733 | }; |
8734 | ||
8735 | static void intel_init_quirks(struct drm_device *dev) | |
8736 | { | |
8737 | struct pci_dev *d = dev->pdev; | |
8738 | int i; | |
8739 | ||
8740 | for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) { | |
8741 | struct intel_quirk *q = &intel_quirks[i]; | |
8742 | ||
8743 | if (d->device == q->device && | |
8744 | (d->subsystem_vendor == q->subsystem_vendor || | |
8745 | q->subsystem_vendor == PCI_ANY_ID) && | |
8746 | (d->subsystem_device == q->subsystem_device || | |
8747 | q->subsystem_device == PCI_ANY_ID)) | |
8748 | q->hook(dev); | |
8749 | } | |
8750 | } | |
8751 | ||
9cce37f4 JB |
8752 | /* Disable the VGA plane that we never use */ |
8753 | static void i915_disable_vga(struct drm_device *dev) | |
8754 | { | |
8755 | struct drm_i915_private *dev_priv = dev->dev_private; | |
8756 | u8 sr1; | |
8757 | u32 vga_reg; | |
8758 | ||
8759 | if (HAS_PCH_SPLIT(dev)) | |
8760 | vga_reg = CPU_VGACNTRL; | |
8761 | else | |
8762 | vga_reg = VGACNTRL; | |
8763 | ||
8764 | vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO); | |
8765 | outb(1, VGA_SR_INDEX); | |
8766 | sr1 = inb(VGA_SR_DATA); | |
8767 | outb(sr1 | 1<<5, VGA_SR_DATA); | |
8768 | vga_put(dev->pdev, VGA_RSRC_LEGACY_IO); | |
8769 | udelay(300); | |
8770 | ||
8771 | I915_WRITE(vga_reg, VGA_DISP_DISABLE); | |
8772 | POSTING_READ(vga_reg); | |
8773 | } | |
8774 | ||
79e53945 JB |
8775 | void intel_modeset_init(struct drm_device *dev) |
8776 | { | |
652c393a | 8777 | struct drm_i915_private *dev_priv = dev->dev_private; |
79e53945 JB |
8778 | int i; |
8779 | ||
8780 | drm_mode_config_init(dev); | |
8781 | ||
8782 | dev->mode_config.min_width = 0; | |
8783 | dev->mode_config.min_height = 0; | |
8784 | ||
8785 | dev->mode_config.funcs = (void *)&intel_mode_funcs; | |
8786 | ||
b690e96c JB |
8787 | intel_init_quirks(dev); |
8788 | ||
e70236a8 JB |
8789 | intel_init_display(dev); |
8790 | ||
a6c45cf0 CW |
8791 | if (IS_GEN2(dev)) { |
8792 | dev->mode_config.max_width = 2048; | |
8793 | dev->mode_config.max_height = 2048; | |
8794 | } else if (IS_GEN3(dev)) { | |
5e4d6fa7 KP |
8795 | dev->mode_config.max_width = 4096; |
8796 | dev->mode_config.max_height = 4096; | |
79e53945 | 8797 | } else { |
a6c45cf0 CW |
8798 | dev->mode_config.max_width = 8192; |
8799 | dev->mode_config.max_height = 8192; | |
79e53945 | 8800 | } |
35c3047a | 8801 | dev->mode_config.fb_base = dev->agp->base; |
79e53945 | 8802 | |
28c97730 | 8803 | DRM_DEBUG_KMS("%d display pipe%s available.\n", |
a3524f1b | 8804 | dev_priv->num_pipe, dev_priv->num_pipe > 1 ? "s" : ""); |
79e53945 | 8805 | |
a3524f1b | 8806 | for (i = 0; i < dev_priv->num_pipe; i++) { |
79e53945 JB |
8807 | intel_crtc_init(dev, i); |
8808 | } | |
8809 | ||
9cce37f4 JB |
8810 | /* Just disable it once at startup */ |
8811 | i915_disable_vga(dev); | |
79e53945 | 8812 | intel_setup_outputs(dev); |
652c393a | 8813 | |
645c62a5 | 8814 | intel_init_clock_gating(dev); |
9cce37f4 | 8815 | |
7648fa99 | 8816 | if (IS_IRONLAKE_M(dev)) { |
f97108d1 | 8817 | ironlake_enable_drps(dev); |
7648fa99 JB |
8818 | intel_init_emon(dev); |
8819 | } | |
f97108d1 | 8820 | |
1c70c0ce | 8821 | if (IS_GEN6(dev) || IS_GEN7(dev)) { |
3b8d8d91 | 8822 | gen6_enable_rps(dev_priv); |
23b2f8bb JB |
8823 | gen6_update_ring_freq(dev_priv); |
8824 | } | |
3b8d8d91 | 8825 | |
652c393a JB |
8826 | INIT_WORK(&dev_priv->idle_work, intel_idle_update); |
8827 | setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer, | |
8828 | (unsigned long)dev); | |
2c7111db CW |
8829 | } |
8830 | ||
8831 | void intel_modeset_gem_init(struct drm_device *dev) | |
8832 | { | |
8833 | if (IS_IRONLAKE_M(dev)) | |
8834 | ironlake_enable_rc6(dev); | |
02e792fb DV |
8835 | |
8836 | intel_setup_overlay(dev); | |
79e53945 JB |
8837 | } |
8838 | ||
8839 | void intel_modeset_cleanup(struct drm_device *dev) | |
8840 | { | |
652c393a JB |
8841 | struct drm_i915_private *dev_priv = dev->dev_private; |
8842 | struct drm_crtc *crtc; | |
8843 | struct intel_crtc *intel_crtc; | |
8844 | ||
f87ea761 | 8845 | drm_kms_helper_poll_fini(dev); |
652c393a JB |
8846 | mutex_lock(&dev->struct_mutex); |
8847 | ||
723bfd70 JB |
8848 | intel_unregister_dsm_handler(); |
8849 | ||
8850 | ||
652c393a JB |
8851 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
8852 | /* Skip inactive CRTCs */ | |
8853 | if (!crtc->fb) | |
8854 | continue; | |
8855 | ||
8856 | intel_crtc = to_intel_crtc(crtc); | |
3dec0095 | 8857 | intel_increase_pllclock(crtc); |
652c393a JB |
8858 | } |
8859 | ||
973d04f9 | 8860 | intel_disable_fbc(dev); |
e70236a8 | 8861 | |
f97108d1 JB |
8862 | if (IS_IRONLAKE_M(dev)) |
8863 | ironlake_disable_drps(dev); | |
1c70c0ce | 8864 | if (IS_GEN6(dev) || IS_GEN7(dev)) |
3b8d8d91 | 8865 | gen6_disable_rps(dev); |
f97108d1 | 8866 | |
d5bb081b JB |
8867 | if (IS_IRONLAKE_M(dev)) |
8868 | ironlake_disable_rc6(dev); | |
0cdab21f | 8869 | |
69341a5e KH |
8870 | mutex_unlock(&dev->struct_mutex); |
8871 | ||
6c0d9350 DV |
8872 | /* Disable the irq before mode object teardown, for the irq might |
8873 | * enqueue unpin/hotplug work. */ | |
8874 | drm_irq_uninstall(dev); | |
8875 | cancel_work_sync(&dev_priv->hotplug_work); | |
6fdd4d98 | 8876 | cancel_work_sync(&dev_priv->rps_work); |
6c0d9350 | 8877 | |
1630fe75 CW |
8878 | /* flush any delayed tasks or pending work */ |
8879 | flush_scheduled_work(); | |
8880 | ||
3dec0095 DV |
8881 | /* Shut off idle work before the crtcs get freed. */ |
8882 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | |
8883 | intel_crtc = to_intel_crtc(crtc); | |
8884 | del_timer_sync(&intel_crtc->idle_timer); | |
8885 | } | |
8886 | del_timer_sync(&dev_priv->idle_timer); | |
8887 | cancel_work_sync(&dev_priv->idle_work); | |
8888 | ||
79e53945 JB |
8889 | drm_mode_config_cleanup(dev); |
8890 | } | |
8891 | ||
f1c79df3 ZW |
8892 | /* |
8893 | * Return which encoder is currently attached for connector. | |
8894 | */ | |
df0e9248 | 8895 | struct drm_encoder *intel_best_encoder(struct drm_connector *connector) |
79e53945 | 8896 | { |
df0e9248 CW |
8897 | return &intel_attached_encoder(connector)->base; |
8898 | } | |
f1c79df3 | 8899 | |
df0e9248 CW |
8900 | void intel_connector_attach_encoder(struct intel_connector *connector, |
8901 | struct intel_encoder *encoder) | |
8902 | { | |
8903 | connector->encoder = encoder; | |
8904 | drm_mode_connector_attach_encoder(&connector->base, | |
8905 | &encoder->base); | |
79e53945 | 8906 | } |
28d52043 DA |
8907 | |
8908 | /* | |
8909 | * set vga decode state - true == enable VGA decode | |
8910 | */ | |
8911 | int intel_modeset_vga_set_state(struct drm_device *dev, bool state) | |
8912 | { | |
8913 | struct drm_i915_private *dev_priv = dev->dev_private; | |
8914 | u16 gmch_ctrl; | |
8915 | ||
8916 | pci_read_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, &gmch_ctrl); | |
8917 | if (state) | |
8918 | gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE; | |
8919 | else | |
8920 | gmch_ctrl |= INTEL_GMCH_VGA_DISABLE; | |
8921 | pci_write_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl); | |
8922 | return 0; | |
8923 | } | |
c4a1d9e4 CW |
8924 | |
8925 | #ifdef CONFIG_DEBUG_FS | |
8926 | #include <linux/seq_file.h> | |
8927 | ||
8928 | struct intel_display_error_state { | |
8929 | struct intel_cursor_error_state { | |
8930 | u32 control; | |
8931 | u32 position; | |
8932 | u32 base; | |
8933 | u32 size; | |
8934 | } cursor[2]; | |
8935 | ||
8936 | struct intel_pipe_error_state { | |
8937 | u32 conf; | |
8938 | u32 source; | |
8939 | ||
8940 | u32 htotal; | |
8941 | u32 hblank; | |
8942 | u32 hsync; | |
8943 | u32 vtotal; | |
8944 | u32 vblank; | |
8945 | u32 vsync; | |
8946 | } pipe[2]; | |
8947 | ||
8948 | struct intel_plane_error_state { | |
8949 | u32 control; | |
8950 | u32 stride; | |
8951 | u32 size; | |
8952 | u32 pos; | |
8953 | u32 addr; | |
8954 | u32 surface; | |
8955 | u32 tile_offset; | |
8956 | } plane[2]; | |
8957 | }; | |
8958 | ||
8959 | struct intel_display_error_state * | |
8960 | intel_display_capture_error_state(struct drm_device *dev) | |
8961 | { | |
0206e353 | 8962 | drm_i915_private_t *dev_priv = dev->dev_private; |
c4a1d9e4 CW |
8963 | struct intel_display_error_state *error; |
8964 | int i; | |
8965 | ||
8966 | error = kmalloc(sizeof(*error), GFP_ATOMIC); | |
8967 | if (error == NULL) | |
8968 | return NULL; | |
8969 | ||
8970 | for (i = 0; i < 2; i++) { | |
8971 | error->cursor[i].control = I915_READ(CURCNTR(i)); | |
8972 | error->cursor[i].position = I915_READ(CURPOS(i)); | |
8973 | error->cursor[i].base = I915_READ(CURBASE(i)); | |
8974 | ||
8975 | error->plane[i].control = I915_READ(DSPCNTR(i)); | |
8976 | error->plane[i].stride = I915_READ(DSPSTRIDE(i)); | |
8977 | error->plane[i].size = I915_READ(DSPSIZE(i)); | |
0206e353 | 8978 | error->plane[i].pos = I915_READ(DSPPOS(i)); |
c4a1d9e4 CW |
8979 | error->plane[i].addr = I915_READ(DSPADDR(i)); |
8980 | if (INTEL_INFO(dev)->gen >= 4) { | |
8981 | error->plane[i].surface = I915_READ(DSPSURF(i)); | |
8982 | error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i)); | |
8983 | } | |
8984 | ||
8985 | error->pipe[i].conf = I915_READ(PIPECONF(i)); | |
8986 | error->pipe[i].source = I915_READ(PIPESRC(i)); | |
8987 | error->pipe[i].htotal = I915_READ(HTOTAL(i)); | |
8988 | error->pipe[i].hblank = I915_READ(HBLANK(i)); | |
8989 | error->pipe[i].hsync = I915_READ(HSYNC(i)); | |
8990 | error->pipe[i].vtotal = I915_READ(VTOTAL(i)); | |
8991 | error->pipe[i].vblank = I915_READ(VBLANK(i)); | |
8992 | error->pipe[i].vsync = I915_READ(VSYNC(i)); | |
8993 | } | |
8994 | ||
8995 | return error; | |
8996 | } | |
8997 | ||
8998 | void | |
8999 | intel_display_print_error_state(struct seq_file *m, | |
9000 | struct drm_device *dev, | |
9001 | struct intel_display_error_state *error) | |
9002 | { | |
9003 | int i; | |
9004 | ||
9005 | for (i = 0; i < 2; i++) { | |
9006 | seq_printf(m, "Pipe [%d]:\n", i); | |
9007 | seq_printf(m, " CONF: %08x\n", error->pipe[i].conf); | |
9008 | seq_printf(m, " SRC: %08x\n", error->pipe[i].source); | |
9009 | seq_printf(m, " HTOTAL: %08x\n", error->pipe[i].htotal); | |
9010 | seq_printf(m, " HBLANK: %08x\n", error->pipe[i].hblank); | |
9011 | seq_printf(m, " HSYNC: %08x\n", error->pipe[i].hsync); | |
9012 | seq_printf(m, " VTOTAL: %08x\n", error->pipe[i].vtotal); | |
9013 | seq_printf(m, " VBLANK: %08x\n", error->pipe[i].vblank); | |
9014 | seq_printf(m, " VSYNC: %08x\n", error->pipe[i].vsync); | |
9015 | ||
9016 | seq_printf(m, "Plane [%d]:\n", i); | |
9017 | seq_printf(m, " CNTR: %08x\n", error->plane[i].control); | |
9018 | seq_printf(m, " STRIDE: %08x\n", error->plane[i].stride); | |
9019 | seq_printf(m, " SIZE: %08x\n", error->plane[i].size); | |
9020 | seq_printf(m, " POS: %08x\n", error->plane[i].pos); | |
9021 | seq_printf(m, " ADDR: %08x\n", error->plane[i].addr); | |
9022 | if (INTEL_INFO(dev)->gen >= 4) { | |
9023 | seq_printf(m, " SURF: %08x\n", error->plane[i].surface); | |
9024 | seq_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset); | |
9025 | } | |
9026 | ||
9027 | seq_printf(m, "Cursor [%d]:\n", i); | |
9028 | seq_printf(m, " CNTR: %08x\n", error->cursor[i].control); | |
9029 | seq_printf(m, " POS: %08x\n", error->cursor[i].position); | |
9030 | seq_printf(m, " BASE: %08x\n", error->cursor[i].base); | |
9031 | } | |
9032 | } | |
9033 | #endif |