/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

static const u32 hpd_ibx[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_mask_i915[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_gen4[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

static void
ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

static bool ivb_can_enable_err_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	enum pipe pipe;

	assert_spin_locked(&dev_priv->irq_lock);

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->cpu_fifo_underrun_disabled)
			return false;
	}

	return true;
}

static bool cpt_can_enable_serr_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;
	struct intel_crtc *crtc;

	assert_spin_locked(&dev_priv->irq_lock);

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->pch_fifo_underrun_disabled)
			return false;
	}

	return true;
}

static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
						 enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
					  DE_PIPEB_FIFO_UNDERRUN;

	if (enable)
		ironlake_enable_display_irq(dev_priv, bit);
	else
		ironlake_disable_display_irq(dev_priv, bit);
}

static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
						  enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	if (enable) {
		I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));

		if (!ivb_can_enable_err_int(dev))
			return;

		ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
	} else {
		bool was_enabled = !(I915_READ(DEIMR) & DE_ERR_INT_IVB);

		/* Change the state _after_ we've read out the current one. */
		ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);

		if (!was_enabled &&
		    (I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe))) {
			DRM_DEBUG_KMS("uncleared fifo underrun on pipe %c\n",
				      pipe_name(pipe));
		}
	}
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
					 uint32_t interrupt_mask,
					 uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}
#define ibx_enable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), (bits))
#define ibx_disable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), 0)

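/*
 * Note on the convention used by ibx_display_interrupt_update() above: a bit
 * that is set in SDEIMR masks (disables) the corresponding south display
 * interrupt. The helper therefore clears the bits selected by interrupt_mask
 * and then sets again any of those bits that are not in enabled_irq_mask, so
 * the two wrapper macros simply enable or disable the requested bits.
 */
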
static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (pch_transcoder == TRANSCODER_A) ?
		       SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;

	if (enable)
		ibx_enable_display_interrupt(dev_priv, bit);
	else
		ibx_disable_display_interrupt(dev_priv, bit);
}

static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (enable) {
		I915_WRITE(SERR_INT,
			   SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));

		if (!cpt_can_enable_serr_int(dev))
			return;

		ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
	} else {
		uint32_t tmp = I915_READ(SERR_INT);
		bool was_enabled = !(I915_READ(SDEIMR) & SDE_ERROR_CPT);

		/* Change the state _after_ we've read out the current one. */
		ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);

		if (!was_enabled &&
		    (tmp & SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder))) {
			DRM_DEBUG_KMS("uncleared pch fifo underrun on pch transcoder %c\n",
				      transcoder_name(pch_transcoder));
		}
	}
}

/**
 * intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pipe: pipe
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable CPU fifo underruns for a specific
 * pipe. Notice that on some Gens (e.g. IVB, HSW), disabling FIFO underrun
 * reporting for one pipe may also disable all the other CPU error interrupts
 * for the other pipes, due to the fact that there's just one interrupt
 * mask/enable bit for all the pipes.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
					   enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	ret = !intel_crtc->cpu_fifo_underrun_disabled;

	if (enable == ret)
		goto done;

	intel_crtc->cpu_fifo_underrun_disabled = !enable;

	if (IS_GEN5(dev) || IS_GEN6(dev))
		ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
	else if (IS_GEN7(dev))
		ivybridge_set_fifo_underrun_reporting(dev, pipe, enable);

done:
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
	return ret;
}

/**
 * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable PCH fifo underruns for a specific
 * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
 * underrun reporting for one transcoder may also disable all the other PCH
 * error interrupts for the other transcoders, due to the fact that there's
 * just one interrupt mask/enable bit for all the transcoders.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
					   enum transcoder pch_transcoder,
					   bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned long flags;
	bool ret;

	/*
	 * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT
	 * has only one pch transcoder A that all pipes can use. To avoid racy
	 * pch transcoder -> pipe lookups from interrupt code simply store the
	 * underrun statistics in crtc A. Since we never expose this anywhere
	 * nor use it outside of the fifo underrun code here using the "wrong"
	 * crtc on LPT won't cause issues.
	 */

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	ret = !intel_crtc->pch_fifo_underrun_disabled;

	if (enable == ret)
		goto done;

	intel_crtc->pch_fifo_underrun_disabled = !enable;

	if (HAS_PCH_IBX(dev))
		ibx_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
	else
		cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable);

done:
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
	return ret;
}


void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0x7fff0000;

	assert_spin_locked(&dev_priv->irq_lock);

	if ((pipestat & mask) == mask)
		return;

	/* Enable the interrupt, clear any pending status */
	pipestat |= mask | (mask >> 16);
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0x7fff0000;

	assert_spin_locked(&dev_priv->irq_lock);

	if ((pipestat & mask) == 0)
		return;

	pipestat &= ~mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

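/*
 * PIPESTAT layout, as relied on by the two helpers above: the enable bits
 * live in the high half of the register and each one's status bit sits
 * sixteen bits below it. That is why enabling ORs in both "mask" and
 * "mask >> 16" (arming the interrupt and clearing any stale status for that
 * event), and why the readback is masked with 0x7fff0000 so that only the
 * enable bits are written back and unrelated pending status bits are not
 * accidentally cleared. Callers must hold dev_priv->irq_lock, e.g.:
 *
 *	i915_enable_pipestat(dev_priv, 1, PIPE_LEGACY_BLC_EVENT_ENABLE);
 */
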
/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 */
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	i915_enable_pipestat(dev_priv, 1, PIPE_LEGACY_BLC_EVENT_ENABLE);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, 0, PIPE_LEGACY_BLC_EVENT_ENABLE);

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* Locking is horribly broken here, but whatever. */
		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		return intel_crtc->active;
	} else {
		return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
	}
}

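/*
 * The frame counter read in i915_get_vblank_counter() below is split across
 * two registers: PIPEFRAME holds the high bits of the count and
 * PIPEFRAMEPIXEL the low byte. Because the two fields are not latched
 * together, the high word is sampled twice around the low read and the loop
 * retries until both high samples agree before assembling the count as
 * (high << 8) | low.
 */
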
42f52ef8 KP |
421 | /* Called from drm generic code, passed a 'crtc', which |
422 | * we use as a pipe index | |
423 | */ | |
f71d4af4 | 424 | static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe) |
0a3e67a4 JB |
425 | { |
426 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
427 | unsigned long high_frame; | |
428 | unsigned long low_frame; | |
5eddb70b | 429 | u32 high1, high2, low; |
0a3e67a4 JB |
430 | |
431 | if (!i915_pipe_enabled(dev, pipe)) { | |
44d98a61 | 432 | DRM_DEBUG_DRIVER("trying to get vblank count for disabled " |
9db4a9c7 | 433 | "pipe %c\n", pipe_name(pipe)); |
0a3e67a4 JB |
434 | return 0; |
435 | } | |
436 | ||
9db4a9c7 JB |
437 | high_frame = PIPEFRAME(pipe); |
438 | low_frame = PIPEFRAMEPIXEL(pipe); | |
5eddb70b | 439 | |
0a3e67a4 JB |
440 | /* |
441 | * High & low register fields aren't synchronized, so make sure | |
442 | * we get a low value that's stable across two reads of the high | |
443 | * register. | |
444 | */ | |
445 | do { | |
5eddb70b CW |
446 | high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK; |
447 | low = I915_READ(low_frame) & PIPE_FRAME_LOW_MASK; | |
448 | high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK; | |
0a3e67a4 JB |
449 | } while (high1 != high2); |
450 | ||
5eddb70b CW |
451 | high1 >>= PIPE_FRAME_HIGH_SHIFT; |
452 | low >>= PIPE_FRAME_LOW_SHIFT; | |
453 | return (high1 << 8) | low; | |
0a3e67a4 JB |
454 | } |
455 | ||
f71d4af4 | 456 | static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe) |
9880b7a5 JB |
457 | { |
458 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
9db4a9c7 | 459 | int reg = PIPE_FRMCOUNT_GM45(pipe); |
9880b7a5 JB |
460 | |
461 | if (!i915_pipe_enabled(dev, pipe)) { | |
44d98a61 | 462 | DRM_DEBUG_DRIVER("trying to get vblank count for disabled " |
9db4a9c7 | 463 | "pipe %c\n", pipe_name(pipe)); |
9880b7a5 JB |
464 | return 0; |
465 | } | |
466 | ||
467 | return I915_READ(reg); | |
468 | } | |
469 | ||
f71d4af4 | 470 | static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe, |
0af7e4df MK |
471 | int *vpos, int *hpos) |
472 | { | |
473 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
474 | u32 vbl = 0, position = 0; | |
475 | int vbl_start, vbl_end, htotal, vtotal; | |
476 | bool in_vbl = true; | |
477 | int ret = 0; | |
fe2b8f9d PZ |
478 | enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, |
479 | pipe); | |
0af7e4df MK |
480 | |
481 | if (!i915_pipe_enabled(dev, pipe)) { | |
482 | DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled " | |
9db4a9c7 | 483 | "pipe %c\n", pipe_name(pipe)); |
0af7e4df MK |
484 | return 0; |
485 | } | |
486 | ||
487 | /* Get vtotal. */ | |
fe2b8f9d | 488 | vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff); |
0af7e4df MK |
489 | |
490 | if (INTEL_INFO(dev)->gen >= 4) { | |
491 | /* No obvious pixelcount register. Only query vertical | |
492 | * scanout position from Display scan line register. | |
493 | */ | |
494 | position = I915_READ(PIPEDSL(pipe)); | |
495 | ||
496 | /* Decode into vertical scanout position. Don't have | |
497 | * horizontal scanout position. | |
498 | */ | |
499 | *vpos = position & 0x1fff; | |
500 | *hpos = 0; | |
501 | } else { | |
502 | /* Have access to pixelcount since start of frame. | |
503 | * We can split this into vertical and horizontal | |
504 | * scanout position. | |
505 | */ | |
506 | position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT; | |
507 | ||
fe2b8f9d | 508 | htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff); |
0af7e4df MK |
509 | *vpos = position / htotal; |
510 | *hpos = position - (*vpos * htotal); | |
511 | } | |
512 | ||
513 | /* Query vblank area. */ | |
fe2b8f9d | 514 | vbl = I915_READ(VBLANK(cpu_transcoder)); |
0af7e4df MK |
515 | |
516 | /* Test position against vblank region. */ | |
517 | vbl_start = vbl & 0x1fff; | |
518 | vbl_end = (vbl >> 16) & 0x1fff; | |
519 | ||
520 | if ((*vpos < vbl_start) || (*vpos > vbl_end)) | |
521 | in_vbl = false; | |
522 | ||
523 | /* Inside "upper part" of vblank area? Apply corrective offset: */ | |
524 | if (in_vbl && (*vpos >= vbl_start)) | |
525 | *vpos = *vpos - vtotal; | |
526 | ||
527 | /* Readouts valid? */ | |
528 | if (vbl > 0) | |
529 | ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE; | |
530 | ||
531 | /* In vblank? */ | |
532 | if (in_vbl) | |
533 | ret |= DRM_SCANOUTPOS_INVBL; | |
534 | ||
535 | return ret; | |
536 | } | |
537 | ||
f71d4af4 | 538 | static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe, |
0af7e4df MK |
539 | int *max_error, |
540 | struct timeval *vblank_time, | |
541 | unsigned flags) | |
542 | { | |
4041b853 | 543 | struct drm_crtc *crtc; |
0af7e4df | 544 | |
7eb552ae | 545 | if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) { |
4041b853 | 546 | DRM_ERROR("Invalid crtc %d\n", pipe); |
0af7e4df MK |
547 | return -EINVAL; |
548 | } | |
549 | ||
550 | /* Get drm_crtc to timestamp: */ | |
4041b853 CW |
551 | crtc = intel_get_crtc_for_pipe(dev, pipe); |
552 | if (crtc == NULL) { | |
553 | DRM_ERROR("Invalid crtc %d\n", pipe); | |
554 | return -EINVAL; | |
555 | } | |
556 | ||
557 | if (!crtc->enabled) { | |
558 | DRM_DEBUG_KMS("crtc %d is disabled\n", pipe); | |
559 | return -EBUSY; | |
560 | } | |
0af7e4df MK |
561 | |
562 | /* Helper routine in DRM core does all the work: */ | |
4041b853 CW |
563 | return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error, |
564 | vblank_time, flags, | |
565 | crtc); | |
0af7e4df MK |
566 | } |
567 | ||
321a1b30 EE |
568 | static int intel_hpd_irq_event(struct drm_device *dev, struct drm_connector *connector) |
569 | { | |
570 | enum drm_connector_status old_status; | |
571 | ||
572 | WARN_ON(!mutex_is_locked(&dev->mode_config.mutex)); | |
573 | old_status = connector->status; | |
574 | ||
575 | connector->status = connector->funcs->detect(connector, false); | |
576 | DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n", | |
577 | connector->base.id, | |
578 | drm_get_connector_name(connector), | |
579 | old_status, connector->status); | |
580 | return (old_status != connector->status); | |
581 | } | |
582 | ||
5ca58282 JB |
583 | /* |
584 | * Handle hotplug events outside the interrupt handler proper. | |
585 | */ | |
ac4c16c5 EE |
586 | #define I915_REENABLE_HOTPLUG_DELAY (2*60*1000) |
587 | ||
5ca58282 JB |
588 | static void i915_hotplug_work_func(struct work_struct *work) |
589 | { | |
590 | drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, | |
591 | hotplug_work); | |
592 | struct drm_device *dev = dev_priv->dev; | |
c31c4ba3 | 593 | struct drm_mode_config *mode_config = &dev->mode_config; |
cd569aed EE |
594 | struct intel_connector *intel_connector; |
595 | struct intel_encoder *intel_encoder; | |
596 | struct drm_connector *connector; | |
597 | unsigned long irqflags; | |
598 | bool hpd_disabled = false; | |
321a1b30 | 599 | bool changed = false; |
142e2398 | 600 | u32 hpd_event_bits; |
4ef69c7a | 601 | |
52d7eced DV |
602 | /* HPD irq before everything is fully set up. */ |
603 | if (!dev_priv->enable_hotplug_processing) | |
604 | return; | |
605 | ||
a65e34c7 | 606 | mutex_lock(&mode_config->mutex); |
e67189ab JB |
607 | DRM_DEBUG_KMS("running encoder hotplug functions\n"); |
608 | ||
cd569aed | 609 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); |
142e2398 EE |
610 | |
611 | hpd_event_bits = dev_priv->hpd_event_bits; | |
612 | dev_priv->hpd_event_bits = 0; | |
cd569aed EE |
613 | list_for_each_entry(connector, &mode_config->connector_list, head) { |
614 | intel_connector = to_intel_connector(connector); | |
615 | intel_encoder = intel_connector->encoder; | |
616 | if (intel_encoder->hpd_pin > HPD_NONE && | |
617 | dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED && | |
618 | connector->polled == DRM_CONNECTOR_POLL_HPD) { | |
619 | DRM_INFO("HPD interrupt storm detected on connector %s: " | |
620 | "switching from hotplug detection to polling\n", | |
621 | drm_get_connector_name(connector)); | |
622 | dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED; | |
623 | connector->polled = DRM_CONNECTOR_POLL_CONNECT | |
624 | | DRM_CONNECTOR_POLL_DISCONNECT; | |
625 | hpd_disabled = true; | |
626 | } | |
142e2398 EE |
627 | if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) { |
628 | DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n", | |
629 | drm_get_connector_name(connector), intel_encoder->hpd_pin); | |
630 | } | |
cd569aed EE |
631 | } |
632 | /* if there were no outputs to poll, poll was disabled, | |
633 | * therefore make sure it's enabled when disabling HPD on | |
634 | * some connectors */ | |
ac4c16c5 | 635 | if (hpd_disabled) { |
cd569aed | 636 | drm_kms_helper_poll_enable(dev); |
ac4c16c5 EE |
637 | mod_timer(&dev_priv->hotplug_reenable_timer, |
638 | jiffies + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY)); | |
639 | } | |
cd569aed EE |
640 | |
641 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | |
642 | ||
321a1b30 EE |
643 | list_for_each_entry(connector, &mode_config->connector_list, head) { |
644 | intel_connector = to_intel_connector(connector); | |
645 | intel_encoder = intel_connector->encoder; | |
646 | if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) { | |
647 | if (intel_encoder->hot_plug) | |
648 | intel_encoder->hot_plug(intel_encoder); | |
649 | if (intel_hpd_irq_event(dev, connector)) | |
650 | changed = true; | |
651 | } | |
652 | } | |
40ee3381 KP |
653 | mutex_unlock(&mode_config->mutex); |
654 | ||
321a1b30 EE |
655 | if (changed) |
656 | drm_kms_helper_hotplug_event(dev); | |
5ca58282 JB |
657 | } |
658 | ||
73edd18f | 659 | static void ironlake_handle_rps_change(struct drm_device *dev) |
f97108d1 JB |
660 | { |
661 | drm_i915_private_t *dev_priv = dev->dev_private; | |
b5b72e89 | 662 | u32 busy_up, busy_down, max_avg, min_avg; |
9270388e DV |
663 | u8 new_delay; |
664 | unsigned long flags; | |
665 | ||
666 | spin_lock_irqsave(&mchdev_lock, flags); | |
f97108d1 | 667 | |
73edd18f DV |
668 | I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS)); |
669 | ||
20e4d407 | 670 | new_delay = dev_priv->ips.cur_delay; |
9270388e | 671 | |
7648fa99 | 672 | I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG); |
b5b72e89 MG |
673 | busy_up = I915_READ(RCPREVBSYTUPAVG); |
674 | busy_down = I915_READ(RCPREVBSYTDNAVG); | |
f97108d1 JB |
675 | max_avg = I915_READ(RCBMAXAVG); |
676 | min_avg = I915_READ(RCBMINAVG); | |
677 | ||
678 | /* Handle RCS change request from hw */ | |
b5b72e89 | 679 | if (busy_up > max_avg) { |
20e4d407 DV |
680 | if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay) |
681 | new_delay = dev_priv->ips.cur_delay - 1; | |
682 | if (new_delay < dev_priv->ips.max_delay) | |
683 | new_delay = dev_priv->ips.max_delay; | |
b5b72e89 | 684 | } else if (busy_down < min_avg) { |
20e4d407 DV |
685 | if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay) |
686 | new_delay = dev_priv->ips.cur_delay + 1; | |
687 | if (new_delay > dev_priv->ips.min_delay) | |
688 | new_delay = dev_priv->ips.min_delay; | |
f97108d1 JB |
689 | } |
690 | ||
7648fa99 | 691 | if (ironlake_set_drps(dev, new_delay)) |
20e4d407 | 692 | dev_priv->ips.cur_delay = new_delay; |
f97108d1 | 693 | |
9270388e DV |
694 | spin_unlock_irqrestore(&mchdev_lock, flags); |
695 | ||
f97108d1 JB |
696 | return; |
697 | } | |
698 | ||
549f7365 CW |
699 | static void notify_ring(struct drm_device *dev, |
700 | struct intel_ring_buffer *ring) | |
701 | { | |
702 | struct drm_i915_private *dev_priv = dev->dev_private; | |
9862e600 | 703 | |
475553de CW |
704 | if (ring->obj == NULL) |
705 | return; | |
706 | ||
b2eadbc8 | 707 | trace_i915_gem_request_complete(ring, ring->get_seqno(ring, false)); |
9862e600 | 708 | |
549f7365 | 709 | wake_up_all(&ring->irq_queue); |
3e0dc6b0 | 710 | if (i915_enable_hangcheck) { |
99584db3 | 711 | mod_timer(&dev_priv->gpu_error.hangcheck_timer, |
cecc21fe | 712 | round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); |
3e0dc6b0 | 713 | } |
549f7365 CW |
714 | } |
715 | ||
4912d041 | 716 | static void gen6_pm_rps_work(struct work_struct *work) |
3b8d8d91 | 717 | { |
4912d041 | 718 | drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, |
c6a828d3 | 719 | rps.work); |
4912d041 | 720 | u32 pm_iir, pm_imr; |
7b9e0ae6 | 721 | u8 new_delay; |
4912d041 | 722 | |
c6a828d3 DV |
723 | spin_lock_irq(&dev_priv->rps.lock); |
724 | pm_iir = dev_priv->rps.pm_iir; | |
725 | dev_priv->rps.pm_iir = 0; | |
4912d041 | 726 | pm_imr = I915_READ(GEN6_PMIMR); |
4848405c BW |
727 | /* Make sure not to corrupt PMIMR state used by ringbuffer code */ |
728 | I915_WRITE(GEN6_PMIMR, pm_imr & ~GEN6_PM_RPS_EVENTS); | |
c6a828d3 | 729 | spin_unlock_irq(&dev_priv->rps.lock); |
3b8d8d91 | 730 | |
4848405c | 731 | if ((pm_iir & GEN6_PM_RPS_EVENTS) == 0) |
3b8d8d91 JB |
732 | return; |
733 | ||
4fc688ce | 734 | mutex_lock(&dev_priv->rps.hw_lock); |
7b9e0ae6 | 735 | |
7425034a | 736 | if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) { |
c6a828d3 | 737 | new_delay = dev_priv->rps.cur_delay + 1; |
7425034a VS |
738 | |
739 | /* | |
740 | * For better performance, jump directly | |
741 | * to RPe if we're below it. | |
742 | */ | |
743 | if (IS_VALLEYVIEW(dev_priv->dev) && | |
744 | dev_priv->rps.cur_delay < dev_priv->rps.rpe_delay) | |
745 | new_delay = dev_priv->rps.rpe_delay; | |
746 | } else | |
c6a828d3 | 747 | new_delay = dev_priv->rps.cur_delay - 1; |
3b8d8d91 | 748 | |
79249636 BW |
749 | /* sysfs frequency interfaces may have snuck in while servicing the |
750 | * interrupt | |
751 | */ | |
d8289c9e VS |
752 | if (new_delay >= dev_priv->rps.min_delay && |
753 | new_delay <= dev_priv->rps.max_delay) { | |
0a073b84 JB |
754 | if (IS_VALLEYVIEW(dev_priv->dev)) |
755 | valleyview_set_rps(dev_priv->dev, new_delay); | |
756 | else | |
757 | gen6_set_rps(dev_priv->dev, new_delay); | |
79249636 | 758 | } |
3b8d8d91 | 759 | |
52ceb908 JB |
760 | if (IS_VALLEYVIEW(dev_priv->dev)) { |
761 | /* | |
762 | * On VLV, when we enter RC6 we may not be at the minimum | |
763 | * voltage level, so arm a timer to check. It should only | |
764 | * fire when there's activity or once after we've entered | |
765 | * RC6, and then won't be re-armed until the next RPS interrupt. | |
766 | */ | |
767 | mod_delayed_work(dev_priv->wq, &dev_priv->rps.vlv_work, | |
768 | msecs_to_jiffies(100)); | |
769 | } | |
770 | ||
4fc688ce | 771 | mutex_unlock(&dev_priv->rps.hw_lock); |
3b8d8d91 JB |
772 | } |
773 | ||
e3689190 BW |
774 | |
775 | /** | |
776 | * ivybridge_parity_work - Workqueue called when a parity error interrupt | |
777 | * occurred. | |
778 | * @work: workqueue struct | |
779 | * | |
780 | * Doesn't actually do anything except notify userspace. As a consequence of | |
781 | * this event, userspace should try to remap the bad rows since statistically | |
782 | * it is likely the same row is more likely to go bad again. | |
783 | */ | |
784 | static void ivybridge_parity_work(struct work_struct *work) | |
785 | { | |
786 | drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t, | |
a4da4fa4 | 787 | l3_parity.error_work); |
e3689190 BW |
788 | u32 error_status, row, bank, subbank; |
789 | char *parity_event[5]; | |
790 | uint32_t misccpctl; | |
791 | unsigned long flags; | |
792 | ||
793 | /* We must turn off DOP level clock gating to access the L3 registers. | |
794 | * In order to prevent a get/put style interface, acquire struct mutex | |
795 | * any time we access those registers. | |
796 | */ | |
797 | mutex_lock(&dev_priv->dev->struct_mutex); | |
798 | ||
799 | misccpctl = I915_READ(GEN7_MISCCPCTL); | |
800 | I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE); | |
801 | POSTING_READ(GEN7_MISCCPCTL); | |
802 | ||
803 | error_status = I915_READ(GEN7_L3CDERRST1); | |
804 | row = GEN7_PARITY_ERROR_ROW(error_status); | |
805 | bank = GEN7_PARITY_ERROR_BANK(error_status); | |
806 | subbank = GEN7_PARITY_ERROR_SUBBANK(error_status); | |
807 | ||
808 | I915_WRITE(GEN7_L3CDERRST1, GEN7_PARITY_ERROR_VALID | | |
809 | GEN7_L3CDERRST1_ENABLE); | |
810 | POSTING_READ(GEN7_L3CDERRST1); | |
811 | ||
812 | I915_WRITE(GEN7_MISCCPCTL, misccpctl); | |
813 | ||
814 | spin_lock_irqsave(&dev_priv->irq_lock, flags); | |
cc609d5d | 815 | dev_priv->gt_irq_mask &= ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT; |
e3689190 BW |
816 | I915_WRITE(GTIMR, dev_priv->gt_irq_mask); |
817 | spin_unlock_irqrestore(&dev_priv->irq_lock, flags); | |
818 | ||
819 | mutex_unlock(&dev_priv->dev->struct_mutex); | |
820 | ||
821 | parity_event[0] = "L3_PARITY_ERROR=1"; | |
822 | parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row); | |
823 | parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank); | |
824 | parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank); | |
825 | parity_event[4] = NULL; | |
826 | ||
827 | kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj, | |
828 | KOBJ_CHANGE, parity_event); | |
829 | ||
830 | DRM_DEBUG("Parity error: Row = %d, Bank = %d, Sub bank = %d.\n", | |
831 | row, bank, subbank); | |
832 | ||
833 | kfree(parity_event[3]); | |
834 | kfree(parity_event[2]); | |
835 | kfree(parity_event[1]); | |
836 | } | |
837 | ||
d2ba8470 | 838 | static void ivybridge_handle_parity_error(struct drm_device *dev) |
e3689190 BW |
839 | { |
840 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
841 | unsigned long flags; | |
842 | ||
e1ef7cc2 | 843 | if (!HAS_L3_GPU_CACHE(dev)) |
e3689190 BW |
844 | return; |
845 | ||
846 | spin_lock_irqsave(&dev_priv->irq_lock, flags); | |
cc609d5d | 847 | dev_priv->gt_irq_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT; |
e3689190 BW |
848 | I915_WRITE(GTIMR, dev_priv->gt_irq_mask); |
849 | spin_unlock_irqrestore(&dev_priv->irq_lock, flags); | |
850 | ||
a4da4fa4 | 851 | queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work); |
e3689190 BW |
852 | } |
853 | ||
e7b4c6b1 DV |
854 | static void snb_gt_irq_handler(struct drm_device *dev, |
855 | struct drm_i915_private *dev_priv, | |
856 | u32 gt_iir) | |
857 | { | |
858 | ||
cc609d5d BW |
859 | if (gt_iir & |
860 | (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT)) | |
e7b4c6b1 | 861 | notify_ring(dev, &dev_priv->ring[RCS]); |
cc609d5d | 862 | if (gt_iir & GT_BSD_USER_INTERRUPT) |
e7b4c6b1 | 863 | notify_ring(dev, &dev_priv->ring[VCS]); |
cc609d5d | 864 | if (gt_iir & GT_BLT_USER_INTERRUPT) |
e7b4c6b1 DV |
865 | notify_ring(dev, &dev_priv->ring[BCS]); |
866 | ||
cc609d5d BW |
867 | if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT | |
868 | GT_BSD_CS_ERROR_INTERRUPT | | |
869 | GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) { | |
e7b4c6b1 DV |
870 | DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir); |
871 | i915_handle_error(dev, false); | |
872 | } | |
e3689190 | 873 | |
cc609d5d | 874 | if (gt_iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT) |
e3689190 | 875 | ivybridge_handle_parity_error(dev); |
e7b4c6b1 DV |
876 | } |
877 | ||
baf02a1f | 878 | /* Legacy way of handling PM interrupts */ |
fc6826d1 CW |
879 | static void gen6_queue_rps_work(struct drm_i915_private *dev_priv, |
880 | u32 pm_iir) | |
881 | { | |
882 | unsigned long flags; | |
883 | ||
884 | /* | |
885 | * IIR bits should never already be set because IMR should | |
886 | * prevent an interrupt from being shown in IIR. The warning | |
887 | * displays a case where we've unsafely cleared | |
c6a828d3 | 888 | * dev_priv->rps.pm_iir. Although missing an interrupt of the same |
fc6826d1 CW |
889 | * type is not a problem, it displays a problem in the logic. |
890 | * | |
c6a828d3 | 891 | * The mask bit in IMR is cleared by dev_priv->rps.work. |
fc6826d1 CW |
892 | */ |
893 | ||
c6a828d3 | 894 | spin_lock_irqsave(&dev_priv->rps.lock, flags); |
c6a828d3 DV |
895 | dev_priv->rps.pm_iir |= pm_iir; |
896 | I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir); | |
fc6826d1 | 897 | POSTING_READ(GEN6_PMIMR); |
c6a828d3 | 898 | spin_unlock_irqrestore(&dev_priv->rps.lock, flags); |
fc6826d1 | 899 | |
c6a828d3 | 900 | queue_work(dev_priv->wq, &dev_priv->rps.work); |
fc6826d1 CW |
901 | } |
902 | ||
b543fb04 EE |
903 | #define HPD_STORM_DETECT_PERIOD 1000 |
904 | #define HPD_STORM_THRESHOLD 5 | |
905 | ||
10a504de | 906 | static inline void intel_hpd_irq_handler(struct drm_device *dev, |
22062dba DV |
907 | u32 hotplug_trigger, |
908 | const u32 *hpd) | |
b543fb04 EE |
909 | { |
910 | drm_i915_private_t *dev_priv = dev->dev_private; | |
b543fb04 | 911 | int i; |
10a504de | 912 | bool storm_detected = false; |
b543fb04 | 913 | |
91d131d2 DV |
914 | if (!hotplug_trigger) |
915 | return; | |
916 | ||
b5ea2d56 | 917 | spin_lock(&dev_priv->irq_lock); |
b543fb04 | 918 | for (i = 1; i < HPD_NUM_PINS; i++) { |
821450c6 | 919 | |
b543fb04 EE |
920 | if (!(hpd[i] & hotplug_trigger) || |
921 | dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED) | |
922 | continue; | |
923 | ||
bc5ead8c | 924 | dev_priv->hpd_event_bits |= (1 << i); |
b543fb04 EE |
925 | if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies, |
926 | dev_priv->hpd_stats[i].hpd_last_jiffies | |
927 | + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) { | |
928 | dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies; | |
929 | dev_priv->hpd_stats[i].hpd_cnt = 0; | |
930 | } else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) { | |
931 | dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED; | |
142e2398 | 932 | dev_priv->hpd_event_bits &= ~(1 << i); |
b543fb04 | 933 | DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i); |
10a504de | 934 | storm_detected = true; |
b543fb04 EE |
935 | } else { |
936 | dev_priv->hpd_stats[i].hpd_cnt++; | |
937 | } | |
938 | } | |
939 | ||
10a504de DV |
940 | if (storm_detected) |
941 | dev_priv->display.hpd_irq_setup(dev); | |
b5ea2d56 | 942 | spin_unlock(&dev_priv->irq_lock); |
5876fa0d DV |
943 | |
944 | queue_work(dev_priv->wq, | |
945 | &dev_priv->hotplug_work); | |
b543fb04 EE |
946 | } |
947 | ||
515ac2bb DV |
948 | static void gmbus_irq_handler(struct drm_device *dev) |
949 | { | |
28c70f16 DV |
950 | struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private; |
951 | ||
28c70f16 | 952 | wake_up_all(&dev_priv->gmbus_wait_queue); |
515ac2bb DV |
953 | } |
954 | ||
ce99c256 DV |
955 | static void dp_aux_irq_handler(struct drm_device *dev) |
956 | { | |
9ee32fea DV |
957 | struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private; |
958 | ||
9ee32fea | 959 | wake_up_all(&dev_priv->gmbus_wait_queue); |
ce99c256 DV |
960 | } |
961 | ||
baf02a1f BW |
962 | /* Unlike gen6_queue_rps_work() from which this function is originally derived, |
963 | * we must be able to deal with other PM interrupts. This is complicated because | |
964 | * of the way in which we use the masks to defer the RPS work (which for | |
965 | * posterity is necessary because of forcewake). | |
966 | */ | |
967 | static void hsw_pm_irq_handler(struct drm_i915_private *dev_priv, | |
968 | u32 pm_iir) | |
969 | { | |
970 | unsigned long flags; | |
971 | ||
972 | spin_lock_irqsave(&dev_priv->rps.lock, flags); | |
4848405c | 973 | dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS; |
baf02a1f BW |
974 | if (dev_priv->rps.pm_iir) { |
975 | I915_WRITE(GEN6_PMIMR, dev_priv->rps.pm_iir); | |
976 | /* never want to mask useful interrupts. (also posting read) */ | |
4848405c | 977 | WARN_ON(I915_READ_NOTRACE(GEN6_PMIMR) & ~GEN6_PM_RPS_EVENTS); |
baf02a1f BW |
978 | /* TODO: if queue_work is slow, move it out of the spinlock */ |
979 | queue_work(dev_priv->wq, &dev_priv->rps.work); | |
980 | } | |
981 | spin_unlock_irqrestore(&dev_priv->rps.lock, flags); | |
982 | ||
12638c57 BW |
983 | if (pm_iir & ~GEN6_PM_RPS_EVENTS) { |
984 | if (pm_iir & PM_VEBOX_USER_INTERRUPT) | |
985 | notify_ring(dev_priv->dev, &dev_priv->ring[VECS]); | |
986 | ||
987 | if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) { | |
988 | DRM_ERROR("VEBOX CS error interrupt 0x%08x\n", pm_iir); | |
989 | i915_handle_error(dev_priv->dev, false); | |
990 | } | |
991 | } | |
baf02a1f BW |
992 | } |
993 | ||
ff1f525e | 994 | static irqreturn_t valleyview_irq_handler(int irq, void *arg) |
7e231dbe JB |
995 | { |
996 | struct drm_device *dev = (struct drm_device *) arg; | |
997 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
998 | u32 iir, gt_iir, pm_iir; | |
999 | irqreturn_t ret = IRQ_NONE; | |
1000 | unsigned long irqflags; | |
1001 | int pipe; | |
1002 | u32 pipe_stats[I915_MAX_PIPES]; | |
7e231dbe JB |
1003 | |
1004 | atomic_inc(&dev_priv->irq_received); | |
1005 | ||
7e231dbe JB |
1006 | while (true) { |
1007 | iir = I915_READ(VLV_IIR); | |
1008 | gt_iir = I915_READ(GTIIR); | |
1009 | pm_iir = I915_READ(GEN6_PMIIR); | |
1010 | ||
1011 | if (gt_iir == 0 && pm_iir == 0 && iir == 0) | |
1012 | goto out; | |
1013 | ||
1014 | ret = IRQ_HANDLED; | |
1015 | ||
e7b4c6b1 | 1016 | snb_gt_irq_handler(dev, dev_priv, gt_iir); |
7e231dbe JB |
1017 | |
1018 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | |
1019 | for_each_pipe(pipe) { | |
1020 | int reg = PIPESTAT(pipe); | |
1021 | pipe_stats[pipe] = I915_READ(reg); | |
1022 | ||
1023 | /* | |
1024 | * Clear the PIPE*STAT regs before the IIR | |
1025 | */ | |
1026 | if (pipe_stats[pipe] & 0x8000ffff) { | |
1027 | if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) | |
1028 | DRM_DEBUG_DRIVER("pipe %c underrun\n", | |
1029 | pipe_name(pipe)); | |
1030 | I915_WRITE(reg, pipe_stats[pipe]); | |
1031 | } | |
1032 | } | |
1033 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | |
1034 | ||
31acc7f5 JB |
1035 | for_each_pipe(pipe) { |
1036 | if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS) | |
1037 | drm_handle_vblank(dev, pipe); | |
1038 | ||
1039 | if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) { | |
1040 | intel_prepare_page_flip(dev, pipe); | |
1041 | intel_finish_page_flip(dev, pipe); | |
1042 | } | |
1043 | } | |
1044 | ||
7e231dbe JB |
1045 | /* Consume port. Then clear IIR or we'll miss events */ |
1046 | if (iir & I915_DISPLAY_PORT_INTERRUPT) { | |
1047 | u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); | |
b543fb04 | 1048 | u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; |
7e231dbe JB |
1049 | |
1050 | DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", | |
1051 | hotplug_status); | |
91d131d2 DV |
1052 | |
1053 | intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915); | |
1054 | ||
7e231dbe JB |
1055 | I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); |
1056 | I915_READ(PORT_HOTPLUG_STAT); | |
1057 | } | |
1058 | ||
515ac2bb DV |
1059 | if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) |
1060 | gmbus_irq_handler(dev); | |
7e231dbe | 1061 | |
4848405c | 1062 | if (pm_iir & GEN6_PM_RPS_EVENTS) |
fc6826d1 | 1063 | gen6_queue_rps_work(dev_priv, pm_iir); |
7e231dbe JB |
1064 | |
1065 | I915_WRITE(GTIIR, gt_iir); | |
1066 | I915_WRITE(GEN6_PMIIR, pm_iir); | |
1067 | I915_WRITE(VLV_IIR, iir); | |
1068 | } | |
1069 | ||
1070 | out: | |
1071 | return ret; | |
1072 | } | |
1073 | ||
23e81d69 | 1074 | static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir) |
776ad806 JB |
1075 | { |
1076 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
9db4a9c7 | 1077 | int pipe; |
b543fb04 | 1078 | u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK; |
776ad806 | 1079 | |
91d131d2 DV |
1080 | intel_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx); |
1081 | ||
cfc33bf7 VS |
1082 | if (pch_iir & SDE_AUDIO_POWER_MASK) { |
1083 | int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >> | |
1084 | SDE_AUDIO_POWER_SHIFT); | |
776ad806 | 1085 | DRM_DEBUG_DRIVER("PCH audio power change on port %d\n", |
cfc33bf7 VS |
1086 | port_name(port)); |
1087 | } | |
776ad806 | 1088 | |
ce99c256 DV |
1089 | if (pch_iir & SDE_AUX_MASK) |
1090 | dp_aux_irq_handler(dev); | |
1091 | ||
776ad806 | 1092 | if (pch_iir & SDE_GMBUS) |
515ac2bb | 1093 | gmbus_irq_handler(dev); |
776ad806 JB |
1094 | |
1095 | if (pch_iir & SDE_AUDIO_HDCP_MASK) | |
1096 | DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n"); | |
1097 | ||
1098 | if (pch_iir & SDE_AUDIO_TRANS_MASK) | |
1099 | DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n"); | |
1100 | ||
1101 | if (pch_iir & SDE_POISON) | |
1102 | DRM_ERROR("PCH poison interrupt\n"); | |
1103 | ||
9db4a9c7 JB |
1104 | if (pch_iir & SDE_FDI_MASK) |
1105 | for_each_pipe(pipe) | |
1106 | DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", | |
1107 | pipe_name(pipe), | |
1108 | I915_READ(FDI_RX_IIR(pipe))); | |
776ad806 JB |
1109 | |
1110 | if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE)) | |
1111 | DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n"); | |
1112 | ||
1113 | if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR)) | |
1114 | DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n"); | |
1115 | ||
776ad806 | 1116 | if (pch_iir & SDE_TRANSA_FIFO_UNDER) |
8664281b PZ |
1117 | if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, |
1118 | false)) | |
1119 | DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n"); | |
1120 | ||
1121 | if (pch_iir & SDE_TRANSB_FIFO_UNDER) | |
1122 | if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B, | |
1123 | false)) | |
1124 | DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n"); | |
1125 | } | |
1126 | ||
1127 | static void ivb_err_int_handler(struct drm_device *dev) | |
1128 | { | |
1129 | struct drm_i915_private *dev_priv = dev->dev_private; | |
1130 | u32 err_int = I915_READ(GEN7_ERR_INT); | |
1131 | ||
de032bf4 PZ |
1132 | if (err_int & ERR_INT_POISON) |
1133 | DRM_ERROR("Poison interrupt\n"); | |
1134 | ||
8664281b PZ |
1135 | if (err_int & ERR_INT_FIFO_UNDERRUN_A) |
1136 | if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false)) | |
1137 | DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n"); | |
1138 | ||
1139 | if (err_int & ERR_INT_FIFO_UNDERRUN_B) | |
1140 | if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false)) | |
1141 | DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n"); | |
1142 | ||
1143 | if (err_int & ERR_INT_FIFO_UNDERRUN_C) | |
1144 | if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_C, false)) | |
1145 | DRM_DEBUG_DRIVER("Pipe C FIFO underrun\n"); | |
1146 | ||
1147 | I915_WRITE(GEN7_ERR_INT, err_int); | |
1148 | } | |
1149 | ||
1150 | static void cpt_serr_int_handler(struct drm_device *dev) | |
1151 | { | |
1152 | struct drm_i915_private *dev_priv = dev->dev_private; | |
1153 | u32 serr_int = I915_READ(SERR_INT); | |
1154 | ||
de032bf4 PZ |
1155 | if (serr_int & SERR_INT_POISON) |
1156 | DRM_ERROR("PCH poison interrupt\n"); | |
1157 | ||
8664281b PZ |
1158 | if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN) |
1159 | if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, | |
1160 | false)) | |
1161 | DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n"); | |
1162 | ||
1163 | if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN) | |
1164 | if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B, | |
1165 | false)) | |
1166 | DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n"); | |
1167 | ||
1168 | if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN) | |
1169 | if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C, | |
1170 | false)) | |
1171 | DRM_DEBUG_DRIVER("PCH transcoder C FIFO underrun\n"); | |
1172 | ||
1173 | I915_WRITE(SERR_INT, serr_int); | |
776ad806 JB |
1174 | } |
1175 | ||
23e81d69 AJ |
1176 | static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir) |
1177 | { | |
1178 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
1179 | int pipe; | |
b543fb04 | 1180 | u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT; |
23e81d69 | 1181 | |
91d131d2 DV |
1182 | intel_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt); |
1183 | ||
cfc33bf7 VS |
1184 | if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) { |
1185 | int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >> | |
1186 | SDE_AUDIO_POWER_SHIFT_CPT); | |
1187 | DRM_DEBUG_DRIVER("PCH audio power change on port %c\n", | |
1188 | port_name(port)); | |
1189 | } | |
23e81d69 AJ |
1190 | |
1191 | if (pch_iir & SDE_AUX_MASK_CPT) | |
ce99c256 | 1192 | dp_aux_irq_handler(dev); |
23e81d69 AJ |
1193 | |
1194 | if (pch_iir & SDE_GMBUS_CPT) | |
515ac2bb | 1195 | gmbus_irq_handler(dev); |
23e81d69 AJ |
1196 | |
1197 | if (pch_iir & SDE_AUDIO_CP_REQ_CPT) | |
1198 | DRM_DEBUG_DRIVER("Audio CP request interrupt\n"); | |
1199 | ||
1200 | if (pch_iir & SDE_AUDIO_CP_CHG_CPT) | |
1201 | DRM_DEBUG_DRIVER("Audio CP change interrupt\n"); | |
1202 | ||
1203 | if (pch_iir & SDE_FDI_MASK_CPT) | |
1204 | for_each_pipe(pipe) | |
1205 | DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n", | |
1206 | pipe_name(pipe), | |
1207 | I915_READ(FDI_RX_IIR(pipe))); | |
8664281b PZ |
1208 | |
1209 | if (pch_iir & SDE_ERROR_CPT) | |
1210 | cpt_serr_int_handler(dev); | |
23e81d69 AJ |
1211 | } |
1212 | ||
ff1f525e | 1213 | static irqreturn_t ivybridge_irq_handler(int irq, void *arg) |
b1f14ad0 JB |
1214 | { |
1215 | struct drm_device *dev = (struct drm_device *) arg; | |
1216 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
ab5c608b | 1217 | u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier = 0; |
0e43406b CW |
1218 | irqreturn_t ret = IRQ_NONE; |
1219 | int i; | |
b1f14ad0 JB |
1220 | |
1221 | atomic_inc(&dev_priv->irq_received); | |
1222 | ||
8664281b PZ |
1223 | /* We get interrupts on unclaimed registers, so check for this before we |
1224 | * do any I915_{READ,WRITE}. */ | |
1225 | if (IS_HASWELL(dev) && | |
1226 | (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) { | |
1227 | DRM_ERROR("Unclaimed register before interrupt\n"); | |
1228 | I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM); | |
1229 | } | |
1230 | ||
b1f14ad0 JB |
1231 | /* disable master interrupt before clearing iir */ |
1232 | de_ier = I915_READ(DEIER); | |
1233 | I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); | |
b1f14ad0 | 1234 | |
44498aea PZ |
1235 | /* Disable south interrupts. We'll only write to SDEIIR once, so further |
1236 | * interrupts will will be stored on its back queue, and then we'll be | |
1237 | * able to process them after we restore SDEIER (as soon as we restore | |
1238 | * it, we'll get an interrupt if SDEIIR still has something to process | |
1239 | * due to its back queue). */ | |
ab5c608b BW |
1240 | if (!HAS_PCH_NOP(dev)) { |
1241 | sde_ier = I915_READ(SDEIER); | |
1242 | I915_WRITE(SDEIER, 0); | |
1243 | POSTING_READ(SDEIER); | |
1244 | } | |
44498aea | 1245 | |
8664281b PZ |
1246 | /* On Haswell, also mask ERR_INT because we don't want to risk |
1247 | * generating "unclaimed register" interrupts from inside the interrupt | |
1248 | * handler. */ | |
4bc9d430 DV |
1249 | if (IS_HASWELL(dev)) { |
1250 | spin_lock(&dev_priv->irq_lock); | |
8664281b | 1251 | ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB); |
4bc9d430 DV |
1252 | spin_unlock(&dev_priv->irq_lock); |
1253 | } | |
8664281b | 1254 | |
b1f14ad0 | 1255 | gt_iir = I915_READ(GTIIR); |
0e43406b CW |
1256 | if (gt_iir) { |
1257 | snb_gt_irq_handler(dev, dev_priv, gt_iir); | |
1258 | I915_WRITE(GTIIR, gt_iir); | |
1259 | ret = IRQ_HANDLED; | |
b1f14ad0 JB |
1260 | } |
1261 | ||
0e43406b CW |
1262 | de_iir = I915_READ(DEIIR); |
1263 | if (de_iir) { | |
8664281b PZ |
1264 | if (de_iir & DE_ERR_INT_IVB) |
1265 | ivb_err_int_handler(dev); | |
1266 | ||
ce99c256 DV |
1267 | if (de_iir & DE_AUX_CHANNEL_A_IVB) |
1268 | dp_aux_irq_handler(dev); | |
1269 | ||
0e43406b | 1270 | if (de_iir & DE_GSE_IVB) |
81a07809 | 1271 | intel_opregion_asle_intr(dev); |
0e43406b CW |
1272 | |
1273 | for (i = 0; i < 3; i++) { | |
74d44445 DV |
1274 | if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i))) |
1275 | drm_handle_vblank(dev, i); | |
0e43406b CW |
1276 | if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) { |
1277 | intel_prepare_page_flip(dev, i); | |
1278 | intel_finish_page_flip_plane(dev, i); | |
1279 | } | |
0e43406b | 1280 | } |
b615b57a | 1281 | |
0e43406b | 1282 | /* check event from PCH */ |
ab5c608b | 1283 | if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) { |
0e43406b | 1284 | u32 pch_iir = I915_READ(SDEIIR); |
b1f14ad0 | 1285 | |
23e81d69 | 1286 | cpt_irq_handler(dev, pch_iir); |
b1f14ad0 | 1287 | |
0e43406b CW |
1288 | /* clear PCH hotplug event before clear CPU irq */ |
1289 | I915_WRITE(SDEIIR, pch_iir); | |
1290 | } | |
b615b57a | 1291 | |
0e43406b CW |
1292 | I915_WRITE(DEIIR, de_iir); |
1293 | ret = IRQ_HANDLED; | |
b1f14ad0 JB |
1294 | } |
1295 | ||
0e43406b CW |
1296 | pm_iir = I915_READ(GEN6_PMIIR); |
1297 | if (pm_iir) { | |
baf02a1f BW |
1298 | if (IS_HASWELL(dev)) |
1299 | hsw_pm_irq_handler(dev_priv, pm_iir); | |
4848405c | 1300 | else if (pm_iir & GEN6_PM_RPS_EVENTS) |
0e43406b CW |
1301 | gen6_queue_rps_work(dev_priv, pm_iir); |
1302 | I915_WRITE(GEN6_PMIIR, pm_iir); | |
1303 | ret = IRQ_HANDLED; | |
1304 | } | |
b1f14ad0 | 1305 | |
4bc9d430 DV |
1306 | if (IS_HASWELL(dev)) { |
1307 | spin_lock(&dev_priv->irq_lock); | |
1308 | if (ivb_can_enable_err_int(dev)) | |
1309 | ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB); | |
1310 | spin_unlock(&dev_priv->irq_lock); | |
1311 | } | |
8664281b | 1312 | |
b1f14ad0 JB |
1313 | I915_WRITE(DEIER, de_ier); |
1314 | POSTING_READ(DEIER); | |
ab5c608b BW |
1315 | if (!HAS_PCH_NOP(dev)) { |
1316 | I915_WRITE(SDEIER, sde_ier); | |
1317 | POSTING_READ(SDEIER); | |
1318 | } | |
b1f14ad0 JB |
1319 | |
1320 | return ret; | |
1321 | } | |
1322 | ||
e7b4c6b1 DV |
1323 | static void ilk_gt_irq_handler(struct drm_device *dev, |
1324 | struct drm_i915_private *dev_priv, | |
1325 | u32 gt_iir) | |
1326 | { | |
cc609d5d BW |
1327 | if (gt_iir & |
1328 | (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT)) | |
e7b4c6b1 | 1329 | notify_ring(dev, &dev_priv->ring[RCS]); |
cc609d5d | 1330 | if (gt_iir & ILK_BSD_USER_INTERRUPT) |
e7b4c6b1 DV |
1331 | notify_ring(dev, &dev_priv->ring[VCS]); |
1332 | } | |
1333 | ||
ff1f525e | 1334 | static irqreturn_t ironlake_irq_handler(int irq, void *arg) |
036a4a7d | 1335 | { |
4697995b | 1336 | struct drm_device *dev = (struct drm_device *) arg; |
036a4a7d ZW |
1337 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
1338 | int ret = IRQ_NONE; | |
44498aea | 1339 | u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier; |
881f47b6 | 1340 | |
4697995b JB |
1341 | atomic_inc(&dev_priv->irq_received); |
1342 | ||
2d109a84 ZN |
1343 | /* disable master interrupt before clearing iir */ |
1344 | de_ier = I915_READ(DEIER); | |
1345 | I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); | |
3143a2bf | 1346 | POSTING_READ(DEIER); |
2d109a84 | 1347 | |
44498aea PZ |
1348 | /* Disable south interrupts. We'll only write to SDEIIR once, so further |
1349 | * interrupts will will be stored on its back queue, and then we'll be | |
1350 | * able to process them after we restore SDEIER (as soon as we restore | |
1351 | * it, we'll get an interrupt if SDEIIR still has something to process | |
1352 | * due to its back queue). */ | |
1353 | sde_ier = I915_READ(SDEIER); | |
1354 | I915_WRITE(SDEIER, 0); | |
1355 | POSTING_READ(SDEIER); | |
1356 | ||
036a4a7d ZW |
1357 | de_iir = I915_READ(DEIIR); |
1358 | gt_iir = I915_READ(GTIIR); | |
3b8d8d91 | 1359 | pm_iir = I915_READ(GEN6_PMIIR); |
036a4a7d | 1360 | |
acd15b6c | 1361 | if (de_iir == 0 && gt_iir == 0 && (!IS_GEN6(dev) || pm_iir == 0)) |
c7c85101 | 1362 | goto done; |
036a4a7d | 1363 | |
c7c85101 | 1364 | ret = IRQ_HANDLED; |
036a4a7d | 1365 | |
e7b4c6b1 DV |
1366 | if (IS_GEN5(dev)) |
1367 | ilk_gt_irq_handler(dev, dev_priv, gt_iir); | |
1368 | else | |
1369 | snb_gt_irq_handler(dev, dev_priv, gt_iir); | |
01c66889 | 1370 | |
ce99c256 DV |
1371 | if (de_iir & DE_AUX_CHANNEL_A) |
1372 | dp_aux_irq_handler(dev); | |
1373 | ||
c7c85101 | 1374 | if (de_iir & DE_GSE) |
81a07809 | 1375 | intel_opregion_asle_intr(dev); |
c650156a | 1376 | |
74d44445 DV |
1377 | if (de_iir & DE_PIPEA_VBLANK) |
1378 | drm_handle_vblank(dev, 0); | |
1379 | ||
1380 | if (de_iir & DE_PIPEB_VBLANK) | |
1381 | drm_handle_vblank(dev, 1); | |
1382 | ||
de032bf4 PZ |
1383 | if (de_iir & DE_POISON) |
1384 | DRM_ERROR("Poison interrupt\n"); | |
1385 | ||
8664281b PZ |
1386 | if (de_iir & DE_PIPEA_FIFO_UNDERRUN) |
1387 | if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false)) | |
1388 | DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n"); | |
1389 | ||
1390 | if (de_iir & DE_PIPEB_FIFO_UNDERRUN) | |
1391 | if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false)) | |
1392 | DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n"); | |
1393 | ||
f072d2e7 | 1394 | if (de_iir & DE_PLANEA_FLIP_DONE) { |
013d5aa2 | 1395 | intel_prepare_page_flip(dev, 0); |
2bbda389 | 1396 | intel_finish_page_flip_plane(dev, 0); |
f072d2e7 | 1397 | } |
013d5aa2 | 1398 | |
f072d2e7 | 1399 | if (de_iir & DE_PLANEB_FLIP_DONE) { |
013d5aa2 | 1400 | intel_prepare_page_flip(dev, 1); |
2bbda389 | 1401 | intel_finish_page_flip_plane(dev, 1); |
f072d2e7 | 1402 | } |
013d5aa2 | 1403 | |
c7c85101 | 1404 | /* check event from PCH */ |
776ad806 | 1405 | if (de_iir & DE_PCH_EVENT) { |
acd15b6c DV |
1406 | u32 pch_iir = I915_READ(SDEIIR); |
1407 | ||
23e81d69 AJ |
1408 | if (HAS_PCH_CPT(dev)) |
1409 | cpt_irq_handler(dev, pch_iir); | |
1410 | else | |
1411 | ibx_irq_handler(dev, pch_iir); | |
acd15b6c DV |
1412 | |
1413 | /* should clear PCH hotplug event before clearing CPU irq */ |
1414 | I915_WRITE(SDEIIR, pch_iir); | |
776ad806 | 1415 | } |
036a4a7d | 1416 | |
73edd18f DV |
1417 | if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT) |
1418 | ironlake_handle_rps_change(dev); | |
f97108d1 | 1419 | |
4848405c | 1420 | if (IS_GEN6(dev) && pm_iir & GEN6_PM_RPS_EVENTS) |
fc6826d1 | 1421 | gen6_queue_rps_work(dev_priv, pm_iir); |
3b8d8d91 | 1422 | |
c7c85101 ZN |
1423 | I915_WRITE(GTIIR, gt_iir); |
1424 | I915_WRITE(DEIIR, de_iir); | |
4912d041 | 1425 | I915_WRITE(GEN6_PMIIR, pm_iir); |
c7c85101 ZN |
1426 | |
1427 | done: | |
2d109a84 | 1428 | I915_WRITE(DEIER, de_ier); |
3143a2bf | 1429 | POSTING_READ(DEIER); |
44498aea PZ |
1430 | I915_WRITE(SDEIER, sde_ier); |
1431 | POSTING_READ(SDEIER); | |
2d109a84 | 1432 | |
036a4a7d ZW |
1433 | return ret; |
1434 | } | |
1435 | ||
8a905236 JB |
1436 | /** |
1437 | * i915_error_work_func - do process context error handling work | |
1438 | * @work: work struct | |
1439 | * | |
1440 | * Fire an error uevent so userspace can see that a hang or error | |
1441 | * was detected. | |
1442 | */ | |
1443 | static void i915_error_work_func(struct work_struct *work) | |
1444 | { | |
1f83fee0 DV |
1445 | struct i915_gpu_error *error = container_of(work, struct i915_gpu_error, |
1446 | work); | |
1447 | drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t, | |
1448 | gpu_error); | |
8a905236 | 1449 | struct drm_device *dev = dev_priv->dev; |
f69061be | 1450 | struct intel_ring_buffer *ring; |
f316a42c BG |
1451 | char *error_event[] = { "ERROR=1", NULL }; |
1452 | char *reset_event[] = { "RESET=1", NULL }; | |
1453 | char *reset_done_event[] = { "ERROR=0", NULL }; | |
f69061be | 1454 | int i, ret; |
8a905236 | 1455 | |
f316a42c BG |
1456 | kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event); |
1457 | ||
7db0ba24 DV |
1458 | /* |
1459 | * Note that there's only one work item which does gpu resets, so we | |
1460 | * need not worry about concurrent gpu resets potentially incrementing | |
1461 | * error->reset_counter twice. We only need to take care of another | |
1462 | * racing irq/hangcheck declaring the gpu dead for a second time. A | |
1463 | * quick check for that is good enough: schedule_work ensures the | |
1464 | * correct ordering between hang detection and this work item, and since | |
1465 | * the reset in-progress bit is only ever set by code outside of this | |
1466 | * work we don't need to worry about any other races. | |
1467 | */ | |
1468 | if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) { | |
f803aa55 | 1469 | DRM_DEBUG_DRIVER("resetting chip\n"); |
7db0ba24 DV |
1470 | kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, |
1471 | reset_event); | |
1f83fee0 | 1472 | |
f69061be DV |
1473 | ret = i915_reset(dev); |
1474 | ||
1475 | if (ret == 0) { | |
1476 | /* | |
1477 | * After all the gem state is reset, increment the reset | |
1478 | * counter and wake up everyone waiting for the reset to | |
1479 | * complete. | |
1480 | * | |
1481 | * Since unlock operations are a one-sided barrier only, | |
1482 | * we need to insert a barrier here to order any seqno | |
1483 | * updates before | |
1484 | * the counter increment. | |
1485 | */ | |
1486 | smp_mb__before_atomic_inc(); | |
1487 | atomic_inc(&dev_priv->gpu_error.reset_counter); | |
1488 | ||
1489 | kobject_uevent_env(&dev->primary->kdev.kobj, | |
1490 | KOBJ_CHANGE, reset_done_event); | |
1f83fee0 DV |
1491 | } else { |
1492 | atomic_set(&error->reset_counter, I915_WEDGED); | |
f316a42c | 1493 | } |
1f83fee0 | 1494 | |
f69061be DV |
1495 | for_each_ring(ring, dev_priv, i) |
1496 | wake_up_all(&ring->irq_queue); | |
1497 | ||
96a02917 VS |
1498 | intel_display_handle_reset(dev); |
1499 | ||
1f83fee0 | 1500 | wake_up_all(&dev_priv->gpu_error.reset_queue); |
f316a42c | 1501 | } |
8a905236 JB |
1502 | } |
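/*
 * The function above is the only hang notification userspace gets: a
 * KOBJ_CHANGE uevent with ERROR=1 when the hang is detected, RESET=1 just
 * before the GPU reset is attempted, and ERROR=0 once the reset completed.
 * Purely as an illustration (the rule text and script path are assumptions,
 * not part of i915), a udev rule could key off the first event to collect
 * the captured state from /sys/class/drm/cardN/error:
 *
 *   ACTION=="change", SUBSYSTEM=="drm", ENV{ERROR}=="1", \
 *       RUN+="/usr/local/bin/save-i915-error-state.sh"
 */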
1503 | ||
85f9e50d DV |
1504 | /* NB: please notice the memset */ |
1505 | static void i915_get_extra_instdone(struct drm_device *dev, | |
1506 | uint32_t *instdone) | |
1507 | { | |
1508 | struct drm_i915_private *dev_priv = dev->dev_private; | |
1509 | memset(instdone, 0, sizeof(*instdone) * I915_NUM_INSTDONE_REG); | |
1510 | ||
1511 | switch(INTEL_INFO(dev)->gen) { | |
1512 | case 2: | |
1513 | case 3: | |
1514 | instdone[0] = I915_READ(INSTDONE); | |
1515 | break; | |
1516 | case 4: | |
1517 | case 5: | |
1518 | case 6: | |
1519 | instdone[0] = I915_READ(INSTDONE_I965); | |
1520 | instdone[1] = I915_READ(INSTDONE1); | |
1521 | break; | |
1522 | default: | |
1523 | WARN_ONCE(1, "Unsupported platform\n"); | |
1524 | case 7: | |
1525 | instdone[0] = I915_READ(GEN7_INSTDONE_1); | |
1526 | instdone[1] = I915_READ(GEN7_SC_INSTDONE); | |
1527 | instdone[2] = I915_READ(GEN7_SAMPLER_INSTDONE); | |
1528 | instdone[3] = I915_READ(GEN7_ROW_INSTDONE); | |
1529 | break; | |
1530 | } | |
1531 | } | |
1532 | ||
3bd3c932 | 1533 | #ifdef CONFIG_DEBUG_FS |
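/*
 * i915_error_object_create_sized() below snapshots num_pages pages of a GEM
 * object into kmalloc'ed memory for the error state. Each page is copied
 * from whichever backing is currently valid: through the GTT aperture when
 * the object has a global GTT mapping inside the mappable range, straight
 * out of stolen memory, or from the shmem pages via kmap with clflushes
 * around the copy. GFP_ATOMIC and local_irq_save() are used because error
 * capture may run from interrupt context.
 */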
9df30794 | 1534 | static struct drm_i915_error_object * |
d0d045e8 BW |
1535 | i915_error_object_create_sized(struct drm_i915_private *dev_priv, |
1536 | struct drm_i915_gem_object *src, | |
1537 | const int num_pages) | |
9df30794 CW |
1538 | { |
1539 | struct drm_i915_error_object *dst; | |
d0d045e8 | 1540 | int i; |
e56660dd | 1541 | u32 reloc_offset; |
9df30794 | 1542 | |
05394f39 | 1543 | if (src == NULL || src->pages == NULL) |
9df30794 CW |
1544 | return NULL; |
1545 | ||
d0d045e8 | 1546 | dst = kmalloc(sizeof(*dst) + num_pages * sizeof(u32 *), GFP_ATOMIC); |
9df30794 CW |
1547 | if (dst == NULL) |
1548 | return NULL; | |
1549 | ||
f343c5f6 | 1550 | reloc_offset = dst->gtt_offset = i915_gem_obj_ggtt_offset(src); |
d0d045e8 | 1551 | for (i = 0; i < num_pages; i++) { |
788885ae | 1552 | unsigned long flags; |
e56660dd | 1553 | void *d; |
788885ae | 1554 | |
e56660dd | 1555 | d = kmalloc(PAGE_SIZE, GFP_ATOMIC); |
9df30794 CW |
1556 | if (d == NULL) |
1557 | goto unwind; | |
e56660dd | 1558 | |
788885ae | 1559 | local_irq_save(flags); |
5d4545ae | 1560 | if (reloc_offset < dev_priv->gtt.mappable_end && |
74898d7e | 1561 | src->has_global_gtt_mapping) { |
172975aa CW |
1562 | void __iomem *s; |
1563 | ||
1564 | /* Simply ignore tiling or any overlapping fence. | |
1565 | * It's part of the error state, and this hopefully | |
1566 | * captures what the GPU read. | |
1567 | */ | |
1568 | ||
5d4545ae | 1569 | s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable, |
172975aa CW |
1570 | reloc_offset); |
1571 | memcpy_fromio(d, s, PAGE_SIZE); | |
1572 | io_mapping_unmap_atomic(s); | |
960e3564 CW |
1573 | } else if (src->stolen) { |
1574 | unsigned long offset; | |
1575 | ||
1576 | offset = dev_priv->mm.stolen_base; | |
1577 | offset += src->stolen->start; | |
1578 | offset += i << PAGE_SHIFT; | |
1579 | ||
1a240d4d | 1580 | memcpy_fromio(d, (void __iomem *) offset, PAGE_SIZE); |
172975aa | 1581 | } else { |
9da3da66 | 1582 | struct page *page; |
172975aa CW |
1583 | void *s; |
1584 | ||
9da3da66 | 1585 | page = i915_gem_object_get_page(src, i); |
172975aa | 1586 | |
9da3da66 CW |
1587 | drm_clflush_pages(&page, 1); |
1588 | ||
1589 | s = kmap_atomic(page); | |
172975aa CW |
1590 | memcpy(d, s, PAGE_SIZE); |
1591 | kunmap_atomic(s); | |
1592 | ||
9da3da66 | 1593 | drm_clflush_pages(&page, 1); |
172975aa | 1594 | } |
788885ae | 1595 | local_irq_restore(flags); |
e56660dd | 1596 | |
9da3da66 | 1597 | dst->pages[i] = d; |
e56660dd CW |
1598 | |
1599 | reloc_offset += PAGE_SIZE; | |
9df30794 | 1600 | } |
d0d045e8 | 1601 | dst->page_count = num_pages; |
9df30794 CW |
1602 | |
1603 | return dst; | |
1604 | ||
1605 | unwind: | |
9da3da66 CW |
1606 | while (i--) |
1607 | kfree(dst->pages[i]); | |
9df30794 CW |
1608 | kfree(dst); |
1609 | return NULL; | |
1610 | } | |
d0d045e8 BW |
1611 | #define i915_error_object_create(dev_priv, src) \ |
1612 | i915_error_object_create_sized((dev_priv), (src), \ | |
1613 | (src)->base.size>>PAGE_SHIFT) | |
9df30794 CW |
1614 | |
1615 | static void | |
1616 | i915_error_object_free(struct drm_i915_error_object *obj) | |
1617 | { | |
1618 | int page; | |
1619 | ||
1620 | if (obj == NULL) | |
1621 | return; | |
1622 | ||
1623 | for (page = 0; page < obj->page_count; page++) | |
1624 | kfree(obj->pages[page]); | |
1625 | ||
1626 | kfree(obj); | |
1627 | } | |
1628 | ||
742cbee8 DV |
1629 | void |
1630 | i915_error_state_free(struct kref *error_ref) | |
9df30794 | 1631 | { |
742cbee8 DV |
1632 | struct drm_i915_error_state *error = container_of(error_ref, |
1633 | typeof(*error), ref); | |
e2f973d5 CW |
1634 | int i; |
1635 | ||
52d39a21 CW |
1636 | for (i = 0; i < ARRAY_SIZE(error->ring); i++) { |
1637 | i915_error_object_free(error->ring[i].batchbuffer); | |
1638 | i915_error_object_free(error->ring[i].ringbuffer); | |
7ed73da0 | 1639 | i915_error_object_free(error->ring[i].ctx); |
52d39a21 CW |
1640 | kfree(error->ring[i].requests); |
1641 | } | |
e2f973d5 | 1642 | |
9df30794 | 1643 | kfree(error->active_bo); |
6ef3d427 | 1644 | kfree(error->overlay); |
7ed73da0 | 1645 | kfree(error->display); |
9df30794 CW |
1646 | kfree(error); |
1647 | } | |
1b50247a CW |
1648 | static void capture_bo(struct drm_i915_error_buffer *err, |
1649 | struct drm_i915_gem_object *obj) | |
1650 | { | |
1651 | err->size = obj->base.size; | |
1652 | err->name = obj->base.name; | |
0201f1ec CW |
1653 | err->rseqno = obj->last_read_seqno; |
1654 | err->wseqno = obj->last_write_seqno; | |
f343c5f6 | 1655 | err->gtt_offset = i915_gem_obj_ggtt_offset(obj); |
1b50247a CW |
1656 | err->read_domains = obj->base.read_domains; |
1657 | err->write_domain = obj->base.write_domain; | |
1658 | err->fence_reg = obj->fence_reg; | |
1659 | err->pinned = 0; | |
1660 | if (obj->pin_count > 0) | |
1661 | err->pinned = 1; | |
1662 | if (obj->user_pin_count > 0) | |
1663 | err->pinned = -1; | |
1664 | err->tiling = obj->tiling_mode; | |
1665 | err->dirty = obj->dirty; | |
1666 | err->purgeable = obj->madv != I915_MADV_WILLNEED; | |
1667 | err->ring = obj->ring ? obj->ring->id : -1; | |
1668 | err->cache_level = obj->cache_level; | |
1669 | } | |
9df30794 | 1670 | |
1b50247a CW |
1671 | static u32 capture_active_bo(struct drm_i915_error_buffer *err, |
1672 | int count, struct list_head *head) | |
c724e8a9 CW |
1673 | { |
1674 | struct drm_i915_gem_object *obj; | |
1675 | int i = 0; | |
1676 | ||
1677 | list_for_each_entry(obj, head, mm_list) { | |
1b50247a | 1678 | capture_bo(err++, obj); |
c724e8a9 CW |
1679 | if (++i == count) |
1680 | break; | |
1b50247a CW |
1681 | } |
1682 | ||
1683 | return i; | |
1684 | } | |
1685 | ||
1686 | static u32 capture_pinned_bo(struct drm_i915_error_buffer *err, | |
1687 | int count, struct list_head *head) | |
1688 | { | |
1689 | struct drm_i915_gem_object *obj; | |
1690 | int i = 0; | |
1691 | ||
35c20a60 | 1692 | list_for_each_entry(obj, head, global_list) { |
1b50247a CW |
1693 | if (obj->pin_count == 0) |
1694 | continue; | |
c724e8a9 | 1695 | |
1b50247a CW |
1696 | capture_bo(err++, obj); |
1697 | if (++i == count) | |
1698 | break; | |
c724e8a9 CW |
1699 | } |
1700 | ||
1701 | return i; | |
1702 | } | |
1703 | ||
748ebc60 CW |
1704 | static void i915_gem_record_fences(struct drm_device *dev, |
1705 | struct drm_i915_error_state *error) | |
1706 | { | |
1707 | struct drm_i915_private *dev_priv = dev->dev_private; | |
1708 | int i; | |
1709 | ||
1710 | /* Fences */ | |
1711 | switch (INTEL_INFO(dev)->gen) { | |
775d17b6 | 1712 | case 7: |
748ebc60 | 1713 | case 6: |
42b5aeab | 1714 | for (i = 0; i < dev_priv->num_fence_regs; i++) |
748ebc60 CW |
1715 | error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8)); |
1716 | break; | |
1717 | case 5: | |
1718 | case 4: | |
1719 | for (i = 0; i < 16; i++) | |
1720 | error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8)); | |
1721 | break; | |
1722 | case 3: | |
1723 | if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) | |
1724 | for (i = 0; i < 8; i++) | |
1725 | error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4)); | |
1726 | case 2: | |
1727 | for (i = 0; i < 8; i++) | |
1728 | error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4)); | |
1729 | break; | |
1730 | ||
7dbf9d6e BW |
1731 | default: |
1732 | BUG(); | |
748ebc60 CW |
1733 | } |
1734 | } | |
1735 | ||
bcfb2e28 CW |
1736 | static struct drm_i915_error_object * |
1737 | i915_error_first_batchbuffer(struct drm_i915_private *dev_priv, | |
1738 | struct intel_ring_buffer *ring) | |
1739 | { | |
1740 | struct drm_i915_gem_object *obj; | |
1741 | u32 seqno; | |
1742 | ||
1743 | if (!ring->get_seqno) | |
1744 | return NULL; | |
1745 | ||
b45305fc DV |
1746 | if (HAS_BROKEN_CS_TLB(dev_priv->dev)) { |
1747 | u32 acthd = I915_READ(ACTHD); | |
1748 | ||
1749 | if (WARN_ON(ring->id != RCS)) | |
1750 | return NULL; | |
1751 | ||
1752 | obj = ring->private; | |
f343c5f6 BW |
1753 | if (acthd >= i915_gem_obj_ggtt_offset(obj) && |
1754 | acthd < i915_gem_obj_ggtt_offset(obj) + obj->base.size) | |
b45305fc DV |
1755 | return i915_error_object_create(dev_priv, obj); |
1756 | } | |
1757 | ||
b2eadbc8 | 1758 | seqno = ring->get_seqno(ring, false); |
bcfb2e28 CW |
1759 | list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) { |
1760 | if (obj->ring != ring) | |
1761 | continue; | |
1762 | ||
0201f1ec | 1763 | if (i915_seqno_passed(seqno, obj->last_read_seqno)) |
bcfb2e28 CW |
1764 | continue; |
1765 | ||
1766 | if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0) | |
1767 | continue; | |
1768 | ||
1769 | /* We need to copy these to an anonymous buffer as the simplest | |
1770 | * method to avoid being overwritten by userspace. | |
1771 | */ | |
1772 | return i915_error_object_create(dev_priv, obj); | |
1773 | } | |
1774 | ||
1775 | return NULL; | |
1776 | } | |
1777 | ||
d27b1e0e DV |
1778 | static void i915_record_ring_state(struct drm_device *dev, |
1779 | struct drm_i915_error_state *error, | |
1780 | struct intel_ring_buffer *ring) | |
1781 | { | |
1782 | struct drm_i915_private *dev_priv = dev->dev_private; | |
1783 | ||
33f3f518 | 1784 | if (INTEL_INFO(dev)->gen >= 6) { |
12f55818 | 1785 | error->rc_psmi[ring->id] = I915_READ(ring->mmio_base + 0x50); |
33f3f518 | 1786 | error->fault_reg[ring->id] = I915_READ(RING_FAULT_REG(ring)); |
7e3b8737 DV |
1787 | error->semaphore_mboxes[ring->id][0] |
1788 | = I915_READ(RING_SYNC_0(ring->mmio_base)); | |
1789 | error->semaphore_mboxes[ring->id][1] | |
1790 | = I915_READ(RING_SYNC_1(ring->mmio_base)); | |
df2b23d9 CW |
1791 | error->semaphore_seqno[ring->id][0] = ring->sync_seqno[0]; |
1792 | error->semaphore_seqno[ring->id][1] = ring->sync_seqno[1]; | |
33f3f518 | 1793 | } |
c1cd90ed | 1794 | |
d27b1e0e | 1795 | if (INTEL_INFO(dev)->gen >= 4) { |
9d2f41fa | 1796 | error->faddr[ring->id] = I915_READ(RING_DMA_FADD(ring->mmio_base)); |
d27b1e0e DV |
1797 | error->ipeir[ring->id] = I915_READ(RING_IPEIR(ring->mmio_base)); |
1798 | error->ipehr[ring->id] = I915_READ(RING_IPEHR(ring->mmio_base)); | |
1799 | error->instdone[ring->id] = I915_READ(RING_INSTDONE(ring->mmio_base)); | |
c1cd90ed | 1800 | error->instps[ring->id] = I915_READ(RING_INSTPS(ring->mmio_base)); |
050ee91f | 1801 | if (ring->id == RCS) |
d27b1e0e | 1802 | error->bbaddr = I915_READ64(BB_ADDR); |
d27b1e0e | 1803 | } else { |
9d2f41fa | 1804 | error->faddr[ring->id] = I915_READ(DMA_FADD_I8XX); |
d27b1e0e DV |
1805 | error->ipeir[ring->id] = I915_READ(IPEIR); |
1806 | error->ipehr[ring->id] = I915_READ(IPEHR); | |
1807 | error->instdone[ring->id] = I915_READ(INSTDONE); | |
d27b1e0e DV |
1808 | } |
1809 | ||
9574b3fe | 1810 | error->waiting[ring->id] = waitqueue_active(&ring->irq_queue); |
c1cd90ed | 1811 | error->instpm[ring->id] = I915_READ(RING_INSTPM(ring->mmio_base)); |
b2eadbc8 | 1812 | error->seqno[ring->id] = ring->get_seqno(ring, false); |
d27b1e0e | 1813 | error->acthd[ring->id] = intel_ring_get_active_head(ring); |
c1cd90ed DV |
1814 | error->head[ring->id] = I915_READ_HEAD(ring); |
1815 | error->tail[ring->id] = I915_READ_TAIL(ring); | |
0f3b6849 | 1816 | error->ctl[ring->id] = I915_READ_CTL(ring); |
7e3b8737 DV |
1817 | |
1818 | error->cpu_ring_head[ring->id] = ring->head; | |
1819 | error->cpu_ring_tail[ring->id] = ring->tail; | |
d27b1e0e DV |
1820 | } |
1821 | ||
8c123e54 BW |
1822 | |
1823 | static void i915_gem_record_active_context(struct intel_ring_buffer *ring, | |
1824 | struct drm_i915_error_state *error, | |
1825 | struct drm_i915_error_ring *ering) | |
1826 | { | |
1827 | struct drm_i915_private *dev_priv = ring->dev->dev_private; | |
1828 | struct drm_i915_gem_object *obj; | |
1829 | ||
1830 | /* Currently render ring is the only HW context user */ | |
1831 | if (ring->id != RCS || !error->ccid) | |
1832 | return; | |
1833 | ||
35c20a60 | 1834 | list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) { |
f343c5f6 | 1835 | if ((error->ccid & PAGE_MASK) == i915_gem_obj_ggtt_offset(obj)) { |
8c123e54 BW |
1836 | ering->ctx = i915_error_object_create_sized(dev_priv, |
1837 | obj, 1); | |
3ef8fb5a | 1838 | break; |
8c123e54 BW |
1839 | } |
1840 | } | |
1841 | } | |
1842 | ||
52d39a21 CW |
1843 | static void i915_gem_record_rings(struct drm_device *dev, |
1844 | struct drm_i915_error_state *error) | |
1845 | { | |
1846 | struct drm_i915_private *dev_priv = dev->dev_private; | |
b4519513 | 1847 | struct intel_ring_buffer *ring; |
52d39a21 CW |
1848 | struct drm_i915_gem_request *request; |
1849 | int i, count; | |
1850 | ||
b4519513 | 1851 | for_each_ring(ring, dev_priv, i) { |
52d39a21 CW |
1852 | i915_record_ring_state(dev, error, ring); |
1853 | ||
1854 | error->ring[i].batchbuffer = | |
1855 | i915_error_first_batchbuffer(dev_priv, ring); | |
1856 | ||
1857 | error->ring[i].ringbuffer = | |
1858 | i915_error_object_create(dev_priv, ring->obj); | |
1859 | ||
8c123e54 BW |
1860 | |
1861 | i915_gem_record_active_context(ring, error, &error->ring[i]); | |
1862 | ||
52d39a21 CW |
1863 | count = 0; |
1864 | list_for_each_entry(request, &ring->request_list, list) | |
1865 | count++; | |
1866 | ||
1867 | error->ring[i].num_requests = count; | |
1868 | error->ring[i].requests = | |
1869 | kmalloc(count*sizeof(struct drm_i915_error_request), | |
1870 | GFP_ATOMIC); | |
1871 | if (error->ring[i].requests == NULL) { | |
1872 | error->ring[i].num_requests = 0; | |
1873 | continue; | |
1874 | } | |
1875 | ||
1876 | count = 0; | |
1877 | list_for_each_entry(request, &ring->request_list, list) { | |
1878 | struct drm_i915_error_request *erq; | |
1879 | ||
1880 | erq = &error->ring[i].requests[count++]; | |
1881 | erq->seqno = request->seqno; | |
1882 | erq->jiffies = request->emitted_jiffies; | |
ee4f42b1 | 1883 | erq->tail = request->tail; |
52d39a21 CW |
1884 | } |
1885 | } | |
1886 | } | |
1887 | ||
26b7c224 BW |
1888 | static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv, |
1889 | struct drm_i915_error_state *error) | |
1890 | { | |
1891 | struct drm_i915_gem_object *obj; | |
1892 | int i; | |
1893 | ||
1894 | i = 0; | |
1895 | list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) | |
1896 | i++; | |
1897 | error->active_bo_count = i; | |
1898 | list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) | |
1899 | if (obj->pin_count) | |
1900 | i++; | |
1901 | error->pinned_bo_count = i - error->active_bo_count; | |
1902 | ||
1903 | if (i) { | |
1904 | error->active_bo = kmalloc(sizeof(*error->active_bo)*i, | |
1905 | GFP_ATOMIC); | |
1906 | if (error->active_bo) | |
1907 | error->pinned_bo = | |
1908 | error->active_bo + error->active_bo_count; | |
1909 | } | |
1910 | ||
1911 | if (error->active_bo) | |
1912 | error->active_bo_count = | |
1913 | capture_active_bo(error->active_bo, | |
1914 | error->active_bo_count, | |
1915 | &dev_priv->mm.active_list); | |
1916 | ||
1917 | if (error->pinned_bo) | |
1918 | error->pinned_bo_count = | |
1919 | capture_pinned_bo(error->pinned_bo, | |
1920 | error->pinned_bo_count, | |
1921 | &dev_priv->mm.bound_list); | |
1922 | } | |
1923 | ||
8a905236 JB |
1924 | /** |
1925 | * i915_capture_error_state - capture an error record for later analysis | |
1926 | * @dev: drm device | |
1927 | * | |
1928 | * Should be called when an error is detected (either a hang or an error | |
1929 | * interrupt) to capture error state from the time of the error. Fills | |
1930 | * out a structure which becomes available in debugfs for user level tools | |
1931 | * to pick up. | |
1932 | */ | |
63eeaf38 JB |
1933 | static void i915_capture_error_state(struct drm_device *dev) |
1934 | { | |
1935 | struct drm_i915_private *dev_priv = dev->dev_private; | |
1936 | struct drm_i915_error_state *error; | |
1937 | unsigned long flags; | |
26b7c224 | 1938 | int pipe; |
63eeaf38 | 1939 | |
99584db3 DV |
1940 | spin_lock_irqsave(&dev_priv->gpu_error.lock, flags); |
1941 | error = dev_priv->gpu_error.first_error; | |
1942 | spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags); | |
9df30794 CW |
1943 | if (error) |
1944 | return; | |
63eeaf38 | 1945 | |
9db4a9c7 | 1946 | /* Account for pipe specific data like PIPE*STAT */ |
33f3f518 | 1947 | error = kzalloc(sizeof(*error), GFP_ATOMIC); |
63eeaf38 | 1948 | if (!error) { |
9df30794 CW |
1949 | DRM_DEBUG_DRIVER("out of memory, not capturing error state\n"); |
1950 | return; | |
63eeaf38 JB |
1951 | } |
1952 | ||
5d83d294 | 1953 | DRM_INFO("capturing error event; look for more information in " |
ef86ddce | 1954 | "/sys/class/drm/card%d/error\n", dev->primary->index); |
2fa772f3 | 1955 | |
742cbee8 | 1956 | kref_init(&error->ref); |
63eeaf38 JB |
1957 | error->eir = I915_READ(EIR); |
1958 | error->pgtbl_er = I915_READ(PGTBL_ER); | |
211816ec BW |
1959 | if (HAS_HW_CONTEXTS(dev)) |
1960 | error->ccid = I915_READ(CCID); | |
be998e2e BW |
1961 | |
1962 | if (HAS_PCH_SPLIT(dev)) | |
1963 | error->ier = I915_READ(DEIER) | I915_READ(GTIER); | |
1964 | else if (IS_VALLEYVIEW(dev)) | |
1965 | error->ier = I915_READ(GTIER) | I915_READ(VLV_IER); | |
1966 | else if (IS_GEN2(dev)) | |
1967 | error->ier = I915_READ16(IER); | |
1968 | else | |
1969 | error->ier = I915_READ(IER); | |
1970 | ||
0f3b6849 CW |
1971 | if (INTEL_INFO(dev)->gen >= 6) |
1972 | error->derrmr = I915_READ(DERRMR); | |
1973 | ||
1974 | if (IS_VALLEYVIEW(dev)) | |
1975 | error->forcewake = I915_READ(FORCEWAKE_VLV); | |
1976 | else if (INTEL_INFO(dev)->gen >= 7) | |
1977 | error->forcewake = I915_READ(FORCEWAKE_MT); | |
1978 | else if (INTEL_INFO(dev)->gen == 6) | |
1979 | error->forcewake = I915_READ(FORCEWAKE); | |
1980 | ||
4f3308b9 PZ |
1981 | if (!HAS_PCH_SPLIT(dev)) |
1982 | for_each_pipe(pipe) | |
1983 | error->pipestat[pipe] = I915_READ(PIPESTAT(pipe)); | |
d27b1e0e | 1984 | |
33f3f518 | 1985 | if (INTEL_INFO(dev)->gen >= 6) { |
f406839f | 1986 | error->error = I915_READ(ERROR_GEN6); |
33f3f518 DV |
1987 | error->done_reg = I915_READ(DONE_REG); |
1988 | } | |
d27b1e0e | 1989 | |
71e172e8 BW |
1990 | if (INTEL_INFO(dev)->gen == 7) |
1991 | error->err_int = I915_READ(GEN7_ERR_INT); | |
1992 | ||
050ee91f BW |
1993 | i915_get_extra_instdone(dev, error->extra_instdone); |
1994 | ||
26b7c224 | 1995 | i915_gem_capture_buffers(dev_priv, error); |
748ebc60 | 1996 | i915_gem_record_fences(dev, error); |
52d39a21 | 1997 | i915_gem_record_rings(dev, error); |
9df30794 | 1998 | |
9df30794 CW |
1999 | do_gettimeofday(&error->time); |
2000 | ||
6ef3d427 | 2001 | error->overlay = intel_overlay_capture_error_state(dev); |
c4a1d9e4 | 2002 | error->display = intel_display_capture_error_state(dev); |
6ef3d427 | 2003 | |
99584db3 DV |
2004 | spin_lock_irqsave(&dev_priv->gpu_error.lock, flags); |
2005 | if (dev_priv->gpu_error.first_error == NULL) { | |
2006 | dev_priv->gpu_error.first_error = error; | |
9df30794 CW |
2007 | error = NULL; |
2008 | } | |
99584db3 | 2009 | spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags); |
9df30794 CW |
2010 | |
2011 | if (error) | |
742cbee8 | 2012 | i915_error_state_free(&error->ref); |
9df30794 CW |
2013 | } |
2014 | ||
2015 | void i915_destroy_error_state(struct drm_device *dev) | |
2016 | { | |
2017 | struct drm_i915_private *dev_priv = dev->dev_private; | |
2018 | struct drm_i915_error_state *error; | |
6dc0e816 | 2019 | unsigned long flags; |
9df30794 | 2020 | |
99584db3 DV |
2021 | spin_lock_irqsave(&dev_priv->gpu_error.lock, flags); |
2022 | error = dev_priv->gpu_error.first_error; | |
2023 | dev_priv->gpu_error.first_error = NULL; | |
2024 | spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags); | |
9df30794 CW |
2025 | |
2026 | if (error) | |
742cbee8 | 2027 | kref_put(&error->ref, i915_error_state_free); |
63eeaf38 | 2028 | } |
3bd3c932 CW |
2029 | #else |
2030 | #define i915_capture_error_state(x) | |
2031 | #endif | |
63eeaf38 | 2032 | |
35aed2e6 | 2033 | static void i915_report_and_clear_eir(struct drm_device *dev) |
8a905236 JB |
2034 | { |
2035 | struct drm_i915_private *dev_priv = dev->dev_private; | |
bd9854f9 | 2036 | uint32_t instdone[I915_NUM_INSTDONE_REG]; |
8a905236 | 2037 | u32 eir = I915_READ(EIR); |
050ee91f | 2038 | int pipe, i; |
8a905236 | 2039 | |
35aed2e6 CW |
2040 | if (!eir) |
2041 | return; | |
8a905236 | 2042 | |
a70491cc | 2043 | pr_err("render error detected, EIR: 0x%08x\n", eir); |
8a905236 | 2044 | |
bd9854f9 BW |
2045 | i915_get_extra_instdone(dev, instdone); |
2046 | ||
8a905236 JB |
2047 | if (IS_G4X(dev)) { |
2048 | if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) { | |
2049 | u32 ipeir = I915_READ(IPEIR_I965); | |
2050 | ||
a70491cc JP |
2051 | pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); |
2052 | pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); | |
050ee91f BW |
2053 | for (i = 0; i < ARRAY_SIZE(instdone); i++) |
2054 | pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); | |
a70491cc | 2055 | pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); |
a70491cc | 2056 | pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); |
8a905236 | 2057 | I915_WRITE(IPEIR_I965, ipeir); |
3143a2bf | 2058 | POSTING_READ(IPEIR_I965); |
8a905236 JB |
2059 | } |
2060 | if (eir & GM45_ERROR_PAGE_TABLE) { | |
2061 | u32 pgtbl_err = I915_READ(PGTBL_ER); | |
a70491cc JP |
2062 | pr_err("page table error\n"); |
2063 | pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); | |
8a905236 | 2064 | I915_WRITE(PGTBL_ER, pgtbl_err); |
3143a2bf | 2065 | POSTING_READ(PGTBL_ER); |
8a905236 JB |
2066 | } |
2067 | } | |
2068 | ||
a6c45cf0 | 2069 | if (!IS_GEN2(dev)) { |
8a905236 JB |
2070 | if (eir & I915_ERROR_PAGE_TABLE) { |
2071 | u32 pgtbl_err = I915_READ(PGTBL_ER); | |
a70491cc JP |
2072 | pr_err("page table error\n"); |
2073 | pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); | |
8a905236 | 2074 | I915_WRITE(PGTBL_ER, pgtbl_err); |
3143a2bf | 2075 | POSTING_READ(PGTBL_ER); |
8a905236 JB |
2076 | } |
2077 | } | |
2078 | ||
2079 | if (eir & I915_ERROR_MEMORY_REFRESH) { | |
a70491cc | 2080 | pr_err("memory refresh error:\n"); |
9db4a9c7 | 2081 | for_each_pipe(pipe) |
a70491cc | 2082 | pr_err("pipe %c stat: 0x%08x\n", |
9db4a9c7 | 2083 | pipe_name(pipe), I915_READ(PIPESTAT(pipe))); |
8a905236 JB |
2084 | /* pipestat has already been acked */ |
2085 | } | |
2086 | if (eir & I915_ERROR_INSTRUCTION) { | |
a70491cc JP |
2087 | pr_err("instruction error\n"); |
2088 | pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM)); | |
050ee91f BW |
2089 | for (i = 0; i < ARRAY_SIZE(instdone); i++) |
2090 | pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); | |
a6c45cf0 | 2091 | if (INTEL_INFO(dev)->gen < 4) { |
8a905236 JB |
2092 | u32 ipeir = I915_READ(IPEIR); |
2093 | ||
a70491cc JP |
2094 | pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR)); |
2095 | pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR)); | |
a70491cc | 2096 | pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD)); |
8a905236 | 2097 | I915_WRITE(IPEIR, ipeir); |
3143a2bf | 2098 | POSTING_READ(IPEIR); |
8a905236 JB |
2099 | } else { |
2100 | u32 ipeir = I915_READ(IPEIR_I965); | |
2101 | ||
a70491cc JP |
2102 | pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); |
2103 | pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); | |
a70491cc | 2104 | pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); |
a70491cc | 2105 | pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); |
8a905236 | 2106 | I915_WRITE(IPEIR_I965, ipeir); |
3143a2bf | 2107 | POSTING_READ(IPEIR_I965); |
8a905236 JB |
2108 | } |
2109 | } | |
2110 | ||
2111 | I915_WRITE(EIR, eir); | |
3143a2bf | 2112 | POSTING_READ(EIR); |
8a905236 JB |
2113 | eir = I915_READ(EIR); |
2114 | if (eir) { | |
2115 | /* | |
2116 | * some errors might have become stuck, | |
2117 | * mask them. | |
2118 | */ | |
2119 | DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir); | |
2120 | I915_WRITE(EMR, I915_READ(EMR) | eir); | |
2121 | I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); | |
2122 | } | |
35aed2e6 CW |
2123 | } |
2124 | ||
2125 | /** | |
2126 | * i915_handle_error - handle an error interrupt | |
2127 | * @dev: drm device | |
2128 | * | |
2129 | * Do some basic checking of register state at error interrupt time and |
2130 | * dump it to the syslog. Also call i915_capture_error_state() to make | |
2131 | * sure we get a record and make it available in debugfs. Fire a uevent | |
2132 | * so userspace knows something bad happened (should trigger collection | |
2133 | * of a ring dump etc.). | |
2134 | */ | |
527f9e90 | 2135 | void i915_handle_error(struct drm_device *dev, bool wedged) |
35aed2e6 CW |
2136 | { |
2137 | struct drm_i915_private *dev_priv = dev->dev_private; | |
b4519513 CW |
2138 | struct intel_ring_buffer *ring; |
2139 | int i; | |
35aed2e6 CW |
2140 | |
2141 | i915_capture_error_state(dev); | |
2142 | i915_report_and_clear_eir(dev); | |
8a905236 | 2143 | |
ba1234d1 | 2144 | if (wedged) { |
f69061be DV |
2145 | atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG, |
2146 | &dev_priv->gpu_error.reset_counter); | |
ba1234d1 | 2147 | |
11ed50ec | 2148 | /* |
1f83fee0 DV |
2149 | * Wakeup waiting processes so that the reset work item |
2150 | * doesn't deadlock trying to grab various locks. | |
11ed50ec | 2151 | */ |
b4519513 CW |
2152 | for_each_ring(ring, dev_priv, i) |
2153 | wake_up_all(&ring->irq_queue); | |
11ed50ec BG |
2154 | } |
2155 | ||
99584db3 | 2156 | queue_work(dev_priv->wq, &dev_priv->gpu_error.work); |
8a905236 JB |
2157 | } |
2158 | ||
21ad8330 | 2159 | static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe) |
4e5359cd SF |
2160 | { |
2161 | drm_i915_private_t *dev_priv = dev->dev_private; | |
2162 | struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; | |
2163 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | |
05394f39 | 2164 | struct drm_i915_gem_object *obj; |
4e5359cd SF |
2165 | struct intel_unpin_work *work; |
2166 | unsigned long flags; | |
2167 | bool stall_detected; | |
2168 | ||
2169 | /* Ignore early vblank irqs */ | |
2170 | if (intel_crtc == NULL) | |
2171 | return; | |
2172 | ||
2173 | spin_lock_irqsave(&dev->event_lock, flags); | |
2174 | work = intel_crtc->unpin_work; | |
2175 | ||
e7d841ca CW |
2176 | if (work == NULL || |
2177 | atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE || | |
2178 | !work->enable_stall_check) { | |
4e5359cd SF |
2179 | /* Either the pending flip IRQ arrived, or we're too early. Don't check */ |
2180 | spin_unlock_irqrestore(&dev->event_lock, flags); | |
2181 | return; | |
2182 | } | |
2183 | ||
2184 | /* Potential stall - if we see that the flip has happened, assume a missed interrupt */ | |
05394f39 | 2185 | obj = work->pending_flip_obj; |
a6c45cf0 | 2186 | if (INTEL_INFO(dev)->gen >= 4) { |
9db4a9c7 | 2187 | int dspsurf = DSPSURF(intel_crtc->plane); |
446f2545 | 2188 | stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) == |
f343c5f6 | 2189 | i915_gem_obj_ggtt_offset(obj); |
4e5359cd | 2190 | } else { |
9db4a9c7 | 2191 | int dspaddr = DSPADDR(intel_crtc->plane); |
f343c5f6 | 2192 | stall_detected = I915_READ(dspaddr) == (i915_gem_obj_ggtt_offset(obj) + |
01f2c773 | 2193 | crtc->y * crtc->fb->pitches[0] + |
4e5359cd SF |
2194 | crtc->x * crtc->fb->bits_per_pixel/8); |
2195 | } | |
2196 | ||
2197 | spin_unlock_irqrestore(&dev->event_lock, flags); | |
2198 | ||
2199 | if (stall_detected) { | |
2200 | DRM_DEBUG_DRIVER("Pageflip stall detected\n"); | |
2201 | intel_prepare_page_flip(dev, intel_crtc->plane); | |
2202 | } | |
2203 | } | |
2204 | ||
42f52ef8 KP |
2205 | /* Called from drm generic code, passed 'crtc' which |
2206 | * we use as a pipe index | |
2207 | */ | |
f71d4af4 | 2208 | static int i915_enable_vblank(struct drm_device *dev, int pipe) |
0a3e67a4 JB |
2209 | { |
2210 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
e9d21d7f | 2211 | unsigned long irqflags; |
71e0ffa5 | 2212 | |
5eddb70b | 2213 | if (!i915_pipe_enabled(dev, pipe)) |
71e0ffa5 | 2214 | return -EINVAL; |
0a3e67a4 | 2215 | |
1ec14ad3 | 2216 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); |
f796cf8f | 2217 | if (INTEL_INFO(dev)->gen >= 4) |
7c463586 KP |
2218 | i915_enable_pipestat(dev_priv, pipe, |
2219 | PIPE_START_VBLANK_INTERRUPT_ENABLE); | |
e9d21d7f | 2220 | else |
7c463586 KP |
2221 | i915_enable_pipestat(dev_priv, pipe, |
2222 | PIPE_VBLANK_INTERRUPT_ENABLE); | |
8692d00e CW |
2223 | |
2224 | /* maintain vblank delivery even in deep C-states */ | |
2225 | if (dev_priv->info->gen == 3) | |
6b26c86d | 2226 | I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS)); |
1ec14ad3 | 2227 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
8692d00e | 2228 | |
0a3e67a4 JB |
2229 | return 0; |
2230 | } | |
2231 | ||
f71d4af4 | 2232 | static int ironlake_enable_vblank(struct drm_device *dev, int pipe) |
f796cf8f JB |
2233 | { |
2234 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
2235 | unsigned long irqflags; | |
2236 | ||
2237 | if (!i915_pipe_enabled(dev, pipe)) | |
2238 | return -EINVAL; | |
2239 | ||
2240 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | |
2241 | ironlake_enable_display_irq(dev_priv, (pipe == 0) ? | |
0206e353 | 2242 | DE_PIPEA_VBLANK : DE_PIPEB_VBLANK); |
f796cf8f JB |
2243 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
2244 | ||
2245 | return 0; | |
2246 | } | |
2247 | ||
f71d4af4 | 2248 | static int ivybridge_enable_vblank(struct drm_device *dev, int pipe) |
b1f14ad0 JB |
2249 | { |
2250 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
2251 | unsigned long irqflags; | |
2252 | ||
2253 | if (!i915_pipe_enabled(dev, pipe)) | |
2254 | return -EINVAL; | |
2255 | ||
2256 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | |
b615b57a CW |
2257 | ironlake_enable_display_irq(dev_priv, |
2258 | DE_PIPEA_VBLANK_IVB << (5 * pipe)); | |
b1f14ad0 JB |
2259 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
2260 | ||
2261 | return 0; | |
2262 | } | |
2263 | ||
7e231dbe JB |
2264 | static int valleyview_enable_vblank(struct drm_device *dev, int pipe) |
2265 | { | |
2266 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
2267 | unsigned long irqflags; | |
31acc7f5 | 2268 | u32 imr; |
7e231dbe JB |
2269 | |
2270 | if (!i915_pipe_enabled(dev, pipe)) | |
2271 | return -EINVAL; | |
2272 | ||
2273 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | |
7e231dbe | 2274 | imr = I915_READ(VLV_IMR); |
31acc7f5 | 2275 | if (pipe == 0) |
7e231dbe | 2276 | imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; |
31acc7f5 | 2277 | else |
7e231dbe | 2278 | imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; |
7e231dbe | 2279 | I915_WRITE(VLV_IMR, imr); |
31acc7f5 JB |
2280 | i915_enable_pipestat(dev_priv, pipe, |
2281 | PIPE_START_VBLANK_INTERRUPT_ENABLE); | |
7e231dbe JB |
2282 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
2283 | ||
2284 | return 0; | |
2285 | } | |
2286 | ||
42f52ef8 KP |
2287 | /* Called from drm generic code, passed 'crtc' which |
2288 | * we use as a pipe index | |
2289 | */ | |
f71d4af4 | 2290 | static void i915_disable_vblank(struct drm_device *dev, int pipe) |
0a3e67a4 JB |
2291 | { |
2292 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
e9d21d7f | 2293 | unsigned long irqflags; |
0a3e67a4 | 2294 | |
1ec14ad3 | 2295 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); |
8692d00e | 2296 | if (dev_priv->info->gen == 3) |
6b26c86d | 2297 | I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS)); |
8692d00e | 2298 | |
f796cf8f JB |
2299 | i915_disable_pipestat(dev_priv, pipe, |
2300 | PIPE_VBLANK_INTERRUPT_ENABLE | | |
2301 | PIPE_START_VBLANK_INTERRUPT_ENABLE); | |
2302 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | |
2303 | } | |
2304 | ||
f71d4af4 | 2305 | static void ironlake_disable_vblank(struct drm_device *dev, int pipe) |
f796cf8f JB |
2306 | { |
2307 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
2308 | unsigned long irqflags; | |
2309 | ||
2310 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | |
2311 | ironlake_disable_display_irq(dev_priv, (pipe == 0) ? | |
0206e353 | 2312 | DE_PIPEA_VBLANK : DE_PIPEB_VBLANK); |
1ec14ad3 | 2313 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
0a3e67a4 JB |
2314 | } |
2315 | ||
f71d4af4 | 2316 | static void ivybridge_disable_vblank(struct drm_device *dev, int pipe) |
b1f14ad0 JB |
2317 | { |
2318 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
2319 | unsigned long irqflags; | |
2320 | ||
2321 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | |
b615b57a CW |
2322 | ironlake_disable_display_irq(dev_priv, |
2323 | DE_PIPEA_VBLANK_IVB << (pipe * 5)); | |
b1f14ad0 JB |
2324 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
2325 | } | |
2326 | ||
7e231dbe JB |
2327 | static void valleyview_disable_vblank(struct drm_device *dev, int pipe) |
2328 | { | |
2329 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
2330 | unsigned long irqflags; | |
31acc7f5 | 2331 | u32 imr; |
7e231dbe JB |
2332 | |
2333 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | |
31acc7f5 JB |
2334 | i915_disable_pipestat(dev_priv, pipe, |
2335 | PIPE_START_VBLANK_INTERRUPT_ENABLE); | |
7e231dbe | 2336 | imr = I915_READ(VLV_IMR); |
31acc7f5 | 2337 | if (pipe == 0) |
7e231dbe | 2338 | imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; |
31acc7f5 | 2339 | else |
7e231dbe | 2340 | imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; |
7e231dbe | 2341 | I915_WRITE(VLV_IMR, imr); |
7e231dbe JB |
2342 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
2343 | } | |
2344 | ||
893eead0 CW |
2345 | static u32 |
2346 | ring_last_seqno(struct intel_ring_buffer *ring) | |
852835f3 | 2347 | { |
893eead0 CW |
2348 | return list_entry(ring->request_list.prev, |
2349 | struct drm_i915_gem_request, list)->seqno; | |
2350 | } | |
2351 | ||
9107e9d2 CW |
2352 | static bool |
2353 | ring_idle(struct intel_ring_buffer *ring, u32 seqno) | |
2354 | { | |
2355 | return (list_empty(&ring->request_list) || | |
2356 | i915_seqno_passed(seqno, ring_last_seqno(ring))); | |
f65d9421 BG |
2357 | } |
2358 | ||
6274f212 CW |
2359 | static struct intel_ring_buffer * |
2360 | semaphore_waits_for(struct intel_ring_buffer *ring, u32 *seqno) | |
a24a11e6 CW |
2361 | { |
2362 | struct drm_i915_private *dev_priv = ring->dev->dev_private; | |
6274f212 | 2363 | u32 cmd, ipehr, acthd, acthd_min; |
a24a11e6 CW |
2364 | |
2365 | ipehr = I915_READ(RING_IPEHR(ring->mmio_base)); | |
2366 | if ((ipehr & ~(0x3 << 16)) != | |
2367 | (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER)) | |
6274f212 | 2368 | return NULL; |
a24a11e6 CW |
2369 | |
2370 | /* ACTHD is likely pointing to the dword after the actual command, | |
2371 | * so scan backwards until we find the MBOX. | |
2372 | */ | |
6274f212 | 2373 | acthd = intel_ring_get_active_head(ring) & HEAD_ADDR; |
a24a11e6 CW |
2374 | acthd_min = max((int)acthd - 3 * 4, 0); |
2375 | do { | |
2376 | cmd = ioread32(ring->virtual_start + acthd); | |
2377 | if (cmd == ipehr) | |
2378 | break; | |
2379 | ||
2380 | acthd -= 4; | |
2381 | if (acthd < acthd_min) | |
6274f212 | 2382 | return NULL; |
a24a11e6 CW |
2383 | } while (1); |
2384 | ||
6274f212 CW |
2385 | *seqno = ioread32(ring->virtual_start+acthd+4)+1; |
2386 | return &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3]; | |
a24a11e6 CW |
2387 | } |
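/*
 * The return above just decodes which ring the MBOX wait addresses: bit 17
 * of the IPEHR command selects one of the two other rings relative to the
 * waiter, hence the (ring->id + bit + 1) % 3 arithmetic. For example, on
 * the render ring (id 0) a cleared bit picks ring[1] (VCS) and a set bit
 * picks ring[2] (BCS). This is only a sketch of the decode performed here,
 * not a full description of the MI_SEMAPHORE_MBOX encoding.
 */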
2388 | ||
6274f212 CW |
2389 | static int semaphore_passed(struct intel_ring_buffer *ring) |
2390 | { | |
2391 | struct drm_i915_private *dev_priv = ring->dev->dev_private; | |
2392 | struct intel_ring_buffer *signaller; | |
2393 | u32 seqno, ctl; | |
2394 | ||
2395 | ring->hangcheck.deadlock = true; | |
2396 | ||
2397 | signaller = semaphore_waits_for(ring, &seqno); | |
2398 | if (signaller == NULL || signaller->hangcheck.deadlock) | |
2399 | return -1; | |
2400 | ||
2401 | /* cursory check for an unkickable deadlock */ | |
2402 | ctl = I915_READ_CTL(signaller); | |
2403 | if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0) | |
2404 | return -1; | |
2405 | ||
2406 | return i915_seqno_passed(signaller->get_seqno(signaller, false), seqno); | |
2407 | } | |
2408 | ||
2409 | static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv) | |
2410 | { | |
2411 | struct intel_ring_buffer *ring; | |
2412 | int i; | |
2413 | ||
2414 | for_each_ring(ring, dev_priv, i) | |
2415 | ring->hangcheck.deadlock = false; | |
2416 | } | |
2417 | ||
ad8beaea MK |
2418 | static enum intel_ring_hangcheck_action |
2419 | ring_stuck(struct intel_ring_buffer *ring, u32 acthd) | |
1ec14ad3 CW |
2420 | { |
2421 | struct drm_device *dev = ring->dev; | |
2422 | struct drm_i915_private *dev_priv = dev->dev_private; | |
9107e9d2 CW |
2423 | u32 tmp; |
2424 | ||
6274f212 CW |
2425 | if (ring->hangcheck.acthd != acthd) |
2426 | return active; | |
2427 | ||
9107e9d2 | 2428 | if (IS_GEN2(dev)) |
6274f212 | 2429 | return hung; |
9107e9d2 CW |
2430 | |
2431 | /* Is the chip hanging on a WAIT_FOR_EVENT? | |
2432 | * If so we can simply poke the RB_WAIT bit | |
2433 | * and break the hang. This should work on | |
2434 | * all but the second generation chipsets. | |
2435 | */ | |
2436 | tmp = I915_READ_CTL(ring); | |
1ec14ad3 CW |
2437 | if (tmp & RING_WAIT) { |
2438 | DRM_ERROR("Kicking stuck wait on %s\n", | |
2439 | ring->name); | |
2440 | I915_WRITE_CTL(ring, tmp); | |
6274f212 CW |
2441 | return kick; |
2442 | } | |
2443 | ||
2444 | if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) { | |
2445 | switch (semaphore_passed(ring)) { | |
2446 | default: | |
2447 | return hung; | |
2448 | case 1: | |
2449 | DRM_ERROR("Kicking stuck semaphore on %s\n", | |
2450 | ring->name); | |
2451 | I915_WRITE_CTL(ring, tmp); | |
2452 | return kick; | |
2453 | case 0: | |
2454 | return wait; | |
2455 | } | |
9107e9d2 | 2456 | } |
ed5cbb03 | 2457 | |
6274f212 | 2458 | return hung; |
ed5cbb03 MK |
2459 | } |
2460 | ||
f65d9421 BG |
2461 | /** |
2462 | * This is called when the chip hasn't reported back with completed | |
05407ff8 MK |
2463 | * batchbuffers in a long time. We keep track of per-ring seqno progress and |
2464 | * if there is no progress, the hangcheck score for that ring is increased. |
2465 | * Further, acthd is inspected to see if the ring is stuck. If it is stuck |
2466 | * we kick the ring. If we see no progress on three subsequent calls |
2467 | * we assume the chip is wedged and try to fix it by resetting the chip. |
f65d9421 BG |
2468 | */ |
2469 | void i915_hangcheck_elapsed(unsigned long data) | |
2470 | { | |
2471 | struct drm_device *dev = (struct drm_device *)data; | |
2472 | drm_i915_private_t *dev_priv = dev->dev_private; | |
b4519513 | 2473 | struct intel_ring_buffer *ring; |
b4519513 | 2474 | int i; |
05407ff8 | 2475 | int busy_count = 0, rings_hung = 0; |
9107e9d2 CW |
2476 | bool stuck[I915_NUM_RINGS] = { 0 }; |
2477 | #define BUSY 1 | |
2478 | #define KICK 5 | |
2479 | #define HUNG 20 | |
2480 | #define FIRE 30 | |
893eead0 | 2481 | |
3e0dc6b0 BW |
2482 | if (!i915_enable_hangcheck) |
2483 | return; | |
2484 | ||
b4519513 | 2485 | for_each_ring(ring, dev_priv, i) { |
05407ff8 | 2486 | u32 seqno, acthd; |
9107e9d2 | 2487 | bool busy = true; |
05407ff8 | 2488 | |
6274f212 CW |
2489 | semaphore_clear_deadlocks(dev_priv); |
2490 | ||
05407ff8 MK |
2491 | seqno = ring->get_seqno(ring, false); |
2492 | acthd = intel_ring_get_active_head(ring); | |
b4519513 | 2493 | |
9107e9d2 CW |
2494 | if (ring->hangcheck.seqno == seqno) { |
2495 | if (ring_idle(ring, seqno)) { | |
2496 | if (waitqueue_active(&ring->irq_queue)) { | |
2497 | /* Issue a wake-up to catch stuck h/w. */ | |
2498 | DRM_ERROR("Hangcheck timer elapsed... %s idle\n", | |
2499 | ring->name); | |
2500 | wake_up_all(&ring->irq_queue); | |
2501 | ring->hangcheck.score += HUNG; | |
2502 | } else | |
2503 | busy = false; | |
05407ff8 | 2504 | } else { |
9107e9d2 CW |
2505 | int score; |
2506 | ||
6274f212 CW |
2507 | /* We always increment the hangcheck score |
2508 | * if the ring is busy and still processing | |
2509 | * the same request, so that no single request | |
2510 | * can run indefinitely (such as a chain of | |
2511 | * batches). The only time we do not increment | |
2512 | * the hangcheck score on this ring is if this |
2513 | * ring is in a legitimate wait for another | |
2514 | * ring. In that case the waiting ring is a | |
2515 | * victim and we want to be sure we catch the | |
2516 | * right culprit. Then every time we do kick | |
2517 | * the ring, add a small increment to the | |
2518 | * score so that we can catch a batch that is | |
2519 | * being repeatedly kicked and so responsible | |
2520 | * for stalling the machine. | |
2521 | */ | |
ad8beaea MK |
2522 | ring->hangcheck.action = ring_stuck(ring, |
2523 | acthd); | |
2524 | ||
2525 | switch (ring->hangcheck.action) { | |
6274f212 CW |
2526 | case wait: |
2527 | score = 0; | |
2528 | break; | |
2529 | case active: | |
9107e9d2 | 2530 | score = BUSY; |
6274f212 CW |
2531 | break; |
2532 | case kick: | |
2533 | score = KICK; | |
2534 | break; | |
2535 | case hung: | |
2536 | score = HUNG; | |
2537 | stuck[i] = true; | |
2538 | break; | |
2539 | } | |
9107e9d2 | 2540 | ring->hangcheck.score += score; |
05407ff8 | 2541 | } |
9107e9d2 CW |
2542 | } else { |
2543 | /* Gradually reduce the count so that we catch DoS | |
2544 | * attempts across multiple batches. | |
2545 | */ | |
2546 | if (ring->hangcheck.score > 0) | |
2547 | ring->hangcheck.score--; | |
d1e61e7f CW |
2548 | } |
2549 | ||
05407ff8 MK |
2550 | ring->hangcheck.seqno = seqno; |
2551 | ring->hangcheck.acthd = acthd; | |
9107e9d2 | 2552 | busy_count += busy; |
893eead0 | 2553 | } |
b9201c14 | 2554 | |
92cab734 | 2555 | for_each_ring(ring, dev_priv, i) { |
9107e9d2 | 2556 | if (ring->hangcheck.score > FIRE) { |
acd78c11 | 2557 | DRM_ERROR("%s on %s\n", |
05407ff8 | 2558 | stuck[i] ? "stuck" : "no progress", |
a43adf07 CW |
2559 | ring->name); |
2560 | rings_hung++; | |
92cab734 MK |
2561 | } |
2562 | } | |
2563 | ||
05407ff8 MK |
2564 | if (rings_hung) |
2565 | return i915_handle_error(dev, true); | |
f65d9421 | 2566 | |
05407ff8 MK |
2567 | if (busy_count) |
2568 | /* Reset timer in case the chip hangs without another request |
2569 | * being added */ | |
2570 | mod_timer(&dev_priv->gpu_error.hangcheck_timer, | |
2571 | round_jiffies_up(jiffies + | |
2572 | DRM_I915_HANGCHECK_JIFFIES)); | |
f65d9421 BG |
2573 | } |
2574 | ||
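/*
 * Worked example of the scoring above: each timer pass adds BUSY (1) for a
 * ring that is merely active on the same request, KICK (5) when a stuck
 * wait or semaphore had to be kicked, and HUNG (20) when the ring looks
 * truly stuck; the score is decremented again whenever the seqno advances.
 * i915_handle_error() fires once a ring's score exceeds FIRE (30), so a
 * dead ring trips it after two consecutive checks (2 * 20 = 40) while a
 * ring that keeps needing kicks takes seven (7 * 5 = 35).
 */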
91738a95 PZ |
2575 | static void ibx_irq_preinstall(struct drm_device *dev) |
2576 | { | |
2577 | struct drm_i915_private *dev_priv = dev->dev_private; | |
2578 | ||
2579 | if (HAS_PCH_NOP(dev)) | |
2580 | return; | |
2581 | ||
2582 | /* south display irq */ | |
2583 | I915_WRITE(SDEIMR, 0xffffffff); | |
2584 | /* | |
2585 | * SDEIER is also touched by the interrupt handler to work around missed | |
2586 | * PCH interrupts. Hence we can't update it after the interrupt handler | |
2587 | * is enabled - instead we unconditionally enable all PCH interrupt | |
2588 | * sources here, but then only unmask them as needed with SDEIMR. | |
2589 | */ | |
2590 | I915_WRITE(SDEIER, 0xffffffff); | |
2591 | POSTING_READ(SDEIER); | |
2592 | } | |
2593 | ||
1da177e4 LT |
2594 | /* drm_dma.h hooks |
2595 | */ | |
f71d4af4 | 2596 | static void ironlake_irq_preinstall(struct drm_device *dev) |
036a4a7d ZW |
2597 | { |
2598 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
2599 | ||
4697995b JB |
2600 | atomic_set(&dev_priv->irq_received, 0); |
2601 | ||
036a4a7d | 2602 | I915_WRITE(HWSTAM, 0xeffe); |
bdfcdb63 | 2603 | |
036a4a7d ZW |
2604 | /* XXX hotplug from PCH */ |
2605 | ||
2606 | I915_WRITE(DEIMR, 0xffffffff); | |
2607 | I915_WRITE(DEIER, 0x0); | |
3143a2bf | 2608 | POSTING_READ(DEIER); |
036a4a7d ZW |
2609 | |
2610 | /* and GT */ | |
2611 | I915_WRITE(GTIMR, 0xffffffff); | |
2612 | I915_WRITE(GTIER, 0x0); | |
3143a2bf | 2613 | POSTING_READ(GTIER); |
c650156a | 2614 | |
91738a95 | 2615 | ibx_irq_preinstall(dev); |
7d99163d BW |
2616 | } |
2617 | ||
2618 | static void ivybridge_irq_preinstall(struct drm_device *dev) | |
2619 | { | |
2620 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
2621 | ||
2622 | atomic_set(&dev_priv->irq_received, 0); | |
2623 | ||
2624 | I915_WRITE(HWSTAM, 0xeffe); | |
2625 | ||
2626 | /* XXX hotplug from PCH */ | |
2627 | ||
2628 | I915_WRITE(DEIMR, 0xffffffff); | |
2629 | I915_WRITE(DEIER, 0x0); | |
2630 | POSTING_READ(DEIER); | |
2631 | ||
2632 | /* and GT */ | |
2633 | I915_WRITE(GTIMR, 0xffffffff); | |
2634 | I915_WRITE(GTIER, 0x0); | |
2635 | POSTING_READ(GTIER); | |
2636 | ||
eda63ffb BW |
2637 | /* Power management */ |
2638 | I915_WRITE(GEN6_PMIMR, 0xffffffff); | |
2639 | I915_WRITE(GEN6_PMIER, 0x0); | |
2640 | POSTING_READ(GEN6_PMIER); | |
2641 | ||
91738a95 | 2642 | ibx_irq_preinstall(dev); |
036a4a7d ZW |
2643 | } |
2644 | ||
7e231dbe JB |
2645 | static void valleyview_irq_preinstall(struct drm_device *dev) |
2646 | { | |
2647 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
2648 | int pipe; | |
2649 | ||
2650 | atomic_set(&dev_priv->irq_received, 0); | |
2651 | ||
7e231dbe JB |
2652 | /* VLV magic */ |
2653 | I915_WRITE(VLV_IMR, 0); | |
2654 | I915_WRITE(RING_IMR(RENDER_RING_BASE), 0); | |
2655 | I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0); | |
2656 | I915_WRITE(RING_IMR(BLT_RING_BASE), 0); | |
2657 | ||
7e231dbe JB |
2658 | /* and GT */ |
2659 | I915_WRITE(GTIIR, I915_READ(GTIIR)); | |
2660 | I915_WRITE(GTIIR, I915_READ(GTIIR)); | |
2661 | I915_WRITE(GTIMR, 0xffffffff); | |
2662 | I915_WRITE(GTIER, 0x0); | |
2663 | POSTING_READ(GTIER); | |
2664 | ||
2665 | I915_WRITE(DPINVGTT, 0xff); | |
2666 | ||
2667 | I915_WRITE(PORT_HOTPLUG_EN, 0); | |
2668 | I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); | |
2669 | for_each_pipe(pipe) | |
2670 | I915_WRITE(PIPESTAT(pipe), 0xffff); | |
2671 | I915_WRITE(VLV_IIR, 0xffffffff); | |
2672 | I915_WRITE(VLV_IMR, 0xffffffff); | |
2673 | I915_WRITE(VLV_IER, 0x0); | |
2674 | POSTING_READ(VLV_IER); | |
2675 | } | |
2676 | ||
82a28bcf | 2677 | static void ibx_hpd_irq_setup(struct drm_device *dev) |
7fe0b973 KP |
2678 | { |
2679 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
82a28bcf DV |
2680 | struct drm_mode_config *mode_config = &dev->mode_config; |
2681 | struct intel_encoder *intel_encoder; | |
fee884ed | 2682 | u32 hotplug_irqs, hotplug, enabled_irqs = 0; |
82a28bcf DV |
2683 | |
2684 | if (HAS_PCH_IBX(dev)) { | |
fee884ed | 2685 | hotplug_irqs = SDE_HOTPLUG_MASK; |
82a28bcf | 2686 | list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) |
cd569aed | 2687 | if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) |
fee884ed | 2688 | enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin]; |
82a28bcf | 2689 | } else { |
fee884ed | 2690 | hotplug_irqs = SDE_HOTPLUG_MASK_CPT; |
82a28bcf | 2691 | list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) |
cd569aed | 2692 | if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) |
fee884ed | 2693 | enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin]; |
82a28bcf | 2694 | } |
7fe0b973 | 2695 | |
fee884ed | 2696 | ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); |
82a28bcf DV |
2697 | |
2698 | /* | |
2699 | * Enable digital hotplug on the PCH, and configure the DP short pulse | |
2700 | * duration to 2ms (which is the minimum in the Display Port spec) | |
2701 | * | |
2702 | * This register is the same on all known PCH chips. | |
2703 | */ | |
7fe0b973 KP |
2704 | hotplug = I915_READ(PCH_PORT_HOTPLUG); |
2705 | hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK); | |
2706 | hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms; | |
2707 | hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms; | |
2708 | hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms; | |
2709 | I915_WRITE(PCH_PORT_HOTPLUG, hotplug); | |
2710 | } | |
2711 | ||
d46da437 PZ |
2712 | static void ibx_irq_postinstall(struct drm_device *dev) |
2713 | { | |
2714 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
82a28bcf | 2715 | u32 mask; |
e5868a31 | 2716 | |
692a04cf DV |
2717 | if (HAS_PCH_NOP(dev)) |
2718 | return; | |
2719 | ||
8664281b PZ |
2720 | if (HAS_PCH_IBX(dev)) { |
2721 | mask = SDE_GMBUS | SDE_AUX_MASK | SDE_TRANSB_FIFO_UNDER | | |
de032bf4 | 2722 | SDE_TRANSA_FIFO_UNDER | SDE_POISON; |
8664281b PZ |
2723 | } else { |
2724 | mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT | SDE_ERROR_CPT; | |
2725 | ||
2726 | I915_WRITE(SERR_INT, I915_READ(SERR_INT)); | |
2727 | } | |
ab5c608b | 2728 | |
d46da437 PZ |
2729 | I915_WRITE(SDEIIR, I915_READ(SDEIIR)); |
2730 | I915_WRITE(SDEIMR, ~mask); | |
d46da437 PZ |
2731 | } |
2732 | ||
f71d4af4 | 2733 | static int ironlake_irq_postinstall(struct drm_device *dev) |
036a4a7d | 2734 | { |
4bc9d430 DV |
2735 | unsigned long irqflags; |
2736 | ||
036a4a7d ZW |
2737 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
2738 | /* enable the kinds of interrupts that are always enabled */ |
013d5aa2 | 2739 | u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | |
ce99c256 | 2740 | DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE | |
8664281b | 2741 | DE_AUX_CHANNEL_A | DE_PIPEB_FIFO_UNDERRUN | |
de032bf4 | 2742 | DE_PIPEA_FIFO_UNDERRUN | DE_POISON; |
cc609d5d | 2743 | u32 gt_irqs; |
036a4a7d | 2744 | |
1ec14ad3 | 2745 | dev_priv->irq_mask = ~display_mask; |
036a4a7d ZW |
2746 | |
2747 | /* should always be able to generate an irq */ |
2748 | I915_WRITE(DEIIR, I915_READ(DEIIR)); | |
1ec14ad3 | 2749 | I915_WRITE(DEIMR, dev_priv->irq_mask); |
6005ce42 DV |
2750 | I915_WRITE(DEIER, display_mask | |
2751 | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT); | |
3143a2bf | 2752 | POSTING_READ(DEIER); |
036a4a7d | 2753 | |
1ec14ad3 | 2754 | dev_priv->gt_irq_mask = ~0; |
036a4a7d ZW |
2755 | |
2756 | I915_WRITE(GTIIR, I915_READ(GTIIR)); | |
1ec14ad3 | 2757 | I915_WRITE(GTIMR, dev_priv->gt_irq_mask); |
881f47b6 | 2758 | |
cc609d5d BW |
2759 | gt_irqs = GT_RENDER_USER_INTERRUPT; |
2760 | ||
1ec14ad3 | 2761 | if (IS_GEN6(dev)) |
cc609d5d | 2762 | gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT; |
1ec14ad3 | 2763 | else |
cc609d5d BW |
2764 | gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT | |
2765 | ILK_BSD_USER_INTERRUPT; | |
2766 | ||
2767 | I915_WRITE(GTIER, gt_irqs); | |
3143a2bf | 2768 | POSTING_READ(GTIER); |
036a4a7d | 2769 | |
d46da437 | 2770 | ibx_irq_postinstall(dev); |
7fe0b973 | 2771 | |
f97108d1 | 2772 | if (IS_IRONLAKE_M(dev)) { |
6005ce42 DV |
2773 | /* Enable PCU event interrupts |
2774 | * | |
2775 | * spinlocking not required here for correctness since interrupt | |
4bc9d430 DV |
2776 | * setup is guaranteed to run in single-threaded context. But we |
2777 | * need it to make the assert_spin_locked happy. */ | |
2778 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | |
f97108d1 | 2779 | ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT); |
4bc9d430 | 2780 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
f97108d1 JB |
2781 | } |
2782 | ||
036a4a7d ZW |
2783 | return 0; |
2784 | } | |
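The postinstall paths above all follow the same ordering: acknowledge anything still pending by writing IIR back to itself, program the mask register, write the enable register, then do a posting read so the writes are flushed before interrupts can fire. A self-contained toy model of that ack/mask/enable/flush sequence, with fake registers instead of MMIO (all names here are invented for illustration), is sketched below:

    #include <stdint.h>
    #include <stdio.h>

    /* Toy model of an IIR/IMR/IER triplet; not the real i915 MMIO layout. */
    struct fake_irq_regs {
            uint32_t iir;   /* pending bits, write-1-to-clear */
            uint32_t imr;   /* mask bits, 1 = source masked off */
            uint32_t ier;   /* enable bits, 1 = source enabled */
    };

    static void w1c_write(uint32_t *reg, uint32_t val)
    {
            *reg &= ~val;   /* write-1-to-clear semantics */
    }

    static void fake_postinstall(struct fake_irq_regs *r, uint32_t wanted)
    {
            w1c_write(&r->iir, r->iir);     /* ack whatever is still pending */
            r->imr = ~wanted;               /* unmask only the wanted sources */
            r->ier = wanted;                /* enable them at the top level */
            (void)r->ier;                   /* stands in for the POSTING_READ() flush */
    }

    int main(void)
    {
            struct fake_irq_regs regs = { .iir = 0xffu, .imr = 0xffffffffu, .ier = 0 };

            fake_postinstall(&regs, 0x0000000fu);
            printf("IIR=0x%08x IMR=0x%08x IER=0x%08x\n",
                   (unsigned int)regs.iir, (unsigned int)regs.imr,
                   (unsigned int)regs.ier);
            return 0;
    }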
2785 | ||
f71d4af4 | 2786 | static int ivybridge_irq_postinstall(struct drm_device *dev) |
b1f14ad0 JB |
2787 | { |
2788 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
2789 | /* enable the kinds of interrupts that are always enabled */ | |
b615b57a CW |
2790 | u32 display_mask = |
2791 | DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | DE_PCH_EVENT_IVB | | |
2792 | DE_PLANEC_FLIP_DONE_IVB | | |
2793 | DE_PLANEB_FLIP_DONE_IVB | | |
ce99c256 | 2794 | DE_PLANEA_FLIP_DONE_IVB | |
8664281b PZ |
2795 | DE_AUX_CHANNEL_A_IVB | |
2796 | DE_ERR_INT_IVB; | |
12638c57 | 2797 | u32 pm_irqs = GEN6_PM_RPS_EVENTS; |
cc609d5d | 2798 | u32 gt_irqs; |
b1f14ad0 | 2799 | |
b1f14ad0 JB |
2800 | dev_priv->irq_mask = ~display_mask; |
2801 | ||
2802 | /* should always be able to generate an irq */ | |
8664281b | 2803 | I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT)); |
b1f14ad0 JB |
2804 | I915_WRITE(DEIIR, I915_READ(DEIIR)); |
2805 | I915_WRITE(DEIMR, dev_priv->irq_mask); | |
b615b57a CW |
2806 | I915_WRITE(DEIER, |
2807 | display_mask | | |
2808 | DE_PIPEC_VBLANK_IVB | | |
2809 | DE_PIPEB_VBLANK_IVB | | |
2810 | DE_PIPEA_VBLANK_IVB); | |
b1f14ad0 JB |
2811 | POSTING_READ(DEIER); |
2812 | ||
cc609d5d | 2813 | dev_priv->gt_irq_mask = ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT; |
b1f14ad0 JB |
2814 | |
2815 | I915_WRITE(GTIIR, I915_READ(GTIIR)); | |
2816 | I915_WRITE(GTIMR, dev_priv->gt_irq_mask); | |
2817 | ||
cc609d5d BW |
2818 | gt_irqs = GT_RENDER_USER_INTERRUPT | GT_BSD_USER_INTERRUPT | |
2819 | GT_BLT_USER_INTERRUPT | GT_RENDER_L3_PARITY_ERROR_INTERRUPT; | |
2820 | I915_WRITE(GTIER, gt_irqs); | |
b1f14ad0 JB |
2821 | POSTING_READ(GTIER); |
2822 | ||
12638c57 BW |
2823 | I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR)); |
2824 | if (HAS_VEBOX(dev)) | |
2825 | pm_irqs |= PM_VEBOX_USER_INTERRUPT | | |
2826 | PM_VEBOX_CS_ERROR_INTERRUPT; | |
2827 | ||
2828 | /* Our enable/disable rps functions may touch these registers so | |
2829 | * make sure to set a known state for only the non-RPS bits. | |
2830 | * The RMW is extra paranoia since this should be called after being set | |
2831 | * to a known state in preinstall. | |
2832 | */ | |
2833 | I915_WRITE(GEN6_PMIMR, | |
2834 | (I915_READ(GEN6_PMIMR) | ~GEN6_PM_RPS_EVENTS) & ~pm_irqs); | |
2835 | I915_WRITE(GEN6_PMIER, | |
2836 | (I915_READ(GEN6_PMIER) & GEN6_PM_RPS_EVENTS) | pm_irqs); | |
2837 | POSTING_READ(GEN6_PMIER); | |
eda63ffb | 2838 | |
d46da437 | 2839 | ibx_irq_postinstall(dev); |
7fe0b973 | 2840 | |
b1f14ad0 JB |
2841 | return 0; |
2842 | } | |
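The comment above explains why GEN6_PMIMR and GEN6_PMIER are updated with a read-modify-write: the RPS code owns the RPS event bits, so this function only forces the non-RPS bits to a known state. A small standalone example of the same mask arithmetic, with invented bit assignments standing in for GEN6_PM_RPS_EVENTS and pm_irqs, shows what each expression preserves:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative bit assignments, not the real GEN6 definitions. */
    #define RPS_BITS        0x000000f0u     /* bits owned by the RPS code */
    #define PM_IRQS         0x00000300u     /* bits this postinstall wants live */

    int main(void)
    {
            uint32_t pmimr = 0xffffffffu;   /* pretend current mask register value */
            uint32_t pmier = 0x00000000u;   /* pretend current enable register value */

            /* Force every non-RPS bit to "masked", then unmask only PM_IRQS;
             * the RPS bits keep whatever state the RPS code left them in. */
            pmimr = (pmimr | ~RPS_BITS) & ~PM_IRQS;

            /* Keep only the RPS enables from the old value, then add PM_IRQS. */
            pmier = (pmier & RPS_BITS) | PM_IRQS;

            printf("PMIMR -> 0x%08x, PMIER -> 0x%08x\n",
                   (unsigned int)pmimr, (unsigned int)pmier);
            return 0;
    }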
2843 | ||
7e231dbe JB |
2844 | static int valleyview_irq_postinstall(struct drm_device *dev) |
2845 | { | |
2846 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
cc609d5d | 2847 | u32 gt_irqs; |
7e231dbe | 2848 | u32 enable_mask; |
31acc7f5 | 2849 | u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV; |
b79480ba | 2850 | unsigned long irqflags; |
7e231dbe JB |
2851 | |
2852 | enable_mask = I915_DISPLAY_PORT_INTERRUPT; | |
31acc7f5 JB |
2853 | enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | |
2854 | I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT | | |
2855 | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | | |
7e231dbe JB |
2856 | I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; |
2857 | ||
31acc7f5 JB |
2858 | /* |
2859 | * Leave vblank interrupts masked initially. enable/disable will | |
2860 | * toggle them based on usage. | |
2861 | */ | |
2862 | dev_priv->irq_mask = (~enable_mask) | | |
2863 | I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT | | |
2864 | I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; | |
7e231dbe | 2865 | |
20afbda2 DV |
2866 | I915_WRITE(PORT_HOTPLUG_EN, 0); |
2867 | POSTING_READ(PORT_HOTPLUG_EN); | |
2868 | ||
7e231dbe JB |
2869 | I915_WRITE(VLV_IMR, dev_priv->irq_mask); |
2870 | I915_WRITE(VLV_IER, enable_mask); | |
2871 | I915_WRITE(VLV_IIR, 0xffffffff); | |
2872 | I915_WRITE(PIPESTAT(0), 0xffff); | |
2873 | I915_WRITE(PIPESTAT(1), 0xffff); | |
2874 | POSTING_READ(VLV_IER); | |
2875 | ||
b79480ba DV |
2876 | /* Interrupt setup is already guaranteed to be single-threaded, this is |
2877 | * just to make the assert_spin_locked check happy. */ | |
2878 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | |
31acc7f5 | 2879 | i915_enable_pipestat(dev_priv, 0, pipestat_enable); |
515ac2bb | 2880 | i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE); |
31acc7f5 | 2881 | i915_enable_pipestat(dev_priv, 1, pipestat_enable); |
b79480ba | 2882 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
31acc7f5 | 2883 | |
7e231dbe JB |
2884 | I915_WRITE(VLV_IIR, 0xffffffff); |
2885 | I915_WRITE(VLV_IIR, 0xffffffff); | |
2886 | ||
7e231dbe | 2887 | I915_WRITE(GTIIR, I915_READ(GTIIR)); |
31acc7f5 | 2888 | I915_WRITE(GTIMR, dev_priv->gt_irq_mask); |
3bcedbe5 | 2889 | |
cc609d5d BW |
2890 | gt_irqs = GT_RENDER_USER_INTERRUPT | GT_BSD_USER_INTERRUPT | |
2891 | GT_BLT_USER_INTERRUPT; | |
2892 | I915_WRITE(GTIER, gt_irqs); | |
7e231dbe JB |
2893 | POSTING_READ(GTIER); |
2894 | ||
2895 | /* ack & enable invalid PTE error interrupts */ | |
2896 | #if 0 /* FIXME: add support to irq handler for checking these bits */ | |
2897 | I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); | |
2898 | I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK); | |
2899 | #endif | |
2900 | ||
2901 | I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); | |
20afbda2 DV |
2902 | |
2903 | return 0; | |
2904 | } | |
2905 | ||
7e231dbe JB |
2906 | static void valleyview_irq_uninstall(struct drm_device *dev) |
2907 | { | |
2908 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
2909 | int pipe; | |
2910 | ||
2911 | if (!dev_priv) | |
2912 | return; | |
2913 | ||
ac4c16c5 EE |
2914 | del_timer_sync(&dev_priv->hotplug_reenable_timer); |
2915 | ||
7e231dbe JB |
2916 | for_each_pipe(pipe) |
2917 | I915_WRITE(PIPESTAT(pipe), 0xffff); | |
2918 | ||
2919 | I915_WRITE(HWSTAM, 0xffffffff); | |
2920 | I915_WRITE(PORT_HOTPLUG_EN, 0); | |
2921 | I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); | |
2922 | for_each_pipe(pipe) | |
2923 | I915_WRITE(PIPESTAT(pipe), 0xffff); | |
2924 | I915_WRITE(VLV_IIR, 0xffffffff); | |
2925 | I915_WRITE(VLV_IMR, 0xffffffff); | |
2926 | I915_WRITE(VLV_IER, 0x0); | |
2927 | POSTING_READ(VLV_IER); | |
2928 | } | |
2929 | ||
f71d4af4 | 2930 | static void ironlake_irq_uninstall(struct drm_device *dev) |
036a4a7d ZW |
2931 | { |
2932 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
4697995b JB |
2933 | |
2934 | if (!dev_priv) | |
2935 | return; | |
2936 | ||
ac4c16c5 EE |
2937 | del_timer_sync(&dev_priv->hotplug_reenable_timer); |
2938 | ||
036a4a7d ZW |
2939 | I915_WRITE(HWSTAM, 0xffffffff); |
2940 | ||
2941 | I915_WRITE(DEIMR, 0xffffffff); | |
2942 | I915_WRITE(DEIER, 0x0); | |
2943 | I915_WRITE(DEIIR, I915_READ(DEIIR)); | |
8664281b PZ |
2944 | if (IS_GEN7(dev)) |
2945 | I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT)); | |
036a4a7d ZW |
2946 | |
2947 | I915_WRITE(GTIMR, 0xffffffff); | |
2948 | I915_WRITE(GTIER, 0x0); | |
2949 | I915_WRITE(GTIIR, I915_READ(GTIIR)); | |
192aac1f | 2950 | |
ab5c608b BW |
2951 | if (HAS_PCH_NOP(dev)) |
2952 | return; | |
2953 | ||
192aac1f KP |
2954 | I915_WRITE(SDEIMR, 0xffffffff); |
2955 | I915_WRITE(SDEIER, 0x0); | |
2956 | I915_WRITE(SDEIIR, I915_READ(SDEIIR)); | |
8664281b PZ |
2957 | if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev)) |
2958 | I915_WRITE(SERR_INT, I915_READ(SERR_INT)); | |
036a4a7d ZW |
2959 | } |
2960 | ||
a266c7d5 | 2961 | static void i8xx_irq_preinstall(struct drm_device * dev) |
1da177e4 LT |
2962 | { |
2963 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
9db4a9c7 | 2964 | int pipe; |
91e3738e | 2965 | |
a266c7d5 | 2966 | atomic_set(&dev_priv->irq_received, 0); |
5ca58282 | 2967 | |
9db4a9c7 JB |
2968 | for_each_pipe(pipe) |
2969 | I915_WRITE(PIPESTAT(pipe), 0); | |
a266c7d5 CW |
2970 | I915_WRITE16(IMR, 0xffff); |
2971 | I915_WRITE16(IER, 0x0); | |
2972 | POSTING_READ16(IER); | |
c2798b19 CW |
2973 | } |
2974 | ||
2975 | static int i8xx_irq_postinstall(struct drm_device *dev) | |
2976 | { | |
2977 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
2978 | ||
c2798b19 CW |
2979 | I915_WRITE16(EMR, |
2980 | ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); | |
2981 | ||
2982 | /* Unmask the interrupts that we always want on. */ | |
2983 | dev_priv->irq_mask = | |
2984 | ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | | |
2985 | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | | |
2986 | I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | | |
2987 | I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | | |
2988 | I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); | |
2989 | I915_WRITE16(IMR, dev_priv->irq_mask); | |
2990 | ||
2991 | I915_WRITE16(IER, | |
2992 | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | | |
2993 | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | | |
2994 | I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | | |
2995 | I915_USER_INTERRUPT); | |
2996 | POSTING_READ16(IER); | |
2997 | ||
2998 | return 0; | |
2999 | } | |
3000 | ||
90a72f87 VS |
3001 | /* |
3002 | * Returns true when a page flip has completed. | |
3003 | */ | |
3004 | static bool i8xx_handle_vblank(struct drm_device *dev, | |
3005 | int pipe, u16 iir) | |
3006 | { | |
3007 | drm_i915_private_t *dev_priv = dev->dev_private; | |
3008 | u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(pipe); | |
3009 | ||
3010 | if (!drm_handle_vblank(dev, pipe)) | |
3011 | return false; | |
3012 | ||
3013 | if ((iir & flip_pending) == 0) | |
3014 | return false; | |
3015 | ||
3016 | intel_prepare_page_flip(dev, pipe); | |
3017 | ||
3018 | /* We detect FlipDone by looking for the change in PendingFlip from '1' | |
3019 | * to '0' on the following vblank, i.e. IIR has the PendingFlip | |
3020 | * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence | |
3021 | * the flip is completed (no longer pending). Since this doesn't raise | |
3022 | * an interrupt per se, we watch for the change at vblank. | |
3023 | */ | |
3024 | if (I915_READ16(ISR) & flip_pending) | |
3025 | return false; | |
3026 | ||
3027 | intel_finish_page_flip(dev, pipe); | |
3028 | ||
3029 | return true; | |
3030 | } | |
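The completion test above relies on the pending bit having been captured in the IIR snapshot while the live ISR value no longer shows it. A stripped-down sketch of that predicate, with a made-up bit value instead of DISPLAY_PLANE_FLIP_PENDING(), is shown here:

    #include <stdbool.h>
    #include <stdint.h>

    /* Illustrative flip-pending bit; not the real register definition. */
    #define FLIP_PENDING_BIT        (1u << 6)

    static bool flip_completed(uint16_t iir_snapshot, uint16_t isr_now)
    {
            if ((iir_snapshot & FLIP_PENDING_BIT) == 0)
                    return false;   /* no flip was outstanding for this plane */

            if (isr_now & FLIP_PENDING_BIT)
                    return false;   /* hardware still reports the flip pending */

            return true;            /* pending -> not pending: flip finished */
    }

    int main(void)
    {
            /* Pending in the snapshot, cleared in ISR: treated as complete. */
            return flip_completed(FLIP_PENDING_BIT, 0) ? 0 : 1;
    }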
3031 | ||
ff1f525e | 3032 | static irqreturn_t i8xx_irq_handler(int irq, void *arg) |
c2798b19 CW |
3033 | { |
3034 | struct drm_device *dev = (struct drm_device *) arg; | |
3035 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
c2798b19 CW |
3036 | u16 iir, new_iir; |
3037 | u32 pipe_stats[2]; | |
3038 | unsigned long irqflags; | |
3039 | int irq_received; | |
3040 | int pipe; | |
3041 | u16 flip_mask = | |
3042 | I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | | |
3043 | I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; | |
3044 | ||
3045 | atomic_inc(&dev_priv->irq_received); | |
3046 | ||
3047 | iir = I915_READ16(IIR); | |
3048 | if (iir == 0) | |
3049 | return IRQ_NONE; | |
3050 | ||
3051 | while (iir & ~flip_mask) { | |
3052 | /* Can't rely on pipestat interrupt bit in iir as it might | |
3053 | * have been cleared after the pipestat interrupt was received. | |
3054 | * It doesn't set the bit in iir again, but it still produces | |
3055 | * interrupts (for non-MSI). | |
3056 | */ | |
3057 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | |
3058 | if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) | |
3059 | i915_handle_error(dev, false); | |
3060 | ||
3061 | for_each_pipe(pipe) { | |
3062 | int reg = PIPESTAT(pipe); | |
3063 | pipe_stats[pipe] = I915_READ(reg); | |
3064 | ||
3065 | /* | |
3066 | * Clear the PIPE*STAT regs before the IIR | |
3067 | */ | |
3068 | if (pipe_stats[pipe] & 0x8000ffff) { | |
3069 | if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) | |
3070 | DRM_DEBUG_DRIVER("pipe %c underrun\n", | |
3071 | pipe_name(pipe)); | |
3072 | I915_WRITE(reg, pipe_stats[pipe]); | |
3073 | irq_received = 1; | |
3074 | } | |
3075 | } | |
3076 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | |
3077 | ||
3078 | I915_WRITE16(IIR, iir & ~flip_mask); | |
3079 | new_iir = I915_READ16(IIR); /* Flush posted writes */ | |
3080 | ||
d05c617e | 3081 | i915_update_dri1_breadcrumb(dev); |
c2798b19 CW |
3082 | |
3083 | if (iir & I915_USER_INTERRUPT) | |
3084 | notify_ring(dev, &dev_priv->ring[RCS]); | |
3085 | ||
3086 | if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS && | |
90a72f87 VS |
3087 | i8xx_handle_vblank(dev, 0, iir)) |
3088 | flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(0); | |
c2798b19 CW |
3089 | |
3090 | if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS && | |
90a72f87 VS |
3091 | i8xx_handle_vblank(dev, 1, iir)) |
3092 | flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(1); | |
c2798b19 CW |
3093 | |
3094 | iir = new_iir; | |
3095 | } | |
3096 | ||
3097 | return IRQ_HANDLED; | |
3098 | } | |
3099 | ||
3100 | static void i8xx_irq_uninstall(struct drm_device * dev) | |
3101 | { | |
3102 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
3103 | int pipe; | |
3104 | ||
c2798b19 CW |
3105 | for_each_pipe(pipe) { |
3106 | /* Clear enable bits; then clear status bits */ | |
3107 | I915_WRITE(PIPESTAT(pipe), 0); | |
3108 | I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); | |
3109 | } | |
3110 | I915_WRITE16(IMR, 0xffff); | |
3111 | I915_WRITE16(IER, 0x0); | |
3112 | I915_WRITE16(IIR, I915_READ16(IIR)); | |
3113 | } | |
3114 | ||
a266c7d5 CW |
3115 | static void i915_irq_preinstall(struct drm_device * dev) |
3116 | { | |
3117 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
3118 | int pipe; | |
3119 | ||
3120 | atomic_set(&dev_priv->irq_received, 0); | |
3121 | ||
3122 | if (I915_HAS_HOTPLUG(dev)) { | |
3123 | I915_WRITE(PORT_HOTPLUG_EN, 0); | |
3124 | I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); | |
3125 | } | |
3126 | ||
00d98ebd | 3127 | I915_WRITE16(HWSTAM, 0xeffe); |
a266c7d5 CW |
3128 | for_each_pipe(pipe) |
3129 | I915_WRITE(PIPESTAT(pipe), 0); | |
3130 | I915_WRITE(IMR, 0xffffffff); | |
3131 | I915_WRITE(IER, 0x0); | |
3132 | POSTING_READ(IER); | |
3133 | } | |
3134 | ||
3135 | static int i915_irq_postinstall(struct drm_device *dev) | |
3136 | { | |
3137 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
38bde180 | 3138 | u32 enable_mask; |
a266c7d5 | 3139 | |
38bde180 CW |
3140 | I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); |
3141 | ||
3142 | /* Unmask the interrupts that we always want on. */ | |
3143 | dev_priv->irq_mask = | |
3144 | ~(I915_ASLE_INTERRUPT | | |
3145 | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | | |
3146 | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | | |
3147 | I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | | |
3148 | I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | | |
3149 | I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); | |
3150 | ||
3151 | enable_mask = | |
3152 | I915_ASLE_INTERRUPT | | |
3153 | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | | |
3154 | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | | |
3155 | I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | | |
3156 | I915_USER_INTERRUPT; | |
3157 | ||
a266c7d5 | 3158 | if (I915_HAS_HOTPLUG(dev)) { |
20afbda2 DV |
3159 | I915_WRITE(PORT_HOTPLUG_EN, 0); |
3160 | POSTING_READ(PORT_HOTPLUG_EN); | |
3161 | ||
a266c7d5 CW |
3162 | /* Enable in IER... */ |
3163 | enable_mask |= I915_DISPLAY_PORT_INTERRUPT; | |
3164 | /* and unmask in IMR */ | |
3165 | dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT; | |
3166 | } | |
3167 | ||
a266c7d5 CW |
3168 | I915_WRITE(IMR, dev_priv->irq_mask); |
3169 | I915_WRITE(IER, enable_mask); | |
3170 | POSTING_READ(IER); | |
3171 | ||
f49e38dd | 3172 | i915_enable_asle_pipestat(dev); |
20afbda2 DV |
3173 | |
3174 | return 0; | |
3175 | } | |
3176 | ||
90a72f87 VS |
3177 | /* |
3178 | * Returns true when a page flip has completed. | |
3179 | */ | |
3180 | static bool i915_handle_vblank(struct drm_device *dev, | |
3181 | int plane, int pipe, u32 iir) | |
3182 | { | |
3183 | drm_i915_private_t *dev_priv = dev->dev_private; | |
3184 | u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); | |
3185 | ||
3186 | if (!drm_handle_vblank(dev, pipe)) | |
3187 | return false; | |
3188 | ||
3189 | if ((iir & flip_pending) == 0) | |
3190 | return false; | |
3191 | ||
3192 | intel_prepare_page_flip(dev, plane); | |
3193 | ||
3194 | /* We detect FlipDone by looking for the change in PendingFlip from '1' | |
3195 | * to '0' on the following vblank, i.e. IIR has the PendingFlip | |
3196 | * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence | |
3197 | * the flip is completed (no longer pending). Since this doesn't raise | |
3198 | * an interrupt per se, we watch for the change at vblank. | |
3199 | */ | |
3200 | if (I915_READ(ISR) & flip_pending) | |
3201 | return false; | |
3202 | ||
3203 | intel_finish_page_flip(dev, pipe); | |
3204 | ||
3205 | return true; | |
3206 | } | |
3207 | ||
ff1f525e | 3208 | static irqreturn_t i915_irq_handler(int irq, void *arg) |
a266c7d5 CW |
3209 | { |
3210 | struct drm_device *dev = (struct drm_device *) arg; | |
3211 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
8291ee90 | 3212 | u32 iir, new_iir, pipe_stats[I915_MAX_PIPES]; |
a266c7d5 | 3213 | unsigned long irqflags; |
38bde180 CW |
3214 | u32 flip_mask = |
3215 | I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | | |
3216 | I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; | |
38bde180 | 3217 | int pipe, ret = IRQ_NONE; |
a266c7d5 CW |
3218 | |
3219 | atomic_inc(&dev_priv->irq_received); | |
3220 | ||
3221 | iir = I915_READ(IIR); | |
38bde180 CW |
3222 | do { |
3223 | bool irq_received = (iir & ~flip_mask) != 0; | |
8291ee90 | 3224 | bool blc_event = false; |
a266c7d5 CW |
3225 | |
3226 | /* Can't rely on pipestat interrupt bit in iir as it might | |
3227 | * have been cleared after the pipestat interrupt was received. | |
3228 | * It doesn't set the bit in iir again, but it still produces | |
3229 | * interrupts (for non-MSI). | |
3230 | */ | |
3231 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | |
3232 | if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) | |
3233 | i915_handle_error(dev, false); | |
3234 | ||
3235 | for_each_pipe(pipe) { | |
3236 | int reg = PIPESTAT(pipe); | |
3237 | pipe_stats[pipe] = I915_READ(reg); | |
3238 | ||
38bde180 | 3239 | /* Clear the PIPE*STAT regs before the IIR */ |
a266c7d5 CW |
3240 | if (pipe_stats[pipe] & 0x8000ffff) { |
3241 | if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) | |
3242 | DRM_DEBUG_DRIVER("pipe %c underrun\n", | |
3243 | pipe_name(pipe)); | |
3244 | I915_WRITE(reg, pipe_stats[pipe]); | |
38bde180 | 3245 | irq_received = true; |
a266c7d5 CW |
3246 | } |
3247 | } | |
3248 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | |
3249 | ||
3250 | if (!irq_received) | |
3251 | break; | |
3252 | ||
a266c7d5 CW |
3253 | /* Consume port. Then clear IIR or we'll miss events */ |
3254 | if ((I915_HAS_HOTPLUG(dev)) && | |
3255 | (iir & I915_DISPLAY_PORT_INTERRUPT)) { | |
3256 | u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); | |
b543fb04 | 3257 | u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; |
a266c7d5 CW |
3258 | |
3259 | DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", | |
3260 | hotplug_status); | |
91d131d2 DV |
3261 | |
3262 | intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915); | |
3263 | ||
a266c7d5 | 3264 | I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); |
38bde180 | 3265 | POSTING_READ(PORT_HOTPLUG_STAT); |
a266c7d5 CW |
3266 | } |
3267 | ||
38bde180 | 3268 | I915_WRITE(IIR, iir & ~flip_mask); |
a266c7d5 CW |
3269 | new_iir = I915_READ(IIR); /* Flush posted writes */ |
3270 | ||
a266c7d5 CW |
3271 | if (iir & I915_USER_INTERRUPT) |
3272 | notify_ring(dev, &dev_priv->ring[RCS]); | |
a266c7d5 | 3273 | |
a266c7d5 | 3274 | for_each_pipe(pipe) { |
38bde180 CW |
3275 | int plane = pipe; |
3276 | if (IS_MOBILE(dev)) | |
3277 | plane = !plane; | |
90a72f87 | 3278 | |
8291ee90 | 3279 | if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && |
90a72f87 VS |
3280 | i915_handle_vblank(dev, plane, pipe, iir)) |
3281 | flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane); | |
a266c7d5 CW |
3282 | |
3283 | if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) | |
3284 | blc_event = true; | |
3285 | } | |
3286 | ||
a266c7d5 CW |
3287 | if (blc_event || (iir & I915_ASLE_INTERRUPT)) |
3288 | intel_opregion_asle_intr(dev); | |
3289 | ||
3290 | /* With MSI, interrupts are only generated when iir | |
3291 | * transitions from zero to nonzero. If another bit got | |
3292 | * set while we were handling the existing iir bits, then | |
3293 | * we would never get another interrupt. | |
3294 | * | |
3295 | * This is fine on non-MSI as well, as if we hit this path | |
3296 | * we avoid exiting the interrupt handler only to generate | |
3297 | * another one. | |
3298 | * | |
3299 | * Note that for MSI this could cause a stray interrupt report | |
3300 | * if an interrupt landed in the time between writing IIR and | |
3301 | * the posting read. This should be rare enough to never | |
3302 | * trigger the 99% of 100,000 interrupts test for disabling | |
3303 | * stray interrupts. | |
3304 | */ | |
38bde180 | 3305 | ret = IRQ_HANDLED; |
a266c7d5 | 3306 | iir = new_iir; |
38bde180 | 3307 | } while (iir & ~flip_mask); |
a266c7d5 | 3308 | |
d05c617e | 3309 | i915_update_dri1_breadcrumb(dev); |
8291ee90 | 3310 | |
a266c7d5 CW |
3311 | return ret; |
3312 | } | |
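The MSI comment above is the reason for the loop structure in these handlers: an MSI edge is only generated when IIR goes from zero to non-zero, so the handler re-reads IIR after each ack and keeps iterating until nothing new has latched. A self-contained toy model of that loop, where a canned event sequence stands in for the hardware reads (all names invented), is sketched below:

    #include <stdint.h>
    #include <stdio.h>

    /* fetch_iir() stands in for reading the hardware IIR register; it replays
     * a fixed sequence so the example runs without any hardware. */
    static uint32_t fake_events[] = { 0x11u, 0x04u, 0x00u };
    static unsigned int fake_idx;

    static uint32_t fetch_iir(void)
    {
            return fake_events[fake_idx++];
    }

    int main(void)
    {
            uint32_t iir = fetch_iir();

            while (iir) {
                    printf("handling IIR bits 0x%08x\n", (unsigned int)iir);
                    /* Ack by writing the handled bits back, then re-read at once:
                     * any bit that latched while we were working is serviced in
                     * the next pass instead of being left set with no new edge. */
                    iir = fetch_iir();
            }
            return 0;
    }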
3313 | ||
3314 | static void i915_irq_uninstall(struct drm_device * dev) | |
3315 | { | |
3316 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
3317 | int pipe; | |
3318 | ||
ac4c16c5 EE |
3319 | del_timer_sync(&dev_priv->hotplug_reenable_timer); |
3320 | ||
a266c7d5 CW |
3321 | if (I915_HAS_HOTPLUG(dev)) { |
3322 | I915_WRITE(PORT_HOTPLUG_EN, 0); | |
3323 | I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); | |
3324 | } | |
3325 | ||
00d98ebd | 3326 | I915_WRITE16(HWSTAM, 0xffff); |
55b39755 CW |
3327 | for_each_pipe(pipe) { |
3328 | /* Clear enable bits; then clear status bits */ | |
a266c7d5 | 3329 | I915_WRITE(PIPESTAT(pipe), 0); |
55b39755 CW |
3330 | I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); |
3331 | } | |
a266c7d5 CW |
3332 | I915_WRITE(IMR, 0xffffffff); |
3333 | I915_WRITE(IER, 0x0); | |
3334 | ||
a266c7d5 CW |
3335 | I915_WRITE(IIR, I915_READ(IIR)); |
3336 | } | |
3337 | ||
3338 | static void i965_irq_preinstall(struct drm_device * dev) | |
3339 | { | |
3340 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
3341 | int pipe; | |
3342 | ||
3343 | atomic_set(&dev_priv->irq_received, 0); | |
3344 | ||
adca4730 CW |
3345 | I915_WRITE(PORT_HOTPLUG_EN, 0); |
3346 | I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); | |
a266c7d5 CW |
3347 | |
3348 | I915_WRITE(HWSTAM, 0xeffe); | |
3349 | for_each_pipe(pipe) | |
3350 | I915_WRITE(PIPESTAT(pipe), 0); | |
3351 | I915_WRITE(IMR, 0xffffffff); | |
3352 | I915_WRITE(IER, 0x0); | |
3353 | POSTING_READ(IER); | |
3354 | } | |
3355 | ||
3356 | static int i965_irq_postinstall(struct drm_device *dev) | |
3357 | { | |
3358 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
bbba0a97 | 3359 | u32 enable_mask; |
a266c7d5 | 3360 | u32 error_mask; |
b79480ba | 3361 | unsigned long irqflags; |
a266c7d5 | 3362 | |
a266c7d5 | 3363 | /* Unmask the interrupts that we always want on. */ |
bbba0a97 | 3364 | dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT | |
adca4730 | 3365 | I915_DISPLAY_PORT_INTERRUPT | |
bbba0a97 CW |
3366 | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | |
3367 | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | | |
3368 | I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | | |
3369 | I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | | |
3370 | I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); | |
3371 | ||
3372 | enable_mask = ~dev_priv->irq_mask; | |
21ad8330 VS |
3373 | enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | |
3374 | I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT); | |
bbba0a97 CW |
3375 | enable_mask |= I915_USER_INTERRUPT; |
3376 | ||
3377 | if (IS_G4X(dev)) | |
3378 | enable_mask |= I915_BSD_USER_INTERRUPT; | |
a266c7d5 | 3379 | |
b79480ba DV |
3380 | /* Interrupt setup is already guaranteed to be single-threaded, this is |
3381 | * just to make the assert_spin_locked check happy. */ | |
3382 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | |
515ac2bb | 3383 | i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE); |
b79480ba | 3384 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
a266c7d5 | 3385 | |
a266c7d5 CW |
3386 | /* |
3387 | * Enable some error detection, note the instruction error mask | |
3388 | * bit is reserved, so we leave it masked. | |
3389 | */ | |
3390 | if (IS_G4X(dev)) { | |
3391 | error_mask = ~(GM45_ERROR_PAGE_TABLE | | |
3392 | GM45_ERROR_MEM_PRIV | | |
3393 | GM45_ERROR_CP_PRIV | | |
3394 | I915_ERROR_MEMORY_REFRESH); | |
3395 | } else { | |
3396 | error_mask = ~(I915_ERROR_PAGE_TABLE | | |
3397 | I915_ERROR_MEMORY_REFRESH); | |
3398 | } | |
3399 | I915_WRITE(EMR, error_mask); | |
3400 | ||
3401 | I915_WRITE(IMR, dev_priv->irq_mask); | |
3402 | I915_WRITE(IER, enable_mask); | |
3403 | POSTING_READ(IER); | |
3404 | ||
20afbda2 DV |
3405 | I915_WRITE(PORT_HOTPLUG_EN, 0); |
3406 | POSTING_READ(PORT_HOTPLUG_EN); | |
3407 | ||
f49e38dd | 3408 | i915_enable_asle_pipestat(dev); |
20afbda2 DV |
3409 | |
3410 | return 0; | |
3411 | } | |
3412 | ||
bac56d5b | 3413 | static void i915_hpd_irq_setup(struct drm_device *dev) |
20afbda2 DV |
3414 | { |
3415 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
e5868a31 | 3416 | struct drm_mode_config *mode_config = &dev->mode_config; |
cd569aed | 3417 | struct intel_encoder *intel_encoder; |
20afbda2 DV |
3418 | u32 hotplug_en; |
3419 | ||
b5ea2d56 DV |
3420 | assert_spin_locked(&dev_priv->irq_lock); |
3421 | ||
bac56d5b EE |
3422 | if (I915_HAS_HOTPLUG(dev)) { |
3423 | hotplug_en = I915_READ(PORT_HOTPLUG_EN); | |
3424 | hotplug_en &= ~HOTPLUG_INT_EN_MASK; | |
3425 | /* Note HDMI and DP share hotplug bits */ | |
e5868a31 | 3426 | /* enable bits are the same for all generations */ |
cd569aed EE |
3427 | list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) |
3428 | if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) | |
3429 | hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin]; | |
bac56d5b EE |
3430 | /* Programming the CRT detection parameters tends | |
3431 | * to generate a spurious hotplug event about three | |
3432 | * seconds later. So just do it once. | |
3433 | */ | |
3434 | if (IS_G4X(dev)) | |
3435 | hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; | |
85fc95ba | 3436 | hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK; |
bac56d5b | 3437 | hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; |
a266c7d5 | 3438 | |
bac56d5b EE |
3439 | /* Ignore TV since it's buggy */ |
3440 | I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); | |
3441 | } | |
a266c7d5 CW |
3442 | } |
3443 | ||
ff1f525e | 3444 | static irqreturn_t i965_irq_handler(int irq, void *arg) |
a266c7d5 CW |
3445 | { |
3446 | struct drm_device *dev = (struct drm_device *) arg; | |
3447 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
a266c7d5 CW |
3448 | u32 iir, new_iir; |
3449 | u32 pipe_stats[I915_MAX_PIPES]; | |
a266c7d5 CW |
3450 | unsigned long irqflags; |
3451 | int irq_received; | |
3452 | int ret = IRQ_NONE, pipe; | |
21ad8330 VS |
3453 | u32 flip_mask = |
3454 | I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | | |
3455 | I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; | |
a266c7d5 CW |
3456 | |
3457 | atomic_inc(&dev_priv->irq_received); | |
3458 | ||
3459 | iir = I915_READ(IIR); | |
3460 | ||
a266c7d5 | 3461 | for (;;) { |
2c8ba29f CW |
3462 | bool blc_event = false; |
3463 | ||
21ad8330 | 3464 | irq_received = (iir & ~flip_mask) != 0; |
a266c7d5 CW |
3465 | |
3466 | /* Can't rely on pipestat interrupt bit in iir as it might | |
3467 | * have been cleared after the pipestat interrupt was received. | |
3468 | * It doesn't set the bit in iir again, but it still produces | |
3469 | * interrupts (for non-MSI). | |
3470 | */ | |
3471 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | |
3472 | if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) | |
3473 | i915_handle_error(dev, false); | |
3474 | ||
3475 | for_each_pipe(pipe) { | |
3476 | int reg = PIPESTAT(pipe); | |
3477 | pipe_stats[pipe] = I915_READ(reg); | |
3478 | ||
3479 | /* | |
3480 | * Clear the PIPE*STAT regs before the IIR | |
3481 | */ | |
3482 | if (pipe_stats[pipe] & 0x8000ffff) { | |
3483 | if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) | |
3484 | DRM_DEBUG_DRIVER("pipe %c underrun\n", | |
3485 | pipe_name(pipe)); | |
3486 | I915_WRITE(reg, pipe_stats[pipe]); | |
3487 | irq_received = 1; | |
3488 | } | |
3489 | } | |
3490 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | |
3491 | ||
3492 | if (!irq_received) | |
3493 | break; | |
3494 | ||
3495 | ret = IRQ_HANDLED; | |
3496 | ||
3497 | /* Consume port. Then clear IIR or we'll miss events */ | |
adca4730 | 3498 | if (iir & I915_DISPLAY_PORT_INTERRUPT) { |
a266c7d5 | 3499 | u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); |
b543fb04 EE |
3500 | u32 hotplug_trigger = hotplug_status & (IS_G4X(dev) ? |
3501 | HOTPLUG_INT_STATUS_G4X : | |
4f7fd709 | 3502 | HOTPLUG_INT_STATUS_I915); |
a266c7d5 CW |
3503 | |
3504 | DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", | |
3505 | hotplug_status); | |
91d131d2 DV |
3506 | |
3507 | intel_hpd_irq_handler(dev, hotplug_trigger, | |
3508 | IS_G4X(dev) ? hpd_status_gen4 : hpd_status_i915); | |
3509 | ||
a266c7d5 CW |
3510 | I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); |
3511 | I915_READ(PORT_HOTPLUG_STAT); | |
3512 | } | |
3513 | ||
21ad8330 | 3514 | I915_WRITE(IIR, iir & ~flip_mask); |
a266c7d5 CW |
3515 | new_iir = I915_READ(IIR); /* Flush posted writes */ |
3516 | ||
a266c7d5 CW |
3517 | if (iir & I915_USER_INTERRUPT) |
3518 | notify_ring(dev, &dev_priv->ring[RCS]); | |
3519 | if (iir & I915_BSD_USER_INTERRUPT) | |
3520 | notify_ring(dev, &dev_priv->ring[VCS]); | |
3521 | ||
a266c7d5 | 3522 | for_each_pipe(pipe) { |
2c8ba29f | 3523 | if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS && |
90a72f87 VS |
3524 | i915_handle_vblank(dev, pipe, pipe, iir)) |
3525 | flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe); | |
a266c7d5 CW |
3526 | |
3527 | if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) | |
3528 | blc_event = true; | |
3529 | } | |
3530 | ||
3531 | ||
3532 | if (blc_event || (iir & I915_ASLE_INTERRUPT)) | |
3533 | intel_opregion_asle_intr(dev); | |
3534 | ||
515ac2bb DV |
3535 | if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) |
3536 | gmbus_irq_handler(dev); | |
3537 | ||
a266c7d5 CW |
3538 | /* With MSI, interrupts are only generated when iir |
3539 | * transitions from zero to nonzero. If another bit got | |
3540 | * set while we were handling the existing iir bits, then | |
3541 | * we would never get another interrupt. | |
3542 | * | |
3543 | * This is fine on non-MSI as well, as if we hit this path | |
3544 | * we avoid exiting the interrupt handler only to generate | |
3545 | * another one. | |
3546 | * | |
3547 | * Note that for MSI this could cause a stray interrupt report | |
3548 | * if an interrupt landed in the time between writing IIR and | |
3549 | * the posting read. This should be rare enough to never | |
3550 | * trigger the 99% of 100,000 interrupts test for disabling | |
3551 | * stray interrupts. | |
3552 | */ | |
3553 | iir = new_iir; | |
3554 | } | |
3555 | ||
d05c617e | 3556 | i915_update_dri1_breadcrumb(dev); |
2c8ba29f | 3557 | |
a266c7d5 CW |
3558 | return ret; |
3559 | } | |
3560 | ||
3561 | static void i965_irq_uninstall(struct drm_device * dev) | |
3562 | { | |
3563 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
3564 | int pipe; | |
3565 | ||
3566 | if (!dev_priv) | |
3567 | return; | |
3568 | ||
ac4c16c5 EE |
3569 | del_timer_sync(&dev_priv->hotplug_reenable_timer); |
3570 | ||
adca4730 CW |
3571 | I915_WRITE(PORT_HOTPLUG_EN, 0); |
3572 | I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); | |
a266c7d5 CW |
3573 | |
3574 | I915_WRITE(HWSTAM, 0xffffffff); | |
3575 | for_each_pipe(pipe) | |
3576 | I915_WRITE(PIPESTAT(pipe), 0); | |
3577 | I915_WRITE(IMR, 0xffffffff); | |
3578 | I915_WRITE(IER, 0x0); | |
3579 | ||
3580 | for_each_pipe(pipe) | |
3581 | I915_WRITE(PIPESTAT(pipe), | |
3582 | I915_READ(PIPESTAT(pipe)) & 0x8000ffff); | |
3583 | I915_WRITE(IIR, I915_READ(IIR)); | |
3584 | } | |
3585 | ||
ac4c16c5 EE |
3586 | static void i915_reenable_hotplug_timer_func(unsigned long data) |
3587 | { | |
3588 | drm_i915_private_t *dev_priv = (drm_i915_private_t *)data; | |
3589 | struct drm_device *dev = dev_priv->dev; | |
3590 | struct drm_mode_config *mode_config = &dev->mode_config; | |
3591 | unsigned long irqflags; | |
3592 | int i; | |
3593 | ||
3594 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | |
3595 | for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) { | |
3596 | struct drm_connector *connector; | |
3597 | ||
3598 | if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED) | |
3599 | continue; | |
3600 | ||
3601 | dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED; | |
3602 | ||
3603 | list_for_each_entry(connector, &mode_config->connector_list, head) { | |
3604 | struct intel_connector *intel_connector = to_intel_connector(connector); | |
3605 | ||
3606 | if (intel_connector->encoder->hpd_pin == i) { | |
3607 | if (connector->polled != intel_connector->polled) | |
3608 | DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n", | |
3609 | drm_get_connector_name(connector)); | |
3610 | connector->polled = intel_connector->polled; | |
3611 | if (!connector->polled) | |
3612 | connector->polled = DRM_CONNECTOR_POLL_HPD; | |
3613 | } | |
3614 | } | |
3615 | } | |
3616 | if (dev_priv->display.hpd_irq_setup) | |
3617 | dev_priv->display.hpd_irq_setup(dev); | |
3618 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | |
3619 | } | |
3620 | ||
f71d4af4 JB |
3621 | void intel_irq_init(struct drm_device *dev) |
3622 | { | |
8b2e326d CW |
3623 | struct drm_i915_private *dev_priv = dev->dev_private; |
3624 | ||
3625 | INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); | |
99584db3 | 3626 | INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func); |
c6a828d3 | 3627 | INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work); |
a4da4fa4 | 3628 | INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work); |
8b2e326d | 3629 | |
99584db3 DV |
3630 | setup_timer(&dev_priv->gpu_error.hangcheck_timer, |
3631 | i915_hangcheck_elapsed, | |
61bac78e | 3632 | (unsigned long) dev); |
ac4c16c5 EE |
3633 | setup_timer(&dev_priv->hotplug_reenable_timer, i915_reenable_hotplug_timer_func, |
3634 | (unsigned long) dev_priv); | |
61bac78e | 3635 | |
97a19a24 | 3636 | pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE); |
9ee32fea | 3637 | |
f71d4af4 JB |
3638 | dev->driver->get_vblank_counter = i915_get_vblank_counter; |
3639 | dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ | |
7d4e146f | 3640 | if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { |
f71d4af4 JB |
3641 | dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */ |
3642 | dev->driver->get_vblank_counter = gm45_get_vblank_counter; | |
3643 | } | |
3644 | ||
c3613de9 KP |
3645 | if (drm_core_check_feature(dev, DRIVER_MODESET)) |
3646 | dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp; | |
3647 | else | |
3648 | dev->driver->get_vblank_timestamp = NULL; | |
f71d4af4 JB |
3649 | dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; |
3650 | ||
7e231dbe JB |
3651 | if (IS_VALLEYVIEW(dev)) { |
3652 | dev->driver->irq_handler = valleyview_irq_handler; | |
3653 | dev->driver->irq_preinstall = valleyview_irq_preinstall; | |
3654 | dev->driver->irq_postinstall = valleyview_irq_postinstall; | |
3655 | dev->driver->irq_uninstall = valleyview_irq_uninstall; | |
3656 | dev->driver->enable_vblank = valleyview_enable_vblank; | |
3657 | dev->driver->disable_vblank = valleyview_disable_vblank; | |
fa00abe0 | 3658 | dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; |
4a06e201 | 3659 | } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) { |
7d99163d | 3660 | /* Share uninstall handlers with ILK/SNB */ |
f71d4af4 | 3661 | dev->driver->irq_handler = ivybridge_irq_handler; |
7d99163d | 3662 | dev->driver->irq_preinstall = ivybridge_irq_preinstall; |
f71d4af4 JB |
3663 | dev->driver->irq_postinstall = ivybridge_irq_postinstall; |
3664 | dev->driver->irq_uninstall = ironlake_irq_uninstall; | |
3665 | dev->driver->enable_vblank = ivybridge_enable_vblank; | |
3666 | dev->driver->disable_vblank = ivybridge_disable_vblank; | |
82a28bcf | 3667 | dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup; |
f71d4af4 JB |
3668 | } else if (HAS_PCH_SPLIT(dev)) { |
3669 | dev->driver->irq_handler = ironlake_irq_handler; | |
3670 | dev->driver->irq_preinstall = ironlake_irq_preinstall; | |
3671 | dev->driver->irq_postinstall = ironlake_irq_postinstall; | |
3672 | dev->driver->irq_uninstall = ironlake_irq_uninstall; | |
3673 | dev->driver->enable_vblank = ironlake_enable_vblank; | |
3674 | dev->driver->disable_vblank = ironlake_disable_vblank; | |
82a28bcf | 3675 | dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup; |
f71d4af4 | 3676 | } else { |
c2798b19 CW |
3677 | if (INTEL_INFO(dev)->gen == 2) { |
3678 | dev->driver->irq_preinstall = i8xx_irq_preinstall; | |
3679 | dev->driver->irq_postinstall = i8xx_irq_postinstall; | |
3680 | dev->driver->irq_handler = i8xx_irq_handler; | |
3681 | dev->driver->irq_uninstall = i8xx_irq_uninstall; | |
a266c7d5 CW |
3682 | } else if (INTEL_INFO(dev)->gen == 3) { |
3683 | dev->driver->irq_preinstall = i915_irq_preinstall; | |
3684 | dev->driver->irq_postinstall = i915_irq_postinstall; | |
3685 | dev->driver->irq_uninstall = i915_irq_uninstall; | |
3686 | dev->driver->irq_handler = i915_irq_handler; | |
20afbda2 | 3687 | dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; |
c2798b19 | 3688 | } else { |
a266c7d5 CW |
3689 | dev->driver->irq_preinstall = i965_irq_preinstall; |
3690 | dev->driver->irq_postinstall = i965_irq_postinstall; | |
3691 | dev->driver->irq_uninstall = i965_irq_uninstall; | |
3692 | dev->driver->irq_handler = i965_irq_handler; | |
bac56d5b | 3693 | dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; |
c2798b19 | 3694 | } |
f71d4af4 JB |
3695 | dev->driver->enable_vblank = i915_enable_vblank; |
3696 | dev->driver->disable_vblank = i915_disable_vblank; | |
3697 | } | |
3698 | } | |
20afbda2 DV |
3699 | |
3700 | void intel_hpd_init(struct drm_device *dev) | |
3701 | { | |
3702 | struct drm_i915_private *dev_priv = dev->dev_private; | |
821450c6 EE |
3703 | struct drm_mode_config *mode_config = &dev->mode_config; |
3704 | struct drm_connector *connector; | |
b5ea2d56 | 3705 | unsigned long irqflags; |
821450c6 | 3706 | int i; |
20afbda2 | 3707 | |
821450c6 EE |
3708 | for (i = 1; i < HPD_NUM_PINS; i++) { |
3709 | dev_priv->hpd_stats[i].hpd_cnt = 0; | |
3710 | dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED; | |
3711 | } | |
3712 | list_for_each_entry(connector, &mode_config->connector_list, head) { | |
3713 | struct intel_connector *intel_connector = to_intel_connector(connector); | |
3714 | connector->polled = intel_connector->polled; | |
3715 | if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE) | |
3716 | connector->polled = DRM_CONNECTOR_POLL_HPD; | |
3717 | } | |
b5ea2d56 DV |
3718 | |
3719 | /* Interrupt setup is already guaranteed to be single-threaded, this is | |
3720 | * just to make the assert_spin_locked checks happy. */ | |
3721 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | |
20afbda2 DV |
3722 | if (dev_priv->display.hpd_irq_setup) |
3723 | dev_priv->display.hpd_irq_setup(dev); | |
b5ea2d56 | 3724 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
20afbda2 | 3725 | } |