/* i915_irq.c -- IRQ support for the I915 -*- linux-c -*-
 */
/*
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sysrq.h>
#include <linux/slab.h>
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_drv.h"

static const u32 hpd_ibx[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG
};

static const u32 hpd_cpt[] = {
	[HPD_CRT] = SDE_CRT_HOTPLUG_CPT,
	[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
	[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
	[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
	[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
};

static const u32 hpd_mask_i915[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_EN,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_EN,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
};

static const u32 hpd_status_gen4[] = {
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_G4X,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

static const u32 hpd_status_i915[] = { /* i915 and valleyview are the same */
	[HPD_CRT] = CRT_HOTPLUG_INT_STATUS,
	[HPD_SDVO_B] = SDVOB_HOTPLUG_INT_STATUS_I915,
	[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
	[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
	[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
	[HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
};

/* For display hotplug interrupt */
static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pc8.irqs_disabled) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pc8.regsave.deimr &= ~mask;
		return;
	}

	if ((dev_priv->irq_mask & mask) != 0) {
		dev_priv->irq_mask &= ~mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

static void
ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pc8.irqs_disabled) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pc8.regsave.deimr |= mask;
		return;
	}

	if ((dev_priv->irq_mask & mask) != mask) {
		dev_priv->irq_mask |= mask;
		I915_WRITE(DEIMR, dev_priv->irq_mask);
		POSTING_READ(DEIMR);
	}
}

/**
 * ilk_update_gt_irq - update GTIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ilk_update_gt_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pc8.irqs_disabled) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pc8.regsave.gtimr &= ~interrupt_mask;
		dev_priv->pc8.regsave.gtimr |= (~enabled_irq_mask &
						interrupt_mask);
		return;
	}

	dev_priv->gt_irq_mask &= ~interrupt_mask;
	dev_priv->gt_irq_mask |= (~enabled_irq_mask & interrupt_mask);
	I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
	POSTING_READ(GTIMR);
}

void ilk_enable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, mask);
}

void ilk_disable_gt_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	ilk_update_gt_irq(dev_priv, mask, 0);
}

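/*
 * Worked example of the GTIMR update above (illustrative only): a set bit
 * in GTIMR masks (disables) that interrupt. With interrupt_mask = 0x3 and
 * enabled_irq_mask = 0x1, bit 0 is cleared in gt_irq_mask (unmasked) and
 * bit 1 is set (masked); bits outside interrupt_mask are left untouched.
 * ilk_enable_gt_irq(dev_priv, mask) therefore unmasks every bit in mask,
 * while ilk_disable_gt_irq(dev_priv, mask) masks them all.
 */
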
/**
 * snb_update_pm_irq - update GEN6_PMIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
			      uint32_t interrupt_mask,
			      uint32_t enabled_irq_mask)
{
	uint32_t new_val;

	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pc8.irqs_disabled) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pc8.regsave.gen6_pmimr &= ~interrupt_mask;
		dev_priv->pc8.regsave.gen6_pmimr |= (~enabled_irq_mask &
						     interrupt_mask);
		return;
	}

	new_val = dev_priv->pm_irq_mask;
	new_val &= ~interrupt_mask;
	new_val |= (~enabled_irq_mask & interrupt_mask);

	if (new_val != dev_priv->pm_irq_mask) {
		dev_priv->pm_irq_mask = new_val;
		I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask);
		POSTING_READ(GEN6_PMIMR);
	}
}

void snb_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, mask);
}

void snb_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
{
	snb_update_pm_irq(dev_priv, mask, 0);
}

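/*
 * Note (illustrative): unlike the GTIMR helper above, this one computes the
 * new mask into a temporary and only touches GEN6_PMIMR when the value
 * actually changes, which avoids an MMIO write plus posting read on the
 * frequently-hit RPS/ring paths where the "update" is often a no-op.
 */
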
static bool ivb_can_enable_err_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *crtc;
	enum pipe pipe;

	assert_spin_locked(&dev_priv->irq_lock);

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->cpu_fifo_underrun_disabled)
			return false;
	}

	return true;
}

static bool cpt_can_enable_serr_int(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	enum pipe pipe;
	struct intel_crtc *crtc;

	assert_spin_locked(&dev_priv->irq_lock);

	for_each_pipe(pipe) {
		crtc = to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);

		if (crtc->pch_fifo_underrun_disabled)
			return false;
	}

	return true;
}

static void ironlake_set_fifo_underrun_reporting(struct drm_device *dev,
						 enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (pipe == PIPE_A) ? DE_PIPEA_FIFO_UNDERRUN :
					  DE_PIPEB_FIFO_UNDERRUN;

	if (enable)
		ironlake_enable_display_irq(dev_priv, bit);
	else
		ironlake_disable_display_irq(dev_priv, bit);
}

static void ivybridge_set_fifo_underrun_reporting(struct drm_device *dev,
						  enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	if (enable) {
		I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));

		if (!ivb_can_enable_err_int(dev))
			return;

		ironlake_enable_display_irq(dev_priv, DE_ERR_INT_IVB);
	} else {
		bool was_enabled = !(I915_READ(DEIMR) & DE_ERR_INT_IVB);

		/* Change the state _after_ we've read out the current one. */
		ironlake_disable_display_irq(dev_priv, DE_ERR_INT_IVB);

		if (!was_enabled &&
		    (I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe))) {
			DRM_DEBUG_KMS("uncleared fifo underrun on pipe %c\n",
				      pipe_name(pipe));
		}
	}
}

/**
 * ibx_display_interrupt_update - update SDEIMR
 * @dev_priv: driver private
 * @interrupt_mask: mask of interrupt bits to update
 * @enabled_irq_mask: mask of interrupt bits to enable
 */
static void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
					 uint32_t interrupt_mask,
					 uint32_t enabled_irq_mask)
{
	uint32_t sdeimr = I915_READ(SDEIMR);
	sdeimr &= ~interrupt_mask;
	sdeimr |= (~enabled_irq_mask & interrupt_mask);

	assert_spin_locked(&dev_priv->irq_lock);

	if (dev_priv->pc8.irqs_disabled &&
	    (interrupt_mask & SDE_HOTPLUG_MASK_CPT)) {
		WARN(1, "IRQs disabled\n");
		dev_priv->pc8.regsave.sdeimr &= ~interrupt_mask;
		dev_priv->pc8.regsave.sdeimr |= (~enabled_irq_mask &
						 interrupt_mask);
		return;
	}

	I915_WRITE(SDEIMR, sdeimr);
	POSTING_READ(SDEIMR);
}
#define ibx_enable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), (bits))
#define ibx_disable_display_interrupt(dev_priv, bits) \
	ibx_display_interrupt_update((dev_priv), (bits), 0)

static void ibx_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit = (pch_transcoder == TRANSCODER_A) ?
		       SDE_TRANSA_FIFO_UNDER : SDE_TRANSB_FIFO_UNDER;

	if (enable)
		ibx_enable_display_interrupt(dev_priv, bit);
	else
		ibx_disable_display_interrupt(dev_priv, bit);
}

static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
					    enum transcoder pch_transcoder,
					    bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (enable) {
		I915_WRITE(SERR_INT,
			   SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));

		if (!cpt_can_enable_serr_int(dev))
			return;

		ibx_enable_display_interrupt(dev_priv, SDE_ERROR_CPT);
	} else {
		uint32_t tmp = I915_READ(SERR_INT);
		bool was_enabled = !(I915_READ(SDEIMR) & SDE_ERROR_CPT);

		/* Change the state _after_ we've read out the current one. */
		ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);

		if (!was_enabled &&
		    (tmp & SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder))) {
			DRM_DEBUG_KMS("uncleared pch fifo underrun on pch transcoder %c\n",
				      transcoder_name(pch_transcoder));
		}
	}
}

/**
 * intel_set_cpu_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pipe: pipe
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable CPU fifo underruns for a specific
 * pipe. Notice that on some Gens (e.g. IVB, HSW), disabling FIFO underrun
 * reporting for one pipe may also disable all the other CPU error interrupts
 * for the other pipes, due to the fact that there's just one interrupt
 * mask/enable bit for all the pipes.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_cpu_fifo_underrun_reporting(struct drm_device *dev,
					   enum pipe pipe, bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned long flags;
	bool ret;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	ret = !intel_crtc->cpu_fifo_underrun_disabled;

	if (enable == ret)
		goto done;

	intel_crtc->cpu_fifo_underrun_disabled = !enable;

	if (IS_GEN5(dev) || IS_GEN6(dev))
		ironlake_set_fifo_underrun_reporting(dev, pipe, enable);
	else if (IS_GEN7(dev))
		ivybridge_set_fifo_underrun_reporting(dev, pipe, enable);

done:
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
	return ret;
}

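/*
 * Typical usage sketch (illustrative, not a call site in this file): a
 * caller that needs to silence underruns around an operation can save and
 * restore the previous state via the return value, e.g.
 *
 *	bool old = intel_set_cpu_fifo_underrun_reporting(dev, pipe, false);
 *	... perform the work that is expected to underrun ...
 *	intel_set_cpu_fifo_underrun_reporting(dev, pipe, old);
 */
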
/**
 * intel_set_pch_fifo_underrun_reporting - enable/disable FIFO underrun messages
 * @dev: drm device
 * @pch_transcoder: the PCH transcoder (same as pipe on IVB and older)
 * @enable: true if we want to report FIFO underrun errors, false otherwise
 *
 * This function makes us disable or enable PCH fifo underruns for a specific
 * PCH transcoder. Notice that on some PCHs (e.g. CPT/PPT), disabling FIFO
 * underrun reporting for one transcoder may also disable all the other PCH
 * error interrupts for the other transcoders, due to the fact that there's
 * just one interrupt mask/enable bit for all the transcoders.
 *
 * Returns the previous state of underrun reporting.
 */
bool intel_set_pch_fifo_underrun_reporting(struct drm_device *dev,
					   enum transcoder pch_transcoder,
					   bool enable)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pch_transcoder];
	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
	unsigned long flags;
	bool ret;

	/*
	 * NOTE: Pre-LPT has a fixed cpu pipe -> pch transcoder mapping, but LPT
	 * has only one pch transcoder A that all pipes can use. To avoid racy
	 * pch transcoder -> pipe lookups from interrupt code simply store the
	 * underrun statistics in crtc A. Since we never expose this anywhere
	 * nor use it outside of the fifo underrun code here using the "wrong"
	 * crtc on LPT won't cause issues.
	 */

	spin_lock_irqsave(&dev_priv->irq_lock, flags);

	ret = !intel_crtc->pch_fifo_underrun_disabled;

	if (enable == ret)
		goto done;

	intel_crtc->pch_fifo_underrun_disabled = !enable;

	if (HAS_PCH_IBX(dev))
		ibx_set_fifo_underrun_reporting(dev, pch_transcoder, enable);
	else
		cpt_set_fifo_underrun_reporting(dev, pch_transcoder, enable);

done:
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
	return ret;
}

void
i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0x7fff0000;

	assert_spin_locked(&dev_priv->irq_lock);

	if ((pipestat & mask) == mask)
		return;

	/* Enable the interrupt, clear any pending status */
	pipestat |= mask | (mask >> 16);
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
{
	u32 reg = PIPESTAT(pipe);
	u32 pipestat = I915_READ(reg) & 0x7fff0000;

	assert_spin_locked(&dev_priv->irq_lock);

	if ((pipestat & mask) == 0)
		return;

	pipestat &= ~mask;
	I915_WRITE(reg, pipestat);
	POSTING_READ(reg);
}

/**
 * i915_enable_asle_pipestat - enable ASLE pipestat for OpRegion
 */
static void i915_enable_asle_pipestat(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long irqflags;

	if (!dev_priv->opregion.asle || !IS_MOBILE(dev))
		return;

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	i915_enable_pipestat(dev_priv, 1, PIPE_LEGACY_BLC_EVENT_ENABLE);
	if (INTEL_INFO(dev)->gen >= 4)
		i915_enable_pipestat(dev_priv, 0, PIPE_LEGACY_BLC_EVENT_ENABLE);

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}

/**
 * i915_pipe_enabled - check if a pipe is enabled
 * @dev: DRM device
 * @pipe: pipe to check
 *
 * Reading certain registers when the pipe is disabled can hang the chip.
 * Use this routine to make sure the PLL is running and the pipe is active
 * before reading such registers if unsure.
 */
static int
i915_pipe_enabled(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		/* Locking is horribly broken here, but whatever. */
		struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);

		return intel_crtc->active;
	} else {
		return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
	}
}

/* Called from drm generic code, passed a 'crtc', which
 * we use as a pipe index
 */
static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long high_frame;
	unsigned long low_frame;
	u32 high1, high2, low;

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	high_frame = PIPEFRAME(pipe);
	low_frame = PIPEFRAMEPIXEL(pipe);

	/*
	 * High & low register fields aren't synchronized, so make sure
	 * we get a low value that's stable across two reads of the high
	 * register.
	 */
	do {
		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
		low = I915_READ(low_frame) & PIPE_FRAME_LOW_MASK;
		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
	} while (high1 != high2);

	high1 >>= PIPE_FRAME_HIGH_SHIFT;
	low >>= PIPE_FRAME_LOW_SHIFT;
	return (high1 << 8) | low;
}

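/*
 * Illustrative note on the read loop above: the frame counter is split
 * across two registers, so the high half is read, then the low half, then
 * the high half again; only when both high reads agree is the low value
 * known to belong to that high value. The combined counter is
 * (high1 << 8) | low, i.e. the low field supplies the bottom 8 bits of the
 * frame count and the high field the remainder.
 */
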
static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int reg = PIPE_FRMCOUNT_GM45(pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	return I915_READ(reg);
}

static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
				    int *vpos, int *hpos)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 vbl = 0, position = 0;
	int vbl_start, vbl_end, htotal, vtotal;
	bool in_vbl = true;
	int ret = 0;
	enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
								      pipe);

	if (!i915_pipe_enabled(dev, pipe)) {
		DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
				 "pipe %c\n", pipe_name(pipe));
		return 0;
	}

	/* Get vtotal. */
	vtotal = 1 + ((I915_READ(VTOTAL(cpu_transcoder)) >> 16) & 0x1fff);

	if (INTEL_INFO(dev)->gen >= 4) {
		/* No obvious pixelcount register. Only query vertical
		 * scanout position from Display scan line register.
		 */
		position = I915_READ(PIPEDSL(pipe));

		/* Decode into vertical scanout position. Don't have
		 * horizontal scanout position.
		 */
		*vpos = position & 0x1fff;
		*hpos = 0;
	} else {
		/* Have access to pixelcount since start of frame.
		 * We can split this into vertical and horizontal
		 * scanout position.
		 */
		position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;

		htotal = 1 + ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff);
		*vpos = position / htotal;
		*hpos = position - (*vpos * htotal);
	}

	/* Query vblank area. */
	vbl = I915_READ(VBLANK(cpu_transcoder));

	/* Test position against vblank region. */
	vbl_start = vbl & 0x1fff;
	vbl_end = (vbl >> 16) & 0x1fff;

	if ((*vpos < vbl_start) || (*vpos > vbl_end))
		in_vbl = false;

	/* Inside "upper part" of vblank area? Apply corrective offset: */
	if (in_vbl && (*vpos >= vbl_start))
		*vpos = *vpos - vtotal;

	/* Readouts valid? */
	if (vbl > 0)
		ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;

	/* In vblank? */
	if (in_vbl)
		ret |= DRM_SCANOUTPOS_INVBL;

	return ret;
}

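/*
 * Worked example for the pre-gen4 branch above (numbers are illustrative):
 * with htotal = 2200 pixels per line and a raw pixel count of 4500 since
 * the start of the frame, *vpos = 4500 / 2200 = 2 and
 * *hpos = 4500 - 2 * 2200 = 100, i.e. the beam is 100 pixels into
 * scanline 2. On gen4+ only the scanline register is available, so *hpos
 * is reported as 0.
 */
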
static int i915_get_vblank_timestamp(struct drm_device *dev, int pipe,
				     int *max_error,
				     struct timeval *vblank_time,
				     unsigned flags)
{
	struct drm_crtc *crtc;

	if (pipe < 0 || pipe >= INTEL_INFO(dev)->num_pipes) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	/* Get drm_crtc to timestamp: */
	crtc = intel_get_crtc_for_pipe(dev, pipe);
	if (crtc == NULL) {
		DRM_ERROR("Invalid crtc %d\n", pipe);
		return -EINVAL;
	}

	if (!crtc->enabled) {
		DRM_DEBUG_KMS("crtc %d is disabled\n", pipe);
		return -EBUSY;
	}

	/* Helper routine in DRM core does all the work: */
	return drm_calc_vbltimestamp_from_scanoutpos(dev, pipe, max_error,
						     vblank_time, flags,
						     crtc);
}

static bool intel_hpd_irq_event(struct drm_device *dev,
				struct drm_connector *connector)
{
	enum drm_connector_status old_status;

	WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
	old_status = connector->status;

	connector->status = connector->funcs->detect(connector, false);
	if (old_status == connector->status)
		return false;

	DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
		      connector->base.id,
		      drm_get_connector_name(connector),
		      drm_get_connector_status_name(old_status),
		      drm_get_connector_status_name(connector->status));

	return true;
}

/*
 * Handle hotplug events outside the interrupt handler proper.
 */
#define I915_REENABLE_HOTPLUG_DELAY (2*60*1000)

static void i915_hotplug_work_func(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    hotplug_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_mode_config *mode_config = &dev->mode_config;
	struct intel_connector *intel_connector;
	struct intel_encoder *intel_encoder;
	struct drm_connector *connector;
	unsigned long irqflags;
	bool hpd_disabled = false;
	bool changed = false;
	u32 hpd_event_bits;

	/* HPD irq before everything is fully set up. */
	if (!dev_priv->enable_hotplug_processing)
		return;

	mutex_lock(&mode_config->mutex);
	DRM_DEBUG_KMS("running encoder hotplug functions\n");

	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

	hpd_event_bits = dev_priv->hpd_event_bits;
	dev_priv->hpd_event_bits = 0;
	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		intel_encoder = intel_connector->encoder;
		if (intel_encoder->hpd_pin > HPD_NONE &&
		    dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_MARK_DISABLED &&
		    connector->polled == DRM_CONNECTOR_POLL_HPD) {
			DRM_INFO("HPD interrupt storm detected on connector %s: "
				 "switching from hotplug detection to polling\n",
				 drm_get_connector_name(connector));
			dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark = HPD_DISABLED;
			connector->polled = DRM_CONNECTOR_POLL_CONNECT
				| DRM_CONNECTOR_POLL_DISCONNECT;
			hpd_disabled = true;
		}
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
				      drm_get_connector_name(connector), intel_encoder->hpd_pin);
		}
	}
	/* if there were no outputs to poll, poll was disabled,
	 * therefore make sure it's enabled when disabling HPD on
	 * some connectors */
	if (hpd_disabled) {
		drm_kms_helper_poll_enable(dev);
		mod_timer(&dev_priv->hotplug_reenable_timer,
			  jiffies + msecs_to_jiffies(I915_REENABLE_HOTPLUG_DELAY));
	}

	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

	list_for_each_entry(connector, &mode_config->connector_list, head) {
		intel_connector = to_intel_connector(connector);
		intel_encoder = intel_connector->encoder;
		if (hpd_event_bits & (1 << intel_encoder->hpd_pin)) {
			if (intel_encoder->hot_plug)
				intel_encoder->hot_plug(intel_encoder);
			if (intel_hpd_irq_event(dev, connector))
				changed = true;
		}
	}
	mutex_unlock(&mode_config->mutex);

	if (changed)
		drm_kms_helper_hotplug_event(dev);
}

static void ironlake_rps_change_irq_handler(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 busy_up, busy_down, max_avg, min_avg;
	u8 new_delay;

	spin_lock(&mchdev_lock);

	I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));

	new_delay = dev_priv->ips.cur_delay;

	I915_WRITE16(MEMINTRSTS, MEMINT_EVAL_CHG);
	busy_up = I915_READ(RCPREVBSYTUPAVG);
	busy_down = I915_READ(RCPREVBSYTDNAVG);
	max_avg = I915_READ(RCBMAXAVG);
	min_avg = I915_READ(RCBMINAVG);

	/* Handle RCS change request from hw */
	if (busy_up > max_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.cur_delay - 1;
		if (new_delay < dev_priv->ips.max_delay)
			new_delay = dev_priv->ips.max_delay;
	} else if (busy_down < min_avg) {
		if (dev_priv->ips.cur_delay != dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.cur_delay + 1;
		if (new_delay > dev_priv->ips.min_delay)
			new_delay = dev_priv->ips.min_delay;
	}

	if (ironlake_set_drps(dev, new_delay))
		dev_priv->ips.cur_delay = new_delay;

	spin_unlock(&mchdev_lock);

	return;
}

static void notify_ring(struct drm_device *dev,
			struct intel_ring_buffer *ring)
{
	if (ring->obj == NULL)
		return;

	trace_i915_gem_request_complete(ring);

	wake_up_all(&ring->irq_queue);
	i915_queue_hangcheck(dev);
}

static void gen6_pm_rps_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    rps.work);
	u32 pm_iir;
	int new_delay, adj;

	spin_lock_irq(&dev_priv->irq_lock);
	pm_iir = dev_priv->rps.pm_iir;
	dev_priv->rps.pm_iir = 0;
	/* Make sure not to corrupt PMIMR state used by ringbuffer code */
	snb_enable_pm_irq(dev_priv, GEN6_PM_RPS_EVENTS);
	spin_unlock_irq(&dev_priv->irq_lock);

	/* Make sure we didn't queue anything we're not going to process. */
	WARN_ON(pm_iir & ~GEN6_PM_RPS_EVENTS);

	if ((pm_iir & GEN6_PM_RPS_EVENTS) == 0)
		return;

	mutex_lock(&dev_priv->rps.hw_lock);

	adj = dev_priv->rps.last_adj;
	if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
		if (adj > 0)
			adj *= 2;
		else
			adj = 1;
		new_delay = dev_priv->rps.cur_delay + adj;

		/*
		 * For better performance, jump directly
		 * to RPe if we're below it.
		 */
		if (new_delay < dev_priv->rps.rpe_delay)
			new_delay = dev_priv->rps.rpe_delay;
	} else if (pm_iir & GEN6_PM_RP_DOWN_TIMEOUT) {
		if (dev_priv->rps.cur_delay > dev_priv->rps.rpe_delay)
			new_delay = dev_priv->rps.rpe_delay;
		else
			new_delay = dev_priv->rps.min_delay;
		adj = 0;
	} else if (pm_iir & GEN6_PM_RP_DOWN_THRESHOLD) {
		if (adj < 0)
			adj *= 2;
		else
			adj = -1;
		new_delay = dev_priv->rps.cur_delay + adj;
	} else { /* unknown event */
		new_delay = dev_priv->rps.cur_delay;
	}

	/* sysfs frequency interfaces may have snuck in while servicing the
	 * interrupt
	 */
	if (new_delay < (int)dev_priv->rps.min_delay)
		new_delay = dev_priv->rps.min_delay;
	if (new_delay > (int)dev_priv->rps.max_delay)
		new_delay = dev_priv->rps.max_delay;
	dev_priv->rps.last_adj = new_delay - dev_priv->rps.cur_delay;

	if (IS_VALLEYVIEW(dev_priv->dev))
		valleyview_set_rps(dev_priv->dev, new_delay);
	else
		gen6_set_rps(dev_priv->dev, new_delay);

	mutex_unlock(&dev_priv->rps.hw_lock);
}

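/*
 * Illustrative walk-through of the adjustment logic above: last_adj carries
 * the previous step across invocations, so a run of consecutive
 * GEN6_PM_RP_UP_THRESHOLD events ramps the frequency by 1, 2, 4, ... steps
 * per event (and similarly -1, -2, -4, ... for down-threshold events),
 * while a down-timeout or a change of direction resets the step size. The
 * result is always clamped to the [min_delay, max_delay] range.
 */
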
/**
 * ivybridge_parity_work - Workqueue called when a parity error interrupt
 * occurred.
 * @work: workqueue struct
 *
 * Doesn't actually do anything except notify userspace. As a consequence of
 * this event, userspace should try to remap the bad rows since statistically
 * it is likely the same row is more likely to go bad again.
 */
static void ivybridge_parity_work(struct work_struct *work)
{
	drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
						    l3_parity.error_work);
	u32 error_status, row, bank, subbank;
	char *parity_event[6];
	uint32_t misccpctl;
	unsigned long flags;
	uint8_t slice = 0;

	/* We must turn off DOP level clock gating to access the L3 registers.
	 * In order to prevent a get/put style interface, acquire struct mutex
	 * any time we access those registers.
	 */
	mutex_lock(&dev_priv->dev->struct_mutex);

	/* If we've screwed up tracking, just let the interrupt fire again */
	if (WARN_ON(!dev_priv->l3_parity.which_slice))
		goto out;

	misccpctl = I915_READ(GEN7_MISCCPCTL);
	I915_WRITE(GEN7_MISCCPCTL, misccpctl & ~GEN7_DOP_CLOCK_GATE_ENABLE);
	POSTING_READ(GEN7_MISCCPCTL);

	while ((slice = ffs(dev_priv->l3_parity.which_slice)) != 0) {
		u32 reg;

		slice--;
		if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv->dev)))
			break;

		dev_priv->l3_parity.which_slice &= ~(1<<slice);

		reg = GEN7_L3CDERRST1 + (slice * 0x200);

		error_status = I915_READ(reg);
		row = GEN7_PARITY_ERROR_ROW(error_status);
		bank = GEN7_PARITY_ERROR_BANK(error_status);
		subbank = GEN7_PARITY_ERROR_SUBBANK(error_status);

		I915_WRITE(reg, GEN7_PARITY_ERROR_VALID | GEN7_L3CDERRST1_ENABLE);
		POSTING_READ(reg);

		parity_event[0] = I915_L3_PARITY_UEVENT "=1";
		parity_event[1] = kasprintf(GFP_KERNEL, "ROW=%d", row);
		parity_event[2] = kasprintf(GFP_KERNEL, "BANK=%d", bank);
		parity_event[3] = kasprintf(GFP_KERNEL, "SUBBANK=%d", subbank);
		parity_event[4] = kasprintf(GFP_KERNEL, "SLICE=%d", slice);
		parity_event[5] = NULL;

		kobject_uevent_env(&dev_priv->dev->primary->kdev.kobj,
				   KOBJ_CHANGE, parity_event);

		DRM_DEBUG("Parity error: Slice = %d, Row = %d, Bank = %d, Sub bank = %d.\n",
			  slice, row, bank, subbank);

		kfree(parity_event[4]);
		kfree(parity_event[3]);
		kfree(parity_event[2]);
		kfree(parity_event[1]);
	}

	I915_WRITE(GEN7_MISCCPCTL, misccpctl);

out:
	WARN_ON(dev_priv->l3_parity.which_slice);
	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	ilk_enable_gt_irq(dev_priv, GT_PARITY_ERROR(dev_priv->dev));
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	mutex_unlock(&dev_priv->dev->struct_mutex);
}

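/*
 * What userspace sees from the work handler above (illustrative): a
 * KOBJ_CHANGE uevent on the drm device whose environment carries the
 * I915_L3_PARITY_UEVENT marker set to 1 plus ROW=, BANK=, SUBBANK= and
 * SLICE= key/value pairs, so a listener such as a udev rule can record the
 * failing L3 location and arrange for the row to be remapped.
 */
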
static void ivybridge_parity_error_irq_handler(struct drm_device *dev, u32 iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

	if (!HAS_L3_DPF(dev))
		return;

	spin_lock(&dev_priv->irq_lock);
	ilk_disable_gt_irq(dev_priv, GT_PARITY_ERROR(dev));
	spin_unlock(&dev_priv->irq_lock);

	iir &= GT_PARITY_ERROR(dev);
	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT_S1)
		dev_priv->l3_parity.which_slice |= 1 << 1;

	if (iir & GT_RENDER_L3_PARITY_ERROR_INTERRUPT)
		dev_priv->l3_parity.which_slice |= 1 << 0;

	queue_work(dev_priv->wq, &dev_priv->l3_parity.error_work);
}

static void ilk_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{
	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & ILK_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
}

static void snb_gt_irq_handler(struct drm_device *dev,
			       struct drm_i915_private *dev_priv,
			       u32 gt_iir)
{

	if (gt_iir &
	    (GT_RENDER_USER_INTERRUPT | GT_RENDER_PIPECTL_NOTIFY_INTERRUPT))
		notify_ring(dev, &dev_priv->ring[RCS]);
	if (gt_iir & GT_BSD_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[VCS]);
	if (gt_iir & GT_BLT_USER_INTERRUPT)
		notify_ring(dev, &dev_priv->ring[BCS]);

	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
		      GT_BSD_CS_ERROR_INTERRUPT |
		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
		DRM_ERROR("GT error interrupt 0x%08x\n", gt_iir);
		i915_handle_error(dev, false);
	}

	if (gt_iir & GT_PARITY_ERROR(dev))
		ivybridge_parity_error_irq_handler(dev, gt_iir);
}

#define HPD_STORM_DETECT_PERIOD 1000
#define HPD_STORM_THRESHOLD 5

static inline void intel_hpd_irq_handler(struct drm_device *dev,
					 u32 hotplug_trigger,
					 const u32 *hpd)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int i;
	bool storm_detected = false;

	if (!hotplug_trigger)
		return;

	spin_lock(&dev_priv->irq_lock);
	for (i = 1; i < HPD_NUM_PINS; i++) {

		WARN(((hpd[i] & hotplug_trigger) &&
		      dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED),
		     "Received HPD interrupt although disabled\n");

		if (!(hpd[i] & hotplug_trigger) ||
		    dev_priv->hpd_stats[i].hpd_mark != HPD_ENABLED)
			continue;

		dev_priv->hpd_event_bits |= (1 << i);
		if (!time_in_range(jiffies, dev_priv->hpd_stats[i].hpd_last_jiffies,
				   dev_priv->hpd_stats[i].hpd_last_jiffies
				   + msecs_to_jiffies(HPD_STORM_DETECT_PERIOD))) {
			dev_priv->hpd_stats[i].hpd_last_jiffies = jiffies;
			dev_priv->hpd_stats[i].hpd_cnt = 0;
			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: 0\n", i);
		} else if (dev_priv->hpd_stats[i].hpd_cnt > HPD_STORM_THRESHOLD) {
			dev_priv->hpd_stats[i].hpd_mark = HPD_MARK_DISABLED;
			dev_priv->hpd_event_bits &= ~(1 << i);
			DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", i);
			storm_detected = true;
		} else {
			dev_priv->hpd_stats[i].hpd_cnt++;
			DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", i,
				      dev_priv->hpd_stats[i].hpd_cnt);
		}
	}

	if (storm_detected)
		dev_priv->display.hpd_irq_setup(dev);
	spin_unlock(&dev_priv->irq_lock);

	/*
	 * Our hotplug handler can grab modeset locks (by calling down into the
	 * fb helpers). Hence it must not be run on our own dev-priv->wq work
	 * queue for otherwise the flush_work in the pageflip code will
	 * deadlock.
	 */
	schedule_work(&dev_priv->hotplug_work);
}

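/*
 * Storm detection in a nutshell (illustrative): each HPD pin keeps a count
 * of interrupts seen within a HPD_STORM_DETECT_PERIOD (1000 ms) window. If
 * the count exceeds HPD_STORM_THRESHOLD (5), the pin is marked
 * HPD_MARK_DISABLED, its pending event bit is dropped, and
 * i915_hotplug_work_func() later switches the affected connector from
 * interrupt-driven detection to polling until the reenable timer
 * (I915_REENABLE_HOTPLUG_DELAY) fires.
 */
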
static void gmbus_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

static void dp_aux_irq_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = (drm_i915_private_t *) dev->dev_private;

	wake_up_all(&dev_priv->gmbus_wait_queue);
}

/* The RPS events need forcewake, so we add them to a work queue and mask their
 * IMR bits until the work is done. Other interrupts can be processed without
 * the work queue. */
static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
{
	if (pm_iir & GEN6_PM_RPS_EVENTS) {
		spin_lock(&dev_priv->irq_lock);
		dev_priv->rps.pm_iir |= pm_iir & GEN6_PM_RPS_EVENTS;
		snb_disable_pm_irq(dev_priv, pm_iir & GEN6_PM_RPS_EVENTS);
		spin_unlock(&dev_priv->irq_lock);

		queue_work(dev_priv->wq, &dev_priv->rps.work);
	}

	if (HAS_VEBOX(dev_priv->dev)) {
		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
			notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);

		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
			DRM_ERROR("VEBOX CS error interrupt 0x%08x\n", pm_iir);
			i915_handle_error(dev_priv->dev, false);
		}
	}
}

static irqreturn_t valleyview_irq_handler(int irq, void *arg)
{
	struct drm_device *dev = (struct drm_device *) arg;
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	u32 iir, gt_iir, pm_iir;
	irqreturn_t ret = IRQ_NONE;
	unsigned long irqflags;
	int pipe;
	u32 pipe_stats[I915_MAX_PIPES];

	atomic_inc(&dev_priv->irq_received);

	while (true) {
		iir = I915_READ(VLV_IIR);
		gt_iir = I915_READ(GTIIR);
		pm_iir = I915_READ(GEN6_PMIIR);

		if (gt_iir == 0 && pm_iir == 0 && iir == 0)
			goto out;

		ret = IRQ_HANDLED;

		snb_gt_irq_handler(dev, dev_priv, gt_iir);

		spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
		for_each_pipe(pipe) {
			int reg = PIPESTAT(pipe);
			pipe_stats[pipe] = I915_READ(reg);

			/*
			 * Clear the PIPE*STAT regs before the IIR
			 */
			if (pipe_stats[pipe] & 0x8000ffff) {
				if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS)
					DRM_DEBUG_DRIVER("pipe %c underrun\n",
							 pipe_name(pipe));
				I915_WRITE(reg, pipe_stats[pipe]);
			}
		}
		spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);

		for_each_pipe(pipe) {
			if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
				drm_handle_vblank(dev, pipe);

			if (pipe_stats[pipe] & PLANE_FLIPDONE_INT_STATUS_VLV) {
				intel_prepare_page_flip(dev, pipe);
				intel_finish_page_flip(dev, pipe);
			}
		}

		/* Consume port. Then clear IIR or we'll miss events */
		if (iir & I915_DISPLAY_PORT_INTERRUPT) {
			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
			u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915;

			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
					 hotplug_status);

			intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915);

			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
			I915_READ(PORT_HOTPLUG_STAT);
		}

		if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS)
			gmbus_irq_handler(dev);

		if (pm_iir)
			gen6_rps_irq_handler(dev_priv, pm_iir);

		I915_WRITE(GTIIR, gt_iir);
		I915_WRITE(GEN6_PMIIR, pm_iir);
		I915_WRITE(VLV_IIR, iir);
	}

out:
	return ret;
}

static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;

	intel_hpd_irq_handler(dev, hotplug_trigger, hpd_ibx);

	if (pch_iir & SDE_AUDIO_POWER_MASK) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
			       SDE_AUDIO_POWER_SHIFT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_HDCP_MASK)
		DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");

	if (pch_iir & SDE_AUDIO_TRANS_MASK)
		DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");

	if (pch_iir & SDE_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (pch_iir & SDE_FDI_MASK)
		for_each_pipe(pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
		DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");

	if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
		DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");

	if (pch_iir & SDE_TRANSA_FIFO_UNDER)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");

	if (pch_iir & SDE_TRANSB_FIFO_UNDER)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");
}

static void ivb_err_int_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 err_int = I915_READ(GEN7_ERR_INT);

	if (err_int & ERR_INT_POISON)
		DRM_ERROR("Poison interrupt\n");

	if (err_int & ERR_INT_FIFO_UNDERRUN_A)
		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false))
			DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n");

	if (err_int & ERR_INT_FIFO_UNDERRUN_B)
		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false))
			DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n");

	if (err_int & ERR_INT_FIFO_UNDERRUN_C)
		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_C, false))
			DRM_DEBUG_DRIVER("Pipe C FIFO underrun\n");

	I915_WRITE(GEN7_ERR_INT, err_int);
}

static void cpt_serr_int_handler(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 serr_int = I915_READ(SERR_INT);

	if (serr_int & SERR_INT_POISON)
		DRM_ERROR("PCH poison interrupt\n");

	if (serr_int & SERR_INT_TRANS_A_FIFO_UNDERRUN)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder A FIFO underrun\n");

	if (serr_int & SERR_INT_TRANS_B_FIFO_UNDERRUN)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_B,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder B FIFO underrun\n");

	if (serr_int & SERR_INT_TRANS_C_FIFO_UNDERRUN)
		if (intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_C,
							  false))
			DRM_DEBUG_DRIVER("PCH transcoder C FIFO underrun\n");

	I915_WRITE(SERR_INT, serr_int);
}

static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	int pipe;
	u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;

	intel_hpd_irq_handler(dev, hotplug_trigger, hpd_cpt);

	if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
		int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
			       SDE_AUDIO_POWER_SHIFT_CPT);
		DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
				 port_name(port));
	}

	if (pch_iir & SDE_AUX_MASK_CPT)
		dp_aux_irq_handler(dev);

	if (pch_iir & SDE_GMBUS_CPT)
		gmbus_irq_handler(dev);

	if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
		DRM_DEBUG_DRIVER("Audio CP request interrupt\n");

	if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
		DRM_DEBUG_DRIVER("Audio CP change interrupt\n");

	if (pch_iir & SDE_FDI_MASK_CPT)
		for_each_pipe(pipe)
			DRM_DEBUG_DRIVER("  pipe %c FDI IIR: 0x%08x\n",
					 pipe_name(pipe),
					 I915_READ(FDI_RX_IIR(pipe)));

	if (pch_iir & SDE_ERROR_CPT)
		cpt_serr_int_handler(dev);
}

static void ilk_display_irq_handler(struct drm_device *dev, u32 de_iir)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (de_iir & DE_AUX_CHANNEL_A)
		dp_aux_irq_handler(dev);

	if (de_iir & DE_GSE)
		intel_opregion_asle_intr(dev);

	if (de_iir & DE_PIPEA_VBLANK)
		drm_handle_vblank(dev, 0);

	if (de_iir & DE_PIPEB_VBLANK)
		drm_handle_vblank(dev, 1);

	if (de_iir & DE_POISON)
		DRM_ERROR("Poison interrupt\n");

	if (de_iir & DE_PIPEA_FIFO_UNDERRUN)
		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_A, false))
			DRM_DEBUG_DRIVER("Pipe A FIFO underrun\n");

	if (de_iir & DE_PIPEB_FIFO_UNDERRUN)
		if (intel_set_cpu_fifo_underrun_reporting(dev, PIPE_B, false))
			DRM_DEBUG_DRIVER("Pipe B FIFO underrun\n");

	if (de_iir & DE_PLANEA_FLIP_DONE) {
		intel_prepare_page_flip(dev, 0);
		intel_finish_page_flip_plane(dev, 0);
	}

	if (de_iir & DE_PLANEB_FLIP_DONE) {
		intel_prepare_page_flip(dev, 1);
		intel_finish_page_flip_plane(dev, 1);
	}

	/* check event from PCH */
	if (de_iir & DE_PCH_EVENT) {
		u32 pch_iir = I915_READ(SDEIIR);

		if (HAS_PCH_CPT(dev))
			cpt_irq_handler(dev, pch_iir);
		else
			ibx_irq_handler(dev, pch_iir);

		/* should clear PCH hotplug event before clear CPU irq */
		I915_WRITE(SDEIIR, pch_iir);
	}

	if (IS_GEN5(dev) && de_iir & DE_PCU_EVENT)
		ironlake_rps_change_irq_handler(dev);
}

9719fb98 PZ |
1393 | static void ivb_display_irq_handler(struct drm_device *dev, u32 de_iir) |
1394 | { | |
1395 | struct drm_i915_private *dev_priv = dev->dev_private; | |
1396 | int i; | |
1397 | ||
1398 | if (de_iir & DE_ERR_INT_IVB) | |
1399 | ivb_err_int_handler(dev); | |
1400 | ||
1401 | if (de_iir & DE_AUX_CHANNEL_A_IVB) | |
1402 | dp_aux_irq_handler(dev); | |
1403 | ||
1404 | if (de_iir & DE_GSE_IVB) | |
1405 | intel_opregion_asle_intr(dev); | |
1406 | ||
1407 | for (i = 0; i < 3; i++) { | |
1408 | if (de_iir & (DE_PIPEA_VBLANK_IVB << (5 * i))) | |
1409 | drm_handle_vblank(dev, i); | |
1410 | if (de_iir & (DE_PLANEA_FLIP_DONE_IVB << (5 * i))) { | |
1411 | intel_prepare_page_flip(dev, i); | |
1412 | intel_finish_page_flip_plane(dev, i); | |
1413 | } | |
1414 | } | |
1415 | ||
1416 | /* check event from PCH */ | |
1417 | if (!HAS_PCH_NOP(dev) && (de_iir & DE_PCH_EVENT_IVB)) { | |
1418 | u32 pch_iir = I915_READ(SDEIIR); | |
1419 | ||
1420 | cpt_irq_handler(dev, pch_iir); | |
1421 | ||
1422 | /* clear the PCH hotplug event before clearing the CPU irq */ |
1423 | I915_WRITE(SDEIIR, pch_iir); | |
1424 | } | |
1425 | } | |
1426 | ||
f1af8fc1 | 1427 | static irqreturn_t ironlake_irq_handler(int irq, void *arg) |
b1f14ad0 JB |
1428 | { |
1429 | struct drm_device *dev = (struct drm_device *) arg; | |
1430 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
f1af8fc1 | 1431 | u32 de_iir, gt_iir, de_ier, sde_ier = 0; |
0e43406b | 1432 | irqreturn_t ret = IRQ_NONE; |
b1f14ad0 JB |
1433 | |
1434 | atomic_inc(&dev_priv->irq_received); | |
1435 | ||
8664281b PZ |
1436 | /* We get interrupts on unclaimed registers, so check for this before we |
1437 | * do any I915_{READ,WRITE}. */ | |
907b28c5 | 1438 | intel_uncore_check_errors(dev); |
8664281b | 1439 | |
b1f14ad0 JB |
1440 | /* disable master interrupt before clearing iir */ |
1441 | de_ier = I915_READ(DEIER); | |
1442 | I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL); | |
23a78516 | 1443 | POSTING_READ(DEIER); |
b1f14ad0 | 1444 | |
44498aea PZ |
1445 | /* Disable south interrupts. We'll only write to SDEIIR once, so further |
1446 | * interrupts will be stored on its back queue, and then we'll be |
1447 | * able to process them after we restore SDEIER (as soon as we restore | |
1448 | * it, we'll get an interrupt if SDEIIR still has something to process | |
1449 | * due to its back queue). */ | |
ab5c608b BW |
1450 | if (!HAS_PCH_NOP(dev)) { |
1451 | sde_ier = I915_READ(SDEIER); | |
1452 | I915_WRITE(SDEIER, 0); | |
1453 | POSTING_READ(SDEIER); | |
1454 | } | |
44498aea | 1455 | |
b1f14ad0 | 1456 | gt_iir = I915_READ(GTIIR); |
0e43406b | 1457 | if (gt_iir) { |
d8fc8a47 | 1458 | if (INTEL_INFO(dev)->gen >= 6) |
f1af8fc1 | 1459 | snb_gt_irq_handler(dev, dev_priv, gt_iir); |
d8fc8a47 PZ |
1460 | else |
1461 | ilk_gt_irq_handler(dev, dev_priv, gt_iir); | |
0e43406b CW |
1462 | I915_WRITE(GTIIR, gt_iir); |
1463 | ret = IRQ_HANDLED; | |
b1f14ad0 JB |
1464 | } |
1465 | ||
0e43406b CW |
1466 | de_iir = I915_READ(DEIIR); |
1467 | if (de_iir) { | |
f1af8fc1 PZ |
1468 | if (INTEL_INFO(dev)->gen >= 7) |
1469 | ivb_display_irq_handler(dev, de_iir); | |
1470 | else | |
1471 | ilk_display_irq_handler(dev, de_iir); | |
0e43406b CW |
1472 | I915_WRITE(DEIIR, de_iir); |
1473 | ret = IRQ_HANDLED; | |
b1f14ad0 JB |
1474 | } |
1475 | ||
f1af8fc1 PZ |
1476 | if (INTEL_INFO(dev)->gen >= 6) { |
1477 | u32 pm_iir = I915_READ(GEN6_PMIIR); | |
1478 | if (pm_iir) { | |
1403c0d4 | 1479 | gen6_rps_irq_handler(dev_priv, pm_iir); |
f1af8fc1 PZ |
1480 | I915_WRITE(GEN6_PMIIR, pm_iir); |
1481 | ret = IRQ_HANDLED; | |
1482 | } | |
0e43406b | 1483 | } |
b1f14ad0 | 1484 | |
b1f14ad0 JB |
1485 | I915_WRITE(DEIER, de_ier); |
1486 | POSTING_READ(DEIER); | |
ab5c608b BW |
1487 | if (!HAS_PCH_NOP(dev)) { |
1488 | I915_WRITE(SDEIER, sde_ier); | |
1489 | POSTING_READ(SDEIER); | |
1490 | } | |
b1f14ad0 JB |
1491 | |
1492 | return ret; | |
1493 | } | |
1494 | ||
17e1df07 DV |
1495 | static void i915_error_wake_up(struct drm_i915_private *dev_priv, |
1496 | bool reset_completed) | |
1497 | { | |
1498 | struct intel_ring_buffer *ring; | |
1499 | int i; | |
1500 | ||
1501 | /* | |
1502 | * Notify all waiters for GPU completion events that reset state has | |
1503 | * been changed, and that they need to restart their wait after | |
1504 | * checking for potential errors (and bail out to drop locks if there is | |
1505 | * a gpu reset pending so that i915_error_work_func can acquire them). | |
1506 | */ | |
1507 | ||
1508 | /* Wake up __wait_seqno, potentially holding dev->struct_mutex. */ | |
1509 | for_each_ring(ring, dev_priv, i) | |
1510 | wake_up_all(&ring->irq_queue); | |
1511 | ||
1512 | /* Wake up intel_crtc_wait_for_pending_flips, holding crtc->mutex. */ | |
1513 | wake_up_all(&dev_priv->pending_flip_queue); | |
1514 | ||
1515 | /* | |
1516 | * Signal tasks blocked in i915_gem_wait_for_error that the pending | |
1517 | * reset state is cleared. | |
1518 | */ | |
1519 | if (reset_completed) | |
1520 | wake_up_all(&dev_priv->gpu_error.reset_queue); | |
1521 | } | |
1522 | ||
8a905236 JB |
1523 | /** |
1524 | * i915_error_work_func - do process context error handling work | |
1525 | * @work: work struct | |
1526 | * | |
1527 | * Fire an error uevent so userspace can see that a hang or error | |
1528 | * was detected. | |
1529 | */ | |
1530 | static void i915_error_work_func(struct work_struct *work) | |
1531 | { | |
1f83fee0 DV |
1532 | struct i915_gpu_error *error = container_of(work, struct i915_gpu_error, |
1533 | work); | |
1534 | drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t, | |
1535 | gpu_error); | |
8a905236 | 1536 | struct drm_device *dev = dev_priv->dev; |
cce723ed BW |
1537 | char *error_event[] = { I915_ERROR_UEVENT "=1", NULL }; |
1538 | char *reset_event[] = { I915_RESET_UEVENT "=1", NULL }; | |
1539 | char *reset_done_event[] = { I915_ERROR_UEVENT "=0", NULL }; | |
17e1df07 | 1540 | int ret; |
8a905236 | 1541 | |
f316a42c BG |
1542 | kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event); |
1543 | ||
7db0ba24 DV |
1544 | /* |
1545 | * Note that there's only one work item which does gpu resets, so we | |
1546 | * need not worry about concurrent gpu resets potentially incrementing | |
1547 | * error->reset_counter twice. We only need to take care of another | |
1548 | * racing irq/hangcheck declaring the gpu dead for a second time. A | |
1549 | * quick check for that is good enough: schedule_work ensures the | |
1550 | * correct ordering between hang detection and this work item, and since | |
1551 | * the reset in-progress bit is only ever set by code outside of this | |
1552 | * work we don't need to worry about any other races. | |
1553 | */ | |
1554 | if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) { | |
f803aa55 | 1555 | DRM_DEBUG_DRIVER("resetting chip\n"); |
7db0ba24 DV |
1556 | kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, |
1557 | reset_event); | |
1f83fee0 | 1558 | |
17e1df07 DV |
1559 | /* |
1560 | * All state reset _must_ be completed before we update the | |
1561 | * reset counter, for otherwise waiters might miss the reset | |
1562 | * pending state and not properly drop locks, resulting in | |
1563 | * deadlocks with the reset work. | |
1564 | */ | |
f69061be DV |
1565 | ret = i915_reset(dev); |
1566 | ||
17e1df07 DV |
1567 | intel_display_handle_reset(dev); |
1568 | ||
f69061be DV |
1569 | if (ret == 0) { |
1570 | /* | |
1571 | * After all the gem state is reset, increment the reset | |
1572 | * counter and wake up everyone waiting for the reset to | |
1573 | * complete. | |
1574 | * | |
1575 | * Since unlock operations are a one-sided barrier only, | |
1576 | * we need to insert a barrier here to order any seqno | |
1577 | * updates before | |
1578 | * the counter increment. | |
1579 | */ | |
1580 | smp_mb__before_atomic_inc(); | |
1581 | atomic_inc(&dev_priv->gpu_error.reset_counter); | |
1582 | ||
1583 | kobject_uevent_env(&dev->primary->kdev.kobj, | |
1584 | KOBJ_CHANGE, reset_done_event); | |
1f83fee0 DV |
1585 | } else { |
1586 | atomic_set(&error->reset_counter, I915_WEDGED); | |
f316a42c | 1587 | } |
1f83fee0 | 1588 | |
17e1df07 DV |
1589 | /* |
1590 | * Note: The wake_up also serves as a memory barrier so that | |
1591 | * waiters see the updated value of the reset counter atomic_t. |
1592 | */ | |
1593 | i915_error_wake_up(dev_priv, true); | |
f316a42c | 1594 | } |
8a905236 JB |
1595 | } |
1596 | ||
35aed2e6 | 1597 | static void i915_report_and_clear_eir(struct drm_device *dev) |
8a905236 JB |
1598 | { |
1599 | struct drm_i915_private *dev_priv = dev->dev_private; | |
bd9854f9 | 1600 | uint32_t instdone[I915_NUM_INSTDONE_REG]; |
8a905236 | 1601 | u32 eir = I915_READ(EIR); |
050ee91f | 1602 | int pipe, i; |
8a905236 | 1603 | |
35aed2e6 CW |
1604 | if (!eir) |
1605 | return; | |
8a905236 | 1606 | |
a70491cc | 1607 | pr_err("render error detected, EIR: 0x%08x\n", eir); |
8a905236 | 1608 | |
bd9854f9 BW |
1609 | i915_get_extra_instdone(dev, instdone); |
1610 | ||
8a905236 JB |
1611 | if (IS_G4X(dev)) { |
1612 | if (eir & (GM45_ERROR_MEM_PRIV | GM45_ERROR_CP_PRIV)) { | |
1613 | u32 ipeir = I915_READ(IPEIR_I965); | |
1614 | ||
a70491cc JP |
1615 | pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); |
1616 | pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); | |
050ee91f BW |
1617 | for (i = 0; i < ARRAY_SIZE(instdone); i++) |
1618 | pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); | |
a70491cc | 1619 | pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); |
a70491cc | 1620 | pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); |
8a905236 | 1621 | I915_WRITE(IPEIR_I965, ipeir); |
3143a2bf | 1622 | POSTING_READ(IPEIR_I965); |
8a905236 JB |
1623 | } |
1624 | if (eir & GM45_ERROR_PAGE_TABLE) { | |
1625 | u32 pgtbl_err = I915_READ(PGTBL_ER); | |
a70491cc JP |
1626 | pr_err("page table error\n"); |
1627 | pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); | |
8a905236 | 1628 | I915_WRITE(PGTBL_ER, pgtbl_err); |
3143a2bf | 1629 | POSTING_READ(PGTBL_ER); |
8a905236 JB |
1630 | } |
1631 | } | |
1632 | ||
a6c45cf0 | 1633 | if (!IS_GEN2(dev)) { |
8a905236 JB |
1634 | if (eir & I915_ERROR_PAGE_TABLE) { |
1635 | u32 pgtbl_err = I915_READ(PGTBL_ER); | |
a70491cc JP |
1636 | pr_err("page table error\n"); |
1637 | pr_err(" PGTBL_ER: 0x%08x\n", pgtbl_err); | |
8a905236 | 1638 | I915_WRITE(PGTBL_ER, pgtbl_err); |
3143a2bf | 1639 | POSTING_READ(PGTBL_ER); |
8a905236 JB |
1640 | } |
1641 | } | |
1642 | ||
1643 | if (eir & I915_ERROR_MEMORY_REFRESH) { | |
a70491cc | 1644 | pr_err("memory refresh error:\n"); |
9db4a9c7 | 1645 | for_each_pipe(pipe) |
a70491cc | 1646 | pr_err("pipe %c stat: 0x%08x\n", |
9db4a9c7 | 1647 | pipe_name(pipe), I915_READ(PIPESTAT(pipe))); |
8a905236 JB |
1648 | /* pipestat has already been acked */ |
1649 | } | |
1650 | if (eir & I915_ERROR_INSTRUCTION) { | |
a70491cc JP |
1651 | pr_err("instruction error\n"); |
1652 | pr_err(" INSTPM: 0x%08x\n", I915_READ(INSTPM)); | |
050ee91f BW |
1653 | for (i = 0; i < ARRAY_SIZE(instdone); i++) |
1654 | pr_err(" INSTDONE_%d: 0x%08x\n", i, instdone[i]); | |
a6c45cf0 | 1655 | if (INTEL_INFO(dev)->gen < 4) { |
8a905236 JB |
1656 | u32 ipeir = I915_READ(IPEIR); |
1657 | ||
a70491cc JP |
1658 | pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR)); |
1659 | pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR)); | |
a70491cc | 1660 | pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD)); |
8a905236 | 1661 | I915_WRITE(IPEIR, ipeir); |
3143a2bf | 1662 | POSTING_READ(IPEIR); |
8a905236 JB |
1663 | } else { |
1664 | u32 ipeir = I915_READ(IPEIR_I965); | |
1665 | ||
a70491cc JP |
1666 | pr_err(" IPEIR: 0x%08x\n", I915_READ(IPEIR_I965)); |
1667 | pr_err(" IPEHR: 0x%08x\n", I915_READ(IPEHR_I965)); | |
a70491cc | 1668 | pr_err(" INSTPS: 0x%08x\n", I915_READ(INSTPS)); |
a70491cc | 1669 | pr_err(" ACTHD: 0x%08x\n", I915_READ(ACTHD_I965)); |
8a905236 | 1670 | I915_WRITE(IPEIR_I965, ipeir); |
3143a2bf | 1671 | POSTING_READ(IPEIR_I965); |
8a905236 JB |
1672 | } |
1673 | } | |
1674 | ||
1675 | I915_WRITE(EIR, eir); | |
3143a2bf | 1676 | POSTING_READ(EIR); |
8a905236 JB |
1677 | eir = I915_READ(EIR); |
1678 | if (eir) { | |
1679 | /* | |
1680 | * some errors might have become stuck, | |
1681 | * mask them. | |
1682 | */ | |
1683 | DRM_ERROR("EIR stuck: 0x%08x, masking\n", eir); | |
1684 | I915_WRITE(EMR, I915_READ(EMR) | eir); | |
1685 | I915_WRITE(IIR, I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); | |
1686 | } | |
35aed2e6 CW |
1687 | } |
1688 | ||
1689 | /** | |
1690 | * i915_handle_error - handle an error interrupt | |
1691 | * @dev: drm device | |
1692 | * | |
1693 | * Do some basic checking of register state at error interrupt time and |
1694 | * dump it to the syslog. Also call i915_capture_error_state() to make | |
1695 | * sure we get a record and make it available in debugfs. Fire a uevent | |
1696 | * so userspace knows something bad happened (should trigger collection | |
1697 | * of a ring dump etc.). | |
1698 | */ | |
527f9e90 | 1699 | void i915_handle_error(struct drm_device *dev, bool wedged) |
35aed2e6 CW |
1700 | { |
1701 | struct drm_i915_private *dev_priv = dev->dev_private; | |
1702 | ||
1703 | i915_capture_error_state(dev); | |
1704 | i915_report_and_clear_eir(dev); | |
8a905236 | 1705 | |
ba1234d1 | 1706 | if (wedged) { |
f69061be DV |
1707 | atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG, |
1708 | &dev_priv->gpu_error.reset_counter); | |
ba1234d1 | 1709 | |
11ed50ec | 1710 | /* |
17e1df07 DV |
1711 | * Wakeup waiting processes so that the reset work function |
1712 | * i915_error_work_func doesn't deadlock trying to grab various | |
1713 | * locks. By bumping the reset counter first, the woken | |
1714 | * processes will see a reset in progress and back off, | |
1715 | * releasing their locks and then wait for the reset completion. | |
1716 | * We must do this for _all_ gpu waiters that might hold locks | |
1717 | * that the reset work needs to acquire. | |
1718 | * | |
1719 | * Note: The wake_up serves as the required memory barrier to | |
1720 | * ensure that the waiters see the updated value of the reset | |
1721 | * counter atomic_t. | |
11ed50ec | 1722 | */ |
17e1df07 | 1723 | i915_error_wake_up(dev_priv, false); |
11ed50ec BG |
1724 | } |
1725 | ||
122f46ba DV |
1726 | /* |
1727 | * Our reset work can grab modeset locks (since it needs to reset the | |
1728 | * state of outstanding pageflips). Hence it must not be run on our own |
1729 | * dev_priv->wq work queue, since otherwise the flush_work in the pageflip |
1730 | * code will deadlock. | |
1731 | */ | |
1732 | schedule_work(&dev_priv->gpu_error.work); | |
8a905236 JB |
1733 | } |
1734 | ||
21ad8330 | 1735 | static void __always_unused i915_pageflip_stall_check(struct drm_device *dev, int pipe) |
4e5359cd SF |
1736 | { |
1737 | drm_i915_private_t *dev_priv = dev->dev_private; | |
1738 | struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; | |
1739 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | |
05394f39 | 1740 | struct drm_i915_gem_object *obj; |
4e5359cd SF |
1741 | struct intel_unpin_work *work; |
1742 | unsigned long flags; | |
1743 | bool stall_detected; | |
1744 | ||
1745 | /* Ignore early vblank irqs */ | |
1746 | if (intel_crtc == NULL) | |
1747 | return; | |
1748 | ||
1749 | spin_lock_irqsave(&dev->event_lock, flags); | |
1750 | work = intel_crtc->unpin_work; | |
1751 | ||
e7d841ca CW |
1752 | if (work == NULL || |
1753 | atomic_read(&work->pending) >= INTEL_FLIP_COMPLETE || | |
1754 | !work->enable_stall_check) { | |
4e5359cd SF |
1755 | /* Either the pending flip IRQ arrived, or we're too early. Don't check */ |
1756 | spin_unlock_irqrestore(&dev->event_lock, flags); | |
1757 | return; | |
1758 | } | |
1759 | ||
1760 | /* Potential stall - if we see that the flip has happened, assume a missed interrupt */ | |
05394f39 | 1761 | obj = work->pending_flip_obj; |
a6c45cf0 | 1762 | if (INTEL_INFO(dev)->gen >= 4) { |
9db4a9c7 | 1763 | int dspsurf = DSPSURF(intel_crtc->plane); |
446f2545 | 1764 | stall_detected = I915_HI_DISPBASE(I915_READ(dspsurf)) == |
f343c5f6 | 1765 | i915_gem_obj_ggtt_offset(obj); |
4e5359cd | 1766 | } else { |
9db4a9c7 | 1767 | int dspaddr = DSPADDR(intel_crtc->plane); |
f343c5f6 | 1768 | stall_detected = I915_READ(dspaddr) == (i915_gem_obj_ggtt_offset(obj) + |
01f2c773 | 1769 | crtc->y * crtc->fb->pitches[0] + |
4e5359cd SF |
1770 | crtc->x * crtc->fb->bits_per_pixel/8); |
1771 | } | |
1772 | ||
1773 | spin_unlock_irqrestore(&dev->event_lock, flags); | |
1774 | ||
1775 | if (stall_detected) { | |
1776 | DRM_DEBUG_DRIVER("Pageflip stall detected\n"); | |
1777 | intel_prepare_page_flip(dev, intel_crtc->plane); | |
1778 | } | |
1779 | } | |
1780 | ||
42f52ef8 KP |
1781 | /* Called from drm generic code, passed 'crtc' which |
1782 | * we use as a pipe index | |
1783 | */ | |
f71d4af4 | 1784 | static int i915_enable_vblank(struct drm_device *dev, int pipe) |
0a3e67a4 JB |
1785 | { |
1786 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
e9d21d7f | 1787 | unsigned long irqflags; |
71e0ffa5 | 1788 | |
5eddb70b | 1789 | if (!i915_pipe_enabled(dev, pipe)) |
71e0ffa5 | 1790 | return -EINVAL; |
0a3e67a4 | 1791 | |
1ec14ad3 | 1792 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); |
f796cf8f | 1793 | if (INTEL_INFO(dev)->gen >= 4) |
7c463586 KP |
1794 | i915_enable_pipestat(dev_priv, pipe, |
1795 | PIPE_START_VBLANK_INTERRUPT_ENABLE); | |
e9d21d7f | 1796 | else |
7c463586 KP |
1797 | i915_enable_pipestat(dev_priv, pipe, |
1798 | PIPE_VBLANK_INTERRUPT_ENABLE); | |
8692d00e CW |
1799 | |
1800 | /* maintain vblank delivery even in deep C-states */ | |
1801 | if (dev_priv->info->gen == 3) | |
6b26c86d | 1802 | I915_WRITE(INSTPM, _MASKED_BIT_DISABLE(INSTPM_AGPBUSY_DIS)); |
1ec14ad3 | 1803 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
8692d00e | 1804 | |
0a3e67a4 JB |
1805 | return 0; |
1806 | } | |
1807 | ||
f71d4af4 | 1808 | static int ironlake_enable_vblank(struct drm_device *dev, int pipe) |
f796cf8f JB |
1809 | { |
1810 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
1811 | unsigned long irqflags; | |
b518421f PZ |
1812 | uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) : |
1813 | DE_PIPE_VBLANK_ILK(pipe); | |
f796cf8f JB |
1814 | |
1815 | if (!i915_pipe_enabled(dev, pipe)) | |
1816 | return -EINVAL; | |
1817 | ||
1818 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | |
b518421f | 1819 | ironlake_enable_display_irq(dev_priv, bit); |
b1f14ad0 JB |
1820 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
1821 | ||
1822 | return 0; | |
1823 | } | |
1824 | ||
7e231dbe JB |
1825 | static int valleyview_enable_vblank(struct drm_device *dev, int pipe) |
1826 | { | |
1827 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
1828 | unsigned long irqflags; | |
31acc7f5 | 1829 | u32 imr; |
7e231dbe JB |
1830 | |
1831 | if (!i915_pipe_enabled(dev, pipe)) | |
1832 | return -EINVAL; | |
1833 | ||
1834 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | |
7e231dbe | 1835 | imr = I915_READ(VLV_IMR); |
31acc7f5 | 1836 | if (pipe == 0) |
7e231dbe | 1837 | imr &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; |
31acc7f5 | 1838 | else |
7e231dbe | 1839 | imr &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; |
7e231dbe | 1840 | I915_WRITE(VLV_IMR, imr); |
31acc7f5 JB |
1841 | i915_enable_pipestat(dev_priv, pipe, |
1842 | PIPE_START_VBLANK_INTERRUPT_ENABLE); | |
7e231dbe JB |
1843 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
1844 | ||
1845 | return 0; | |
1846 | } | |
1847 | ||
42f52ef8 KP |
1848 | /* Called from drm generic code, passed 'crtc' which |
1849 | * we use as a pipe index | |
1850 | */ | |
f71d4af4 | 1851 | static void i915_disable_vblank(struct drm_device *dev, int pipe) |
0a3e67a4 JB |
1852 | { |
1853 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
e9d21d7f | 1854 | unsigned long irqflags; |
0a3e67a4 | 1855 | |
1ec14ad3 | 1856 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); |
8692d00e | 1857 | if (dev_priv->info->gen == 3) |
6b26c86d | 1858 | I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_DIS)); |
8692d00e | 1859 | |
f796cf8f JB |
1860 | i915_disable_pipestat(dev_priv, pipe, |
1861 | PIPE_VBLANK_INTERRUPT_ENABLE | | |
1862 | PIPE_START_VBLANK_INTERRUPT_ENABLE); | |
1863 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | |
1864 | } | |
1865 | ||
f71d4af4 | 1866 | static void ironlake_disable_vblank(struct drm_device *dev, int pipe) |
f796cf8f JB |
1867 | { |
1868 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
1869 | unsigned long irqflags; | |
b518421f PZ |
1870 | uint32_t bit = (INTEL_INFO(dev)->gen >= 7) ? DE_PIPE_VBLANK_IVB(pipe) : |
1871 | DE_PIPE_VBLANK_ILK(pipe); | |
f796cf8f JB |
1872 | |
1873 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | |
b518421f | 1874 | ironlake_disable_display_irq(dev_priv, bit); |
b1f14ad0 JB |
1875 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
1876 | } | |
1877 | ||
7e231dbe JB |
1878 | static void valleyview_disable_vblank(struct drm_device *dev, int pipe) |
1879 | { | |
1880 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
1881 | unsigned long irqflags; | |
31acc7f5 | 1882 | u32 imr; |
7e231dbe JB |
1883 | |
1884 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | |
31acc7f5 JB |
1885 | i915_disable_pipestat(dev_priv, pipe, |
1886 | PIPE_START_VBLANK_INTERRUPT_ENABLE); | |
7e231dbe | 1887 | imr = I915_READ(VLV_IMR); |
31acc7f5 | 1888 | if (pipe == 0) |
7e231dbe | 1889 | imr |= I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT; |
31acc7f5 | 1890 | else |
7e231dbe | 1891 | imr |= I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; |
7e231dbe | 1892 | I915_WRITE(VLV_IMR, imr); |
7e231dbe JB |
1893 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
1894 | } | |
1895 | ||
893eead0 CW |
1896 | static u32 |
1897 | ring_last_seqno(struct intel_ring_buffer *ring) | |
852835f3 | 1898 | { |
893eead0 CW |
1899 | return list_entry(ring->request_list.prev, |
1900 | struct drm_i915_gem_request, list)->seqno; | |
1901 | } | |
1902 | ||
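/* A ring is considered idle when it has no outstanding requests, or when
 * the hardware has already passed the seqno of its last queued request. */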
9107e9d2 CW |
1903 | static bool |
1904 | ring_idle(struct intel_ring_buffer *ring, u32 seqno) | |
1905 | { | |
1906 | return (list_empty(&ring->request_list) || | |
1907 | i915_seqno_passed(seqno, ring_last_seqno(ring))); | |
f65d9421 BG |
1908 | } |
1909 | ||
6274f212 CW |
1910 | static struct intel_ring_buffer * |
1911 | semaphore_waits_for(struct intel_ring_buffer *ring, u32 *seqno) | |
a24a11e6 CW |
1912 | { |
1913 | struct drm_i915_private *dev_priv = ring->dev->dev_private; | |
6274f212 | 1914 | u32 cmd, ipehr, acthd, acthd_min; |
a24a11e6 CW |
1915 | |
1916 | ipehr = I915_READ(RING_IPEHR(ring->mmio_base)); | |
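/* Bits 17:16 of the MBOX command vary with the mailbox being waited on,
 * so mask them off before comparing against the semaphore-wait opcode. */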
1917 | if ((ipehr & ~(0x3 << 16)) != | |
1918 | (MI_SEMAPHORE_MBOX | MI_SEMAPHORE_COMPARE | MI_SEMAPHORE_REGISTER)) | |
6274f212 | 1919 | return NULL; |
a24a11e6 CW |
1920 | |
1921 | /* ACTHD is likely pointing to the dword after the actual command, | |
1922 | * so scan backwards until we find the MBOX. | |
1923 | */ | |
6274f212 | 1924 | acthd = intel_ring_get_active_head(ring) & HEAD_ADDR; |
a24a11e6 CW |
1925 | acthd_min = max((int)acthd - 3 * 4, 0); |
1926 | do { | |
1927 | cmd = ioread32(ring->virtual_start + acthd); | |
1928 | if (cmd == ipehr) | |
1929 | break; | |
1930 | ||
1931 | acthd -= 4; | |
1932 | if (acthd < acthd_min) | |
6274f212 | 1933 | return NULL; |
a24a11e6 CW |
1934 | } while (1); |
1935 | ||
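/* The dword following the MBOX command holds the semaphore compare value,
 * and bit 17 of the command identifies which of the other two rings is the
 * signaller (hence the modulo-3 ring lookup below). */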
6274f212 CW |
1936 | *seqno = ioread32(ring->virtual_start+acthd+4)+1; |
1937 | return &dev_priv->ring[(ring->id + (((ipehr >> 17) & 1) + 1)) % 3]; | |
a24a11e6 CW |
1938 | } |
1939 | ||
6274f212 CW |
1940 | static int semaphore_passed(struct intel_ring_buffer *ring) |
1941 | { | |
1942 | struct drm_i915_private *dev_priv = ring->dev->dev_private; | |
1943 | struct intel_ring_buffer *signaller; | |
1944 | u32 seqno, ctl; | |
1945 | ||
1946 | ring->hangcheck.deadlock = true; | |
1947 | ||
1948 | signaller = semaphore_waits_for(ring, &seqno); | |
1949 | if (signaller == NULL || signaller->hangcheck.deadlock) | |
1950 | return -1; | |
1951 | ||
1952 | /* cursory check for an unkickable deadlock */ | |
1953 | ctl = I915_READ_CTL(signaller); | |
1954 | if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0) | |
1955 | return -1; | |
1956 | ||
1957 | return i915_seqno_passed(signaller->get_seqno(signaller, false), seqno); | |
1958 | } | |
1959 | ||
1960 | static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv) | |
1961 | { | |
1962 | struct intel_ring_buffer *ring; | |
1963 | int i; | |
1964 | ||
1965 | for_each_ring(ring, dev_priv, i) | |
1966 | ring->hangcheck.deadlock = false; | |
1967 | } | |
1968 | ||
ad8beaea MK |
1969 | static enum intel_ring_hangcheck_action |
1970 | ring_stuck(struct intel_ring_buffer *ring, u32 acthd) | |
1ec14ad3 CW |
1971 | { |
1972 | struct drm_device *dev = ring->dev; | |
1973 | struct drm_i915_private *dev_priv = dev->dev_private; | |
9107e9d2 CW |
1974 | u32 tmp; |
1975 | ||
6274f212 | 1976 | if (ring->hangcheck.acthd != acthd) |
f2f4d82f | 1977 | return HANGCHECK_ACTIVE; |
6274f212 | 1978 | |
9107e9d2 | 1979 | if (IS_GEN2(dev)) |
f2f4d82f | 1980 | return HANGCHECK_HUNG; |
9107e9d2 CW |
1981 | |
1982 | /* Is the chip hanging on a WAIT_FOR_EVENT? | |
1983 | * If so we can simply poke the RB_WAIT bit | |
1984 | * and break the hang. This should work on | |
1985 | * all but the second generation chipsets. | |
1986 | */ | |
1987 | tmp = I915_READ_CTL(ring); | |
1ec14ad3 CW |
1988 | if (tmp & RING_WAIT) { |
1989 | DRM_ERROR("Kicking stuck wait on %s\n", | |
1990 | ring->name); | |
09e14bf3 | 1991 | i915_handle_error(dev, false); |
1ec14ad3 | 1992 | I915_WRITE_CTL(ring, tmp); |
f2f4d82f | 1993 | return HANGCHECK_KICK; |
6274f212 CW |
1994 | } |
1995 | ||
1996 | if (INTEL_INFO(dev)->gen >= 6 && tmp & RING_WAIT_SEMAPHORE) { | |
1997 | switch (semaphore_passed(ring)) { | |
1998 | default: | |
f2f4d82f | 1999 | return HANGCHECK_HUNG; |
6274f212 CW |
2000 | case 1: |
2001 | DRM_ERROR("Kicking stuck semaphore on %s\n", | |
2002 | ring->name); | |
09e14bf3 | 2003 | i915_handle_error(dev, false); |
6274f212 | 2004 | I915_WRITE_CTL(ring, tmp); |
f2f4d82f | 2005 | return HANGCHECK_KICK; |
6274f212 | 2006 | case 0: |
f2f4d82f | 2007 | return HANGCHECK_WAIT; |
6274f212 | 2008 | } |
9107e9d2 | 2009 | } |
ed5cbb03 | 2010 | |
f2f4d82f | 2011 | return HANGCHECK_HUNG; |
ed5cbb03 MK |
2012 | } |
2013 | ||
f65d9421 BG |
2014 | /** |
2015 | * This is called when the chip hasn't reported back with completed | |
05407ff8 MK |
2016 | * batchbuffers in a long time. We keep track of per-ring seqno progress, and |
2017 | * if there is no progress, the hangcheck score for that ring is increased. |
2018 | * Further, acthd is inspected to see if the ring is stuck; if it is, |
2019 | * we kick the ring. If we see no progress on three subsequent calls |
2020 | * we assume the chip is wedged and try to fix it by resetting the chip. |
f65d9421 | 2021 | */ |
a658b5d2 | 2022 | static void i915_hangcheck_elapsed(unsigned long data) |
f65d9421 BG |
2023 | { |
2024 | struct drm_device *dev = (struct drm_device *)data; | |
2025 | drm_i915_private_t *dev_priv = dev->dev_private; | |
b4519513 | 2026 | struct intel_ring_buffer *ring; |
b4519513 | 2027 | int i; |
05407ff8 | 2028 | int busy_count = 0, rings_hung = 0; |
9107e9d2 CW |
2029 | bool stuck[I915_NUM_RINGS] = { 0 }; |
2030 | #define BUSY 1 | |
2031 | #define KICK 5 | |
2032 | #define HUNG 20 | |
2033 | #define FIRE 30 | |
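/* Score increments applied per hangcheck tick; once a ring's score climbs
 * above FIRE it is reported as hung and i915_handle_error() is called with
 * wedged set. */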
893eead0 | 2034 | |
3e0dc6b0 BW |
2035 | if (!i915_enable_hangcheck) |
2036 | return; | |
2037 | ||
b4519513 | 2038 | for_each_ring(ring, dev_priv, i) { |
05407ff8 | 2039 | u32 seqno, acthd; |
9107e9d2 | 2040 | bool busy = true; |
05407ff8 | 2041 | |
6274f212 CW |
2042 | semaphore_clear_deadlocks(dev_priv); |
2043 | ||
05407ff8 MK |
2044 | seqno = ring->get_seqno(ring, false); |
2045 | acthd = intel_ring_get_active_head(ring); | |
b4519513 | 2046 | |
9107e9d2 CW |
2047 | if (ring->hangcheck.seqno == seqno) { |
2048 | if (ring_idle(ring, seqno)) { | |
da661464 MK |
2049 | ring->hangcheck.action = HANGCHECK_IDLE; |
2050 | ||
9107e9d2 CW |
2051 | if (waitqueue_active(&ring->irq_queue)) { |
2052 | /* Issue a wake-up to catch stuck h/w. */ | |
094f9a54 CW |
2053 | if (!test_and_set_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings)) { |
2054 | DRM_ERROR("Hangcheck timer elapsed... %s idle\n", | |
2055 | ring->name); | |
2056 | wake_up_all(&ring->irq_queue); | |
2057 | } | |
2058 | /* Safeguard against driver failure */ | |
2059 | ring->hangcheck.score += BUSY; | |
9107e9d2 CW |
2060 | } else |
2061 | busy = false; | |
05407ff8 | 2062 | } else { |
6274f212 CW |
2063 | /* We always increment the hangcheck score |
2064 | * if the ring is busy and still processing | |
2065 | * the same request, so that no single request | |
2066 | * can run indefinitely (such as a chain of | |
2067 | * batches). The only time we do not increment | |
2068 | * the hangcheck score on this ring is if this |
2069 | * ring is in a legitimate wait for another | |
2070 | * ring. In that case the waiting ring is a | |
2071 | * victim and we want to be sure we catch the | |
2072 | * right culprit. Then every time we do kick | |
2073 | * the ring, add a small increment to the | |
2074 | * score so that we can catch a batch that is | |
2075 | * being repeatedly kicked and so responsible | |
2076 | * for stalling the machine. | |
2077 | */ | |
ad8beaea MK |
2078 | ring->hangcheck.action = ring_stuck(ring, |
2079 | acthd); | |
2080 | ||
2081 | switch (ring->hangcheck.action) { | |
da661464 | 2082 | case HANGCHECK_IDLE: |
f2f4d82f | 2083 | case HANGCHECK_WAIT: |
6274f212 | 2084 | break; |
f2f4d82f | 2085 | case HANGCHECK_ACTIVE: |
ea04cb31 | 2086 | ring->hangcheck.score += BUSY; |
6274f212 | 2087 | break; |
f2f4d82f | 2088 | case HANGCHECK_KICK: |
ea04cb31 | 2089 | ring->hangcheck.score += KICK; |
6274f212 | 2090 | break; |
f2f4d82f | 2091 | case HANGCHECK_HUNG: |
ea04cb31 | 2092 | ring->hangcheck.score += HUNG; |
6274f212 CW |
2093 | stuck[i] = true; |
2094 | break; | |
2095 | } | |
05407ff8 | 2096 | } |
9107e9d2 | 2097 | } else { |
da661464 MK |
2098 | ring->hangcheck.action = HANGCHECK_ACTIVE; |
2099 | ||
9107e9d2 CW |
2100 | /* Gradually reduce the count so that we catch DoS |
2101 | * attempts across multiple batches. | |
2102 | */ | |
2103 | if (ring->hangcheck.score > 0) | |
2104 | ring->hangcheck.score--; | |
d1e61e7f CW |
2105 | } |
2106 | ||
05407ff8 MK |
2107 | ring->hangcheck.seqno = seqno; |
2108 | ring->hangcheck.acthd = acthd; | |
9107e9d2 | 2109 | busy_count += busy; |
893eead0 | 2110 | } |
b9201c14 | 2111 | |
92cab734 | 2112 | for_each_ring(ring, dev_priv, i) { |
9107e9d2 | 2113 | if (ring->hangcheck.score > FIRE) { |
b8d88d1d DV |
2114 | DRM_INFO("%s on %s\n", |
2115 | stuck[i] ? "stuck" : "no progress", | |
2116 | ring->name); | |
a43adf07 | 2117 | rings_hung++; |
92cab734 MK |
2118 | } |
2119 | } | |
2120 | ||
05407ff8 MK |
2121 | if (rings_hung) |
2122 | return i915_handle_error(dev, true); | |
f65d9421 | 2123 | |
05407ff8 MK |
2124 | if (busy_count) |
2125 | /* Reset the timer in case the chip hangs without another request |
2126 | * being added */ |
10cd45b6 MK |
2127 | i915_queue_hangcheck(dev); |
2128 | } | |
2129 | ||
2130 | void i915_queue_hangcheck(struct drm_device *dev) | |
2131 | { | |
2132 | struct drm_i915_private *dev_priv = dev->dev_private; | |
2133 | if (!i915_enable_hangcheck) | |
2134 | return; | |
2135 | ||
2136 | mod_timer(&dev_priv->gpu_error.hangcheck_timer, | |
2137 | round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES)); | |
f65d9421 BG |
2138 | } |
2139 | ||
91738a95 PZ |
2140 | static void ibx_irq_preinstall(struct drm_device *dev) |
2141 | { | |
2142 | struct drm_i915_private *dev_priv = dev->dev_private; | |
2143 | ||
2144 | if (HAS_PCH_NOP(dev)) | |
2145 | return; | |
2146 | ||
2147 | /* south display irq */ | |
2148 | I915_WRITE(SDEIMR, 0xffffffff); | |
2149 | /* | |
2150 | * SDEIER is also touched by the interrupt handler to work around missed | |
2151 | * PCH interrupts. Hence we can't update it after the interrupt handler | |
2152 | * is enabled - instead we unconditionally enable all PCH interrupt | |
2153 | * sources here, but then only unmask them as needed with SDEIMR. | |
2154 | */ | |
2155 | I915_WRITE(SDEIER, 0xffffffff); | |
2156 | POSTING_READ(SDEIER); | |
2157 | } | |
2158 | ||
d18ea1b5 DV |
2159 | static void gen5_gt_irq_preinstall(struct drm_device *dev) |
2160 | { | |
2161 | struct drm_i915_private *dev_priv = dev->dev_private; | |
2162 | ||
2163 | /* and GT */ | |
2164 | I915_WRITE(GTIMR, 0xffffffff); | |
2165 | I915_WRITE(GTIER, 0x0); | |
2166 | POSTING_READ(GTIER); | |
2167 | ||
2168 | if (INTEL_INFO(dev)->gen >= 6) { | |
2169 | /* and PM */ | |
2170 | I915_WRITE(GEN6_PMIMR, 0xffffffff); | |
2171 | I915_WRITE(GEN6_PMIER, 0x0); | |
2172 | POSTING_READ(GEN6_PMIER); | |
2173 | } | |
2174 | } | |
2175 | ||
1da177e4 LT |
2176 | /* drm_dma.h hooks |
2177 | */ | |
f71d4af4 | 2178 | static void ironlake_irq_preinstall(struct drm_device *dev) |
036a4a7d ZW |
2179 | { |
2180 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
2181 | ||
4697995b JB |
2182 | atomic_set(&dev_priv->irq_received, 0); |
2183 | ||
036a4a7d | 2184 | I915_WRITE(HWSTAM, 0xeffe); |
bdfcdb63 | 2185 | |
036a4a7d ZW |
2186 | I915_WRITE(DEIMR, 0xffffffff); |
2187 | I915_WRITE(DEIER, 0x0); | |
3143a2bf | 2188 | POSTING_READ(DEIER); |
036a4a7d | 2189 | |
d18ea1b5 | 2190 | gen5_gt_irq_preinstall(dev); |
c650156a | 2191 | |
91738a95 | 2192 | ibx_irq_preinstall(dev); |
7d99163d BW |
2193 | } |
2194 | ||
7e231dbe JB |
2195 | static void valleyview_irq_preinstall(struct drm_device *dev) |
2196 | { | |
2197 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
2198 | int pipe; | |
2199 | ||
2200 | atomic_set(&dev_priv->irq_received, 0); | |
2201 | ||
7e231dbe JB |
2202 | /* VLV magic */ |
2203 | I915_WRITE(VLV_IMR, 0); | |
2204 | I915_WRITE(RING_IMR(RENDER_RING_BASE), 0); | |
2205 | I915_WRITE(RING_IMR(GEN6_BSD_RING_BASE), 0); | |
2206 | I915_WRITE(RING_IMR(BLT_RING_BASE), 0); | |
2207 | ||
7e231dbe JB |
2208 | /* and GT */ |
2209 | I915_WRITE(GTIIR, I915_READ(GTIIR)); | |
2210 | I915_WRITE(GTIIR, I915_READ(GTIIR)); | |
d18ea1b5 DV |
2211 | |
2212 | gen5_gt_irq_preinstall(dev); | |
7e231dbe JB |
2213 | |
2214 | I915_WRITE(DPINVGTT, 0xff); | |
2215 | ||
2216 | I915_WRITE(PORT_HOTPLUG_EN, 0); | |
2217 | I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); | |
2218 | for_each_pipe(pipe) | |
2219 | I915_WRITE(PIPESTAT(pipe), 0xffff); | |
2220 | I915_WRITE(VLV_IIR, 0xffffffff); | |
2221 | I915_WRITE(VLV_IMR, 0xffffffff); | |
2222 | I915_WRITE(VLV_IER, 0x0); | |
2223 | POSTING_READ(VLV_IER); | |
2224 | } | |
2225 | ||
82a28bcf | 2226 | static void ibx_hpd_irq_setup(struct drm_device *dev) |
7fe0b973 KP |
2227 | { |
2228 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
82a28bcf DV |
2229 | struct drm_mode_config *mode_config = &dev->mode_config; |
2230 | struct intel_encoder *intel_encoder; | |
fee884ed | 2231 | u32 hotplug_irqs, hotplug, enabled_irqs = 0; |
82a28bcf DV |
2232 | |
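/* hotplug_irqs covers every hotplug source the PCH provides; enabled_irqs
 * collects only the pins whose connectors are marked HPD_ENABLED, and only
 * those get unmasked via ibx_display_interrupt_update() below. */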
2233 | if (HAS_PCH_IBX(dev)) { | |
fee884ed | 2234 | hotplug_irqs = SDE_HOTPLUG_MASK; |
82a28bcf | 2235 | list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) |
cd569aed | 2236 | if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) |
fee884ed | 2237 | enabled_irqs |= hpd_ibx[intel_encoder->hpd_pin]; |
82a28bcf | 2238 | } else { |
fee884ed | 2239 | hotplug_irqs = SDE_HOTPLUG_MASK_CPT; |
82a28bcf | 2240 | list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) |
cd569aed | 2241 | if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) |
fee884ed | 2242 | enabled_irqs |= hpd_cpt[intel_encoder->hpd_pin]; |
82a28bcf | 2243 | } |
7fe0b973 | 2244 | |
fee884ed | 2245 | ibx_display_interrupt_update(dev_priv, hotplug_irqs, enabled_irqs); |
82a28bcf DV |
2246 | |
2247 | /* | |
2248 | * Enable digital hotplug on the PCH, and configure the DP short pulse | |
2249 | * duration to 2ms (which is the minimum in the Display Port spec) | |
2250 | * | |
2251 | * This register is the same on all known PCH chips. | |
2252 | */ | |
7fe0b973 KP |
2253 | hotplug = I915_READ(PCH_PORT_HOTPLUG); |
2254 | hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK); | |
2255 | hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms; | |
2256 | hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms; | |
2257 | hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms; | |
2258 | I915_WRITE(PCH_PORT_HOTPLUG, hotplug); | |
2259 | } | |
2260 | ||
d46da437 PZ |
2261 | static void ibx_irq_postinstall(struct drm_device *dev) |
2262 | { | |
2263 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
82a28bcf | 2264 | u32 mask; |
e5868a31 | 2265 | |
692a04cf DV |
2266 | if (HAS_PCH_NOP(dev)) |
2267 | return; | |
2268 | ||
8664281b PZ |
2269 | if (HAS_PCH_IBX(dev)) { |
2270 | mask = SDE_GMBUS | SDE_AUX_MASK | SDE_TRANSB_FIFO_UNDER | | |
de032bf4 | 2271 | SDE_TRANSA_FIFO_UNDER | SDE_POISON; |
8664281b PZ |
2272 | } else { |
2273 | mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT | SDE_ERROR_CPT; | |
2274 | ||
2275 | I915_WRITE(SERR_INT, I915_READ(SERR_INT)); | |
2276 | } | |
ab5c608b | 2277 | |
d46da437 PZ |
2278 | I915_WRITE(SDEIIR, I915_READ(SDEIIR)); |
2279 | I915_WRITE(SDEIMR, ~mask); | |
d46da437 PZ |
2280 | } |
2281 | ||
0a9a8c91 DV |
2282 | static void gen5_gt_irq_postinstall(struct drm_device *dev) |
2283 | { | |
2284 | struct drm_i915_private *dev_priv = dev->dev_private; | |
2285 | u32 pm_irqs, gt_irqs; | |
2286 | ||
2287 | pm_irqs = gt_irqs = 0; | |
2288 | ||
2289 | dev_priv->gt_irq_mask = ~0; | |
040d2baa | 2290 | if (HAS_L3_DPF(dev)) { |
0a9a8c91 | 2291 | /* L3 parity interrupt is always unmasked. */ |
35a85ac6 BW |
2292 | dev_priv->gt_irq_mask = ~GT_PARITY_ERROR(dev); |
2293 | gt_irqs |= GT_PARITY_ERROR(dev); | |
0a9a8c91 DV |
2294 | } |
2295 | ||
2296 | gt_irqs |= GT_RENDER_USER_INTERRUPT; | |
2297 | if (IS_GEN5(dev)) { | |
2298 | gt_irqs |= GT_RENDER_PIPECTL_NOTIFY_INTERRUPT | | |
2299 | ILK_BSD_USER_INTERRUPT; | |
2300 | } else { | |
2301 | gt_irqs |= GT_BLT_USER_INTERRUPT | GT_BSD_USER_INTERRUPT; | |
2302 | } | |
2303 | ||
2304 | I915_WRITE(GTIIR, I915_READ(GTIIR)); | |
2305 | I915_WRITE(GTIMR, dev_priv->gt_irq_mask); | |
2306 | I915_WRITE(GTIER, gt_irqs); | |
2307 | POSTING_READ(GTIER); | |
2308 | ||
2309 | if (INTEL_INFO(dev)->gen >= 6) { | |
2310 | pm_irqs |= GEN6_PM_RPS_EVENTS; | |
2311 | ||
2312 | if (HAS_VEBOX(dev)) | |
2313 | pm_irqs |= PM_VEBOX_USER_INTERRUPT; | |
2314 | ||
605cd25b | 2315 | dev_priv->pm_irq_mask = 0xffffffff; |
0a9a8c91 | 2316 | I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR)); |
605cd25b | 2317 | I915_WRITE(GEN6_PMIMR, dev_priv->pm_irq_mask); |
0a9a8c91 DV |
2318 | I915_WRITE(GEN6_PMIER, pm_irqs); |
2319 | POSTING_READ(GEN6_PMIER); | |
2320 | } | |
2321 | } | |
2322 | ||
f71d4af4 | 2323 | static int ironlake_irq_postinstall(struct drm_device *dev) |
036a4a7d | 2324 | { |
4bc9d430 | 2325 | unsigned long irqflags; |
036a4a7d | 2326 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
8e76f8dc PZ |
2327 | u32 display_mask, extra_mask; |
2328 | ||
2329 | if (INTEL_INFO(dev)->gen >= 7) { | |
2330 | display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB | | |
2331 | DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB | | |
2332 | DE_PLANEB_FLIP_DONE_IVB | | |
2333 | DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB | | |
2334 | DE_ERR_INT_IVB); | |
2335 | extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB | | |
2336 | DE_PIPEA_VBLANK_IVB); | |
2337 | ||
2338 | I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT)); | |
2339 | } else { | |
2340 | display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | | |
2341 | DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE | | |
2342 | DE_AUX_CHANNEL_A | DE_PIPEB_FIFO_UNDERRUN | | |
2343 | DE_PIPEA_FIFO_UNDERRUN | DE_POISON); | |
2344 | extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT; | |
2345 | } | |
036a4a7d | 2346 | |
1ec14ad3 | 2347 | dev_priv->irq_mask = ~display_mask; |
036a4a7d ZW |
2348 | |
2349 | /* should always be able to generate an irq */ |
2350 | I915_WRITE(DEIIR, I915_READ(DEIIR)); | |
1ec14ad3 | 2351 | I915_WRITE(DEIMR, dev_priv->irq_mask); |
8e76f8dc | 2352 | I915_WRITE(DEIER, display_mask | extra_mask); |
3143a2bf | 2353 | POSTING_READ(DEIER); |
036a4a7d | 2354 | |
0a9a8c91 | 2355 | gen5_gt_irq_postinstall(dev); |
036a4a7d | 2356 | |
d46da437 | 2357 | ibx_irq_postinstall(dev); |
7fe0b973 | 2358 | |
f97108d1 | 2359 | if (IS_IRONLAKE_M(dev)) { |
6005ce42 DV |
2360 | /* Enable PCU event interrupts |
2361 | * | |
2362 | * spinlocking not required here for correctness since interrupt | |
4bc9d430 DV |
2363 | * setup is guaranteed to run in single-threaded context. But we |
2364 | * need it to make the assert_spin_locked happy. */ | |
2365 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | |
f97108d1 | 2366 | ironlake_enable_display_irq(dev_priv, DE_PCU_EVENT); |
4bc9d430 | 2367 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
f97108d1 JB |
2368 | } |
2369 | ||
036a4a7d ZW |
2370 | return 0; |
2371 | } | |
2372 | ||
7e231dbe JB |
2373 | static int valleyview_irq_postinstall(struct drm_device *dev) |
2374 | { | |
2375 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
7e231dbe | 2376 | u32 enable_mask; |
31acc7f5 | 2377 | u32 pipestat_enable = PLANE_FLIP_DONE_INT_EN_VLV; |
b79480ba | 2378 | unsigned long irqflags; |
7e231dbe JB |
2379 | |
2380 | enable_mask = I915_DISPLAY_PORT_INTERRUPT; | |
31acc7f5 JB |
2381 | enable_mask |= I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | |
2382 | I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT | | |
2383 | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | | |
7e231dbe JB |
2384 | I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; |
2385 | ||
31acc7f5 JB |
2386 | /* |
2387 | * Leave vblank interrupts masked initially; enable/disable will |
2388 | * toggle them based on usage. | |
2389 | */ | |
2390 | dev_priv->irq_mask = (~enable_mask) | | |
2391 | I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT | | |
2392 | I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT; | |
7e231dbe | 2393 | |
20afbda2 DV |
2394 | I915_WRITE(PORT_HOTPLUG_EN, 0); |
2395 | POSTING_READ(PORT_HOTPLUG_EN); | |
2396 | ||
7e231dbe JB |
2397 | I915_WRITE(VLV_IMR, dev_priv->irq_mask); |
2398 | I915_WRITE(VLV_IER, enable_mask); | |
2399 | I915_WRITE(VLV_IIR, 0xffffffff); | |
2400 | I915_WRITE(PIPESTAT(0), 0xffff); | |
2401 | I915_WRITE(PIPESTAT(1), 0xffff); | |
2402 | POSTING_READ(VLV_IER); | |
2403 | ||
b79480ba DV |
2404 | /* Interrupt setup is already guaranteed to be single-threaded; this is |
2405 | * just to make the assert_spin_locked check happy. */ | |
2406 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | |
31acc7f5 | 2407 | i915_enable_pipestat(dev_priv, 0, pipestat_enable); |
515ac2bb | 2408 | i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE); |
31acc7f5 | 2409 | i915_enable_pipestat(dev_priv, 1, pipestat_enable); |
b79480ba | 2410 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
31acc7f5 | 2411 | |
7e231dbe JB |
2412 | I915_WRITE(VLV_IIR, 0xffffffff); |
2413 | I915_WRITE(VLV_IIR, 0xffffffff); | |
2414 | ||
0a9a8c91 | 2415 | gen5_gt_irq_postinstall(dev); |
7e231dbe JB |
2416 | |
2417 | /* ack & enable invalid PTE error interrupts */ | |
2418 | #if 0 /* FIXME: add support to irq handler for checking these bits */ | |
2419 | I915_WRITE(DPINVGTT, DPINVGTT_STATUS_MASK); | |
2420 | I915_WRITE(DPINVGTT, DPINVGTT_EN_MASK); | |
2421 | #endif | |
2422 | ||
2423 | I915_WRITE(VLV_MASTER_IER, MASTER_INTERRUPT_ENABLE); | |
20afbda2 DV |
2424 | |
2425 | return 0; | |
2426 | } | |
2427 | ||
7e231dbe JB |
2428 | static void valleyview_irq_uninstall(struct drm_device *dev) |
2429 | { | |
2430 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
2431 | int pipe; | |
2432 | ||
2433 | if (!dev_priv) | |
2434 | return; | |
2435 | ||
ac4c16c5 EE |
2436 | del_timer_sync(&dev_priv->hotplug_reenable_timer); |
2437 | ||
7e231dbe JB |
2438 | for_each_pipe(pipe) |
2439 | I915_WRITE(PIPESTAT(pipe), 0xffff); | |
2440 | ||
2441 | I915_WRITE(HWSTAM, 0xffffffff); | |
2442 | I915_WRITE(PORT_HOTPLUG_EN, 0); | |
2443 | I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); | |
2444 | for_each_pipe(pipe) | |
2445 | I915_WRITE(PIPESTAT(pipe), 0xffff); | |
2446 | I915_WRITE(VLV_IIR, 0xffffffff); | |
2447 | I915_WRITE(VLV_IMR, 0xffffffff); | |
2448 | I915_WRITE(VLV_IER, 0x0); | |
2449 | POSTING_READ(VLV_IER); | |
2450 | } | |
2451 | ||
f71d4af4 | 2452 | static void ironlake_irq_uninstall(struct drm_device *dev) |
036a4a7d ZW |
2453 | { |
2454 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
4697995b JB |
2455 | |
2456 | if (!dev_priv) | |
2457 | return; | |
2458 | ||
ac4c16c5 EE |
2459 | del_timer_sync(&dev_priv->hotplug_reenable_timer); |
2460 | ||
036a4a7d ZW |
2461 | I915_WRITE(HWSTAM, 0xffffffff); |
2462 | ||
2463 | I915_WRITE(DEIMR, 0xffffffff); | |
2464 | I915_WRITE(DEIER, 0x0); | |
2465 | I915_WRITE(DEIIR, I915_READ(DEIIR)); | |
8664281b PZ |
2466 | if (IS_GEN7(dev)) |
2467 | I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT)); | |
036a4a7d ZW |
2468 | |
2469 | I915_WRITE(GTIMR, 0xffffffff); | |
2470 | I915_WRITE(GTIER, 0x0); | |
2471 | I915_WRITE(GTIIR, I915_READ(GTIIR)); | |
192aac1f | 2472 | |
ab5c608b BW |
2473 | if (HAS_PCH_NOP(dev)) |
2474 | return; | |
2475 | ||
192aac1f KP |
2476 | I915_WRITE(SDEIMR, 0xffffffff); |
2477 | I915_WRITE(SDEIER, 0x0); | |
2478 | I915_WRITE(SDEIIR, I915_READ(SDEIIR)); | |
8664281b PZ |
2479 | if (HAS_PCH_CPT(dev) || HAS_PCH_LPT(dev)) |
2480 | I915_WRITE(SERR_INT, I915_READ(SERR_INT)); | |
036a4a7d ZW |
2481 | } |
2482 | ||
a266c7d5 | 2483 | static void i8xx_irq_preinstall(struct drm_device * dev) |
1da177e4 LT |
2484 | { |
2485 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
9db4a9c7 | 2486 | int pipe; |
91e3738e | 2487 | |
a266c7d5 | 2488 | atomic_set(&dev_priv->irq_received, 0); |
5ca58282 | 2489 | |
9db4a9c7 JB |
2490 | for_each_pipe(pipe) |
2491 | I915_WRITE(PIPESTAT(pipe), 0); | |
a266c7d5 CW |
2492 | I915_WRITE16(IMR, 0xffff); |
2493 | I915_WRITE16(IER, 0x0); | |
2494 | POSTING_READ16(IER); | |
c2798b19 CW |
2495 | } |
2496 | ||
2497 | static int i8xx_irq_postinstall(struct drm_device *dev) | |
2498 | { | |
2499 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
2500 | ||
c2798b19 CW |
2501 | I915_WRITE16(EMR, |
2502 | ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); | |
2503 | ||
2504 | /* Unmask the interrupts that we always want on. */ | |
2505 | dev_priv->irq_mask = | |
2506 | ~(I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | | |
2507 | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | | |
2508 | I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | | |
2509 | I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | | |
2510 | I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); | |
2511 | I915_WRITE16(IMR, dev_priv->irq_mask); | |
2512 | ||
2513 | I915_WRITE16(IER, | |
2514 | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | | |
2515 | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | | |
2516 | I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | | |
2517 | I915_USER_INTERRUPT); | |
2518 | POSTING_READ16(IER); | |
2519 | ||
2520 | return 0; | |
2521 | } | |
2522 | ||
90a72f87 VS |
2523 | /* |
2524 | * Returns true when a page flip has completed. | |
2525 | */ | |
2526 | static bool i8xx_handle_vblank(struct drm_device *dev, | |
2527 | int pipe, u16 iir) | |
2528 | { | |
2529 | drm_i915_private_t *dev_priv = dev->dev_private; | |
2530 | u16 flip_pending = DISPLAY_PLANE_FLIP_PENDING(pipe); | |
2531 | ||
2532 | if (!drm_handle_vblank(dev, pipe)) | |
2533 | return false; | |
2534 | ||
2535 | if ((iir & flip_pending) == 0) | |
2536 | return false; | |
2537 | ||
2538 | intel_prepare_page_flip(dev, pipe); | |
2539 | ||
2540 | /* We detect FlipDone by looking for the change in PendingFlip from '1' | |
2541 | * to '0' on the following vblank, i.e. IIR has the Pendingflip | |
2542 | * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence | |
2543 | * the flip is completed (no longer pending). Since this doesn't raise | |
2544 | * an interrupt per se, we watch for the change at vblank. | |
2545 | */ | |
2546 | if (I915_READ16(ISR) & flip_pending) | |
2547 | return false; | |
2548 | ||
2549 | intel_finish_page_flip(dev, pipe); | |
2550 | ||
2551 | return true; | |
2552 | } | |
2553 | ||
ff1f525e | 2554 | static irqreturn_t i8xx_irq_handler(int irq, void *arg) |
c2798b19 CW |
2555 | { |
2556 | struct drm_device *dev = (struct drm_device *) arg; | |
2557 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
c2798b19 CW |
2558 | u16 iir, new_iir; |
2559 | u32 pipe_stats[2]; | |
2560 | unsigned long irqflags; | |
c2798b19 CW |
2561 | int pipe; |
2562 | u16 flip_mask = | |
2563 | I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | | |
2564 | I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; | |
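/* Flip-pending bits are kept out of the IIR writes (and out of the loop
 * condition) until i8xx_handle_vblank() reports the flip as completed, at
 * which point they are dropped from flip_mask and can be acknowledged. */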
2565 | ||
2566 | atomic_inc(&dev_priv->irq_received); | |
2567 | ||
2568 | iir = I915_READ16(IIR); | |
2569 | if (iir == 0) | |
2570 | return IRQ_NONE; | |
2571 | ||
2572 | while (iir & ~flip_mask) { | |
2573 | /* Can't rely on pipestat interrupt bit in iir as it might | |
2574 | * have been cleared after the pipestat interrupt was received. | |
2575 | * It doesn't set the bit in iir again, but it still produces | |
2576 | * interrupts (for non-MSI). | |
2577 | */ | |
2578 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | |
2579 | if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) | |
2580 | i915_handle_error(dev, false); | |
2581 | ||
2582 | for_each_pipe(pipe) { | |
2583 | int reg = PIPESTAT(pipe); | |
2584 | pipe_stats[pipe] = I915_READ(reg); | |
2585 | ||
2586 | /* | |
2587 | * Clear the PIPE*STAT regs before the IIR | |
2588 | */ | |
2589 | if (pipe_stats[pipe] & 0x8000ffff) { | |
2590 | if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) | |
2591 | DRM_DEBUG_DRIVER("pipe %c underrun\n", | |
2592 | pipe_name(pipe)); | |
2593 | I915_WRITE(reg, pipe_stats[pipe]); | |
c2798b19 CW |
2594 | } |
2595 | } | |
2596 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | |
2597 | ||
2598 | I915_WRITE16(IIR, iir & ~flip_mask); | |
2599 | new_iir = I915_READ16(IIR); /* Flush posted writes */ | |
2600 | ||
d05c617e | 2601 | i915_update_dri1_breadcrumb(dev); |
c2798b19 CW |
2602 | |
2603 | if (iir & I915_USER_INTERRUPT) | |
2604 | notify_ring(dev, &dev_priv->ring[RCS]); | |
2605 | ||
2606 | if (pipe_stats[0] & PIPE_VBLANK_INTERRUPT_STATUS && | |
90a72f87 VS |
2607 | i8xx_handle_vblank(dev, 0, iir)) |
2608 | flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(0); | |
c2798b19 CW |
2609 | |
2610 | if (pipe_stats[1] & PIPE_VBLANK_INTERRUPT_STATUS && | |
90a72f87 VS |
2611 | i8xx_handle_vblank(dev, 1, iir)) |
2612 | flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(1); | |
c2798b19 CW |
2613 | |
2614 | iir = new_iir; | |
2615 | } | |
2616 | ||
2617 | return IRQ_HANDLED; | |
2618 | } | |
2619 | ||
2620 | static void i8xx_irq_uninstall(struct drm_device * dev) | |
2621 | { | |
2622 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
2623 | int pipe; | |
2624 | ||
c2798b19 CW |
2625 | for_each_pipe(pipe) { |
2626 | /* Clear enable bits; then clear status bits */ | |
2627 | I915_WRITE(PIPESTAT(pipe), 0); | |
2628 | I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); | |
2629 | } | |
2630 | I915_WRITE16(IMR, 0xffff); | |
2631 | I915_WRITE16(IER, 0x0); | |
2632 | I915_WRITE16(IIR, I915_READ16(IIR)); | |
2633 | } | |
2634 | ||
a266c7d5 CW |
2635 | static void i915_irq_preinstall(struct drm_device * dev) |
2636 | { | |
2637 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
2638 | int pipe; | |
2639 | ||
2640 | atomic_set(&dev_priv->irq_received, 0); | |
2641 | ||
2642 | if (I915_HAS_HOTPLUG(dev)) { | |
2643 | I915_WRITE(PORT_HOTPLUG_EN, 0); | |
2644 | I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); | |
2645 | } | |
2646 | ||
00d98ebd | 2647 | I915_WRITE16(HWSTAM, 0xeffe); |
a266c7d5 CW |
2648 | for_each_pipe(pipe) |
2649 | I915_WRITE(PIPESTAT(pipe), 0); | |
2650 | I915_WRITE(IMR, 0xffffffff); | |
2651 | I915_WRITE(IER, 0x0); | |
2652 | POSTING_READ(IER); | |
2653 | } | |
2654 | ||
2655 | static int i915_irq_postinstall(struct drm_device *dev) | |
2656 | { | |
2657 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
38bde180 | 2658 | u32 enable_mask; |
a266c7d5 | 2659 | |
38bde180 CW |
2660 | I915_WRITE(EMR, ~(I915_ERROR_PAGE_TABLE | I915_ERROR_MEMORY_REFRESH)); |
2661 | ||
2662 | /* Unmask the interrupts that we always want on. */ | |
2663 | dev_priv->irq_mask = | |
2664 | ~(I915_ASLE_INTERRUPT | | |
2665 | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | | |
2666 | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | | |
2667 | I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | | |
2668 | I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | | |
2669 | I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); | |
2670 | ||
2671 | enable_mask = | |
2672 | I915_ASLE_INTERRUPT | | |
2673 | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | | |
2674 | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | | |
2675 | I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT | | |
2676 | I915_USER_INTERRUPT; | |
2677 | ||
a266c7d5 | 2678 | if (I915_HAS_HOTPLUG(dev)) { |
20afbda2 DV |
2679 | I915_WRITE(PORT_HOTPLUG_EN, 0); |
2680 | POSTING_READ(PORT_HOTPLUG_EN); | |
2681 | ||
a266c7d5 CW |
2682 | /* Enable in IER... */ |
2683 | enable_mask |= I915_DISPLAY_PORT_INTERRUPT; | |
2684 | /* and unmask in IMR */ | |
2685 | dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT; | |
2686 | } | |
2687 | ||
a266c7d5 CW |
2688 | I915_WRITE(IMR, dev_priv->irq_mask); |
2689 | I915_WRITE(IER, enable_mask); | |
2690 | POSTING_READ(IER); | |
2691 | ||
f49e38dd | 2692 | i915_enable_asle_pipestat(dev); |
20afbda2 DV |
2693 | |
2694 | return 0; | |
2695 | } | |
2696 | ||
90a72f87 VS |
2697 | /* |
2698 | * Returns true when a page flip has completed. | |
2699 | */ | |
2700 | static bool i915_handle_vblank(struct drm_device *dev, | |
2701 | int plane, int pipe, u32 iir) | |
2702 | { | |
2703 | drm_i915_private_t *dev_priv = dev->dev_private; | |
2704 | u32 flip_pending = DISPLAY_PLANE_FLIP_PENDING(plane); | |
2705 | ||
2706 | if (!drm_handle_vblank(dev, pipe)) | |
2707 | return false; | |
2708 | ||
2709 | if ((iir & flip_pending) == 0) | |
2710 | return false; | |
2711 | ||
2712 | intel_prepare_page_flip(dev, plane); | |
2713 | ||
2714 | /* We detect FlipDone by looking for the change in PendingFlip from '1' | |
2715 | * to '0' on the following vblank, i.e. IIR has the Pendingflip | |
2716 | * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence | |
2717 | * the flip is completed (no longer pending). Since this doesn't raise | |
2718 | * an interrupt per se, we watch for the change at vblank. | |
2719 | */ | |
2720 | if (I915_READ(ISR) & flip_pending) | |
2721 | return false; | |
2722 | ||
2723 | intel_finish_page_flip(dev, pipe); | |
2724 | ||
2725 | return true; | |
2726 | } | |
2727 | ||
ff1f525e | 2728 | static irqreturn_t i915_irq_handler(int irq, void *arg) |
a266c7d5 CW |
2729 | { |
2730 | struct drm_device *dev = (struct drm_device *) arg; | |
2731 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
8291ee90 | 2732 | u32 iir, new_iir, pipe_stats[I915_MAX_PIPES]; |
a266c7d5 | 2733 | unsigned long irqflags; |
38bde180 CW |
2734 | u32 flip_mask = |
2735 | I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | | |
2736 | I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; | |
38bde180 | 2737 | int pipe, ret = IRQ_NONE; |
a266c7d5 CW |
2738 | |
2739 | atomic_inc(&dev_priv->irq_received); | |
2740 | ||
2741 | iir = I915_READ(IIR); | |
38bde180 CW |
2742 | do { |
2743 | bool irq_received = (iir & ~flip_mask) != 0; | |
8291ee90 | 2744 | bool blc_event = false; |
a266c7d5 CW |
2745 | |
2746 | /* Can't rely on pipestat interrupt bit in iir as it might | |
2747 | * have been cleared after the pipestat interrupt was received. | |
2748 | * It doesn't set the bit in iir again, but it still produces | |
2749 | * interrupts (for non-MSI). | |
2750 | */ | |
2751 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | |
2752 | if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) | |
2753 | i915_handle_error(dev, false); | |
2754 | ||
2755 | for_each_pipe(pipe) { | |
2756 | int reg = PIPESTAT(pipe); | |
2757 | pipe_stats[pipe] = I915_READ(reg); | |
2758 | ||
38bde180 | 2759 | /* Clear the PIPE*STAT regs before the IIR */ |
a266c7d5 CW |
2760 | if (pipe_stats[pipe] & 0x8000ffff) { |
2761 | if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) | |
2762 | DRM_DEBUG_DRIVER("pipe %c underrun\n", | |
2763 | pipe_name(pipe)); | |
2764 | I915_WRITE(reg, pipe_stats[pipe]); | |
38bde180 | 2765 | irq_received = true; |
a266c7d5 CW |
2766 | } |
2767 | } | |
2768 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | |
2769 | ||
2770 | if (!irq_received) | |
2771 | break; | |
2772 | ||
a266c7d5 CW |
2773 | /* Consume port. Then clear IIR or we'll miss events */ |
2774 | if ((I915_HAS_HOTPLUG(dev)) && | |
2775 | (iir & I915_DISPLAY_PORT_INTERRUPT)) { | |
2776 | u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); | |
b543fb04 | 2777 | u32 hotplug_trigger = hotplug_status & HOTPLUG_INT_STATUS_I915; |
a266c7d5 CW |
2778 | |
2779 | DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", | |
2780 | hotplug_status); | |
91d131d2 DV |
2781 | |
2782 | intel_hpd_irq_handler(dev, hotplug_trigger, hpd_status_i915); | |
2783 | ||
a266c7d5 | 2784 | I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); |
38bde180 | 2785 | POSTING_READ(PORT_HOTPLUG_STAT); |
a266c7d5 CW |
2786 | } |
2787 | ||
38bde180 | 2788 | I915_WRITE(IIR, iir & ~flip_mask); |
a266c7d5 CW |
2789 | new_iir = I915_READ(IIR); /* Flush posted writes */ |
2790 | ||
a266c7d5 CW |
2791 | if (iir & I915_USER_INTERRUPT) |
2792 | notify_ring(dev, &dev_priv->ring[RCS]); | |
a266c7d5 | 2793 | |
a266c7d5 | 2794 | for_each_pipe(pipe) { |
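/* The flip-pending status bits are per plane rather than per pipe,
 * and on mobile gen3 parts the driver swaps the plane/pipe
 * assignment, hence plane = !pipe below. */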
38bde180 CW |
2795 | int plane = pipe; |
2796 | if (IS_MOBILE(dev)) | |
2797 | plane = !plane; | |
90a72f87 | 2798 | |
8291ee90 | 2799 | if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS && |
90a72f87 VS |
2800 | i915_handle_vblank(dev, plane, pipe, iir)) |
2801 | flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(plane); | |
a266c7d5 CW |
2802 | |
2803 | if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) | |
2804 | blc_event = true; | |
2805 | } | |
2806 | ||
a266c7d5 CW |
2807 | if (blc_event || (iir & I915_ASLE_INTERRUPT)) |
2808 | intel_opregion_asle_intr(dev); | |
2809 | ||
2810 | /* With MSI, interrupts are only generated when iir | |
2811 | * transitions from zero to nonzero. If another bit got | |
2812 | * set while we were handling the existing iir bits, then | |
2813 | * we would never get another interrupt. | |
2814 | * | |
2815 | * This is fine on non-MSI as well, as if we hit this path | |
2816 | * we avoid exiting the interrupt handler only to generate | |
2817 | * another one. | |
2818 | * | |
2819 | * Note that for MSI this could cause a stray interrupt report | |
2820 | * if an interrupt landed in the time between writing IIR and | |
2821 | * the posting read. This should be rare enough to never | |
2822 | * trigger the 99% of 100,000 interrupts test for disabling | |
2823 | * stray interrupts. | |
2824 | */ | |
38bde180 | 2825 | ret = IRQ_HANDLED; |
a266c7d5 | 2826 | iir = new_iir; |
38bde180 | 2827 | } while (iir & ~flip_mask); |
a266c7d5 | 2828 | |
d05c617e | 2829 | i915_update_dri1_breadcrumb(dev); |
8291ee90 | 2830 | |
a266c7d5 CW |
2831 | return ret; |
2832 | } | |
2833 | ||
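/*
 * Gen3 teardown: mask everything (HWSTAM/IMR), disable all sources in IER,
 * and clear any latched PIPESTAT, hotplug and IIR bits so a later
 * re-install starts from a clean slate.
 */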
2834 | static void i915_irq_uninstall(struct drm_device * dev) | |
2835 | { | |
2836 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
2837 | int pipe; | |
2838 | ||
ac4c16c5 EE |
2839 | del_timer_sync(&dev_priv->hotplug_reenable_timer); |
2840 | ||
a266c7d5 CW |
2841 | if (I915_HAS_HOTPLUG(dev)) { |
2842 | I915_WRITE(PORT_HOTPLUG_EN, 0); | |
2843 | I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); | |
2844 | } | |
2845 | ||
00d98ebd | 2846 | I915_WRITE16(HWSTAM, 0xffff); |
55b39755 CW |
2847 | for_each_pipe(pipe) { |
2848 | /* Clear enable bits; then clear status bits */ | |
a266c7d5 | 2849 | I915_WRITE(PIPESTAT(pipe), 0); |
55b39755 CW |
2850 | I915_WRITE(PIPESTAT(pipe), I915_READ(PIPESTAT(pipe))); |
2851 | } | |
a266c7d5 CW |
2852 | I915_WRITE(IMR, 0xffffffff); |
2853 | I915_WRITE(IER, 0x0); | |
2854 | ||
a266c7d5 CW |
2855 | I915_WRITE(IIR, I915_READ(IIR)); |
2856 | } | |
2857 | ||
2858 | static void i965_irq_preinstall(struct drm_device * dev) | |
2859 | { | |
2860 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
2861 | int pipe; | |
2862 | ||
2863 | atomic_set(&dev_priv->irq_received, 0); | |
2864 | ||
adca4730 CW |
2865 | I915_WRITE(PORT_HOTPLUG_EN, 0); |
2866 | I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); | |
a266c7d5 CW |
2867 | |
2868 | I915_WRITE(HWSTAM, 0xeffe); | |
2869 | for_each_pipe(pipe) | |
2870 | I915_WRITE(PIPESTAT(pipe), 0); | |
2871 | I915_WRITE(IMR, 0xffffffff); | |
2872 | I915_WRITE(IER, 0x0); | |
2873 | POSTING_READ(IER); | |
2874 | } | |
2875 | ||
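/*
 * Quiesce gen4 (i965-class) interrupts before install: everything masked,
 * nothing enabled, stale hotplug and pipe status cleared.
 */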
2876 | static int i965_irq_postinstall(struct drm_device *dev) | |
2877 | { | |
2878 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
bbba0a97 | 2879 | u32 enable_mask; |
a266c7d5 | 2880 | u32 error_mask; |
b79480ba | 2881 | unsigned long irqflags; |
a266c7d5 | 2882 | |
a266c7d5 | 2883 | /* Unmask the interrupts that we always want on. */ |
bbba0a97 | 2884 | dev_priv->irq_mask = ~(I915_ASLE_INTERRUPT | |
adca4730 | 2885 | I915_DISPLAY_PORT_INTERRUPT | |
bbba0a97 CW |
2886 | I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | |
2887 | I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | | |
2888 | I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | | |
2889 | I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT | | |
2890 | I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT); | |
2891 | ||
2892 | enable_mask = ~dev_priv->irq_mask; | |
21ad8330 VS |
2893 | enable_mask &= ~(I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | |
2894 | I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT); | |
bbba0a97 CW |
2895 | enable_mask |= I915_USER_INTERRUPT; |
2896 | ||
2897 | if (IS_G4X(dev)) | |
2898 | enable_mask |= I915_BSD_USER_INTERRUPT; | |
a266c7d5 | 2899 | |
b79480ba DV |
2900 | /* Interrupt setup is already guaranteed to be single-threaded, this is |
2901 | * just to make the assert_spin_locked check happy. */ | |
2902 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | |
515ac2bb | 2903 | i915_enable_pipestat(dev_priv, 0, PIPE_GMBUS_EVENT_ENABLE); |
b79480ba | 2904 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
a266c7d5 | 2905 | |
a266c7d5 CW |
2906 | /* |
2907 | * Enable some error detection, note the instruction error mask | |
2908 | * bit is reserved, so we leave it masked. | |
2909 | */ | |
2910 | if (IS_G4X(dev)) { | |
2911 | error_mask = ~(GM45_ERROR_PAGE_TABLE | | |
2912 | GM45_ERROR_MEM_PRIV | | |
2913 | GM45_ERROR_CP_PRIV | | |
2914 | I915_ERROR_MEMORY_REFRESH); | |
2915 | } else { | |
2916 | error_mask = ~(I915_ERROR_PAGE_TABLE | | |
2917 | I915_ERROR_MEMORY_REFRESH); | |
2918 | } | |
2919 | I915_WRITE(EMR, error_mask); | |
2920 | ||
2921 | I915_WRITE(IMR, dev_priv->irq_mask); | |
2922 | I915_WRITE(IER, enable_mask); | |
2923 | POSTING_READ(IER); | |
2924 | ||
20afbda2 DV |
2925 | I915_WRITE(PORT_HOTPLUG_EN, 0); |
2926 | POSTING_READ(PORT_HOTPLUG_EN); | |
2927 | ||
f49e38dd | 2928 | i915_enable_asle_pipestat(dev); |
20afbda2 DV |
2929 | |
2930 | return 0; | |
2931 | } | |
2932 | ||
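/*
 * Program PORT_HOTPLUG_EN from the per-pin hpd_stats state. Used as the
 * hpd_irq_setup hook for the gen3, gen4 and Valleyview paths; pins not
 * currently marked HPD_ENABLED (e.g. while the reenable timer is pending)
 * are simply left out of the enable mask.
 */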
bac56d5b | 2933 | static void i915_hpd_irq_setup(struct drm_device *dev) |
20afbda2 DV |
2934 | { |
2935 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
e5868a31 | 2936 | struct drm_mode_config *mode_config = &dev->mode_config; |
cd569aed | 2937 | struct intel_encoder *intel_encoder; |
20afbda2 DV |
2938 | u32 hotplug_en; |
2939 | ||
b5ea2d56 DV |
2940 | assert_spin_locked(&dev_priv->irq_lock); |
2941 | ||
bac56d5b EE |
2942 | if (I915_HAS_HOTPLUG(dev)) { |
2943 | hotplug_en = I915_READ(PORT_HOTPLUG_EN); | |
2944 | hotplug_en &= ~HOTPLUG_INT_EN_MASK; | |
2945 | /* Note HDMI and DP share hotplug bits */ | |
e5868a31 | 2946 | /* enable bits are the same for all generations */ |
cd569aed EE |
2947 | list_for_each_entry(intel_encoder, &mode_config->encoder_list, base.head) |
2948 | if (dev_priv->hpd_stats[intel_encoder->hpd_pin].hpd_mark == HPD_ENABLED) | |
2949 | hotplug_en |= hpd_mask_i915[intel_encoder->hpd_pin]; | |
bac56d5b EE |
2950 | /* Programming the CRT detection parameters tends |
2951 |  * to generate a spurious hotplug event about three | |
2952 |  * seconds later. So just do it once. | |
2953 |  */ | |
2954 | if (IS_G4X(dev)) | |
2955 | hotplug_en |= CRT_HOTPLUG_ACTIVATION_PERIOD_64; | |
85fc95ba | 2956 | hotplug_en &= ~CRT_HOTPLUG_VOLTAGE_COMPARE_MASK; |
bac56d5b | 2957 | hotplug_en |= CRT_HOTPLUG_VOLTAGE_COMPARE_50; |
a266c7d5 | 2958 | |
bac56d5b EE |
2959 | /* Ignore TV since it's buggy */ |
2960 | I915_WRITE(PORT_HOTPLUG_EN, hotplug_en); | |
2961 | } | |
a266c7d5 CW |
2962 | } |
2963 | ||
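/*
 * Top-half for gen4 (i965/G4X) devices. Same structure as the gen3 handler,
 * plus the BSD ring user interrupt and the GMBUS event, and with the
 * hotplug status bits selected according to G4X vs. pre-G4X.
 */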
ff1f525e | 2964 | static irqreturn_t i965_irq_handler(int irq, void *arg) |
a266c7d5 CW |
2965 | { |
2966 | struct drm_device *dev = (struct drm_device *) arg; | |
2967 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
a266c7d5 CW |
2968 | u32 iir, new_iir; |
2969 | u32 pipe_stats[I915_MAX_PIPES]; | |
a266c7d5 CW |
2970 | unsigned long irqflags; |
2971 | int irq_received; | |
2972 | int ret = IRQ_NONE, pipe; | |
21ad8330 VS |
2973 | u32 flip_mask = |
2974 | I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT | | |
2975 | I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT; | |
a266c7d5 CW |
2976 | |
2977 | atomic_inc(&dev_priv->irq_received); | |
2978 | ||
2979 | iir = I915_READ(IIR); | |
2980 | ||
a266c7d5 | 2981 | for (;;) { |
2c8ba29f CW |
2982 | bool blc_event = false; |
2983 | ||
21ad8330 | 2984 | irq_received = (iir & ~flip_mask) != 0; |
a266c7d5 CW |
2985 | |
2986 | /* Can't rely on pipestat interrupt bit in iir as it might | |
2987 | * have been cleared after the pipestat interrupt was received. | |
2988 | * It doesn't set the bit in iir again, but it still produces | |
2989 | * interrupts (for non-MSI). | |
2990 | */ | |
2991 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | |
2992 | if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT) | |
2993 | i915_handle_error(dev, false); | |
2994 | ||
2995 | for_each_pipe(pipe) { | |
2996 | int reg = PIPESTAT(pipe); | |
2997 | pipe_stats[pipe] = I915_READ(reg); | |
2998 | ||
2999 | /* | |
3000 | * Clear the PIPE*STAT regs before the IIR | |
3001 | */ | |
3002 | if (pipe_stats[pipe] & 0x8000ffff) { | |
3003 | if (pipe_stats[pipe] & PIPE_FIFO_UNDERRUN_STATUS) | |
3004 | DRM_DEBUG_DRIVER("pipe %c underrun\n", | |
3005 | pipe_name(pipe)); | |
3006 | I915_WRITE(reg, pipe_stats[pipe]); | |
3007 | irq_received = 1; | |
3008 | } | |
3009 | } | |
3010 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | |
3011 | ||
3012 | if (!irq_received) | |
3013 | break; | |
3014 | ||
3015 | ret = IRQ_HANDLED; | |
3016 | ||
3017 | /* Consume port. Then clear IIR or we'll miss events */ | |
adca4730 | 3018 | if (iir & I915_DISPLAY_PORT_INTERRUPT) { |
a266c7d5 | 3019 | u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT); |
b543fb04 EE |
3020 | u32 hotplug_trigger = hotplug_status & (IS_G4X(dev) ? |
3021 | HOTPLUG_INT_STATUS_G4X : | |
4f7fd709 | 3022 | HOTPLUG_INT_STATUS_I915); |
a266c7d5 CW |
3023 | |
3024 | DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n", | |
3025 | hotplug_status); | |
91d131d2 DV |
3026 | |
3027 | intel_hpd_irq_handler(dev, hotplug_trigger, | |
3028 | IS_G4X(dev) ? hpd_status_gen4 : hpd_status_i915); | |
3029 | ||
a266c7d5 CW |
3030 | I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status); |
3031 | I915_READ(PORT_HOTPLUG_STAT); | |
3032 | } | |
3033 | ||
21ad8330 | 3034 | I915_WRITE(IIR, iir & ~flip_mask); |
a266c7d5 CW |
3035 | new_iir = I915_READ(IIR); /* Flush posted writes */ |
3036 | ||
a266c7d5 CW |
3037 | if (iir & I915_USER_INTERRUPT) |
3038 | notify_ring(dev, &dev_priv->ring[RCS]); | |
3039 | if (iir & I915_BSD_USER_INTERRUPT) | |
3040 | notify_ring(dev, &dev_priv->ring[VCS]); | |
3041 | ||
a266c7d5 | 3042 | for_each_pipe(pipe) { |
2c8ba29f | 3043 | if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS && |
90a72f87 VS |
3044 | i915_handle_vblank(dev, pipe, pipe, iir)) |
3045 | flip_mask &= ~DISPLAY_PLANE_FLIP_PENDING(pipe); | |
a266c7d5 CW |
3046 | |
3047 | if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS) | |
3048 | blc_event = true; | |
3049 | } | |
3050 | ||
3051 | ||
3052 | if (blc_event || (iir & I915_ASLE_INTERRUPT)) | |
3053 | intel_opregion_asle_intr(dev); | |
3054 | ||
515ac2bb DV |
3055 | if (pipe_stats[0] & PIPE_GMBUS_INTERRUPT_STATUS) |
3056 | gmbus_irq_handler(dev); | |
3057 | ||
a266c7d5 CW |
3058 | /* With MSI, interrupts are only generated when iir |
3059 | * transitions from zero to nonzero. If another bit got | |
3060 | * set while we were handling the existing iir bits, then | |
3061 | * we would never get another interrupt. | |
3062 | * | |
3063 | * This is fine on non-MSI as well, as if we hit this path | |
3064 | * we avoid exiting the interrupt handler only to generate | |
3065 | * another one. | |
3066 | * | |
3067 | * Note that for MSI this could cause a stray interrupt report | |
3068 | * if an interrupt landed in the time between writing IIR and | |
3069 | * the posting read. This should be rare enough to never | |
3070 | * trigger the 99% of 100,000 interrupts test for disabling | |
3071 | * stray interrupts. | |
3072 | */ | |
3073 | iir = new_iir; | |
3074 | } | |
3075 | ||
d05c617e | 3076 | i915_update_dri1_breadcrumb(dev); |
2c8ba29f | 3077 | |
a266c7d5 CW |
3078 | return ret; |
3079 | } | |
3080 | ||
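/*
 * Gen4 teardown mirrors the preinstall sequence: mask and disable every
 * source, then clear whatever status bits are still latched in PIPESTAT
 * and IIR.
 */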
3081 | static void i965_irq_uninstall(struct drm_device * dev) | |
3082 | { | |
3083 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | |
3084 | int pipe; | |
3085 | ||
3086 | if (!dev_priv) | |
3087 | return; | |
3088 | ||
ac4c16c5 EE |
3089 | del_timer_sync(&dev_priv->hotplug_reenable_timer); |
3090 | ||
adca4730 CW |
3091 | I915_WRITE(PORT_HOTPLUG_EN, 0); |
3092 | I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); | |
a266c7d5 CW |
3093 | |
3094 | I915_WRITE(HWSTAM, 0xffffffff); | |
3095 | for_each_pipe(pipe) | |
3096 | I915_WRITE(PIPESTAT(pipe), 0); | |
3097 | I915_WRITE(IMR, 0xffffffff); | |
3098 | I915_WRITE(IER, 0x0); | |
3099 | ||
3100 | for_each_pipe(pipe) | |
3101 | I915_WRITE(PIPESTAT(pipe), | |
3102 | I915_READ(PIPESTAT(pipe)) & 0x8000ffff); | |
3103 | I915_WRITE(IIR, I915_READ(IIR)); | |
3104 | } | |
3105 | ||
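/*
 * Timer callback that re-arms hotplug pins previously marked HPD_DISABLED:
 * the pin goes back to HPD_ENABLED, matching connectors return from polling
 * to HPD detection, and the platform hpd_irq_setup hook reprograms the
 * hardware enable bits.
 */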
ac4c16c5 EE |
3106 | static void i915_reenable_hotplug_timer_func(unsigned long data) |
3107 | { | |
3108 | drm_i915_private_t *dev_priv = (drm_i915_private_t *)data; | |
3109 | struct drm_device *dev = dev_priv->dev; | |
3110 | struct drm_mode_config *mode_config = &dev->mode_config; | |
3111 | unsigned long irqflags; | |
3112 | int i; | |
3113 | ||
3114 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | |
3115 | for (i = (HPD_NONE + 1); i < HPD_NUM_PINS; i++) { | |
3116 | struct drm_connector *connector; | |
3117 | ||
3118 | if (dev_priv->hpd_stats[i].hpd_mark != HPD_DISABLED) | |
3119 | continue; | |
3120 | ||
3121 | dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED; | |
3122 | ||
3123 | list_for_each_entry(connector, &mode_config->connector_list, head) { | |
3124 | struct intel_connector *intel_connector = to_intel_connector(connector); | |
3125 | ||
3126 | if (intel_connector->encoder->hpd_pin == i) { | |
3127 | if (connector->polled != intel_connector->polled) | |
3128 | DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n", | |
3129 | drm_get_connector_name(connector)); | |
3130 | connector->polled = intel_connector->polled; | |
3131 | if (!connector->polled) | |
3132 | connector->polled = DRM_CONNECTOR_POLL_HPD; | |
3133 | } | |
3134 | } | |
3135 | } | |
3136 | if (dev_priv->display.hpd_irq_setup) | |
3137 | dev_priv->display.hpd_irq_setup(dev); | |
3138 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | |
3139 | } | |
3140 | ||
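/*
 * One-time IRQ setup: initialise the work items and timers, pick the
 * vblank counter implementation, and wire up the per-generation
 * irq_handler / pre/postinstall / uninstall and vblank hooks
 * (Valleyview, PCH-split, then the gen2/gen3/gen4 legacy paths).
 */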
f71d4af4 JB |
3141 | void intel_irq_init(struct drm_device *dev) |
3142 | { | |
8b2e326d CW |
3143 | struct drm_i915_private *dev_priv = dev->dev_private; |
3144 | ||
3145 | INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); | |
99584db3 | 3146 | INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func); |
c6a828d3 | 3147 | INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work); |
a4da4fa4 | 3148 | INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work); |
8b2e326d | 3149 | |
99584db3 DV |
3150 | setup_timer(&dev_priv->gpu_error.hangcheck_timer, |
3151 | i915_hangcheck_elapsed, | |
61bac78e | 3152 | (unsigned long) dev); |
ac4c16c5 EE |
3153 | setup_timer(&dev_priv->hotplug_reenable_timer, i915_reenable_hotplug_timer_func, |
3154 | (unsigned long) dev_priv); | |
61bac78e | 3155 | |
97a19a24 | 3156 | pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE); |
9ee32fea | 3157 | |
f71d4af4 JB |
3158 | dev->driver->get_vblank_counter = i915_get_vblank_counter; |
3159 | dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ | |
7d4e146f | 3160 | if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5) { |
f71d4af4 JB |
3161 | dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */ |
3162 | dev->driver->get_vblank_counter = gm45_get_vblank_counter; | |
3163 | } | |
3164 | ||
c3613de9 KP |
3165 | if (drm_core_check_feature(dev, DRIVER_MODESET)) |
3166 | dev->driver->get_vblank_timestamp = i915_get_vblank_timestamp; | |
3167 | else | |
3168 | dev->driver->get_vblank_timestamp = NULL; | |
f71d4af4 JB |
3169 | dev->driver->get_scanout_position = i915_get_crtc_scanoutpos; |
3170 | ||
7e231dbe JB |
3171 | if (IS_VALLEYVIEW(dev)) { |
3172 | dev->driver->irq_handler = valleyview_irq_handler; | |
3173 | dev->driver->irq_preinstall = valleyview_irq_preinstall; | |
3174 | dev->driver->irq_postinstall = valleyview_irq_postinstall; | |
3175 | dev->driver->irq_uninstall = valleyview_irq_uninstall; | |
3176 | dev->driver->enable_vblank = valleyview_enable_vblank; | |
3177 | dev->driver->disable_vblank = valleyview_disable_vblank; | |
fa00abe0 | 3178 | dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; |
f71d4af4 JB |
3179 | } else if (HAS_PCH_SPLIT(dev)) { |
3180 | dev->driver->irq_handler = ironlake_irq_handler; | |
3181 | dev->driver->irq_preinstall = ironlake_irq_preinstall; | |
3182 | dev->driver->irq_postinstall = ironlake_irq_postinstall; | |
3183 | dev->driver->irq_uninstall = ironlake_irq_uninstall; | |
3184 | dev->driver->enable_vblank = ironlake_enable_vblank; | |
3185 | dev->driver->disable_vblank = ironlake_disable_vblank; | |
82a28bcf | 3186 | dev_priv->display.hpd_irq_setup = ibx_hpd_irq_setup; |
f71d4af4 | 3187 | } else { |
c2798b19 CW |
3188 | if (INTEL_INFO(dev)->gen == 2) { |
3189 | dev->driver->irq_preinstall = i8xx_irq_preinstall; | |
3190 | dev->driver->irq_postinstall = i8xx_irq_postinstall; | |
3191 | dev->driver->irq_handler = i8xx_irq_handler; | |
3192 | dev->driver->irq_uninstall = i8xx_irq_uninstall; | |
a266c7d5 CW |
3193 | } else if (INTEL_INFO(dev)->gen == 3) { |
3194 | dev->driver->irq_preinstall = i915_irq_preinstall; | |
3195 | dev->driver->irq_postinstall = i915_irq_postinstall; | |
3196 | dev->driver->irq_uninstall = i915_irq_uninstall; | |
3197 | dev->driver->irq_handler = i915_irq_handler; | |
20afbda2 | 3198 | dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; |
c2798b19 | 3199 | } else { |
a266c7d5 CW |
3200 | dev->driver->irq_preinstall = i965_irq_preinstall; |
3201 | dev->driver->irq_postinstall = i965_irq_postinstall; | |
3202 | dev->driver->irq_uninstall = i965_irq_uninstall; | |
3203 | dev->driver->irq_handler = i965_irq_handler; | |
bac56d5b | 3204 | dev_priv->display.hpd_irq_setup = i915_hpd_irq_setup; |
c2798b19 | 3205 | } |
f71d4af4 JB |
3206 | dev->driver->enable_vblank = i915_enable_vblank; |
3207 | dev->driver->disable_vblank = i915_disable_vblank; | |
3208 | } | |
3209 | } | |
20afbda2 DV |
3210 | |
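/*
 * Reset the per-pin hotplug bookkeeping and connector polling state, then
 * let the platform hpd_irq_setup hook program the hardware enable bits.
 * irq_lock is taken here mainly to satisfy the locking asserts in those
 * hooks, as the comment below notes.
 */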
3211 | void intel_hpd_init(struct drm_device *dev) | |
3212 | { | |
3213 | struct drm_i915_private *dev_priv = dev->dev_private; | |
821450c6 EE |
3214 | struct drm_mode_config *mode_config = &dev->mode_config; |
3215 | struct drm_connector *connector; | |
b5ea2d56 | 3216 | unsigned long irqflags; |
821450c6 | 3217 | int i; |
20afbda2 | 3218 | |
821450c6 EE |
3219 | for (i = 1; i < HPD_NUM_PINS; i++) { |
3220 | dev_priv->hpd_stats[i].hpd_cnt = 0; | |
3221 | dev_priv->hpd_stats[i].hpd_mark = HPD_ENABLED; | |
3222 | } | |
3223 | list_for_each_entry(connector, &mode_config->connector_list, head) { | |
3224 | struct intel_connector *intel_connector = to_intel_connector(connector); | |
3225 | connector->polled = intel_connector->polled; | |
3226 | if (!connector->polled && I915_HAS_HOTPLUG(dev) && intel_connector->encoder->hpd_pin > HPD_NONE) | |
3227 | connector->polled = DRM_CONNECTOR_POLL_HPD; | |
3228 | } | |
b5ea2d56 DV |
3229 | |
3230 | /* Interrupt setup is already guaranteed to be single-threaded, this is | |
3231 | * just to make the assert_spin_locked checks happy. */ | |
3232 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | |
20afbda2 DV |
3233 | if (dev_priv->display.hpd_irq_setup) |
3234 | dev_priv->display.hpd_irq_setup(dev); | |
b5ea2d56 | 3235 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); |
20afbda2 | 3236 | } |
c67a470b PZ |
3237 | |
3238 | /* Disable interrupts so we can allow Package C8+. */ | |
3239 | void hsw_pc8_disable_interrupts(struct drm_device *dev) | |
3240 | { | |
3241 | struct drm_i915_private *dev_priv = dev->dev_private; | |
3242 | unsigned long irqflags; | |
3243 | ||
3244 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | |
3245 | ||
3246 | dev_priv->pc8.regsave.deimr = I915_READ(DEIMR); | |
3247 | dev_priv->pc8.regsave.sdeimr = I915_READ(SDEIMR); | |
3248 | dev_priv->pc8.regsave.gtimr = I915_READ(GTIMR); | |
3249 | dev_priv->pc8.regsave.gtier = I915_READ(GTIER); | |
3250 | dev_priv->pc8.regsave.gen6_pmimr = I915_READ(GEN6_PMIMR); | |
3251 | ||
3252 | ironlake_disable_display_irq(dev_priv, ~DE_PCH_EVENT_IVB); | |
3253 | ibx_disable_display_interrupt(dev_priv, ~SDE_HOTPLUG_MASK_CPT); | |
3254 | ilk_disable_gt_irq(dev_priv, 0xffffffff); | |
3255 | snb_disable_pm_irq(dev_priv, 0xffffffff); | |
3256 | ||
3257 | dev_priv->pc8.irqs_disabled = true; | |
3258 | ||
3259 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | |
3260 | } | |
3261 | ||
3262 | /* Restore interrupts so we can recover from Package C8+. */ | |
3263 | void hsw_pc8_restore_interrupts(struct drm_device *dev) | |
3264 | { | |
3265 | struct drm_i915_private *dev_priv = dev->dev_private; | |
3266 | unsigned long irqflags; | |
3267 | uint32_t val, expected; | |
3268 | ||
3269 | spin_lock_irqsave(&dev_priv->irq_lock, irqflags); | |
3270 | ||
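/* Sanity-check that nothing touched the interrupt registers while we were
 * in PC8: they should still hold exactly what
 * hsw_pc8_disable_interrupts() programmed. */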
3271 | val = I915_READ(DEIMR); | |
3272 | expected = ~DE_PCH_EVENT_IVB; | |
3273 | WARN(val != expected, "DEIMR is 0x%08x, not 0x%08x\n", val, expected); | |
3274 | ||
3275 | val = I915_READ(SDEIMR) & ~SDE_HOTPLUG_MASK_CPT; | |
3276 | expected = ~SDE_HOTPLUG_MASK_CPT; | |
3277 | WARN(val != expected, "SDEIMR non-HPD bits are 0x%08x, not 0x%08x\n", | |
3278 | val, expected); | |
3279 | ||
3280 | val = I915_READ(GTIMR); | |
3281 | expected = 0xffffffff; | |
3282 | WARN(val != expected, "GTIMR is 0x%08x, not 0x%08x\n", val, expected); | |
3283 | ||
3284 | val = I915_READ(GEN6_PMIMR); | |
3285 | expected = 0xffffffff; | |
3286 | WARN(val != expected, "GEN6_PMIMR is 0x%08x, not 0x%08x\n", val, | |
3287 | expected); | |
3288 | ||
3289 | dev_priv->pc8.irqs_disabled = false; | |
3290 | ||
3291 | ironlake_enable_display_irq(dev_priv, ~dev_priv->pc8.regsave.deimr); | |
3292 | ibx_enable_display_interrupt(dev_priv, | |
3293 | ~dev_priv->pc8.regsave.sdeimr & | |
3294 | ~SDE_HOTPLUG_MASK_CPT); | |
3295 | ilk_enable_gt_irq(dev_priv, ~dev_priv->pc8.regsave.gtimr); | |
3296 | snb_enable_pm_irq(dev_priv, ~dev_priv->pc8.regsave.gen6_pmimr); | |
3297 | I915_WRITE(GTIER, dev_priv->pc8.regsave.gtier); | |
3298 | ||
3299 | spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); | |
3300 | } |