/*
 * linux/kernel/irq/spurious.c
 *
 * Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar
 *
 * This file contains spurious interrupt handling.
 */

#include <linux/jiffies.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/timer.h>

#include "internals.h"

static int irqfixup __read_mostly;

#define POLL_SPURIOUS_IRQ_INTERVAL (HZ/10)
static void poll_spurious_irqs(unsigned long dummy);
static DEFINE_TIMER(poll_spurious_irq_timer, poll_spurious_irqs, 0, 0);
static int irq_poll_cpu;
static atomic_t irq_poll_active;
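
/*
 * irq_poll_active serializes the two pollers below: misrouted_irq()
 * and poll_spurious_irqs() bail out when another poll run is already
 * active, and irq_poll_cpu records which CPU is polling so that
 * irq_wait_for_poll() can detect a poll recursing onto its own CPU.
 */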

/*
 * We wait here for a poller to finish.
 *
 * If the poll runs on this CPU, then we yell loudly and return
 * false. That will leave the interrupt line disabled in the worst
 * case, but it should never happen.
 *
 * We wait until the poller is done and then recheck disabled and
 * action (about to be disabled). Only if it's still active, we return
 * true and let the handler run.
 */
bool irq_wait_for_poll(struct irq_desc *desc)
{
        if (WARN_ONCE(irq_poll_cpu == smp_processor_id(),
                      "irq poll in progress on cpu %d for irq %d\n",
                      smp_processor_id(), desc->irq_data.irq))
                return false;

#ifdef CONFIG_SMP
        do {
                raw_spin_unlock(&desc->lock);
                while (irqd_irq_inprogress(&desc->irq_data))
                        cpu_relax();
                raw_spin_lock(&desc->lock);
        } while (irqd_irq_inprogress(&desc->irq_data));
        /* Might have been disabled in meantime */
        return !irqd_irq_disabled(&desc->irq_data) && desc->action;
#else
        return false;
#endif
}
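
/*
 * Note: irq_wait_for_poll() is entered with desc->lock held; the loop
 * above drops and retakes the lock while spinning so the poller can
 * make progress. Its callers live in kernel/irq/chip.c (e.g.
 * irq_check_poll(), which tests IRQS_POLL_INPROGRESS before waiting).
 */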


/*
 * Recovery handler for misrouted interrupts.
 */
static int try_one_irq(struct irq_desc *desc, bool force)
{
        irqreturn_t ret = IRQ_NONE;
        struct irqaction *action;

        raw_spin_lock(&desc->lock);

        /*
         * PER_CPU, nested thread interrupts and interrupts explicitly
         * marked polled are excluded from polling.
         */
        if (irq_settings_is_per_cpu(desc) ||
            irq_settings_is_nested_thread(desc) ||
            irq_settings_is_polled(desc))
                goto out;

        /*
         * Do not poll disabled interrupts unless the spurious
         * disabled poller asks explicitly.
         */
        if (irqd_irq_disabled(&desc->irq_data) && !force)
                goto out;

        /*
         * All handlers must agree on IRQF_SHARED, so we test just the
         * first.
         */
        action = desc->action;
        if (!action || !(action->flags & IRQF_SHARED) ||
            (action->flags & __IRQF_TIMER))
                goto out;

        /* Already running on another processor */
        if (irqd_irq_inprogress(&desc->irq_data)) {
                /*
                 * Already running: If it is shared get the other
                 * CPU to go looking for our mystery interrupt too
                 */
                desc->istate |= IRQS_PENDING;
                goto out;
        }

        /* Mark it poll in progress */
        desc->istate |= IRQS_POLL_INPROGRESS;
        do {
                if (handle_irq_event(desc) == IRQ_HANDLED)
                        ret = IRQ_HANDLED;
                /* Make sure that there is still a valid action */
                action = desc->action;
        } while ((desc->istate & IRQS_PENDING) && action);
        desc->istate &= ~IRQS_POLL_INPROGRESS;
out:
        raw_spin_unlock(&desc->lock);
        return ret == IRQ_HANDLED;
}

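/*
 * Poll all interrupt lines except the one that just reported an
 * unhandled interrupt, in case the event was misrouted to a different
 * line. Returns 1 if some handler claimed an interrupt during the
 * poll, so note_interrupt() can credit the unhandled count.
 */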
static int misrouted_irq(int irq)
{
        struct irq_desc *desc;
        int i, ok = 0;

        if (atomic_inc_return(&irq_poll_active) != 1)
                goto out;

        irq_poll_cpu = smp_processor_id();

        for_each_irq_desc(i, desc) {
                if (!i)
                        continue;

                if (i == irq)   /* Already tried */
                        continue;

                if (try_one_irq(desc, false))
                        ok = 1;
        }
out:
        atomic_dec(&irq_poll_active);
        /* So the caller can adjust the irq error counts */
        return ok;
}

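/*
 * Timer callback: periodically re-poll every line that the spurious
 * detector disabled (IRQS_SPURIOUS_DISABLED), so a wedged line can
 * recover once the misbehaving device settles down. The timer is
 * first armed from note_interrupt() when a line gets disabled, and it
 * re-arms itself every POLL_SPURIOUS_IRQ_INTERVAL (HZ/10, i.e. 100ms).
 */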
static void poll_spurious_irqs(unsigned long dummy)
{
        struct irq_desc *desc;
        int i;

        if (atomic_inc_return(&irq_poll_active) != 1)
                goto out;
        irq_poll_cpu = smp_processor_id();

        for_each_irq_desc(i, desc) {
                unsigned int state;

                if (!i)
                        continue;

                /* Racy but it doesn't matter */
                state = desc->istate;
                barrier();
                if (!(state & IRQS_SPURIOUS_DISABLED))
                        continue;

                local_irq_disable();
                try_one_irq(desc, true);
                local_irq_enable();
        }
out:
        atomic_dec(&irq_poll_active);
        mod_timer(&poll_spurious_irq_timer,
                  jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
}

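/*
 * The only valid handler return bits are IRQ_NONE (0), IRQ_HANDLED
 * (1 << 0) and IRQ_WAKE_THREAD (1 << 1), so anything above their OR
 * cannot have come from a well-behaved handler. The unsigned compare
 * below also catches negative values in a single test.
 */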
static inline int bad_action_ret(irqreturn_t action_ret)
{
        unsigned int r = action_ret;

        if (likely(r <= (IRQ_HANDLED | IRQ_WAKE_THREAD)))
                return 0;
        return 1;
}

/*
 * If 99,900 of the previous 100,000 interrupts have not been handled
 * then assume that the IRQ is stuck in some manner. Drop a diagnostic
 * and try to turn the IRQ off.
 *
 * (The other 100 out of 100,000 interrupts may have been a correctly
 *  functioning device sharing an IRQ with the failing one)
 */
static void __report_bad_irq(struct irq_desc *desc, irqreturn_t action_ret)
{
        unsigned int irq = irq_desc_get_irq(desc);
        struct irqaction *action;
        unsigned long flags;

        if (bad_action_ret(action_ret)) {
                printk(KERN_ERR "irq event %d: bogus return value %x\n",
                                irq, action_ret);
        } else {
                printk(KERN_ERR "irq %d: nobody cared (try booting with "
                                "the \"irqpoll\" option)\n", irq);
        }
        dump_stack();
        printk(KERN_ERR "handlers:\n");

        /*
         * We need to take desc->lock here. note_interrupt() is called
         * w/o desc->lock held, but IRQ_PROGRESS set. We might race
         * with something else removing an action. It's ok to take
         * desc->lock here. See synchronize_irq().
         */
        raw_spin_lock_irqsave(&desc->lock, flags);
        for_each_action_of_desc(desc, action) {
                printk(KERN_ERR "[<%p>] %pf", action->handler, action->handler);
                if (action->thread_fn)
                        printk(KERN_CONT " threaded [<%p>] %pf",
                                        action->thread_fn, action->thread_fn);
                printk(KERN_CONT "\n");
        }
        raw_spin_unlock_irqrestore(&desc->lock, flags);
}

static void report_bad_irq(struct irq_desc *desc, irqreturn_t action_ret)
{
        static int count = 100;

        if (count > 0) {
                count--;
                __report_bad_irq(desc, action_ret);
        }
}

static inline int
try_misrouted_irq(unsigned int irq, struct irq_desc *desc,
                  irqreturn_t action_ret)
{
        struct irqaction *action;

        if (!irqfixup)
                return 0;

        /* We didn't actually handle the IRQ - see if it was misrouted? */
        if (action_ret == IRQ_NONE)
                return 1;

        /*
         * But for 'irqfixup == 2' we also do it for handled interrupts if
         * they are marked as IRQF_IRQPOLL (or for irq zero, which is the
         * traditional PC timer interrupt.. Legacy)
         */
        if (irqfixup < 2)
                return 0;

        if (!irq)
                return 1;

        /*
         * Since we don't get the descriptor lock, "action" can
         * change under us. We don't really care, but we don't
         * want to follow a NULL pointer. So tell the compiler to
         * just load it once by using a barrier.
         */
        action = desc->action;
        barrier();
        return action && (action->flags & IRQF_IRQPOLL);
}

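/*
 * Summary of the irqfixup levels set by the boot options at the
 * bottom of this file:
 *
 *   irqfixup == 0:             no polling (default)
 *   irqfixup == 1: "irqfixup"  poll all other lines when an interrupt
 *                              goes unhandled
 *   irqfixup == 2: "irqpoll"   additionally poll on handled interrupts
 *                              that are IRQF_IRQPOLL, or on irq 0
 */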
#define SPURIOUS_DEFERRED	0x80000000
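
/*
 * Bit 31 of threads_handled_last marks that spurious detection has
 * been deferred until the woken handler threads have had a chance to
 * run; the lower bits hold the threads_handled count observed at that
 * point. See the IRQ_WAKE_THREAD handling in note_interrupt() below.
 */
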
void note_interrupt(struct irq_desc *desc, irqreturn_t action_ret)
{
        unsigned int irq;

        if (desc->istate & IRQS_POLL_INPROGRESS ||
            irq_settings_is_polled(desc))
                return;

        if (bad_action_ret(action_ret)) {
                report_bad_irq(desc, action_ret);
                return;
        }

        /*
         * We cannot call note_interrupt from the threaded handler
         * because we need to look at the compound of all handlers
         * (primary and threaded). Apart from that, in the threaded
         * shared case we have no serialization against an incoming
         * hardware interrupt while we are dealing with a threaded
         * result.
         *
         * So in case a thread is woken, we just note the fact and
         * defer the analysis to the next hardware interrupt.
         *
         * The threaded handlers store whether they successfully
         * handled an interrupt and we check whether that number
         * changed versus the last invocation.
         *
         * We could handle all interrupts with the delayed-by-one
         * mechanism, but for the non-forced threaded case we'd just
         * add pointless overhead to the straight hardirq interrupts
         * for the sake of a few lines less code.
         */
        if (action_ret & IRQ_WAKE_THREAD) {
                /*
                 * There is a thread woken. Check whether one of the
                 * shared primary handlers returned IRQ_HANDLED. If
                 * not we defer the spurious detection to the next
                 * interrupt.
                 */
                if (action_ret == IRQ_WAKE_THREAD) {
                        int handled;
                        /*
                         * We use bit 31 of threads_handled_last to
                         * denote that deferred spurious detection is
                         * active. No locking necessary as
                         * threads_handled_last is only accessed here
                         * and we have the guarantee that hard
                         * interrupts are not reentrant.
                         */
                        if (!(desc->threads_handled_last & SPURIOUS_DEFERRED)) {
                                desc->threads_handled_last |= SPURIOUS_DEFERRED;
                                return;
                        }
                        /*
                         * Check whether one of the threaded handlers
                         * returned IRQ_HANDLED since the last
                         * interrupt happened.
                         *
                         * For simplicity we just set bit 31, as it is
                         * set in threads_handled_last as well. So we
                         * avoid extra masking. And we really do not
                         * care about the high bits of the handled
                         * count. We just care about the count being
                         * different than the one we saw before.
                         */
                        handled = atomic_read(&desc->threads_handled);
                        handled |= SPURIOUS_DEFERRED;
                        if (handled != desc->threads_handled_last) {
                                action_ret = IRQ_HANDLED;
                                /*
                                 * Note: We keep the SPURIOUS_DEFERRED
                                 * bit set. We are handling the
                                 * previous invocation right now.
                                 * Keep it for the current one, so the
                                 * next hardware interrupt will
                                 * account for it.
                                 */
                                desc->threads_handled_last = handled;
                        } else {
                                /*
                                 * None of the threaded handlers felt
                                 * responsible for the last interrupt.
                                 *
                                 * We keep the SPURIOUS_DEFERRED bit
                                 * set in threads_handled_last as we
                                 * need to account for the current
                                 * interrupt as well.
                                 */
                                action_ret = IRQ_NONE;
                        }
                } else {
                        /*
                         * One of the primary handlers returned
                         * IRQ_HANDLED. So we don't care about the
                         * threaded handlers on the same line. Clear
                         * the deferred detection bit.
                         *
                         * In theory we could/should check whether the
                         * deferred bit is set and take the result of
                         * the previous run into account here as
                         * well. But it's really not worth the
                         * trouble. If every other interrupt is
                         * handled we never trigger the spurious
                         * detector. And if this is just the one out
                         * of 100k unhandled ones which is handled
                         * then we merely delay the spurious detection
                         * by one hard interrupt. Not a real problem.
                         */
                        desc->threads_handled_last &= ~SPURIOUS_DEFERRED;
                }
        }

        if (unlikely(action_ret == IRQ_NONE)) {
                /*
                 * If we are seeing only the odd spurious IRQ caused by
                 * bus asynchronicity then don't eventually trigger an error,
                 * otherwise the counter becomes a doomsday timer for otherwise
                 * working systems
                 */
                if (time_after(jiffies, desc->last_unhandled + HZ/10))
                        desc->irqs_unhandled = 1;
                else
                        desc->irqs_unhandled++;
                desc->last_unhandled = jiffies;
        }

        irq = irq_desc_get_irq(desc);
        if (unlikely(try_misrouted_irq(irq, desc, action_ret))) {
                int ok = misrouted_irq(irq);
                if (action_ret == IRQ_NONE)
                        desc->irqs_unhandled -= ok;
        }

        desc->irq_count++;
        if (likely(desc->irq_count < 100000))
                return;

        desc->irq_count = 0;
        if (unlikely(desc->irqs_unhandled > 99900)) {
                /*
                 * The interrupt is stuck
                 */
                __report_bad_irq(desc, action_ret);
                /*
                 * Now kill the IRQ
                 */
                printk(KERN_EMERG "Disabling IRQ #%d\n", irq);
                desc->istate |= IRQS_SPURIOUS_DISABLED;
                desc->depth++;
                irq_disable(desc);

                mod_timer(&poll_spurious_irq_timer,
                          jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
        }
        desc->irqs_unhandled = 0;
}

bool noirqdebug __read_mostly;

int noirqdebug_setup(char *str)
{
        noirqdebug = 1;
        printk(KERN_INFO "IRQ lockup detection disabled\n");

        return 1;
}

__setup("noirqdebug", noirqdebug_setup);
module_param(noirqdebug, bool, 0644);
MODULE_PARM_DESC(noirqdebug, "Disable irq lockup detection when true");

static int __init irqfixup_setup(char *str)
{
        irqfixup = 1;
        printk(KERN_WARNING "Misrouted IRQ fixup support enabled.\n");
        printk(KERN_WARNING "This may impact system performance.\n");

        return 1;
}

__setup("irqfixup", irqfixup_setup);
module_param(irqfixup, int, 0644);

static int __init irqpoll_setup(char *str)
{
        irqfixup = 2;
        printk(KERN_WARNING "Misrouted IRQ fixup and polling support "
                "enabled\n");
        printk(KERN_WARNING "This may significantly impact system "
                "performance\n");
        return 1;
}

__setup("irqpoll", irqpoll_setup);
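
/*
 * Example usage on the kernel command line:
 *
 *   noirqdebug   disable the unhandled-interrupt (lockup) detector
 *   irqfixup     poll other lines when an interrupt goes unhandled
 *   irqpoll      like irqfixup, plus polling on handled IRQF_IRQPOLL
 *                interrupts and on irq 0
 *
 * noirqdebug and irqfixup are also exported as writable module
 * parameters via the module_param() declarations above.
 */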