// SPDX-License-Identifier: GPL-2.0
/*
 * linux/kernel/irq/spurious.c
 *
 * Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar
 *
 * This file contains spurious interrupt handling.
 */

#include <linux/jiffies.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/timer.h>

#include "internals.h"

static int irqfixup __read_mostly;

#define POLL_SPURIOUS_IRQ_INTERVAL (HZ/10)
static void poll_spurious_irqs(struct timer_list *unused);
static DEFINE_TIMER(poll_spurious_irq_timer, poll_spurious_irqs);
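/*
 * Poller serialization: only one of misrouted_irq() and
 * poll_spurious_irqs() may run at a time (guarded by irq_poll_active),
 * and irq_poll_cpu records which CPU the poll is running on so that
 * irq_wait_for_poll() can detect a poll recursing onto its own CPU.
 */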
static int irq_poll_cpu;
static atomic_t irq_poll_active;

/*
 * We wait here for a poller to finish.
 *
 * If the poll runs on this CPU, then we yell loudly and return
 * false. That will leave the interrupt line disabled in the worst
 * case, but it should never happen.
 *
 * We wait until the poller is done and then recheck disabled and
 * action (about to be disabled). Only if it's still active, we return
 * true and let the handler run.
 */
bool irq_wait_for_poll(struct irq_desc *desc)
{
        if (WARN_ONCE(irq_poll_cpu == smp_processor_id(),
                      "irq poll in progress on cpu %d for irq %d\n",
                      smp_processor_id(), desc->irq_data.irq))
                return false;

#ifdef CONFIG_SMP
        do {
                raw_spin_unlock(&desc->lock);
                while (irqd_irq_inprogress(&desc->irq_data))
                        cpu_relax();
                raw_spin_lock(&desc->lock);
        } while (irqd_irq_inprogress(&desc->irq_data));
        /* Might have been disabled in the meantime */
        return !irqd_irq_disabled(&desc->irq_data) && desc->action;
#else
        return false;
#endif
}

/*
 * Recovery handler for misrouted interrupts.
 */
static int try_one_irq(struct irq_desc *desc, bool force)
{
        irqreturn_t ret = IRQ_NONE;
        struct irqaction *action;

        raw_spin_lock(&desc->lock);

        /*
         * PER_CPU, nested thread interrupts and interrupts explicitly
         * marked polled are excluded from polling.
         */
        if (irq_settings_is_per_cpu(desc) ||
            irq_settings_is_nested_thread(desc) ||
            irq_settings_is_polled(desc))
                goto out;

        /*
         * Do not poll disabled interrupts unless the spurious
         * disabled poller asks explicitly.
         */
        if (irqd_irq_disabled(&desc->irq_data) && !force)
                goto out;

        /*
         * All handlers must agree on IRQF_SHARED, so we test just the
         * first.
         */
        action = desc->action;
        if (!action || !(action->flags & IRQF_SHARED) ||
            (action->flags & __IRQF_TIMER))
                goto out;

        /* Already running on another processor */
        if (irqd_irq_inprogress(&desc->irq_data)) {
                /*
                 * Already running: If it is shared get the other
                 * CPU to go looking for our mystery interrupt too
                 */
                desc->istate |= IRQS_PENDING;
                goto out;
        }

        /* Mark it poll in progress */
        desc->istate |= IRQS_POLL_INPROGRESS;
        do {
                if (handle_irq_event(desc) == IRQ_HANDLED)
                        ret = IRQ_HANDLED;
                /* Make sure that there is still a valid action */
                action = desc->action;
        } while ((desc->istate & IRQS_PENDING) && action);
        desc->istate &= ~IRQS_POLL_INPROGRESS;
out:
        raw_spin_unlock(&desc->lock);
        return ret == IRQ_HANDLED;
}

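/*
 * Poll all other interrupt lines after an unhandled interrupt: some
 * other line's handler may claim the stray interrupt. Returns 1 if a
 * handler did, so the caller can adjust the unhandled-irq count.
 */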
static int misrouted_irq(int irq)
{
        struct irq_desc *desc;
        int i, ok = 0;

        if (atomic_inc_return(&irq_poll_active) != 1)
                goto out;

        irq_poll_cpu = smp_processor_id();

        for_each_irq_desc(i, desc) {
                if (!i)
                        continue;

                if (i == irq)   /* Already tried */
                        continue;

                if (try_one_irq(desc, false))
                        ok = 1;
        }
out:
        atomic_dec(&irq_poll_active);
        /* So the caller can adjust the irq error counts */
        return ok;
}

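/*
 * Timer callback: periodically retry interrupt lines that were disabled
 * as spurious (IRQS_SPURIOUS_DISABLED), forcing a poll even though the
 * line is disabled, then re-arm the timer.
 */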
static void poll_spurious_irqs(struct timer_list *unused)
{
        struct irq_desc *desc;
        int i;

        if (atomic_inc_return(&irq_poll_active) != 1)
                goto out;
        irq_poll_cpu = smp_processor_id();

        for_each_irq_desc(i, desc) {
                unsigned int state;

                if (!i)
                        continue;

                /* Racy but it doesn't matter */
                state = desc->istate;
                barrier();
                if (!(state & IRQS_SPURIOUS_DISABLED))
                        continue;

                local_irq_disable();
                try_one_irq(desc, true);
                local_irq_enable();
        }
out:
        atomic_dec(&irq_poll_active);
        mod_timer(&poll_spurious_irq_timer,
                  jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
}

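/*
 * The only valid handler returns are IRQ_NONE, IRQ_HANDLED,
 * IRQ_WAKE_THREAD and their OR combination, i.e. values up to
 * (IRQ_HANDLED | IRQ_WAKE_THREAD). The unsigned comparison also
 * catches negative return values.
 */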
static inline int bad_action_ret(irqreturn_t action_ret)
{
        unsigned int r = action_ret;

        if (likely(r <= (IRQ_HANDLED | IRQ_WAKE_THREAD)))
                return 0;
        return 1;
}

/*
 * If 99,900 of the previous 100,000 interrupts have not been handled
 * then assume that the IRQ is stuck in some manner. Drop a diagnostic
 * and try to turn the IRQ off.
 *
 * (The other 100-of-100,000 interrupts may have been a correctly
 *  functioning device sharing an IRQ with the failing one)
 */
static void __report_bad_irq(struct irq_desc *desc, irqreturn_t action_ret)
{
        unsigned int irq = irq_desc_get_irq(desc);
        struct irqaction *action;
        unsigned long flags;

        if (bad_action_ret(action_ret)) {
                printk(KERN_ERR "irq event %d: bogus return value %x\n",
                                irq, action_ret);
        } else {
                printk(KERN_ERR "irq %d: nobody cared (try booting with "
                                "the \"irqpoll\" option)\n", irq);
        }
        dump_stack();
        printk(KERN_ERR "handlers:\n");

        /*
         * We need to take desc->lock here. note_interrupt() is called
         * w/o desc->lock held, but IRQ_PROGRESS set. We might race
         * with something else removing an action. It's ok to take
         * desc->lock here. See synchronize_irq().
         */
        raw_spin_lock_irqsave(&desc->lock, flags);
        for_each_action_of_desc(desc, action) {
                printk(KERN_ERR "[<%p>] %pf", action->handler, action->handler);
                if (action->thread_fn)
                        printk(KERN_CONT " threaded [<%p>] %pf",
                                        action->thread_fn, action->thread_fn);
                printk(KERN_CONT "\n");
        }
        raw_spin_unlock_irqrestore(&desc->lock, flags);
}

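/* Rate-limited wrapper: only the first 100 bad events are reported. */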
static void report_bad_irq(struct irq_desc *desc, irqreturn_t action_ret)
{
        static int count = 100;

        if (count > 0) {
                count--;
                __report_bad_irq(desc, action_ret);
        }
}

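/*
 * Decide whether an unhandled interrupt should trigger the misrouted
 * poll, depending on the irqfixup level: 0 - never, 1 ("irqfixup") -
 * only when the interrupt went fully unhandled, 2 ("irqpoll") - also
 * for handled interrupts on irq 0 or on handlers marked IRQF_IRQPOLL.
 */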
static inline int
try_misrouted_irq(unsigned int irq, struct irq_desc *desc,
                  irqreturn_t action_ret)
{
        struct irqaction *action;

        if (!irqfixup)
                return 0;

        /* We didn't actually handle the IRQ - see if it was misrouted? */
        if (action_ret == IRQ_NONE)
                return 1;

        /*
         * But for 'irqfixup == 2' we also do it for handled interrupts if
         * they are marked as IRQF_IRQPOLL (or for irq zero, which is the
         * traditional PC timer interrupt.. Legacy)
         */
        if (irqfixup < 2)
                return 0;

        if (!irq)
                return 1;

        /*
         * Since we don't get the descriptor lock, "action" can
         * change under us. We don't really care, but we don't
         * want to follow a NULL pointer. So tell the compiler to
         * just load it once by using a barrier.
         */
        action = desc->action;
        barrier();
        return action && (action->flags & IRQF_IRQPOLL);
}

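/*
 * Bit 31 of threads_handled_last marks that spurious detection has been
 * deferred until the result of the woken threaded handler is known.
 */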
#define SPURIOUS_DEFERRED	0x80000000

void note_interrupt(struct irq_desc *desc, irqreturn_t action_ret)
{
        unsigned int irq;

        if (desc->istate & IRQS_POLL_INPROGRESS ||
            irq_settings_is_polled(desc))
                return;

        if (bad_action_ret(action_ret)) {
                report_bad_irq(desc, action_ret);
                return;
        }

        /*
         * We cannot call note_interrupt from the threaded handler
         * because we need to look at the compound of all handlers
         * (primary and threaded). Aside from that, in the threaded
         * shared case we have no serialization against an incoming
         * hardware interrupt while we are dealing with a threaded
         * result.
         *
         * So in case a thread is woken, we just note the fact and
         * defer the analysis to the next hardware interrupt.
         *
         * The threaded handlers store whether they successfully
         * handled an interrupt and we check whether that number
         * changed versus the last invocation.
         *
         * We could handle all interrupts with the delayed-by-one
         * mechanism, but for the non-forced threaded case we'd just
         * add pointless overhead to the straight hardirq interrupts
         * for the sake of a few lines less code.
         */
        if (action_ret & IRQ_WAKE_THREAD) {
                /*
                 * There is a thread woken. Check whether one of the
                 * shared primary handlers returned IRQ_HANDLED. If
                 * not we defer the spurious detection to the next
                 * interrupt.
                 */
                if (action_ret == IRQ_WAKE_THREAD) {
                        int handled;
                        /*
                         * We use bit 31 of thread_handled_last to
                         * denote the deferred spurious detection
                         * active. No locking necessary as
                         * thread_handled_last is only accessed here
                         * and we have the guarantee that hard
                         * interrupts are not reentrant.
                         */
                        if (!(desc->threads_handled_last & SPURIOUS_DEFERRED)) {
                                desc->threads_handled_last |= SPURIOUS_DEFERRED;
                                return;
                        }
                        /*
                         * Check whether one of the threaded handlers
                         * returned IRQ_HANDLED since the last
                         * interrupt happened.
                         *
                         * For simplicity we just set bit 31, as it is
                         * set in threads_handled_last as well. So we
                         * avoid extra masking. And we really do not
                         * care about the high bits of the handled
                         * count. We just care about the count being
                         * different than the one we saw before.
                         */
                        handled = atomic_read(&desc->threads_handled);
                        handled |= SPURIOUS_DEFERRED;
                        if (handled != desc->threads_handled_last) {
                                action_ret = IRQ_HANDLED;
                                /*
                                 * Note: We keep the SPURIOUS_DEFERRED
                                 * bit set. We are handling the
                                 * previous invocation right now.
                                 * Keep it for the current one, so the
                                 * next hardware interrupt will
                                 * account for it.
                                 */
                                desc->threads_handled_last = handled;
                        } else {
                                /*
                                 * None of the threaded handlers felt
                                 * responsible for the last interrupt.
                                 *
                                 * We keep the SPURIOUS_DEFERRED bit
                                 * set in threads_handled_last as we
                                 * need to account for the current
                                 * interrupt as well.
                                 */
                                action_ret = IRQ_NONE;
                        }
                } else {
                        /*
                         * One of the primary handlers returned
                         * IRQ_HANDLED. So we don't care about the
                         * threaded handlers on the same line. Clear
                         * the deferred detection bit.
                         *
                         * In theory we could/should check whether the
                         * deferred bit is set and take the result of
                         * the previous run into account here as
                         * well. But it's really not worth the
                         * trouble. If every other interrupt is
                         * handled we never trigger the spurious
                         * detector. And if this is just the one out
                         * of 100k unhandled ones which is handled
                         * then we merely delay the spurious detection
                         * by one hard interrupt. Not a real problem.
                         */
                        desc->threads_handled_last &= ~SPURIOUS_DEFERRED;
                }
        }

        if (unlikely(action_ret == IRQ_NONE)) {
                /*
                 * If we are seeing only the odd spurious IRQ caused by
                 * bus asynchronicity then don't eventually trigger an error,
                 * otherwise the counter becomes a doomsday timer for otherwise
                 * working systems.
                 */
                if (time_after(jiffies, desc->last_unhandled + HZ/10))
                        desc->irqs_unhandled = 1;
                else
                        desc->irqs_unhandled++;
                desc->last_unhandled = jiffies;
        }

        irq = irq_desc_get_irq(desc);
        if (unlikely(try_misrouted_irq(irq, desc, action_ret))) {
                int ok = misrouted_irq(irq);
                if (action_ret == IRQ_NONE)
                        desc->irqs_unhandled -= ok;
        }

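        /*
         * Spurious-IRQ accounting window: count every interrupt and,
         * once 100,000 have arrived, check whether more than 99,900 of
         * them went unhandled. If so, the line is considered stuck.
         */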
        desc->irq_count++;
        if (likely(desc->irq_count < 100000))
                return;

        desc->irq_count = 0;
        if (unlikely(desc->irqs_unhandled > 99900)) {
                /*
                 * The interrupt is stuck
                 */
                __report_bad_irq(desc, action_ret);
                /*
                 * Now kill the IRQ
                 */
                printk(KERN_EMERG "Disabling IRQ #%d\n", irq);
                desc->istate |= IRQS_SPURIOUS_DISABLED;
                desc->depth++;
                irq_disable(desc);

                mod_timer(&poll_spurious_irq_timer,
                          jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
        }
        desc->irqs_unhandled = 0;
}

bool noirqdebug __read_mostly;

int noirqdebug_setup(char *str)
{
        noirqdebug = 1;
        printk(KERN_INFO "IRQ lockup detection disabled\n");

        return 1;
}

__setup("noirqdebug", noirqdebug_setup);
module_param(noirqdebug, bool, 0644);
MODULE_PARM_DESC(noirqdebug, "Disable irq lockup detection when true");

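/* Boot option "irqfixup": poll other lines when an interrupt goes unhandled. */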
static int __init irqfixup_setup(char *str)
{
        irqfixup = 1;
        printk(KERN_WARNING "Misrouted IRQ fixup support enabled.\n");
        printk(KERN_WARNING "This may impact system performance.\n");

        return 1;
}

__setup("irqfixup", irqfixup_setup);
module_param(irqfixup, int, 0644);

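/*
 * Boot option "irqpoll": additionally poll for handled interrupts on
 * irq 0 and on lines whose handlers are marked IRQF_IRQPOLL.
 */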
static int __init irqpoll_setup(char *str)
{
        irqfixup = 2;
        printk(KERN_WARNING "Misrouted IRQ fixup and polling support "
                "enabled\n");
        printk(KERN_WARNING "This may significantly impact system "
                "performance\n");
        return 1;
}

__setup("irqpoll", irqpoll_setup);