/*
 * linux/kernel/irq/manage.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006 Thomas Gleixner
 *
 * This file contains driver APIs to the irq subsystem.
 */

#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "internals.h"

/**
 *	synchronize_irq - wait for pending IRQ handlers (on other CPUs)
 *	@irq: interrupt number to wait for
 *
 *	This function waits for any pending IRQ handlers for this interrupt
 *	to complete before returning. If you use this function while
 *	holding a resource the IRQ handler may need you will deadlock.
 *
 *	This function may be called - with care - from IRQ context.
 */
void synchronize_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned int status;

	if (!desc)
		return;

	do {
		unsigned long flags;

		/*
		 * Wait until we're out of the critical section. This might
		 * give the wrong answer due to the lack of memory barriers.
		 */
		while (desc->status & IRQ_INPROGRESS)
			cpu_relax();

		/* Ok, that indicated we're done: double-check carefully. */
		raw_spin_lock_irqsave(&desc->lock, flags);
		status = desc->status;
		raw_spin_unlock_irqrestore(&desc->lock, flags);

		/* Oops, that failed? */
	} while (status & IRQ_INPROGRESS);

	/*
	 * We made sure that no hardirq handler is running. Now verify
	 * that no threaded handlers are active.
	 */
	wait_event(desc->wait_for_threads, !atomic_read(&desc->threads_active));
}
EXPORT_SYMBOL(synchronize_irq);
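
/*
 * Example: a minimal teardown sketch using synchronize_irq(). The caller
 * must not hold any resource the handler may need, or this deadlocks as
 * documented above. my_dev and my_dev_quiesce_hw are hypothetical names.
 *
 *	static void my_dev_stop(struct my_dev *dev)
 *	{
 *		my_dev_quiesce_hw(dev);		// stop the device raising irqs
 *		synchronize_irq(dev->irq);	// wait for in-flight handlers
 *		// handlers no longer run; safe to tear down shared state
 *	}
 */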

#ifdef CONFIG_SMP
cpumask_var_t irq_default_affinity;

/**
 *	irq_can_set_affinity - Check if the affinity of a given irq can be set
 *	@irq:		Interrupt to check
 *
 */
int irq_can_set_affinity(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (CHECK_IRQ_PER_CPU(desc->status) || !desc->chip ||
	    !desc->chip->set_affinity)
		return 0;

	return 1;
}

/**
 *	irq_set_thread_affinity - Notify irq threads to adjust affinity
 *	@desc:		irq descriptor which has affinity changed
 *
 *	We just set IRQTF_AFFINITY and delegate the affinity setting
 *	to the interrupt thread itself. We cannot call
 *	set_cpus_allowed_ptr() here as we hold desc->lock and this
 *	code can be called from hard interrupt context.
 */
void irq_set_thread_affinity(struct irq_desc *desc)
{
	struct irqaction *action = desc->action;

	while (action) {
		if (action->thread)
			set_bit(IRQTF_AFFINITY, &action->thread_flags);
		action = action->next;
	}
}

/**
 *	irq_set_affinity - Set the irq affinity of a given irq
 *	@irq:		Interrupt to set affinity
 *	@cpumask:	cpumask
 *
 */
int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	if (!desc->chip->set_affinity)
		return -EINVAL;

	raw_spin_lock_irqsave(&desc->lock, flags);

#ifdef CONFIG_GENERIC_PENDING_IRQ
	if (desc->status & IRQ_MOVE_PCNTXT) {
		if (!desc->chip->set_affinity(irq, cpumask)) {
			cpumask_copy(desc->affinity, cpumask);
			irq_set_thread_affinity(desc);
		}
	} else {
		desc->status |= IRQ_MOVE_PENDING;
		cpumask_copy(desc->pending_mask, cpumask);
	}
#else
	if (!desc->chip->set_affinity(irq, cpumask)) {
		cpumask_copy(desc->affinity, cpumask);
		irq_set_thread_affinity(desc);
	}
#endif
	desc->status |= IRQ_AFFINITY_SET;
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return 0;
}

int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	if (!desc)
		return -EINVAL;

	raw_spin_lock_irqsave(&desc->lock, flags);
	desc->affinity_hint = m;
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
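
/*
 * Example: a minimal sketch of pinning an irq and publishing a hint for
 * userspace (e.g. irqbalance can read /proc/irq/<n>/affinity_hint). The
 * mask handling is illustrative only; note that the hint stores the
 * pointer itself, so the mask must stay valid while the hint is set.
 *
 *	cpumask_var_t mask;
 *
 *	if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
 *		cpumask_clear(mask);
 *		cpumask_set_cpu(2, mask);		// pin to CPU 2
 *		if (irq_can_set_affinity(irq))
 *			irq_set_affinity(irq, mask);
 *		irq_set_affinity_hint(irq, mask);
 *		// do not free mask while the hint still points at it
 *	}
 */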

#ifndef CONFIG_AUTO_IRQ_AFFINITY
/*
 * Generic version of the affinity autoselector.
 */
static int setup_affinity(unsigned int irq, struct irq_desc *desc)
{
	if (!irq_can_set_affinity(irq))
		return 0;

	/*
	 * Preserve a userspace affinity setup, but make sure that
	 * one of the targets is online.
	 */
	if (desc->status & (IRQ_AFFINITY_SET | IRQ_NO_BALANCING)) {
		if (cpumask_any_and(desc->affinity, cpu_online_mask)
		    < nr_cpu_ids)
			goto set_affinity;
		else
			desc->status &= ~IRQ_AFFINITY_SET;
	}

	cpumask_and(desc->affinity, cpu_online_mask, irq_default_affinity);
set_affinity:
	desc->chip->set_affinity(irq, desc->affinity);

	return 0;
}
#else
static inline int setup_affinity(unsigned int irq, struct irq_desc *d)
{
	return irq_select_affinity(irq);
}
#endif

/*
 * Called when affinity is set via /proc/irq
 */
int irq_select_affinity_usr(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&desc->lock, flags);
	ret = setup_affinity(irq, desc);
	if (!ret)
		irq_set_thread_affinity(desc);
	raw_spin_unlock_irqrestore(&desc->lock, flags);

	return ret;
}

#else
static inline int setup_affinity(unsigned int irq, struct irq_desc *desc)
{
	return 0;
}
#endif

void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend)
{
	if (suspend) {
		if (!desc->action || (desc->action->flags & IRQF_NO_SUSPEND))
			return;
		desc->status |= IRQ_SUSPENDED;
	}

	if (!desc->depth++) {
		desc->status |= IRQ_DISABLED;
		desc->chip->disable(irq);
	}
}

/**
 *	disable_irq_nosync - disable an irq without waiting
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line.  Disables and Enables are
 *	nested.
 *	Unlike disable_irq(), this function does not ensure existing
 *	instances of the IRQ handler have completed before returning.
 *
 *	This function may be called from IRQ context.
 */
void disable_irq_nosync(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	if (!desc)
		return;

	chip_bus_lock(irq, desc);
	raw_spin_lock_irqsave(&desc->lock, flags);
	__disable_irq(desc, irq, false);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	chip_bus_sync_unlock(irq, desc);
}
EXPORT_SYMBOL(disable_irq_nosync);

/**
 *	disable_irq - disable an irq and wait for completion
 *	@irq: Interrupt to disable
 *
 *	Disable the selected interrupt line.  Enables and Disables are
 *	nested.
 *	This function waits for any pending IRQ handlers for this interrupt
 *	to complete before returning. If you use this function while
 *	holding a resource the IRQ handler may need you will deadlock.
 *
 *	This function may be called - with care - from IRQ context.
 */
void disable_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return;

	disable_irq_nosync(irq);
	if (desc->action)
		synchronize_irq(irq);
}
EXPORT_SYMBOL(disable_irq);

void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
{
	if (resume)
		desc->status &= ~IRQ_SUSPENDED;

	switch (desc->depth) {
	case 0:
 err_out:
		WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
		break;
	case 1: {
		unsigned int status = desc->status & ~IRQ_DISABLED;

		if (desc->status & IRQ_SUSPENDED)
			goto err_out;
		/* Prevent probing on this irq: */
		desc->status = status | IRQ_NOPROBE;
		check_irq_resend(desc, irq);
		/* fall-through */
	}
	default:
		desc->depth--;
	}
}

/**
 *	enable_irq - enable handling of an irq
 *	@irq: Interrupt to enable
 *
 *	Undoes the effect of one call to disable_irq().  If this
 *	matches the last disable, processing of interrupts on this
 *	IRQ line is re-enabled.
 *
 *	This function may be called from IRQ context only when
 *	desc->chip->bus_lock and desc->chip->bus_sync_unlock are NULL !
 */
void enable_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	if (!desc)
		return;

	chip_bus_lock(irq, desc);
	raw_spin_lock_irqsave(&desc->lock, flags);
	__enable_irq(desc, irq, false);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	chip_bus_sync_unlock(irq, desc);
}
EXPORT_SYMBOL(enable_irq);
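
/*
 * Example: disable/enable nesting, as a sketch. Two disables need two
 * enables before the line is live again; one enable_irq() too many would
 * trip the "Unbalanced enable" warning in __enable_irq() above.
 *
 *	disable_irq(irq);	// depth 0 -> 1, line masked, handlers drained
 *	disable_irq(irq);	// depth 1 -> 2
 *	enable_irq(irq);	// depth 2 -> 1, still disabled
 *	enable_irq(irq);	// depth 1 -> 0, line enabled again
 */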

static int set_irq_wake_real(unsigned int irq, unsigned int on)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int ret = -ENXIO;

	if (desc->chip->set_wake)
		ret = desc->chip->set_wake(irq, on);

	return ret;
}

/**
 *	set_irq_wake - control irq power management wakeup
 *	@irq:	interrupt to control
 *	@on:	enable/disable power management wakeup
 *
 *	Enable/disable power management wakeup mode, which is
 *	disabled by default.  Enables and disables must match,
 *	just as they match for non-wakeup mode support.
 *
 *	Wakeup mode lets this IRQ wake the system from sleep
 *	states like "suspend to RAM".
 */
int set_irq_wake(unsigned int irq, unsigned int on)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret = 0;

	/* wakeup-capable irqs can be shared between drivers that
	 * don't need to have the same sleep mode behaviors.
	 */
	raw_spin_lock_irqsave(&desc->lock, flags);
	if (on) {
		if (desc->wake_depth++ == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 0;
			else
				desc->status |= IRQ_WAKEUP;
		}
	} else {
		if (desc->wake_depth == 0) {
			WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
		} else if (--desc->wake_depth == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 1;
			else
				desc->status &= ~IRQ_WAKEUP;
		}
	}

	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}
EXPORT_SYMBOL(set_irq_wake);
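
/*
 * Example: a minimal suspend/resume sketch using set_irq_wake(). The
 * my_wake_irq number and the surrounding dev_pm_ops wiring are
 * hypothetical; the point is the balanced on/off pair, which mirrors
 * the wake_depth counting above.
 *
 *	static int my_dev_suspend(struct device *d)
 *	{
 *		set_irq_wake(my_wake_irq, 1);	// arm irq as wakeup source
 *		return 0;
 *	}
 *
 *	static int my_dev_resume(struct device *d)
 *	{
 *		set_irq_wake(my_wake_irq, 0);	// must balance the enable
 *		return 0;
 *	}
 */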

/*
 * Internal function that tells the architecture code whether a
 * particular irq has been exclusively allocated or is available
 * for driver use.
 */
int can_request_irq(unsigned int irq, unsigned long irqflags)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	unsigned long flags;

	if (!desc)
		return 0;

	if (desc->status & IRQ_NOREQUEST)
		return 0;

	raw_spin_lock_irqsave(&desc->lock, flags);
	action = desc->action;
	if (action)
		if (irqflags & action->flags & IRQF_SHARED)
			action = NULL;

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	return !action;
}

void compat_irq_chip_set_default_handler(struct irq_desc *desc)
{
	/*
	 * If the architecture still has not overridden
	 * the flow handler then zap the default. This
	 * should catch incorrect flow-type setting.
	 */
	if (desc->handle_irq == &handle_bad_irq)
		desc->handle_irq = NULL;
}

int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
		unsigned long flags)
{
	int ret;
	struct irq_chip *chip = desc->chip;

	if (!chip || !chip->set_type) {
		/*
		 * IRQF_TRIGGER_* but the PIC does not support multiple
		 * flow-types?
		 */
		pr_debug("No set_type function for IRQ %d (%s)\n", irq,
			 chip ? (chip->name ? : "unknown") : "unknown");
		return 0;
	}

	/* caller masked out all except trigger mode flags */
	ret = chip->set_type(irq, flags);

	if (ret)
		pr_err("setting trigger mode %d for irq %u failed (%pF)\n",
		       (int)flags, irq, chip->set_type);
	else {
		if (flags & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
			flags |= IRQ_LEVEL;
		/* note that IRQF_TRIGGER_MASK == IRQ_TYPE_SENSE_MASK */
		desc->status &= ~(IRQ_LEVEL | IRQ_TYPE_SENSE_MASK);
		desc->status |= flags;

		if (chip != desc->chip)
			irq_chip_set_defaults(desc->chip);
	}

	return ret;
}

/*
 * Default primary interrupt handler for threaded interrupts. Is
 * assigned as primary handler when request_threaded_irq is called
 * with handler == NULL. Useful for oneshot interrupts.
 */
static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
{
	return IRQ_WAKE_THREAD;
}

/*
 * Primary handler for nested threaded interrupts. Should never be
 * called.
 */
static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
{
	WARN(1, "Primary handler called for nested irq %d\n", irq);
	return IRQ_NONE;
}

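/*
 * Example: a sketch of requesting a oneshot threaded interrupt with the
 * default primary handler installed (handler == NULL). The line stays
 * masked until my_thread_fn returns; irq_finalize_oneshot() below then
 * unmasks it. my_thread_fn and dev are hypothetical.
 *
 *	ret = request_threaded_irq(irq, NULL, my_thread_fn,
 *				   IRQF_ONESHOT | IRQF_TRIGGER_LOW,
 *				   "my-dev", dev);
 */
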
static int irq_wait_for_interrupt(struct irqaction *action)
{
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (test_and_clear_bit(IRQTF_RUNTHREAD,
				       &action->thread_flags)) {
			__set_current_state(TASK_RUNNING);
			return 0;
		}
		schedule();
	}
	return -1;
}

/*
 * Oneshot interrupts keep the irq line masked until the threaded
 * handler has finished. Unmask if the interrupt has not been disabled
 * and is marked MASKED.
 */
static void irq_finalize_oneshot(unsigned int irq, struct irq_desc *desc)
{
again:
	chip_bus_lock(irq, desc);
	raw_spin_lock_irq(&desc->lock);

	/*
	 * Implausible though it may be we need to protect us against
	 * the following scenario:
	 *
	 * The thread finishes faster than the hard interrupt handler
	 * on the other CPU. If we unmask the irq line then the
	 * interrupt can come in again and mask the line, then leave
	 * due to IRQ_INPROGRESS, and the irq line is masked forever.
	 */
	if (unlikely(desc->status & IRQ_INPROGRESS)) {
		raw_spin_unlock_irq(&desc->lock);
		chip_bus_sync_unlock(irq, desc);
		cpu_relax();
		goto again;
	}

	if (!(desc->status & IRQ_DISABLED) && (desc->status & IRQ_MASKED)) {
		desc->status &= ~IRQ_MASKED;
		desc->chip->unmask(irq);
	}
	raw_spin_unlock_irq(&desc->lock);
	chip_bus_sync_unlock(irq, desc);
}

#ifdef CONFIG_SMP
/*
 * Check whether we need to change the affinity of the interrupt thread.
 */
static void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
{
	cpumask_var_t mask;

	if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
		return;

	/*
	 * In case we are out of memory we set IRQTF_AFFINITY again and
	 * try again next time
	 */
	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
		set_bit(IRQTF_AFFINITY, &action->thread_flags);
		return;
	}

	raw_spin_lock_irq(&desc->lock);
	cpumask_copy(mask, desc->affinity);
	raw_spin_unlock_irq(&desc->lock);

	set_cpus_allowed_ptr(current, mask);
	free_cpumask_var(mask);
}
#else
static inline void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
#endif

/*
 * Interrupt handler thread
 */
static int irq_thread(void *data)
{
	struct sched_param param = { .sched_priority = MAX_USER_RT_PRIO/2, };
	struct irqaction *action = data;
	struct irq_desc *desc = irq_to_desc(action->irq);
	int wake, oneshot = desc->status & IRQ_ONESHOT;

	sched_setscheduler(current, SCHED_FIFO, &param);
	current->irqaction = action;

	while (!irq_wait_for_interrupt(action)) {

		irq_thread_check_affinity(desc, action);

		atomic_inc(&desc->threads_active);

		raw_spin_lock_irq(&desc->lock);
		if (unlikely(desc->status & IRQ_DISABLED)) {
			/*
			 * CHECKME: We might need a dedicated
			 * IRQ_THREAD_PENDING flag here, which
			 * retriggers the thread in check_irq_resend()
			 * but AFAICT IRQ_PENDING should be fine as it
			 * retriggers the interrupt itself --- tglx
			 */
			desc->status |= IRQ_PENDING;
			raw_spin_unlock_irq(&desc->lock);
		} else {
			raw_spin_unlock_irq(&desc->lock);

			action->thread_fn(action->irq, action->dev_id);

			if (oneshot)
				irq_finalize_oneshot(action->irq, desc);
		}

		wake = atomic_dec_and_test(&desc->threads_active);

		if (wake && waitqueue_active(&desc->wait_for_threads))
			wake_up(&desc->wait_for_threads);
	}

	/*
	 * Clear irqaction. Otherwise exit_irq_thread() would make
	 * fuzz about an active irq thread going into nirvana.
	 */
	current->irqaction = NULL;
	return 0;
}

/*
 * Called from do_exit()
 */
void exit_irq_thread(void)
{
	struct task_struct *tsk = current;

	if (!tsk->irqaction)
		return;

	printk(KERN_ERR
	       "exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
	       tsk->comm ? tsk->comm : "", tsk->pid, tsk->irqaction->irq);

	/*
	 * Set the THREAD DIED flag to prevent further wakeups of the
	 * soon to be gone threaded handler.
	 */
	set_bit(IRQTF_DIED, &tsk->irqaction->flags);
}

/*
 * Internal function to register an irqaction - typically used to
 * allocate special interrupts that are part of the architecture.
 */
static int
__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
{
	struct irqaction *old, **old_ptr;
	const char *old_name = NULL;
	unsigned long flags;
	int nested, shared = 0;
	int ret;

	if (!desc)
		return -EINVAL;

	if (desc->chip == &no_irq_chip)
		return -ENOSYS;
	/*
	 * Some drivers like serial.c use request_irq() heavily,
	 * so we have to be careful not to interfere with a
	 * running system.
	 */
	if (new->flags & IRQF_SAMPLE_RANDOM) {
		/*
		 * This function might sleep, we want to call it first,
		 * outside of the atomic block.
		 * Yes, this might clear the entropy pool if the wrong
		 * driver is attempted to be loaded, without actually
		 * installing a new handler, but is this really a problem?
		 * Only the sysadmin is able to do this.
		 */
		rand_initialize_irq(irq);
	}

	/* Oneshot interrupts are not allowed with shared */
	if ((new->flags & IRQF_ONESHOT) && (new->flags & IRQF_SHARED))
		return -EINVAL;

	/*
	 * Check whether the interrupt nests into another interrupt
	 * thread.
	 */
	nested = desc->status & IRQ_NESTED_THREAD;
	if (nested) {
		if (!new->thread_fn)
			return -EINVAL;
		/*
		 * Replace the primary handler which was provided from
		 * the driver for non nested interrupt handling by the
		 * dummy function which warns when called.
		 */
		new->handler = irq_nested_primary_handler;
	}

	/*
	 * Create a handler thread when a thread function is supplied
	 * and the interrupt does not nest into another interrupt
	 * thread.
	 */
	if (new->thread_fn && !nested) {
		struct task_struct *t;

		t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
				   new->name);
		if (IS_ERR(t))
			return PTR_ERR(t);
		/*
		 * We keep the reference to the task struct even if
		 * the thread dies to avoid that the interrupt code
		 * references an already freed task_struct.
		 */
		get_task_struct(t);
		new->thread = t;
	}

	/*
	 * The following block of code has to be executed atomically
	 */
	raw_spin_lock_irqsave(&desc->lock, flags);
	old_ptr = &desc->action;
	old = *old_ptr;
	if (old) {
		/*
		 * Can't share interrupts unless both agree to and are
		 * the same type (level, edge, polarity). So both flag
		 * fields must have IRQF_SHARED set and the bits which
		 * set the trigger type must match.
		 */
		if (!((old->flags & new->flags) & IRQF_SHARED) ||
		    ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK)) {
			old_name = old->name;
			goto mismatch;
		}

#if defined(CONFIG_IRQ_PER_CPU)
		/* All handlers must agree on per-cpuness */
		if ((old->flags & IRQF_PERCPU) !=
		    (new->flags & IRQF_PERCPU))
			goto mismatch;
#endif

		/* add new interrupt at end of irq queue */
		do {
			old_ptr = &old->next;
			old = *old_ptr;
		} while (old);
		shared = 1;
	}

	if (!shared) {
		irq_chip_set_defaults(desc->chip);

		init_waitqueue_head(&desc->wait_for_threads);

		/* Setup the type (level, edge polarity) if configured: */
		if (new->flags & IRQF_TRIGGER_MASK) {
			ret = __irq_set_trigger(desc, irq,
					new->flags & IRQF_TRIGGER_MASK);

			if (ret)
				goto out_thread;
		} else
			compat_irq_chip_set_default_handler(desc);
#if defined(CONFIG_IRQ_PER_CPU)
		if (new->flags & IRQF_PERCPU)
			desc->status |= IRQ_PER_CPU;
#endif

		desc->status &= ~(IRQ_AUTODETECT | IRQ_WAITING | IRQ_ONESHOT |
				  IRQ_INPROGRESS | IRQ_SPURIOUS_DISABLED);

		if (new->flags & IRQF_ONESHOT)
			desc->status |= IRQ_ONESHOT;

		if (!(desc->status & IRQ_NOAUTOEN)) {
			desc->depth = 0;
			desc->status &= ~IRQ_DISABLED;
			desc->chip->startup(irq);
		} else
			/* Undo nested disables: */
			desc->depth = 1;

		/* Exclude IRQ from balancing if requested */
		if (new->flags & IRQF_NOBALANCING)
			desc->status |= IRQ_NO_BALANCING;

		/* Set default affinity mask once everything is setup */
		setup_affinity(irq, desc);

	} else if ((new->flags & IRQF_TRIGGER_MASK)
			&& (new->flags & IRQF_TRIGGER_MASK)
			!= (desc->status & IRQ_TYPE_SENSE_MASK)) {
		/* hope the handler works with the actual trigger mode... */
		pr_warning("IRQ %d uses trigger mode %d; requested %d\n",
			   irq, (int)(desc->status & IRQ_TYPE_SENSE_MASK),
			   (int)(new->flags & IRQF_TRIGGER_MASK));
	}

	new->irq = irq;
	*old_ptr = new;

	/* Reset broken irq detection when installing new handler */
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;

	/*
	 * Check whether we disabled the irq via the spurious handler
	 * before. Reenable it and give it another chance.
	 */
	if (shared && (desc->status & IRQ_SPURIOUS_DISABLED)) {
		desc->status &= ~IRQ_SPURIOUS_DISABLED;
		__enable_irq(desc, irq, false);
	}

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	/*
	 * Strictly no need to wake it up, but hung_task complains
	 * when no hard interrupt wakes the thread up.
	 */
	if (new->thread)
		wake_up_process(new->thread);

	register_irq_proc(irq, desc);
	new->dir = NULL;
	register_handler_proc(irq, new);

	return 0;

mismatch:
#ifdef CONFIG_DEBUG_SHIRQ
	if (!(new->flags & IRQF_PROBE_SHARED)) {
		printk(KERN_ERR "IRQ handler type mismatch for IRQ %d\n", irq);
		if (old_name)
			printk(KERN_ERR "current handler: %s\n", old_name);
		dump_stack();
	}
#endif
	ret = -EBUSY;

out_thread:
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	if (new->thread) {
		struct task_struct *t = new->thread;

		new->thread = NULL;
		if (likely(!test_bit(IRQTF_DIED, &new->thread_flags)))
			kthread_stop(t);
		put_task_struct(t);
	}
	return ret;
}

/**
 *	setup_irq - setup an interrupt
 *	@irq: Interrupt line to setup
 *	@act: irqaction for the interrupt
 *
 * Used to statically setup interrupts in the early boot process.
 */
int setup_irq(unsigned int irq, struct irqaction *act)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return __setup_irq(irq, desc, act);
}
EXPORT_SYMBOL_GPL(setup_irq);

/*
 * Internal function to unregister an irqaction - used to free
 * regular and special interrupts that are part of the architecture.
 */
static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action, **action_ptr;
	unsigned long flags;

	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);

	if (!desc)
		return NULL;

	raw_spin_lock_irqsave(&desc->lock, flags);

	/*
	 * There can be multiple actions per IRQ descriptor, find the right
	 * one based on the dev_id:
	 */
	action_ptr = &desc->action;
	for (;;) {
		action = *action_ptr;

		if (!action) {
			WARN(1, "Trying to free already-free IRQ %d\n", irq);
			raw_spin_unlock_irqrestore(&desc->lock, flags);

			return NULL;
		}

		if (action->dev_id == dev_id)
			break;
		action_ptr = &action->next;
	}

	/* Found it - now remove it from the list of entries: */
	*action_ptr = action->next;

	/* Currently used only by UML, might disappear one day: */
#ifdef CONFIG_IRQ_RELEASE_METHOD
	if (desc->chip->release)
		desc->chip->release(irq, dev_id);
#endif

	/* If this was the last handler, shut down the IRQ line: */
	if (!desc->action) {
		desc->status |= IRQ_DISABLED;
		if (desc->chip->shutdown)
			desc->chip->shutdown(irq);
		else
			desc->chip->disable(irq);
	}

#ifdef CONFIG_SMP
	/* make sure affinity_hint is cleaned up */
	if (WARN_ON_ONCE(desc->affinity_hint))
		desc->affinity_hint = NULL;
#endif

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	unregister_handler_proc(irq, action);

	/* Make sure it's not being used on another CPU: */
	synchronize_irq(irq);

#ifdef CONFIG_DEBUG_SHIRQ
	/*
	 * It's a shared IRQ -- the driver ought to be prepared for an IRQ
	 * event to happen even now it's being freed, so let's make sure that
	 * is so by doing an extra call to the handler ....
	 *
	 * ( We do this after actually deregistering it, to make sure that a
	 *   'real' IRQ doesn't run in parallel with our fake. )
	 */
	if (action->flags & IRQF_SHARED) {
		local_irq_save(flags);
		action->handler(irq, dev_id);
		local_irq_restore(flags);
	}
#endif

	if (action->thread) {
		if (!test_bit(IRQTF_DIED, &action->thread_flags))
			kthread_stop(action->thread);
		put_task_struct(action->thread);
	}

	return action;
}

/**
 *	remove_irq - free an interrupt
 *	@irq: Interrupt line to free
 *	@act: irqaction for the interrupt
 *
 * Used to remove interrupts statically setup by the early boot process.
 */
void remove_irq(unsigned int irq, struct irqaction *act)
{
	__free_irq(irq, act->dev_id);
}
EXPORT_SYMBOL_GPL(remove_irq);

/**
 *	free_irq - free an interrupt allocated with request_irq
 *	@irq: Interrupt line to free
 *	@dev_id: Device identity to free
 *
 *	Remove an interrupt handler. The handler is removed and if the
 *	interrupt line is no longer in use by any driver it is disabled.
 *	On a shared IRQ the caller must ensure the interrupt is disabled
 *	on the card it drives before calling this function. The function
 *	does not return until any executing interrupts for this IRQ
 *	have completed.
 *
 *	This function must not be called from interrupt context.
 */
void free_irq(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return;

	chip_bus_lock(irq, desc);
	kfree(__free_irq(irq, dev_id));
	chip_bus_sync_unlock(irq, desc);
}
EXPORT_SYMBOL(free_irq);

/**
 *	request_threaded_irq - allocate an interrupt line
 *	@irq: Interrupt line to allocate
 *	@handler: Function to be called when the IRQ occurs.
 *		  Primary handler for threaded interrupts
 *		  If NULL and thread_fn != NULL the default
 *		  primary handler is installed
 *	@thread_fn: Function called from the irq handler thread
 *		    If NULL, no irq thread is created
 *	@irqflags: Interrupt type flags
 *	@devname: An ascii name for the claiming device
 *	@dev_id: A cookie passed back to the handler function
 *
 *	This call allocates interrupt resources and enables the
 *	interrupt line and IRQ handling. From the point this
 *	call is made your handler function may be invoked. Since
 *	your handler function must clear any interrupt the board
 *	raises, you must take care both to initialise your hardware
 *	and to set up the interrupt handler in the right order.
 *
 *	If you want to set up a threaded irq handler for your device
 *	then you need to supply @handler and @thread_fn. @handler is
 *	still called in hard interrupt context and has to check
 *	whether the interrupt originates from the device. If yes it
 *	needs to disable the interrupt on the device and return
 *	IRQ_WAKE_THREAD which will wake up the handler thread and run
 *	@thread_fn. This split handler design is necessary to support
 *	shared interrupts.
 *
 *	Dev_id must be globally unique. Normally the address of the
 *	device data structure is used as the cookie. Since the handler
 *	receives this value it makes sense to use it.
 *
 *	If your interrupt is shared you must pass a non NULL dev_id
 *	as this is required when freeing the interrupt.
 *
 *	Flags:
 *
 *	IRQF_SHARED		Interrupt is shared
 *	IRQF_SAMPLE_RANDOM	The interrupt can be used for entropy
 *	IRQF_TRIGGER_*		Specify active edge(s) or level
 *
 */
int request_threaded_irq(unsigned int irq, irq_handler_t handler,
			 irq_handler_t thread_fn, unsigned long irqflags,
			 const char *devname, void *dev_id)
{
	struct irqaction *action;
	struct irq_desc *desc;
	int retval;

	/*
	 * Sanity-check: shared interrupts must pass in a real dev-ID,
	 * otherwise we'll have trouble later trying to figure out
	 * which interrupt is which (messes up the interrupt freeing
	 * logic etc).
	 */
	if ((irqflags & IRQF_SHARED) && !dev_id)
		return -EINVAL;

	desc = irq_to_desc(irq);
	if (!desc)
		return -EINVAL;

	if (desc->status & IRQ_NOREQUEST)
		return -EINVAL;

	if (!handler) {
		if (!thread_fn)
			return -EINVAL;
		handler = irq_default_primary_handler;
	}

	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->thread_fn = thread_fn;
	action->flags = irqflags;
	action->name = devname;
	action->dev_id = dev_id;

	chip_bus_lock(irq, desc);
	retval = __setup_irq(irq, desc, action);
	chip_bus_sync_unlock(irq, desc);

	if (retval)
		kfree(action);

#ifdef CONFIG_DEBUG_SHIRQ
	if (!retval && (irqflags & IRQF_SHARED)) {
		/*
		 * It's a shared IRQ -- the driver ought to be prepared for it
		 * to happen immediately, so let's make sure....
		 * We disable the irq to make sure that a 'real' IRQ doesn't
		 * run in parallel with our fake.
		 */
		unsigned long flags;

		disable_irq(irq);
		local_irq_save(flags);

		handler(irq, dev_id);

		local_irq_restore(flags);
		enable_irq(irq);
	}
#endif
	return retval;
}
EXPORT_SYMBOL(request_threaded_irq);
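
/*
 * Example: a minimal split-handler sketch for a shared line, following
 * the kernel-doc above. The primary handler checks and quiets the device
 * in hard irq context; the heavy lifting runs in the irq thread. All
 * my_* names are hypothetical.
 *
 *	static irqreturn_t my_quick_check(int irq, void *dev_id)
 *	{
 *		struct my_dev *dev = dev_id;
 *
 *		if (!my_dev_irq_pending(dev))
 *			return IRQ_NONE;	// not ours (shared line)
 *		my_dev_mask_irq(dev);		// silence the device
 *		return IRQ_WAKE_THREAD;		// run my_slow_work()
 *	}
 *
 *	static irqreturn_t my_slow_work(int irq, void *dev_id)
 *	{
 *		// may sleep here: runs in the irq/<nr>-<name> kthread
 *		return IRQ_HANDLED;
 *	}
 *
 *	ret = request_threaded_irq(irq, my_quick_check, my_slow_work,
 *				   IRQF_SHARED, "my-dev", dev);
 *	...
 *	free_irq(irq, dev);	// dev_id selects the action on a shared line
 */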

/**
 *	request_any_context_irq - allocate an interrupt line
 *	@irq: Interrupt line to allocate
 *	@handler: Function to be called when the IRQ occurs.
 *		  Threaded handler for threaded interrupts.
 *	@flags: Interrupt type flags
 *	@name: An ascii name for the claiming device
 *	@dev_id: A cookie passed back to the handler function
 *
 *	This call allocates interrupt resources and enables the
 *	interrupt line and IRQ handling. It selects either a
 *	hardirq or threaded handling method depending on the
 *	context.
 *
 *	On failure, it returns a negative value. On success,
 *	it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
 */
int request_any_context_irq(unsigned int irq, irq_handler_t handler,
			    unsigned long flags, const char *name, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int ret;

	if (!desc)
		return -EINVAL;

	if (desc->status & IRQ_NESTED_THREAD) {
		ret = request_threaded_irq(irq, NULL, handler,
					   flags, name, dev_id);
		return !ret ? IRQC_IS_NESTED : ret;
	}

	ret = request_irq(irq, handler, flags, name, dev_id);
	return !ret ? IRQC_IS_HARDIRQ : ret;
}
EXPORT_SYMBOL_GPL(request_any_context_irq);
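
/*
 * Example: a sketch for a driver that may sit behind a nested-thread irq
 * chip (e.g. on a slow bus), where the handling context is not known in
 * advance. The return value distinguishes how the handler will run;
 * my_handler and the flag field are hypothetical.
 *
 *	ret = request_any_context_irq(irq, my_handler, 0, "my-dev", dev);
 *	if (ret < 0)
 *		return ret;
 *	if (ret == IRQC_IS_NESTED)
 *		dev->handler_may_sleep = true;	// runs in thread context
 */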