/*
 * linux/kernel/irq/manage.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006 Thomas Gleixner
 *
 * This file contains driver APIs to the irq subsystem.
 */

#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "internals.h"

/**
 * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
 * @irq: interrupt number to wait for
 *
 * This function waits for any pending IRQ handlers for this interrupt
 * to complete before returning. If you use this function while
 * holding a resource the IRQ handler may need you will deadlock.
 *
 * This function may be called - with care - from IRQ context.
 */
void synchronize_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned int state;

	if (!desc)
		return;

	do {
		unsigned long flags;

		/*
		 * Wait until we're out of the critical section. This might
		 * give the wrong answer due to the lack of memory barriers.
		 */
		while (desc->istate & IRQS_INPROGRESS)
			cpu_relax();

		/* Ok, that indicated we're done: double-check carefully. */
		raw_spin_lock_irqsave(&desc->lock, flags);
		state = desc->istate;
		raw_spin_unlock_irqrestore(&desc->lock, flags);

		/* Oops, that failed? */
	} while (state & IRQS_INPROGRESS);

	/*
	 * We made sure that no hardirq handler is running. Now verify
	 * that no threaded handlers are active.
	 */
	wait_event(desc->wait_for_threads, !atomic_read(&desc->threads_active));
}
EXPORT_SYMBOL(synchronize_irq);
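
/*
 * Illustrative sketch, compiled out: the typical caller of
 * synchronize_irq() quiesces its device first, then waits for any
 * in-flight handler before freeing state the handler touches. The
 * "foo" structure and names below are hypothetical, not kernel APIs.
 */
#if 0
struct foo_dev {
	unsigned int irq;
	bool shutting_down;	/* checked by the foo irq handler */
	void *rx_buf;
};

static void foo_shutdown(struct foo_dev *foo)
{
	foo->shutting_down = true;	/* handler sees this and bails out */
	synchronize_irq(foo->irq);	/* wait out any in-flight handler */
	kfree(foo->rx_buf);		/* now safe: no handler uses it */
	foo->rx_buf = NULL;
}
#endif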
63 | ||
64 | #ifdef CONFIG_SMP | |
65 | cpumask_var_t irq_default_affinity; | |
66 | ||
67 | /** | |
68 | * irq_can_set_affinity - Check if the affinity of a given irq can be set | |
69 | * @irq: Interrupt to check | |
70 | * | |
71 | */ | |
72 | int irq_can_set_affinity(unsigned int irq) | |
73 | { | |
74 | struct irq_desc *desc = irq_to_desc(irq); | |
75 | ||
76 | if (!desc || !irqd_can_balance(&desc->irq_data) || | |
77 | !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity) | |
78 | return 0; | |
79 | ||
80 | return 1; | |
81 | } | |
82 | ||
83 | /** | |
84 | * irq_set_thread_affinity - Notify irq threads to adjust affinity | |
 * @desc: irq descriptor which has affinity changed
 *
 * We just set IRQTF_AFFINITY and delegate the affinity setting
 * to the interrupt thread itself. We can not call
 * set_cpus_allowed_ptr() here as we hold desc->lock and this
 * code can be called from hard interrupt context.
 */
void irq_set_thread_affinity(struct irq_desc *desc)
{
	struct irqaction *action = desc->action;

	while (action) {
		if (action->thread)
			set_bit(IRQTF_AFFINITY, &action->thread_flags);
		action = action->next;
	}
}

#ifdef CONFIG_GENERIC_PENDING_IRQ
static inline bool irq_can_move_pcntxt(struct irq_desc *desc)
{
	return irq_settings_can_move_pcntxt(desc);
}
static inline bool irq_move_pending(struct irq_desc *desc)
{
	return irqd_is_setaffinity_pending(&desc->irq_data);
}
static inline void
irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)
{
	cpumask_copy(desc->pending_mask, mask);
}
static inline void
irq_get_pending(struct cpumask *mask, struct irq_desc *desc)
{
	cpumask_copy(mask, desc->pending_mask);
}
#else
static inline bool irq_can_move_pcntxt(struct irq_desc *desc) { return true; }
static inline bool irq_move_pending(struct irq_desc *desc) { return false; }
static inline void
irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { }
static inline void
irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { }
#endif

/**
 * irq_set_affinity - Set the irq affinity of a given irq
 * @irq: Interrupt to set affinity
 * @cpumask: cpumask
 *
 */
int irq_set_affinity(unsigned int irq, const struct cpumask *mask)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irq_chip *chip = desc->irq_data.chip;
	unsigned long flags;
	int ret = 0;

	if (!chip->irq_set_affinity)
		return -EINVAL;

	raw_spin_lock_irqsave(&desc->lock, flags);

	if (irq_can_move_pcntxt(desc)) {
		ret = chip->irq_set_affinity(&desc->irq_data, mask, false);
		switch (ret) {
		case IRQ_SET_MASK_OK:
			cpumask_copy(desc->irq_data.affinity, mask);
		case IRQ_SET_MASK_OK_NOCOPY:
			irq_set_thread_affinity(desc);
			ret = 0;
		}
	} else {
		irqd_set_move_pending(&desc->irq_data);
		irq_copy_pending(desc, mask);
	}

	if (desc->affinity_notify) {
		kref_get(&desc->affinity_notify->kref);
		schedule_work(&desc->affinity_notify->work);
	}
	irq_compat_set_affinity(desc);
	irqd_set(&desc->irq_data, IRQD_AFFINITY_SET);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}
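
/*
 * Illustrative sketch, compiled out: built-in code can pin an
 * interrupt to a single CPU, guarding with irq_can_set_affinity()
 * since per-cpu and no-balance interrupts refuse affinity changes.
 */
#if 0
static int pin_irq_to_cpu0(unsigned int irq)
{
	if (!irq_can_set_affinity(irq))
		return -ENOSYS;

	return irq_set_affinity(irq, cpumask_of(0));
}
#endif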
172 | ||
173 | int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m) | |
174 | { | |
175 | unsigned long flags; | |
176 | struct irq_desc *desc = irq_get_desc_lock(irq, &flags); | |
177 | ||
178 | if (!desc) | |
179 | return -EINVAL; | |
180 | desc->affinity_hint = m; | |
181 | irq_put_desc_unlock(desc, flags); | |
182 | return 0; | |
183 | } | |
184 | EXPORT_SYMBOL_GPL(irq_set_affinity_hint); | |
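
/*
 * Illustrative sketch, compiled out: a multiqueue driver can publish
 * per-queue hints so a userspace balancer such as irqbalance spreads
 * the queues across CPUs. The hint must be cleared again before
 * free_irq(). The "foo" names are hypothetical.
 */
#if 0
static void foo_set_queue_hints(struct foo_dev *foo)
{
	int q;

	for (q = 0; q < foo->nr_queues; q++)
		irq_set_affinity_hint(foo->queue_irq[q],
				      cpumask_of(q % num_online_cpus()));
}

static void foo_clear_queue_hints(struct foo_dev *foo)
{
	int q;

	for (q = 0; q < foo->nr_queues; q++)
		irq_set_affinity_hint(foo->queue_irq[q], NULL);
}
#endif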
185 | ||
186 | static void irq_affinity_notify(struct work_struct *work) | |
187 | { | |
188 | struct irq_affinity_notify *notify = | |
189 | container_of(work, struct irq_affinity_notify, work); | |
190 | struct irq_desc *desc = irq_to_desc(notify->irq); | |
191 | cpumask_var_t cpumask; | |
192 | unsigned long flags; | |
193 | ||
194 | if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL)) | |
195 | goto out; | |
196 | ||
197 | raw_spin_lock_irqsave(&desc->lock, flags); | |
198 | if (irq_move_pending(desc)) | |
199 | irq_get_pending(cpumask, desc); | |
200 | else | |
201 | cpumask_copy(cpumask, desc->irq_data.affinity); | |
202 | raw_spin_unlock_irqrestore(&desc->lock, flags); | |
203 | ||
204 | notify->notify(notify, cpumask); | |
205 | ||
206 | free_cpumask_var(cpumask); | |
207 | out: | |
208 | kref_put(¬ify->kref, notify->release); | |
209 | } | |
210 | ||
211 | /** | |
212 | * irq_set_affinity_notifier - control notification of IRQ affinity changes | |
213 | * @irq: Interrupt for which to enable/disable notification | |
214 | * @notify: Context for notification, or %NULL to disable | |
215 | * notification. Function pointers must be initialised; | |
216 | * the other fields will be initialised by this function. | |
217 | * | |
218 | * Must be called in process context. Notification may only be enabled | |
219 | * after the IRQ is allocated and must be disabled before the IRQ is | |
220 | * freed using free_irq(). | |
221 | */ | |
222 | int | |
223 | irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify) | |
224 | { | |
225 | struct irq_desc *desc = irq_to_desc(irq); | |
226 | struct irq_affinity_notify *old_notify; | |
227 | unsigned long flags; | |
228 | ||
229 | /* The release function is promised process context */ | |
230 | might_sleep(); | |
231 | ||
232 | if (!desc) | |
233 | return -EINVAL; | |
234 | ||
235 | /* Complete initialisation of *notify */ | |
236 | if (notify) { | |
237 | notify->irq = irq; | |
238 | kref_init(¬ify->kref); | |
239 | INIT_WORK(¬ify->work, irq_affinity_notify); | |
240 | } | |
241 | ||
242 | raw_spin_lock_irqsave(&desc->lock, flags); | |
243 | old_notify = desc->affinity_notify; | |
244 | desc->affinity_notify = notify; | |
245 | raw_spin_unlock_irqrestore(&desc->lock, flags); | |
246 | ||
247 | if (old_notify) | |
248 | kref_put(&old_notify->kref, old_notify->release); | |
249 | ||
250 | return 0; | |
251 | } | |
252 | EXPORT_SYMBOL_GPL(irq_set_affinity_notifier); | |
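
/*
 * Illustrative sketch, compiled out: a driver embeds a
 * struct irq_affinity_notify, fills in only ->notify and ->release,
 * and lets irq_set_affinity_notifier() initialise the rest. The
 * "foo" names are hypothetical.
 */
#if 0
struct foo_irq_ctx {
	struct irq_affinity_notify affinity_notify;
	/* per-CPU driver state that follows the irq around ... */
};

static void foo_affinity_notify(struct irq_affinity_notify *notify,
				const cpumask_var_t mask)
{
	struct foo_irq_ctx *ctx =
		container_of(notify, struct foo_irq_ctx, affinity_notify);

	/* runs from a workqueue: re-steer ctx resources according to mask */
}

static void foo_affinity_release(struct kref *ref)
{
	/* process context, called once the last reference is dropped */
}

static int foo_register_notifier(struct foo_irq_ctx *ctx, unsigned int irq)
{
	ctx->affinity_notify.notify = foo_affinity_notify;
	ctx->affinity_notify.release = foo_affinity_release;
	return irq_set_affinity_notifier(irq, &ctx->affinity_notify);
}
#endif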
253 | ||
254 | #ifndef CONFIG_AUTO_IRQ_AFFINITY | |
255 | /* | |
256 | * Generic version of the affinity autoselector. | |
257 | */ | |
258 | static int | |
259 | setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask) | |
260 | { | |
261 | struct irq_chip *chip = irq_desc_get_chip(desc); | |
262 | struct cpumask *set = irq_default_affinity; | |
263 | int ret; | |
264 | ||
265 | /* Excludes PER_CPU and NO_BALANCE interrupts */ | |
266 | if (!irq_can_set_affinity(irq)) | |
267 | return 0; | |
268 | ||
269 | /* | |
	 * Preserve a userspace affinity setup, but make sure that
	 * one of the targets is online.
	 */
	if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
		if (cpumask_intersects(desc->irq_data.affinity,
				       cpu_online_mask))
			set = desc->irq_data.affinity;
		else {
			irq_compat_clr_affinity(desc);
			irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
		}
	}

	cpumask_and(mask, cpu_online_mask, set);
	ret = chip->irq_set_affinity(&desc->irq_data, mask, false);
	switch (ret) {
	case IRQ_SET_MASK_OK:
		cpumask_copy(desc->irq_data.affinity, mask);
	case IRQ_SET_MASK_OK_NOCOPY:
		irq_set_thread_affinity(desc);
	}
	return 0;
}
#else
static inline int
setup_affinity(unsigned int irq, struct irq_desc *d, struct cpumask *mask)
{
	return irq_select_affinity(irq);
}
#endif

/*
 * Called when affinity is set via /proc/irq
 */
int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;
	int ret;

	raw_spin_lock_irqsave(&desc->lock, flags);
	ret = setup_affinity(irq, desc, mask);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	return ret;
}

#else
static inline int
setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
{
	return 0;
}
#endif

void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend)
{
	if (suspend) {
		if (!desc->action || (desc->action->flags & IRQF_NO_SUSPEND))
			return;
		desc->istate |= IRQS_SUSPENDED;
	}

	if (!desc->depth++)
		irq_disable(desc);
}

static int __disable_irq_nosync(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);

	if (!desc)
		return -EINVAL;
	__disable_irq(desc, irq, false);
	irq_put_desc_busunlock(desc, flags);
	return 0;
}

/**
 * disable_irq_nosync - disable an irq without waiting
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Disables and Enables are
 * nested.
 * Unlike disable_irq(), this function does not ensure existing
 * instances of the IRQ handler have completed before returning.
 *
 * This function may be called from IRQ context.
 */
void disable_irq_nosync(unsigned int irq)
{
	__disable_irq_nosync(irq);
}
EXPORT_SYMBOL(disable_irq_nosync);

/**
 * disable_irq - disable an irq and wait for completion
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Enables and Disables are
 * nested.
 * This function waits for any pending IRQ handlers for this interrupt
 * to complete before returning. If you use this function while
 * holding a resource the IRQ handler may need you will deadlock.
 *
 * This function may be called - with care - from IRQ context.
 */
void disable_irq(unsigned int irq)
{
	if (!__disable_irq_nosync(irq))
		synchronize_irq(irq);
}
EXPORT_SYMBOL(disable_irq);
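
/*
 * Illustrative sketch, compiled out: disables nest, so each
 * disable_irq() needs exactly one matching enable_irq(). Because
 * disable_irq() also waits for running handlers, the device can be
 * reprogrammed without racing against its own handler. The "foo"
 * names are hypothetical.
 */
#if 0
static void foo_reprogram(struct foo_dev *foo)
{
	disable_irq(foo->irq);		/* also waits for the handler */
	foo_rewrite_registers(foo);	/* handler cannot observe this */
	enable_irq(foo->irq);		/* matches the disable above */
}
#endif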
383 | ||
384 | void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume) | |
385 | { | |
386 | if (resume) { | |
387 | if (!(desc->istate & IRQS_SUSPENDED)) { | |
388 | if (!desc->action) | |
389 | return; | |
390 | if (!(desc->action->flags & IRQF_FORCE_RESUME)) | |
391 | return; | |
392 | /* Pretend that it got disabled ! */ | |
393 | desc->depth++; | |
394 | } | |
395 | desc->istate &= ~IRQS_SUSPENDED; | |
396 | } | |
397 | ||
398 | switch (desc->depth) { | |
399 | case 0: | |
400 | err_out: | |
401 | WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq); | |
402 | break; | |
403 | case 1: { | |
404 | if (desc->istate & IRQS_SUSPENDED) | |
405 | goto err_out; | |
406 | /* Prevent probing on this irq: */ | |
407 | irq_settings_set_noprobe(desc); | |
408 | irq_enable(desc); | |
409 | check_irq_resend(desc, irq); | |
410 | /* fall-through */ | |
411 | } | |
412 | default: | |
413 | desc->depth--; | |
414 | } | |
415 | } | |
416 | ||
417 | /** | |
418 | * enable_irq - enable handling of an irq | |
419 | * @irq: Interrupt to enable | |
420 | * | |
421 | * Undoes the effect of one call to disable_irq(). If this | |
422 | * matches the last disable, processing of interrupts on this | |
423 | * IRQ line is re-enabled. | |
424 | * | |
425 | * This function may be called from IRQ context only when | |
 * desc->irq_data.chip->bus_lock and desc->irq_data.chip->bus_sync_unlock
 * are NULL !
 */
void enable_irq(unsigned int irq)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);

	if (!desc)
		return;
	if (WARN(!desc->irq_data.chip,
		 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
		goto out;

	__enable_irq(desc, irq, false);
out:
	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL(enable_irq);

static int set_irq_wake_real(unsigned int irq, unsigned int on)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int ret = -ENXIO;

	if (desc->irq_data.chip->irq_set_wake)
		ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);

	return ret;
}

/**
 * irq_set_irq_wake - control irq power management wakeup
 * @irq: interrupt to control
 * @on: enable/disable power management wakeup
 *
 * Enable/disable power management wakeup mode, which is
 * disabled by default. Enables and disables must match,
 * just as they match for non-wakeup mode support.
 *
 * Wakeup mode lets this IRQ wake the system from sleep
 * states like "suspend to RAM".
 */
int irq_set_irq_wake(unsigned int irq, unsigned int on)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
	int ret = 0;

	/* wakeup-capable irqs can be shared between drivers that
	 * don't need to have the same sleep mode behaviors.
	 */
	if (on) {
		if (desc->wake_depth++ == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 0;
			else
				irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
		}
	} else {
		if (desc->wake_depth == 0) {
			WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
		} else if (--desc->wake_depth == 0) {
			ret = set_irq_wake_real(irq, on);
			if (ret)
				desc->wake_depth = 1;
			else
				irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
		}
	}
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
EXPORT_SYMBOL(irq_set_irq_wake);
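
/*
 * Illustrative sketch, compiled out: wakeup enablement is usually
 * driven from a driver's suspend/resume callbacks and gated on
 * device_may_wakeup(). Enables and disables must balance. The "foo"
 * names are hypothetical.
 */
#if 0
static int foo_suspend(struct device *dev)
{
	struct foo_dev *foo = dev_get_drvdata(dev);

	if (device_may_wakeup(dev))
		irq_set_irq_wake(foo->irq, 1);
	return 0;
}

static int foo_resume(struct device *dev)
{
	struct foo_dev *foo = dev_get_drvdata(dev);

	if (device_may_wakeup(dev))
		irq_set_irq_wake(foo->irq, 0);	/* balance the enable */
	return 0;
}
#endif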
500 | ||
501 | /* | |
502 | * Internal function that tells the architecture code whether a | |
503 | * particular irq has been exclusively allocated or is available | |
504 | * for driver use. | |
505 | */ | |
506 | int can_request_irq(unsigned int irq, unsigned long irqflags) | |
507 | { | |
508 | unsigned long flags; | |
509 | struct irq_desc *desc = irq_get_desc_lock(irq, &flags); | |
510 | int canrequest = 0; | |
511 | ||
512 | if (!desc) | |
513 | return 0; | |
514 | ||
515 | if (irq_settings_can_request(desc)) { | |
516 | if (desc->action) | |
517 | if (irqflags & desc->action->flags & IRQF_SHARED) | |
518 | canrequest =1; | |
519 | } | |
520 | irq_put_desc_unlock(desc, flags); | |
521 | return canrequest; | |
522 | } | |
523 | ||
524 | int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, | |
525 | unsigned long flags) | |
526 | { | |
527 | struct irq_chip *chip = desc->irq_data.chip; | |
528 | int ret, unmask = 0; | |
529 | ||
530 | if (!chip || !chip->irq_set_type) { | |
531 | /* | |
532 | * IRQF_TRIGGER_* but the PIC does not support multiple | |
533 | * flow-types? | |
534 | */ | |
535 | pr_debug("No set_type function for IRQ %d (%s)\n", irq, | |
536 | chip ? (chip->name ? : "unknown") : "unknown"); | |
537 | return 0; | |
538 | } | |
539 | ||
540 | flags &= IRQ_TYPE_SENSE_MASK; | |
541 | ||
542 | if (chip->flags & IRQCHIP_SET_TYPE_MASKED) { | |
543 | if (!(desc->istate & IRQS_MASKED)) | |
544 | mask_irq(desc); | |
545 | if (!(desc->istate & IRQS_DISABLED)) | |
546 | unmask = 1; | |
547 | } | |
548 | ||
549 | /* caller masked out all except trigger mode flags */ | |
550 | ret = chip->irq_set_type(&desc->irq_data, flags); | |
551 | ||
552 | switch (ret) { | |
553 | case IRQ_SET_MASK_OK: | |
554 | irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK); | |
555 | irqd_set(&desc->irq_data, flags); | |
556 | ||
557 | case IRQ_SET_MASK_OK_NOCOPY: | |
558 | flags = irqd_get_trigger_type(&desc->irq_data); | |
559 | irq_settings_set_trigger_mask(desc, flags); | |
560 | irqd_clear(&desc->irq_data, IRQD_LEVEL); | |
561 | irq_settings_clr_level(desc); | |
562 | if (flags & IRQ_TYPE_LEVEL_MASK) { | |
563 | irq_settings_set_level(desc); | |
564 | irqd_set(&desc->irq_data, IRQD_LEVEL); | |
565 | } | |
566 | ||
567 | if (chip != desc->irq_data.chip) | |
568 | irq_chip_set_defaults(desc->irq_data.chip); | |
		ret = 0;
		break;
	default:
		pr_err("setting trigger mode %lu for irq %u failed (%pF)\n",
		       flags, irq, chip->irq_set_type);
	}
	if (unmask)
		unmask_irq(desc);
	return ret;
}

/*
 * Default primary interrupt handler for threaded interrupts. Is
 * assigned as primary handler when request_threaded_irq is called
 * with handler == NULL. Useful for oneshot interrupts.
 */
static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
{
	return IRQ_WAKE_THREAD;
}

/*
 * Primary handler for nested threaded interrupts. Should never be
 * called.
 */
static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
{
	WARN(1, "Primary handler called for nested irq %d\n", irq);
	return IRQ_NONE;
}

static int irq_wait_for_interrupt(struct irqaction *action)
{
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (test_and_clear_bit(IRQTF_RUNTHREAD,
				       &action->thread_flags)) {
			__set_current_state(TASK_RUNNING);
			return 0;
		}
		schedule();
	}
	return -1;
}

/*
 * Oneshot interrupts keep the irq line masked until the threaded
 * handler has finished. Unmask if the interrupt has not been disabled
 * and is marked MASKED.
 */
static void irq_finalize_oneshot(unsigned int irq, struct irq_desc *desc)
{
again:
	chip_bus_lock(desc);
	raw_spin_lock_irq(&desc->lock);

	/*
	 * Implausible though it may be, we need to protect ourselves
	 * against the following scenario:
	 *
	 * The thread finishes faster than the hard interrupt handler
	 * on the other CPU. If we unmask the irq line then the
	 * interrupt can come in again, masks the line and leaves due
	 * to IRQS_INPROGRESS, and the irq line is masked forever.
	 */
	if (unlikely(desc->istate & IRQS_INPROGRESS)) {
		raw_spin_unlock_irq(&desc->lock);
		chip_bus_sync_unlock(desc);
		cpu_relax();
		goto again;
	}

	if (!(desc->istate & IRQS_DISABLED) && (desc->istate & IRQS_MASKED)) {
		irq_compat_clr_masked(desc);
		desc->istate &= ~IRQS_MASKED;
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
	}
	raw_spin_unlock_irq(&desc->lock);
	chip_bus_sync_unlock(desc);
}

#ifdef CONFIG_SMP
/*
 * Check whether we need to change the affinity of the interrupt thread.
 */
static void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
{
	cpumask_var_t mask;

	if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
		return;

	/*
	 * In case we are out of memory we set IRQTF_AFFINITY again and
	 * try again next time
	 */
	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
		set_bit(IRQTF_AFFINITY, &action->thread_flags);
		return;
	}

	raw_spin_lock_irq(&desc->lock);
	cpumask_copy(mask, desc->irq_data.affinity);
	raw_spin_unlock_irq(&desc->lock);

	set_cpus_allowed_ptr(current, mask);
	free_cpumask_var(mask);
}
#else
static inline void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
#endif

/*
 * Interrupt handler thread
 */
static int irq_thread(void *data)
{
	static const struct sched_param param = {
		.sched_priority = MAX_USER_RT_PRIO/2,
	};
	struct irqaction *action = data;
	struct irq_desc *desc = irq_to_desc(action->irq);
	int wake, oneshot = desc->istate & IRQS_ONESHOT;

	sched_setscheduler(current, SCHED_FIFO, &param);
	current->irqaction = action;

	while (!irq_wait_for_interrupt(action)) {

		irq_thread_check_affinity(desc, action);

		atomic_inc(&desc->threads_active);

		raw_spin_lock_irq(&desc->lock);
		if (unlikely(desc->istate & IRQS_DISABLED)) {
			/*
			 * CHECKME: We might need a dedicated
			 * IRQ_THREAD_PENDING flag here, which
			 * retriggers the thread in check_irq_resend()
			 * but AFAICT IRQS_PENDING should be fine as it
			 * retriggers the interrupt itself --- tglx
			 */
			irq_compat_set_pending(desc);
			desc->istate |= IRQS_PENDING;
			raw_spin_unlock_irq(&desc->lock);
		} else {
			raw_spin_unlock_irq(&desc->lock);

			action->thread_fn(action->irq, action->dev_id);

			if (oneshot)
				irq_finalize_oneshot(action->irq, desc);
		}

		wake = atomic_dec_and_test(&desc->threads_active);

		if (wake && waitqueue_active(&desc->wait_for_threads))
			wake_up(&desc->wait_for_threads);
	}

	/*
	 * Clear irqaction. Otherwise exit_irq_thread() would make
	 * fuzz about an active irq thread going into nirvana.
	 */
	current->irqaction = NULL;
	return 0;
}

/*
 * Called from do_exit()
 */
void exit_irq_thread(void)
{
	struct task_struct *tsk = current;

	if (!tsk->irqaction)
		return;

	printk(KERN_ERR
	       "exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
	       tsk->comm ? tsk->comm : "", tsk->pid, tsk->irqaction->irq);

	/*
	 * Set the THREAD DIED flag to prevent further wakeups of the
	 * soon to be gone threaded handler.
	 */
	set_bit(IRQTF_DIED, &tsk->irqaction->thread_flags);
}

/*
 * Internal function to register an irqaction - typically used to
 * allocate special interrupts that are part of the architecture.
 */
static int
__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
{
	struct irqaction *old, **old_ptr;
	const char *old_name = NULL;
	unsigned long flags;
	int ret, nested, shared = 0;
	cpumask_var_t mask;

	if (!desc)
		return -EINVAL;

	if (desc->irq_data.chip == &no_irq_chip)
		return -ENOSYS;
	/*
	 * Some drivers like serial.c use request_irq() heavily,
	 * so we have to be careful not to interfere with a
	 * running system.
	 */
	if (new->flags & IRQF_SAMPLE_RANDOM) {
		/*
		 * This function might sleep, so we want to call it first,
		 * outside of the atomic block.
		 * Yes, this might clear the entropy pool if the wrong
		 * driver is attempted to be loaded, without actually
		 * installing a new handler, but is this really a problem?
		 * Only the sysadmin is able to do this.
		 */
		rand_initialize_irq(irq);
	}

	/* Oneshot interrupts are not allowed with shared */
	if ((new->flags & IRQF_ONESHOT) && (new->flags & IRQF_SHARED))
		return -EINVAL;

	/*
	 * Check whether the interrupt nests into another interrupt
	 * thread.
	 */
	nested = irq_settings_is_nested_thread(desc);
	if (nested) {
		if (!new->thread_fn)
			return -EINVAL;
		/*
		 * Replace the primary handler which was provided from
		 * the driver for non nested interrupt handling by the
		 * dummy function which warns when called.
		 */
		new->handler = irq_nested_primary_handler;
	}

	/*
	 * Create a handler thread when a thread function is supplied
	 * and the interrupt does not nest into another interrupt
	 * thread.
	 */
	if (new->thread_fn && !nested) {
		struct task_struct *t;

		t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
				   new->name);
		if (IS_ERR(t))
			return PTR_ERR(t);
		/*
		 * We keep the reference to the task struct even if
		 * the thread dies to avoid that the interrupt code
		 * references an already freed task_struct.
		 */
		get_task_struct(t);
		new->thread = t;
	}

	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto out_thread;
	}

	/*
	 * The following block of code has to be executed atomically
	 */
	raw_spin_lock_irqsave(&desc->lock, flags);
	old_ptr = &desc->action;
	old = *old_ptr;
	if (old) {
		/*
		 * Can't share interrupts unless both agree to and are
		 * the same type (level, edge, polarity). So both flag
		 * fields must have IRQF_SHARED set and the bits which
		 * set the trigger type must match.
		 */
		if (!((old->flags & new->flags) & IRQF_SHARED) ||
		    ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK)) {
			old_name = old->name;
			goto mismatch;
		}

		/* All handlers must agree on per-cpuness */
		if ((old->flags & IRQF_PERCPU) !=
		    (new->flags & IRQF_PERCPU))
			goto mismatch;

		/* add new interrupt at end of irq queue */
		do {
			old_ptr = &old->next;
			old = *old_ptr;
		} while (old);
		shared = 1;
	}

	if (!shared) {
		irq_chip_set_defaults(desc->irq_data.chip);

		init_waitqueue_head(&desc->wait_for_threads);

		/* Setup the type (level, edge polarity) if configured: */
		if (new->flags & IRQF_TRIGGER_MASK) {
			ret = __irq_set_trigger(desc, irq,
					new->flags & IRQF_TRIGGER_MASK);

			if (ret)
				goto out_mask;
		}

		desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
				  IRQS_INPROGRESS | IRQS_ONESHOT | \
				  IRQS_WAITING);

		if (new->flags & IRQF_PERCPU) {
			irqd_set(&desc->irq_data, IRQD_PER_CPU);
			irq_settings_set_per_cpu(desc);
		}

		if (new->flags & IRQF_ONESHOT)
			desc->istate |= IRQS_ONESHOT;

		if (irq_settings_can_autoenable(desc))
			irq_startup(desc);
		else
			/* Undo nested disables: */
			desc->depth = 1;

		/* Exclude IRQ from balancing if requested */
		if (new->flags & IRQF_NOBALANCING) {
			irq_settings_set_no_balancing(desc);
			irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
		}

		/* Set default affinity mask once everything is setup */
		setup_affinity(irq, desc, mask);

	} else if (new->flags & IRQF_TRIGGER_MASK) {
		unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
		unsigned int omsk = irq_settings_get_trigger_mask(desc);

		if (nmsk != omsk)
			/* hope the handler works with current trigger mode */
			pr_warning("IRQ %d uses trigger mode %u; requested %u\n",
				   irq, nmsk, omsk);
	}

	new->irq = irq;
	*old_ptr = new;

	/* Reset broken irq detection when installing new handler */
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;

	/*
	 * Check whether we disabled the irq via the spurious handler
	 * before. Reenable it and give it another chance.
	 */
	if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
		desc->istate &= ~IRQS_SPURIOUS_DISABLED;
		__enable_irq(desc, irq, false);
	}

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	/*
	 * Strictly no need to wake it up, but hung_task complains
	 * when no hard interrupt wakes the thread up.
	 */
	if (new->thread)
		wake_up_process(new->thread);

	register_irq_proc(irq, desc);
	new->dir = NULL;
	register_handler_proc(irq, new);

	return 0;

mismatch:
#ifdef CONFIG_DEBUG_SHIRQ
	if (!(new->flags & IRQF_PROBE_SHARED)) {
		printk(KERN_ERR "IRQ handler type mismatch for IRQ %d\n", irq);
		if (old_name)
			printk(KERN_ERR "current handler: %s\n", old_name);
		dump_stack();
	}
#endif
	ret = -EBUSY;

out_mask:
	free_cpumask_var(mask);

out_thread:
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	if (new->thread) {
		struct task_struct *t = new->thread;

		new->thread = NULL;
		if (likely(!test_bit(IRQTF_DIED, &new->thread_flags)))
			kthread_stop(t);
		put_task_struct(t);
	}
	return ret;
}

/**
 * setup_irq - setup an interrupt
 * @irq: Interrupt line to setup
 * @act: irqaction for the interrupt
 *
 * Used to statically setup interrupts in the early boot process.
 */
int setup_irq(unsigned int irq, struct irqaction *act)
{
	int retval;
	struct irq_desc *desc = irq_to_desc(irq);

	chip_bus_lock(desc);
	retval = __setup_irq(irq, desc, act);
	chip_bus_sync_unlock(desc);

	return retval;
}
EXPORT_SYMBOL_GPL(setup_irq);

/*
 * Internal function to unregister an irqaction - used to free
 * regular and special interrupts that are part of the architecture.
 */
static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action, **action_ptr;
	unsigned long flags;

	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);

	if (!desc)
		return NULL;

	raw_spin_lock_irqsave(&desc->lock, flags);

	/*
	 * There can be multiple actions per IRQ descriptor, find the right
	 * one based on the dev_id:
	 */
	action_ptr = &desc->action;
	for (;;) {
		action = *action_ptr;

		if (!action) {
			WARN(1, "Trying to free already-free IRQ %d\n", irq);
			raw_spin_unlock_irqrestore(&desc->lock, flags);

			return NULL;
		}

		if (action->dev_id == dev_id)
			break;
		action_ptr = &action->next;
	}

	/* Found it - now remove it from the list of entries: */
	*action_ptr = action->next;

	/* Currently used only by UML, might disappear one day: */
#ifdef CONFIG_IRQ_RELEASE_METHOD
	if (desc->irq_data.chip->release)
		desc->irq_data.chip->release(irq, dev_id);
#endif

	/* If this was the last handler, shut down the IRQ line: */
	if (!desc->action)
		irq_shutdown(desc);

#ifdef CONFIG_SMP
	/* make sure affinity_hint is cleaned up */
	if (WARN_ON_ONCE(desc->affinity_hint))
		desc->affinity_hint = NULL;
#endif

	raw_spin_unlock_irqrestore(&desc->lock, flags);

	unregister_handler_proc(irq, action);

	/* Make sure it's not being used on another CPU: */
	synchronize_irq(irq);

#ifdef CONFIG_DEBUG_SHIRQ
	/*
	 * It's a shared IRQ -- the driver ought to be prepared for an IRQ
	 * event to happen even now it's being freed, so let's make sure that
	 * is so by doing an extra call to the handler ....
	 *
	 * ( We do this after actually deregistering it, to make sure that a
	 *   'real' IRQ doesn't run in parallel with our fake. )
	 */
	if (action->flags & IRQF_SHARED) {
		local_irq_save(flags);
		action->handler(irq, dev_id);
		local_irq_restore(flags);
	}
#endif

	if (action->thread) {
		if (!test_bit(IRQTF_DIED, &action->thread_flags))
			kthread_stop(action->thread);
		put_task_struct(action->thread);
	}

	return action;
}

/**
 * remove_irq - free an interrupt
 * @irq: Interrupt line to free
 * @act: irqaction for the interrupt
 *
 * Used to remove interrupts statically setup by the early boot process.
 */
void remove_irq(unsigned int irq, struct irqaction *act)
{
	__free_irq(irq, act->dev_id);
}
EXPORT_SYMBOL_GPL(remove_irq);

/**
 * free_irq - free an interrupt allocated with request_irq
 * @irq: Interrupt line to free
 * @dev_id: Device identity to free
 *
 * Remove an interrupt handler. The handler is removed and if the
 * interrupt line is no longer in use by any driver it is disabled.
 * On a shared IRQ the caller must ensure the interrupt is disabled
 * on the card it drives before calling this function. The function
 * does not return until any executing interrupts for this IRQ
 * have completed.
 *
 * This function must not be called from interrupt context.
 */
void free_irq(unsigned int irq, void *dev_id)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return;

#ifdef CONFIG_SMP
	if (WARN_ON(desc->affinity_notify))
		desc->affinity_notify = NULL;
#endif

	chip_bus_lock(desc);
	kfree(__free_irq(irq, dev_id));
	chip_bus_sync_unlock(desc);
}
EXPORT_SYMBOL(free_irq);
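
/*
 * Illustrative sketch, compiled out: teardown order for a shared
 * interrupt - silence the device first so no new interrupt fires for
 * this dev_id, then free_irq(), which itself waits for running
 * handlers. The "foo" names are hypothetical.
 */
#if 0
static void foo_remove(struct foo_dev *foo)
{
	foo_hw_irq_off(foo);		/* stop the device raising irqs */
	free_irq(foo->irq, foo);	/* same dev_id as the request */
}
#endif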
1134 | ||
1135 | /** | |
1136 | * request_threaded_irq - allocate an interrupt line | |
1137 | * @irq: Interrupt line to allocate | |
1138 | * @handler: Function to be called when the IRQ occurs. | |
1139 | * Primary handler for threaded interrupts | |
1140 | * If NULL and thread_fn != NULL the default | |
1141 | * primary handler is installed | |
1142 | * @thread_fn: Function called from the irq handler thread | |
1143 | * If NULL, no irq thread is created | |
1144 | * @irqflags: Interrupt type flags | |
1145 | * @devname: An ascii name for the claiming device | |
1146 | * @dev_id: A cookie passed back to the handler function | |
1147 | * | |
1148 | * This call allocates interrupt resources and enables the | |
1149 | * interrupt line and IRQ handling. From the point this | |
1150 | * call is made your handler function may be invoked. Since | |
1151 | * your handler function must clear any interrupt the board | |
1152 | * raises, you must take care both to initialise your hardware | |
1153 | * and to set up the interrupt handler in the right order. | |
1154 | * | |
1155 | * If you want to set up a threaded irq handler for your device | |
 * then you need to supply @handler and @thread_fn. @handler is
 * still called in hard interrupt context and has to check
 * whether the interrupt originates from the device. If yes it
 * needs to disable the interrupt on the device and return
 * IRQ_WAKE_THREAD which will wake up the handler thread and run
 * @thread_fn. This split handler design is necessary to support
 * shared interrupts.
 *
 * Dev_id must be globally unique. Normally the address of the
 * device data structure is used as the cookie. Since the handler
 * receives this value it makes sense to use it.
 *
 * If your interrupt is shared you must pass a non NULL dev_id
 * as this is required when freeing the interrupt.
 *
 * Flags:
 *
 *	IRQF_SHARED		Interrupt is shared
 *	IRQF_SAMPLE_RANDOM	The interrupt can be used for entropy
 *	IRQF_TRIGGER_*		Specify active edge(s) or level
 *
 */
int request_threaded_irq(unsigned int irq, irq_handler_t handler,
			 irq_handler_t thread_fn, unsigned long irqflags,
			 const char *devname, void *dev_id)
{
	struct irqaction *action;
	struct irq_desc *desc;
	int retval;

	/*
	 * Sanity-check: shared interrupts must pass in a real dev-ID,
	 * otherwise we'll have trouble later trying to figure out
	 * which interrupt is which (messes up the interrupt freeing
	 * logic etc).
	 */
	if ((irqflags & IRQF_SHARED) && !dev_id)
		return -EINVAL;

	desc = irq_to_desc(irq);
	if (!desc)
		return -EINVAL;

	if (!irq_settings_can_request(desc))
		return -EINVAL;

	if (!handler) {
		if (!thread_fn)
			return -EINVAL;
		handler = irq_default_primary_handler;
	}

	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
	if (!action)
		return -ENOMEM;

	action->handler = handler;
	action->thread_fn = thread_fn;
	action->flags = irqflags;
	action->name = devname;
	action->dev_id = dev_id;

	chip_bus_lock(desc);
	retval = __setup_irq(irq, desc, action);
	chip_bus_sync_unlock(desc);

	if (retval)
		kfree(action);

#ifdef CONFIG_DEBUG_SHIRQ_FIXME
	if (!retval && (irqflags & IRQF_SHARED)) {
		/*
		 * It's a shared IRQ -- the driver ought to be prepared for it
		 * to happen immediately, so let's make sure....
		 * We disable the irq to make sure that a 'real' IRQ doesn't
		 * run in parallel with our fake.
		 */
		unsigned long flags;

		disable_irq(irq);
		local_irq_save(flags);

		handler(irq, dev_id);

		local_irq_restore(flags);
		enable_irq(irq);
	}
#endif
	return retval;
}
EXPORT_SYMBOL(request_threaded_irq);
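
/*
 * Illustrative sketch, compiled out: the split handler pattern
 * described above. The primary handler only checks and quiets the
 * device; the threaded handler does the sleepable work. All "foo"
 * names, including the hardware helpers, are hypothetical.
 */
#if 0
static irqreturn_t foo_quick_check(int irq, void *dev_id)
{
	struct foo_dev *foo = dev_id;

	if (!foo_irq_is_ours(foo))	/* shared line: maybe not for us */
		return IRQ_NONE;

	foo_hw_irq_off(foo);		/* quiet the device ... */
	return IRQ_WAKE_THREAD;		/* ... and defer to the thread */
}

static irqreturn_t foo_thread_fn(int irq, void *dev_id)
{
	struct foo_dev *foo = dev_id;

	foo_process_events(foo);	/* may sleep: runs in a kthread */
	foo_hw_irq_on(foo);
	return IRQ_HANDLED;
}

static int foo_setup_irq(struct foo_dev *foo)
{
	return request_threaded_irq(foo->irq, foo_quick_check,
				    foo_thread_fn, IRQF_SHARED,
				    "foo", foo);
}
#endif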
1247 | ||
1248 | /** | |
1249 | * request_any_context_irq - allocate an interrupt line | |
1250 | * @irq: Interrupt line to allocate | |
1251 | * @handler: Function to be called when the IRQ occurs. | |
1252 | * Threaded handler for threaded interrupts. | |
1253 | * @flags: Interrupt type flags | |
1254 | * @name: An ascii name for the claiming device | |
1255 | * @dev_id: A cookie passed back to the handler function | |
1256 | * | |
1257 | * This call allocates interrupt resources and enables the | |
1258 | * interrupt line and IRQ handling. It selects either a | |
1259 | * hardirq or threaded handling method depending on the | |
1260 | * context. | |
1261 | * | |
1262 | * On failure, it returns a negative value. On success, | |
1263 | * it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED. | |
1264 | */ | |
1265 | int request_any_context_irq(unsigned int irq, irq_handler_t handler, | |
1266 | unsigned long flags, const char *name, void *dev_id) | |
1267 | { | |
1268 | struct irq_desc *desc = irq_to_desc(irq); | |
1269 | int ret; | |
1270 | ||
1271 | if (!desc) | |
1272 | return -EINVAL; | |
1273 | ||
1274 | if (irq_settings_is_nested_thread(desc)) { | |
1275 | ret = request_threaded_irq(irq, NULL, handler, | |
1276 | flags, name, dev_id); | |
1277 | return !ret ? IRQC_IS_NESTED : ret; | |
1278 | } | |
1279 | ||
1280 | ret = request_irq(irq, handler, flags, name, dev_id); | |
1281 | return !ret ? IRQC_IS_HARDIRQ : ret; | |
1282 | } | |
1283 | EXPORT_SYMBOL_GPL(request_any_context_irq); |
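
/*
 * Illustrative sketch, compiled out: callers of
 * request_any_context_irq() learn which context was chosen and can
 * adapt their locking accordingly. The "foo" names are hypothetical.
 */
#if 0
static irqreturn_t foo_handler(int irq, void *dev_id)
{
	/* may run in hardirq or in thread context, see below */
	return IRQ_HANDLED;
}

static int foo_attach_irq(struct foo_dev *foo)
{
	int ret;

	ret = request_any_context_irq(foo->irq, foo_handler, 0,
				      "foo", foo);
	if (ret < 0)
		return ret;

	/* IRQC_IS_NESTED means foo_handler runs in a thread */
	foo->handler_may_sleep = (ret == IRQC_IS_NESTED);
	return 0;
}
#endif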