1 /*
2 * linux/kernel/irq/manage.c
3 *
4 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
5 * Copyright (C) 2005-2006 Thomas Gleixner
6 *
7 * This file contains driver APIs to the irq subsystem.
8 */
9
10 #include <linux/irq.h>
11 #include <linux/kthread.h>
12 #include <linux/module.h>
13 #include <linux/random.h>
14 #include <linux/interrupt.h>
15 #include <linux/slab.h>
16 #include <linux/sched.h>
17
18 #include "internals.h"
19
20 #ifdef CONFIG_IRQ_FORCED_THREADING
21 __read_mostly bool force_irqthreads;
22
23 static int __init setup_forced_irqthreads(char *arg)
24 {
25 force_irqthreads = true;
26 return 0;
27 }
28 early_param("threadirqs", setup_forced_irqthreads);
29 #endif
30
31 /**
32 * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
33 * @irq: interrupt number to wait for
34 *
35 * This function waits for any pending IRQ handlers for this interrupt
36 * to complete before returning. If you use this function while
37 * holding a resource the IRQ handler may need, you will deadlock.
38 *
39 * This function may be called - with care - from IRQ context.
40 */
41 void synchronize_irq(unsigned int irq)
42 {
43 struct irq_desc *desc = irq_to_desc(irq);
44 bool inprogress;
45
46 if (!desc)
47 return;
48
49 do {
50 unsigned long flags;
51
52 /*
53 * Wait until we're out of the critical section. This might
54 * give the wrong answer due to the lack of memory barriers.
55 */
56 while (irqd_irq_inprogress(&desc->irq_data))
57 cpu_relax();
58
59 /* Ok, that indicated we're done: double-check carefully. */
60 raw_spin_lock_irqsave(&desc->lock, flags);
61 inprogress = irqd_irq_inprogress(&desc->irq_data);
62 raw_spin_unlock_irqrestore(&desc->lock, flags);
63
64 /* Oops, that failed? */
65 } while (inprogress);
66
67 /*
68 * We made sure that no hardirq handler is running. Now verify
69 * that no threaded handlers are active.
70 */
71 wait_event(desc->wait_for_threads, !atomic_read(&desc->threads_active));
72 }
73 EXPORT_SYMBOL(synchronize_irq);
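/*
 * Example (editor's sketch, not part of this file): a typical driver
 * teardown path. The foo_* names are hypothetical; the point is that
 * the device is quiesced first, then synchronize_irq() waits out any
 * handler still running on another CPU before shared data is freed.
 */
static void foo_shutdown(struct foo_device *foo)
{
	foo_hw_mask_irqs(foo);		/* stop new interrupts at the source */
	synchronize_irq(foo->irq);	/* wait for in-flight handlers */
	kfree(foo->dma_buf);		/* now safe: no handler can touch it */
}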
74
75 #ifdef CONFIG_SMP
76 cpumask_var_t irq_default_affinity;
77
78 /**
79 * irq_can_set_affinity - Check if the affinity of a given irq can be set
80 * @irq: Interrupt to check
81 *
82 */
83 int irq_can_set_affinity(unsigned int irq)
84 {
85 struct irq_desc *desc = irq_to_desc(irq);
86
87 if (!desc || !irqd_can_balance(&desc->irq_data) ||
88 !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
89 return 0;
90
91 return 1;
92 }
93
94 /**
95 * irq_set_thread_affinity - Notify irq threads to adjust affinity
96 * @desc: irq descriptor which has affinity changed
97 *
98 * We just set IRQTF_AFFINITY and delegate the affinity setting
99 * to the interrupt thread itself. We can not call
100 * set_cpus_allowed_ptr() here as we hold desc->lock and this
101 * code can be called from hard interrupt context.
102 */
103 void irq_set_thread_affinity(struct irq_desc *desc)
104 {
105 struct irqaction *action = desc->action;
106
107 while (action) {
108 if (action->thread)
109 set_bit(IRQTF_AFFINITY, &action->thread_flags);
110 action = action->next;
111 }
112 }
113
114 #ifdef CONFIG_GENERIC_PENDING_IRQ
115 static inline bool irq_can_move_pcntxt(struct irq_data *data)
116 {
117 return irqd_can_move_in_process_context(data);
118 }
119 static inline bool irq_move_pending(struct irq_data *data)
120 {
121 return irqd_is_setaffinity_pending(data);
122 }
123 static inline void
124 irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)
125 {
126 cpumask_copy(desc->pending_mask, mask);
127 }
128 static inline void
129 irq_get_pending(struct cpumask *mask, struct irq_desc *desc)
130 {
131 cpumask_copy(mask, desc->pending_mask);
132 }
133 #else
134 static inline bool irq_can_move_pcntxt(struct irq_data *data) { return true; }
135 static inline bool irq_move_pending(struct irq_data *data) { return false; }
136 static inline void
137 irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { }
138 static inline void
139 irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { }
140 #endif
141
142 int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
143 {
144 struct irq_chip *chip = irq_data_get_irq_chip(data);
145 struct irq_desc *desc = irq_data_to_desc(data);
146 int ret = 0;
147
148 if (!chip || !chip->irq_set_affinity)
149 return -EINVAL;
150
151 if (irq_can_move_pcntxt(data)) {
152 ret = chip->irq_set_affinity(data, mask, false);
153 switch (ret) {
154 case IRQ_SET_MASK_OK:
155 cpumask_copy(data->affinity, mask); /* fall through */
156 case IRQ_SET_MASK_OK_NOCOPY:
157 irq_set_thread_affinity(desc);
158 ret = 0;
159 }
160 } else {
161 irqd_set_move_pending(data);
162 irq_copy_pending(desc, mask);
163 }
164
165 if (desc->affinity_notify) {
166 kref_get(&desc->affinity_notify->kref);
167 schedule_work(&desc->affinity_notify->work);
168 }
169 irqd_set(data, IRQD_AFFINITY_SET);
170
171 return ret;
172 }
173
174 /**
175 * irq_set_affinity - Set the irq affinity of a given irq
176 * @irq: Interrupt to set affinity
177 * @mask: cpumask
178 *
179 */
180 int irq_set_affinity(unsigned int irq, const struct cpumask *mask)
181 {
182 struct irq_desc *desc = irq_to_desc(irq);
183 unsigned long flags;
184 int ret;
185
186 if (!desc)
187 return -EINVAL;
188
189 raw_spin_lock_irqsave(&desc->lock, flags);
190 ret = __irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask);
191 raw_spin_unlock_irqrestore(&desc->lock, flags);
192 return ret;
193 }
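/*
 * Example (editor's sketch): steering an interrupt to a single CPU
 * from built-in kernel code. cpumask_of() yields a mask with exactly
 * one bit set; userspace would write /proc/irq/<n>/smp_affinity instead.
 */
static int foo_pin_irq_to_cpu(unsigned int irq, int cpu)
{
	return irq_set_affinity(irq, cpumask_of(cpu));
}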
194
195 int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
196 {
197 unsigned long flags;
198 struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
199
200 if (!desc)
201 return -EINVAL;
202 desc->affinity_hint = m;
203 irq_put_desc_unlock(desc, flags);
204 return 0;
205 }
206 EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
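/*
 * Example (editor's sketch): a multiqueue driver publishing one hint
 * per queue for irqbalance to read via /proc/irq/<n>/affinity_hint.
 * The hint must be reset to NULL before the irq is freed (see the
 * WARN_ON_ONCE in __free_irq() below). The foo_* names are hypothetical.
 */
static void foo_set_hints(struct foo_device *foo, bool enable)
{
	int i;

	for (i = 0; i < foo->nr_queues; i++)
		irq_set_affinity_hint(foo->queue_irq[i],
				      enable ? cpumask_of(i) : NULL);
}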
207
208 static void irq_affinity_notify(struct work_struct *work)
209 {
210 struct irq_affinity_notify *notify =
211 container_of(work, struct irq_affinity_notify, work);
212 struct irq_desc *desc = irq_to_desc(notify->irq);
213 cpumask_var_t cpumask;
214 unsigned long flags;
215
216 if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
217 goto out;
218
219 raw_spin_lock_irqsave(&desc->lock, flags);
220 if (irq_move_pending(&desc->irq_data))
221 irq_get_pending(cpumask, desc);
222 else
223 cpumask_copy(cpumask, desc->irq_data.affinity);
224 raw_spin_unlock_irqrestore(&desc->lock, flags);
225
226 notify->notify(notify, cpumask);
227
228 free_cpumask_var(cpumask);
229 out:
230 kref_put(&notify->kref, notify->release);
231 }
232
233 /**
234 * irq_set_affinity_notifier - control notification of IRQ affinity changes
235 * @irq: Interrupt for which to enable/disable notification
236 * @notify: Context for notification, or %NULL to disable
237 * notification. Function pointers must be initialised;
238 * the other fields will be initialised by this function.
239 *
240 * Must be called in process context. Notification may only be enabled
241 * after the IRQ is allocated and must be disabled before the IRQ is
242 * freed using free_irq().
243 */
244 int
245 irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
246 {
247 struct irq_desc *desc = irq_to_desc(irq);
248 struct irq_affinity_notify *old_notify;
249 unsigned long flags;
250
251 /* The release function is promised process context */
252 might_sleep();
253
254 if (!desc)
255 return -EINVAL;
256
257 /* Complete initialisation of *notify */
258 if (notify) {
259 notify->irq = irq;
260 kref_init(&notify->kref);
261 INIT_WORK(&notify->work, irq_affinity_notify);
262 }
263
264 raw_spin_lock_irqsave(&desc->lock, flags);
265 old_notify = desc->affinity_notify;
266 desc->affinity_notify = notify;
267 raw_spin_unlock_irqrestore(&desc->lock, flags);
268
269 if (old_notify)
270 kref_put(&old_notify->kref, old_notify->release);
271
272 return 0;
273 }
274 EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
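/*
 * Example (editor's sketch): hooking affinity-change notifications.
 * Only ->notify() and ->release() must be set by the caller; ->irq,
 * ->kref and ->work are filled in by irq_set_affinity_notifier().
 * The foo_ctx container is hypothetical.
 */
struct foo_ctx {
	struct irq_affinity_notify affinity_notify;
	/* ... driver state ... */
};

static void foo_affinity_changed(struct irq_affinity_notify *notify,
				 const cpumask_t *mask)
{
	struct foo_ctx *foo =
		container_of(notify, struct foo_ctx, affinity_notify);

	/* re-steer per-CPU resources of foo to the new mask here */
}

static void foo_notify_release(struct kref *ref)
{
	/* drop any reference taken when the notifier was registered */
}

static int foo_enable_notify(struct foo_ctx *foo, unsigned int irq)
{
	foo->affinity_notify.notify = foo_affinity_changed;
	foo->affinity_notify.release = foo_notify_release;
	return irq_set_affinity_notifier(irq, &foo->affinity_notify);
}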
275
276 #ifndef CONFIG_AUTO_IRQ_AFFINITY
277 /*
278 * Generic version of the affinity autoselector.
279 */
280 static int
281 setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
282 {
283 struct irq_chip *chip = irq_desc_get_chip(desc);
284 struct cpumask *set = irq_default_affinity;
285 int ret;
286
287 /* Excludes PER_CPU and NO_BALANCE interrupts */
288 if (!irq_can_set_affinity(irq))
289 return 0;
290
291 /*
292 * Preserve a userspace affinity setup, but make sure that
293 * one of the targets is online.
294 */
295 if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
296 if (cpumask_intersects(desc->irq_data.affinity,
297 cpu_online_mask))
298 set = desc->irq_data.affinity;
299 else
300 irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
301 }
302
303 cpumask_and(mask, cpu_online_mask, set);
304 ret = chip->irq_set_affinity(&desc->irq_data, mask, false);
305 switch (ret) {
306 case IRQ_SET_MASK_OK:
307 cpumask_copy(desc->irq_data.affinity, mask); /* fall through */
308 case IRQ_SET_MASK_OK_NOCOPY:
309 irq_set_thread_affinity(desc);
310 }
311 return 0;
312 }
313 #else
314 static inline int
315 setup_affinity(unsigned int irq, struct irq_desc *d, struct cpumask *mask)
316 {
317 return irq_select_affinity(irq);
318 }
319 #endif
320
321 /*
322 * Called when affinity is set via /proc/irq
323 */
324 int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask)
325 {
326 struct irq_desc *desc = irq_to_desc(irq);
327 unsigned long flags;
328 int ret;
329
330 raw_spin_lock_irqsave(&desc->lock, flags);
331 ret = setup_affinity(irq, desc, mask);
332 raw_spin_unlock_irqrestore(&desc->lock, flags);
333 return ret;
334 }
335
336 #else
337 static inline int
338 setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
339 {
340 return 0;
341 }
342 #endif
343
344 void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend)
345 {
346 if (suspend) {
347 if (!desc->action || (desc->action->flags & IRQF_NO_SUSPEND))
348 return;
349 desc->istate |= IRQS_SUSPENDED;
350 }
351
352 if (!desc->depth++)
353 irq_disable(desc);
354 }
355
356 static int __disable_irq_nosync(unsigned int irq)
357 {
358 unsigned long flags;
359 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
360
361 if (!desc)
362 return -EINVAL;
363 __disable_irq(desc, irq, false);
364 irq_put_desc_busunlock(desc, flags);
365 return 0;
366 }
367
368 /**
369 * disable_irq_nosync - disable an irq without waiting
370 * @irq: Interrupt to disable
371 *
372 * Disable the selected interrupt line. Disables and Enables are
373 * nested.
374 * Unlike disable_irq(), this function does not ensure existing
375 * instances of the IRQ handler have completed before returning.
376 *
377 * This function may be called from IRQ context.
378 */
379 void disable_irq_nosync(unsigned int irq)
380 {
381 __disable_irq_nosync(irq);
382 }
383 EXPORT_SYMBOL(disable_irq_nosync);
384
385 /**
386 * disable_irq - disable an irq and wait for completion
387 * @irq: Interrupt to disable
388 *
389 * Disable the selected interrupt line. Enables and Disables are
390 * nested.
391 * This function waits for any pending IRQ handlers for this interrupt
392 * to complete before returning. If you use this function while
393 * holding a resource the IRQ handler may need, you will deadlock.
394 *
395 * This function may be called - with care - from IRQ context.
396 */
397 void disable_irq(unsigned int irq)
398 {
399 if (!__disable_irq_nosync(irq))
400 synchronize_irq(irq);
401 }
402 EXPORT_SYMBOL(disable_irq);
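/*
 * Example (editor's sketch): the classic disable_irq()/enable_irq()
 * bracket around a reconfiguration step. disable_irq() may sleep
 * waiting for handlers, so this is process-context only; the foo_*
 * names are hypothetical.
 */
static void foo_reprogram(struct foo_device *foo)
{
	disable_irq(foo->irq);		/* no handler runs past this point */
	foo_rewrite_registers(foo);	/* safe to touch handler-shared state */
	enable_irq(foo->irq);		/* balances the disable above */
}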
403
404 void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
405 {
406 if (resume) {
407 if (!(desc->istate & IRQS_SUSPENDED)) {
408 if (!desc->action)
409 return;
410 if (!(desc->action->flags & IRQF_FORCE_RESUME))
411 return;
412 /* Pretend that it got disabled ! */
413 desc->depth++;
414 }
415 desc->istate &= ~IRQS_SUSPENDED;
416 }
417
418 switch (desc->depth) {
419 case 0:
420 err_out:
421 WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
422 break;
423 case 1: {
424 if (desc->istate & IRQS_SUSPENDED)
425 goto err_out;
426 /* Prevent probing on this irq: */
427 irq_settings_set_noprobe(desc);
428 irq_enable(desc);
429 check_irq_resend(desc, irq);
430 /* fall-through */
431 }
432 default:
433 desc->depth--;
434 }
435 }
436
437 /**
438 * enable_irq - enable handling of an irq
439 * @irq: Interrupt to enable
440 *
441 * Undoes the effect of one call to disable_irq(). If this
442 * matches the last disable, processing of interrupts on this
443 * IRQ line is re-enabled.
444 *
445 * This function may be called from IRQ context only when
446 * desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL !
447 */
448 void enable_irq(unsigned int irq)
449 {
450 unsigned long flags;
451 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
452
453 if (!desc)
454 return;
455 if (WARN(!desc->irq_data.chip,
456 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
457 goto out;
458
459 __enable_irq(desc, irq, false);
460 out:
461 irq_put_desc_busunlock(desc, flags);
462 }
463 EXPORT_SYMBOL(enable_irq);
464
465 static int set_irq_wake_real(unsigned int irq, unsigned int on)
466 {
467 struct irq_desc *desc = irq_to_desc(irq);
468 int ret = -ENXIO;
469
470 if (desc->irq_data.chip->irq_set_wake)
471 ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);
472
473 return ret;
474 }
475
476 /**
477 * irq_set_irq_wake - control irq power management wakeup
478 * @irq: interrupt to control
479 * @on: enable/disable power management wakeup
480 *
481 * Enable/disable power management wakeup mode, which is
482 * disabled by default. Enables and disables must match,
483 * just as they match for non-wakeup mode support.
484 *
485 * Wakeup mode lets this IRQ wake the system from sleep
486 * states like "suspend to RAM".
487 */
488 int irq_set_irq_wake(unsigned int irq, unsigned int on)
489 {
490 unsigned long flags;
491 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
492 int ret = 0;
493
494 if (!desc)
495 return -EINVAL;
496
497 /* wakeup-capable irqs can be shared between drivers that
498 * don't need to have the same sleep mode behaviors.
499 */
500 if (on) {
501 if (desc->wake_depth++ == 0) {
502 ret = set_irq_wake_real(irq, on);
503 if (ret)
504 desc->wake_depth = 0;
505 else
506 irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
507 }
508 } else {
509 if (desc->wake_depth == 0) {
510 WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
511 } else if (--desc->wake_depth == 0) {
512 ret = set_irq_wake_real(irq, on);
513 if (ret)
514 desc->wake_depth = 1;
515 else
516 irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
517 }
518 }
519 irq_put_desc_busunlock(desc, flags);
520 return ret;
521 }
522 EXPORT_SYMBOL(irq_set_irq_wake);
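/*
 * Example (editor's sketch): arming an interrupt as a wakeup source
 * from a driver's dev_pm_ops callbacks, guarded by the usual
 * device_may_wakeup() check. The foo_* names are hypothetical.
 */
static int foo_suspend(struct device *dev)
{
	struct foo_device *foo = dev_get_drvdata(dev);

	if (device_may_wakeup(dev))
		irq_set_irq_wake(foo->irq, 1);
	return 0;
}

static int foo_resume(struct device *dev)
{
	struct foo_device *foo = dev_get_drvdata(dev);

	if (device_may_wakeup(dev))
		irq_set_irq_wake(foo->irq, 0);	/* balance the enable */
	return 0;
}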
523
524 /*
525 * Internal function that tells the architecture code whether a
526 * particular irq has been exclusively allocated or is available
527 * for driver use.
528 */
529 int can_request_irq(unsigned int irq, unsigned long irqflags)
530 {
531 unsigned long flags;
532 struct irq_desc *desc = irq_get_desc_lock(irq, &flags);
533 int canrequest = 0;
534
535 if (!desc)
536 return 0;
537
538 if (irq_settings_can_request(desc)) {
539 if (desc->action)
540 if (irqflags & desc->action->flags & IRQF_SHARED)
541 canrequest = 1;
542 }
543 irq_put_desc_unlock(desc, flags);
544 return canrequest;
545 }
546
547 int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
548 unsigned long flags)
549 {
550 struct irq_chip *chip = desc->irq_data.chip;
551 int ret, unmask = 0;
552
553 if (!chip || !chip->irq_set_type) {
554 /*
555 * IRQF_TRIGGER_* but the PIC does not support multiple
556 * flow-types?
557 */
558 pr_debug("No set_type function for IRQ %d (%s)\n", irq,
559 chip ? (chip->name ? : "unknown") : "unknown");
560 return 0;
561 }
562
563 flags &= IRQ_TYPE_SENSE_MASK;
564
565 if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
566 if (!irqd_irq_masked(&desc->irq_data))
567 mask_irq(desc);
568 if (!irqd_irq_disabled(&desc->irq_data))
569 unmask = 1;
570 }
571
572 /* caller masked out all except trigger mode flags */
573 ret = chip->irq_set_type(&desc->irq_data, flags);
574
575 switch (ret) {
576 case IRQ_SET_MASK_OK:
577 irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
578 irqd_set(&desc->irq_data, flags); /* fall through */
579
580 case IRQ_SET_MASK_OK_NOCOPY:
581 flags = irqd_get_trigger_type(&desc->irq_data);
582 irq_settings_set_trigger_mask(desc, flags);
583 irqd_clear(&desc->irq_data, IRQD_LEVEL);
584 irq_settings_clr_level(desc);
585 if (flags & IRQ_TYPE_LEVEL_MASK) {
586 irq_settings_set_level(desc);
587 irqd_set(&desc->irq_data, IRQD_LEVEL);
588 }
589
590 ret = 0;
591 break;
592 default:
593 pr_err("setting trigger mode %lu for irq %u failed (%pF)\n",
594 flags, irq, chip->irq_set_type);
595 }
596 if (unmask)
597 unmask_irq(desc);
598 return ret;
599 }
600
601 /*
602 * Default primary interrupt handler for threaded interrupts. Is
603 * assigned as primary handler when request_threaded_irq is called
604 * with handler == NULL. Useful for oneshot interrupts.
605 */
606 static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
607 {
608 return IRQ_WAKE_THREAD;
609 }
610
611 /*
612 * Primary handler for nested threaded interrupts. Should never be
613 * called.
614 */
615 static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
616 {
617 WARN(1, "Primary handler called for nested irq %d\n", irq);
618 return IRQ_NONE;
619 }
620
621 static int irq_wait_for_interrupt(struct irqaction *action)
622 {
623 while (!kthread_should_stop()) {
624 set_current_state(TASK_INTERRUPTIBLE);
625
626 if (test_and_clear_bit(IRQTF_RUNTHREAD,
627 &action->thread_flags)) {
628 __set_current_state(TASK_RUNNING);
629 return 0;
630 }
631 schedule();
632 }
633 return -1;
634 }
635
636 /*
637 * Oneshot interrupts keep the irq line masked until the threaded
638 * handler has finished. Unmask if the interrupt has not been disabled
639 * and is marked MASKED.
640 */
641 static void irq_finalize_oneshot(struct irq_desc *desc,
642 struct irqaction *action, bool force)
643 {
644 if (!(desc->istate & IRQS_ONESHOT))
645 return;
646 again:
647 chip_bus_lock(desc);
648 raw_spin_lock_irq(&desc->lock);
649
650 /*
651 * Implausible though it may be, we have to protect against
652 * the following scenario:
653 *
654 * The thread finishes faster than the hard interrupt handler
655 * on the other CPU. If we unmask the irq line, the interrupt
656 * can come in again, mask the line, and bail out due to
657 * IRQS_INPROGRESS, leaving the irq line masked forever.
658 *
659 * This also serializes the state of shared oneshot handlers
660 * versus "desc->threads_oneshot |= action->thread_mask;" in
661 * irq_wake_thread(). See the comment there which explains the
662 * serialization.
663 */
664 if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
665 raw_spin_unlock_irq(&desc->lock);
666 chip_bus_sync_unlock(desc);
667 cpu_relax();
668 goto again;
669 }
670
671 /*
672 * Now check again whether the thread should run. Otherwise
673 * we would clear the threads_oneshot bit of this thread which
674 * was just set.
675 */
676 if (!force && test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
677 goto out_unlock;
678
679 desc->threads_oneshot &= ~action->thread_mask;
680
681 if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
682 irqd_irq_masked(&desc->irq_data))
683 unmask_irq(desc);
684
685 out_unlock:
686 raw_spin_unlock_irq(&desc->lock);
687 chip_bus_sync_unlock(desc);
688 }
689
690 #ifdef CONFIG_SMP
691 /*
692 * Check whether we need to change the affinity of the interrupt thread.
693 */
694 static void
695 irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
696 {
697 cpumask_var_t mask;
698
699 if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
700 return;
701
702 /*
703 * In case we are out of memory we set IRQTF_AFFINITY again and
704 * try again next time
705 */
706 if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
707 set_bit(IRQTF_AFFINITY, &action->thread_flags);
708 return;
709 }
710
711 raw_spin_lock_irq(&desc->lock);
712 cpumask_copy(mask, desc->irq_data.affinity);
713 raw_spin_unlock_irq(&desc->lock);
714
715 set_cpus_allowed_ptr(current, mask);
716 free_cpumask_var(mask);
717 }
718 #else
719 static inline void
720 irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
721 #endif
722
723 /*
724 * Interrupts which are not explicitly requested as threaded
725 * interrupts rely on the implicit bh/preempt disable of the hard irq
726 * context. So we need to disable bh here to avoid deadlocks and other
727 * side effects.
728 */
729 static irqreturn_t
730 irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
731 {
732 irqreturn_t ret;
733
734 local_bh_disable();
735 ret = action->thread_fn(action->irq, action->dev_id);
736 irq_finalize_oneshot(desc, action, false);
737 local_bh_enable();
738 return ret;
739 }
740
741 /*
742 * Interrupts explicitly requested as threaded interrupts want to be
743 * preemptible - many of them need to sleep and wait for slow buses to
744 * complete.
745 */
746 static irqreturn_t irq_thread_fn(struct irq_desc *desc,
747 struct irqaction *action)
748 {
749 irqreturn_t ret;
750
751 ret = action->thread_fn(action->irq, action->dev_id);
752 irq_finalize_oneshot(desc, action, false);
753 return ret;
754 }
755
756 /*
757 * Interrupt handler thread
758 */
759 static int irq_thread(void *data)
760 {
761 static const struct sched_param param = {
762 .sched_priority = MAX_USER_RT_PRIO/2,
763 };
764 struct irqaction *action = data;
765 struct irq_desc *desc = irq_to_desc(action->irq);
766 irqreturn_t (*handler_fn)(struct irq_desc *desc,
767 struct irqaction *action);
768 int wake;
769
770 if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
771 &action->thread_flags))
772 handler_fn = irq_forced_thread_fn;
773 else
774 handler_fn = irq_thread_fn;
775
776 sched_setscheduler(current, SCHED_FIFO, &param);
777 current->irqaction = action;
778
779 while (!irq_wait_for_interrupt(action)) {
780
781 irq_thread_check_affinity(desc, action);
782
783 atomic_inc(&desc->threads_active);
784
785 raw_spin_lock_irq(&desc->lock);
786 if (unlikely(irqd_irq_disabled(&desc->irq_data))) {
787 /*
788 * CHECKME: We might need a dedicated
789 * IRQ_THREAD_PENDING flag here, which
790 * retriggers the thread in check_irq_resend()
791 * but AFAICT IRQS_PENDING should be fine as it
792 * retriggers the interrupt itself --- tglx
793 */
794 desc->istate |= IRQS_PENDING;
795 raw_spin_unlock_irq(&desc->lock);
796 } else {
797 irqreturn_t action_ret;
798
799 raw_spin_unlock_irq(&desc->lock);
800 action_ret = handler_fn(desc, action);
801 if (!noirqdebug)
802 note_interrupt(action->irq, desc, action_ret);
803 }
804
805 wake = atomic_dec_and_test(&desc->threads_active);
806
807 if (wake && waitqueue_active(&desc->wait_for_threads))
808 wake_up(&desc->wait_for_threads);
809 }
810
811 /* Prevent a stale desc->threads_oneshot */
812 irq_finalize_oneshot(desc, action, true);
813
814 /*
815 * Clear irqaction. Otherwise exit_irq_thread() would make a
816 * fuss about an active irq thread going into nirvana.
817 */
818 current->irqaction = NULL;
819 return 0;
820 }
821
822 /*
823 * Called from do_exit()
824 */
825 void exit_irq_thread(void)
826 {
827 struct task_struct *tsk = current;
828 struct irq_desc *desc;
829
830 if (!tsk->irqaction)
831 return;
832
833 printk(KERN_ERR
834 "exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
835 tsk->comm ? tsk->comm : "", tsk->pid, tsk->irqaction->irq);
836
837 desc = irq_to_desc(tsk->irqaction->irq);
838
839 /*
840 * Prevent a stale desc->threads_oneshot. Must be called
841 * before setting the IRQTF_DIED flag.
842 */
843 irq_finalize_oneshot(desc, tsk->irqaction, true);
844
845 /*
846 * Set the THREAD DIED flag to prevent further wakeups of the
847 * soon to be gone threaded handler.
848 */
849 set_bit(IRQTF_DIED, &tsk->irqaction->flags);
850 }
851
852 static void irq_setup_forced_threading(struct irqaction *new)
853 {
854 if (!force_irqthreads)
855 return;
856 if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
857 return;
858
859 new->flags |= IRQF_ONESHOT;
860
861 if (!new->thread_fn) {
862 set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
863 new->thread_fn = new->handler;
864 new->handler = irq_default_primary_handler;
865 }
866 }
867
868 /*
869 * Internal function to register an irqaction - typically used to
870 * allocate special interrupts that are part of the architecture.
871 */
872 static int
873 __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
874 {
875 struct irqaction *old, **old_ptr;
876 const char *old_name = NULL;
877 unsigned long flags, thread_mask = 0;
878 int ret, nested, shared = 0;
879 cpumask_var_t mask;
880
881 if (!desc)
882 return -EINVAL;
883
884 if (desc->irq_data.chip == &no_irq_chip)
885 return -ENOSYS;
886 if (!try_module_get(desc->owner))
887 return -ENODEV;
888 /*
889 * Some drivers like serial.c use request_irq() heavily,
890 * so we have to be careful not to interfere with a
891 * running system.
892 */
893 if (new->flags & IRQF_SAMPLE_RANDOM) {
894 /*
895 * This function might sleep, we want to call it first,
896 * outside of the atomic block.
897 * Yes, this might clear the entropy pool if the wrong
898 * driver is attempted to be loaded, without actually
899 * installing a new handler, but that is hardly a problem,
900 * as only the sysadmin is able to do this.
901 */
902 rand_initialize_irq(irq);
903 }
904
905 /*
906 * Check whether the interrupt nests into another interrupt
907 * thread.
908 */
909 nested = irq_settings_is_nested_thread(desc);
910 if (nested) {
911 if (!new->thread_fn) {
912 ret = -EINVAL;
913 goto out_mput;
914 }
915 /*
916 * Replace the primary handler, which the driver provided
917 * for non-nested interrupt handling, with the dummy
918 * function which warns when called.
919 */
920 new->handler = irq_nested_primary_handler;
921 } else {
922 if (irq_settings_can_thread(desc))
923 irq_setup_forced_threading(new);
924 }
925
926 /*
927 * Create a handler thread when a thread function is supplied
928 * and the interrupt does not nest into another interrupt
929 * thread.
930 */
931 if (new->thread_fn && !nested) {
932 struct task_struct *t;
933
934 t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
935 new->name);
936 if (IS_ERR(t)) {
937 ret = PTR_ERR(t);
938 goto out_mput;
939 }
940 /*
941 * We keep the reference to the task struct even if
942 * the thread dies, so that the interrupt code cannot
943 * reference an already freed task_struct.
944 */
945 get_task_struct(t);
946 new->thread = t;
947 }
948
949 if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
950 ret = -ENOMEM;
951 goto out_thread;
952 }
953
954 /*
955 * The following block of code has to be executed atomically
956 */
957 raw_spin_lock_irqsave(&desc->lock, flags);
958 old_ptr = &desc->action;
959 old = *old_ptr;
960 if (old) {
961 /*
962 * Can't share interrupts unless both agree to and are
963 * the same type (level, edge, polarity). So both flag
964 * fields must have IRQF_SHARED set and the bits which
965 * set the trigger type must match. Also all must
966 * agree on ONESHOT.
967 */
968 if (!((old->flags & new->flags) & IRQF_SHARED) ||
969 ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK) ||
970 ((old->flags ^ new->flags) & IRQF_ONESHOT)) {
971 old_name = old->name;
972 goto mismatch;
973 }
974
975 /* All handlers must agree on per-cpuness */
976 if ((old->flags & IRQF_PERCPU) !=
977 (new->flags & IRQF_PERCPU))
978 goto mismatch;
979
980 /* add new interrupt at end of irq queue */
981 do {
982 thread_mask |= old->thread_mask;
983 old_ptr = &old->next;
984 old = *old_ptr;
985 } while (old);
986 shared = 1;
987 }
988
989 /*
990 * Setup the thread mask for this irqaction. Unlikely to have
991 * 32 or 64 irqs sharing one line, but who knows.
992 */
993 if (new->flags & IRQF_ONESHOT && thread_mask == ~0UL) {
994 ret = -EBUSY;
995 goto out_mask;
996 }
997 new->thread_mask = 1 << ffz(thread_mask);
998
999 if (!shared) {
1000 init_waitqueue_head(&desc->wait_for_threads);
1001
1002 /* Set up the type (level, edge, polarity) if configured: */
1003 if (new->flags & IRQF_TRIGGER_MASK) {
1004 ret = __irq_set_trigger(desc, irq,
1005 new->flags & IRQF_TRIGGER_MASK);
1006
1007 if (ret)
1008 goto out_mask;
1009 }
1010
1011 desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
1012 IRQS_ONESHOT | IRQS_WAITING);
1013 irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
1014
1015 if (new->flags & IRQF_PERCPU) {
1016 irqd_set(&desc->irq_data, IRQD_PER_CPU);
1017 irq_settings_set_per_cpu(desc);
1018 }
1019
1020 if (new->flags & IRQF_ONESHOT)
1021 desc->istate |= IRQS_ONESHOT;
1022
1023 if (irq_settings_can_autoenable(desc))
1024 irq_startup(desc);
1025 else
1026 /* Undo nested disables: */
1027 desc->depth = 1;
1028
1029 /* Exclude IRQ from balancing if requested */
1030 if (new->flags & IRQF_NOBALANCING) {
1031 irq_settings_set_no_balancing(desc);
1032 irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
1033 }
1034
1035 /* Set default affinity mask once everything is setup */
1036 setup_affinity(irq, desc, mask);
1037
1038 } else if (new->flags & IRQF_TRIGGER_MASK) {
1039 unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
1040 unsigned int omsk = irq_settings_get_trigger_mask(desc);
1041
1042 if (nmsk != omsk)
1043 /* hope the handler works with current trigger mode */
1044 pr_warning("IRQ %d uses trigger mode %u; requested %u\n",
1045 irq, nmsk, omsk);
1046 }
1047
1048 new->irq = irq;
1049 *old_ptr = new;
1050
1051 /* Reset broken irq detection when installing new handler */
1052 desc->irq_count = 0;
1053 desc->irqs_unhandled = 0;
1054
1055 /*
1056 * Check whether we disabled the irq via the spurious handler
1057 * before. Reenable it and give it another chance.
1058 */
1059 if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
1060 desc->istate &= ~IRQS_SPURIOUS_DISABLED;
1061 __enable_irq(desc, irq, false);
1062 }
1063
1064 raw_spin_unlock_irqrestore(&desc->lock, flags);
1065
1066 /*
1067 * Strictly no need to wake it up, but hung_task complains
1068 * when no hard interrupt wakes the thread up.
1069 */
1070 if (new->thread)
1071 wake_up_process(new->thread);
1072
1073 register_irq_proc(irq, desc);
1074 new->dir = NULL;
1075 register_handler_proc(irq, new);
1076 free_cpumask_var(mask);
1077
1078 return 0;
1079
1080 mismatch:
1081 #ifdef CONFIG_DEBUG_SHIRQ
1082 if (!(new->flags & IRQF_PROBE_SHARED)) {
1083 printk(KERN_ERR "IRQ handler type mismatch for IRQ %d\n", irq);
1084 if (old_name)
1085 printk(KERN_ERR "current handler: %s\n", old_name);
1086 dump_stack();
1087 }
1088 #endif
1089 ret = -EBUSY;
1090
1091 out_mask:
1092 raw_spin_unlock_irqrestore(&desc->lock, flags);
1093 free_cpumask_var(mask);
1094
1095 out_thread:
1096 if (new->thread) {
1097 struct task_struct *t = new->thread;
1098
1099 new->thread = NULL;
1100 if (likely(!test_bit(IRQTF_DIED, &new->thread_flags)))
1101 kthread_stop(t);
1102 put_task_struct(t);
1103 }
1104 out_mput:
1105 module_put(desc->owner);
1106 return ret;
1107 }
1108
1109 /**
1110 * setup_irq - setup an interrupt
1111 * @irq: Interrupt line to setup
1112 * @act: irqaction for the interrupt
1113 *
1114 * Used to statically setup interrupts in the early boot process.
1115 */
1116 int setup_irq(unsigned int irq, struct irqaction *act)
1117 {
1118 int retval;
1119 struct irq_desc *desc = irq_to_desc(irq);
1120
1121 chip_bus_lock(desc);
1122 retval = __setup_irq(irq, desc, act);
1123 chip_bus_sync_unlock(desc);
1124
1125 return retval;
1126 }
1127 EXPORT_SYMBOL_GPL(setup_irq);
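/*
 * Example (editor's sketch): early boot code registers a statically
 * allocated irqaction, e.g. for a timer tick, before the allocators
 * needed by request_irq() are convenient. FOO_TIMER_IRQ and
 * foo_timer_interrupt are hypothetical.
 */
static struct irqaction foo_timer_irqaction = {
	.handler = foo_timer_interrupt,
	.flags	 = IRQF_TIMER,
	.name	 = "foo-timer",
};

void __init foo_time_init(void)
{
	setup_irq(FOO_TIMER_IRQ, &foo_timer_irqaction);
}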
1128
1129 /*
1130 * Internal function to unregister an irqaction - used to free
1131 * regular and special interrupts that are part of the architecture.
1132 */
1133 static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
1134 {
1135 struct irq_desc *desc = irq_to_desc(irq);
1136 struct irqaction *action, **action_ptr;
1137 unsigned long flags;
1138
1139 WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
1140
1141 if (!desc)
1142 return NULL;
1143
1144 raw_spin_lock_irqsave(&desc->lock, flags);
1145
1146 /*
1147 * There can be multiple actions per IRQ descriptor, find the right
1148 * one based on the dev_id:
1149 */
1150 action_ptr = &desc->action;
1151 for (;;) {
1152 action = *action_ptr;
1153
1154 if (!action) {
1155 WARN(1, "Trying to free already-free IRQ %d\n", irq);
1156 raw_spin_unlock_irqrestore(&desc->lock, flags);
1157
1158 return NULL;
1159 }
1160
1161 if (action->dev_id == dev_id)
1162 break;
1163 action_ptr = &action->next;
1164 }
1165
1166 /* Found it - now remove it from the list of entries: */
1167 *action_ptr = action->next;
1168
1169 /* Currently used only by UML, might disappear one day: */
1170 #ifdef CONFIG_IRQ_RELEASE_METHOD
1171 if (desc->irq_data.chip->release)
1172 desc->irq_data.chip->release(irq, dev_id);
1173 #endif
1174
1175 /* If this was the last handler, shut down the IRQ line: */
1176 if (!desc->action)
1177 irq_shutdown(desc);
1178
1179 #ifdef CONFIG_SMP
1180 /* make sure affinity_hint is cleaned up */
1181 if (WARN_ON_ONCE(desc->affinity_hint))
1182 desc->affinity_hint = NULL;
1183 #endif
1184
1185 raw_spin_unlock_irqrestore(&desc->lock, flags);
1186
1187 unregister_handler_proc(irq, action);
1188
1189 /* Make sure it's not being used on another CPU: */
1190 synchronize_irq(irq);
1191
1192 #ifdef CONFIG_DEBUG_SHIRQ
1193 /*
1194 * It's a shared IRQ -- the driver ought to be prepared for an IRQ
1195 * event to happen even now that it's being freed, so let's make
1196 * sure that is so by doing an extra call to the handler ....
1197 *
1198 * ( We do this after actually deregistering it, to make sure that a
1199 * 'real' IRQ doesn't run in parallel with our fake. )
1200 */
1201 if (action->flags & IRQF_SHARED) {
1202 local_irq_save(flags);
1203 action->handler(irq, dev_id);
1204 local_irq_restore(flags);
1205 }
1206 #endif
1207
1208 if (action->thread) {
1209 if (!test_bit(IRQTF_DIED, &action->thread_flags))
1210 kthread_stop(action->thread);
1211 put_task_struct(action->thread);
1212 }
1213
1214 module_put(desc->owner);
1215 return action;
1216 }
1217
1218 /**
1219 * remove_irq - free an interrupt
1220 * @irq: Interrupt line to free
1221 * @act: irqaction for the interrupt
1222 *
1223 * Used to remove interrupts statically setup by the early boot process.
1224 */
1225 void remove_irq(unsigned int irq, struct irqaction *act)
1226 {
1227 __free_irq(irq, act->dev_id);
1228 }
1229 EXPORT_SYMBOL_GPL(remove_irq);
1230
1231 /**
1232 * free_irq - free an interrupt allocated with request_irq
1233 * @irq: Interrupt line to free
1234 * @dev_id: Device identity to free
1235 *
1236 * Remove an interrupt handler. The handler is removed and if the
1237 * interrupt line is no longer in use by any driver it is disabled.
1238 * On a shared IRQ the caller must ensure the interrupt is disabled
1239 * on the card it drives before calling this function. The function
1240 * does not return until any executing interrupts for this IRQ
1241 * have completed.
1242 *
1243 * This function must not be called from interrupt context.
1244 */
1245 void free_irq(unsigned int irq, void *dev_id)
1246 {
1247 struct irq_desc *desc = irq_to_desc(irq);
1248
1249 if (!desc)
1250 return;
1251
1252 #ifdef CONFIG_SMP
1253 if (WARN_ON(desc->affinity_notify))
1254 desc->affinity_notify = NULL;
1255 #endif
1256
1257 chip_bus_lock(desc);
1258 kfree(__free_irq(irq, dev_id));
1259 chip_bus_sync_unlock(desc);
1260 }
1261 EXPORT_SYMBOL(free_irq);
1262
1263 /**
1264 * request_threaded_irq - allocate an interrupt line
1265 * @irq: Interrupt line to allocate
1266 * @handler: Function to be called when the IRQ occurs.
1267 * Primary handler for threaded interrupts
1268 * If NULL and thread_fn != NULL the default
1269 * primary handler is installed
1270 * @thread_fn: Function called from the irq handler thread
1271 * If NULL, no irq thread is created
1272 * @irqflags: Interrupt type flags
1273 * @devname: An ascii name for the claiming device
1274 * @dev_id: A cookie passed back to the handler function
1275 *
1276 * This call allocates interrupt resources and enables the
1277 * interrupt line and IRQ handling. From the point this
1278 * call is made your handler function may be invoked. Since
1279 * your handler function must clear any interrupt the board
1280 * raises, you must take care both to initialise your hardware
1281 * and to set up the interrupt handler in the right order.
1282 *
1283 * If you want to set up a threaded irq handler for your device
1284 * then you need to supply @handler and @thread_fn. @handler is
1285 * still called in hard interrupt context and has to check
1286 * whether the interrupt originates from the device. If yes, it
1287 * needs to disable the interrupt on the device and return
1288 * IRQ_WAKE_THREAD which will wake up the handler thread and run
1289 * @thread_fn. This split handler design is necessary to support
1290 * shared interrupts.
1291 *
1292 * Dev_id must be globally unique. Normally the address of the
1293 * device data structure is used as the cookie. Since the handler
1294 * receives this value it makes sense to use it.
1295 *
1296 * If your interrupt is shared you must pass a non NULL dev_id
1297 * as this is required when freeing the interrupt.
1298 *
1299 * Flags:
1300 *
1301 * IRQF_SHARED Interrupt is shared
1302 * IRQF_SAMPLE_RANDOM The interrupt can be used for entropy
1303 * IRQF_TRIGGER_* Specify active edge(s) or level
1304 *
1305 */
1306 int request_threaded_irq(unsigned int irq, irq_handler_t handler,
1307 irq_handler_t thread_fn, unsigned long irqflags,
1308 const char *devname, void *dev_id)
1309 {
1310 struct irqaction *action;
1311 struct irq_desc *desc;
1312 int retval;
1313
1314 /*
1315 * Sanity-check: shared interrupts must pass in a real dev-ID,
1316 * otherwise we'll have trouble later trying to figure out
1317 * which interrupt is which (messes up the interrupt freeing
1318 * logic etc).
1319 */
1320 if ((irqflags & IRQF_SHARED) && !dev_id)
1321 return -EINVAL;
1322
1323 desc = irq_to_desc(irq);
1324 if (!desc)
1325 return -EINVAL;
1326
1327 if (!irq_settings_can_request(desc))
1328 return -EINVAL;
1329
1330 if (!handler) {
1331 if (!thread_fn)
1332 return -EINVAL;
1333 handler = irq_default_primary_handler;
1334 }
1335
1336 action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
1337 if (!action)
1338 return -ENOMEM;
1339
1340 action->handler = handler;
1341 action->thread_fn = thread_fn;
1342 action->flags = irqflags;
1343 action->name = devname;
1344 action->dev_id = dev_id;
1345
1346 chip_bus_lock(desc);
1347 retval = __setup_irq(irq, desc, action);
1348 chip_bus_sync_unlock(desc);
1349
1350 if (retval)
1351 kfree(action);
1352
1353 #ifdef CONFIG_DEBUG_SHIRQ_FIXME
1354 if (!retval && (irqflags & IRQF_SHARED)) {
1355 /*
1356 * It's a shared IRQ -- the driver ought to be prepared for it
1357 * to happen immediately, so let's make sure....
1358 * We disable the irq to make sure that a 'real' IRQ doesn't
1359 * run in parallel with our fake.
1360 */
1361 unsigned long flags;
1362
1363 disable_irq(irq);
1364 local_irq_save(flags);
1365
1366 handler(irq, dev_id);
1367
1368 local_irq_restore(flags);
1369 enable_irq(irq);
1370 }
1371 #endif
1372 return retval;
1373 }
1374 EXPORT_SYMBOL(request_threaded_irq);
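/*
 * Example (editor's sketch): the primary/threaded split described in
 * the comment above. The primary handler only checks and quiesces the
 * device; the slow work (which may sleep) runs in the irq thread.
 * All foo_* names are hypothetical.
 */
static irqreturn_t foo_primary(int irq, void *dev_id)
{
	struct foo_device *foo = dev_id;

	if (!foo_hw_irq_pending(foo))
		return IRQ_NONE;	/* not ours on a shared line */

	foo_hw_mask_irq(foo);		/* keep the device quiet meanwhile */
	return IRQ_WAKE_THREAD;		/* run foo_thread_fn() */
}

static irqreturn_t foo_thread_fn(int irq, void *dev_id)
{
	struct foo_device *foo = dev_id;

	foo_process_events(foo);	/* may sleep, e.g. I2C transfers */
	foo_hw_unmask_irq(foo);
	return IRQ_HANDLED;
}

static int foo_request(struct foo_device *foo)
{
	return request_threaded_irq(foo->irq, foo_primary, foo_thread_fn,
				    IRQF_SHARED, "foo", foo);
}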
1375
1376 /**
1377 * request_any_context_irq - allocate an interrupt line
1378 * @irq: Interrupt line to allocate
1379 * @handler: Function to be called when the IRQ occurs.
1380 * Threaded handler for threaded interrupts.
1381 * @flags: Interrupt type flags
1382 * @name: An ascii name for the claiming device
1383 * @dev_id: A cookie passed back to the handler function
1384 *
1385 * This call allocates interrupt resources and enables the
1386 * interrupt line and IRQ handling. It selects either a
1387 * hardirq or threaded handling method depending on the
1388 * context.
1389 *
1390 * On failure, it returns a negative value. On success,
1391 * it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
1392 */
1393 int request_any_context_irq(unsigned int irq, irq_handler_t handler,
1394 unsigned long flags, const char *name, void *dev_id)
1395 {
1396 struct irq_desc *desc = irq_to_desc(irq);
1397 int ret;
1398
1399 if (!desc)
1400 return -EINVAL;
1401
1402 if (irq_settings_is_nested_thread(desc)) {
1403 ret = request_threaded_irq(irq, NULL, handler,
1404 flags, name, dev_id);
1405 return !ret ? IRQC_IS_NESTED : ret;
1406 }
1407
1408 ret = request_irq(irq, handler, flags, name, dev_id);
1409 return !ret ? IRQC_IS_HARDIRQ : ret;
1410 }
1411 EXPORT_SYMBOL_GPL(request_any_context_irq);
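/*
 * Example (editor's sketch): a driver whose interrupt may come from a
 * hardirq-capable controller or from a nested one (e.g. an I2C GPIO
 * expander). On success the return value tells which variant was set
 * up. foo_handler is hypothetical.
 */
static irqreturn_t foo_handler(int irq, void *dev_id)
{
	/* may sleep when running as a nested thread handler */
	return IRQ_HANDLED;
}

static int foo_request_any(unsigned int irq, void *dev)
{
	int ret = request_any_context_irq(irq, foo_handler, 0, "foo", dev);

	if (ret < 0)
		return ret;			/* a real error */
	if (ret == IRQC_IS_NESTED)
		pr_info("foo: irq %u handled in a nested thread\n", irq);
	return 0;
}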