/*
 * linux/kernel/irq/manage.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006 Thomas Gleixner
 *
 * This file contains driver APIs to the irq subsystem.
 */
#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include "internals.h"
/**
 * synchronize_irq - wait for pending IRQ handlers (on other CPUs)
 * @irq: interrupt number to wait for
 *
 * This function waits for any pending IRQ handlers for this interrupt
 * to complete before returning. If you use this function while
 * holding a resource the IRQ handler may need you will deadlock.
 *
 * This function may be called - with care - from IRQ context.
 */
void synchronize_irq(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned int state;

        if (!desc)
                return;

        do {
                unsigned long flags;

                /*
                 * Wait until we're out of the critical section. This might
                 * give the wrong answer due to the lack of memory barriers.
                 */
                while (desc->istate & IRQS_INPROGRESS)
                        cpu_relax();

                /* Ok, that indicated we're done: double-check carefully. */
                raw_spin_lock_irqsave(&desc->lock, flags);
                state = desc->istate;
                raw_spin_unlock_irqrestore(&desc->lock, flags);

                /* Oops, that failed? */
        } while (state & IRQS_INPROGRESS);

        /*
         * We made sure that no hardirq handler is running. Now verify
         * that no threaded handlers are active.
         */
        wait_event(desc->wait_for_threads, !atomic_read(&desc->threads_active));
}
EXPORT_SYMBOL(synchronize_irq);
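
/*
 * Example usage (illustrative sketch, not part of this file): a driver
 * teardown path that quiesces its interrupt before freeing the data the
 * handler touches. The foo_dev structure and its members are hypothetical.
 *
 *    static void foo_quiesce(struct foo_dev *foo)
 *    {
 *            foo->shutting_down = true;      // handler checks this flag
 *            synchronize_irq(foo->irq);      // wait out in-flight handlers
 *            kfree(foo->dma_buf);            // now safe to free
 *    }
 *
 * As documented above, this must not be called while holding a lock the
 * handler itself takes, or it will deadlock.
 */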
#ifdef CONFIG_SMP
cpumask_var_t irq_default_affinity;
/**
 * irq_can_set_affinity - Check if the affinity of a given irq can be set
 * @irq:	Interrupt to check
 */
int irq_can_set_affinity(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);

        if (CHECK_IRQ_PER_CPU(desc->status) || !desc->irq_data.chip ||
            !desc->irq_data.chip->irq_set_affinity)
                return 0;

        return 1;
}
/**
 * irq_set_thread_affinity - Notify irq threads to adjust affinity
 * @desc:	irq descriptor which has affinity changed
 *
 * We just set IRQTF_AFFINITY and delegate the affinity setting
 * to the interrupt thread itself. We can not call
 * set_cpus_allowed_ptr() here as we hold desc->lock and this
 * code can be called from hard interrupt context.
 */
void irq_set_thread_affinity(struct irq_desc *desc)
{
        struct irqaction *action = desc->action;

        while (action) {
                if (action->thread)
                        set_bit(IRQTF_AFFINITY, &action->thread_flags);
                action = action->next;
        }
}
#ifdef CONFIG_GENERIC_PENDING_IRQ
static inline bool irq_can_move_pcntxt(struct irq_desc *desc)
{
        return desc->status & IRQ_MOVE_PCNTXT;
}
static inline bool irq_move_pending(struct irq_desc *desc)
{
        return desc->status & IRQ_MOVE_PENDING;
}
static inline void
irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)
{
        cpumask_copy(desc->pending_mask, mask);
}
static inline void
irq_get_pending(struct cpumask *mask, struct irq_desc *desc)
{
        cpumask_copy(mask, desc->pending_mask);
}
#else
static inline bool irq_can_move_pcntxt(struct irq_desc *desc) { return true; }
static inline bool irq_move_pending(struct irq_desc *desc) { return false; }
static inline void
irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { }
static inline void
irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { }
#endif
/**
 * irq_set_affinity - Set the irq affinity of a given irq
 * @irq:	Interrupt to set affinity
 * @mask:	cpumask to set the affinity to
 */
int irq_set_affinity(unsigned int irq, const struct cpumask *mask)
{
        struct irq_desc *desc = irq_to_desc(irq);
        struct irq_chip *chip = desc->irq_data.chip;
        unsigned long flags;
        int ret = 0;

        if (!chip->irq_set_affinity)
                return -EINVAL;

        raw_spin_lock_irqsave(&desc->lock, flags);

        if (irq_can_move_pcntxt(desc)) {
                ret = chip->irq_set_affinity(&desc->irq_data, mask, false);
                switch (ret) {
                case IRQ_SET_MASK_OK:
                        cpumask_copy(desc->irq_data.affinity, mask);
                        /* fall through */
                case IRQ_SET_MASK_OK_NOCOPY:
                        irq_set_thread_affinity(desc);
                        ret = 0;
                }
        } else {
                desc->status |= IRQ_MOVE_PENDING;
                irq_copy_pending(desc, mask);
        }

        if (desc->affinity_notify) {
                kref_get(&desc->affinity_notify->kref);
                schedule_work(&desc->affinity_notify->work);
        }
        desc->status |= IRQ_AFFINITY_SET;
        raw_spin_unlock_irqrestore(&desc->lock, flags);
        return ret;
}
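
/*
 * Example (illustrative sketch): pinning an interrupt to a single CPU
 * from process context, e.g. for an RX queue. The wrapper function and
 * its names are hypothetical.
 *
 *    int pin_irq_to_cpu(unsigned int irq, unsigned int cpu)
 *    {
 *            if (!irq_can_set_affinity(irq))
 *                    return -EINVAL;
 *            return irq_set_affinity(irq, cpumask_of(cpu));
 *    }
 */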
int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
{
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;

        if (!desc)
                return -EINVAL;

        raw_spin_lock_irqsave(&desc->lock, flags);
        desc->affinity_hint = m;
        raw_spin_unlock_irqrestore(&desc->lock, flags);

        return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
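
/*
 * Example (illustrative sketch): a multiqueue driver publishing the
 * preferred CPU of each vector so userspace irqbalance can follow it.
 * The per-queue structure q is hypothetical; note the hint must be
 * cleared with a NULL mask before free_irq() (see __free_irq() below).
 *
 *    irq_set_affinity_hint(q->irq, cpumask_of(q->cpu));  // on setup
 *    ...
 *    irq_set_affinity_hint(q->irq, NULL);                // before free_irq()
 */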
static void irq_affinity_notify(struct work_struct *work)
{
        struct irq_affinity_notify *notify =
                container_of(work, struct irq_affinity_notify, work);
        struct irq_desc *desc = irq_to_desc(notify->irq);
        cpumask_var_t cpumask;
        unsigned long flags;

        if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
                goto out;

        raw_spin_lock_irqsave(&desc->lock, flags);
        if (irq_move_pending(desc))
                irq_get_pending(cpumask, desc);
        else
                cpumask_copy(cpumask, desc->irq_data.affinity);
        raw_spin_unlock_irqrestore(&desc->lock, flags);

        notify->notify(notify, cpumask);

        free_cpumask_var(cpumask);
out:
        kref_put(&notify->kref, notify->release);
}
/**
 * irq_set_affinity_notifier - control notification of IRQ affinity changes
 * @irq:	Interrupt for which to enable/disable notification
 * @notify:	Context for notification, or %NULL to disable
 *		notification. Function pointers must be initialised;
 *		the other fields will be initialised by this function.
 *
 * Must be called in process context. Notification may only be enabled
 * after the IRQ is allocated and must be disabled before the IRQ is
 * freed using free_irq().
 */
int
irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
{
        struct irq_desc *desc = irq_to_desc(irq);
        struct irq_affinity_notify *old_notify;
        unsigned long flags;

        /* The release function is promised process context */
        might_sleep();

        if (!desc)
                return -EINVAL;

        /* Complete initialisation of *notify */
        if (notify) {
                notify->irq = irq;
                kref_init(&notify->kref);
                INIT_WORK(&notify->work, irq_affinity_notify);
        }

        raw_spin_lock_irqsave(&desc->lock, flags);
        old_notify = desc->affinity_notify;
        desc->affinity_notify = notify;
        raw_spin_unlock_irqrestore(&desc->lock, flags);

        if (old_notify)
                kref_put(&old_notify->kref, old_notify->release);

        return 0;
}
EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
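
/*
 * Example (illustrative sketch): receiving affinity-change callbacks.
 * The callbacks run from a workqueue, so they may sleep. All foo_*
 * names are hypothetical; only struct irq_affinity_notify is real.
 *
 *    static void foo_notify(struct irq_affinity_notify *notify,
 *                           const cpumask_var_t mask)
 *    {
 *            struct foo_dev *foo = container_of(notify, struct foo_dev,
 *                                               affinity_notify);
 *            foo_retarget_queues(foo, mask);
 *    }
 *
 *    static void foo_release(struct kref *ref)
 *    {
 *            // last reference dropped; nothing dynamic to free here
 *    }
 *
 *    foo->affinity_notify.notify = foo_notify;
 *    foo->affinity_notify.release = foo_release;
 *    irq_set_affinity_notifier(foo->irq, &foo->affinity_notify);
 *    ...
 *    irq_set_affinity_notifier(foo->irq, NULL);  // before free_irq()
 */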
#ifndef CONFIG_AUTO_IRQ_AFFINITY
/*
 * Generic version of the affinity autoselector.
 */
static int
setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
{
        struct irq_chip *chip = irq_desc_get_chip(desc);
        struct cpumask *set = irq_default_affinity;
        int ret;

        /* Excludes PER_CPU and NO_BALANCE interrupts */
        if (!irq_can_set_affinity(irq))
                return 0;

        /*
         * Preserve a userspace affinity setup, but make sure that
         * one of the targets is online.
         */
        if (desc->status & (IRQ_AFFINITY_SET)) {
                if (cpumask_intersects(desc->irq_data.affinity,
                                       cpu_online_mask))
                        set = desc->irq_data.affinity;
                else
                        desc->status &= ~IRQ_AFFINITY_SET;
        }

        cpumask_and(mask, cpu_online_mask, set);
        ret = chip->irq_set_affinity(&desc->irq_data, mask, false);
        switch (ret) {
        case IRQ_SET_MASK_OK:
                cpumask_copy(desc->irq_data.affinity, mask);
                /* fall through */
        case IRQ_SET_MASK_OK_NOCOPY:
                irq_set_thread_affinity(desc);
        }
        return 0;
}
#else
static inline int
setup_affinity(unsigned int irq, struct irq_desc *d, struct cpumask *mask)
{
        return irq_select_affinity(irq);
}
#endif

/*
 * Called when affinity is set via /proc/irq
 */
int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask)
{
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;
        int ret;

        raw_spin_lock_irqsave(&desc->lock, flags);
        ret = setup_affinity(irq, desc, mask);
        raw_spin_unlock_irqrestore(&desc->lock, flags);
        return ret;
}

#else /* !CONFIG_SMP */
static inline int
setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
{
        return 0;
}
#endif
void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend)
{
        if (suspend) {
                if (!desc->action || (desc->action->flags & IRQF_NO_SUSPEND))
                        return;
                desc->status |= IRQ_SUSPENDED;
        }

        if (!desc->depth++)
                irq_disable(desc);
}
/**
 * disable_irq_nosync - disable an irq without waiting
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Disables and Enables are
 * nested.
 * Unlike disable_irq(), this function does not ensure existing
 * instances of the IRQ handler have completed before returning.
 *
 * This function may be called from IRQ context.
 */
void disable_irq_nosync(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;

        if (!desc)
                return;

        chip_bus_lock(desc);
        raw_spin_lock_irqsave(&desc->lock, flags);
        __disable_irq(desc, irq, false);
        raw_spin_unlock_irqrestore(&desc->lock, flags);
        chip_bus_sync_unlock(desc);
}
EXPORT_SYMBOL(disable_irq_nosync);
/**
 * disable_irq - disable an irq and wait for completion
 * @irq: Interrupt to disable
 *
 * Disable the selected interrupt line. Enables and Disables are
 * nested.
 * This function waits for any pending IRQ handlers for this interrupt
 * to complete before returning. If you use this function while
 * holding a resource the IRQ handler may need you will deadlock.
 *
 * This function may be called - with care - from IRQ context.
 */
void disable_irq(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);

        if (!desc)
                return;

        disable_irq_nosync(irq);
        if (desc->action)
                synchronize_irq(irq);
}
EXPORT_SYMBOL(disable_irq);
void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
{
        if (resume) {
                if (!(desc->status & IRQ_SUSPENDED)) {
                        if (!desc->action)
                                return;
                        if (!(desc->action->flags & IRQF_FORCE_RESUME))
                                return;
                        /* Pretend that it got disabled ! */
                        desc->depth++;
                }
                desc->status &= ~IRQ_SUSPENDED;
        }

        switch (desc->depth) {
        case 0:
err_out:
                WARN(1, KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
                break;
        case 1: {
                if (desc->status & IRQ_SUSPENDED)
                        goto err_out;
                /* Prevent probing on this irq: */
                desc->status |= IRQ_NOPROBE;
                irq_enable(desc);
                check_irq_resend(desc, irq);
                /* fall-through */
        }
        default:
                desc->depth--;
        }
}
/**
 * enable_irq - enable handling of an irq
 * @irq: Interrupt to enable
 *
 * Undoes the effect of one call to disable_irq(). If this
 * matches the last disable, processing of interrupts on this
 * IRQ line is re-enabled.
 *
 * This function may be called from IRQ context only when
 * desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL !
 */
void enable_irq(unsigned int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;

        if (!desc)
                return;

        if (WARN(!desc->irq_data.chip,
                 KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
                return;

        chip_bus_lock(desc);
        raw_spin_lock_irqsave(&desc->lock, flags);
        __enable_irq(desc, irq, false);
        raw_spin_unlock_irqrestore(&desc->lock, flags);
        chip_bus_sync_unlock(desc);
}
EXPORT_SYMBOL(enable_irq);
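
/*
 * Example (illustrative sketch): because Disables and Enables nest, a
 * driver can bracket a critical reconfiguration like this. The foo_*
 * names are hypothetical.
 *
 *    disable_irq(foo->irq);          // waits for running handlers
 *    foo_reprogram_hw(foo);
 *    enable_irq(foo->irq);           // line live again at matching enable
 *
 * Each disable_irq() must be paired with exactly one enable_irq(); the
 * line is re-enabled only when the disable depth returns to zero.
 */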
static int set_irq_wake_real(unsigned int irq, unsigned int on)
{
        struct irq_desc *desc = irq_to_desc(irq);
        int ret = -ENXIO;

        if (desc->irq_data.chip->irq_set_wake)
                ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);

        return ret;
}
/**
 * irq_set_irq_wake - control irq power management wakeup
 * @irq:	interrupt to control
 * @on:		enable/disable power management wakeup
 *
 * Enable/disable power management wakeup mode, which is
 * disabled by default. Enables and disables must match,
 * just as they match for non-wakeup mode support.
 *
 * Wakeup mode lets this IRQ wake the system from sleep
 * states like "suspend to RAM".
 */
int irq_set_irq_wake(unsigned int irq, unsigned int on)
{
        struct irq_desc *desc = irq_to_desc(irq);
        unsigned long flags;
        int ret = 0;

        /* wakeup-capable irqs can be shared between drivers that
         * don't need to have the same sleep mode behaviors.
         */
        chip_bus_lock(desc);
        raw_spin_lock_irqsave(&desc->lock, flags);
        if (on) {
                if (desc->wake_depth++ == 0) {
                        ret = set_irq_wake_real(irq, on);
                        if (ret)
                                desc->wake_depth = 0;
                        else
                                desc->status |= IRQ_WAKEUP;
                }
        } else {
                if (desc->wake_depth == 0) {
                        WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
                } else if (--desc->wake_depth == 0) {
                        ret = set_irq_wake_real(irq, on);
                        if (ret)
                                desc->wake_depth = 1;
                        else
                                desc->status &= ~IRQ_WAKEUP;
                }
        }

        raw_spin_unlock_irqrestore(&desc->lock, flags);
        chip_bus_sync_unlock(desc);
        return ret;
}
EXPORT_SYMBOL(irq_set_irq_wake);
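
/*
 * Example (illustrative sketch): marking a wakeup source across suspend
 * with balanced enable/disable calls. The foo_* names are hypothetical.
 *
 *    static int foo_suspend(struct device *dev)
 *    {
 *            struct foo_dev *foo = dev_get_drvdata(dev);
 *
 *            if (device_may_wakeup(dev))
 *                    irq_set_irq_wake(foo->irq, 1);
 *            return 0;
 *    }
 *
 *    static int foo_resume(struct device *dev)
 *    {
 *            struct foo_dev *foo = dev_get_drvdata(dev);
 *
 *            if (device_may_wakeup(dev))
 *                    irq_set_irq_wake(foo->irq, 0);
 *            return 0;
 *    }
 */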
/*
 * Internal function that tells the architecture code whether a
 * particular irq has been exclusively allocated or is available
 * for driver use.
 */
int can_request_irq(unsigned int irq, unsigned long irqflags)
{
        struct irq_desc *desc = irq_to_desc(irq);
        struct irqaction *action;
        unsigned long flags;

        if (!desc)
                return 0;

        if (desc->status & IRQ_NOREQUEST)
                return 0;

        raw_spin_lock_irqsave(&desc->lock, flags);
        action = desc->action;
        if (action)
                if (irqflags & action->flags & IRQF_SHARED)
                        action = NULL;
        raw_spin_unlock_irqrestore(&desc->lock, flags);

        return !action;
}
void compat_irq_chip_set_default_handler(struct irq_desc *desc)
{
        /*
         * If the architecture still has not overridden
         * the flow handler then zap the default. This
         * should catch incorrect flow-type setting.
         */
        if (desc->handle_irq == &handle_bad_irq)
                desc->handle_irq = NULL;
}
int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
                      unsigned long flags)
{
        int ret;
        struct irq_chip *chip = desc->irq_data.chip;

        if (!chip || !chip->irq_set_type) {
                /*
                 * IRQF_TRIGGER_* but the PIC does not support multiple
                 * flow-types?
                 */
                pr_debug("No set_type function for IRQ %d (%s)\n", irq,
                         chip ? (chip->name ? : "unknown") : "unknown");
                return 0;
        }

        /* caller masked out all except trigger mode flags */
        ret = chip->irq_set_type(&desc->irq_data, flags);

        if (ret)
                pr_err("setting trigger mode %lu for irq %u failed (%pF)\n",
                       flags, irq, chip->irq_set_type);
        else {
                if (flags & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
                        flags |= IRQ_LEVEL;
                /* note that IRQF_TRIGGER_MASK == IRQ_TYPE_SENSE_MASK */
                desc->status &= ~(IRQ_LEVEL | IRQ_TYPE_SENSE_MASK);
                desc->status |= flags;

                if (chip != desc->irq_data.chip)
                        irq_chip_set_defaults(desc->irq_data.chip);
        }

        return ret;
}
/*
 * Default primary interrupt handler for threaded interrupts. Is
 * assigned as primary handler when request_threaded_irq is called
 * with handler == NULL. Useful for oneshot interrupts.
 */
static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
{
        return IRQ_WAKE_THREAD;
}

/*
 * Primary handler for nested threaded interrupts. Should never be
 * called.
 */
static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
{
        WARN(1, "Primary handler called for nested irq %d\n", irq);
        return IRQ_NONE;
}
static int irq_wait_for_interrupt(struct irqaction *action)
{
        while (!kthread_should_stop()) {
                set_current_state(TASK_INTERRUPTIBLE);

                if (test_and_clear_bit(IRQTF_RUNTHREAD,
                                       &action->thread_flags)) {
                        __set_current_state(TASK_RUNNING);
                        return 0;
                }
                schedule();
        }
        return -1;
}
/*
 * Oneshot interrupts keep the irq line masked until the threaded
 * handler has finished. Unmask if the interrupt has not been disabled
 * and is marked MASKED.
 */
static void irq_finalize_oneshot(unsigned int irq, struct irq_desc *desc)
{
again:
        chip_bus_lock(desc);
        raw_spin_lock_irq(&desc->lock);

        /*
         * Implausible though it may be, we need to protect ourselves
         * against the following scenario:
         *
         * The thread is faster done than the hard interrupt handler
         * on the other CPU. If we unmask the irq line then the
         * interrupt can come in again and masks the line, leaves due
         * to IRQS_INPROGRESS and the irq line is masked forever.
         */
        if (unlikely(desc->istate & IRQS_INPROGRESS)) {
                raw_spin_unlock_irq(&desc->lock);
                chip_bus_sync_unlock(desc);
                cpu_relax();
                goto again;
        }

        if (!(desc->istate & IRQS_DISABLED) && (desc->istate & IRQS_MASKED)) {
                irq_compat_clr_masked(desc);
                desc->istate &= ~IRQS_MASKED;
                desc->irq_data.chip->irq_unmask(&desc->irq_data);
        }
        raw_spin_unlock_irq(&desc->lock);
        chip_bus_sync_unlock(desc);
}
#ifdef CONFIG_SMP
/*
 * Check whether we need to change the affinity of the interrupt thread.
 */
static void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
{
        cpumask_var_t mask;

        if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
                return;

        /*
         * In case we are out of memory we set IRQTF_AFFINITY again and
         * try again next time
         */
        if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
                set_bit(IRQTF_AFFINITY, &action->thread_flags);
                return;
        }

        raw_spin_lock_irq(&desc->lock);
        cpumask_copy(mask, desc->irq_data.affinity);
        raw_spin_unlock_irq(&desc->lock);

        set_cpus_allowed_ptr(current, mask);
        free_cpumask_var(mask);
}
#else
static inline void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
#endif
/*
 * Interrupt handler thread
 */
static int irq_thread(void *data)
{
        static const struct sched_param param = {
                .sched_priority = MAX_USER_RT_PRIO/2,
        };
        struct irqaction *action = data;
        struct irq_desc *desc = irq_to_desc(action->irq);
        int wake, oneshot = desc->istate & IRQS_ONESHOT;

        sched_setscheduler(current, SCHED_FIFO, &param);
        current->irqaction = action;

        while (!irq_wait_for_interrupt(action)) {

                irq_thread_check_affinity(desc, action);

                atomic_inc(&desc->threads_active);

                raw_spin_lock_irq(&desc->lock);
                if (unlikely(desc->istate & IRQS_DISABLED)) {
                        /*
                         * CHECKME: We might need a dedicated
                         * IRQ_THREAD_PENDING flag here, which
                         * retriggers the thread in check_irq_resend()
                         * but AFAICT IRQS_PENDING should be fine as it
                         * retriggers the interrupt itself --- tglx
                         */
                        irq_compat_set_pending(desc);
                        desc->istate |= IRQS_PENDING;
                        raw_spin_unlock_irq(&desc->lock);
                } else {
                        raw_spin_unlock_irq(&desc->lock);

                        action->thread_fn(action->irq, action->dev_id);

                        if (oneshot)
                                irq_finalize_oneshot(action->irq, desc);
                }

                wake = atomic_dec_and_test(&desc->threads_active);

                if (wake && waitqueue_active(&desc->wait_for_threads))
                        wake_up(&desc->wait_for_threads);
        }

        /*
         * Clear irqaction. Otherwise exit_irq_thread() would make
         * fuzz about an active irq thread going into nirvana.
         */
        current->irqaction = NULL;
        return 0;
}
/*
 * Called from do_exit()
 */
void exit_irq_thread(void)
{
        struct task_struct *tsk = current;

        if (!tsk->irqaction)
                return;

        printk(KERN_ERR
               "exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
               tsk->comm ? tsk->comm : "", tsk->pid, tsk->irqaction->irq);

        /*
         * Set the THREAD DIED flag to prevent further wakeups of the
         * soon to be gone threaded handler.
         */
        set_bit(IRQTF_DIED, &tsk->irqaction->thread_flags);
}
/*
 * Internal function to register an irqaction - typically used to
 * allocate special interrupts that are part of the architecture.
 */
static int
__setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
{
        struct irqaction *old, **old_ptr;
        const char *old_name = NULL;
        unsigned long flags;
        int ret, nested, shared = 0;
        cpumask_var_t mask;

        if (!desc)
                return -EINVAL;

        if (desc->irq_data.chip == &no_irq_chip)
                return -ENOSYS;
        /*
         * Some drivers like serial.c use request_irq() heavily,
         * so we have to be careful not to interfere with a
         * running system.
         */
        if (new->flags & IRQF_SAMPLE_RANDOM) {
                /*
                 * This function might sleep, we want to call it first,
                 * outside of the atomic block.
                 * Yes, this might clear the entropy pool if the wrong
                 * driver is attempted to be loaded, without actually
                 * installing a new handler, but is this really a problem,
                 * only the sysadmin is able to do this.
                 */
                rand_initialize_irq(irq);
        }

        /* Oneshot interrupts are not allowed with shared */
        if ((new->flags & IRQF_ONESHOT) && (new->flags & IRQF_SHARED))
                return -EINVAL;

        /*
         * Check whether the interrupt nests into another interrupt
         * thread.
         */
        nested = desc->status & IRQ_NESTED_THREAD;
        if (nested) {
                if (!new->thread_fn)
                        return -EINVAL;
                /*
                 * Replace the primary handler which was provided from
                 * the driver for non nested interrupt handling by the
                 * dummy function which warns when called.
                 */
                new->handler = irq_nested_primary_handler;
        }

        /*
         * Create a handler thread when a thread function is supplied
         * and the interrupt does not nest into another interrupt
         * thread.
         */
        if (new->thread_fn && !nested) {
                struct task_struct *t;

                t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
                                   new->name);
                if (IS_ERR(t))
                        return PTR_ERR(t);
                /*
                 * We keep the reference to the task struct even if
                 * the thread dies to avoid that the interrupt code
                 * references an already freed task_struct.
                 */
                get_task_struct(t);
                new->thread = t;
        }

        if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
                ret = -ENOMEM;
                goto out_thread;
        }

        /*
         * The following block of code has to be executed atomically
         */
        raw_spin_lock_irqsave(&desc->lock, flags);
        old_ptr = &desc->action;
        old = *old_ptr;
        if (old) {
                /*
                 * Can't share interrupts unless both agree to and are
                 * the same type (level, edge, polarity). So both flag
                 * fields must have IRQF_SHARED set and the bits which
                 * set the trigger type must match.
                 */
                if (!((old->flags & new->flags) & IRQF_SHARED) ||
                    ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK)) {
                        old_name = old->name;
                        goto mismatch;
                }

#if defined(CONFIG_IRQ_PER_CPU)
                /* All handlers must agree on per-cpuness */
                if ((old->flags & IRQF_PERCPU) !=
                    (new->flags & IRQF_PERCPU))
                        goto mismatch;
#endif

                /* add new interrupt at end of irq queue */
                do {
                        old_ptr = &old->next;
                        old = *old_ptr;
                } while (old);
                shared = 1;
        }

        if (!shared) {
                irq_chip_set_defaults(desc->irq_data.chip);

                init_waitqueue_head(&desc->wait_for_threads);

                /* Setup the type (level, edge polarity) if configured: */
                if (new->flags & IRQF_TRIGGER_MASK) {
                        ret = __irq_set_trigger(desc, irq,
                                        new->flags & IRQF_TRIGGER_MASK);

                        if (ret)
                                goto out_mask;
                } else
                        compat_irq_chip_set_default_handler(desc);
#if defined(CONFIG_IRQ_PER_CPU)
                if (new->flags & IRQF_PERCPU)
                        desc->status |= IRQ_PER_CPU;
#endif

                desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED |
                                  IRQS_INPROGRESS | IRQS_ONESHOT |
                                  IRQS_WAITING);

                if (new->flags & IRQF_ONESHOT)
                        desc->istate |= IRQS_ONESHOT;

                if (!(desc->status & IRQ_NOAUTOEN))
                        irq_startup(desc);
                else
                        /* Undo nested disables: */
                        desc->depth = 1;

                /* Exclude IRQ from balancing if requested */
                if (new->flags & IRQF_NOBALANCING)
                        desc->status |= IRQ_NO_BALANCING;

                /* Set default affinity mask once everything is setup */
                setup_affinity(irq, desc, mask);

        } else if ((new->flags & IRQF_TRIGGER_MASK)
                        && (new->flags & IRQF_TRIGGER_MASK)
                                != (desc->status & IRQ_TYPE_SENSE_MASK)) {
                /* hope the handler works with the actual trigger mode... */
                pr_warning("IRQ %d uses trigger mode %d; requested %d\n",
                           irq, (int)(desc->status & IRQ_TYPE_SENSE_MASK),
                           (int)(new->flags & IRQF_TRIGGER_MASK));
        }

        new->irq = irq;
        *old_ptr = new;

        /* Reset broken irq detection when installing new handler */
        desc->irq_count = 0;
        desc->irqs_unhandled = 0;

        /*
         * Check whether we disabled the irq via the spurious handler
         * before. Reenable it and give it another chance.
         */
        if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
                desc->istate &= ~IRQS_SPURIOUS_DISABLED;
                __enable_irq(desc, irq, false);
        }

        raw_spin_unlock_irqrestore(&desc->lock, flags);

        /*
         * Strictly no need to wake it up, but hung_task complains
         * when no hard interrupt wakes the thread up.
         */
        if (new->thread)
                wake_up_process(new->thread);

        register_irq_proc(irq, desc);
        new->dir = NULL;
        register_handler_proc(irq, new);

        return 0;

mismatch:
#ifdef CONFIG_DEBUG_SHIRQ
        if (!(new->flags & IRQF_PROBE_SHARED)) {
                printk(KERN_ERR "IRQ handler type mismatch for IRQ %d\n", irq);
                if (old_name)
                        printk(KERN_ERR "current handler: %s\n", old_name);
                dump_stack();
        }
#endif
        ret = -EBUSY;

out_mask:
        raw_spin_unlock_irqrestore(&desc->lock, flags);
        free_cpumask_var(mask);

out_thread:
        if (new->thread) {
                struct task_struct *t = new->thread;

                new->thread = NULL;
                if (likely(!test_bit(IRQTF_DIED, &new->thread_flags)))
                        kthread_stop(t);
                put_task_struct(t);
        }
        return ret;
}
/**
 * setup_irq - setup an interrupt
 * @irq: Interrupt line to setup
 * @act: irqaction for the interrupt
 *
 * Used to statically setup interrupts in the early boot process.
 */
int setup_irq(unsigned int irq, struct irqaction *act)
{
        int retval;
        struct irq_desc *desc = irq_to_desc(irq);

        chip_bus_lock(desc);
        retval = __setup_irq(irq, desc, act);
        chip_bus_sync_unlock(desc);

        return retval;
}
EXPORT_SYMBOL_GPL(setup_irq);
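
/*
 * Example (illustrative sketch): how architecture code typically uses
 * setup_irq() for an interrupt that must exist before the normal
 * request_irq() path is usable, e.g. the timer tick. The handler,
 * action and irq number below are hypothetical.
 *
 *    static struct irqaction foo_timer_irqaction = {
 *            .handler = foo_timer_interrupt,
 *            .flags   = IRQF_TIMER,
 *            .name    = "foo_timer",
 *    };
 *
 *    void __init foo_time_init(void)
 *    {
 *            setup_irq(FOO_TIMER_IRQ, &foo_timer_irqaction);
 *    }
 */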
/*
 * Internal function to unregister an irqaction - used to free
 * regular and special interrupts that are part of the architecture.
 */
static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
{
        struct irq_desc *desc = irq_to_desc(irq);
        struct irqaction *action, **action_ptr;
        unsigned long flags;

        WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);

        if (!desc)
                return NULL;

        raw_spin_lock_irqsave(&desc->lock, flags);

        /*
         * There can be multiple actions per IRQ descriptor, find the right
         * one based on the dev_id:
         */
        action_ptr = &desc->action;
        for (;;) {
                action = *action_ptr;

                if (!action) {
                        WARN(1, "Trying to free already-free IRQ %d\n", irq);
                        raw_spin_unlock_irqrestore(&desc->lock, flags);

                        return NULL;
                }

                if (action->dev_id == dev_id)
                        break;
                action_ptr = &action->next;
        }

        /* Found it - now remove it from the list of entries: */
        *action_ptr = action->next;

        /* Currently used only by UML, might disappear one day: */
#ifdef CONFIG_IRQ_RELEASE_METHOD
        if (desc->irq_data.chip->release)
                desc->irq_data.chip->release(irq, dev_id);
#endif

        /* If this was the last handler, shut down the IRQ line: */
        if (!desc->action)
                irq_shutdown(desc);

#ifdef CONFIG_SMP
        /* make sure affinity_hint is cleaned up */
        if (WARN_ON_ONCE(desc->affinity_hint))
                desc->affinity_hint = NULL;
#endif

        raw_spin_unlock_irqrestore(&desc->lock, flags);

        unregister_handler_proc(irq, action);

        /* Make sure it's not being used on another CPU: */
        synchronize_irq(irq);

#ifdef CONFIG_DEBUG_SHIRQ
        /*
         * It's a shared IRQ -- the driver ought to be prepared for an IRQ
         * event to happen even now it's being freed, so let's make sure that
         * is so by doing an extra call to the handler ....
         *
         * ( We do this after actually deregistering it, to make sure that a
         *   'real' IRQ doesn't run in parallel with our fake. )
         */
        if (action->flags & IRQF_SHARED) {
                local_irq_save(flags);
                action->handler(irq, dev_id);
                local_irq_restore(flags);
        }
#endif

        if (action->thread) {
                if (!test_bit(IRQTF_DIED, &action->thread_flags))
                        kthread_stop(action->thread);
                put_task_struct(action->thread);
        }

        return action;
}
/**
 * remove_irq - free an interrupt
 * @irq: Interrupt line to free
 * @act: irqaction for the interrupt
 *
 * Used to remove interrupts statically setup by the early boot process.
 */
void remove_irq(unsigned int irq, struct irqaction *act)
{
        __free_irq(irq, act->dev_id);
}
EXPORT_SYMBOL_GPL(remove_irq);
/**
 * free_irq - free an interrupt allocated with request_irq
 * @irq: Interrupt line to free
 * @dev_id: Device identity to free
 *
 * Remove an interrupt handler. The handler is removed and if the
 * interrupt line is no longer in use by any driver it is disabled.
 * On a shared IRQ the caller must ensure the interrupt is disabled
 * on the card it drives before calling this function. The function
 * does not return until any executing interrupts for this IRQ
 * have completed.
 *
 * This function must not be called from interrupt context.
 */
void free_irq(unsigned int irq, void *dev_id)
{
        struct irq_desc *desc = irq_to_desc(irq);

        if (!desc)
                return;

#ifdef CONFIG_SMP
        if (WARN_ON(desc->affinity_notify))
                desc->affinity_notify = NULL;
#endif

        chip_bus_lock(desc);
        kfree(__free_irq(irq, dev_id));
        chip_bus_sync_unlock(desc);
}
EXPORT_SYMBOL(free_irq);
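
/*
 * Example (illustrative sketch): remove path for a shared interrupt.
 * The device is silenced first because, on a shared line, the handler
 * can still be invoked for the other devices until it is unregistered.
 * The foo_* names are hypothetical.
 *
 *    static void foo_remove(struct pci_dev *pdev)
 *    {
 *            struct foo_dev *foo = pci_get_drvdata(pdev);
 *
 *            foo_mask_all_irqs(foo);         // quiet the card itself
 *            free_irq(pdev->irq, foo);       // same dev_id as request_irq()
 *    }
 */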
/**
 * request_threaded_irq - allocate an interrupt line
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs.
 *	     Primary handler for threaded interrupts
 *	     If NULL and thread_fn != NULL the default
 *	     primary handler is installed
 * @thread_fn: Function called from the irq handler thread
 *	       If NULL, no irq thread is created
 * @irqflags: Interrupt type flags
 * @devname: An ascii name for the claiming device
 * @dev_id: A cookie passed back to the handler function
 *
 * This call allocates interrupt resources and enables the
 * interrupt line and IRQ handling. From the point this
 * call is made your handler function may be invoked. Since
 * your handler function must clear any interrupt the board
 * raises, you must take care both to initialise your hardware
 * and to set up the interrupt handler in the right order.
 *
 * If you want to set up a threaded irq handler for your device
 * then you need to supply @handler and @thread_fn. @handler is
 * still called in hard interrupt context and has to check
 * whether the interrupt originates from the device. If yes it
 * needs to disable the interrupt on the device and return
 * IRQ_WAKE_THREAD which will wake up the handler thread and run
 * @thread_fn. This split handler design is necessary to support
 * shared interrupts.
 *
 * Dev_id must be globally unique. Normally the address of the
 * device data structure is used as the cookie. Since the handler
 * receives this value it makes sense to use it.
 *
 * If your interrupt is shared you must pass a non NULL dev_id
 * as this is required when freeing the interrupt.
 *
 * Flags:
 *
 *	IRQF_SHARED		Interrupt is shared
 *	IRQF_SAMPLE_RANDOM	The interrupt can be used for entropy
 *	IRQF_TRIGGER_*		Specify active edge(s) or level
 */
int request_threaded_irq(unsigned int irq, irq_handler_t handler,
                         irq_handler_t thread_fn, unsigned long irqflags,
                         const char *devname, void *dev_id)
{
        struct irqaction *action;
        struct irq_desc *desc;
        int retval;

        /*
         * Sanity-check: shared interrupts must pass in a real dev-ID,
         * otherwise we'll have trouble later trying to figure out
         * which interrupt is which (messes up the interrupt freeing
         * logic etc).
         */
        if ((irqflags & IRQF_SHARED) && !dev_id)
                return -EINVAL;

        desc = irq_to_desc(irq);
        if (!desc)
                return -EINVAL;

        if (desc->status & IRQ_NOREQUEST)
                return -EINVAL;

        if (!handler) {
                if (!thread_fn)
                        return -EINVAL;
                handler = irq_default_primary_handler;
        }

        action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
        if (!action)
                return -ENOMEM;

        action->handler = handler;
        action->thread_fn = thread_fn;
        action->flags = irqflags;
        action->name = devname;
        action->dev_id = dev_id;

        chip_bus_lock(desc);
        retval = __setup_irq(irq, desc, action);
        chip_bus_sync_unlock(desc);

        if (retval)
                kfree(action);

#ifdef CONFIG_DEBUG_SHIRQ_FIXME
        if (!retval && (irqflags & IRQF_SHARED)) {
                /*
                 * It's a shared IRQ -- the driver ought to be prepared for it
                 * to happen immediately, so let's make sure....
                 * We disable the irq to make sure that a 'real' IRQ doesn't
                 * run in parallel with our fake.
                 */
                unsigned long flags;

                disable_irq(irq);
                local_irq_save(flags);

                handler(irq, dev_id);

                local_irq_restore(flags);
                enable_irq(irq);
        }
#endif
        return retval;
}
EXPORT_SYMBOL(request_threaded_irq);
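
/*
 * Example (illustrative sketch): the split primary/threaded pattern
 * described above, for a slow (e.g. I2C-connected) device on a shared
 * line. All foo_* names are hypothetical.
 *
 *    static irqreturn_t foo_quick_check(int irq, void *dev_id)
 *    {
 *            struct foo_dev *foo = dev_id;
 *
 *            if (!foo_irq_is_mine(foo))      // shared line: ours at all?
 *                    return IRQ_NONE;
 *            foo_mask_device_irq(foo);       // silence the device ...
 *            return IRQ_WAKE_THREAD;         // ... and defer to the thread
 *    }
 *
 *    static irqreturn_t foo_thread_fn(int irq, void *dev_id)
 *    {
 *            struct foo_dev *foo = dev_id;
 *
 *            foo_handle_events(foo);         // may sleep here
 *            foo_unmask_device_irq(foo);
 *            return IRQ_HANDLED;
 *    }
 *
 *    ret = request_threaded_irq(foo->irq, foo_quick_check, foo_thread_fn,
 *                               IRQF_SHARED, "foo", foo);
 */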
/**
 * request_any_context_irq - allocate an interrupt line
 * @irq: Interrupt line to allocate
 * @handler: Function to be called when the IRQ occurs.
 *	     Threaded handler for threaded interrupts.
 * @flags: Interrupt type flags
 * @name: An ascii name for the claiming device
 * @dev_id: A cookie passed back to the handler function
 *
 * This call allocates interrupt resources and enables the
 * interrupt line and IRQ handling. It selects either a
 * hardirq or threaded handling method depending on the
 * context.
 *
 * On failure, it returns a negative value. On success,
 * it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
 */
int request_any_context_irq(unsigned int irq, irq_handler_t handler,
                            unsigned long flags, const char *name, void *dev_id)
{
        struct irq_desc *desc = irq_to_desc(irq);
        int ret;

        if (!desc)
                return -EINVAL;

        if (desc->status & IRQ_NESTED_THREAD) {
                ret = request_threaded_irq(irq, NULL, handler,
                                           flags, name, dev_id);
                return !ret ? IRQC_IS_NESTED : ret;
        }

        ret = request_irq(irq, handler, flags, name, dev_id);
        return !ret ? IRQC_IS_HARDIRQ : ret;
}
EXPORT_SYMBOL_GPL(request_any_context_irq);
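
/*
 * Example (illustrative sketch): a driver that does not care whether
 * its parent controller provides a real hardirq or a nested thread,
 * e.g. a client of a GPIO expander. Names are hypothetical.
 *
 *    ret = request_any_context_irq(gpio_to_irq(foo->gpio), foo_handler,
 *                                  IRQF_TRIGGER_FALLING, "foo", foo);
 *    if (ret < 0)
 *            return ret;
 *    // ret is IRQC_IS_HARDIRQ or IRQC_IS_NESTED on success
 */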