/*
 * linux/kernel/irq/chip.c
 *
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the core interrupt handling code, for irq-chip
 * based architectures.
 *
 * Detailed information is available in Documentation/core-api/genericirq.rst
 */

#include <linux/irq.h>
#include <linux/msi.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/irqdomain.h>

#include <trace/events/irq.h>

#include "internals.h"

static irqreturn_t bad_chained_irq(int irq, void *dev_id)
{
	WARN_ONCE(1, "Chained irq %d should not call an action\n", irq);
	return IRQ_NONE;
}

/*
 * Chained handlers should never call an action on their IRQ. This default
 * action will emit a warning if that happens.
 */
struct irqaction chained_action = {
	.handler = bad_chained_irq,
};

/**
 * irq_set_chip - set the irq chip for an irq
 * @irq:	irq number
 * @chip:	pointer to irq chip description structure
 */
int irq_set_chip(unsigned int irq, struct irq_chip *chip)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;

	if (!chip)
		chip = &no_irq_chip;

	desc->irq_data.chip = chip;
	irq_put_desc_unlock(desc, flags);
	/*
	 * For !CONFIG_SPARSE_IRQ make the irq show up in
	 * allocated_irqs.
	 */
	irq_mark_irq(irq);
	return 0;
}
EXPORT_SYMBOL(irq_set_chip);

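/*
 * Illustrative sketch (not part of this file): an irqchip driver typically
 * installs its chip from the irqdomain ->map() callback. The names
 * foo_domain_map() and foo_chip are hypothetical.
 *
 *	static int foo_domain_map(struct irq_domain *d, unsigned int irq,
 *				  irq_hw_number_t hw)
 *	{
 *		irq_set_chip(irq, &foo_chip);
 *		irq_set_chip_data(irq, d->host_data);
 *		irq_set_handler(irq, handle_level_irq);
 *		return 0;
 *	}
 */
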
/**
 * irq_set_irq_type - set the irq trigger type for an irq
 * @irq:	irq number
 * @type:	IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
 */
int irq_set_irq_type(unsigned int irq, unsigned int type)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags,
						     IRQ_GET_DESC_CHECK_GLOBAL);
	int ret = 0;

	if (!desc)
		return -EINVAL;

	ret = __irq_set_trigger(desc, type);
	irq_put_desc_busunlock(desc, flags);
	return ret;
}
EXPORT_SYMBOL(irq_set_irq_type);

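/*
 * Illustrative sketch (not part of this file): board or driver code that
 * knows the line polarity can set the trigger type before requesting the
 * interrupt. foo_irq, foo_isr and foo are hypothetical.
 *
 *	ret = irq_set_irq_type(foo_irq, IRQ_TYPE_EDGE_FALLING);
 *	if (!ret)
 *		ret = request_irq(foo_irq, foo_isr, 0, "foo", foo);
 */
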
/**
 * irq_set_handler_data - set irq handler data for an irq
 * @irq:	Interrupt number
 * @data:	Pointer to interrupt specific data
 *
 * Set the irq handler data for an irq
 */
int irq_set_handler_data(unsigned int irq, void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;
	desc->irq_common_data.handler_data = data;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL(irq_set_handler_data);

/**
 * irq_set_msi_desc_off - set MSI descriptor data for an irq at offset
 * @irq_base:	Interrupt number base
 * @irq_offset:	Interrupt number offset
 * @entry:	Pointer to MSI descriptor data
 *
 * Set the MSI descriptor entry for an irq at offset
 */
int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset,
			 struct msi_desc *entry)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq_base + irq_offset, &flags,
						  IRQ_GET_DESC_CHECK_GLOBAL);

	if (!desc)
		return -EINVAL;
	desc->irq_common_data.msi_desc = entry;
	if (entry && !irq_offset)
		entry->irq = irq_base;
	irq_put_desc_unlock(desc, flags);
	return 0;
}

/**
 * irq_set_msi_desc - set MSI descriptor data for an irq
 * @irq:	Interrupt number
 * @entry:	Pointer to MSI descriptor data
 *
 * Set the MSI descriptor entry for an irq
 */
int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry)
{
	return irq_set_msi_desc_off(irq, 0, entry);
}

/**
 * irq_set_chip_data - set irq chip data for an irq
 * @irq:	Interrupt number
 * @data:	Pointer to chip specific data
 *
 * Set the hardware irq chip data for an irq
 */
int irq_set_chip_data(unsigned int irq, void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return -EINVAL;
	desc->irq_data.chip_data = data;
	irq_put_desc_unlock(desc, flags);
	return 0;
}
EXPORT_SYMBOL(irq_set_chip_data);

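/*
 * Illustrative sketch (not part of this file): chip data stored here is
 * usually retrieved again inside the irq_chip callbacks. foo_priv,
 * foo_mask() and the FOO_MASK_SET register are hypothetical.
 *
 *	static void foo_mask(struct irq_data *d)
 *	{
 *		struct foo_priv *priv = irq_data_get_irq_chip_data(d);
 *
 *		writel(BIT(d->hwirq), priv->base + FOO_MASK_SET);
 *	}
 *
 *	// at setup time:
 *	irq_set_chip_data(irq, priv);
 */
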
struct irq_data *irq_get_irq_data(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc ? &desc->irq_data : NULL;
}
EXPORT_SYMBOL_GPL(irq_get_irq_data);

static void irq_state_clr_disabled(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED);
}

static void irq_state_set_disabled(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
}

static void irq_state_clr_masked(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED);
}

static void irq_state_set_masked(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
}

static void irq_state_clr_started(struct irq_desc *desc)
{
	irqd_clear(&desc->irq_data, IRQD_IRQ_STARTED);
}

static void irq_state_set_started(struct irq_desc *desc)
{
	irqd_set(&desc->irq_data, IRQD_IRQ_STARTED);
}

enum {
	IRQ_STARTUP_NORMAL,
	IRQ_STARTUP_MANAGED,
	IRQ_STARTUP_ABORT,
};

#ifdef CONFIG_SMP
static int
__irq_startup_managed(struct irq_desc *desc, struct cpumask *aff, bool force)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);

	if (!irqd_affinity_is_managed(d))
		return IRQ_STARTUP_NORMAL;

	irqd_clr_managed_shutdown(d);

	if (cpumask_any_and(aff, cpu_online_mask) >= nr_cpu_ids) {
		/*
		 * Catch code which fiddles with enable_irq() on a managed
		 * and potentially shutdown IRQ. Chained interrupt
		 * installment or irq auto probing should not happen on
		 * managed irqs either. Emit a warning, break the affinity
		 * and start it up as a normal interrupt.
		 */
		if (WARN_ON_ONCE(force))
			return IRQ_STARTUP_NORMAL;
		/*
		 * The interrupt was requested, but there is no online CPU
		 * in its affinity mask. Put it into managed shutdown
		 * state and let the cpu hotplug mechanism start it up once
		 * a CPU in the mask becomes available.
		 */
		irqd_set_managed_shutdown(d);
		return IRQ_STARTUP_ABORT;
	}
	return IRQ_STARTUP_MANAGED;
}
#else
static __always_inline int
__irq_startup_managed(struct irq_desc *desc, struct cpumask *aff, bool force)
{
	return IRQ_STARTUP_NORMAL;
}
#endif

static int __irq_startup(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	int ret = 0;

	irq_domain_activate_irq(d);
	if (d->chip->irq_startup) {
		ret = d->chip->irq_startup(d);
		irq_state_clr_disabled(desc);
		irq_state_clr_masked(desc);
	} else {
		irq_enable(desc);
	}
	irq_state_set_started(desc);
	return ret;
}

int irq_startup(struct irq_desc *desc, bool resend, bool force)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	struct cpumask *aff = irq_data_get_affinity_mask(d);
	int ret = 0;

	desc->depth = 0;

	if (irqd_is_started(d)) {
		irq_enable(desc);
	} else {
		switch (__irq_startup_managed(desc, aff, force)) {
		case IRQ_STARTUP_NORMAL:
			ret = __irq_startup(desc);
			irq_setup_affinity(desc);
			break;
		case IRQ_STARTUP_MANAGED:
			ret = __irq_startup(desc);
			irq_set_affinity_locked(d, aff, false);
			break;
		case IRQ_STARTUP_ABORT:
			return 0;
		}
	}
	if (resend)
		check_irq_resend(desc);

	return ret;
}

static void __irq_disable(struct irq_desc *desc, bool mask);

void irq_shutdown(struct irq_desc *desc)
{
	if (irqd_is_started(&desc->irq_data)) {
		desc->depth = 1;
		if (desc->irq_data.chip->irq_shutdown) {
			desc->irq_data.chip->irq_shutdown(&desc->irq_data);
			irq_state_set_disabled(desc);
			irq_state_set_masked(desc);
		} else {
			__irq_disable(desc, true);
		}
		irq_state_clr_started(desc);
	}
	/*
	 * This must be called even if the interrupt was never started up,
	 * because the activation can happen before the interrupt is
	 * available for request/startup. It has its own state tracking so
	 * it's safe to call it unconditionally.
	 */
	irq_domain_deactivate_irq(&desc->irq_data);
}

void irq_enable(struct irq_desc *desc)
{
	if (!irqd_irq_disabled(&desc->irq_data)) {
		unmask_irq(desc);
	} else {
		irq_state_clr_disabled(desc);
		if (desc->irq_data.chip->irq_enable) {
			desc->irq_data.chip->irq_enable(&desc->irq_data);
			irq_state_clr_masked(desc);
		} else {
			unmask_irq(desc);
		}
	}
}

static void __irq_disable(struct irq_desc *desc, bool mask)
{
	if (irqd_irq_disabled(&desc->irq_data)) {
		if (mask)
			mask_irq(desc);
	} else {
		irq_state_set_disabled(desc);
		if (desc->irq_data.chip->irq_disable) {
			desc->irq_data.chip->irq_disable(&desc->irq_data);
			irq_state_set_masked(desc);
		} else if (mask) {
			mask_irq(desc);
		}
	}
}

/**
 * irq_disable - Mark interrupt disabled
 * @desc:	irq descriptor which should be disabled
 *
 * If the chip does not implement the irq_disable callback, we
 * use a lazy disable approach. That means we mark the interrupt
 * disabled, but leave the hardware unmasked. That's an
 * optimization because we avoid the hardware access for the
 * common case where no interrupt happens after we marked it
 * disabled. If an interrupt happens, then the interrupt flow
 * handler masks the line at the hardware level and marks it
 * pending.
 *
 * If the interrupt chip does not implement the irq_disable callback,
 * a driver can disable the lazy approach for a particular irq line by
 * calling 'irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY)'. This can
 * be used for devices which cannot disable the interrupt at the
 * device level under certain circumstances and have to use
 * disable_irq[_nosync] instead.
 */
void irq_disable(struct irq_desc *desc)
{
	__irq_disable(desc, irq_settings_disable_unlazy(desc));
}

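/*
 * Illustrative sketch (not part of this file): a driver whose device cannot
 * gate the interrupt at the device level can opt out of the lazy disable
 * described above by setting IRQ_DISABLE_UNLAZY before requesting the line.
 * foo_irq, foo_isr and foo are hypothetical.
 *
 *	irq_set_status_flags(foo_irq, IRQ_DISABLE_UNLAZY);
 *	ret = request_irq(foo_irq, foo_isr, 0, "foo", foo);
 */
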
void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu)
{
	if (desc->irq_data.chip->irq_enable)
		desc->irq_data.chip->irq_enable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
	cpumask_set_cpu(cpu, desc->percpu_enabled);
}

void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu)
{
	if (desc->irq_data.chip->irq_disable)
		desc->irq_data.chip->irq_disable(&desc->irq_data);
	else
		desc->irq_data.chip->irq_mask(&desc->irq_data);
	cpumask_clear_cpu(cpu, desc->percpu_enabled);
}

static inline void mask_ack_irq(struct irq_desc *desc)
{
	if (desc->irq_data.chip->irq_mask_ack) {
		desc->irq_data.chip->irq_mask_ack(&desc->irq_data);
		irq_state_set_masked(desc);
	} else {
		mask_irq(desc);
		if (desc->irq_data.chip->irq_ack)
			desc->irq_data.chip->irq_ack(&desc->irq_data);
	}
}

void mask_irq(struct irq_desc *desc)
{
	if (irqd_irq_masked(&desc->irq_data))
		return;

	if (desc->irq_data.chip->irq_mask) {
		desc->irq_data.chip->irq_mask(&desc->irq_data);
		irq_state_set_masked(desc);
	}
}

void unmask_irq(struct irq_desc *desc)
{
	if (!irqd_irq_masked(&desc->irq_data))
		return;

	if (desc->irq_data.chip->irq_unmask) {
		desc->irq_data.chip->irq_unmask(&desc->irq_data);
		irq_state_clr_masked(desc);
	}
}

void unmask_threaded_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	if (chip->flags & IRQCHIP_EOI_THREADED)
		chip->irq_eoi(&desc->irq_data);

	unmask_irq(desc);
}

/**
 * handle_nested_irq - Handle a nested irq from an irq thread
 * @irq:	the interrupt number
 *
 * Handle interrupts which are nested into a threaded interrupt
 * handler. The handler function is called inside the calling
 * thread's context.
 */
void handle_nested_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	struct irqaction *action;
	irqreturn_t action_ret;

	might_sleep();

	raw_spin_lock_irq(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	action = desc->action;
	if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(desc);
	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	raw_spin_unlock_irq(&desc->lock);

	action_ret = IRQ_NONE;
	for_each_action_of_desc(desc, action)
		action_ret |= action->thread_fn(action->irq, action->dev_id);

	if (!noirqdebug)
		note_interrupt(desc, action_ret);

	raw_spin_lock_irq(&desc->lock);
	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

out_unlock:
	raw_spin_unlock_irq(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_nested_irq);

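/*
 * Illustrative sketch (not part of this file): a threaded demultiplexer,
 * e.g. for an I2C GPIO expander, invokes handle_nested_irq() for each child
 * interrupt it decodes. foo_thread_fn(), foo_read_pending(), FOO_NR_IRQS and
 * foo->domain are hypothetical; the children would have been marked nested
 * with irq_set_nested_thread() at mapping time.
 *
 *	static irqreturn_t foo_thread_fn(int irq, void *dev_id)
 *	{
 *		struct foo *foo = dev_id;
 *		unsigned long pending = foo_read_pending(foo);
 *		int bit;
 *
 *		for_each_set_bit(bit, &pending, FOO_NR_IRQS)
 *			handle_nested_irq(irq_find_mapping(foo->domain, bit));
 *		return IRQ_HANDLED;
 *	}
 */
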
static bool irq_check_poll(struct irq_desc *desc)
{
	if (!(desc->istate & IRQS_POLL_INPROGRESS))
		return false;
	return irq_wait_for_poll(desc);
}

static bool irq_may_run(struct irq_desc *desc)
{
	unsigned int mask = IRQD_IRQ_INPROGRESS | IRQD_WAKEUP_ARMED;

	/*
	 * If the interrupt is not in progress and is not an armed
	 * wakeup interrupt, proceed.
	 */
	if (!irqd_has_set(&desc->irq_data, mask))
		return true;

	/*
	 * If the interrupt is an armed wakeup source, mark it pending
	 * and suspended, disable it and notify the pm core about the
	 * event.
	 */
	if (irq_pm_check_wakeup(desc))
		return false;

	/*
	 * Handle a potential concurrent poll on a different core.
	 */
	return irq_check_poll(desc);
}

/**
 * handle_simple_irq - Simple and software-decoded IRQs.
 * @desc:	the interrupt description structure for this irq
 *
 * Simple interrupts are either sent from a demultiplexing interrupt
 * handler or come from hardware, where no interrupt hardware control
 * is necessary.
 *
 * Note:	The caller is expected to handle the ack, clear, mask and
 *		unmask issues if necessary.
 */
void handle_simple_irq(struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	if (!irq_may_run(desc))
		goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(desc);
	handle_irq_event(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_simple_irq);

/**
 * handle_untracked_irq - Simple and software-decoded IRQs.
 * @desc:	the interrupt description structure for this irq
 *
 * Untracked interrupts are sent from a demultiplexing interrupt
 * handler when the demultiplexer does not know which device in its
 * multiplexed irq domain generated the interrupt. IRQs handled
 * through here are not subjected to stats tracking, randomness, or
 * spurious interrupt detection.
 *
 * Note: Like handle_simple_irq, the caller is expected to handle
 * the ack, clear, mask and unmask issues if necessary.
 */
void handle_untracked_irq(struct irq_desc *desc)
{
	unsigned int flags = 0;

	raw_spin_lock(&desc->lock);

	if (!irq_may_run(desc))
		goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	desc->istate &= ~IRQS_PENDING;
	irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
	raw_spin_unlock(&desc->lock);

	__handle_irq_event_percpu(desc, &flags);

	raw_spin_lock(&desc->lock);
	irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_untracked_irq);

/*
 * Called unconditionally from handle_level_irq() and only for oneshot
 * interrupts from handle_fasteoi_irq()
 */
static void cond_unmask_irq(struct irq_desc *desc)
{
	/*
	 * We need to unmask in the following cases:
	 * - Standard level irq (IRQF_ONESHOT is not set)
	 * - Oneshot irq which did not wake the thread (caused by a
	 *   spurious interrupt or a primary handler handling it
	 *   completely).
	 */
	if (!irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot)
		unmask_irq(desc);
}

/**
 * handle_level_irq - Level type irq handler
 * @desc:	the interrupt description structure for this irq
 *
 * Level type interrupts are active as long as the hardware line has
 * the active level. This may require masking the interrupt and unmasking
 * it after the associated handler has acknowledged the device, so the
 * interrupt line is back to inactive.
 */
void handle_level_irq(struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);
	mask_ack_irq(desc);

	if (!irq_may_run(desc))
		goto out_unlock;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	/*
	 * If it's disabled or no action is available,
	 * keep it masked and get out of here.
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(desc);
	handle_irq_event(desc);

	cond_unmask_irq(desc);

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_level_irq);

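/*
 * Illustrative sketch (not part of this file): handle_level_irq() is not
 * called directly by drivers; an irqchip driver installs it as the flow
 * handler for level-triggered lines. foo_chip is hypothetical.
 *
 *	irq_set_chip_and_handler(irq, &foo_chip, handle_level_irq);
 */
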
#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
static inline void preflow_handler(struct irq_desc *desc)
{
	if (desc->preflow_handler)
		desc->preflow_handler(&desc->irq_data);
}
#else
static inline void preflow_handler(struct irq_desc *desc) { }
#endif

static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip)
{
	if (!(desc->istate & IRQS_ONESHOT)) {
		chip->irq_eoi(&desc->irq_data);
		return;
	}
	/*
	 * We need to unmask in the following cases:
	 * - Oneshot irq which did not wake the thread (caused by a
	 *   spurious interrupt or a primary handler handling it
	 *   completely).
	 */
	if (!irqd_irq_disabled(&desc->irq_data) &&
	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot) {
		chip->irq_eoi(&desc->irq_data);
		unmask_irq(desc);
	} else if (!(chip->flags & IRQCHIP_EOI_THREADED)) {
		chip->irq_eoi(&desc->irq_data);
	}
}

/**
 * handle_fasteoi_irq - irq handler for transparent controllers
 * @desc:	the interrupt description structure for this irq
 *
 * Only a single callback will be issued to the chip: an ->eoi()
 * call when the interrupt has been serviced. This enables support
 * for modern forms of interrupt handlers, which handle the flow
 * details in hardware, transparently.
 */
void handle_fasteoi_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = desc->irq_data.chip;

	raw_spin_lock(&desc->lock);

	if (!irq_may_run(desc))
		goto out;

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	/*
	 * If it's disabled or no action is available,
	 * then mask it and get out of here:
	 */
	if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
		desc->istate |= IRQS_PENDING;
		mask_irq(desc);
		goto out;
	}

	kstat_incr_irqs_this_cpu(desc);
	if (desc->istate & IRQS_ONESHOT)
		mask_irq(desc);

	preflow_handler(desc);
	handle_irq_event(desc);

	cond_unmask_eoi_irq(desc, chip);

	raw_spin_unlock(&desc->lock);
	return;
out:
	if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
		chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL_GPL(handle_fasteoi_irq);

/**
 * handle_edge_irq - edge type IRQ handler
 * @desc:	the interrupt description structure for this irq
 *
 * Interrupts occur on the falling and/or rising edge of a hardware
 * signal. The occurrence is latched into the irq controller hardware
 * and must be acked in order to be reenabled. After the ack another
 * interrupt can happen on the same source even before the first one
 * is handled by the associated event handler. If this happens it
 * might be necessary to disable (mask) the interrupt depending on the
 * controller hardware. This requires re-enabling the interrupt inside
 * of the loop which handles the interrupts which have arrived while
 * the handler was running. If all pending interrupts are handled, the
 * loop is left.
 */
void handle_edge_irq(struct irq_desc *desc)
{
	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (!irq_may_run(desc)) {
		desc->istate |= IRQS_PENDING;
		mask_ack_irq(desc);
		goto out_unlock;
	}

	/*
	 * If it's disabled or no action is available then mask it and get
	 * out of here.
	 */
	if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
		desc->istate |= IRQS_PENDING;
		mask_ack_irq(desc);
		goto out_unlock;
	}

	kstat_incr_irqs_this_cpu(desc);

	/* Start handling the irq */
	desc->irq_data.chip->irq_ack(&desc->irq_data);

	do {
		if (unlikely(!desc->action)) {
			mask_irq(desc);
			goto out_unlock;
		}

		/*
		 * When another irq arrived while we were handling
		 * one, we could have masked the irq.
		 * Re-enable it, if it was not disabled in the meantime.
		 */
		if (unlikely(desc->istate & IRQS_PENDING)) {
			if (!irqd_irq_disabled(&desc->irq_data) &&
			    irqd_irq_masked(&desc->irq_data))
				unmask_irq(desc);
		}

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_unlock:
	raw_spin_unlock(&desc->lock);
}
EXPORT_SYMBOL(handle_edge_irq);

#ifdef CONFIG_IRQ_EDGE_EOI_HANDLER
/**
 * handle_edge_eoi_irq - edge eoi type IRQ handler
 * @desc:	the interrupt description structure for this irq
 *
 * Similar to the above handle_edge_irq, but using eoi and without the
 * mask/unmask functions.
 */
void handle_edge_eoi_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	raw_spin_lock(&desc->lock);

	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);

	if (!irq_may_run(desc)) {
		desc->istate |= IRQS_PENDING;
		goto out_eoi;
	}

	/*
	 * If it's disabled or no action is available then mask it and get
	 * out of here.
	 */
	if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
		desc->istate |= IRQS_PENDING;
		goto out_eoi;
	}

	kstat_incr_irqs_this_cpu(desc);

	do {
		if (unlikely(!desc->action))
			goto out_eoi;

		handle_irq_event(desc);

	} while ((desc->istate & IRQS_PENDING) &&
		 !irqd_irq_disabled(&desc->irq_data));

out_eoi:
	chip->irq_eoi(&desc->irq_data);
	raw_spin_unlock(&desc->lock);
}
#endif

/**
 * handle_percpu_irq - Per CPU local irq handler
 * @desc:	the interrupt description structure for this irq
 *
 * Per CPU interrupts on SMP machines without locking requirements
 */
void handle_percpu_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	kstat_incr_irqs_this_cpu(desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	handle_irq_event_percpu(desc);

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}

/**
 * handle_percpu_devid_irq - Per CPU local irq handler with per cpu dev ids
 * @desc:	the interrupt description structure for this irq
 *
 * Per CPU interrupts on SMP machines without locking requirements. Same as
 * handle_percpu_irq() above but with the following extras:
 *
 * action->percpu_dev_id is a pointer to percpu variables which
 * contain the real device id for the cpu on which this handler is
 * called.
 */
void handle_percpu_devid_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irqaction *action = desc->action;
	unsigned int irq = irq_desc_get_irq(desc);
	irqreturn_t res;

	kstat_incr_irqs_this_cpu(desc);

	if (chip->irq_ack)
		chip->irq_ack(&desc->irq_data);

	if (likely(action)) {
		trace_irq_handler_entry(irq, action);
		res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id));
		trace_irq_handler_exit(irq, action, res);
	} else {
		unsigned int cpu = smp_processor_id();
		bool enabled = cpumask_test_cpu(cpu, desc->percpu_enabled);

		if (enabled)
			irq_percpu_disable(desc, cpu);

		pr_err_once("Spurious%s percpu IRQ%u on CPU%u\n",
			    enabled ? " and unmasked" : "", irq, cpu);
	}

	if (chip->irq_eoi)
		chip->irq_eoi(&desc->irq_data);
}

static void
__irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle,
		     int is_chained, const char *name)
{
	if (!handle) {
		handle = handle_bad_irq;
	} else {
		struct irq_data *irq_data = &desc->irq_data;
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
		/*
		 * With hierarchical domains we might run into a
		 * situation where the outermost chip is not yet set
		 * up, but the inner chips are there. Instead of
		 * bailing we install the handler, but obviously we
		 * cannot enable/startup the interrupt at this point.
		 */
		while (irq_data) {
			if (irq_data->chip != &no_irq_chip)
				break;
			/*
			 * Bail out if the outer chip is not set up
			 * and the interrupt is supposed to be started
			 * right away.
			 */
			if (WARN_ON(is_chained))
				return;
			/* Try the parent */
			irq_data = irq_data->parent_data;
		}
#endif
		if (WARN_ON(!irq_data || irq_data->chip == &no_irq_chip))
			return;
	}

	/* Uninstall? */
	if (handle == handle_bad_irq) {
		if (desc->irq_data.chip != &no_irq_chip)
			mask_ack_irq(desc);
		irq_state_set_disabled(desc);
		if (is_chained)
			desc->action = NULL;
		desc->depth = 1;
	}
	desc->handle_irq = handle;
	desc->name = name;

	if (handle != handle_bad_irq && is_chained) {
		unsigned int type = irqd_get_trigger_type(&desc->irq_data);

		/*
		 * We're about to start this interrupt immediately,
		 * hence the need to set the trigger configuration.
		 * But the .set_type callback may have overridden the
		 * flow handler, ignoring that we're dealing with a
		 * chained interrupt. Reset it immediately because we
		 * do know better than the hardware.
		 */
		if (type != IRQ_TYPE_NONE) {
			__irq_set_trigger(desc, type);
			desc->handle_irq = handle;
		}

		irq_settings_set_noprobe(desc);
		irq_settings_set_norequest(desc);
		irq_settings_set_nothread(desc);
		desc->action = &chained_action;
		irq_startup(desc, IRQ_RESEND, IRQ_START_FORCE);
	}
}

void
__irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
		  const char *name)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);

	if (!desc)
		return;

	__irq_do_set_handler(desc, handle, is_chained, name);
	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL_GPL(__irq_set_handler);

void
irq_set_chained_handler_and_data(unsigned int irq, irq_flow_handler_t handle,
				 void *data)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);

	if (!desc)
		return;

	desc->irq_common_data.handler_data = data;
	__irq_do_set_handler(desc, handle, 1, NULL);

	irq_put_desc_busunlock(desc, flags);
}
EXPORT_SYMBOL_GPL(irq_set_chained_handler_and_data);

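/*
 * Illustrative sketch (not part of this file): a secondary interrupt
 * controller hangs its demultiplexing flow handler off the parent line with
 * irq_set_chained_handler_and_data(). foo_demux_handler(), struct foo,
 * FOO_PENDING and FOO_NR_IRQS are hypothetical; chained_irq_enter()/exit()
 * come from <linux/irqchip/chained_irq.h>.
 *
 *	static void foo_demux_handler(struct irq_desc *desc)
 *	{
 *		struct foo *foo = irq_desc_get_handler_data(desc);
 *		struct irq_chip *chip = irq_desc_get_chip(desc);
 *		unsigned long pending;
 *		int bit;
 *
 *		chained_irq_enter(chip, desc);
 *		pending = readl(foo->base + FOO_PENDING);
 *		for_each_set_bit(bit, &pending, FOO_NR_IRQS)
 *			generic_handle_irq(irq_find_mapping(foo->domain, bit));
 *		chained_irq_exit(chip, desc);
 *	}
 *
 *	// at probe time:
 *	irq_set_chained_handler_and_data(parent_irq, foo_demux_handler, foo);
 */
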
void
irq_set_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
			      irq_flow_handler_t handle, const char *name)
{
	irq_set_chip(irq, chip);
	__irq_set_handler(irq, handle, 0, name);
}
EXPORT_SYMBOL_GPL(irq_set_chip_and_handler_name);

void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
{
	unsigned long flags;
	struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

	if (!desc)
		return;

	/*
	 * Warn when a driver sets the no autoenable flag on an already
	 * requested irq.
	 */
	WARN_ON_ONCE(!desc->depth && (set & _IRQ_NOAUTOEN));

	irq_settings_clr_and_set(desc, clr, set);

	irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
		   IRQD_TRIGGER_MASK | IRQD_LEVEL | IRQD_MOVE_PCNTXT);
	if (irq_settings_has_no_balance_set(desc))
		irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
	if (irq_settings_is_per_cpu(desc))
		irqd_set(&desc->irq_data, IRQD_PER_CPU);
	if (irq_settings_can_move_pcntxt(desc))
		irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT);
	if (irq_settings_is_level(desc))
		irqd_set(&desc->irq_data, IRQD_LEVEL);

	irqd_set(&desc->irq_data, irq_settings_get_trigger_mask(desc));

	irq_put_desc_unlock(desc, flags);
}
EXPORT_SYMBOL_GPL(irq_modify_status);

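/*
 * Illustrative sketch (not part of this file): most callers use the
 * irq_set_status_flags()/irq_clear_status_flags() wrappers around
 * irq_modify_status(). foo_irq is hypothetical.
 *
 *	// keep the line from being auto-enabled by request_irq()
 *	irq_set_status_flags(foo_irq, IRQ_NOAUTOEN);
 *
 *	// later, when the default auto-enable behaviour is wanted again
 *	irq_clear_status_flags(foo_irq, IRQ_NOAUTOEN);
 */
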
/**
 * irq_cpu_online - Invoke all irq_cpu_online functions.
 *
 * Iterate through all irqs and invoke the chip.irq_cpu_online()
 * for each.
 */
void irq_cpu_online(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		if (chip && chip->irq_cpu_online &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_online(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}

/**
 * irq_cpu_offline - Invoke all irq_cpu_offline functions.
 *
 * Iterate through all irqs and invoke the chip.irq_cpu_offline()
 * for each.
 */
void irq_cpu_offline(void)
{
	struct irq_desc *desc;
	struct irq_chip *chip;
	unsigned long flags;
	unsigned int irq;

	for_each_active_irq(irq) {
		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		raw_spin_lock_irqsave(&desc->lock, flags);

		chip = irq_data_get_irq_chip(&desc->irq_data);
		if (chip && chip->irq_cpu_offline &&
		    (!(chip->flags & IRQCHIP_ONOFFLINE_ENABLED) ||
		     !irqd_irq_disabled(&desc->irq_data)))
			chip->irq_cpu_offline(&desc->irq_data);

		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}
}

#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
/**
 * irq_chip_enable_parent - Enable the parent interrupt (defaults to unmask if
 * NULL)
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_enable_parent(struct irq_data *data)
{
	data = data->parent_data;
	if (data->chip->irq_enable)
		data->chip->irq_enable(data);
	else
		data->chip->irq_unmask(data);
}

/**
 * irq_chip_disable_parent - Disable the parent interrupt (defaults to mask if
 * NULL)
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_disable_parent(struct irq_data *data)
{
	data = data->parent_data;
	if (data->chip->irq_disable)
		data->chip->irq_disable(data);
	else
		data->chip->irq_mask(data);
}

/**
 * irq_chip_ack_parent - Acknowledge the parent interrupt
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_ack_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_ack(data);
}
EXPORT_SYMBOL_GPL(irq_chip_ack_parent);

/**
 * irq_chip_mask_parent - Mask the parent interrupt
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_mask_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_mask(data);
}
EXPORT_SYMBOL_GPL(irq_chip_mask_parent);

/**
 * irq_chip_unmask_parent - Unmask the parent interrupt
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_unmask_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_unmask(data);
}
EXPORT_SYMBOL_GPL(irq_chip_unmask_parent);

/**
 * irq_chip_eoi_parent - Invoke EOI on the parent interrupt
 * @data:	Pointer to interrupt specific data
 */
void irq_chip_eoi_parent(struct irq_data *data)
{
	data = data->parent_data;
	data->chip->irq_eoi(data);
}
EXPORT_SYMBOL_GPL(irq_chip_eoi_parent);

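/*
 * Illustrative sketch (not part of this file): a stacked irqchip in a
 * hierarchical domain can forward the basic operations to its parent with
 * the *_parent helpers in this file. foo_msi_chip is hypothetical.
 *
 *	static struct irq_chip foo_msi_chip = {
 *		.name			= "foo-msi",
 *		.irq_mask		= irq_chip_mask_parent,
 *		.irq_unmask		= irq_chip_unmask_parent,
 *		.irq_eoi		= irq_chip_eoi_parent,
 *		.irq_set_affinity	= irq_chip_set_affinity_parent,
 *	};
 */
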
/**
 * irq_chip_set_affinity_parent - Set affinity on the parent interrupt
 * @data:	Pointer to interrupt specific data
 * @dest:	The affinity mask to set
 * @force:	Flag to enforce setting (disable online checks)
 *
 * Conditional, as the underlying parent chip might not implement it.
 */
int irq_chip_set_affinity_parent(struct irq_data *data,
				 const struct cpumask *dest, bool force)
{
	data = data->parent_data;
	if (data->chip->irq_set_affinity)
		return data->chip->irq_set_affinity(data, dest, force);

	return -ENOSYS;
}

/**
 * irq_chip_set_type_parent - Set IRQ type on the parent interrupt
 * @data:	Pointer to interrupt specific data
 * @type:	IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
 *
 * Conditional, as the underlying parent chip might not implement it.
 */
int irq_chip_set_type_parent(struct irq_data *data, unsigned int type)
{
	data = data->parent_data;

	if (data->chip->irq_set_type)
		return data->chip->irq_set_type(data, type);

	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(irq_chip_set_type_parent);

/**
 * irq_chip_retrigger_hierarchy - Retrigger an interrupt in hardware
 * @data:	Pointer to interrupt specific data
 *
 * Iterate through the domain hierarchy of the interrupt and check
 * whether a hw retrigger function exists. If yes, invoke it.
 */
int irq_chip_retrigger_hierarchy(struct irq_data *data)
{
	for (data = data->parent_data; data; data = data->parent_data)
		if (data->chip && data->chip->irq_retrigger)
			return data->chip->irq_retrigger(data);

	return 0;
}

/**
 * irq_chip_set_vcpu_affinity_parent - Set vcpu affinity on the parent interrupt
 * @data:	Pointer to interrupt specific data
 * @vcpu_info:	The vcpu affinity information
 */
int irq_chip_set_vcpu_affinity_parent(struct irq_data *data, void *vcpu_info)
{
	data = data->parent_data;
	if (data->chip->irq_set_vcpu_affinity)
		return data->chip->irq_set_vcpu_affinity(data, vcpu_info);

	return -ENOSYS;
}

/**
 * irq_chip_set_wake_parent - Set/reset wake-up on the parent interrupt
 * @data:	Pointer to interrupt specific data
 * @on:		Whether to set or reset the wake-up capability of this irq
 *
 * Conditional, as the underlying parent chip might not implement it.
 */
int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on)
{
	data = data->parent_data;
	if (data->chip->irq_set_wake)
		return data->chip->irq_set_wake(data, on);

	return -ENOSYS;
}
#endif

/**
 * irq_chip_compose_msi_msg - Compose MSI message for an irq chip
 * @data:	Pointer to interrupt specific data
 * @msg:	Pointer to the MSI message
 *
 * For hierarchical domains we find the first chip in the hierarchy
 * which implements the irq_compose_msi_msg callback. For non
 * hierarchical we use the top level chip.
 */
int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct irq_data *pos = NULL;

#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
	for (; data; data = data->parent_data)
#endif
		if (data->chip && data->chip->irq_compose_msi_msg)
			pos = data;
	if (!pos)
		return -ENOSYS;

	pos->chip->irq_compose_msi_msg(pos, msg);

	return 0;
}

/**
 * irq_chip_pm_get - Enable power for an IRQ chip
 * @data:	Pointer to interrupt specific data
 *
 * Enable the power to the IRQ chip referenced by the interrupt data
 * structure.
 */
int irq_chip_pm_get(struct irq_data *data)
{
	int retval;

	if (IS_ENABLED(CONFIG_PM) && data->chip->parent_device) {
		retval = pm_runtime_get_sync(data->chip->parent_device);
		if (retval < 0) {
			pm_runtime_put_noidle(data->chip->parent_device);
			return retval;
		}
	}

	return 0;
}

/**
 * irq_chip_pm_put - Disable power for an IRQ chip
 * @data:	Pointer to interrupt specific data
 *
 * Disable the power to the IRQ chip referenced by the interrupt data
 * structure. Note that power will only be disabled once this
 * function has been called for all IRQs that have called irq_chip_pm_get().
 */
int irq_chip_pm_put(struct irq_data *data)
{
	int retval = 0;

	if (IS_ENABLED(CONFIG_PM) && data->chip->parent_device)
		retval = pm_runtime_put(data->chip->parent_device);

	return (retval < 0) ? retval : 0;
}