/*
 * Derived from arch/i386/kernel/irq.c
 *   Copyright (C) 1992 Linus Torvalds
 * Adapted from arch/i386 by Gary Thomas
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Updated and modified by Cort Dougan <cort@fsmlabs.com>
 *   Copyright (C) 1996-2001 Cort Dougan
 * Adapted for Power Macintosh by Paul Mackerras
 *   Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQs should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be cheaper.
 *
 * The MPC8xx has an interrupt mask in the SIU. If a bit is set, the
 * interrupt is _enabled_. As expected, IRQ0 is bit 0 in the 32-bit
 * mask register (of which only 16 are defined), hence the weird shifting
 * and complement of the cached_irq_mask. I want to be able to stuff
 * this right into the SIU SMASK register.
 * Many of the prep/chrp functions are conditionally compiled on CONFIG_PPC_8xx
 * to reduce code space and undefined function references.
 */
#include <linux/export.h>
#include <linux/threads.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/profile.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/debugfs.h>
#include <linux/of.h>
#include <linux/of_irq.h>

#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/cache.h>
#include <asm/prom.h>
#include <asm/ptrace.h>
#include <asm/machdep.h>
#include <asm/udbg.h>
#include <asm/smp.h>
#include <asm/livepatch.h>
#include <asm/asm-prototypes.h>

#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/firmware.h>
#include <asm/lv1call.h>
#endif
#define CREATE_TRACE_POINTS
#include <asm/trace.h>
#include <asm/cpu_has_feature.h>
DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

int __irq_offset_value;

#ifdef CONFIG_PPC32
EXPORT_SYMBOL(__irq_offset_value);
atomic_t ppc_n_lost_interrupts;

#ifdef CONFIG_TAU_INT
extern int tau_initialized;
extern int tau_interrupts(int);
#endif
#endif /* CONFIG_PPC32 */

#ifdef CONFIG_PPC64

int distribute_irqs = 1;
static inline notrace unsigned long get_irq_happened(void)
{
	unsigned long happened;
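	/*
	 * GPR13 (the "13" below) holds the PACA pointer in the 64-bit
	 * kernel, so this loads the irq_happened byte straight from the
	 * per-CPU PACA without going through get_paca().
	 */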
	__asm__ __volatile__("lbz %0,%1(13)"
	: "=r" (happened) : "i" (offsetof(struct paca_struct, irq_happened)));

	return happened;
}
static inline notrace void set_soft_enabled(unsigned long enable)
{
	__asm__ __volatile__("stb %0,%1(13)"
	: : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled)));
}
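
/*
 * The decrementer may have rolled over while interrupts were hard
 * disabled (no exception was taken for it), so compare the current
 * timebase against the next scheduled decrementer event to detect
 * a missed tick.
 */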
static inline notrace int decrementer_check_overflow(void)
{
	u64 now = get_tb_or_rtc();
	u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);

	return now >= *next_tb;
}
/* This is called whenever we are re-enabling interrupts
 * and returns either 0 (nothing to do) or 500/900/280/a00/e80 if
 * there's an EE, DEC or DBELL to generate.
 *
 * This is called in two contexts: From arch_local_irq_restore()
 * before soft-enabling interrupts, and from the exception exit
 * path when returning from an interrupt from a soft-disabled to
 * a soft-enabled context. In both cases we have interrupts hard
 * disabled.
 *
 * We take care of only clearing the bits we handled in the
 * PACA irq_happened field since we can only re-emit one at a
 * time and we don't want to "lose" one.
 */
notrace unsigned int __check_irq_replay(void)
{
	/*
	 * We use local_paca rather than get_paca() to avoid all
	 * the debug_smp_processor_id() business in this low level
	 * function.
	 */
	unsigned char happened = local_paca->irq_happened;

	/*
	 * We are responding to the next interrupt, so interrupt-off
	 * latencies should be reset here.
	 */
	trace_hardirqs_off();
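
	/*
	 * Each branch below clears just the bit it is about to handle
	 * and returns the corresponding exception vector offset for
	 * __replay_interrupt(); only one pending interrupt is re-emitted
	 * per call, which is why the other bits must be left intact.
	 */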
	if (happened & PACA_IRQ_HARD_DIS) {
		/* Clear bit 0 which we wouldn't clear otherwise */
		local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;

		/*
		 * We may have missed a decrementer interrupt if hard disabled.
		 * Check the decrementer register in case we had a rollover
		 * while hard disabled.
		 */
		if (!(happened & PACA_IRQ_DEC)) {
			if (decrementer_check_overflow()) {
				local_paca->irq_happened |= PACA_IRQ_DEC;
				happened |= PACA_IRQ_DEC;
			}
		}
	}
	/*
	 * Force the delivery of pending soft-disabled interrupts on PS3.
	 * Any HV call will have this side effect.
	 */
	if (firmware_has_feature(FW_FEATURE_PS3_LV1)) {
		u64 tmp, tmp2;
		lv1_get_version_info(&tmp, &tmp2);
	}
	/*
	 * Check if a Hypervisor Maintenance interrupt happened.
	 * This is a higher priority interrupt than the others, so
	 * we process it first.
	 */
	if (happened & PACA_IRQ_HMI) {
		local_paca->irq_happened &= ~PACA_IRQ_HMI;
		return 0xe60;
	}

	if (happened & PACA_IRQ_DEC) {
		local_paca->irq_happened &= ~PACA_IRQ_DEC;
		return 0x900;
	}

	if (happened & PACA_IRQ_EE) {
		local_paca->irq_happened &= ~PACA_IRQ_EE;
		return 0x500;
	}
#ifdef CONFIG_PPC_BOOK3E
	/*
	 * Check if an EPR external interrupt happened. This bit is typically
	 * set if we need to handle another "edge" interrupt from within the
	 * MPIC "EPR" handler.
	 */
	if (happened & PACA_IRQ_EE_EDGE) {
		local_paca->irq_happened &= ~PACA_IRQ_EE_EDGE;
		return 0x500;
	}

	if (happened & PACA_IRQ_DBELL) {
		local_paca->irq_happened &= ~PACA_IRQ_DBELL;
		return 0x280;
	}
#else
	if (happened & PACA_IRQ_DBELL) {
		local_paca->irq_happened &= ~PACA_IRQ_DBELL;
		return 0xa00;
	}
#endif /* CONFIG_PPC_BOOK3E */
	/* There should be nothing left ! */
	BUG_ON(local_paca->irq_happened != 0);

	return 0;
}
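
/*
 * Restore the soft-enabled state of the local CPU. When going from
 * soft-disabled to soft-enabled, any interrupt that was latched in
 * irq_happened while we were disabled has to be replayed here.
 */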
notrace void arch_local_irq_restore(unsigned long en)
{
	unsigned char irq_happened;
	unsigned int replay;

	/* Write the new soft-enabled value */
	set_soft_enabled(en);
	if (!en)
		return;
	/*
	 * From this point onward, we can take interrupts, preempt,
	 * etc... unless we got hard-disabled. We check if an event
	 * happened. If none happened, we know we can just return.
	 *
	 * We may have preempted before the check below, in which case
	 * we are checking the "new" CPU instead of the old one. This
	 * is only a problem if an event happened on the "old" CPU.
	 *
	 * External interrupt events will have caused interrupts to
	 * be hard-disabled, so there is no problem, we
	 * cannot have preempted.
	 */
	irq_happened = get_irq_happened();
	if (!irq_happened)
		return;

	/*
	 * We need to hard disable to get a trusted value from
	 * __check_irq_replay(). We also need to soft-disable
	 * again to avoid warnings in there due to the use of
	 * per-cpu variables.
	 *
	 * We know that if the value in irq_happened is exactly 0x01
	 * then we are already hard disabled (there are other less
	 * common cases that we'll ignore for now), so we skip the
	 * (expensive) mtmsrd.
	 */
	if (unlikely(irq_happened != PACA_IRQ_HARD_DIS))
		__hard_irq_disable();
#ifdef CONFIG_TRACE_IRQFLAGS
	else {
		/*
		 * We should already be hard disabled here. We had bugs
		 * where that wasn't the case so let's dbl check it and
		 * warn if we are wrong. Only do that when IRQ tracing
		 * is enabled as mfmsr() can be costly.
		 */
		if (WARN_ON(mfmsr() & MSR_EE))
			__hard_irq_disable();
	}
#endif /* CONFIG_TRACE_IRQFLAGS */

	set_soft_enabled(0);
	trace_hardirqs_off();

	/*
	 * Check if anything needs to be re-emitted. We haven't
	 * soft-enabled yet to avoid warnings in decrementer_check_overflow
	 * accessing per-cpu variables.
	 */
	replay = __check_irq_replay();

	/* We can soft-enable now */
	trace_hardirqs_on();
	set_soft_enabled(1);

	/*
	 * And replay if we have to. This will return with interrupts
	 * hard-enabled.
	 */
	if (replay) {
		__replay_interrupt(replay);
		return;
	}

	/* Finally, let's ensure we are hard enabled */
	__hard_irq_enable();
}
EXPORT_SYMBOL(arch_local_irq_restore);
/*
 * This is specifically called by assembly code to re-enable interrupts
 * if they are currently disabled. This is typically called before
 * schedule() or do_signal() when returning to userspace. We do it
 * in C to avoid the burden of dealing with lockdep etc...
 *
 * NOTE: This is called with interrupts hard disabled but not marked
 * as such in paca->irq_happened, so we need to resync this.
 */
void notrace restore_interrupts(void)
{
	if (irqs_disabled()) {
		local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
		local_irq_enable();
	} else
		__hard_irq_enable();
}
/*
 * This is a helper to use when about to go into idle low-power
 * when the latter has the side effect of re-enabling interrupts
 * (such as calling H_CEDE under pHyp).
 *
 * You call this function with interrupts soft-disabled (this is
 * already the case when ppc_md.power_save is called). The function
 * will return whether to enter power save or just return.
 *
 * In the former case, it will have notified lockdep of interrupts
 * being re-enabled and generally sanitized the lazy irq state,
 * and in the latter case it will leave with interrupts hard
 * disabled and marked as such, so the local_irq_enable() call
 * in arch_cpu_idle() will properly re-enable everything.
 */
bool prep_irq_for_idle(void)
{
	/*
	 * First we need to hard disable to ensure no interrupt
	 * occurs before we effectively enter the low power state.
	 */
	__hard_irq_disable();
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

	/*
	 * If anything happened while we were soft-disabled,
	 * we return now and do not enter the low power state.
	 */
	if (lazy_irq_pending())
		return false;

	/* Tell lockdep we are about to re-enable */
	trace_hardirqs_on();

	/*
	 * Mark interrupts as soft-enabled and clear the
	 * PACA_IRQ_HARD_DIS from the pending mask since we
	 * are about to hard enable as well as a side effect
	 * of entering the low power state.
	 */
	local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
	local_paca->soft_enabled = 1;

	/* Tell the caller to enter the low power state */
	return true;
}
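
/*
 * A typical caller (cf. the pseries cpuidle driver) looks roughly like:
 *
 *	if (prep_irq_for_idle())
 *		cede_processor();
 */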
#ifdef CONFIG_PPC_BOOK3S
/*
 * This is for idle sequences that return with IRQs off, but the
 * idle state itself wakes on interrupt. Tell the irq tracer that
 * IRQs are enabled for the duration of idle so it does not get long
 * off times. Must be paired with fini_irq_for_idle_irqsoff.
 */
bool prep_irq_for_idle_irqsoff(void)
{
	WARN_ON(!irqs_disabled());

	/*
	 * First we need to hard disable to ensure no interrupt
	 * occurs before we effectively enter the low power state.
	 */
	__hard_irq_disable();
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

	/*
	 * If anything happened while we were soft-disabled,
	 * we return now and do not enter the low power state.
	 */
	if (lazy_irq_pending())
		return false;

	/* Tell lockdep we are about to re-enable */
	trace_hardirqs_on();

	return true;
}
/*
 * Take the SRR1 wakeup reason, index into this table to find the
 * appropriate irq_happened bit.
 *
 * System reset exceptions taken in idle state also come through here,
 * but they are NMI interrupts so do not need to wait for IRQs to be
 * restored, and should be taken as early as practical. These are marked
 * with 0xff in the table. The Power ISA specifies 0100b as the system
 * reset interrupt reason.
 */
#define IRQ_SYSTEM_RESET	0xff

static const u8 srr1_to_lazyirq[0x10] = {
	0, 0, 0,
	PACA_IRQ_DBELL,
	IRQ_SYSTEM_RESET,
	PACA_IRQ_DBELL,
	PACA_IRQ_DEC,
	0,
	PACA_IRQ_EE,
	PACA_IRQ_EE,
	PACA_IRQ_HMI,
	0, 0, 0, 0, 0 };
void replay_system_reset(void)
{
	struct pt_regs regs;

	ppc_save_regs(&regs);
	regs.trap = 0x100;
	get_paca()->in_nmi = 1;
	system_reset_exception(&regs);
	get_paca()->in_nmi = 0;
}
EXPORT_SYMBOL_GPL(replay_system_reset);
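
/*
 * SRR1[42:45] is the P8 wake reason field; in the shift below, 18 is
 * its offset from the least significant bit of the 64-bit register
 * (bit 45 in IBM numbering == bit 63 - 45 == 18), so the masked field
 * indexes srr1_to_lazyirq directly.
 */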
void irq_set_pending_from_srr1(unsigned long srr1)
{
	unsigned int idx = (srr1 & SRR1_WAKEMASK_P8) >> 18;
	u8 reason = srr1_to_lazyirq[idx];

	/*
	 * Take the system reset now, which is immediately after registers
	 * are restored from idle. It's an NMI, so interrupts need not be
	 * re-enabled before it is taken.
	 */
	if (unlikely(reason == IRQ_SYSTEM_RESET)) {
		replay_system_reset();
		return;
	}

	/*
	 * The 0 index (SRR1[42:45]=b0000) must always evaluate to 0,
	 * so this can be called unconditionally with the SRR1 wake
	 * reason as returned by the idle code, which uses 0 to mean no
	 * interrupt.
	 *
	 * If a future CPU was to designate this as an interrupt reason,
	 * then a new index for no interrupt must be assigned.
	 */
	local_paca->irq_happened |= reason;
}
#endif /* CONFIG_PPC_BOOK3S */
/*
 * Force a replay of the external interrupt handler on this CPU.
 */
void force_external_irq_replay(void)
{
	/*
	 * This must only be called with interrupts soft-disabled,
	 * the replay will happen when re-enabling.
	 */
	WARN_ON(!arch_irqs_disabled());

	/*
	 * Interrupts must always be hard disabled before irq_happened is
	 * modified (to prevent lost update in case of interrupt between
	 * load and store).
	 */
	__hard_irq_disable();
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

	/* Indicate in the PACA that we have an interrupt to replay */
	local_paca->irq_happened |= PACA_IRQ_EE;
}

#endif /* CONFIG_PPC64 */
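
/*
 * Architecture-specific per-CPU counters shown in /proc/interrupts.
 */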
int arch_show_interrupts(struct seq_file *p, int prec)
{
	int j;

#if defined(CONFIG_PPC32) && defined(CONFIG_TAU_INT)
	if (tau_initialized) {
		seq_printf(p, "%*s: ", prec, "TAU");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", tau_interrupts(j));
		seq_puts(p, "  PowerPC Thermal Assist (cpu temp)\n");
	}
#endif /* CONFIG_PPC32 && CONFIG_TAU_INT */

	seq_printf(p, "%*s: ", prec, "LOC");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs_event);
	seq_printf(p, "  Local timer interrupts for timer event device\n");

	seq_printf(p, "%*s: ", prec, "LOC");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs_others);
	seq_printf(p, "  Local timer interrupts for others\n");

	seq_printf(p, "%*s: ", prec, "SPU");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).spurious_irqs);
	seq_printf(p, "  Spurious interrupts\n");

	seq_printf(p, "%*s: ", prec, "PMI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).pmu_irqs);
	seq_printf(p, "  Performance monitoring interrupts\n");

	seq_printf(p, "%*s: ", prec, "MCE");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).mce_exceptions);
	seq_printf(p, "  Machine check exceptions\n");

	if (cpu_has_feature(CPU_FTR_HVMODE)) {
		seq_printf(p, "%*s: ", prec, "HMI");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ",
					per_cpu(irq_stat, j).hmi_exceptions);
		seq_printf(p, "  Hypervisor Maintenance Interrupts\n");
	}

	seq_printf(p, "%*s: ", prec, "NMI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).sreset_irqs);
	seq_printf(p, "  System Reset interrupts\n");

#ifdef CONFIG_PPC_WATCHDOG
	seq_printf(p, "%*s: ", prec, "WDG");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).soft_nmi_irqs);
	seq_printf(p, "  Watchdog soft-NMI interrupts\n");
#endif

#ifdef CONFIG_PPC_DOORBELL
	if (cpu_has_feature(CPU_FTR_DBELL)) {
		seq_printf(p, "%*s: ", prec, "DBL");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", per_cpu(irq_stat, j).doorbell_irqs);
		seq_printf(p, "  Doorbell interrupts\n");
	}
#endif

	return 0;
}
/*
 * /proc/stat helpers
 */
u64 arch_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = per_cpu(irq_stat, cpu).timer_irqs_event;

	sum += per_cpu(irq_stat, cpu).pmu_irqs;
	sum += per_cpu(irq_stat, cpu).mce_exceptions;
	sum += per_cpu(irq_stat, cpu).spurious_irqs;
	sum += per_cpu(irq_stat, cpu).timer_irqs_others;
	sum += per_cpu(irq_stat, cpu).hmi_exceptions;
	sum += per_cpu(irq_stat, cpu).sreset_irqs;
#ifdef CONFIG_PPC_WATCHDOG
	sum += per_cpu(irq_stat, cpu).soft_nmi_irqs;
#endif
#ifdef CONFIG_PPC_DOORBELL
	sum += per_cpu(irq_stat, cpu).doorbell_irqs;
#endif

	return sum;
}
static inline void check_stack_overflow(void)
{
#ifdef CONFIG_DEBUG_STACKOVERFLOW
	long sp;

	sp = current_stack_pointer() & (THREAD_SIZE - 1);

	/* check for stack overflow: is there less than 2KB free? */
	if (unlikely(sp < (sizeof(struct thread_info) + 2048))) {
		pr_err("do_IRQ: stack overflow: %ld\n",
			sp - sizeof(struct thread_info));
		dump_stack();
	}
#endif
}
void __do_irq(struct pt_regs *regs)
{
	unsigned int irq;

	irq_enter();

	trace_irq_entry(regs);

	/*
	 * Query the platform PIC for the interrupt & ack it.
	 *
	 * This will typically lower the interrupt line to the CPU
	 */
	irq = ppc_md.get_irq();

	/* We can hard enable interrupts now to allow perf interrupts */
	may_hard_irq_enable();

	/* And finally process it */
	if (unlikely(!irq))
		__this_cpu_inc(irq_stat.spurious_irqs);
	else
		generic_handle_irq(irq);

	trace_irq_exit(regs);

	irq_exit();
}
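
/*
 * Interrupt entry from the low-level exception code: switch to the
 * dedicated hardirq stack unless we are already running on an IRQ
 * stack, then hand off to __do_irq().
 */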
void do_IRQ(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	struct thread_info *curtp, *irqtp, *sirqtp;

	/* Switch to the irq stack to handle this */
	curtp = current_thread_info();
	irqtp = hardirq_ctx[raw_smp_processor_id()];
	sirqtp = softirq_ctx[raw_smp_processor_id()];

	check_stack_overflow();

	/* Already there ? */
	if (unlikely(curtp == irqtp || curtp == sirqtp)) {
		__do_irq(regs);
		set_irq_regs(old_regs);
		return;
	}

	/* Prepare the thread_info in the irq stack */
	irqtp->task = curtp->task;
	irqtp->flags = 0;

	/* Copy the preempt_count so that the [soft]irq checks work. */
	irqtp->preempt_count = curtp->preempt_count;

	/* Switch stack and call */
	call_do_irq(regs, irqtp);

	/* Restore stack limit */
	irqtp->task = NULL;

	/* Copy back updates to the thread_info */
	if (irqtp->flags)
		set_bits(irqtp->flags, &curtp->flags);

	set_irq_regs(old_regs);
}
void __init init_IRQ(void)
{
	if (ppc_md.init_IRQ)
		ppc_md.init_IRQ();
}
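
/*
 * BookE and 40x parts have extra exception levels (critical, and on
 * BookE debug and machine check) that can interrupt each other, so
 * each level gets its own per-CPU stack, initialized below.
 */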
#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
struct thread_info   *critirq_ctx[NR_CPUS] __read_mostly;
struct thread_info    *dbgirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *mcheckirq_ctx[NR_CPUS] __read_mostly;

void exc_lvl_ctx_init(void)
{
	struct thread_info *tp;
	int i, cpu_nr;

	for_each_possible_cpu(i) {
#ifdef CONFIG_PPC64
		cpu_nr = i;
#else
#ifdef CONFIG_SMP
		cpu_nr = get_hard_smp_processor_id(i);
#else
		cpu_nr = 0;
#endif
#endif

		memset((void *)critirq_ctx[cpu_nr], 0, THREAD_SIZE);
		tp = critirq_ctx[cpu_nr];
		tp->cpu = cpu_nr;
		tp->preempt_count = 0;

#ifdef CONFIG_BOOKE
		memset((void *)dbgirq_ctx[cpu_nr], 0, THREAD_SIZE);
		tp = dbgirq_ctx[cpu_nr];
		tp->cpu = cpu_nr;
		tp->preempt_count = 0;

		memset((void *)mcheckirq_ctx[cpu_nr], 0, THREAD_SIZE);
		tp = mcheckirq_ctx[cpu_nr];
		tp->cpu = cpu_nr;
		tp->preempt_count = HARDIRQ_OFFSET;
#endif
	}
}
#endif
struct thread_info *softirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *hardirq_ctx[NR_CPUS] __read_mostly;

void irq_ctx_init(void)
{
	struct thread_info *tp;
	int i;

	for_each_possible_cpu(i) {
		memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
		tp = softirq_ctx[i];
		tp->cpu = i;
		klp_init_thread_info(tp);

		memset((void *)hardirq_ctx[i], 0, THREAD_SIZE);
		tp = hardirq_ctx[i];
		tp->cpu = i;
		klp_init_thread_info(tp);
	}
}
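
/*
 * Run pending softirqs on the dedicated per-CPU softirq stack rather
 * than on the current (possibly nearly full) kernel stack.
 */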
void do_softirq_own_stack(void)
{
	struct thread_info *curtp, *irqtp;

	curtp = current_thread_info();
	irqtp = softirq_ctx[smp_processor_id()];
	irqtp->task = curtp->task;
	irqtp->flags = 0;
	call_do_softirq(irqtp);
	irqtp->task = NULL;

	/* Set any flag that may have been set on the
	 * alternate stack
	 */
	if (irqtp->flags)
		set_bits(irqtp->flags, &curtp->flags);
}
irq_hw_number_t virq_to_hw(unsigned int virq)
{
	struct irq_data *irq_data = irq_get_irq_data(virq);
	return WARN_ON(!irq_data) ? 0 : irq_data->hwirq;
}
EXPORT_SYMBOL_GPL(virq_to_hw);
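
/*
 * Pick the hardware CPU that should receive an external interrupt:
 * round-robin across all online CPUs when the affinity mask allows
 * any of them, otherwise the first online CPU in the mask.
 */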
#ifdef CONFIG_SMP
int irq_choose_cpu(const struct cpumask *mask)
{
	int cpuid;

	if (cpumask_equal(mask, cpu_online_mask)) {
		static int irq_rover;
		static DEFINE_RAW_SPINLOCK(irq_rover_lock);
		unsigned long flags;

		/* Round-robin distribution... */
do_round_robin:
		raw_spin_lock_irqsave(&irq_rover_lock, flags);

		irq_rover = cpumask_next(irq_rover, cpu_online_mask);
		if (irq_rover >= nr_cpu_ids)
			irq_rover = cpumask_first(cpu_online_mask);

		cpuid = irq_rover;

		raw_spin_unlock_irqrestore(&irq_rover_lock, flags);
	} else {
		cpuid = cpumask_first_and(mask, cpu_online_mask);
		if (cpuid >= nr_cpu_ids)
			goto do_round_robin;
	}

	return get_hard_smp_processor_id(cpuid);
}
#else
int irq_choose_cpu(const struct cpumask *mask)
{
	return hard_smp_processor_id();
}
#endif
int arch_early_irq_init(void)
{
	return 0;
}

#ifdef CONFIG_PPC64
static int __init setup_noirqdistrib(char *str)
{
	distribute_irqs = 0;
	return 1;
}

__setup("noirqdistrib", setup_noirqdistrib);
#endif /* CONFIG_PPC64 */