/*
 * Derived from arch/i386/kernel/irq.c
 *	Copyright (C) 1992 Linus Torvalds
 * Adapted from arch/i386 by Gary Thomas
 *	Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Updated and modified by Cort Dougan <cort@fsmlabs.com>
 *	Copyright (C) 1996-2001 Cort Dougan
 * Adapted for Power Macintosh by Paul Mackerras
 *	Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQs should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 *
 * The MPC8xx has an interrupt mask in the SIU. If a bit is set, the
 * interrupt is _enabled_. As expected, IRQ0 is bit 0 in the 32-bit
 * mask register (of which only 16 are defined), hence the weird shifting
 * and complement of the cached_irq_mask. I want to be able to stuff
 * this right into the SIU SMASK register.
 * Many of the prep/chrp functions are conditionally compiled on CONFIG_PPC_8xx
 * to reduce code space and undefined function references.
 */
#include <linux/export.h>
#include <linux/threads.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/profile.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/debugfs.h>
#include <linux/of_irq.h>
#include <linux/uaccess.h>

#include <asm/pgtable.h>
#include <asm/cache.h>
#include <asm/ptrace.h>
#include <asm/machdep.h>
#include <asm/livepatch.h>
#include <asm/asm-prototypes.h>
#include <asm/firmware.h>
#include <asm/lv1call.h>

#define CREATE_TRACE_POINTS
#include <asm/trace.h>
#include <asm/cpu_has_feature.h>
DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

int __irq_offset_value;

#ifdef CONFIG_PPC32
EXPORT_SYMBOL(__irq_offset_value);
atomic_t ppc_n_lost_interrupts;

#ifdef CONFIG_TAU_INT
extern int tau_initialized;
extern int tau_interrupts(int);
#endif
#endif /* CONFIG_PPC32 */

#ifdef CONFIG_PPC64

int distribute_irqs = 1;
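
/*
 * Overview of the 64-bit lazy interrupt disabling scheme implemented
 * below: local_irq_disable() only clears the soft_enabled byte in the
 * PACA. If a hardware interrupt arrives while soft-disabled, the low
 * level handler records the event in paca->irq_happened, hard-disables
 * (clears MSR[EE]) and returns without running the handler. The pending
 * event is then replayed by arch_local_irq_restore() when interrupts
 * are soft-enabled again.
 */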
static inline notrace unsigned long get_irq_happened(void)
{
	unsigned long happened;

	__asm__ __volatile__("lbz %0,%1(13)"
	: "=r" (happened) : "i" (offsetof(struct paca_struct, irq_happened)));

	return happened;
}
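
/*
 * Note: on 64-bit, r13 permanently holds the address of this CPU's PACA,
 * so the "lbz %0,%1(13)" above reads paca->irq_happened directly, without
 * the debug checks that get_paca() would add.
 */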
static inline notrace void set_soft_enabled(unsigned long enable)
{
	__asm__ __volatile__("stb %0,%1(13)"
	: : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled)));
}
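
/*
 * The same r13-relative trick is used for the store above: "stb %0,%1(13)"
 * writes the new value straight into paca->soft_enabled.
 */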
static inline notrace int decrementer_check_overflow(void)
{
	u64 now = get_tb_or_rtc();
	u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);

	return now >= *next_tb;
}
/* This is called whenever we are re-enabling interrupts
 * and returns either 0 (nothing to do) or 500/900/280/a00/e80 if
 * there's an EE, DEC or DBELL to generate.
 *
 * This is called in two contexts: From arch_local_irq_restore()
 * before soft-enabling interrupts, and from the exception exit
 * path when returning from an interrupt from a soft-disabled to
 * a soft-enabled context. In both cases we have interrupts hard
 * disabled.
 *
 * We take care of only clearing the bits we handled in the
 * PACA irq_happened field since we can only re-emit one at a
 * time and we don't want to "lose" one.
 */
notrace unsigned int __check_irq_replay(void)
{
	/*
	 * We use local_paca rather than get_paca() to avoid all
	 * the debug_smp_processor_id() business in this low level
	 * code
	 */
	unsigned char happened = local_paca->irq_happened;

	if (happened & PACA_IRQ_HARD_DIS) {
		/* Clear bit 0 which we wouldn't clear otherwise */
		local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;

		/*
		 * We may have missed a decrementer interrupt if hard disabled.
		 * Check the decrementer register in case we had a rollover
		 * while hard disabled.
		 */
		if (!(happened & PACA_IRQ_DEC)) {
			if (decrementer_check_overflow()) {
				local_paca->irq_happened |= PACA_IRQ_DEC;
				happened |= PACA_IRQ_DEC;
			}
		}
	}
	/*
	 * Force the delivery of pending soft-disabled interrupts on PS3.
	 * Any HV call will have this side effect.
	 */
	if (firmware_has_feature(FW_FEATURE_PS3_LV1)) {
		u64 tmp, tmp2;
		lv1_get_version_info(&tmp, &tmp2);
	}
	/*
	 * Check if a hypervisor Maintenance interrupt happened.
	 * This is a higher priority interrupt than the others, so
	 * we process it first.
	 */
	if (happened & PACA_IRQ_HMI) {
		local_paca->irq_happened &= ~PACA_IRQ_HMI;
		return 0xe60;
	}

	if (happened & PACA_IRQ_DEC) {
		local_paca->irq_happened &= ~PACA_IRQ_DEC;
		return 0x900;
	}

	if (happened & PACA_IRQ_EE) {
		local_paca->irq_happened &= ~PACA_IRQ_EE;
		return 0x500;
	}
#ifdef CONFIG_PPC_BOOK3E
	/*
	 * Check if an EPR external interrupt happened. This bit is typically
	 * set if we need to handle another "edge" interrupt from within the
	 * MPIC "EPR" handler.
	 */
	if (happened & PACA_IRQ_EE_EDGE) {
		local_paca->irq_happened &= ~PACA_IRQ_EE_EDGE;
		return 0x500;
	}

	if (happened & PACA_IRQ_DBELL) {
		local_paca->irq_happened &= ~PACA_IRQ_DBELL;
		return 0x280;
	}
#else
	if (happened & PACA_IRQ_DBELL) {
		local_paca->irq_happened &= ~PACA_IRQ_DBELL;
		return 0xa00;
	}
#endif /* CONFIG_PPC_BOOK3E */
	/* There should be nothing left ! */
	BUG_ON(local_paca->irq_happened != 0);

	return 0;
}
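
/*
 * The value returned above is the exception vector that
 * arch_local_irq_restore() passes to __replay_interrupt(); 0 means
 * nothing is pending. Note the priority order: an HMI is replayed
 * before the decrementer, which in turn is replayed before external
 * interrupts and doorbells.
 */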
notrace void arch_local_irq_restore(unsigned long en)
{
	unsigned char irq_happened;
	unsigned int replay;

	/* Write the new soft-enabled value */
	set_soft_enabled(en);
	if (en == 0)
		return;

	/*
	 * From this point onward, we can take interrupts, preempt,
	 * etc... unless we got hard-disabled. We check if an event
	 * happened. If none happened, we know we can just return.
	 *
	 * We may have preempted before the check below, in which case
	 * we are checking the "new" CPU instead of the old one. This
	 * is only a problem if an event happened on the "old" CPU.
	 *
	 * External interrupt events will have caused interrupts to
	 * be hard-disabled, so there is no problem, we
	 * cannot have preempted.
	 */
	irq_happened = get_irq_happened();
	if (!irq_happened)
		return;
	/*
	 * We need to hard disable to get a trusted value from
	 * __check_irq_replay(). We also need to soft-disable
	 * again to avoid warnings in there due to the use of
	 * per-cpu variables.
	 *
	 * We know that if the value in irq_happened is exactly 0x01
	 * then we are already hard disabled (there are other less
	 * common cases that we'll ignore for now), so we skip the
	 * (expensive) mtmsrd.
	 */
	if (unlikely(irq_happened != PACA_IRQ_HARD_DIS))
		__hard_irq_disable();
#ifdef CONFIG_TRACE_IRQFLAGS
	else {
		/*
		 * We should already be hard disabled here. We had bugs
		 * where that wasn't the case so let's double check it and
		 * warn if we are wrong. Only do that when IRQ tracing
		 * is enabled as mfmsr() can be costly.
		 */
		if (WARN_ON(mfmsr() & MSR_EE))
			__hard_irq_disable();
	}
#endif /* CONFIG_TRACE_IRQFLAGS */

	set_soft_enabled(0);
	/*
	 * Check if anything needs to be re-emitted. We haven't
	 * soft-enabled yet to avoid warnings in decrementer_check_overflow
	 * accessing per-cpu variables.
	 */
	replay = __check_irq_replay();

	/* We can soft-enable now */
	set_soft_enabled(1);

	/*
	 * And replay if we have to. This will return with interrupts
	 * hard-enabled.
	 */
	if (replay) {
		__replay_interrupt(replay);
		return;
	}

	/* Finally, let's ensure we are hard enabled */
	__hard_irq_enable();
}
EXPORT_SYMBOL(arch_local_irq_restore);
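
/*
 * Typical path into the function above (a minimal sketch; the caller is
 * ordinary generic code, nothing powerpc-specific): any
 * local_irq_save()/local_irq_restore() pair ends up here through
 * raw_local_irq_restore(), e.g.
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);
 *	... critical section ...
 *	local_irq_restore(flags);	ends up in arch_local_irq_restore(flags)
 */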
/*
 * This is specifically called by assembly code to re-enable interrupts
 * if they are currently disabled. This is typically called before
 * schedule() or do_signal() when returning to userspace. We do it
 * in C to avoid the burden of dealing with lockdep etc...
 *
 * NOTE: This is called with interrupts hard disabled but not marked
 * as such in paca->irq_happened, so we need to resync this.
 */
void notrace restore_interrupts(void)
{
	if (irqs_disabled()) {
		local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
		local_irq_enable();
	} else
		__hard_irq_enable();
}
/*
 * This is a helper to use when about to enter an idle low-power state,
 * when the latter has the side effect of re-enabling interrupts
 * (such as calling H_CEDE under pHyp).
 *
 * You call this function with interrupts soft-disabled (this is
 * already the case when ppc_md.power_save is called). The function
 * will return whether to enter power save or just return.
 *
 * In the former case, it will have notified lockdep of interrupts
 * being re-enabled and generally sanitized the lazy irq state,
 * and in the latter case it will leave with interrupts hard
 * disabled and marked as such, so the local_irq_enable() call
 * in arch_cpu_idle() will properly re-enable everything.
 */
bool prep_irq_for_idle(void)
{
	/*
	 * First we need to hard disable to ensure no interrupt
	 * occurs before we effectively enter the low power state.
	 */
	__hard_irq_disable();
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

	/*
	 * If anything happened while we were soft-disabled,
	 * we return now and do not enter the low power state.
	 */
	if (lazy_irq_pending())
		return false;

	/* Tell lockdep we are about to re-enable */
	trace_hardirqs_on();

	/*
	 * Mark interrupts as soft-enabled and clear the
	 * PACA_IRQ_HARD_DIS from the pending mask since we
	 * are about to hard enable as well as a side effect
	 * of entering the low power state.
	 */
	local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
	local_paca->soft_enabled = 1;

	/* Tell the caller to enter the low power state */
	return true;
}
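
/*
 * Usage sketch (hypothetical caller, for illustration only): a platform
 * power_save hook whose sleep call re-enables interrupts as a side
 * effect would do something like
 *
 *	if (!prep_irq_for_idle())
 *		return;			an event is already pending, skip the sleep
 *	plpar_hcall_norets(H_CEDE);	the H_CEDE call itself re-enables interrupts
 */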
#ifdef CONFIG_PPC_BOOK3S
/*
 * This is for idle sequences that return with IRQs off, but the
 * idle state itself wakes on interrupt. Tell the irq tracer that
 * IRQs are enabled for the duration of idle so it does not get long
 * off times. Must be paired with fini_irq_for_idle_irqsoff.
 */
bool prep_irq_for_idle_irqsoff(void)
{
	WARN_ON(!irqs_disabled());

	/*
	 * First we need to hard disable to ensure no interrupt
	 * occurs before we effectively enter the low power state.
	 */
	__hard_irq_disable();
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

	/*
	 * If anything happened while we were soft-disabled,
	 * we return now and do not enter the low power state.
	 */
	if (lazy_irq_pending())
		return false;

	/* Tell lockdep we are about to re-enable */
	trace_hardirqs_on();

	return true;
}
/*
 * Take the SRR1 wakeup reason, index into this table to find the
 * appropriate irq_happened bit.
 *
 * System reset exceptions taken in idle state also come through here,
 * but they are NMI interrupts so do not need to wait for IRQs to be
 * restored, and should be taken as early as practical. These are marked
 * with 0xff in the table. The Power ISA specifies 0100b as the system
 * reset interrupt reason.
 */
#define IRQ_SYSTEM_RESET	0xff

static const u8 srr1_to_lazyirq[0x10] = {
	0, 0, 0,
	PACA_IRQ_DBELL,
	IRQ_SYSTEM_RESET,
	PACA_IRQ_DBELL,
	PACA_IRQ_DEC,
	0,
	PACA_IRQ_EE,
	PACA_IRQ_EE,
	PACA_IRQ_HMI,
	0, 0, 0, 0, 0 };
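
/*
 * Worked example, following the comment above: a wakeup with
 * SRR1[42:45] = 0b0100 indexes entry 4 of the table, which is
 * IRQ_SYSTEM_RESET (0xff), so the system reset is replayed immediately
 * as an NMI instead of being queued in paca->irq_happened.
 */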
void replay_system_reset(void)
{
	struct pt_regs regs;

	ppc_save_regs(&regs);
	get_paca()->in_nmi = 1;
	system_reset_exception(&regs);
	get_paca()->in_nmi = 0;
}
EXPORT_SYMBOL_GPL(replay_system_reset);
void irq_set_pending_from_srr1(unsigned long srr1)
{
	unsigned int idx = (srr1 & SRR1_WAKEMASK_P8) >> 18;
	u8 reason = srr1_to_lazyirq[idx];

	/*
	 * Take the system reset now, which is immediately after registers
	 * are restored from idle. It's an NMI, so interrupts need not be
	 * re-enabled before it is taken.
	 */
	if (unlikely(reason == IRQ_SYSTEM_RESET)) {
		replay_system_reset();
		return;
	}

	/*
	 * The 0 index (SRR1[42:45]=b0000) must always evaluate to 0,
	 * so this can be called unconditionally with the SRR1 wake
	 * reason as returned by the idle code, which uses 0 to mean no
	 * interrupt.
	 *
	 * If a future CPU were to designate this as an interrupt reason,
	 * then a new index for no interrupt must be assigned.
	 */
	local_paca->irq_happened |= reason;
}
#endif /* CONFIG_PPC_BOOK3S */
/*
 * Force a replay of the external interrupt handler on this CPU.
 */
void force_external_irq_replay(void)
{
	/*
	 * This must only be called with interrupts soft-disabled,
	 * the replay will happen when re-enabling.
	 */
	WARN_ON(!arch_irqs_disabled());

	/* Indicate in the PACA that we have an interrupt to replay */
	local_paca->irq_happened |= PACA_IRQ_EE;
}

#endif /* CONFIG_PPC64 */
int arch_show_interrupts(struct seq_file *p, int prec)
{
	int j;

#if defined(CONFIG_PPC32) && defined(CONFIG_TAU_INT)
	if (tau_initialized) {
		seq_printf(p, "%*s: ", prec, "TAU");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", tau_interrupts(j));
		seq_puts(p, "  PowerPC Thermal Assist (cpu temp)\n");
	}
#endif /* CONFIG_PPC32 && CONFIG_TAU_INT */

	seq_printf(p, "%*s: ", prec, "LOC");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs_event);
	seq_printf(p, "  Local timer interrupts for timer event device\n");

	seq_printf(p, "%*s: ", prec, "LOC");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs_others);
	seq_printf(p, "  Local timer interrupts for others\n");

	seq_printf(p, "%*s: ", prec, "SPU");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).spurious_irqs);
	seq_printf(p, "  Spurious interrupts\n");

	seq_printf(p, "%*s: ", prec, "PMI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).pmu_irqs);
	seq_printf(p, "  Performance monitoring interrupts\n");

	seq_printf(p, "%*s: ", prec, "MCE");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).mce_exceptions);
	seq_printf(p, "  Machine check exceptions\n");

	if (cpu_has_feature(CPU_FTR_HVMODE)) {
		seq_printf(p, "%*s: ", prec, "HMI");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ",
					per_cpu(irq_stat, j).hmi_exceptions);
		seq_printf(p, "  Hypervisor Maintenance Interrupts\n");
	}

	seq_printf(p, "%*s: ", prec, "NMI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).sreset_irqs);
	seq_printf(p, "  System Reset interrupts\n");

#ifdef CONFIG_PPC_WATCHDOG
	seq_printf(p, "%*s: ", prec, "WDG");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).soft_nmi_irqs);
	seq_printf(p, "  Watchdog soft-NMI interrupts\n");
#endif

#ifdef CONFIG_PPC_DOORBELL
	if (cpu_has_feature(CPU_FTR_DBELL)) {
		seq_printf(p, "%*s: ", prec, "DBL");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", per_cpu(irq_stat, j).doorbell_irqs);
		seq_printf(p, "  Doorbell interrupts\n");
	}
#endif

	return 0;
}
u64 arch_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = per_cpu(irq_stat, cpu).timer_irqs_event;

	sum += per_cpu(irq_stat, cpu).pmu_irqs;
	sum += per_cpu(irq_stat, cpu).mce_exceptions;
	sum += per_cpu(irq_stat, cpu).spurious_irqs;
	sum += per_cpu(irq_stat, cpu).timer_irqs_others;
	sum += per_cpu(irq_stat, cpu).hmi_exceptions;
	sum += per_cpu(irq_stat, cpu).sreset_irqs;
#ifdef CONFIG_PPC_WATCHDOG
	sum += per_cpu(irq_stat, cpu).soft_nmi_irqs;
#endif
#ifdef CONFIG_PPC_DOORBELL
	sum += per_cpu(irq_stat, cpu).doorbell_irqs;
#endif

	return sum;
}
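
/*
 * arch_irq_stat_cpu() feeds the per-CPU interrupt totals reported in
 * /proc/stat; it sums the same counters that arch_show_interrupts()
 * prints individually.
 */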
static inline void check_stack_overflow(void)
{
#ifdef CONFIG_DEBUG_STACKOVERFLOW
	long sp;

	sp = current_stack_pointer() & (THREAD_SIZE - 1);

	/* check for stack overflow: is there less than 2KB free? */
	if (unlikely(sp < (sizeof(struct thread_info) + 2048))) {
		pr_err("do_IRQ: stack overflow: %ld\n",
			sp - sizeof(struct thread_info));
	}
#endif
}
void __do_irq(struct pt_regs *regs)
{
	unsigned int irq;

	irq_enter();

	trace_irq_entry(regs);

	check_stack_overflow();

	/*
	 * Query the platform PIC for the interrupt & ack it.
	 *
	 * This will typically lower the interrupt line to the CPU
	 */
	irq = ppc_md.get_irq();

	/* We can hard enable interrupts now to allow perf interrupts */
	may_hard_irq_enable();

	/* And finally process it */
	if (unlikely(!irq))
		__this_cpu_inc(irq_stat.spurious_irqs);
	else
		generic_handle_irq(irq);

	trace_irq_exit(regs);

	irq_exit();
}
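
/*
 * do_IRQ() below runs external interrupts on a dedicated per-CPU
 * hardirq stack: it points thread_info at hardirq_ctx[] and enters
 * __do_irq() via call_do_irq(), so deep handler call chains do not eat
 * into the interrupted task's kernel stack.
 */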
void do_IRQ(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	struct thread_info *curtp, *irqtp, *sirqtp;

	/* Switch to the irq stack to handle this */
	curtp = current_thread_info();
	irqtp = hardirq_ctx[raw_smp_processor_id()];
	sirqtp = softirq_ctx[raw_smp_processor_id()];

	/* Already there ? */
	if (unlikely(curtp == irqtp || curtp == sirqtp)) {
		__do_irq(regs);
		set_irq_regs(old_regs);
		return;
	}

	/* Prepare the thread_info in the irq stack */
	irqtp->task = curtp->task;
	irqtp->flags = 0;

	/* Copy the preempt_count so that the [soft]irq checks work. */
	irqtp->preempt_count = curtp->preempt_count;

	/* Switch stack and call */
	call_do_irq(regs, irqtp);

	/* Restore stack limit */
	irqtp->task = NULL;

	/* Copy back updates to the thread_info */
	if (irqtp->flags)
		set_bits(irqtp->flags, &curtp->flags);

	set_irq_regs(old_regs);
}
void __init init_IRQ(void)
{
	if (ppc_md.init_IRQ)
		ppc_md.init_IRQ();
}
#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
struct thread_info *critirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *dbgirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *mcheckirq_ctx[NR_CPUS] __read_mostly;

void exc_lvl_ctx_init(void)
{
	struct thread_info *tp;
	int i, cpu_nr;

	for_each_possible_cpu(i) {
		cpu_nr = get_hard_smp_processor_id(i);

		memset((void *)critirq_ctx[cpu_nr], 0, THREAD_SIZE);
		tp = critirq_ctx[cpu_nr];
		tp->preempt_count = 0;

		memset((void *)dbgirq_ctx[cpu_nr], 0, THREAD_SIZE);
		tp = dbgirq_ctx[cpu_nr];
		tp->preempt_count = 0;

		memset((void *)mcheckirq_ctx[cpu_nr], 0, THREAD_SIZE);
		tp = mcheckirq_ctx[cpu_nr];
		tp->preempt_count = HARDIRQ_OFFSET;
	}
}
#endif /* CONFIG_BOOKE || CONFIG_40x */
struct thread_info *softirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *hardirq_ctx[NR_CPUS] __read_mostly;

void irq_ctx_init(void)
{
	struct thread_info *tp;
	int i;

	for_each_possible_cpu(i) {
		memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
		tp = softirq_ctx[i];
		klp_init_thread_info(tp);

		memset((void *)hardirq_ctx[i], 0, THREAD_SIZE);
		tp = hardirq_ctx[i];
		klp_init_thread_info(tp);
	}
}
void do_softirq_own_stack(void)
{
	struct thread_info *curtp, *irqtp;

	curtp = current_thread_info();
	irqtp = softirq_ctx[smp_processor_id()];
	irqtp->task = curtp->task;
	irqtp->flags = 0;
	call_do_softirq(irqtp);
	irqtp->task = NULL;

	/* Set any flag that may have been set on the
	 * alternate stack
	 */
	if (irqtp->flags)
		set_bits(irqtp->flags, &curtp->flags);
}
irq_hw_number_t virq_to_hw(unsigned int virq)
{
	struct irq_data *irq_data = irq_get_irq_data(virq);
	return WARN_ON(!irq_data) ? 0 : irq_data->hwirq;
}
EXPORT_SYMBOL_GPL(virq_to_hw);
#ifdef CONFIG_SMP
int irq_choose_cpu(const struct cpumask *mask)
{
	int cpuid;

	if (cpumask_equal(mask, cpu_online_mask)) {
		static int irq_rover;
		static DEFINE_RAW_SPINLOCK(irq_rover_lock);
		unsigned long flags;

		/* Round-robin distribution... */
		raw_spin_lock_irqsave(&irq_rover_lock, flags);

		irq_rover = cpumask_next(irq_rover, cpu_online_mask);
		if (irq_rover >= nr_cpu_ids)
			irq_rover = cpumask_first(cpu_online_mask);

		cpuid = irq_rover;

		raw_spin_unlock_irqrestore(&irq_rover_lock, flags);
	} else {
		cpuid = cpumask_first_and(mask, cpu_online_mask);
		if (cpuid >= nr_cpu_ids)
			cpuid = cpumask_first(cpu_online_mask);
	}

	return get_hard_smp_processor_id(cpuid);
}
#else
int irq_choose_cpu(const struct cpumask *mask)
{
	return hard_smp_processor_id();
}
#endif
int arch_early_irq_init(void)
{
	return 0;
}

#ifdef CONFIG_PPC64
static int __init setup_noirqdistrib(char *str)
{
	distribute_irqs = 0;
	return 1;
}

__setup("noirqdistrib", setup_noirqdistrib);
#endif /* CONFIG_PPC64 */