/*
 * Derived from arch/i386/kernel/irq.c
 *   Copyright (C) 1992 Linus Torvalds
 * Adapted from arch/i386 by Gary Thomas
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Updated and modified by Cort Dougan <cort@fsmlabs.com>
 *   Copyright (C) 1996-2001 Cort Dougan
 * Adapted for Power Macintosh by Paul Mackerras
 *   Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQs should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 *
 * The MPC8xx has an interrupt mask in the SIU.  If a bit is set, the
 * interrupt is _enabled_.  As expected, IRQ0 is bit 0 in the 32-bit
 * mask register (of which only 16 are defined), hence the weird shifting
 * and complement of the cached_irq_mask.  I want to be able to stuff
 * this right into the SIU SMASK register.
 * Many of the prep/chrp functions are conditionally compiled on CONFIG_8xx
 * to reduce code space and undefined function references.
 */
#include <linux/export.h>
#include <linux/threads.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/profile.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/debugfs.h>
#include <linux/of_irq.h>

#include <linux/uaccess.h>
#include <asm/pgtable.h>
#include <asm/cache.h>
#include <asm/ptrace.h>
#include <asm/machdep.h>
#include <asm/debug.h>
#include <asm/livepatch.h>
#include <asm/asm-prototypes.h>
#include <asm/firmware.h>
#include <asm/lv1call.h>

#define CREATE_TRACE_POINTS
#include <asm/trace.h>
#include <asm/cpu_has_feature.h>
DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

int __irq_offset_value;

#ifdef CONFIG_PPC32
EXPORT_SYMBOL(__irq_offset_value);
atomic_t ppc_n_lost_interrupts;

#ifdef CONFIG_TAU_INT
extern int tau_initialized;
extern int tau_interrupts(int);
#endif
#endif /* CONFIG_PPC32 */

#ifdef CONFIG_PPC64

int distribute_irqs = 1;
static inline notrace unsigned long get_irq_happened(void)
{
        unsigned long happened;

        __asm__ __volatile__("lbz %0,%1(13)"
        : "=r" (happened) : "i" (offsetof(struct paca_struct, irq_happened)));

        return happened;
}
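/*
 * Note: on 64-bit, r13 permanently holds the per-CPU PACA pointer, so the
 * "lbz %0,%1(13)" above is just a byte load of paca->irq_happened.  A plain
 * C equivalent would be "return local_paca->irq_happened;"; the explicit
 * asm form keeps this hot path free of any instrumentation the compiler
 * or tracer might otherwise insert.
 */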
static inline notrace void set_soft_enabled(unsigned long enable)
{
        __asm__ __volatile__("stb %0,%1(13)"
        : : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled)));
}
static inline notrace int decrementer_check_overflow(void)
{
        u64 now = get_tb_or_rtc();
        u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);

        return now >= *next_tb;
}
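/*
 * In other words: if the timebase has run past the next programmed
 * decrementer event while we were hard-disabled, the decrementer
 * exception was swallowed and must be replayed, even if PACA_IRQ_DEC
 * never got set.
 */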
/* This is called whenever we are re-enabling interrupts
 * and returns either 0 (nothing to do) or 500/900/280/a00/e80 if
 * there's an EE, DEC or DBELL to generate.
 *
 * This is called in two contexts: From arch_local_irq_restore()
 * before soft-enabling interrupts, and from the exception exit
 * path when returning from an interrupt from a soft-disabled to
 * a soft-enabled context. In both cases we have interrupts hard
 * disabled.
 *
 * We take care of only clearing the bits we handled in the
 * PACA irq_happened field since we can only re-emit one at a
 * time and we don't want to "lose" one.
 */
notrace unsigned int __check_irq_replay(void)
{
        /*
         * We use local_paca rather than get_paca() to avoid all
         * the debug_smp_processor_id() business in this low level
         * function.
         */
        unsigned char happened = local_paca->irq_happened;

        /* Clear bit 0 which we wouldn't clear otherwise */
        local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;

        /*
         * Force the delivery of pending soft-disabled interrupts on PS3.
         * Any HV call will have this side effect.
         */
        if (firmware_has_feature(FW_FEATURE_PS3_LV1)) {
                u64 tmp, tmp2;
                lv1_get_version_info(&tmp, &tmp2);
        }

        /*
         * Check if a hypervisor Maintenance interrupt happened.
         * This is a higher priority interrupt than the others, so
         * replay it first.
         */
        local_paca->irq_happened &= ~PACA_IRQ_HMI;
        if (happened & PACA_IRQ_HMI)
                return 0xe60;

        /*
         * We may have missed a decrementer interrupt. We check the
         * decrementer itself rather than the paca irq_happened field
         * in case we also had a rollover while hard disabled.
         */
        local_paca->irq_happened &= ~PACA_IRQ_DEC;
        if ((happened & PACA_IRQ_DEC) || decrementer_check_overflow())
                return 0x900;

        /* Finally check if an external interrupt happened */
        local_paca->irq_happened &= ~PACA_IRQ_EE;
        if (happened & PACA_IRQ_EE)
                return 0x500;

#ifdef CONFIG_PPC_BOOK3E
        /* Check if an EPR external interrupt happened.
         * This bit is typically set if we need to handle another
         * "edge" interrupt from within the MPIC "EPR" handler.
         */
        local_paca->irq_happened &= ~PACA_IRQ_EE_EDGE;
        if (happened & PACA_IRQ_EE_EDGE)
                return 0x500;

        local_paca->irq_happened &= ~PACA_IRQ_DBELL;
        if (happened & PACA_IRQ_DBELL)
                return 0x280;
#else
        local_paca->irq_happened &= ~PACA_IRQ_DBELL;
        if (happened & PACA_IRQ_DBELL) {
                if (cpu_has_feature(CPU_FTR_HVMODE))
                        return 0xe80;
                return 0xa00;
        }
#endif /* CONFIG_PPC_BOOK3E */

        /* There should be nothing left! */
        BUG_ON(local_paca->irq_happened != 0);

        return 0;
}
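/*
 * Note on the constants above: they are the exception vector offsets
 * (0x500 external, 0x900 decrementer, 0xe60 HMI, 0x280/0xa00/0xe80
 * doorbell variants) that __replay_interrupt() uses to re-enter the
 * matching exception path.
 */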
notrace void arch_local_irq_restore(unsigned long en)
{
        unsigned char irq_happened;
        unsigned int replay;

        /* Write the new soft-enabled value */
        set_soft_enabled(en);
        if (!en)
                return;
        /*
         * From this point onward, we can take interrupts, preempt,
         * etc... unless we got hard-disabled. We check if an event
         * happened. If none happened, we know we can just return.
         *
         * We may have preempted before the check below, in which case
         * we are checking the "new" CPU instead of the old one. This
         * is only a problem if an event happened on the "old" CPU.
         *
         * External interrupt events will have caused interrupts to
         * be hard-disabled, so there is no problem, we
         * cannot have preempted.
         */
        irq_happened = get_irq_happened();
        if (!irq_happened)
                return;

        /*
         * We need to hard disable to get a trusted value from
         * __check_irq_replay(). We also need to soft-disable
         * again to avoid warnings in there due to the use of
         * per-cpu variables.
         *
         * We know that if the value in irq_happened is exactly 0x01
         * then we are already hard disabled (there are other less
         * common cases that we'll ignore for now), so we skip the
         * (expensive) mtmsrd.
         */
        if (unlikely(irq_happened != PACA_IRQ_HARD_DIS))
                __hard_irq_disable();
#ifdef CONFIG_TRACE_IRQFLAGS
        else {
                /*
                 * We should already be hard disabled here. We had bugs
                 * where that wasn't the case so let's dbl check it and
                 * warn if we are wrong. Only do that when IRQ tracing
                 * is enabled as mfmsr() can be costly.
                 */
                if (WARN_ON(mfmsr() & MSR_EE))
                        __hard_irq_disable();
        }
#endif /* CONFIG_TRACE_IRQFLAGS */

        set_soft_enabled(0);

        /*
         * Check if anything needs to be re-emitted. We haven't
         * soft-enabled yet to avoid warnings in decrementer_check_overflow
         * accessing per-cpu variables.
         */
        replay = __check_irq_replay();

        /* We can soft-enable now */
        set_soft_enabled(1);

        /*
         * And replay if we have to. This will return with interrupts
         * hard-enabled.
         */
        if (replay) {
                __replay_interrupt(replay);
                return;
        }

        /* Finally, let's ensure we are hard enabled */
        __hard_irq_enable();
}
EXPORT_SYMBOL(arch_local_irq_restore);
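/*
 * Illustrative sketch (not an additional kernel API) of the lazy-masking
 * flow this function completes:
 *
 *      unsigned long flags;
 *
 *      local_irq_save(flags);    // just clears paca->soft_enabled
 *      ...                       // a device interrupt arriving here is
 *                                // recorded in paca->irq_happened and
 *                                // leaves the CPU hard-disabled
 *      local_irq_restore(flags); // lands in arch_local_irq_restore(),
 *                                // which replays the captured interrupt
 *
 * The common case where nothing arrived in the critical section thus
 * pays no mtmsrd on either side.
 */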
/*
 * This is specifically called by assembly code to re-enable interrupts
 * if they are currently disabled. This is typically called before
 * schedule() or do_signal() when returning to userspace. We do it
 * in C to avoid the burden of dealing with lockdep etc...
 *
 * NOTE: This is called with interrupts hard disabled but not marked
 * as such in paca->irq_happened, so we need to resync this.
 */
void notrace restore_interrupts(void)
{
        if (irqs_disabled()) {
                local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
                local_irq_enable();
        } else
                __hard_irq_enable();
}
/*
 * This is a helper to use when about to go into idle low-power
 * when the latter has the side effect of re-enabling interrupts
 * (such as calling H_CEDE under pHyp).
 *
 * You call this function with interrupts soft-disabled (this is
 * already the case when ppc_md.power_save is called). The function
 * will return whether to enter power save or just return.
 *
 * In the former case, it will have notified lockdep of interrupts
 * being re-enabled and generally sanitized the lazy irq state,
 * and in the latter case it will leave with interrupts hard
 * disabled and marked as such, so the local_irq_enable() call
 * in arch_cpu_idle() will properly re-enable everything.
 */
bool prep_irq_for_idle(void)
{
        /*
         * First we need to hard disable to ensure no interrupt
         * occurs before we effectively enter the low power state.
         */
        hard_irq_disable();

        /*
         * If anything happened while we were soft-disabled,
         * we return now and do not enter the low power state.
         */
        if (lazy_irq_pending())
                return false;

        /* Tell lockdep we are about to re-enable */
        trace_hardirqs_on();

        /*
         * Mark interrupts as soft-enabled and clear the
         * PACA_IRQ_HARD_DIS from the pending mask since we
         * are about to hard enable as well as a side effect
         * of entering the low power state.
         */
        local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
        local_paca->soft_enabled = 1;

        /* Tell the caller to enter the low power state */
        return true;
}
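/*
 * Typical caller shape (sketch, assuming a pseries-style power_save hook):
 *
 *      if (!prep_irq_for_idle())
 *              return;         // something is pending, skip the nap
 *      cede_processor();       // may return with interrupts enabled
 */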
/*
 * Force a replay of the external interrupt handler on this CPU.
 */
void force_external_irq_replay(void)
{
        /*
         * This must only be called with interrupts soft-disabled,
         * the replay will happen when re-enabling.
         */
        WARN_ON(!arch_irqs_disabled());

        /* Indicate in the PACA that we have an interrupt to replay */
        local_paca->irq_happened |= PACA_IRQ_EE;
}

#endif /* CONFIG_PPC64 */
int arch_show_interrupts(struct seq_file *p, int prec)
{
        int j;

#if defined(CONFIG_PPC32) && defined(CONFIG_TAU_INT)
        if (tau_initialized) {
                seq_printf(p, "%*s: ", prec, "TAU");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", tau_interrupts(j));
                seq_puts(p, "  PowerPC Thermal Assist (cpu temp)\n");
        }
#endif /* CONFIG_PPC32 && CONFIG_TAU_INT */

        seq_printf(p, "%*s: ", prec, "LOC");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs_event);
        seq_printf(p, "  Local timer interrupts for timer event device\n");

        seq_printf(p, "%*s: ", prec, "LOC");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs_others);
        seq_printf(p, "  Local timer interrupts for others\n");

        seq_printf(p, "%*s: ", prec, "SPU");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(irq_stat, j).spurious_irqs);
        seq_printf(p, "  Spurious interrupts\n");

        seq_printf(p, "%*s: ", prec, "PMI");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(irq_stat, j).pmu_irqs);
        seq_printf(p, "  Performance monitoring interrupts\n");

        seq_printf(p, "%*s: ", prec, "MCE");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(irq_stat, j).mce_exceptions);
        seq_printf(p, "  Machine check exceptions\n");

        if (cpu_has_feature(CPU_FTR_HVMODE)) {
                seq_printf(p, "%*s: ", prec, "HMI");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ",
                                        per_cpu(irq_stat, j).hmi_exceptions);
                seq_printf(p, "  Hypervisor Maintenance Interrupts\n");
        }

#ifdef CONFIG_PPC_DOORBELL
        if (cpu_has_feature(CPU_FTR_DBELL)) {
                seq_printf(p, "%*s: ", prec, "DBL");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", per_cpu(irq_stat, j).doorbell_irqs);
                seq_printf(p, "  Doorbell interrupts\n");
        }
#endif

        return 0;
}
u64 arch_irq_stat_cpu(unsigned int cpu)
{
        u64 sum = per_cpu(irq_stat, cpu).timer_irqs_event;

        sum += per_cpu(irq_stat, cpu).pmu_irqs;
        sum += per_cpu(irq_stat, cpu).mce_exceptions;
        sum += per_cpu(irq_stat, cpu).spurious_irqs;
        sum += per_cpu(irq_stat, cpu).timer_irqs_others;
        sum += per_cpu(irq_stat, cpu).hmi_exceptions;
#ifdef CONFIG_PPC_DOORBELL
        sum += per_cpu(irq_stat, cpu).doorbell_irqs;
#endif

        return sum;
}
#ifdef CONFIG_HOTPLUG_CPU
void migrate_irqs(void)
{
        struct irq_desc *desc;
        unsigned int irq;
        static int warned;
        cpumask_var_t mask;
        const struct cpumask *map = cpu_online_mask;

        alloc_cpumask_var(&mask, GFP_KERNEL);

        for_each_irq_desc(irq, desc) {
                struct irq_data *data;
                struct irq_chip *chip;

                data = irq_desc_get_irq_data(desc);
                if (irqd_is_per_cpu(data))
                        continue;

                chip = irq_data_get_irq_chip(data);

                cpumask_and(mask, irq_data_get_affinity_mask(data), map);
                if (cpumask_any(mask) >= nr_cpu_ids) {
                        pr_warn("Breaking affinity for irq %i\n", irq);
                        cpumask_copy(mask, map);
                }
                if (chip->irq_set_affinity)
                        chip->irq_set_affinity(data, mask, true);
                else if (desc->action && !(warned++))
                        pr_err("Cannot set affinity for irq %i\n", irq);
        }

        free_cpumask_var(mask);

        local_irq_enable();
        mdelay(1);
        local_irq_disable();
}
#endif
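/*
 * Note: the local_irq_enable()/mdelay(1)/local_irq_disable() tail above
 * gives any interrupt already headed for the dying CPU a window to be
 * delivered and handled before the CPU is actually taken offline.
 */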
static inline void check_stack_overflow(void)
{
#ifdef CONFIG_DEBUG_STACKOVERFLOW
        long sp;

        sp = current_stack_pointer() & (THREAD_SIZE - 1);

        /* check for stack overflow: is there less than 2KB free? */
        if (unlikely(sp < (sizeof(struct thread_info) + 2048))) {
                pr_err("do_IRQ: stack overflow: %ld\n",
                        sp - sizeof(struct thread_info));
                dump_stack();
        }
#endif
}
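/*
 * Worked example, assuming THREAD_SIZE is 16KB: masking the stack pointer
 * with (THREAD_SIZE - 1) yields the offset into the current stack region,
 * i.e. the bytes still unused, since the stack grows down toward the
 * thread_info at offset 0.  With, say, a 192-byte thread_info the warning
 * fires once fewer than 192 + 2048 bytes remain.
 */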
void __do_irq(struct pt_regs *regs)
{
        unsigned int irq;

        irq_enter();

        trace_irq_entry(regs);

        check_stack_overflow();

        /*
         * Query the platform PIC for the interrupt & ack it.
         *
         * This will typically lower the interrupt line to the CPU
         */
        irq = ppc_md.get_irq();

        /* We can hard enable interrupts now to allow perf interrupts */
        may_hard_irq_enable();

        /* And finally process it */
        if (unlikely(!irq))
                __this_cpu_inc(irq_stat.spurious_irqs);
        else
                generic_handle_irq(irq);

        trace_irq_exit(regs);

        irq_exit();
}
void do_IRQ(struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);
        struct thread_info *curtp, *irqtp, *sirqtp;

        /* Switch to the irq stack to handle this */
        curtp = current_thread_info();
        irqtp = hardirq_ctx[raw_smp_processor_id()];
        sirqtp = softirq_ctx[raw_smp_processor_id()];

        /* Already there ? */
        if (unlikely(curtp == irqtp || curtp == sirqtp)) {
                __do_irq(regs);
                set_irq_regs(old_regs);
                return;
        }

        /* Prepare the thread_info in the irq stack */
        irqtp->task = curtp->task;
        irqtp->flags = 0;

        /* Copy the preempt_count so that the [soft]irq checks work. */
        irqtp->preempt_count = curtp->preempt_count;

        /* Switch stack and call */
        call_do_irq(regs, irqtp);

        /* Restore stack limit */
        irqtp->task = NULL;

        /* Copy back updates to the thread_info */
        if (irqtp->flags)
                set_bits(irqtp->flags, &curtp->flags);

        set_irq_regs(old_regs);
}
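/*
 * Design note: interrupts run on a dedicated per-CPU stack (hardirq_ctx)
 * so that deep kernel call chains cannot be pushed over the edge by a
 * nested interrupt frame; call_do_irq() is the small asm thunk that swaps
 * r1 onto that stack and calls __do_irq() above.
 */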
void __init init_IRQ(void)
{
        if (ppc_md.init_IRQ)
                ppc_md.init_IRQ();

        exc_lvl_ctx_init();

        irq_ctx_init();
}
#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
struct thread_info *critirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *dbgirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *mcheckirq_ctx[NR_CPUS] __read_mostly;

void exc_lvl_ctx_init(void)
{
        struct thread_info *tp;
        int i, cpu_nr;

        for_each_possible_cpu(i) {
#ifdef CONFIG_PPC64
                cpu_nr = i;
#else
#ifdef CONFIG_SMP
                cpu_nr = get_hard_smp_processor_id(i);
#else
                cpu_nr = 0;
#endif
#endif

                memset((void *)critirq_ctx[cpu_nr], 0, THREAD_SIZE);
                tp = critirq_ctx[cpu_nr];
                tp->cpu = cpu_nr;
                tp->preempt_count = 0;

#ifdef CONFIG_BOOKE
                memset((void *)dbgirq_ctx[cpu_nr], 0, THREAD_SIZE);
                tp = dbgirq_ctx[cpu_nr];
                tp->cpu = cpu_nr;
                tp->preempt_count = 0;

                memset((void *)mcheckirq_ctx[cpu_nr], 0, THREAD_SIZE);
                tp = mcheckirq_ctx[cpu_nr];
                tp->cpu = cpu_nr;
                tp->preempt_count = HARDIRQ_OFFSET;
#endif
        }
}
#endif
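/*
 * Seeding the machine-check context with HARDIRQ_OFFSET (rather than 0)
 * makes in_interrupt() and friends report true immediately when code runs
 * on that stack: a machine check can fire at any point and never goes
 * through the usual irq_enter() accounting.
 */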
struct thread_info *softirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *hardirq_ctx[NR_CPUS] __read_mostly;

void irq_ctx_init(void)
{
        struct thread_info *tp;
        int i;

        for_each_possible_cpu(i) {
                memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
                tp = softirq_ctx[i];
                tp->cpu = i;
                klp_init_thread_info(tp);

                memset((void *)hardirq_ctx[i], 0, THREAD_SIZE);
                tp = hardirq_ctx[i];
                tp->cpu = i;
                klp_init_thread_info(tp);
        }
}
void do_softirq_own_stack(void)
{
        struct thread_info *curtp, *irqtp;

        curtp = current_thread_info();
        irqtp = softirq_ctx[smp_processor_id()];
        irqtp->task = curtp->task;
        irqtp->flags = 0;
        call_do_softirq(irqtp);
        irqtp->task = NULL;

        /* Set any flag that may have been set on the
         * alternate stack
         */
        if (irqtp->flags)
                set_bits(irqtp->flags, &curtp->flags);
}
irq_hw_number_t virq_to_hw(unsigned int virq)
{
        struct irq_data *irq_data = irq_get_irq_data(virq);
        return WARN_ON(!irq_data) ? 0 : irq_data->hwirq;
}
EXPORT_SYMBOL_GPL(virq_to_hw);
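/*
 * Reminder: Linux IRQ numbers ("virqs") are allocated by the irqdomain
 * layer and are distinct from the hardware interrupt source numbers the
 * PIC deals in; this helper recovers the latter, e.g. for platform code
 * that must program the controller directly.
 */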
#ifdef CONFIG_SMP
int irq_choose_cpu(const struct cpumask *mask)
{
        int cpuid;

        if (cpumask_equal(mask, cpu_online_mask)) {
                static int irq_rover;
                static DEFINE_RAW_SPINLOCK(irq_rover_lock);
                unsigned long flags;

                /* Round-robin distribution... */
do_round_robin:
                raw_spin_lock_irqsave(&irq_rover_lock, flags);

                irq_rover = cpumask_next(irq_rover, cpu_online_mask);
                if (irq_rover >= nr_cpu_ids)
                        irq_rover = cpumask_first(cpu_online_mask);

                cpuid = irq_rover;

                raw_spin_unlock_irqrestore(&irq_rover_lock, flags);
        } else {
                cpuid = cpumask_first_and(mask, cpu_online_mask);
                if (cpuid >= nr_cpu_ids)
                        goto do_round_robin;
        }

        return get_hard_smp_processor_id(cpuid);
}
#else
int irq_choose_cpu(const struct cpumask *mask)
{
        return hard_smp_processor_id();
}
#endif
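/*
 * Example of the round-robin arm above: with CPUs 0, 2 and 3 online and a
 * mask covering all online CPUs, successive calls pick 2, 3, 0, 2, ...
 * cpumask_next() returns nr_cpu_ids once past the last set bit, which is
 * what triggers the wrap back to cpumask_first().
 */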
int arch_early_irq_init(void)
{
        return 0;
}
#ifdef CONFIG_PPC64
static int __init setup_noirqdistrib(char *str)
{
        distribute_irqs = 0;
        return 1;
}

__setup("noirqdistrib", setup_noirqdistrib);
#endif /* CONFIG_PPC64 */