/* arch/powerpc/kernel/irq.c */
/*
 * Derived from arch/i386/kernel/irq.c
 * Copyright (C) 1992 Linus Torvalds
 * Adapted from arch/i386 by Gary Thomas
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Updated and modified by Cort Dougan <cort@fsmlabs.com>
 * Copyright (C) 1996-2001 Cort Dougan
 * Adapted for Power Macintosh by Paul Mackerras
 * Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQs should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 *
 * The MPC8xx has an interrupt mask in the SIU. If a bit is set, the
 * interrupt is _enabled_. As expected, IRQ0 is bit 0 in the 32-bit
 * mask register (of which only 16 are defined), hence the weird shifting
 * and complement of the cached_irq_mask. I want to be able to stuff
 * this right into the SIU SMASK register.
 * Many of the prep/chrp functions are conditionally compiled on CONFIG_PPC_8xx
 * to reduce code space and undefined function references.
 */

#undef DEBUG

#include <linux/export.h>
#include <linux/threads.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/profile.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/debugfs.h>
#include <linux/of.h>
#include <linux/of_irq.h>

#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/cache.h>
#include <asm/prom.h>
#include <asm/ptrace.h>
#include <asm/machdep.h>
#include <asm/udbg.h>
#include <asm/smp.h>
#include <asm/livepatch.h>
#include <asm/asm-prototypes.h>
#include <asm/hw_irq.h>

#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/firmware.h>
#include <asm/lv1call.h>
#endif
#define CREATE_TRACE_POINTS
#include <asm/trace.h>
#include <asm/cpu_has_feature.h>

DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

#ifdef CONFIG_PPC32
atomic_t ppc_n_lost_interrupts;

#ifdef CONFIG_TAU_INT
extern int tau_initialized;
u32 tau_interrupts(unsigned long cpu);
#endif
#endif /* CONFIG_PPC32 */

#ifdef CONFIG_PPC64

int distribute_irqs = 1;

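/*
 * Read the pending-interrupt byte straight out of the PACA. On 64-bit,
 * r13 always points at this CPU's PACA, so a single lbz is enough and
 * avoids the debug_smp_processor_id() business that get_paca() would
 * pull into this low level path.
 */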
static inline notrace unsigned long get_irq_happened(void)
{
        unsigned long happened;

        __asm__ __volatile__("lbz %0,%1(13)"
        : "=r" (happened) : "i" (offsetof(struct paca_struct, irq_happened)));

        return happened;
}

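/*
 * Returns non-zero if the timebase has passed the next scheduled
 * decrementer event, i.e. a decrementer interrupt may have been lost
 * while we were hard disabled and needs to be replayed.
 */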
static inline notrace int decrementer_check_overflow(void)
{
        u64 now = get_tb_or_rtc();
        u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);

        return now >= *next_tb;
}

/* This is called whenever we are re-enabling interrupts
 * and returns either 0 (nothing to do) or the vector of the interrupt
 * to replay (0x500, 0x900, 0xf00, 0xe60, 0x280 or 0xa00) if there's an
 * EE, DEC, PMI, HMI or doorbell to generate.
 *
 * This is called in two contexts: From arch_local_irq_restore()
 * before soft-enabling interrupts, and from the exception exit
 * path when returning from an interrupt from a soft-disabled to
 * a soft-enabled context. In both cases we have interrupts hard
 * disabled.
 *
 * We take care of only clearing the bits we handled in the
 * PACA irq_happened field since we can only re-emit one at a
 * time and we don't want to "lose" one.
 */
notrace unsigned int __check_irq_replay(void)
{
        /*
         * We use local_paca rather than get_paca() to avoid all
         * the debug_smp_processor_id() business in this low level
         * function
         */
        unsigned char happened = local_paca->irq_happened;

        /*
         * We are responding to the next interrupt, so interrupt-off
         * latencies should be reset here.
         */
        trace_hardirqs_on();
        trace_hardirqs_off();

        /*
         * We are always hard disabled here, but PACA_IRQ_HARD_DIS may
         * not be set, which means interrupts have only just been hard
         * disabled as part of the local_irq_restore or interrupt return
         * code. In that case, skip the decrementer check because it's
         * expensive to read the TB.
         *
         * HARD_DIS then gets cleared here, but it's reconciled later.
         * Either local_irq_disable will replay the interrupt and that
         * will reconcile state like other hard interrupts. Or interrupt
         * return will replay the interrupt and in that case it sets
         * PACA_IRQ_HARD_DIS by hand (see comments in entry_64.S).
         */
        if (happened & PACA_IRQ_HARD_DIS) {
                local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;

                /*
                 * We may have missed a decrementer interrupt if hard disabled.
                 * Check the decrementer register in case we had a rollover
                 * while hard disabled.
                 */
                if (!(happened & PACA_IRQ_DEC)) {
                        if (decrementer_check_overflow()) {
                                local_paca->irq_happened |= PACA_IRQ_DEC;
                                happened |= PACA_IRQ_DEC;
                        }
                }
        }

        /*
         * Force the delivery of pending soft-disabled interrupts on PS3.
         * Any HV call will have this side effect.
         */
        if (firmware_has_feature(FW_FEATURE_PS3_LV1)) {
                u64 tmp, tmp2;
                lv1_get_version_info(&tmp, &tmp2);
        }

        /*
         * Check if a Hypervisor Maintenance interrupt happened.
         * This is a higher priority interrupt than the others, so
         * replay it first.
         */
        if (happened & PACA_IRQ_HMI) {
                local_paca->irq_happened &= ~PACA_IRQ_HMI;
                return 0xe60;
        }

        if (happened & PACA_IRQ_DEC) {
                local_paca->irq_happened &= ~PACA_IRQ_DEC;
                return 0x900;
        }

        if (happened & PACA_IRQ_PMI) {
                local_paca->irq_happened &= ~PACA_IRQ_PMI;
                return 0xf00;
        }

        if (happened & PACA_IRQ_EE) {
                local_paca->irq_happened &= ~PACA_IRQ_EE;
                return 0x500;
        }

#ifdef CONFIG_PPC_BOOK3E
        /*
         * Check if an EPR external interrupt happened. This bit is typically
         * set if we need to handle another "edge" interrupt from within the
         * MPIC "EPR" handler.
         */
        if (happened & PACA_IRQ_EE_EDGE) {
                local_paca->irq_happened &= ~PACA_IRQ_EE_EDGE;
                return 0x500;
        }

        if (happened & PACA_IRQ_DBELL) {
                local_paca->irq_happened &= ~PACA_IRQ_DBELL;
                return 0x280;
        }
#else
        if (happened & PACA_IRQ_DBELL) {
                local_paca->irq_happened &= ~PACA_IRQ_DBELL;
                return 0xa00;
        }
#endif /* CONFIG_PPC_BOOK3E */

        /* There should be nothing left! */
        BUG_ON(local_paca->irq_happened != 0);

        return 0;
}

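/*
 * Lazy interrupt disabling in a nutshell: local_irq_disable() only sets a
 * software mask in the PACA and leaves MSR[EE] alone. If an interrupt
 * arrives while soft-disabled, the low-level handler hard disables, records
 * the event in paca->irq_happened and returns without running the handler.
 * arch_local_irq_restore() below is the other half: when the soft mask is
 * cleared it checks irq_happened and replays anything that was held back.
 */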
notrace void arch_local_irq_restore(unsigned long mask)
{
        unsigned char irq_happened;
        unsigned int replay;

        /* Write the new soft-enabled value */
        irq_soft_mask_set(mask);
        if (mask)
                return;

        /*
         * From this point onward, we can take interrupts, preempt,
         * etc... unless we got hard-disabled. We check if an event
         * happened. If none happened, we know we can just return.
         *
         * We may have preempted before the check below, in which case
         * we are checking the "new" CPU instead of the old one. This
         * is only a problem if an event happened on the "old" CPU.
         *
         * External interrupt events will have caused interrupts to
         * be hard-disabled, so there is no problem: we cannot have
         * preempted.
         */
        irq_happened = get_irq_happened();
        if (!irq_happened) {
#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
                WARN_ON(!(mfmsr() & MSR_EE));
#endif
                return;
        }

        /*
         * We need to hard disable to get a trusted value from
         * __check_irq_replay(). We also need to soft-disable
         * again to avoid warnings in there due to the use of
         * per-cpu variables.
         */
        if (!(irq_happened & PACA_IRQ_HARD_DIS)) {
#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
                WARN_ON(!(mfmsr() & MSR_EE));
#endif
                __hard_irq_disable();
#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
        } else {
                /*
                 * We should already be hard disabled here. We had bugs
                 * where that wasn't the case so let's double check it and
                 * warn if we are wrong. Only do that when IRQ tracing
                 * is enabled as mfmsr() can be costly.
                 */
                if (WARN_ON(mfmsr() & MSR_EE))
                        __hard_irq_disable();
#endif
        }

        irq_soft_mask_set(IRQS_ALL_DISABLED);
        trace_hardirqs_off();

        /*
         * Check if anything needs to be re-emitted. We haven't
         * soft-enabled yet to avoid warnings in decrementer_check_overflow
         * accessing per-cpu variables
         */
        replay = __check_irq_replay();

        /* We can soft-enable now */
        trace_hardirqs_on();
        irq_soft_mask_set(IRQS_ENABLED);

        /*
         * And replay if we have to. This will return with interrupts
         * hard-enabled.
         */
        if (replay) {
                __replay_interrupt(replay);
                return;
        }

        /* Finally, let's ensure we are hard enabled */
        __hard_irq_enable();
}
EXPORT_SYMBOL(arch_local_irq_restore);

/*
 * This is specifically called by assembly code to re-enable interrupts
 * if they are currently disabled. This is typically called before
 * schedule() or do_signal() when returning to userspace. We do it
 * in C to avoid the burden of dealing with lockdep etc...
 *
 * NOTE: This is called with interrupts hard disabled but not marked
 * as such in paca->irq_happened, so we need to resync this.
 */
void notrace restore_interrupts(void)
{
        if (irqs_disabled()) {
                local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
                local_irq_enable();
        } else
                __hard_irq_enable();
}

/*
 * This is a helper to use when about to enter an idle low-power state,
 * when doing so has the side effect of re-enabling interrupts
 * (such as calling H_CEDE under pHyp).
 *
 * You call this function with interrupts soft-disabled (this is
 * already the case when ppc_md.power_save is called). The function
 * will return whether to enter power save or just return.
 *
 * In the former case, it will have notified lockdep of interrupts
 * being re-enabled and generally sanitized the lazy irq state,
 * and in the latter case it will leave with interrupts hard
 * disabled and marked as such, so the local_irq_enable() call
 * in arch_cpu_idle() will properly re-enable everything.
 */
bool prep_irq_for_idle(void)
{
        /*
         * First we need to hard disable to ensure no interrupt
         * occurs before we effectively enter the low power state
         */
        __hard_irq_disable();
        local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

        /*
         * If anything happened while we were soft-disabled,
         * we return now and do not enter the low power state.
         */
        if (lazy_irq_pending())
                return false;

        /* Tell lockdep we are about to re-enable */
        trace_hardirqs_on();

        /*
         * Mark interrupts as soft-enabled and clear the
         * PACA_IRQ_HARD_DIS from the pending mask since we
         * are about to hard enable as well as a side effect
         * of entering the low power state.
         */
        local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
        irq_soft_mask_set(IRQS_ENABLED);

        /* Tell the caller to enter the low power state */
        return true;
}

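/*
 * Typical use of prep_irq_for_idle() from a platform power_save hook
 * (sketch only; enter_low_power_state() is an illustrative name, not a
 * real API):
 *
 *      if (prep_irq_for_idle())
 *              enter_low_power_state();   (may hard-enable interrupts)
 */
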
#ifdef CONFIG_PPC_BOOK3S
/*
 * This is for idle sequences that return with IRQs off, but the
 * idle state itself wakes on interrupt. Tell the irq tracer that
 * IRQs are enabled for the duration of idle so it does not get long
 * off times. Must be paired with fini_irq_for_idle_irqsoff.
 */
bool prep_irq_for_idle_irqsoff(void)
{
        WARN_ON(!irqs_disabled());

        /*
         * First we need to hard disable to ensure no interrupt
         * occurs before we effectively enter the low power state
         */
        __hard_irq_disable();
        local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

        /*
         * If anything happened while we were soft-disabled,
         * we return now and do not enter the low power state.
         */
        if (lazy_irq_pending())
                return false;

        /* Tell lockdep we are about to re-enable */
        trace_hardirqs_on();

        return true;
}

/*
 * Take the SRR1 wakeup reason, index into this table to find the
 * appropriate irq_happened bit.
 *
 * System reset exceptions taken in idle state also come through here,
 * but they are NMI interrupts so do not need to wait for IRQs to be
 * restored, and should be taken as early as practical. These are marked
 * with 0xff in the table. The Power ISA specifies 0100b as the system
 * reset interrupt reason.
 */
#define IRQ_SYSTEM_RESET        0xff

static const u8 srr1_to_lazyirq[0x10] = {
        0, 0, 0,
        PACA_IRQ_DBELL,
        IRQ_SYSTEM_RESET,
        PACA_IRQ_DBELL,
        PACA_IRQ_DEC,
        0,
        PACA_IRQ_EE,
        PACA_IRQ_EE,
        PACA_IRQ_HMI,
        0, 0, 0, 0, 0 };

void replay_system_reset(void)
{
        struct pt_regs regs;

        ppc_save_regs(&regs);
        regs.trap = 0x100;
        get_paca()->in_nmi = 1;
        system_reset_exception(&regs);
        get_paca()->in_nmi = 0;
}
EXPORT_SYMBOL_GPL(replay_system_reset);

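/*
 * SRR1_WAKEMASK_P8 selects the wake reason field (SRR1[42:45]); shifting
 * right by 18 moves that field down to bits 0-3, giving the index into
 * srr1_to_lazyirq[] above.
 */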
void irq_set_pending_from_srr1(unsigned long srr1)
{
        unsigned int idx = (srr1 & SRR1_WAKEMASK_P8) >> 18;
        u8 reason = srr1_to_lazyirq[idx];

        /*
         * Take the system reset now, which is immediately after registers
         * are restored from idle. It's an NMI, so interrupts need not be
         * re-enabled before it is taken.
         */
        if (unlikely(reason == IRQ_SYSTEM_RESET)) {
                replay_system_reset();
                return;
        }

        /*
         * The 0 index (SRR1[42:45]=b0000) must always evaluate to 0,
         * so this can be called unconditionally with the SRR1 wake
         * reason as returned by the idle code, which uses 0 to mean no
         * interrupt.
         *
         * If a future CPU were to designate this as an interrupt reason,
         * then a new index for no interrupt must be assigned.
         */
        local_paca->irq_happened |= reason;
}
#endif /* CONFIG_PPC_BOOK3S */

/*
 * Force a replay of the external interrupt handler on this CPU.
 */
void force_external_irq_replay(void)
{
        /*
         * This must only be called with interrupts soft-disabled;
         * the replay will happen when re-enabling.
         */
        WARN_ON(!arch_irqs_disabled());

        /*
         * Interrupts must always be hard disabled before irq_happened is
         * modified (to prevent lost update in case of interrupt between
         * load and store).
         */
        __hard_irq_disable();
        local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

        /* Indicate in the PACA that we have an interrupt to replay */
        local_paca->irq_happened |= PACA_IRQ_EE;
}

#endif /* CONFIG_PPC64 */

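/*
 * Architecture-specific rows appended to /proc/interrupts.
 */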
int arch_show_interrupts(struct seq_file *p, int prec)
{
        int j;

#if defined(CONFIG_PPC32) && defined(CONFIG_TAU_INT)
        if (tau_initialized) {
                seq_printf(p, "%*s: ", prec, "TAU");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", tau_interrupts(j));
                seq_puts(p, "  PowerPC Thermal Assist (cpu temp)\n");
        }
#endif /* CONFIG_PPC32 && CONFIG_TAU_INT */

        seq_printf(p, "%*s: ", prec, "LOC");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs_event);
        seq_printf(p, "  Local timer interrupts for timer event device\n");

        seq_printf(p, "%*s: ", prec, "BCT");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(irq_stat, j).broadcast_irqs_event);
        seq_printf(p, "  Broadcast timer interrupts for timer event device\n");

        seq_printf(p, "%*s: ", prec, "LOC");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs_others);
        seq_printf(p, "  Local timer interrupts for others\n");

        seq_printf(p, "%*s: ", prec, "SPU");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(irq_stat, j).spurious_irqs);
        seq_printf(p, "  Spurious interrupts\n");

        seq_printf(p, "%*s: ", prec, "PMI");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(irq_stat, j).pmu_irqs);
        seq_printf(p, "  Performance monitoring interrupts\n");

        seq_printf(p, "%*s: ", prec, "MCE");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(irq_stat, j).mce_exceptions);
        seq_printf(p, "  Machine check exceptions\n");

        if (cpu_has_feature(CPU_FTR_HVMODE)) {
                seq_printf(p, "%*s: ", prec, "HMI");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ",
                                        per_cpu(irq_stat, j).hmi_exceptions);
                seq_printf(p, "  Hypervisor Maintenance Interrupts\n");
        }

        seq_printf(p, "%*s: ", prec, "NMI");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(irq_stat, j).sreset_irqs);
        seq_printf(p, "  System Reset interrupts\n");

#ifdef CONFIG_PPC_WATCHDOG
        seq_printf(p, "%*s: ", prec, "WDG");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(irq_stat, j).soft_nmi_irqs);
        seq_printf(p, "  Watchdog soft-NMI interrupts\n");
#endif

#ifdef CONFIG_PPC_DOORBELL
        if (cpu_has_feature(CPU_FTR_DBELL)) {
                seq_printf(p, "%*s: ", prec, "DBL");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", per_cpu(irq_stat, j).doorbell_irqs);
                seq_printf(p, "  Doorbell interrupts\n");
        }
#endif

        return 0;
}

/*
 * /proc/stat helpers
 */
u64 arch_irq_stat_cpu(unsigned int cpu)
{
        u64 sum = per_cpu(irq_stat, cpu).timer_irqs_event;

        sum += per_cpu(irq_stat, cpu).broadcast_irqs_event;
        sum += per_cpu(irq_stat, cpu).pmu_irqs;
        sum += per_cpu(irq_stat, cpu).mce_exceptions;
        sum += per_cpu(irq_stat, cpu).spurious_irqs;
        sum += per_cpu(irq_stat, cpu).timer_irqs_others;
        sum += per_cpu(irq_stat, cpu).hmi_exceptions;
        sum += per_cpu(irq_stat, cpu).sreset_irqs;
#ifdef CONFIG_PPC_WATCHDOG
        sum += per_cpu(irq_stat, cpu).soft_nmi_irqs;
#endif
#ifdef CONFIG_PPC_DOORBELL
        sum += per_cpu(irq_stat, cpu).doorbell_irqs;
#endif

        return sum;
}

static inline void check_stack_overflow(void)
{
#ifdef CONFIG_DEBUG_STACKOVERFLOW
        long sp;

        sp = current_stack_pointer() & (THREAD_SIZE - 1);

        /* check for stack overflow: is there less than 2KB free? */
        if (unlikely(sp < 2048)) {
                pr_err("do_IRQ: stack overflow: %ld\n", sp);
                dump_stack();
        }
#endif
}

void __do_irq(struct pt_regs *regs)
{
        unsigned int irq;

        irq_enter();

        trace_irq_entry(regs);

        check_stack_overflow();

        /*
         * Query the platform PIC for the interrupt & ack it.
         *
         * This will typically lower the interrupt line to the CPU
         */
        irq = ppc_md.get_irq();

        /* We can hard enable interrupts now to allow perf interrupts */
        may_hard_irq_enable();

        /* And finally process it */
        if (unlikely(!irq))
                __this_cpu_inc(irq_stat.spurious_irqs);
        else
                generic_handle_irq(irq);

        trace_irq_exit(regs);

        irq_exit();
}

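/*
 * External interrupt entry point. Interrupts are handled on a dedicated
 * per-CPU IRQ stack (hardirq_ctx) so that deep handler activity does not
 * overflow the current task's kernel stack; if we are already on one of
 * the IRQ stacks we just call __do_irq() directly.
 */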
void do_IRQ(struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);
        void *cursp, *irqsp, *sirqsp;

        /* Switch to the irq stack to handle this */
        cursp = (void *)(current_stack_pointer() & ~(THREAD_SIZE - 1));
        irqsp = hardirq_ctx[raw_smp_processor_id()];
        sirqsp = softirq_ctx[raw_smp_processor_id()];

        /* Already there ? */
        if (unlikely(cursp == irqsp || cursp == sirqsp)) {
                __do_irq(regs);
                set_irq_regs(old_regs);
                return;
        }
        /* Switch stack and call */
        call_do_irq(regs, irqsp);

        set_irq_regs(old_regs);
}

void __init init_IRQ(void)
{
        if (ppc_md.init_IRQ)
                ppc_md.init_IRQ();
}

#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
void *critirq_ctx[NR_CPUS] __read_mostly;
void *dbgirq_ctx[NR_CPUS] __read_mostly;
void *mcheckirq_ctx[NR_CPUS] __read_mostly;
#endif

void *softirq_ctx[NR_CPUS] __read_mostly;
void *hardirq_ctx[NR_CPUS] __read_mostly;

void do_softirq_own_stack(void)
{
        call_do_softirq(softirq_ctx[smp_processor_id()]);
}

irq_hw_number_t virq_to_hw(unsigned int virq)
{
        struct irq_data *irq_data = irq_get_irq_data(virq);
        return WARN_ON(!irq_data) ? 0 : irq_data->hwirq;
}
EXPORT_SYMBOL_GPL(virq_to_hw);

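/*
 * irq_choose_cpu() maps an affinity mask to a hard (physical) CPU number.
 * If the mask covers every online CPU we round-robin across them to spread
 * the load; otherwise we take the first online CPU in the mask.
 */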
#ifdef CONFIG_SMP
int irq_choose_cpu(const struct cpumask *mask)
{
        int cpuid;

        if (cpumask_equal(mask, cpu_online_mask)) {
                static int irq_rover;
                static DEFINE_RAW_SPINLOCK(irq_rover_lock);
                unsigned long flags;

                /* Round-robin distribution... */
do_round_robin:
                raw_spin_lock_irqsave(&irq_rover_lock, flags);

                irq_rover = cpumask_next(irq_rover, cpu_online_mask);
                if (irq_rover >= nr_cpu_ids)
                        irq_rover = cpumask_first(cpu_online_mask);

                cpuid = irq_rover;

                raw_spin_unlock_irqrestore(&irq_rover_lock, flags);
        } else {
                cpuid = cpumask_first_and(mask, cpu_online_mask);
                if (cpuid >= nr_cpu_ids)
                        goto do_round_robin;
        }

        return get_hard_smp_processor_id(cpuid);
}
#else
int irq_choose_cpu(const struct cpumask *mask)
{
        return hard_smp_processor_id();
}
#endif

#ifdef CONFIG_PPC64
static int __init setup_noirqdistrib(char *str)
{
        distribute_irqs = 0;
        return 1;
}

__setup("noirqdistrib", setup_noirqdistrib);
#endif /* CONFIG_PPC64 */