// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Derived from arch/i386/kernel/irq.c
 * Copyright (C) 1992 Linus Torvalds
 * Adapted from arch/i386 by Gary Thomas
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Updated and modified by Cort Dougan <cort@fsmlabs.com>
 * Copyright (C) 1996-2001 Cort Dougan
 * Adapted for Power Macintosh by Paul Mackerras
 * Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQs should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 *
 * The MPC8xx has an interrupt mask in the SIU. If a bit is set, the
 * interrupt is _enabled_. As expected, IRQ0 is bit 0 in the 32-bit
 * mask register (of which only 16 are defined), hence the weird shifting
 * and complement of the cached_irq_mask. I want to be able to stuff
 * this right into the SIU SMASK register.
 * Many of the prep/chrp functions are conditionally compiled on CONFIG_PPC_8xx
 * to reduce code space and undefined function references.
 */

#undef DEBUG

#include <linux/export.h>
#include <linux/threads.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/profile.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/debugfs.h>
#include <linux/of.h>
#include <linux/of_irq.h>

#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/cache.h>
#include <asm/prom.h>
#include <asm/ptrace.h>
#include <asm/machdep.h>
#include <asm/udbg.h>
#include <asm/smp.h>
#include <asm/livepatch.h>
#include <asm/asm-prototypes.h>
#include <asm/hw_irq.h>

#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/firmware.h>
#include <asm/lv1call.h>
#endif
#define CREATE_TRACE_POINTS
#include <asm/trace.h>
#include <asm/cpu_has_feature.h>

DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

#ifdef CONFIG_PPC32
atomic_t ppc_n_lost_interrupts;

#ifdef CONFIG_TAU_INT
extern int tau_initialized;
u32 tau_interrupts(unsigned long cpu);
#endif
#endif /* CONFIG_PPC32 */

#ifdef CONFIG_PPC64

int distribute_irqs = 1;

static inline notrace unsigned long get_irq_happened(void)
{
	unsigned long happened;

	/* Read paca->irq_happened directly; r13 holds the PACA pointer */
	__asm__ __volatile__("lbz %0,%1(13)"
	: "=r" (happened) : "i" (offsetof(struct paca_struct, irq_happened)));

	return happened;
}

static inline notrace int decrementer_check_overflow(void)
{
	u64 now = get_tb_or_rtc();
	u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);

	return now >= *next_tb;
}

/* This is called whenever we are re-enabling interrupts
 * and returns either 0 (nothing to do) or the vector of the exception
 * to replay (0x500, 0x900, 0x280/0xa00, 0xe60, 0xf00) when there's an
 * EE, DEC, DBELL, HMI or PMI to regenerate.
 *
 * This is called in two contexts: from arch_local_irq_restore()
 * before soft-enabling interrupts, and from the exception exit
 * path when returning from an interrupt taken in a soft-disabled to
 * a soft-enabled context. In both cases we have interrupts hard
 * disabled.
 *
 * We take care of only clearing the bits we handled in the
 * PACA irq_happened field since we can only re-emit one at a
 * time and we don't want to "lose" one.
 */
notrace unsigned int __check_irq_replay(void)
{
	/*
	 * We use local_paca rather than get_paca() to avoid all the
	 * debug_smp_processor_id() business in this low level function.
	 */
	unsigned char happened = local_paca->irq_happened;

	/*
	 * We are responding to the next interrupt, so interrupt-off
	 * latencies should be reset here.
	 */
	trace_hardirqs_on();
	trace_hardirqs_off();

	/*
	 * We are always hard disabled here, but PACA_IRQ_HARD_DIS may
	 * not be set, which means interrupts have only just been hard
	 * disabled as part of the local_irq_restore or interrupt return
	 * code. In that case, skip the decrementer check because it's
	 * expensive to read the TB.
	 *
	 * HARD_DIS then gets cleared here, but it's reconciled later.
	 * Either local_irq_disable will replay the interrupt and that
	 * will reconcile state like other hard interrupts. Or interrupt
	 * return will replay the interrupt and in that case it sets
	 * PACA_IRQ_HARD_DIS by hand (see comments in entry_64.S).
	 */
	if (happened & PACA_IRQ_HARD_DIS) {
		local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;

		/*
		 * We may have missed a decrementer interrupt if hard disabled.
		 * Check the decrementer register in case we had a rollover
		 * while hard disabled.
		 */
		if (!(happened & PACA_IRQ_DEC)) {
			if (decrementer_check_overflow()) {
				local_paca->irq_happened |= PACA_IRQ_DEC;
				happened |= PACA_IRQ_DEC;
			}
		}
	}

	/*
	 * Force the delivery of pending soft-disabled interrupts on PS3.
	 * Any HV call will have this side effect.
	 */
	if (firmware_has_feature(FW_FEATURE_PS3_LV1)) {
		u64 tmp, tmp2;
		lv1_get_version_info(&tmp, &tmp2);
	}

	/*
	 * Check if a Hypervisor Maintenance Interrupt happened.
	 * This is a higher priority interrupt than the others, so
	 * replay it first.
	 */
	if (happened & PACA_IRQ_HMI) {
		local_paca->irq_happened &= ~PACA_IRQ_HMI;
		return 0xe60;
	}

	if (happened & PACA_IRQ_DEC) {
		local_paca->irq_happened &= ~PACA_IRQ_DEC;
		return 0x900;
	}

	if (happened & PACA_IRQ_PMI) {
		local_paca->irq_happened &= ~PACA_IRQ_PMI;
		return 0xf00;
	}

	if (happened & PACA_IRQ_EE) {
		local_paca->irq_happened &= ~PACA_IRQ_EE;
		return 0x500;
	}

#ifdef CONFIG_PPC_BOOK3E
	/*
	 * Check if an EPR external interrupt happened. This bit is
	 * typically set if we need to handle another "edge" interrupt
	 * from within the MPIC "EPR" handler.
	 */
	if (happened & PACA_IRQ_EE_EDGE) {
		local_paca->irq_happened &= ~PACA_IRQ_EE_EDGE;
		return 0x500;
	}

	if (happened & PACA_IRQ_DBELL) {
		local_paca->irq_happened &= ~PACA_IRQ_DBELL;
		return 0x280;
	}
#else
	if (happened & PACA_IRQ_DBELL) {
		local_paca->irq_happened &= ~PACA_IRQ_DBELL;
		return 0xa00;
	}
#endif /* CONFIG_PPC_BOOK3E */

	/* There should be nothing left! */
	BUG_ON(local_paca->irq_happened != 0);

	return 0;
}
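
/*
 * Note: the vector returned above is handed to __replay_interrupt() by
 * arch_local_irq_restore() below. Events are replayed one per call, in
 * priority order: HMI first, then DEC, PMI, EE and finally doorbells.
 */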

notrace void arch_local_irq_restore(unsigned long mask)
{
	unsigned char irq_happened;
	unsigned int replay;

	/* Write the new soft-enabled value */
	irq_soft_mask_set(mask);
	if (mask)
		return;

	/*
	 * From this point onward, we can take interrupts, preempt,
	 * etc... unless we got hard-disabled. We check if an event
	 * happened. If none happened, we know we can just return.
	 *
	 * We may have preempted before the check below, in which case
	 * we are checking the "new" CPU instead of the old one. This
	 * is only a problem if an event happened on the "old" CPU.
	 *
	 * External interrupt events will have caused interrupts to
	 * be hard-disabled, so there is no problem, we
	 * cannot have preempted.
	 */
	irq_happened = get_irq_happened();
	if (!irq_happened) {
#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
		WARN_ON(!(mfmsr() & MSR_EE));
#endif
		return;
	}

	/*
	 * We need to hard disable to get a trusted value from
	 * __check_irq_replay(). We also need to soft-disable
	 * again to avoid warnings in there due to the use of
	 * per-cpu variables.
	 */
	if (!(irq_happened & PACA_IRQ_HARD_DIS)) {
#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
		WARN_ON(!(mfmsr() & MSR_EE));
#endif
		__hard_irq_disable();
#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
	} else {
		/*
		 * We should already be hard disabled here. We had bugs
		 * where that wasn't the case so let's double-check it
		 * and warn if we are wrong. Only do that when IRQ tracing
		 * is enabled as mfmsr() can be costly.
		 */
		if (WARN_ON(mfmsr() & MSR_EE))
			__hard_irq_disable();
#endif
	}

	irq_soft_mask_set(IRQS_ALL_DISABLED);
	trace_hardirqs_off();

	/*
	 * Check if anything needs to be re-emitted. We haven't
	 * soft-enabled yet to avoid warnings in decrementer_check_overflow
	 * accessing per-cpu variables.
	 */
	replay = __check_irq_replay();

	/* We can soft-enable now */
	trace_hardirqs_on();
	irq_soft_mask_set(IRQS_ENABLED);

	/*
	 * And replay if we have to. This will return with interrupts
	 * hard-enabled.
	 */
	if (replay) {
		__replay_interrupt(replay);
		return;
	}

	/* Finally, let's ensure we are hard enabled */
	__hard_irq_enable();
}
EXPORT_SYMBOL(arch_local_irq_restore);
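
/*
 * A minimal sketch of the standard pattern that ends up here (shown for
 * illustration only; local_irq_save()/local_irq_restore() are the usual
 * kernel API):
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);		// soft-disable; MSR[EE] may stay on
 *	...critical section...
 *	local_irq_restore(flags);	// lands here; replays masked events
 */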

/*
 * This is specifically called by assembly code to re-enable interrupts
 * if they are currently disabled. This is typically called before
 * schedule() or do_signal() when returning to userspace. We do it
 * in C to avoid the burden of dealing with lockdep etc...
 *
 * NOTE: This is called with interrupts hard disabled but not marked
 * as such in paca->irq_happened, so we need to resync this.
 */
void notrace restore_interrupts(void)
{
	if (irqs_disabled()) {
		local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
		local_irq_enable();
	} else
		__hard_irq_enable();
}

/*
 * This is a helper to use when about to go into idle low-power
 * when the latter has the side effect of re-enabling interrupts
 * (such as calling H_CEDE under pHyp).
 *
 * You call this function with interrupts soft-disabled (this is
 * already the case when ppc_md.power_save is called). The function
 * will return whether to enter power save or just return.
 *
 * In the former case, it will have notified lockdep of interrupts
 * being re-enabled and generally sanitized the lazy irq state,
 * and in the latter case it will leave with interrupts hard
 * disabled and marked as such, so the local_irq_enable() call
 * in arch_cpu_idle() will properly re-enable everything.
 */
bool prep_irq_for_idle(void)
{
	/*
	 * First we need to hard disable to ensure no interrupt
	 * occurs before we effectively enter the low power state.
	 */
	__hard_irq_disable();
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

	/*
	 * If anything happened while we were soft-disabled,
	 * we return now and do not enter the low power state.
	 */
	if (lazy_irq_pending())
		return false;

	/* Tell lockdep we are about to re-enable */
	trace_hardirqs_on();

	/*
	 * Mark interrupts as soft-enabled and clear the
	 * PACA_IRQ_HARD_DIS from the pending mask since we
	 * are about to hard enable as well as a side effect
	 * of entering the low power state.
	 */
	local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
	irq_soft_mask_set(IRQS_ENABLED);

	/* Tell the caller to enter the low power state */
	return true;
}
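
/*
 * Hypothetical caller sketch (not any real platform's ppc_md.power_save),
 * illustrating the contract described above:
 *
 *	static void example_power_save(void)
 *	{
 *		if (!prep_irq_for_idle())
 *			return;			// event pending, stay awake
 *		enter_low_power_state();	// may hard-enable interrupts
 *	}
 */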

#ifdef CONFIG_PPC_BOOK3S
/*
 * This is for idle sequences that return with IRQs off, but the
 * idle state itself wakes on interrupt. Tell the irq tracer that
 * IRQs are enabled for the duration of idle so it does not get long
 * off times. Must be paired with fini_irq_for_idle_irqsoff.
 */
bool prep_irq_for_idle_irqsoff(void)
{
	WARN_ON(!irqs_disabled());

	/*
	 * First we need to hard disable to ensure no interrupt
	 * occurs before we effectively enter the low power state.
	 */
	__hard_irq_disable();
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

	/*
	 * If anything happened while we were soft-disabled,
	 * we return now and do not enter the low power state.
	 */
	if (lazy_irq_pending())
		return false;

	/* Tell lockdep we are about to re-enable */
	trace_hardirqs_on();

	return true;
}
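
/*
 * Illustrative pairing (fini_irq_for_idle_irqsoff() is defined elsewhere
 * in this tree; sketch only):
 *
 *	if (prep_irq_for_idle_irqsoff()) {
 *		enter_idle_state();		// wakes with IRQs still off
 *		fini_irq_for_idle_irqsoff();	// tell the tracer IRQs are off
 *	}
 */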

/*
 * Take the SRR1 wakeup reason, index into this table to find the
 * appropriate irq_happened bit.
 *
 * System reset exceptions taken in idle state also come through here,
 * but they are NMI interrupts so do not need to wait for IRQs to be
 * restored, and should be taken as early as practical. These are marked
 * with 0xff in the table. The Power ISA specifies 0100b as the system
 * reset interrupt reason.
 */
#define IRQ_SYSTEM_RESET	0xff

static const u8 srr1_to_lazyirq[0x10] = {
	0, 0, 0,
	PACA_IRQ_DBELL,
	IRQ_SYSTEM_RESET,
	PACA_IRQ_DBELL,
	PACA_IRQ_DEC,
	0,
	PACA_IRQ_EE,
	PACA_IRQ_EE,
	PACA_IRQ_HMI,
	0, 0, 0, 0, 0 };
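
/*
 * Worked example (from the table above): irq_set_pending_from_srr1()
 * extracts the 4-bit wake reason with (srr1 & SRR1_WAKEMASK_P8) >> 18,
 * so a decrementer wakeup (reason 0b0110 = 6) indexes entry 6 and sets
 * PACA_IRQ_DEC, while 0b0100 = 4 hits IRQ_SYSTEM_RESET.
 */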

void replay_system_reset(void)
{
	struct pt_regs regs;

	ppc_save_regs(&regs);
	regs.trap = 0x100;
	get_paca()->in_nmi = 1;
	system_reset_exception(&regs);
	get_paca()->in_nmi = 0;
}
EXPORT_SYMBOL_GPL(replay_system_reset);

void irq_set_pending_from_srr1(unsigned long srr1)
{
	unsigned int idx = (srr1 & SRR1_WAKEMASK_P8) >> 18;
	u8 reason = srr1_to_lazyirq[idx];

	/*
	 * Take the system reset now, which is immediately after registers
	 * are restored from idle. It's an NMI, so interrupts need not be
	 * re-enabled before it is taken.
	 */
	if (unlikely(reason == IRQ_SYSTEM_RESET)) {
		replay_system_reset();
		return;
	}

	/*
	 * The 0 index (SRR1[42:45]=b0000) must always evaluate to 0,
	 * so this can be called unconditionally with the SRR1 wake
	 * reason as returned by the idle code, which uses 0 to mean no
	 * interrupt.
	 *
	 * If a future CPU was to designate this as an interrupt reason,
	 * then a new index for no interrupt must be assigned.
	 */
	local_paca->irq_happened |= reason;
}
#endif /* CONFIG_PPC_BOOK3S */

/*
 * Force a replay of the external interrupt handler on this CPU.
 */
void force_external_irq_replay(void)
{
	/*
	 * This must only be called with interrupts soft-disabled,
	 * the replay will happen when re-enabling.
	 */
	WARN_ON(!arch_irqs_disabled());

	/*
	 * Interrupts must always be hard disabled before irq_happened is
	 * modified (to prevent lost update in case of interrupt between
	 * load and store).
	 */
	__hard_irq_disable();
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

	/* Indicate in the PACA that we have an interrupt to replay */
	local_paca->irq_happened |= PACA_IRQ_EE;
}
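
/*
 * After this, the next arch_local_irq_restore(IRQS_ENABLED) on this CPU
 * sees PACA_IRQ_EE set and re-delivers a 0x500 external interrupt, e.g.
 * (illustration only):
 *
 *	force_external_irq_replay();
 *	local_irq_enable();	// the 0x500 handler replays here
 */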

#endif /* CONFIG_PPC64 */

int arch_show_interrupts(struct seq_file *p, int prec)
{
	int j;

#if defined(CONFIG_PPC32) && defined(CONFIG_TAU_INT)
	if (tau_initialized) {
		seq_printf(p, "%*s: ", prec, "TAU");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", tau_interrupts(j));
		seq_puts(p, "  PowerPC Thermal Assist (cpu temp)\n");
	}
#endif /* CONFIG_PPC32 && CONFIG_TAU_INT */

	seq_printf(p, "%*s: ", prec, "LOC");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs_event);
	seq_printf(p, "  Local timer interrupts for timer event device\n");

	seq_printf(p, "%*s: ", prec, "BCT");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).broadcast_irqs_event);
	seq_printf(p, "  Broadcast timer interrupts for timer event device\n");

	seq_printf(p, "%*s: ", prec, "LOC");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs_others);
	seq_printf(p, "  Local timer interrupts for others\n");

	seq_printf(p, "%*s: ", prec, "SPU");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).spurious_irqs);
	seq_printf(p, "  Spurious interrupts\n");

	seq_printf(p, "%*s: ", prec, "PMI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).pmu_irqs);
	seq_printf(p, "  Performance monitoring interrupts\n");

	seq_printf(p, "%*s: ", prec, "MCE");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).mce_exceptions);
	seq_printf(p, "  Machine check exceptions\n");

	if (cpu_has_feature(CPU_FTR_HVMODE)) {
		seq_printf(p, "%*s: ", prec, "HMI");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ",
					per_cpu(irq_stat, j).hmi_exceptions);
		seq_printf(p, "  Hypervisor Maintenance Interrupts\n");
	}

	seq_printf(p, "%*s: ", prec, "NMI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).sreset_irqs);
	seq_printf(p, "  System Reset interrupts\n");

#ifdef CONFIG_PPC_WATCHDOG
	seq_printf(p, "%*s: ", prec, "WDG");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).soft_nmi_irqs);
	seq_printf(p, "  Watchdog soft-NMI interrupts\n");
#endif

#ifdef CONFIG_PPC_DOORBELL
	if (cpu_has_feature(CPU_FTR_DBELL)) {
		seq_printf(p, "%*s: ", prec, "DBL");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", per_cpu(irq_stat, j).doorbell_irqs);
		seq_printf(p, "  Doorbell interrupts\n");
	}
#endif

	return 0;
}
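
/*
 * These rows follow the per-IRQ lines in /proc/interrupts. Illustrative
 * output on a two-CPU system (values invented):
 *
 *	LOC:     123456     234567   Local timer interrupts for timer event device
 *	SPU:          0          1   Spurious interrupts
 *	PMI:         42         37   Performance monitoring interrupts
 */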

/*
 * /proc/stat helpers
 */
u64 arch_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = per_cpu(irq_stat, cpu).timer_irqs_event;

	sum += per_cpu(irq_stat, cpu).broadcast_irqs_event;
	sum += per_cpu(irq_stat, cpu).pmu_irqs;
	sum += per_cpu(irq_stat, cpu).mce_exceptions;
	sum += per_cpu(irq_stat, cpu).spurious_irqs;
	sum += per_cpu(irq_stat, cpu).timer_irqs_others;
	sum += per_cpu(irq_stat, cpu).hmi_exceptions;
	sum += per_cpu(irq_stat, cpu).sreset_irqs;
#ifdef CONFIG_PPC_WATCHDOG
	sum += per_cpu(irq_stat, cpu).soft_nmi_irqs;
#endif
#ifdef CONFIG_PPC_DOORBELL
	sum += per_cpu(irq_stat, cpu).doorbell_irqs;
#endif

	return sum;
}
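
/*
 * fs/proc/stat.c calls this per CPU to build the "intr" line; a caller
 * sketch (illustration only):
 *
 *	u64 total = 0;
 *	int cpu;
 *
 *	for_each_possible_cpu(cpu)
 *		total += arch_irq_stat_cpu(cpu);
 */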

static inline void check_stack_overflow(void)
{
#ifdef CONFIG_DEBUG_STACKOVERFLOW
	long sp;

	sp = current_stack_pointer() & (THREAD_SIZE - 1);

	/* check for stack overflow: is there less than 2KB free? */
	if (unlikely(sp < 2048)) {
		pr_err("do_IRQ: stack overflow: %ld\n", sp);
		dump_stack();
	}
#endif
}

void __do_irq(struct pt_regs *regs)
{
	unsigned int irq;

	irq_enter();

	trace_irq_entry(regs);

	check_stack_overflow();

	/*
	 * Query the platform PIC for the interrupt & ack it.
	 *
	 * This will typically lower the interrupt line to the CPU.
	 */
	irq = ppc_md.get_irq();

	/* We can hard enable interrupts now to allow perf interrupts */
	may_hard_irq_enable();

	/* And finally process it */
	if (unlikely(!irq))
		__this_cpu_inc(irq_stat.spurious_irqs);
	else
		generic_handle_irq(irq);

	trace_irq_exit(regs);

	irq_exit();
}

void do_IRQ(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	void *cursp, *irqsp, *sirqsp;

	/* Switch to the irq stack to handle this */
	cursp = (void *)(current_stack_pointer() & ~(THREAD_SIZE - 1));
	irqsp = hardirq_ctx[raw_smp_processor_id()];
	sirqsp = softirq_ctx[raw_smp_processor_id()];

	/* Already there? */
	if (unlikely(cursp == irqsp || cursp == sirqsp)) {
		__do_irq(regs);
		set_irq_regs(old_regs);
		return;
	}
	/* Switch stack and call */
	call_do_irq(regs, irqsp);

	set_irq_regs(old_regs);
}

void __init init_IRQ(void)
{
	if (ppc_md.init_IRQ)
		ppc_md.init_IRQ();
}

#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
void *critirq_ctx[NR_CPUS] __read_mostly;
void *dbgirq_ctx[NR_CPUS] __read_mostly;
void *mcheckirq_ctx[NR_CPUS] __read_mostly;
#endif

void *softirq_ctx[NR_CPUS] __read_mostly;
void *hardirq_ctx[NR_CPUS] __read_mostly;

void do_softirq_own_stack(void)
{
	call_do_softirq(softirq_ctx[smp_processor_id()]);
}

irq_hw_number_t virq_to_hw(unsigned int virq)
{
	struct irq_data *irq_data = irq_get_irq_data(virq);
	return WARN_ON(!irq_data) ? 0 : irq_data->hwirq;
}
EXPORT_SYMBOL_GPL(virq_to_hw);
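
/*
 * Illustrative use of virq_to_hw() (hypothetical caller): log the
 * hardware line behind a Linux interrupt number.
 *
 *	pr_debug("virq %u maps to hwirq %lu\n", virq, virq_to_hw(virq));
 */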

#ifdef CONFIG_SMP
int irq_choose_cpu(const struct cpumask *mask)
{
	int cpuid;

	if (cpumask_equal(mask, cpu_online_mask)) {
		static int irq_rover;
		static DEFINE_RAW_SPINLOCK(irq_rover_lock);
		unsigned long flags;

		/* Round-robin distribution... */
do_round_robin:
		raw_spin_lock_irqsave(&irq_rover_lock, flags);

		irq_rover = cpumask_next(irq_rover, cpu_online_mask);
		if (irq_rover >= nr_cpu_ids)
			irq_rover = cpumask_first(cpu_online_mask);

		cpuid = irq_rover;

		raw_spin_unlock_irqrestore(&irq_rover_lock, flags);
	} else {
		cpuid = cpumask_first_and(mask, cpu_online_mask);
		if (cpuid >= nr_cpu_ids)
			goto do_round_robin;
	}

	return get_hard_smp_processor_id(cpuid);
}
#else
int irq_choose_cpu(const struct cpumask *mask)
{
	return hard_smp_processor_id();
}
#endif
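
/*
 * Illustrative caller sketch (hypothetical PIC driver; dest_reg is an
 * invented register): the returned hard CPU id programs the interrupt
 * destination.
 *
 *	hwcpu = irq_choose_cpu(irq_data_get_affinity_mask(d));
 *	out_be32(&pic->dest_reg, 1 << hwcpu);
 */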

#ifdef CONFIG_PPC64
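/*
 * Booting with "noirqdistrib" clears distribute_irqs; platform interrupt
 * code that honours it (the pseries XICS code, for instance) then keeps
 * device interrupts on the boot CPU instead of spreading them.
 */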
static int __init setup_noirqdistrib(char *str)
{
	distribute_irqs = 0;
	return 1;
}

__setup("noirqdistrib", setup_noirqdistrib);
#endif /* CONFIG_PPC64 */