/*
 * Derived from arch/i386/kernel/irq.c
 *	Copyright (C) 1992 Linus Torvalds
 * Adapted from arch/i386 by Gary Thomas
 *	Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Updated and modified by Cort Dougan <cort@fsmlabs.com>
 *	Copyright (C) 1996-2001 Cort Dougan
 * Adapted for Power Macintosh by Paul Mackerras
 *	Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQs should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 *
 * The MPC8xx has an interrupt mask in the SIU.  If a bit is set, the
 * interrupt is _enabled_.  As expected, IRQ0 is bit 0 in the 32-bit
 * mask register (of which only 16 are defined), hence the weird shifting
 * and complement of the cached_irq_mask.  I want to be able to stuff
 * this right into the SIU SMASK register.
 * Many of the prep/chrp functions are conditionally compiled on CONFIG_8xx
 * to reduce code space and undefined function references.
 */

#undef DEBUG

#include <linux/export.h>
#include <linux/threads.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/profile.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/debugfs.h>
#include <linux/of.h>
#include <linux/of_irq.h>

#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/cache.h>
#include <asm/prom.h>
#include <asm/ptrace.h>
#include <asm/machdep.h>
#include <asm/udbg.h>
#include <asm/smp.h>
#include <asm/debug.h>
#include <asm/livepatch.h>
#include <asm/asm-prototypes.h>

#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/firmware.h>
#include <asm/lv1call.h>
#endif
#define CREATE_TRACE_POINTS
#include <asm/trace.h>
#include <asm/cpu_has_feature.h>

DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

int __irq_offset_value;

#ifdef CONFIG_PPC32
EXPORT_SYMBOL(__irq_offset_value);
atomic_t ppc_n_lost_interrupts;

#ifdef CONFIG_TAU_INT
extern int tau_initialized;
extern int tau_interrupts(int);
#endif
#endif /* CONFIG_PPC32 */

#ifdef CONFIG_PPC64

int distribute_irqs = 1;

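/*
 * On 64-bit, r13 permanently holds the pointer to this CPU's PACA, so
 * paca->irq_happened and paca->soft_enabled can be accessed with a
 * single byte load/store relative to r13.  The raw inline assembly in
 * the accessors below keeps them minimal and notrace-safe, avoiding
 * the debug checks that get_paca() can pull in.
 */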
static inline notrace unsigned long get_irq_happened(void)
{
	unsigned long happened;

	__asm__ __volatile__("lbz %0,%1(13)"
	: "=r" (happened) : "i" (offsetof(struct paca_struct, irq_happened)));

	return happened;
}

static inline notrace void set_soft_enabled(unsigned long enable)
{
	__asm__ __volatile__("stb %0,%1(13)"
	: : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled)));
}

static inline notrace int decrementer_check_overflow(void)
{
	u64 now = get_tb_or_rtc();
	u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);

	return now >= *next_tb;
}

/* This is called whenever we are re-enabling interrupts
 * and returns either 0 (nothing to do) or the exception vector
 * (0x500/0x900/0x280/0xa00/0xe60/0xe80) if there's an EE, DEC,
 * HMI or DBELL to generate.
 *
 * This is called in two contexts: from arch_local_irq_restore()
 * before soft-enabling interrupts, and from the exception exit
 * path when returning from an interrupt from a soft-disabled to
 * a soft-enabled context.  In both cases we have interrupts hard
 * disabled.
 *
 * We take care of only clearing the bits we handled in the
 * PACA irq_happened field since we can only re-emit one at a
 * time and we don't want to "lose" one.
 */
notrace unsigned int __check_irq_replay(void)
{
	/*
	 * We use local_paca rather than get_paca() to avoid all
	 * the debug_smp_processor_id() business in this low level
	 * function
	 */
	unsigned char happened = local_paca->irq_happened;

	/* Clear bit 0 which we wouldn't clear otherwise */
	local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;

	/*
	 * Force the delivery of pending soft-disabled interrupts on PS3.
	 * Any HV call will have this side effect.
	 */
	if (firmware_has_feature(FW_FEATURE_PS3_LV1)) {
		u64 tmp, tmp2;
		lv1_get_version_info(&tmp, &tmp2);
	}

	/*
	 * Check if a Hypervisor Maintenance interrupt happened.
	 * This is a higher priority interrupt than the others, so
	 * replay it first.
	 */
	local_paca->irq_happened &= ~PACA_IRQ_HMI;
	if (happened & PACA_IRQ_HMI)
		return 0xe60;

	/*
	 * We may have missed a decrementer interrupt. We check the
	 * decrementer itself rather than the paca irq_happened field
	 * in case we also had a rollover while hard disabled
	 */
	local_paca->irq_happened &= ~PACA_IRQ_DEC;
	if ((happened & PACA_IRQ_DEC) || decrementer_check_overflow())
		return 0x900;

	/* Then check if an external interrupt happened */
	local_paca->irq_happened &= ~PACA_IRQ_EE;
	if (happened & PACA_IRQ_EE)
		return 0x500;

#ifdef CONFIG_PPC_BOOK3E
	/* Check if an EPR external interrupt happened; this bit is
	 * typically set if we need to handle another "edge" interrupt
	 * from within the MPIC "EPR" handler.
	 */
	local_paca->irq_happened &= ~PACA_IRQ_EE_EDGE;
	if (happened & PACA_IRQ_EE_EDGE)
		return 0x500;

	local_paca->irq_happened &= ~PACA_IRQ_DBELL;
	if (happened & PACA_IRQ_DBELL)
		return 0x280;
#else
	local_paca->irq_happened &= ~PACA_IRQ_DBELL;
	if (happened & PACA_IRQ_DBELL) {
		if (cpu_has_feature(CPU_FTR_HVMODE))
			return 0xe80;
		return 0xa00;
	}
#endif /* CONFIG_PPC_BOOK3E */

	/* There should be nothing left! */
	BUG_ON(local_paca->irq_happened != 0);

	return 0;
}
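
/*
 * Illustrative replay sequence (a sketch, not code from this file):
 * a device raises an external interrupt while we are soft-disabled.
 * The masked handler records PACA_IRQ_EE and hard-disables.  Later,
 * arch_local_irq_restore(1) sees irq_happened != 0, calls
 * __check_irq_replay() which returns 0x500, and __replay_interrupt(0x500)
 * re-enters the external interrupt vector as if it had just fired.
 */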

notrace void arch_local_irq_restore(unsigned long en)
{
	unsigned char irq_happened;
	unsigned int replay;

	/* Write the new soft-enabled value */
	set_soft_enabled(en);
	if (!en)
		return;
	/*
	 * From this point onward, we can take interrupts, preempt,
	 * etc... unless we got hard-disabled. We check if an event
	 * happened. If none happened, we know we can just return.
	 *
	 * We may have preempted before the check below, in which case
	 * we are checking the "new" CPU instead of the old one. This
	 * is only a problem if an event happened on the "old" CPU.
	 *
	 * External interrupt events will have caused interrupts to
	 * be hard-disabled, so there is no problem, we
	 * cannot have preempted.
	 */
	irq_happened = get_irq_happened();
	if (!irq_happened)
		return;

	/*
	 * We need to hard disable to get a trusted value from
	 * __check_irq_replay(). We also need to soft-disable
	 * again to avoid warnings in there due to the use of
	 * per-cpu variables.
	 *
	 * We know that if the value in irq_happened is exactly 0x01
	 * then we are already hard disabled (there are other less
	 * common cases that we'll ignore for now), so we skip the
	 * (expensive) mtmsrd.
	 */
	if (unlikely(irq_happened != PACA_IRQ_HARD_DIS))
		__hard_irq_disable();
#ifdef CONFIG_TRACE_IRQFLAGS
	else {
		/*
		 * We should already be hard disabled here. We had bugs
		 * where that wasn't the case so let's double-check it
		 * and warn if we are wrong. Only do that when IRQ tracing
		 * is enabled as mfmsr() can be costly.
		 */
		if (WARN_ON(mfmsr() & MSR_EE))
			__hard_irq_disable();
	}
#endif /* CONFIG_TRACE_IRQFLAGS */

	set_soft_enabled(0);

	/*
	 * Check if anything needs to be re-emitted. We haven't
	 * soft-enabled yet to avoid warnings in decrementer_check_overflow
	 * accessing per-cpu variables
	 */
	replay = __check_irq_replay();

	/* We can soft-enable now */
	set_soft_enabled(1);

	/*
	 * And replay if we have to. This will return with interrupts
	 * hard-enabled.
	 */
	if (replay) {
		__replay_interrupt(replay);
		return;
	}

	/* Finally, let's ensure we are hard enabled */
	__hard_irq_enable();
}
EXPORT_SYMBOL(arch_local_irq_restore);

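/*
 * Note on the lazy scheme above: local_irq_disable() only clears
 * paca->soft_enabled and never touches MSR[EE], so the common
 * disable/enable pair costs two byte stores.  The expensive mtmsrd is
 * paid here, in the restore path, and only when an interrupt actually
 * arrived while we were soft-disabled.
 */
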
/*
 * This is specifically called by assembly code to re-enable interrupts
 * if they are currently disabled. This is typically called before
 * schedule() or do_signal() when returning to userspace. We do it
 * in C to avoid the burden of dealing with lockdep etc...
 *
 * NOTE: This is called with interrupts hard disabled but not marked
 * as such in paca->irq_happened, so we need to resync this.
 */
void notrace restore_interrupts(void)
{
	if (irqs_disabled()) {
		local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
		local_irq_enable();
	} else
		__hard_irq_enable();
}

/*
 * This is a helper to use when about to enter an idle low-power state,
 * when entering that state has the side effect of re-enabling
 * interrupts (such as calling H_CEDE under pHyp).
 *
 * You call this function with interrupts soft-disabled (this is
 * already the case when ppc_md.power_save is called). The function
 * returns true if the caller should enter the power save state and
 * false if it should simply return.
 *
 * In the former case, it will have notified lockdep of interrupts
 * being re-enabled and generally sanitized the lazy irq state,
 * and in the latter case it will leave with interrupts hard
 * disabled and marked as such, so the local_irq_enable() call
 * in arch_cpu_idle() will properly re-enable everything.
 */
bool prep_irq_for_idle(void)
{
	/*
	 * First we need to hard disable to ensure no interrupt
	 * occurs before we effectively enter the low power state
	 */
	hard_irq_disable();

	/*
	 * If anything happened while we were soft-disabled,
	 * we return now and do not enter the low power state.
	 */
	if (lazy_irq_pending())
		return false;

	/* Tell lockdep we are about to re-enable */
	trace_hardirqs_on();

	/*
	 * Mark interrupts as soft-enabled and clear the
	 * PACA_IRQ_HARD_DIS from the pending mask since we
	 * are about to hard enable as well as a side effect
	 * of entering the low power state.
	 */
	local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
	local_paca->soft_enabled = 1;

	/* Tell the caller to enter the low power state */
	return true;
}

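/*
 * Illustrative call site (a sketch, not a caller in this file): a
 * platform idle loop would do something like
 *
 *	if (prep_irq_for_idle())
 *		ppc_md.power_save();
 *
 * so that the low power entry only happens when no interrupt is
 * already pending.
 */
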
/*
 * Force a replay of the external interrupt handler on this CPU.
 */
void force_external_irq_replay(void)
{
	/*
	 * This must only be called with interrupts soft-disabled,
	 * the replay will happen when re-enabling.
	 */
	WARN_ON(!arch_irqs_disabled());

	/* Indicate in the PACA that we have an interrupt to replay */
	local_paca->irq_happened |= PACA_IRQ_EE;
}

#endif /* CONFIG_PPC64 */

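/*
 * Supplement /proc/interrupts with the PowerPC-specific counters kept
 * in irq_stat.  Example rows on a two-CPU system (illustrative values):
 *
 *	LOC:      1234      5678   Local timer interrupts for timer event device
 *	SPU:         0         2   Spurious interrupts
 *	PMI:        17        23   Performance monitoring interrupts
 */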
int arch_show_interrupts(struct seq_file *p, int prec)
{
	int j;

#if defined(CONFIG_PPC32) && defined(CONFIG_TAU_INT)
	if (tau_initialized) {
		seq_printf(p, "%*s: ", prec, "TAU");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", tau_interrupts(j));
		seq_puts(p, "  PowerPC             Thermal Assist (cpu temp)\n");
	}
#endif /* CONFIG_PPC32 && CONFIG_TAU_INT */

	seq_printf(p, "%*s: ", prec, "LOC");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs_event);
	seq_printf(p, "  Local timer interrupts for timer event device\n");

	seq_printf(p, "%*s: ", prec, "LOC");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs_others);
	seq_printf(p, "  Local timer interrupts for others\n");

	seq_printf(p, "%*s: ", prec, "SPU");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).spurious_irqs);
	seq_printf(p, "  Spurious interrupts\n");

	seq_printf(p, "%*s: ", prec, "PMI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).pmu_irqs);
	seq_printf(p, "  Performance monitoring interrupts\n");

	seq_printf(p, "%*s: ", prec, "MCE");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).mce_exceptions);
	seq_printf(p, "  Machine check exceptions\n");

	if (cpu_has_feature(CPU_FTR_HVMODE)) {
		seq_printf(p, "%*s: ", prec, "HMI");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ",
					per_cpu(irq_stat, j).hmi_exceptions);
		seq_printf(p, "  Hypervisor Maintenance Interrupts\n");
	}

#ifdef CONFIG_PPC_DOORBELL
	if (cpu_has_feature(CPU_FTR_DBELL)) {
		seq_printf(p, "%*s: ", prec, "DBL");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", per_cpu(irq_stat, j).doorbell_irqs);
		seq_printf(p, "  Doorbell interrupts\n");
	}
#endif

	return 0;
}

/*
 * /proc/stat helpers
 */
u64 arch_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = per_cpu(irq_stat, cpu).timer_irqs_event;

	sum += per_cpu(irq_stat, cpu).pmu_irqs;
	sum += per_cpu(irq_stat, cpu).mce_exceptions;
	sum += per_cpu(irq_stat, cpu).spurious_irqs;
	sum += per_cpu(irq_stat, cpu).timer_irqs_others;
	sum += per_cpu(irq_stat, cpu).hmi_exceptions;
#ifdef CONFIG_PPC_DOORBELL
	sum += per_cpu(irq_stat, cpu).doorbell_irqs;
#endif

	return sum;
}

#ifdef CONFIG_HOTPLUG_CPU
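/*
 * When a CPU goes offline, walk every interrupt descriptor and rewrite
 * any affinity mask that no longer intersects the online set, warning
 * when an affinity has to be broken.  Per-CPU interrupts are skipped;
 * chips without an irq_set_affinity hook are reported once.
 */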
void migrate_irqs(void)
{
	struct irq_desc *desc;
	unsigned int irq;
	static int warned;
	cpumask_var_t mask;
	const struct cpumask *map = cpu_online_mask;

	alloc_cpumask_var(&mask, GFP_KERNEL);

	for_each_irq_desc(irq, desc) {
		struct irq_data *data;
		struct irq_chip *chip;

		data = irq_desc_get_irq_data(desc);
		if (irqd_is_per_cpu(data))
			continue;

		chip = irq_data_get_irq_chip(data);

		cpumask_and(mask, irq_data_get_affinity_mask(data), map);
		if (cpumask_any(mask) >= nr_cpu_ids) {
			pr_warn("Breaking affinity for irq %i\n", irq);
			cpumask_copy(mask, map);
		}
		if (chip->irq_set_affinity)
			chip->irq_set_affinity(data, mask, true);
		else if (desc->action && !(warned++))
			pr_err("Cannot set affinity for irq %i\n", irq);
	}

	free_cpumask_var(mask);

	local_irq_enable();
	mdelay(1);
	local_irq_disable();
}
#endif

static inline void check_stack_overflow(void)
{
#ifdef CONFIG_DEBUG_STACKOVERFLOW
	long sp;

	sp = current_stack_pointer() & (THREAD_SIZE - 1);

	/* check for stack overflow: is there less than 2KB free? */
	if (unlikely(sp < (sizeof(struct thread_info) + 2048))) {
		pr_err("do_IRQ: stack overflow: %ld\n",
			sp - sizeof(struct thread_info));
		dump_stack();
	}
#endif
}

void __do_irq(struct pt_regs *regs)
{
	unsigned int irq;

	irq_enter();

	trace_irq_entry(regs);

	check_stack_overflow();

	/*
	 * Query the platform PIC for the interrupt & ack it.
	 *
	 * This will typically lower the interrupt line to the CPU
	 */
	irq = ppc_md.get_irq();

	/* We can hard enable interrupts now to allow perf interrupts */
	may_hard_irq_enable();

	/* And finally process it */
	if (unlikely(!irq))
		__this_cpu_inc(irq_stat.spurious_irqs);
	else
		generic_handle_irq(irq);

	trace_irq_exit(regs);

	irq_exit();
}

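/*
 * do_IRQ() runs __do_irq() on the dedicated per-CPU hard-IRQ stack.
 * call_do_irq is an assembly helper that switches the stack pointer to
 * the irq stack's thread_info before calling __do_irq() and switches
 * back on return.
 */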
void do_IRQ(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	struct thread_info *curtp, *irqtp, *sirqtp;

	/* Switch to the irq stack to handle this */
	curtp = current_thread_info();
	irqtp = hardirq_ctx[raw_smp_processor_id()];
	sirqtp = softirq_ctx[raw_smp_processor_id()];

	/* Already there ? */
	if (unlikely(curtp == irqtp || curtp == sirqtp)) {
		__do_irq(regs);
		set_irq_regs(old_regs);
		return;
	}

	/* Prepare the thread_info in the irq stack */
	irqtp->task = curtp->task;
	irqtp->flags = 0;

	/* Copy the preempt_count so that the [soft]irq checks work. */
	irqtp->preempt_count = curtp->preempt_count;

	/* Switch stack and call */
	call_do_irq(regs, irqtp);

	/* Restore stack limit */
	irqtp->task = NULL;

	/* Copy back updates to the thread_info */
	if (irqtp->flags)
		set_bits(irqtp->flags, &curtp->flags);

	set_irq_regs(old_regs);
}

void __init init_IRQ(void)
{
	if (ppc_md.init_IRQ)
		ppc_md.init_IRQ();

	exc_lvl_ctx_init();

	irq_ctx_init();
}

#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
struct thread_info *critirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *dbgirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *mcheckirq_ctx[NR_CPUS] __read_mostly;

void exc_lvl_ctx_init(void)
{
	struct thread_info *tp;
	int i, cpu_nr;

	for_each_possible_cpu(i) {
#ifdef CONFIG_PPC64
		cpu_nr = i;
#else
#ifdef CONFIG_SMP
		cpu_nr = get_hard_smp_processor_id(i);
#else
		cpu_nr = 0;
#endif
#endif

		memset((void *)critirq_ctx[cpu_nr], 0, THREAD_SIZE);
		tp = critirq_ctx[cpu_nr];
		tp->cpu = cpu_nr;
		tp->preempt_count = 0;

#ifdef CONFIG_BOOKE
		memset((void *)dbgirq_ctx[cpu_nr], 0, THREAD_SIZE);
		tp = dbgirq_ctx[cpu_nr];
		tp->cpu = cpu_nr;
		tp->preempt_count = 0;

		memset((void *)mcheckirq_ctx[cpu_nr], 0, THREAD_SIZE);
		tp = mcheckirq_ctx[cpu_nr];
		tp->cpu = cpu_nr;
		tp->preempt_count = HARDIRQ_OFFSET;
#endif
	}
}
#endif

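/*
 * Per-CPU thread_info pointers for the dedicated softirq and hardirq
 * stacks.  A sketch of the split: the backing memory is allocated
 * early in boot (irqstack_early_init()), while irq_ctx_init() below
 * only initializes the thread_info at the base of each stack.
 */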
struct thread_info *softirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *hardirq_ctx[NR_CPUS] __read_mostly;

void irq_ctx_init(void)
{
	struct thread_info *tp;
	int i;

	for_each_possible_cpu(i) {
		memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
		tp = softirq_ctx[i];
		tp->cpu = i;
		klp_init_thread_info(tp);

		memset((void *)hardirq_ctx[i], 0, THREAD_SIZE);
		tp = hardirq_ctx[i];
		tp->cpu = i;
		klp_init_thread_info(tp);
	}
}

void do_softirq_own_stack(void)
{
	struct thread_info *curtp, *irqtp;

	curtp = current_thread_info();
	irqtp = softirq_ctx[smp_processor_id()];
	irqtp->task = curtp->task;
	irqtp->flags = 0;
	call_do_softirq(irqtp);
	irqtp->task = NULL;

	/* Set any flag that may have been set on the
	 * alternate stack
	 */
	if (irqtp->flags)
		set_bits(irqtp->flags, &curtp->flags);
}

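/*
 * Map a Linux interrupt number (virq) back to the hardware interrupt
 * number recorded in its irq_data; returns 0 (with a warning) if the
 * virq has no irq_data.
 */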
irq_hw_number_t virq_to_hw(unsigned int virq)
{
	struct irq_data *irq_data = irq_get_irq_data(virq);
	return WARN_ON(!irq_data) ? 0 : irq_data->hwirq;
}
EXPORT_SYMBOL_GPL(virq_to_hw);

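/*
 * Pick a hardware CPU id to receive an interrupt.  A mask equal to
 * cpu_online_mask is spread round-robin across the online CPUs;
 * otherwise the first online CPU in the mask is used, falling back to
 * round-robin when the mask contains no online CPU.
 */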
#ifdef CONFIG_SMP
int irq_choose_cpu(const struct cpumask *mask)
{
	int cpuid;

	if (cpumask_equal(mask, cpu_online_mask)) {
		static int irq_rover;
		static DEFINE_RAW_SPINLOCK(irq_rover_lock);
		unsigned long flags;

		/* Round-robin distribution... */
do_round_robin:
		raw_spin_lock_irqsave(&irq_rover_lock, flags);

		irq_rover = cpumask_next(irq_rover, cpu_online_mask);
		if (irq_rover >= nr_cpu_ids)
			irq_rover = cpumask_first(cpu_online_mask);

		cpuid = irq_rover;

		raw_spin_unlock_irqrestore(&irq_rover_lock, flags);
	} else {
		cpuid = cpumask_first_and(mask, cpu_online_mask);
		if (cpuid >= nr_cpu_ids)
			goto do_round_robin;
	}

	return get_hard_smp_processor_id(cpuid);
}
#else
int irq_choose_cpu(const struct cpumask *mask)
{
	return hard_smp_processor_id();
}
#endif

int arch_early_irq_init(void)
{
	return 0;
}

#ifdef CONFIG_PPC64
static int __init setup_noirqdistrib(char *str)
{
	distribute_irqs = 0;
	return 1;
}

__setup("noirqdistrib", setup_noirqdistrib);
#endif /* CONFIG_PPC64 */