/*
 * Derived from arch/i386/kernel/irq.c
 *   Copyright (C) 1992 Linus Torvalds
 * Adapted from arch/i386 by Gary Thomas
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Updated and modified by Cort Dougan <cort@fsmlabs.com>
 *   Copyright (C) 1996-2001 Cort Dougan
 * Adapted for Power Macintosh by Paul Mackerras
 *   Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQs should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 *
 * The MPC8xx has an interrupt mask in the SIU.  If a bit is set, the
 * interrupt is _enabled_.  As expected, IRQ0 is bit 0 in the 32-bit
 * mask register (of which only 16 are defined), hence the weird shifting
 * and complement of the cached_irq_mask.  I want to be able to stuff
 * this right into the SIU SMASK register.
 * Many of the prep/chrp functions are conditionally compiled on CONFIG_8xx
 * to reduce code space and undefined function references.
 */

#undef DEBUG

#include <linux/export.h>
#include <linux/threads.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/profile.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/mutex.h>
#include <linux/bootmem.h>
#include <linux/pci.h>
#include <linux/debugfs.h>
#include <linux/of.h>
#include <linux/of_irq.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/cache.h>
#include <asm/prom.h>
#include <asm/ptrace.h>
#include <asm/machdep.h>
#include <asm/udbg.h>
#include <asm/smp.h>
#include <asm/debug.h>

#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/firmware.h>
#include <asm/lv1call.h>
#endif
#define CREATE_TRACE_POINTS
#include <asm/trace.h>

DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

int __irq_offset_value;

#ifdef CONFIG_PPC32
EXPORT_SYMBOL(__irq_offset_value);
atomic_t ppc_n_lost_interrupts;

#ifdef CONFIG_TAU_INT
extern int tau_initialized;
extern int tau_interrupts(int);
#endif
#endif /* CONFIG_PPC32 */

#ifdef CONFIG_PPC64

int distribute_irqs = 1;

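/*
 * On ppc64 r13 always holds this CPU's paca pointer, so the two
 * accessors below can read and write the lazy interrupt state
 * (paca->irq_happened, paca->soft_enabled) with a single byte
 * load/store, avoiding the debug checks of get_paca().
 */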
static inline notrace unsigned long get_irq_happened(void)
{
	unsigned long happened;

	__asm__ __volatile__("lbz %0,%1(13)"
	: "=r" (happened) : "i" (offsetof(struct paca_struct, irq_happened)));

	return happened;
}

static inline notrace void set_soft_enabled(unsigned long enable)
{
	__asm__ __volatile__("stb %0,%1(13)"
	: : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled)));
}

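/*
 * Did a decrementer event become due while we were disabled? Compare
 * the current timebase against this CPU's next scheduled timer event
 * rather than trusting a latched interrupt alone.
 */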
static inline notrace int decrementer_check_overflow(void)
{
	u64 now = get_tb_or_rtc();
	u64 *next_tb = &__get_cpu_var(decrementers_next_tb);

	return now >= *next_tb;
}

/* This is called whenever we are re-enabling interrupts
 * and returns either 0 (nothing to do) or 500/900/280/a00/e80 if
 * there's an EE, DEC or DBELL to generate.
 *
 * This is called in two contexts: from arch_local_irq_restore()
 * before soft-enabling interrupts, and from the exception exit
 * path when returning from an interrupt from a soft-disabled to
 * a soft-enabled context. In both cases we have interrupts hard
 * disabled.
 *
 * We take care of only clearing the bits we handled in the
 * PACA irq_happened field since we can only re-emit one at a
 * time and we don't want to "lose" one.
 */
notrace unsigned int __check_irq_replay(void)
{
	/*
	 * We use local_paca rather than get_paca() to avoid all
	 * the debug_smp_processor_id() business in this low level
	 * function.
	 */
	unsigned char happened = local_paca->irq_happened;

	/* Clear bit 0 which we wouldn't clear otherwise */
	local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;

	/*
	 * Force the delivery of pending soft-disabled interrupts on PS3.
	 * Any HV call will have this side effect.
	 */
	if (firmware_has_feature(FW_FEATURE_PS3_LV1)) {
		u64 tmp, tmp2;
		lv1_get_version_info(&tmp, &tmp2);
	}

	/*
	 * We may have missed a decrementer interrupt. We check the
	 * decrementer itself rather than the paca irq_happened field
	 * in case we also had a rollover while hard disabled.
	 */
	local_paca->irq_happened &= ~PACA_IRQ_DEC;
	if ((happened & PACA_IRQ_DEC) || decrementer_check_overflow())
		return 0x900;

	/* Finally check if an external interrupt happened */
	local_paca->irq_happened &= ~PACA_IRQ_EE;
	if (happened & PACA_IRQ_EE)
		return 0x500;

#ifdef CONFIG_PPC_BOOK3E
	/* Check if an EPR external interrupt happened. This bit is
	 * typically set if we need to handle another "edge" interrupt
	 * from within the MPIC "EPR" handler.
	 */
	local_paca->irq_happened &= ~PACA_IRQ_EE_EDGE;
	if (happened & PACA_IRQ_EE_EDGE)
		return 0x500;

	local_paca->irq_happened &= ~PACA_IRQ_DBELL;
	if (happened & PACA_IRQ_DBELL)
		return 0x280;
#else
	local_paca->irq_happened &= ~PACA_IRQ_DBELL;
	if (happened & PACA_IRQ_DBELL) {
		if (cpu_has_feature(CPU_FTR_HVMODE))
			return 0xe80;
		return 0xa00;
	}
#endif /* CONFIG_PPC_BOOK3E */

	/* There should be nothing left! */
	BUG_ON(local_paca->irq_happened != 0);

	return 0;
}

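/*
 * The other half of the lazy interrupt-disable scheme: on soft-enable,
 * any event latched in paca->irq_happened while we were soft-disabled
 * is re-emitted here by replaying the corresponding exception (the
 * vector numbers come from __check_irq_replay() above).
 */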
notrace void arch_local_irq_restore(unsigned long en)
{
	unsigned char irq_happened;
	unsigned int replay;

	/* Write the new soft-enabled value */
	set_soft_enabled(en);
	if (!en)
		return;
	/*
	 * From this point onward, we can take interrupts, preempt,
	 * etc... unless we got hard-disabled. We check if an event
	 * happened. If none happened, we know we can just return.
	 *
	 * We may have preempted before the check below, in which case
	 * we are checking the "new" CPU instead of the old one. This
	 * is only a problem if an event happened on the "old" CPU.
	 *
	 * External interrupt events will have caused interrupts to
	 * be hard-disabled, so there is no problem, we
	 * cannot have preempted.
	 */
	irq_happened = get_irq_happened();
	if (!irq_happened)
		return;

	/*
	 * We need to hard disable to get a trusted value from
	 * __check_irq_replay(). We also need to soft-disable
	 * again to avoid warnings in there due to the use of
	 * per-cpu variables.
	 *
	 * We know that if the value in irq_happened is exactly 0x01
	 * then we are already hard disabled (there are other less
	 * common cases that we'll ignore for now), so we skip the
	 * (expensive) mtmsrd.
	 */
	if (unlikely(irq_happened != PACA_IRQ_HARD_DIS))
		__hard_irq_disable();
#ifdef CONFIG_TRACE_IRQFLAGS
	else {
		/*
		 * We should already be hard disabled here. We had bugs
		 * where that wasn't the case so let's double-check it and
		 * warn if we are wrong. Only do that when IRQ tracing
		 * is enabled as mfmsr() can be costly.
		 */
		if (WARN_ON(mfmsr() & MSR_EE))
			__hard_irq_disable();
	}
#endif /* CONFIG_TRACE_IRQFLAGS */

	set_soft_enabled(0);

	/*
	 * Check if anything needs to be re-emitted. We haven't
	 * soft-enabled yet to avoid warnings in decrementer_check_overflow
	 * accessing per-cpu variables.
	 */
	replay = __check_irq_replay();

	/* We can soft-enable now */
	set_soft_enabled(1);

	/*
	 * And replay if we have to. This will return with interrupts
	 * hard-enabled.
	 */
	if (replay) {
		__replay_interrupt(replay);
		return;
	}

	/* Finally, let's ensure we are hard enabled */
	__hard_irq_enable();
}
EXPORT_SYMBOL(arch_local_irq_restore);

/*
 * This is specifically called by assembly code to re-enable interrupts
 * if they are currently disabled. This is typically called before
 * schedule() or do_signal() when returning to userspace. We do it
 * in C to avoid the burden of dealing with lockdep etc...
 *
 * NOTE: This is called with interrupts hard disabled but not marked
 * as such in paca->irq_happened, so we need to resync this.
 */
void notrace restore_interrupts(void)
{
	if (irqs_disabled()) {
		local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
		local_irq_enable();
	} else
		__hard_irq_enable();
}

/*
 * This is a helper to use when about to go into idle low-power
 * when the latter has the side effect of re-enabling interrupts
 * (such as calling H_CEDE under pHyp).
 *
 * You call this function with interrupts soft-disabled (this is
 * already the case when ppc_md.power_save is called). The function
 * will return whether to enter power save or just return.
 *
 * In the former case, it will have notified lockdep of interrupts
 * being re-enabled and generally sanitized the lazy irq state,
 * and in the latter case it will leave with interrupts hard
 * disabled and marked as such, so the local_irq_enable() call
 * in cpu_idle() will properly re-enable everything.
 */
bool prep_irq_for_idle(void)
{
	/*
	 * First we need to hard disable to ensure no interrupt
	 * occurs before we effectively enter the low power state.
	 */
	hard_irq_disable();

	/*
	 * If anything happened while we were soft-disabled,
	 * we return now and do not enter the low power state.
	 */
	if (lazy_irq_pending())
		return false;

	/* Tell lockdep we are about to re-enable */
	trace_hardirqs_on();

	/*
	 * Mark interrupts as soft-enabled and clear the
	 * PACA_IRQ_HARD_DIS from the pending mask since we
	 * are about to hard enable as well as a side effect
	 * of entering the low power state.
	 */
	local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
	local_paca->soft_enabled = 1;

	/* Tell the caller to enter the low power state */
	return true;
}

#endif /* CONFIG_PPC64 */

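/*
 * Architecture-specific rows of /proc/interrupts: called back from
 * the generic show_interrupts(), with 'prec' giving the label width.
 */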
int arch_show_interrupts(struct seq_file *p, int prec)
{
	int j;

#if defined(CONFIG_PPC32) && defined(CONFIG_TAU_INT)
	if (tau_initialized) {
		seq_printf(p, "%*s: ", prec, "TAU");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", tau_interrupts(j));
		seq_puts(p, "  PowerPC Thermal Assist (cpu temp)\n");
	}
#endif /* CONFIG_PPC32 && CONFIG_TAU_INT */

	seq_printf(p, "%*s: ", prec, "LOC");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs);
	seq_printf(p, "  Local timer interrupts\n");

	seq_printf(p, "%*s: ", prec, "SPU");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).spurious_irqs);
	seq_printf(p, "  Spurious interrupts\n");

	seq_printf(p, "%*s: ", prec, "PMI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).pmu_irqs);
	seq_printf(p, "  Performance monitoring interrupts\n");

	seq_printf(p, "%*s: ", prec, "MCE");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).mce_exceptions);
	seq_printf(p, "  Machine check exceptions\n");

#ifdef CONFIG_PPC_DOORBELL
	if (cpu_has_feature(CPU_FTR_DBELL)) {
		seq_printf(p, "%*s: ", prec, "DBL");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", per_cpu(irq_stat, j).doorbell_irqs);
		seq_printf(p, "  Doorbell interrupts\n");
	}
#endif

	return 0;
}

/*
 * /proc/stat helpers
 */
u64 arch_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = per_cpu(irq_stat, cpu).timer_irqs;

	sum += per_cpu(irq_stat, cpu).pmu_irqs;
	sum += per_cpu(irq_stat, cpu).mce_exceptions;
	sum += per_cpu(irq_stat, cpu).spurious_irqs;
#ifdef CONFIG_PPC_DOORBELL
	sum += per_cpu(irq_stat, cpu).doorbell_irqs;
#endif

	return sum;
}

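/*
 * CPU-offline path: re-target every interrupt whose affinity no longer
 * intersects the online CPUs, then briefly re-enable interrupts so
 * that anything already latched can be delivered before the CPU
 * disappears.
 */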
#ifdef CONFIG_HOTPLUG_CPU
void migrate_irqs(void)
{
	struct irq_desc *desc;
	unsigned int irq;
	static int warned;
	cpumask_var_t mask;
	const struct cpumask *map = cpu_online_mask;

	alloc_cpumask_var(&mask, GFP_KERNEL);

	for_each_irq_desc(irq, desc) {
		struct irq_data *data;
		struct irq_chip *chip;

		data = irq_desc_get_irq_data(desc);
		if (irqd_is_per_cpu(data))
			continue;

		chip = irq_data_get_irq_chip(data);

		cpumask_and(mask, data->affinity, map);
		if (cpumask_any(mask) >= nr_cpu_ids) {
			printk("Breaking affinity for irq %i\n", irq);
			cpumask_copy(mask, map);
		}
		if (chip->irq_set_affinity)
			chip->irq_set_affinity(data, mask, true);
		else if (desc->action && !(warned++))
			printk("Cannot set affinity for irq %i\n", irq);
	}

	free_cpumask_var(mask);

	local_irq_enable();
	mdelay(1);
	local_irq_disable();
}
#endif

static inline void check_stack_overflow(void)
{
#ifdef CONFIG_DEBUG_STACKOVERFLOW
	long sp;

	sp = __get_SP() & (THREAD_SIZE-1);

	/* check for stack overflow: is there less than 2KB free? */
	if (unlikely(sp < (sizeof(struct thread_info) + 2048))) {
		printk("do_IRQ: stack overflow: %ld\n",
			sp - sizeof(struct thread_info));
		dump_stack();
	}
#endif
}

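/*
 * Core interrupt processing, run on whatever stack the caller set up:
 * ask the platform PIC which interrupt fired, then hand it to the
 * flow handler installed on the corresponding descriptor.
 */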
void __do_irq(struct pt_regs *regs)
{
	struct irq_desc *desc;
	unsigned int irq;

	irq_enter();

	trace_irq_entry(regs);

	check_stack_overflow();

	/*
	 * Query the platform PIC for the interrupt & ack it.
	 *
	 * This will typically lower the interrupt line to the CPU
	 */
	irq = ppc_md.get_irq();

	/* We can hard enable interrupts now to allow perf interrupts */
	may_hard_irq_enable();

	/* And finally process it */
	if (unlikely(irq == NO_IRQ))
		__get_cpu_var(irq_stat).spurious_irqs++;
	else {
		desc = irq_to_desc(irq);
		if (likely(desc))
			desc->handle_irq(irq, desc);
	}

	trace_irq_exit(regs);

	irq_exit();
}

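/*
 * Exception entry point. Unless we are already on one of the per-CPU
 * irq/softirq stacks, switch to the hardirq stack before running
 * __do_irq(), so that deep interrupt processing cannot overflow the
 * interrupted thread's kernel stack.
 */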
void do_IRQ(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	struct thread_info *curtp, *irqtp, *sirqtp;

	/* Switch to the irq stack to handle this */
	curtp = current_thread_info();
	irqtp = hardirq_ctx[raw_smp_processor_id()];
	sirqtp = softirq_ctx[raw_smp_processor_id()];

	/* Already there ? */
	if (unlikely(curtp == irqtp || curtp == sirqtp)) {
		__do_irq(regs);
		set_irq_regs(old_regs);
		return;
	}

	/* Prepare the thread_info in the irq stack */
	irqtp->task = curtp->task;
	irqtp->flags = 0;

	/* Copy the preempt_count so that the [soft]irq checks work. */
	irqtp->preempt_count = curtp->preempt_count;

	/* Switch stack and call */
	call_do_irq(regs, irqtp);

	/* Restore stack limit */
	irqtp->task = NULL;

	/* Copy back updates to the thread_info */
	if (irqtp->flags)
		set_bits(irqtp->flags, &curtp->flags);

	set_irq_regs(old_regs);
}

void __init init_IRQ(void)
{
	if (ppc_md.init_IRQ)
		ppc_md.init_IRQ();

	exc_lvl_ctx_init();

	irq_ctx_init();
}

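/*
 * 40x and BookE take critical (and, on BookE, debug and machine check)
 * exceptions on dedicated per-CPU stacks; set up the thread_info that
 * lives at the base of each of those stacks.
 */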
#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
struct thread_info *critirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *dbgirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *mcheckirq_ctx[NR_CPUS] __read_mostly;

void exc_lvl_ctx_init(void)
{
	struct thread_info *tp;
	int i, cpu_nr;

	for_each_possible_cpu(i) {
#ifdef CONFIG_PPC64
		cpu_nr = i;
#else
		cpu_nr = get_hard_smp_processor_id(i);
#endif
		memset((void *)critirq_ctx[cpu_nr], 0, THREAD_SIZE);
		tp = critirq_ctx[cpu_nr];
		tp->cpu = cpu_nr;
		tp->preempt_count = 0;

#ifdef CONFIG_BOOKE
		memset((void *)dbgirq_ctx[cpu_nr], 0, THREAD_SIZE);
		tp = dbgirq_ctx[cpu_nr];
		tp->cpu = cpu_nr;
		tp->preempt_count = 0;

		memset((void *)mcheckirq_ctx[cpu_nr], 0, THREAD_SIZE);
		tp = mcheckirq_ctx[cpu_nr];
		tp->cpu = cpu_nr;
		tp->preempt_count = HARDIRQ_OFFSET;
#endif
	}
}
#endif

struct thread_info *softirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *hardirq_ctx[NR_CPUS] __read_mostly;

void irq_ctx_init(void)
{
	struct thread_info *tp;
	int i;

	for_each_possible_cpu(i) {
		memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
		tp = softirq_ctx[i];
		tp->cpu = i;

		memset((void *)hardirq_ctx[i], 0, THREAD_SIZE);
		tp = hardirq_ctx[i];
		tp->cpu = i;
	}
}

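/*
 * Run pending softirqs on this CPU's dedicated softirq stack,
 * mirroring the hardirq stack switch done in do_IRQ().
 */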
static inline void do_softirq_onstack(void)
{
	struct thread_info *curtp, *irqtp;

	curtp = current_thread_info();
	irqtp = softirq_ctx[smp_processor_id()];
	irqtp->task = curtp->task;
	irqtp->flags = 0;
	call_do_softirq(irqtp);
	irqtp->task = NULL;

	/* Set any flag that may have been set on the
	 * alternate stack
	 */
	if (irqtp->flags)
		set_bits(irqtp->flags, &curtp->flags);
}

void do_softirq(void)
{
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	if (local_softirq_pending())
		do_softirq_onstack();

	local_irq_restore(flags);
}

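/*
 * Map a Linux virtual irq number back to the hardware irq number the
 * irqdomain code recorded in its irq_data.
 */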
irq_hw_number_t virq_to_hw(unsigned int virq)
{
	struct irq_data *irq_data = irq_get_irq_data(virq);
	return WARN_ON(!irq_data) ? 0 : irq_data->hwirq;
}
EXPORT_SYMBOL_GPL(virq_to_hw);

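/*
 * Pick a target CPU for an interrupt: round-robin over all online CPUs
 * when the mask allows any of them, otherwise the first online CPU in
 * the mask. Returns a hard (physical) processor id suitable for
 * programming into the PIC.
 */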
#ifdef CONFIG_SMP
int irq_choose_cpu(const struct cpumask *mask)
{
	int cpuid;

	if (cpumask_equal(mask, cpu_online_mask)) {
		static int irq_rover;
		static DEFINE_RAW_SPINLOCK(irq_rover_lock);
		unsigned long flags;

		/* Round-robin distribution... */
do_round_robin:
		raw_spin_lock_irqsave(&irq_rover_lock, flags);

		irq_rover = cpumask_next(irq_rover, cpu_online_mask);
		if (irq_rover >= nr_cpu_ids)
			irq_rover = cpumask_first(cpu_online_mask);

		cpuid = irq_rover;

		raw_spin_unlock_irqrestore(&irq_rover_lock, flags);
	} else {
		cpuid = cpumask_first_and(mask, cpu_online_mask);
		if (cpuid >= nr_cpu_ids)
			goto do_round_robin;
	}

	return get_hard_smp_processor_id(cpuid);
}
#else
int irq_choose_cpu(const struct cpumask *mask)
{
	return hard_smp_processor_id();
}
#endif

int arch_early_irq_init(void)
{
	return 0;
}

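/*
 * Boot parameter: "noirqdistrib" turns off the spreading of device
 * interrupts across CPUs that distribute_irqs (above) enables by
 * default on 64-bit.
 */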
#ifdef CONFIG_PPC64
static int __init setup_noirqdistrib(char *str)
{
	distribute_irqs = 0;
	return 1;
}

__setup("noirqdistrib", setup_noirqdistrib);
#endif /* CONFIG_PPC64 */