/*
 * Derived from arch/i386/kernel/irq.c
 *   Copyright (C) 1992 Linus Torvalds
 * Adapted from arch/i386 by Gary Thomas
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Updated and modified by Cort Dougan <cort@fsmlabs.com>
 *   Copyright (C) 1996-2001 Cort Dougan
 * Adapted for Power Macintosh by Paul Mackerras
 *   Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQs should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 *
 * The MPC8xx has an interrupt mask in the SIU.  If a bit is set, the
 * interrupt is _enabled_.  As expected, IRQ0 is bit 0 in the 32-bit
 * mask register (of which only 16 are defined), hence the weird shifting
 * and complement of the cached_irq_mask.  I want to be able to stuff
 * this right into the SIU SMASK register.
 * Many of the prep/chrp functions are conditionally compiled on CONFIG_8xx
 * to reduce code space and undefined function references.
 */

#undef DEBUG

#include <linux/export.h>
#include <linux/threads.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/profile.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/mutex.h>
#include <linux/bootmem.h>
#include <linux/pci.h>
#include <linux/debugfs.h>
#include <linux/of.h>
#include <linux/of_irq.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/cache.h>
#include <asm/prom.h>
#include <asm/ptrace.h>
#include <asm/machdep.h>
#include <asm/udbg.h>
#include <asm/smp.h>
#include <asm/debug.h>

#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/firmware.h>
#include <asm/lv1call.h>
#endif
#define CREATE_TRACE_POINTS
#include <asm/trace.h>

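/* Per-CPU interrupt counters, reported via /proc/interrupts and /proc/stat */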
DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

int __irq_offset_value;

#ifdef CONFIG_PPC32
EXPORT_SYMBOL(__irq_offset_value);
atomic_t ppc_n_lost_interrupts;

#ifdef CONFIG_TAU_INT
extern int tau_initialized;
extern int tau_interrupts(int);
#endif
#endif /* CONFIG_PPC32 */

#ifdef CONFIG_PPC64

int distribute_irqs = 1;

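/*
 * Read paca->irq_happened with a single load through r13 (the paca
 * pointer on ppc64), avoiding the debug overhead of get_paca().
 */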
static inline notrace unsigned long get_irq_happened(void)
{
        unsigned long happened;

        __asm__ __volatile__("lbz %0,%1(13)"
        : "=r" (happened) : "i" (offsetof(struct paca_struct, irq_happened)));

        return happened;
}

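/* Store the new soft-enable state straight into paca->soft_enabled */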
static inline notrace void set_soft_enabled(unsigned long enable)
{
        __asm__ __volatile__("stb %0,%1(13)"
        : : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled)));
}

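/*
 * Report whether the decrementer has passed the next scheduled timer
 * event; if so, arm it to fire again almost immediately so the missed
 * timer interrupt is regenerated once interrupts are hard-enabled.
 */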
static inline notrace int decrementer_check_overflow(void)
{
        u64 now = get_tb_or_rtc();
        u64 *next_tb = &__get_cpu_var(decrementers_next_tb);

        if (now >= *next_tb)
                set_dec(1);
        return now >= *next_tb;
}

/* This is called whenever we are re-enabling interrupts
 * and returns either 0 (nothing to do) or 500/900/280/a00/e80 if
 * there's an EE, DEC or DBELL to generate.
 *
 * This is called in two contexts: From arch_local_irq_restore()
 * before soft-enabling interrupts, and from the exception exit
 * path when returning from an interrupt from a soft-disabled to
 * a soft-enabled context. In both cases we have interrupts hard
 * disabled.
 *
 * We take care of only clearing the bits we handled in the
 * PACA irq_happened field since we can only re-emit one at a
 * time and we don't want to "lose" one.
 */
notrace unsigned int __check_irq_replay(void)
{
        /*
         * We use local_paca rather than get_paca() to avoid all
         * the debug_smp_processor_id() business in this low level
         * function
         */
        unsigned char happened = local_paca->irq_happened;

        /* Clear bit 0 which we wouldn't clear otherwise */
        local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;

        /*
         * Force the delivery of pending soft-disabled interrupts on PS3.
         * Any HV call will have this side effect.
         */
        if (firmware_has_feature(FW_FEATURE_PS3_LV1)) {
                u64 tmp, tmp2;
                lv1_get_version_info(&tmp, &tmp2);
        }

        /*
         * We may have missed a decrementer interrupt. We check the
         * decrementer itself rather than the paca irq_happened field
         * in case we also had a rollover while hard disabled
         */
        local_paca->irq_happened &= ~PACA_IRQ_DEC;
        if (decrementer_check_overflow())
                return 0x900;

        /* Finally check if an external interrupt happened */
        local_paca->irq_happened &= ~PACA_IRQ_EE;
        if (happened & PACA_IRQ_EE)
                return 0x500;

#ifdef CONFIG_PPC_BOOK3E
        /* Check if an EPR external interrupt happened; this bit is
         * typically set if we need to handle another "edge" interrupt
         * from within the MPIC "EPR" handler.
         */
        local_paca->irq_happened &= ~PACA_IRQ_EE_EDGE;
        if (happened & PACA_IRQ_EE_EDGE)
                return 0x500;

        local_paca->irq_happened &= ~PACA_IRQ_DBELL;
        if (happened & PACA_IRQ_DBELL)
                return 0x280;
#else
        local_paca->irq_happened &= ~PACA_IRQ_DBELL;
        if (happened & PACA_IRQ_DBELL) {
                if (cpu_has_feature(CPU_FTR_HVMODE))
                        return 0xe80;
                return 0xa00;
        }
#endif /* CONFIG_PPC_BOOK3E */

        /* There should be nothing left! */
        BUG_ON(local_paca->irq_happened != 0);

        return 0;
}

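/*
 * Restore the soft-enable state for the lazy interrupt-disable scheme
 * and replay any interrupt that arrived while we were soft-disabled.
 */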
notrace void arch_local_irq_restore(unsigned long en)
{
        unsigned char irq_happened;
        unsigned int replay;

        /* Write the new soft-enabled value */
        set_soft_enabled(en);
        if (!en)
                return;
        /*
         * From this point onward, we can take interrupts, preempt,
         * etc... unless we got hard-disabled. We check if an event
         * happened. If none happened, we know we can just return.
         *
         * We may have preempted before the check below, in which case
         * we are checking the "new" CPU instead of the old one. This
         * is only a problem if an event happened on the "old" CPU.
         *
         * External interrupt events will have caused interrupts to
         * be hard-disabled, so there is no problem, we
         * cannot have preempted.
         */
        irq_happened = get_irq_happened();
        if (!irq_happened)
                return;

        /*
         * We need to hard disable to get a trusted value from
         * __check_irq_replay(). We also need to soft-disable
         * again to avoid warnings in there due to the use of
         * per-cpu variables.
         *
         * We know that if the value in irq_happened is exactly 0x01
         * then we are already hard disabled (there are other less
         * common cases that we'll ignore for now), so we skip the
         * (expensive) mtmsrd.
         */
        if (unlikely(irq_happened != PACA_IRQ_HARD_DIS))
                __hard_irq_disable();
#ifdef CONFIG_TRACE_IRQFLAGS
        else {
                /*
                 * We should already be hard disabled here. We had bugs
                 * where that wasn't the case so let's double check it and
                 * warn if we are wrong. Only do that when IRQ tracing
                 * is enabled as mfmsr() can be costly.
                 */
                if (WARN_ON(mfmsr() & MSR_EE))
                        __hard_irq_disable();
        }
#endif /* CONFIG_TRACE_IRQFLAGS */

        set_soft_enabled(0);

        /*
         * Check if anything needs to be re-emitted. We haven't
         * soft-enabled yet to avoid warnings in decrementer_check_overflow
         * accessing per-cpu variables
         */
        replay = __check_irq_replay();

        /* We can soft-enable now */
        set_soft_enabled(1);

        /*
         * And replay if we have to. This will return with interrupts
         * hard-enabled.
         */
        if (replay) {
                __replay_interrupt(replay);
                return;
        }

        /* Finally, let's ensure we are hard enabled */
        __hard_irq_enable();
}
EXPORT_SYMBOL(arch_local_irq_restore);

/*
 * This is specifically called by assembly code to re-enable interrupts
 * if they are currently disabled. This is typically called before
 * schedule() or do_signal() when returning to userspace. We do it
 * in C to avoid the burden of dealing with lockdep etc...
 *
 * NOTE: This is called with interrupts hard disabled but not marked
 * as such in paca->irq_happened, so we need to resync this.
 */
void notrace restore_interrupts(void)
{
        if (irqs_disabled()) {
                local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
                local_irq_enable();
        } else
                __hard_irq_enable();
}

/*
 * This is a helper to use when about to enter an idle low-power state,
 * when the latter has the side effect of re-enabling interrupts
 * (such as calling H_CEDE under pHyp).
 *
 * You call this function with interrupts soft-disabled (this is
 * already the case when ppc_md.power_save is called). The function
 * will return whether to enter power save or just return.
 *
 * In the former case, it will have notified lockdep of interrupts
 * being re-enabled and generally sanitized the lazy irq state,
 * and in the latter case it will leave with interrupts hard
 * disabled and marked as such, so the local_irq_enable() call
 * in cpu_idle() will properly re-enable everything.
 */
bool prep_irq_for_idle(void)
{
        /*
         * First we need to hard disable to ensure no interrupt
         * occurs before we effectively enter the low power state
         */
        hard_irq_disable();

        /*
         * If anything happened while we were soft-disabled,
         * we return now and do not enter the low power state.
         */
        if (lazy_irq_pending())
                return false;

        /* Tell lockdep we are about to re-enable */
        trace_hardirqs_on();

        /*
         * Mark interrupts as soft-enabled and clear the
         * PACA_IRQ_HARD_DIS from the pending mask since we
         * are about to hard enable as well as a side effect
         * of entering the low power state.
         */
        local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
        local_paca->soft_enabled = 1;

        /* Tell the caller to enter the low power state */
        return true;
}

#endif /* CONFIG_PPC64 */

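/* Architecture-specific interrupt counters for /proc/interrupts */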
int arch_show_interrupts(struct seq_file *p, int prec)
{
        int j;

#if defined(CONFIG_PPC32) && defined(CONFIG_TAU_INT)
        if (tau_initialized) {
                seq_printf(p, "%*s: ", prec, "TAU");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", tau_interrupts(j));
                seq_puts(p, "  PowerPC             Thermal Assist (cpu temp)\n");
        }
#endif /* CONFIG_PPC32 && CONFIG_TAU_INT */

        seq_printf(p, "%*s: ", prec, "LOC");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs);
        seq_printf(p, "  Local timer interrupts\n");

        seq_printf(p, "%*s: ", prec, "SPU");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(irq_stat, j).spurious_irqs);
        seq_printf(p, "  Spurious interrupts\n");

        seq_printf(p, "%*s: ", prec, "CNT");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(irq_stat, j).pmu_irqs);
        seq_printf(p, "  Performance monitoring interrupts\n");

        seq_printf(p, "%*s: ", prec, "MCE");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(irq_stat, j).mce_exceptions);
        seq_printf(p, "  Machine check exceptions\n");

#ifdef CONFIG_PPC_DOORBELL
        if (cpu_has_feature(CPU_FTR_DBELL)) {
                seq_printf(p, "%*s: ", prec, "DBL");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", per_cpu(irq_stat, j).doorbell_irqs);
                seq_printf(p, "  Doorbell interrupts\n");
        }
#endif

        return 0;
}

/*
 * /proc/stat helpers
 */
u64 arch_irq_stat_cpu(unsigned int cpu)
{
        u64 sum = per_cpu(irq_stat, cpu).timer_irqs;

        sum += per_cpu(irq_stat, cpu).pmu_irqs;
        sum += per_cpu(irq_stat, cpu).mce_exceptions;
        sum += per_cpu(irq_stat, cpu).spurious_irqs;
#ifdef CONFIG_PPC_DOORBELL
        sum += per_cpu(irq_stat, cpu).doorbell_irqs;
#endif

        return sum;
}

#ifdef CONFIG_HOTPLUG_CPU
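/*
 * Steer all movable interrupts away from this (offlining) CPU by
 * re-targeting their affinity at the remaining online CPUs.
 */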
void migrate_irqs(void)
{
        struct irq_desc *desc;
        unsigned int irq;
        static int warned;
        cpumask_var_t mask;
        const struct cpumask *map = cpu_online_mask;

        alloc_cpumask_var(&mask, GFP_KERNEL);

        for_each_irq_desc(irq, desc) {
                struct irq_data *data;
                struct irq_chip *chip;

                data = irq_desc_get_irq_data(desc);
                if (irqd_is_per_cpu(data))
                        continue;

                chip = irq_data_get_irq_chip(data);

                cpumask_and(mask, data->affinity, map);
                if (cpumask_any(mask) >= nr_cpu_ids) {
                        printk("Breaking affinity for irq %i\n", irq);
                        cpumask_copy(mask, map);
                }
                if (chip->irq_set_affinity)
                        chip->irq_set_affinity(data, mask, true);
                else if (desc->action && !(warned++))
                        printk("Cannot set affinity for irq %i\n", irq);
        }

        free_cpumask_var(mask);

        local_irq_enable();
        mdelay(1);
        local_irq_disable();
}
#endif

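/*
 * Dispatch a single decoded interrupt, switching to the per-CPU
 * hardirq stack unless we are already running on it.
 */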
static inline void handle_one_irq(unsigned int irq)
{
        struct thread_info *curtp, *irqtp;
        unsigned long saved_sp_limit;
        struct irq_desc *desc;

        desc = irq_to_desc(irq);
        if (!desc)
                return;

        /* Switch to the irq stack to handle this */
        curtp = current_thread_info();
        irqtp = hardirq_ctx[smp_processor_id()];

        if (curtp == irqtp) {
                /* We're already on the irq stack, just handle it */
                desc->handle_irq(irq, desc);
                return;
        }

        saved_sp_limit = current->thread.ksp_limit;

        irqtp->task = curtp->task;
        irqtp->flags = 0;

        /* Copy the softirq bits in preempt_count so that the
         * softirq checks work in the hardirq context. */
        irqtp->preempt_count = (irqtp->preempt_count & ~SOFTIRQ_MASK) |
                               (curtp->preempt_count & SOFTIRQ_MASK);

        current->thread.ksp_limit = (unsigned long)irqtp +
                _ALIGN_UP(sizeof(struct thread_info), 16);

        call_handle_irq(irq, desc, irqtp, desc->handle_irq);
        current->thread.ksp_limit = saved_sp_limit;
        irqtp->task = NULL;

        /* Set any flag that may have been set on the
         * alternate stack
         */
        if (irqtp->flags)
                set_bits(irqtp->flags, &curtp->flags);
}

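/* Warn (in CONFIG_DEBUG_STACKOVERFLOW builds) when the kernel stack runs low */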
static inline void check_stack_overflow(void)
{
#ifdef CONFIG_DEBUG_STACKOVERFLOW
        long sp;

        sp = __get_SP() & (THREAD_SIZE-1);

        /* check for stack overflow: is there less than 2KB free? */
        if (unlikely(sp < (sizeof(struct thread_info) + 2048))) {
                printk("do_IRQ: stack overflow: %ld\n",
                        sp - sizeof(struct thread_info));
                dump_stack();
        }
#endif
}

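/*
 * Main external interrupt entry point: ask the platform PIC which
 * interrupt fired, then dispatch it (or count it as spurious).
 */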
void do_IRQ(struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);
        unsigned int irq;

        irq_enter();

        trace_irq_entry(regs);

        check_stack_overflow();

        /*
         * Query the platform PIC for the interrupt & ack it.
         *
         * This will typically lower the interrupt line to the CPU
         */
        irq = ppc_md.get_irq();

        /* We can hard enable interrupts now */
        may_hard_irq_enable();

        /* And finally process it */
        if (irq != NO_IRQ)
                handle_one_irq(irq);
        else
                __get_cpu_var(irq_stat).spurious_irqs++;

        trace_irq_exit(regs);

        irq_exit();
        set_irq_regs(old_regs);
}

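/* Boot-time interrupt setup: platform PIC init and the per-CPU IRQ stacks */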
void __init init_IRQ(void)
{
        if (ppc_md.init_IRQ)
                ppc_md.init_IRQ();

        exc_lvl_ctx_init();

        irq_ctx_init();
}

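/*
 * Separate exception stacks for the critical-input, debug and machine
 * check exception levels (BookE/40x only).
 */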
#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
struct thread_info *critirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *dbgirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *mcheckirq_ctx[NR_CPUS] __read_mostly;

void exc_lvl_ctx_init(void)
{
        struct thread_info *tp;
        int i, cpu_nr;

        for_each_possible_cpu(i) {
#ifdef CONFIG_PPC64
                cpu_nr = i;
#else
                cpu_nr = get_hard_smp_processor_id(i);
#endif
                memset((void *)critirq_ctx[cpu_nr], 0, THREAD_SIZE);
                tp = critirq_ctx[cpu_nr];
                tp->cpu = cpu_nr;
                tp->preempt_count = 0;

#ifdef CONFIG_BOOKE
                memset((void *)dbgirq_ctx[cpu_nr], 0, THREAD_SIZE);
                tp = dbgirq_ctx[cpu_nr];
                tp->cpu = cpu_nr;
                tp->preempt_count = 0;

                memset((void *)mcheckirq_ctx[cpu_nr], 0, THREAD_SIZE);
                tp = mcheckirq_ctx[cpu_nr];
                tp->cpu = cpu_nr;
                tp->preempt_count = HARDIRQ_OFFSET;
#endif
        }
}
#endif

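/* Per-CPU softirq and hardirq stacks, set up once at boot */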
struct thread_info *softirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *hardirq_ctx[NR_CPUS] __read_mostly;

void irq_ctx_init(void)
{
        struct thread_info *tp;
        int i;

        for_each_possible_cpu(i) {
                memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
                tp = softirq_ctx[i];
                tp->cpu = i;
                tp->preempt_count = 0;

                memset((void *)hardirq_ctx[i], 0, THREAD_SIZE);
                tp = hardirq_ctx[i];
                tp->cpu = i;
                tp->preempt_count = HARDIRQ_OFFSET;
        }
}

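/*
 * Switch to the per-CPU softirq stack, run the pending softirqs via
 * call_do_softirq(), then restore the previous stack limit.
 */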
static inline void do_softirq_onstack(void)
{
        struct thread_info *curtp, *irqtp;
        unsigned long saved_sp_limit = current->thread.ksp_limit;

        curtp = current_thread_info();
        irqtp = softirq_ctx[smp_processor_id()];
        irqtp->task = curtp->task;
        irqtp->flags = 0;
        current->thread.ksp_limit = (unsigned long)irqtp +
                                    _ALIGN_UP(sizeof(struct thread_info), 16);
        call_do_softirq(irqtp);
        current->thread.ksp_limit = saved_sp_limit;
        irqtp->task = NULL;

        /* Set any flag that may have been set on the
         * alternate stack
         */
        if (irqtp->flags)
                set_bits(irqtp->flags, &curtp->flags);
}

void do_softirq(void)
{
        unsigned long flags;

        if (in_interrupt())
                return;

        local_irq_save(flags);

        if (local_softirq_pending())
                do_softirq_onstack();

        local_irq_restore(flags);
}

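/* Map a Linux virtual interrupt number back to its hardware IRQ number */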
irq_hw_number_t virq_to_hw(unsigned int virq)
{
        struct irq_data *irq_data = irq_get_irq_data(virq);
        return WARN_ON(!irq_data) ? 0 : irq_data->hwirq;
}
EXPORT_SYMBOL_GPL(virq_to_hw);

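/*
 * Choose a target CPU for an interrupt: round-robin across all online
 * CPUs when the affinity mask covers them all, otherwise the first
 * online CPU in the mask. Returns a hard (physical) CPU number.
 */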
#ifdef CONFIG_SMP
int irq_choose_cpu(const struct cpumask *mask)
{
        int cpuid;

        if (cpumask_equal(mask, cpu_online_mask)) {
                static int irq_rover;
                static DEFINE_RAW_SPINLOCK(irq_rover_lock);
                unsigned long flags;

                /* Round-robin distribution... */
do_round_robin:
                raw_spin_lock_irqsave(&irq_rover_lock, flags);

                irq_rover = cpumask_next(irq_rover, cpu_online_mask);
                if (irq_rover >= nr_cpu_ids)
                        irq_rover = cpumask_first(cpu_online_mask);

                cpuid = irq_rover;

                raw_spin_unlock_irqrestore(&irq_rover_lock, flags);
        } else {
                cpuid = cpumask_first_and(mask, cpu_online_mask);
                if (cpuid >= nr_cpu_ids)
                        goto do_round_robin;
        }

        return get_hard_smp_processor_id(cpuid);
}
#else
int irq_choose_cpu(const struct cpumask *mask)
{
        return hard_smp_processor_id();
}
#endif

int arch_early_irq_init(void)
{
        return 0;
}

#ifdef CONFIG_PPC64
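/*
 * "noirqdistrib" on the kernel command line clears distribute_irqs,
 * which platform interrupt-controller code consults to decide whether
 * to spread device interrupts across CPUs.
 */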
static int __init setup_noirqdistrib(char *str)
{
        distribute_irqs = 0;
        return 1;
}

__setup("noirqdistrib", setup_noirqdistrib);
#endif /* CONFIG_PPC64 */