/*
 * Derived from arch/i386/kernel/irq.c
 *   Copyright (C) 1992 Linus Torvalds
 * Adapted from arch/i386 by Gary Thomas
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Updated and modified by Cort Dougan <cort@fsmlabs.com>
 *   Copyright (C) 1996-2001 Cort Dougan
 * Adapted for Power Macintosh by Paul Mackerras
 *   Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQs should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 *
 * The MPC8xx has an interrupt mask in the SIU.  If a bit is set, the
 * interrupt is _enabled_.  As expected, IRQ0 is bit 0 in the 32-bit
 * mask register (of which only 16 are defined), hence the weird shifting
 * and complement of the cached_irq_mask.  I want to be able to stuff
 * this right into the SIU SMASK register.
 * Many of the prep/chrp functions are conditionally compiled on CONFIG_8xx
 * to reduce code space and undefined function references.
 */
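
/*
 * Illustration only (not code from this file): with IRQ0 in the
 * most-significant bit position, enabling IRQ n on the 8xx means
 * setting bit (31 - n) of the SIU mask, roughly:
 *
 *	simask |= 1UL << (31 - irq);	/* "simask" is hypothetical */
 */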

#undef DEBUG

#include <linux/export.h>
#include <linux/threads.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/profile.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/mutex.h>
#include <linux/bootmem.h>
#include <linux/pci.h>
#include <linux/debugfs.h>
#include <linux/of.h>
#include <linux/of_irq.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/cache.h>
#include <asm/prom.h>
#include <asm/ptrace.h>
#include <asm/machdep.h>
#include <asm/udbg.h>
#include <asm/smp.h>
#include <asm/debug.h>

#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/firmware.h>
#include <asm/lv1call.h>
#endif
#define CREATE_TRACE_POINTS
#include <asm/trace.h>

DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

int __irq_offset_value;

#ifdef CONFIG_PPC32
EXPORT_SYMBOL(__irq_offset_value);
atomic_t ppc_n_lost_interrupts;

#ifdef CONFIG_TAU_INT
extern int tau_initialized;
extern int tau_interrupts(int);
#endif
#endif /* CONFIG_PPC32 */

#ifdef CONFIG_PPC64

int distribute_irqs = 1;
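
/*
 * The two accessors below touch single bytes of the PACA through r13,
 * which holds the per-CPU PACA pointer on 64-bit. In plain C they would
 * read roughly (illustration only):
 *
 *	happened = local_paca->irq_happened;
 *	local_paca->soft_enabled = enable;
 *
 * The volatile asm guarantees a single lbz/stb that the compiler can
 * neither elide nor reorder.
 */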

static inline notrace unsigned long get_irq_happened(void)
{
	unsigned long happened;

	__asm__ __volatile__("lbz %0,%1(13)"
	: "=r" (happened) : "i" (offsetof(struct paca_struct, irq_happened)));

	return happened;
}

static inline notrace void set_soft_enabled(unsigned long enable)
{
	__asm__ __volatile__("stb %0,%1(13)"
	: : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled)));
}
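
/*
 * If the timebase has passed the value cached in decrementers_next_tb,
 * a decrementer event is due: set_dec(1) below reprograms the
 * decrementer to fire almost immediately once hard-enabled again.
 */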
static inline notrace int decrementer_check_overflow(void)
{
	u64 now = get_tb_or_rtc();
	u64 *next_tb = &__get_cpu_var(decrementers_next_tb);

	if (now >= *next_tb)
		set_dec(1);
	return now >= *next_tb;
}

/* This is called whenever we are re-enabling interrupts
 * and returns either 0 (nothing to do) or 500/900 if there's
 * either an EE or a DEC to generate.
 *
 * This is called in two contexts: From arch_local_irq_restore()
 * before soft-enabling interrupts, and from the exception exit
 * path when returning from an interrupt from a soft-disabled to
 * a soft-enabled context. In both cases we have interrupts hard
 * disabled.
 *
 * We take care of only clearing the bits we handled in the
 * PACA irq_happened field since we can only re-emit one at a
 * time and we don't want to "lose" one.
 */
notrace unsigned int __check_irq_replay(void)
{
	/*
	 * We use local_paca rather than get_paca() to avoid all
	 * the debug_smp_processor_id() business in this low level
	 * function
	 */
	unsigned char happened = local_paca->irq_happened;

	/* Clear bit 0 which we wouldn't clear otherwise */
	local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;

	/*
	 * Force the delivery of pending soft-disabled interrupts on PS3.
	 * Any HV call will have this side effect.
	 */
	if (firmware_has_feature(FW_FEATURE_PS3_LV1)) {
		u64 tmp, tmp2;
		lv1_get_version_info(&tmp, &tmp2);
	}

	/*
	 * We may have missed a decrementer interrupt. We check the
	 * decrementer itself rather than the paca irq_happened field
	 * in case we also had a rollover while hard disabled
	 */
	local_paca->irq_happened &= ~PACA_IRQ_DEC;
	if (decrementer_check_overflow())
		return 0x900;

	/* Finally check if an external interrupt happened */
	local_paca->irq_happened &= ~PACA_IRQ_EE;
	if (happened & PACA_IRQ_EE)
		return 0x500;

#ifdef CONFIG_PPC_BOOK3E
	/* Check if an EPR external interrupt happened; this bit is
	 * typically set if we need to handle another "edge" interrupt
	 * from within the MPIC "EPR" handler
	 */
	local_paca->irq_happened &= ~PACA_IRQ_EE_EDGE;
	if (happened & PACA_IRQ_EE_EDGE)
		return 0x500;

	local_paca->irq_happened &= ~PACA_IRQ_DBELL;
	if (happened & PACA_IRQ_DBELL)
		return 0x280;
#endif /* CONFIG_PPC_BOOK3E */

	/* There should be nothing left! */
	BUG_ON(local_paca->irq_happened != 0);

	return 0;
}

notrace void arch_local_irq_restore(unsigned long en)
{
	unsigned char irq_happened;
	unsigned int replay;

	/* Write the new soft-enabled value */
	set_soft_enabled(en);
	if (!en)
		return;
	/*
	 * From this point onward, we can take interrupts, preempt,
	 * etc... unless we got hard-disabled. We check if an event
	 * happened. If none happened, we know we can just return.
	 *
	 * We may have preempted before the check below, in which case
	 * we are checking the "new" CPU instead of the old one. This
	 * is only a problem if an event happened on the "old" CPU.
	 *
	 * External interrupt events will have caused interrupts to
	 * be hard-disabled, so there is no problem, we
	 * cannot have preempted.
	 */
	irq_happened = get_irq_happened();
	if (!irq_happened)
		return;

	/*
	 * We need to hard disable to get a trusted value from
	 * __check_irq_replay(). We also need to soft-disable
	 * again to avoid warnings in there due to the use of
	 * per-cpu variables.
	 *
	 * We know that if the value in irq_happened is exactly 0x01
	 * then we are already hard disabled (there are other less
	 * common cases that we'll ignore for now), so we skip the
	 * (expensive) mtmsrd.
	 */
	if (unlikely(irq_happened != PACA_IRQ_HARD_DIS))
		__hard_irq_disable();
#ifdef CONFIG_TRACE_IRQFLAGS
	else {
		/*
		 * We should already be hard disabled here. We had bugs
		 * where that wasn't the case so let's double check it
		 * and warn if we are wrong. Only do that when IRQ tracing
		 * is enabled as mfmsr() can be costly.
		 */
		if (WARN_ON(mfmsr() & MSR_EE))
			__hard_irq_disable();
	}
#endif /* CONFIG_TRACE_IRQFLAGS */

	set_soft_enabled(0);

	/*
	 * Check if anything needs to be re-emitted. We haven't
	 * soft-enabled yet to avoid warnings in decrementer_check_overflow
	 * accessing per-cpu variables
	 */
	replay = __check_irq_replay();

	/* We can soft-enable now */
	set_soft_enabled(1);

	/*
	 * And replay if we have to. This will return with interrupts
	 * hard-enabled.
	 */
	if (replay) {
		__replay_interrupt(replay);
		return;
	}

	/* Finally, let's ensure we are hard enabled */
	__hard_irq_enable();
}
EXPORT_SYMBOL(arch_local_irq_restore);
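
#if 0	/* Illustration of the lazy scheme above; not compiled */
	local_irq_save(flags);	/* only clears paca->soft_enabled */
	/*
	 * An EE/DEC arriving here is noted in paca->irq_happened and the
	 * CPU hard-disables instead of running the handler.
	 */
	local_irq_restore(flags);	/* sees irq_happened != 0 and replays
					 * the pending 0x500/0x900 via
					 * __check_irq_replay() */
#endif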

/*
 * This is specifically called by assembly code to re-enable interrupts
 * if they are currently disabled. This is typically called before
 * schedule() or do_signal() when returning to userspace. We do it
 * in C to avoid the burden of dealing with lockdep etc...
 *
 * NOTE: This is called with interrupts hard disabled but not marked
 * as such in paca->irq_happened, so we need to resync this.
 */
void notrace restore_interrupts(void)
{
	if (irqs_disabled()) {
		local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
		local_irq_enable();
	} else
		__hard_irq_enable();
}

/*
 * This is a helper to use when about to go into idle low-power
 * when the latter has the side effect of re-enabling interrupts
 * (such as calling H_CEDE under pHyp).
 *
 * You call this function with interrupts soft-disabled (this is
 * already the case when ppc_md.power_save is called). The function
 * will return whether to enter power save or just return.
 *
 * In the former case, it will have notified lockdep of interrupts
 * being re-enabled and generally sanitized the lazy irq state,
 * and in the latter case it will leave with interrupts hard
 * disabled and marked as such, so the local_irq_enable() call
 * in cpu_idle() will properly re-enable everything.
 */
bool prep_irq_for_idle(void)
{
	/*
	 * First we need to hard disable to ensure no interrupt
	 * occurs before we effectively enter the low power state
	 */
	hard_irq_disable();

	/*
	 * If anything happened while we were soft-disabled,
	 * we return now and do not enter the low power state.
	 */
	if (lazy_irq_pending())
		return false;

	/* Tell lockdep we are about to re-enable */
	trace_hardirqs_on();

	/*
	 * Mark interrupts as soft-enabled and clear the
	 * PACA_IRQ_HARD_DIS from the pending mask since we
	 * are about to hard enable as well as a side effect
	 * of entering the low power state.
	 */
	local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
	local_paca->soft_enabled = 1;

	/* Tell the caller to enter the low power state */
	return true;
}
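
#if 0	/* Sketch of a caller, assuming a pHyp-style power_save hook */
static void example_power_save(void)
{
	if (!prep_irq_for_idle())
		return;			/* an irq is already pending */

	/* Enter low power; hard-enables interrupts as a side effect */
	cede_processor();		/* hypothetical H_CEDE wrapper */
}
#endif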

#endif /* CONFIG_PPC64 */

int arch_show_interrupts(struct seq_file *p, int prec)
{
	int j;

#if defined(CONFIG_PPC32) && defined(CONFIG_TAU_INT)
	if (tau_initialized) {
		seq_printf(p, "%*s: ", prec, "TAU");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", tau_interrupts(j));
		seq_puts(p, "  PowerPC Thermal Assist (cpu temp)\n");
	}
#endif /* CONFIG_PPC32 && CONFIG_TAU_INT */

	seq_printf(p, "%*s: ", prec, "LOC");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs);
	seq_printf(p, "  Local timer interrupts\n");

	seq_printf(p, "%*s: ", prec, "SPU");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).spurious_irqs);
	seq_printf(p, "  Spurious interrupts\n");

	seq_printf(p, "%*s: ", prec, "CNT");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).pmu_irqs);
	seq_printf(p, "  Performance monitoring interrupts\n");

	seq_printf(p, "%*s: ", prec, "MCE");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).mce_exceptions);
	seq_printf(p, "  Machine check exceptions\n");

	return 0;
}
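
/*
 * The rows above appear in /proc/interrupts roughly as (per-CPU counts
 * are made-up illustration values):
 *
 *	LOC:     123456     120042   Local timer interrupts
 *	SPU:          0          2   Spurious interrupts
 *	CNT:         17          9   Performance monitoring interrupts
 *	MCE:          0          0   Machine check exceptions
 */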

/*
 * /proc/stat helpers
 */
u64 arch_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = per_cpu(irq_stat, cpu).timer_irqs;

	sum += per_cpu(irq_stat, cpu).pmu_irqs;
	sum += per_cpu(irq_stat, cpu).mce_exceptions;
	sum += per_cpu(irq_stat, cpu).spurious_irqs;

	return sum;
}
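
/*
 * migrate_irqs() below is used when a CPU is going offline: any interrupt
 * whose affinity no longer intersects the online CPUs is re-targeted,
 * then interrupts are briefly enabled so in-flight ones can drain before
 * the CPU goes away.
 */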
#ifdef CONFIG_HOTPLUG_CPU
void migrate_irqs(void)
{
	struct irq_desc *desc;
	unsigned int irq;
	static int warned;
	cpumask_var_t mask;
	const struct cpumask *map = cpu_online_mask;

	alloc_cpumask_var(&mask, GFP_KERNEL);

	for_each_irq_desc(irq, desc) {
		struct irq_data *data;
		struct irq_chip *chip;

		data = irq_desc_get_irq_data(desc);
		if (irqd_is_per_cpu(data))
			continue;

		chip = irq_data_get_irq_chip(data);

		cpumask_and(mask, data->affinity, map);
		if (cpumask_any(mask) >= nr_cpu_ids) {
			printk("Breaking affinity for irq %i\n", irq);
			cpumask_copy(mask, map);
		}
		if (chip->irq_set_affinity)
			chip->irq_set_affinity(data, mask, true);
		else if (desc->action && !(warned++))
			printk("Cannot set affinity for irq %i\n", irq);
	}

	free_cpumask_var(mask);

	local_irq_enable();
	mdelay(1);
	local_irq_disable();
}
#endif

static inline void handle_one_irq(unsigned int irq)
{
	struct thread_info *curtp, *irqtp;
	unsigned long saved_sp_limit;
	struct irq_desc *desc;

	desc = irq_to_desc(irq);
	if (!desc)
		return;

	/* Switch to the irq stack to handle this */
	curtp = current_thread_info();
	irqtp = hardirq_ctx[smp_processor_id()];

	if (curtp == irqtp) {
		/* We're already on the irq stack, just handle it */
		desc->handle_irq(irq, desc);
		return;
	}

	saved_sp_limit = current->thread.ksp_limit;

	irqtp->task = curtp->task;
	irqtp->flags = 0;

	/* Copy the softirq bits in preempt_count so that the
	 * softirq checks work in the hardirq context. */
	irqtp->preempt_count = (irqtp->preempt_count & ~SOFTIRQ_MASK) |
			       (curtp->preempt_count & SOFTIRQ_MASK);

	current->thread.ksp_limit = (unsigned long)irqtp +
		_ALIGN_UP(sizeof(struct thread_info), 16);

	call_handle_irq(irq, desc, irqtp, desc->handle_irq);
	current->thread.ksp_limit = saved_sp_limit;
	irqtp->task = NULL;

	/* Set any flag that may have been set on the
	 * alternate stack
	 */
	if (irqtp->flags)
		set_bits(irqtp->flags, &curtp->flags);
}
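
/*
 * check_stack_overflow() below relies on kernel stacks being THREAD_SIZE
 * aligned with the thread_info at the bottom, so sp & (THREAD_SIZE - 1)
 * is the room left below the stack pointer. E.g. an offset of 0x1a0
 * (416 bytes) would trip the 2KB warning.
 */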
static inline void check_stack_overflow(void)
{
#ifdef CONFIG_DEBUG_STACKOVERFLOW
	long sp;

	sp = __get_SP() & (THREAD_SIZE - 1);

	/* check for stack overflow: is there less than 2KB free? */
	if (unlikely(sp < (sizeof(struct thread_info) + 2048))) {
		printk("do_IRQ: stack overflow: %ld\n",
			sp - sizeof(struct thread_info));
		dump_stack();
	}
#endif
}

void do_IRQ(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	unsigned int irq;

	irq_enter();

	trace_irq_entry(regs);

	check_stack_overflow();

	/*
	 * Query the platform PIC for the interrupt & ack it.
	 *
	 * This will typically lower the interrupt line to the CPU
	 */
	irq = ppc_md.get_irq();

	/* We can hard enable interrupts now */
	may_hard_irq_enable();

	/* And finally process it */
	if (irq != NO_IRQ)
		handle_one_irq(irq);
	else
		__get_cpu_var(irq_stat).spurious_irqs++;

	trace_irq_exit(regs);

	irq_exit();
	set_irq_regs(old_regs);
}

void __init init_IRQ(void)
{
	if (ppc_md.init_IRQ)
		ppc_md.init_IRQ();

	exc_lvl_ctx_init();

	irq_ctx_init();
}

#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
struct thread_info *critirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *dbgirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *mcheckirq_ctx[NR_CPUS] __read_mostly;

void exc_lvl_ctx_init(void)
{
	struct thread_info *tp;
	int i, cpu_nr;

	for_each_possible_cpu(i) {
#ifdef CONFIG_PPC64
		cpu_nr = i;
#else
		cpu_nr = get_hard_smp_processor_id(i);
#endif
		memset((void *)critirq_ctx[cpu_nr], 0, THREAD_SIZE);
		tp = critirq_ctx[cpu_nr];
		tp->cpu = cpu_nr;
		tp->preempt_count = 0;

#ifdef CONFIG_BOOKE
		memset((void *)dbgirq_ctx[cpu_nr], 0, THREAD_SIZE);
		tp = dbgirq_ctx[cpu_nr];
		tp->cpu = cpu_nr;
		tp->preempt_count = 0;

		memset((void *)mcheckirq_ctx[cpu_nr], 0, THREAD_SIZE);
		tp = mcheckirq_ctx[cpu_nr];
		tp->cpu = cpu_nr;
		tp->preempt_count = HARDIRQ_OFFSET;
#endif
	}
}
#endif

struct thread_info *softirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *hardirq_ctx[NR_CPUS] __read_mostly;

void irq_ctx_init(void)
{
	struct thread_info *tp;
	int i;

	for_each_possible_cpu(i) {
		memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
		tp = softirq_ctx[i];
		tp->cpu = i;
		tp->preempt_count = 0;

		memset((void *)hardirq_ctx[i], 0, THREAD_SIZE);
		tp = hardirq_ctx[i];
		tp->cpu = i;
		tp->preempt_count = HARDIRQ_OFFSET;
	}
}

static inline void do_softirq_onstack(void)
{
	struct thread_info *curtp, *irqtp;
	unsigned long saved_sp_limit = current->thread.ksp_limit;

	curtp = current_thread_info();
	irqtp = softirq_ctx[smp_processor_id()];
	irqtp->task = curtp->task;
	irqtp->flags = 0;
	current->thread.ksp_limit = (unsigned long)irqtp +
				    _ALIGN_UP(sizeof(struct thread_info), 16);
	call_do_softirq(irqtp);
	current->thread.ksp_limit = saved_sp_limit;
	irqtp->task = NULL;

	/* Set any flag that may have been set on the
	 * alternate stack
	 */
	if (irqtp->flags)
		set_bits(irqtp->flags, &curtp->flags);
}

void do_softirq(void)
{
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	if (local_softirq_pending())
		do_softirq_onstack();

	local_irq_restore(flags);
}
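
/*
 * virq_to_hw() maps a Linux virtual interrupt number back to the
 * controller-local hardware number recorded in its irq_data.
 */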
irq_hw_number_t virq_to_hw(unsigned int virq)
{
	struct irq_data *irq_data = irq_get_irq_data(virq);
	return WARN_ON(!irq_data) ? 0 : irq_data->hwirq;
}
EXPORT_SYMBOL_GPL(virq_to_hw);
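
/*
 * irq_choose_cpu() picks a hardware CPU id for an interrupt: a specific
 * affinity mask selects its first online CPU, while the "all online"
 * mask round-robins. The goto back into the first branch is deliberate:
 * a mask with no online CPU falls back to round-robin.
 */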
#ifdef CONFIG_SMP
int irq_choose_cpu(const struct cpumask *mask)
{
	int cpuid;

	if (cpumask_equal(mask, cpu_online_mask)) {
		static int irq_rover;
		static DEFINE_RAW_SPINLOCK(irq_rover_lock);
		unsigned long flags;

		/* Round-robin distribution... */
do_round_robin:
		raw_spin_lock_irqsave(&irq_rover_lock, flags);

		irq_rover = cpumask_next(irq_rover, cpu_online_mask);
		if (irq_rover >= nr_cpu_ids)
			irq_rover = cpumask_first(cpu_online_mask);

		cpuid = irq_rover;

		raw_spin_unlock_irqrestore(&irq_rover_lock, flags);
	} else {
		cpuid = cpumask_first_and(mask, cpu_online_mask);
		if (cpuid >= nr_cpu_ids)
			goto do_round_robin;
	}

	return get_hard_smp_processor_id(cpuid);
}
#else
int irq_choose_cpu(const struct cpumask *mask)
{
	return hard_smp_processor_id();
}
#endif
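
#if 0	/* Sketch of a PIC driver using it, loosely modeled on MPIC */
static int example_set_affinity(struct irq_data *d,
				const struct cpumask *mask, bool force)
{
	int cpuid = irq_choose_cpu(mask);

	/* example_pic_route() is hypothetical: route the source there */
	example_pic_route(irqd_to_hwirq(d), 1 << cpuid);
	return IRQ_SET_MASK_OK;
}
#endif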

int arch_early_irq_init(void)
{
	return 0;
}
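
/*
 * Booting with "noirqdistrib" on the kernel command line clears
 * distribute_irqs; platform PIC setup (e.g. XICS) consults it to decide
 * whether to spread interrupts across CPUs or leave them on the boot CPU.
 */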
#ifdef CONFIG_PPC64
static int __init setup_noirqdistrib(char *str)
{
	distribute_irqs = 0;
	return 1;
}

__setup("noirqdistrib", setup_noirqdistrib);
#endif /* CONFIG_PPC64 */