arch/powerpc/kernel/irq.c
/*
 * Derived from arch/i386/kernel/irq.c
 *	Copyright (C) 1992 Linus Torvalds
 * Adapted from arch/i386 by Gary Thomas
 *	Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Updated and modified by Cort Dougan <cort@fsmlabs.com>
 *	Copyright (C) 1996-2001 Cort Dougan
 * Adapted for Power Macintosh by Paul Mackerras
 *	Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQs should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 *
 * The MPC8xx has an interrupt mask in the SIU.  If a bit is set, the
 * interrupt is _enabled_.  As expected, IRQ0 is bit 0 in the 32-bit
 * mask register (of which only 16 are defined), hence the weird shifting
 * and complement of the cached_irq_mask.  I want to be able to stuff
 * this right into the SIU SMASK register.
 * Many of the prep/chrp functions are conditionally compiled on CONFIG_8xx
 * to reduce code space and undefined function references.
 */

#undef DEBUG

#include <linux/export.h>
#include <linux/threads.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/profile.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/mutex.h>
#include <linux/bootmem.h>
#include <linux/pci.h>
#include <linux/debugfs.h>
#include <linux/of.h>
#include <linux/of_irq.h>

#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/cache.h>
#include <asm/prom.h>
#include <asm/ptrace.h>
#include <asm/machdep.h>
#include <asm/udbg.h>
#include <asm/smp.h>
#include <asm/debug.h>

#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/firmware.h>
#include <asm/lv1call.h>
#endif
#define CREATE_TRACE_POINTS
#include <asm/trace.h>

DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

int __irq_offset_value;

#ifdef CONFIG_PPC32
EXPORT_SYMBOL(__irq_offset_value);
atomic_t ppc_n_lost_interrupts;

#ifdef CONFIG_TAU_INT
extern int tau_initialized;
extern int tau_interrupts(int);
#endif
#endif /* CONFIG_PPC32 */

#ifdef CONFIG_PPC64

int distribute_irqs = 1;

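/*
 * Note: on 64-bit, r13 always points at the current CPU's paca, so the
 * two helpers below read/write paca->irq_happened and paca->soft_enabled
 * with a single byte load/store, without going through get_paca().
 */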
static inline notrace unsigned long get_irq_happened(void)
{
	unsigned long happened;

	__asm__ __volatile__("lbz %0,%1(13)"
	: "=r" (happened) : "i" (offsetof(struct paca_struct, irq_happened)));

	return happened;
}

static inline notrace void set_soft_enabled(unsigned long enable)
{
	__asm__ __volatile__("stb %0,%1(13)"
	: : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled)));
}

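/*
 * Check whether the decrementer has gone past the next scheduled timer
 * event (decrementers_next_tb); if so, re-arm it to fire almost
 * immediately and report that a timer interrupt needs to be replayed.
 */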
static inline notrace int decrementer_check_overflow(void)
{
	u64 now = get_tb_or_rtc();
	u64 *next_tb = &__get_cpu_var(decrementers_next_tb);

	if (now >= *next_tb)
		set_dec(1);
	return now >= *next_tb;
}

/* This is called whenever we are re-enabling interrupts
 * and returns either 0 (nothing to do) or 500/900 if there's
 * either an EE or a DEC to generate.
 *
 * This is called in two contexts: from arch_local_irq_restore()
 * before soft-enabling interrupts, and from the exception exit
 * path when returning from an interrupt from a soft-disabled to
 * a soft-enabled context. In both cases we have interrupts hard
 * disabled.
 *
 * We take care of only clearing the bits we handled in the
 * PACA irq_happened field since we can only re-emit one at a
 * time and we don't want to "lose" one.
 */
notrace unsigned int __check_irq_replay(void)
{
	/*
	 * We use local_paca rather than get_paca() to avoid all
	 * the debug_smp_processor_id() business in this low level
	 * function
	 */
	unsigned char happened = local_paca->irq_happened;

	/* Clear bit 0 which we wouldn't clear otherwise */
	local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;

	/*
	 * Force the delivery of pending soft-disabled interrupts on PS3.
	 * Any HV call will have this side effect.
	 */
	if (firmware_has_feature(FW_FEATURE_PS3_LV1)) {
		u64 tmp, tmp2;
		lv1_get_version_info(&tmp, &tmp2);
	}

	/*
	 * We may have missed a decrementer interrupt. We check the
	 * decrementer itself rather than the paca irq_happened field
	 * in case we also had a rollover while hard disabled.
	 */
	local_paca->irq_happened &= ~PACA_IRQ_DEC;
	if (decrementer_check_overflow())
		return 0x900;

	/* Finally check if an external interrupt happened */
	local_paca->irq_happened &= ~PACA_IRQ_EE;
	if (happened & PACA_IRQ_EE)
		return 0x500;

#ifdef CONFIG_PPC_BOOK3E
	/* Check if an EPR external interrupt happened; this bit is
	 * typically set if we need to handle another "edge" interrupt
	 * from within the MPIC "EPR" handler.
	 */
	local_paca->irq_happened &= ~PACA_IRQ_EE_EDGE;
	if (happened & PACA_IRQ_EE_EDGE)
		return 0x500;

	local_paca->irq_happened &= ~PACA_IRQ_DBELL;
	if (happened & PACA_IRQ_DBELL)
		return 0x280;
#endif /* CONFIG_PPC_BOOK3E */

	/* There should be nothing left! */
	BUG_ON(local_paca->irq_happened != 0);

	return 0;
}

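/*
 * Lazy interrupt disabling: local_irq_disable() only clears the paca
 * soft_enabled flag; the hardware EE bit stays on until an interrupt
 * actually arrives. When that happens, the low-level handlers record it
 * in paca->irq_happened and hard-disable. Re-enabling therefore has to
 * check irq_happened and replay anything that was deferred, which is
 * what this function does.
 */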
notrace void arch_local_irq_restore(unsigned long en)
{
	unsigned char irq_happened;
	unsigned int replay;

	/* Write the new soft-enabled value */
	set_soft_enabled(en);
	if (!en)
		return;
	/*
	 * From this point onward, we can take interrupts, preempt,
	 * etc... unless we got hard-disabled. We check if an event
	 * happened. If none happened, we know we can just return.
	 *
	 * We may have preempted before the check below, in which case
	 * we are checking the "new" CPU instead of the old one. This
	 * is only a problem if an event happened on the "old" CPU.
	 *
	 * External interrupt events will have caused interrupts to
	 * be hard-disabled, so there is no problem, we
	 * cannot have preempted.
	 */
	irq_happened = get_irq_happened();
	if (!irq_happened)
		return;

	/*
	 * We need to hard disable to get a trusted value from
	 * __check_irq_replay(). We also need to soft-disable
	 * again to avoid warnings in there due to the use of
	 * per-cpu variables.
	 *
	 * We know that if the value in irq_happened is exactly 0x01
	 * then we are already hard disabled (there are other less
	 * common cases that we'll ignore for now), so we skip the
	 * (expensive) mtmsrd.
	 */
	if (unlikely(irq_happened != PACA_IRQ_HARD_DIS))
		__hard_irq_disable();
	set_soft_enabled(0);

	/*
	 * Check if anything needs to be re-emitted. We haven't
	 * soft-enabled yet to avoid warnings in decrementer_check_overflow
	 * accessing per-cpu variables.
	 */
	replay = __check_irq_replay();

	/* We can soft-enable now */
	set_soft_enabled(1);

	/*
	 * And replay if we have to. This will return with interrupts
	 * hard-enabled.
	 */
	if (replay) {
		__replay_interrupt(replay);
		return;
	}

	/* Finally, let's ensure we are hard enabled */
	__hard_irq_enable();
}
EXPORT_SYMBOL(arch_local_irq_restore);

/*
 * This is specifically called by assembly code to re-enable interrupts
 * if they are currently disabled. This is typically called before
 * schedule() or do_signal() when returning to userspace. We do it
 * in C to avoid the burden of dealing with lockdep etc...
 */
void restore_interrupts(void)
{
	if (irqs_disabled())
		local_irq_enable();
}

#endif /* CONFIG_PPC64 */

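/*
 * Architecture-specific rows of /proc/interrupts: per-CPU counts kept in
 * irq_stat for timer, spurious, performance-monitor and machine-check
 * interrupts (plus TAU thermal interrupts on 32-bit).
 */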
int arch_show_interrupts(struct seq_file *p, int prec)
{
	int j;

#if defined(CONFIG_PPC32) && defined(CONFIG_TAU_INT)
	if (tau_initialized) {
		seq_printf(p, "%*s: ", prec, "TAU");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", tau_interrupts(j));
		seq_puts(p, "  PowerPC Thermal Assist (cpu temp)\n");
	}
#endif /* CONFIG_PPC32 && CONFIG_TAU_INT */

	seq_printf(p, "%*s: ", prec, "LOC");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs);
	seq_printf(p, "  Local timer interrupts\n");

	seq_printf(p, "%*s: ", prec, "SPU");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).spurious_irqs);
	seq_printf(p, "  Spurious interrupts\n");

	seq_printf(p, "%*s: ", prec, "CNT");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).pmu_irqs);
	seq_printf(p, "  Performance monitoring interrupts\n");

	seq_printf(p, "%*s: ", prec, "MCE");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).mce_exceptions);
	seq_printf(p, "  Machine check exceptions\n");

	return 0;
}

/*
 * /proc/stat helpers
 */
u64 arch_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = per_cpu(irq_stat, cpu).timer_irqs;

	sum += per_cpu(irq_stat, cpu).pmu_irqs;
	sum += per_cpu(irq_stat, cpu).mce_exceptions;
	sum += per_cpu(irq_stat, cpu).spurious_irqs;

	return sum;
}

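/*
 * Used during CPU hot-unplug to move interrupts away from the CPU being
 * offlined: each IRQ's affinity is restricted to the CPUs that are still
 * online, and affinity is "broken" (reset to all online CPUs) when nothing
 * in the old mask remains online.
 */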
#ifdef CONFIG_HOTPLUG_CPU
void migrate_irqs(void)
{
	struct irq_desc *desc;
	unsigned int irq;
	static int warned;
	cpumask_var_t mask;
	const struct cpumask *map = cpu_online_mask;

	alloc_cpumask_var(&mask, GFP_KERNEL);

	for_each_irq(irq) {
		struct irq_data *data;
		struct irq_chip *chip;

		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		data = irq_desc_get_irq_data(desc);
		if (irqd_is_per_cpu(data))
			continue;

		chip = irq_data_get_irq_chip(data);

		cpumask_and(mask, data->affinity, map);
		if (cpumask_any(mask) >= nr_cpu_ids) {
			printk("Breaking affinity for irq %i\n", irq);
			cpumask_copy(mask, map);
		}
		if (chip->irq_set_affinity)
			chip->irq_set_affinity(data, mask, true);
		else if (desc->action && !(warned++))
			printk("Cannot set affinity for irq %i\n", irq);
	}

	free_cpumask_var(mask);

	local_irq_enable();
	mdelay(1);
	local_irq_disable();
}
#endif

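/*
 * Run the handler for one interrupt, switching to this CPU's dedicated
 * hardirq stack (hardirq_ctx) unless we are already on it. The thread's
 * ksp_limit is moved to the irq stack for the duration so the stack
 * overflow check still makes sense.
 */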
static inline void handle_one_irq(unsigned int irq)
{
	struct thread_info *curtp, *irqtp;
	unsigned long saved_sp_limit;
	struct irq_desc *desc;

	desc = irq_to_desc(irq);
	if (!desc)
		return;

	/* Switch to the irq stack to handle this */
	curtp = current_thread_info();
	irqtp = hardirq_ctx[smp_processor_id()];

	if (curtp == irqtp) {
		/* We're already on the irq stack, just handle it */
		desc->handle_irq(irq, desc);
		return;
	}

	saved_sp_limit = current->thread.ksp_limit;

	irqtp->task = curtp->task;
	irqtp->flags = 0;

	/* Copy the softirq bits in preempt_count so that the
	 * softirq checks work in the hardirq context. */
	irqtp->preempt_count = (irqtp->preempt_count & ~SOFTIRQ_MASK) |
			       (curtp->preempt_count & SOFTIRQ_MASK);

	current->thread.ksp_limit = (unsigned long)irqtp +
				    _ALIGN_UP(sizeof(struct thread_info), 16);

	call_handle_irq(irq, desc, irqtp, desc->handle_irq);
	current->thread.ksp_limit = saved_sp_limit;
	irqtp->task = NULL;

	/* Set any flag that may have been set on the
	 * alternate stack
	 */
	if (irqtp->flags)
		set_bits(irqtp->flags, &curtp->flags);
}

static inline void check_stack_overflow(void)
{
#ifdef CONFIG_DEBUG_STACKOVERFLOW
	long sp;

	sp = __get_SP() & (THREAD_SIZE-1);

	/* check for stack overflow: is there less than 2KB free? */
	if (unlikely(sp < (sizeof(struct thread_info) + 2048))) {
		printk("do_IRQ: stack overflow: %ld\n",
			sp - sizeof(struct thread_info));
		dump_stack();
	}
#endif
}

void do_IRQ(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	unsigned int irq;

	trace_irq_entry(regs);

	irq_enter();

	check_stack_overflow();

	/*
	 * Query the platform PIC for the interrupt & ack it.
	 *
	 * This will typically lower the interrupt line to the CPU
	 */
	irq = ppc_md.get_irq();

	/* We can hard enable interrupts now */
	may_hard_irq_enable();

	/* And finally process it */
	if (irq != NO_IRQ)
		handle_one_irq(irq);
	else
		__get_cpu_var(irq_stat).spurious_irqs++;

	irq_exit();
	set_irq_regs(old_regs);

	trace_irq_exit(regs);
}

void __init init_IRQ(void)
{
	if (ppc_md.init_IRQ)
		ppc_md.init_IRQ();

	exc_lvl_ctx_init();

	irq_ctx_init();
}

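/*
 * BookE/40x critical, debug and machine-check exceptions run on their own
 * per-CPU stacks; initialise the thread_info at the base of each one.
 */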
#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
struct thread_info *critirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *dbgirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *mcheckirq_ctx[NR_CPUS] __read_mostly;

void exc_lvl_ctx_init(void)
{
	struct thread_info *tp;
	int i, cpu_nr;

	for_each_possible_cpu(i) {
#ifdef CONFIG_PPC64
		cpu_nr = i;
#else
		cpu_nr = get_hard_smp_processor_id(i);
#endif
		memset((void *)critirq_ctx[cpu_nr], 0, THREAD_SIZE);
		tp = critirq_ctx[cpu_nr];
		tp->cpu = cpu_nr;
		tp->preempt_count = 0;

#ifdef CONFIG_BOOKE
		memset((void *)dbgirq_ctx[cpu_nr], 0, THREAD_SIZE);
		tp = dbgirq_ctx[cpu_nr];
		tp->cpu = cpu_nr;
		tp->preempt_count = 0;

		memset((void *)mcheckirq_ctx[cpu_nr], 0, THREAD_SIZE);
		tp = mcheckirq_ctx[cpu_nr];
		tp->cpu = cpu_nr;
		tp->preempt_count = HARDIRQ_OFFSET;
#endif
	}
}
#endif

struct thread_info *softirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *hardirq_ctx[NR_CPUS] __read_mostly;

void irq_ctx_init(void)
{
	struct thread_info *tp;
	int i;

	for_each_possible_cpu(i) {
		memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
		tp = softirq_ctx[i];
		tp->cpu = i;
		tp->preempt_count = 0;

		memset((void *)hardirq_ctx[i], 0, THREAD_SIZE);
		tp = hardirq_ctx[i];
		tp->cpu = i;
		tp->preempt_count = HARDIRQ_OFFSET;
	}
}

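/*
 * Run pending softirqs on this CPU's dedicated softirq stack (softirq_ctx),
 * mirroring what handle_one_irq() does for the hardirq stack, including the
 * temporary ksp_limit switch.
 */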
static inline void do_softirq_onstack(void)
{
	struct thread_info *curtp, *irqtp;
	unsigned long saved_sp_limit = current->thread.ksp_limit;

	curtp = current_thread_info();
	irqtp = softirq_ctx[smp_processor_id()];
	irqtp->task = curtp->task;
	irqtp->flags = 0;
	current->thread.ksp_limit = (unsigned long)irqtp +
				    _ALIGN_UP(sizeof(struct thread_info), 16);
	call_do_softirq(irqtp);
	current->thread.ksp_limit = saved_sp_limit;
	irqtp->task = NULL;

	/* Set any flag that may have been set on the
	 * alternate stack
	 */
	if (irqtp->flags)
		set_bits(irqtp->flags, &curtp->flags);
}

void do_softirq(void)
{
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	if (local_softirq_pending())
		do_softirq_onstack();

	local_irq_restore(flags);
}

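/*
 * Map a Linux virtual irq number back to the hardware irq number stored
 * in its irq_data; returns 0 (and warns) if the virq is not mapped.
 */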
irq_hw_number_t virq_to_hw(unsigned int virq)
{
	struct irq_data *irq_data = irq_get_irq_data(virq);
	return WARN_ON(!irq_data) ? 0 : irq_data->hwirq;
}
EXPORT_SYMBOL_GPL(virq_to_hw);

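/*
 * Pick a target CPU for an interrupt: round-robin across the online CPUs
 * when the requested affinity is "all CPUs", otherwise the first online
 * CPU in the mask (falling back to round-robin if none is online). The
 * return value is a hard (physical) CPU id, as expected by the PICs.
 */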
#ifdef CONFIG_SMP
int irq_choose_cpu(const struct cpumask *mask)
{
	int cpuid;

	if (cpumask_equal(mask, cpu_all_mask)) {
		static int irq_rover;
		static DEFINE_RAW_SPINLOCK(irq_rover_lock);
		unsigned long flags;

		/* Round-robin distribution... */
do_round_robin:
		raw_spin_lock_irqsave(&irq_rover_lock, flags);

		irq_rover = cpumask_next(irq_rover, cpu_online_mask);
		if (irq_rover >= nr_cpu_ids)
			irq_rover = cpumask_first(cpu_online_mask);

		cpuid = irq_rover;

		raw_spin_unlock_irqrestore(&irq_rover_lock, flags);
	} else {
		cpuid = cpumask_first_and(mask, cpu_online_mask);
		if (cpuid >= nr_cpu_ids)
			goto do_round_robin;
	}

	return get_hard_smp_processor_id(cpuid);
}
#else
int irq_choose_cpu(const struct cpumask *mask)
{
	return hard_smp_processor_id();
}
#endif

int arch_early_irq_init(void)
{
	return 0;
}

#ifdef CONFIG_PPC64
static int __init setup_noirqdistrib(char *str)
{
	distribute_irqs = 0;
	return 1;
}

__setup("noirqdistrib", setup_noirqdistrib);
#endif /* CONFIG_PPC64 */