/*
 * Derived from arch/i386/kernel/irq.c
 * Copyright (C) 1992 Linus Torvalds
 * Adapted from arch/i386 by Gary Thomas
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Updated and modified by Cort Dougan <cort@fsmlabs.com>
 * Copyright (C) 1996-2001 Cort Dougan
 * Adapted for Power Macintosh by Paul Mackerras
 * Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
 * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQs should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 *
 * The MPC8xx has an interrupt mask in the SIU. If a bit is set, the
 * interrupt is _enabled_. As expected, IRQ0 is bit 0 in the 32-bit
 * mask register (of which only 16 are defined), hence the weird shifting
 * and complement of the cached_irq_mask. I want to be able to stuff
 * this right into the SIU SMASK register.
 * Many of the prep/chrp functions are conditionally compiled on CONFIG_8xx
 * to reduce code space and undefined function references.
 */

#undef DEBUG

#include <linux/module.h>
#include <linux/threads.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/profile.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/mutex.h>
#include <linux/bootmem.h>
#include <linux/pci.h>

#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/cache.h>
#include <asm/prom.h>
#include <asm/ptrace.h>
#include <asm/machdep.h>
#include <asm/udbg.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/firmware.h>
#endif

int __irq_offset_value;
static int ppc_spurious_interrupts;

#ifdef CONFIG_PPC32
EXPORT_SYMBOL(__irq_offset_value);
atomic_t ppc_n_lost_interrupts;

#ifndef CONFIG_PPC_MERGE
#define NR_MASK_WORDS	((NR_IRQS + 31) / 32)
unsigned long ppc_cached_irq_mask[NR_MASK_WORDS];
#endif

#ifdef CONFIG_TAU_INT
extern int tau_initialized;
extern int tau_interrupts(int);
#endif
#endif /* CONFIG_PPC32 */

#if defined(CONFIG_SMP) && !defined(CONFIG_PPC_MERGE)
extern atomic_t ipi_recv;
extern atomic_t ipi_sent;
#endif

#ifdef CONFIG_PPC64
EXPORT_SYMBOL(irq_desc);

int distribute_irqs = 1;

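/*
 * On 64-bit, r13 always points at this cpu's paca, so these helpers
 * read/write paca->hard_enabled and paca->soft_enabled with a single
 * byte access relative to r13, never through an intermediate register
 * that could go stale if we are preempted to another cpu.
 */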
static inline unsigned long get_hard_enabled(void)
{
        unsigned long enabled;

        __asm__ __volatile__("lbz %0,%1(13)"
        : "=r" (enabled) : "i" (offsetof(struct paca_struct, hard_enabled)));

        return enabled;
}

static inline void set_soft_enabled(unsigned long enable)
{
        __asm__ __volatile__("stb %0,%1(13)"
        : : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled)));
}

void local_irq_restore(unsigned long en)
{
        /*
         * get_paca()->soft_enabled = en;
         * Is it ever valid to use local_irq_restore(0) when soft_enabled is 1?
         * That was allowed before, and in such a case we do need to take care
         * that gcc will set soft_enabled directly via r13, not choose to use
         * an intermediate register, lest we're preempted to a different cpu.
         */
        set_soft_enabled(en);
        if (!en)
                return;

        if (firmware_has_feature(FW_FEATURE_ISERIES)) {
                /*
                 * Do we need to disable preemption here?  Not really: in the
                 * unlikely event that we're preempted to a different cpu in
                 * between getting r13, loading its lppaca_ptr, and loading
                 * its any_int, we might call iseries_handle_interrupts without
                 * an interrupt pending on the new cpu, but that's no disaster,
                 * is it?  And the business of preempting us off the old cpu
                 * would itself involve a local_irq_restore which handles the
                 * interrupt to that cpu.
                 *
                 * But use "local_paca->lppaca_ptr" instead of "get_lppaca()"
                 * to avoid any preemption checking added into get_paca().
                 */
                if (local_paca->lppaca_ptr->int_dword.any_int)
                        iseries_handle_interrupts();
                return;
        }

        /*
         * if (get_paca()->hard_enabled) return;
         * But again we need to take care that gcc gets hard_enabled directly
         * via r13, not choose to use an intermediate register, lest we're
         * preempted to a different cpu in between the two instructions.
         */
        if (get_hard_enabled())
                return;

        /*
         * Need to hard-enable interrupts here.  Since currently disabled,
         * no need to take further asm precautions against preemption; but
         * use local_paca instead of get_paca() to avoid preemption checking.
         */
        local_paca->hard_enabled = en;
        if ((int)mfspr(SPRN_DEC) < 0)
                mtspr(SPRN_DEC, 1);
        hard_irq_enable();
}
#endif /* CONFIG_PPC64 */

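/* Backs /proc/interrupts: one row per active irq, plus TAU/IPI/BAD totals. */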
int show_interrupts(struct seq_file *p, void *v)
{
        int i = *(loff_t *)v, j;
        struct irqaction *action;
        irq_desc_t *desc;
        unsigned long flags;

        if (i == 0) {
                seq_puts(p, " ");
                for_each_online_cpu(j)
                        seq_printf(p, "CPU%d ", j);
                seq_putc(p, '\n');
        }

        if (i < NR_IRQS) {
                desc = get_irq_desc(i);
                spin_lock_irqsave(&desc->lock, flags);
                action = desc->action;
                if (!action || !action->handler)
                        goto skip;
                seq_printf(p, "%3d: ", i);
#ifdef CONFIG_SMP
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
#else
                seq_printf(p, "%10u ", kstat_irqs(i));
#endif /* CONFIG_SMP */
                if (desc->chip)
                        seq_printf(p, " %s ", desc->chip->typename);
                else
                        seq_puts(p, " None ");
                seq_printf(p, "%s", (desc->status & IRQ_LEVEL) ? "Level " : "Edge ");
                seq_printf(p, " %s", action->name);
                for (action = action->next; action; action = action->next)
                        seq_printf(p, ", %s", action->name);
                seq_putc(p, '\n');
skip:
                spin_unlock_irqrestore(&desc->lock, flags);
        } else if (i == NR_IRQS) {
#ifdef CONFIG_PPC32
#ifdef CONFIG_TAU_INT
                if (tau_initialized) {
                        seq_puts(p, "TAU: ");
                        for_each_online_cpu(j)
                                seq_printf(p, "%10u ", tau_interrupts(j));
                        seq_puts(p, " PowerPC Thermal Assist (cpu temp)\n");
                }
#endif
#if defined(CONFIG_SMP) && !defined(CONFIG_PPC_MERGE)
                /* should this be per processor send/receive? */
                seq_printf(p, "IPI (recv/sent): %10u/%u\n",
                           atomic_read(&ipi_recv), atomic_read(&ipi_sent));
#endif
#endif /* CONFIG_PPC32 */
                seq_printf(p, "BAD: %10u\n", ppc_spurious_interrupts);
        }
        return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
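/*
 * Called from the CPU hotplug path: restrict every movable irq's affinity
 * to the cpus that remain in 'map', breaking affinity when nothing overlaps.
 */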
void fixup_irqs(cpumask_t map)
{
        unsigned int irq;
        static int warned;

        for_each_irq(irq) {
                cpumask_t mask;

                if (irq_desc[irq].status & IRQ_PER_CPU)
                        continue;

                cpus_and(mask, irq_desc[irq].affinity, map);
                if (any_online_cpu(mask) == NR_CPUS) {
                        printk("Breaking affinity for irq %i\n", irq);
                        mask = map;
                }
                if (irq_desc[irq].chip->set_affinity)
                        irq_desc[irq].chip->set_affinity(irq, mask);
                else if (irq_desc[irq].action && !(warned++))
                        printk("Cannot set affinity for irq %i\n", irq);
        }

        local_irq_enable();
        mdelay(1);
        local_irq_disable();
}
#endif

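/*
 * Common external-interrupt entry: ask the platform PIC which interrupt
 * is pending (ppc_md.get_irq) and dispatch it, switching to the per-cpu
 * hardirq stack when CONFIG_IRQSTACKS is enabled.
 */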
void do_IRQ(struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);
        unsigned int irq;
#ifdef CONFIG_IRQSTACKS
        struct thread_info *curtp, *irqtp;
#endif

        irq_enter();

#ifdef CONFIG_DEBUG_STACKOVERFLOW
        /* Debugging check for stack overflow: is there less than 2KB free? */
        {
                long sp;

                sp = __get_SP() & (THREAD_SIZE-1);

                if (unlikely(sp < (sizeof(struct thread_info) + 2048))) {
                        printk("do_IRQ: stack overflow: %ld\n",
                                sp - sizeof(struct thread_info));
                        dump_stack();
                }
        }
#endif

        /*
         * Every platform is required to implement ppc_md.get_irq.
         * This function will either return an irq number or -1 to
         * indicate there are no more pending.
         * The value -2 is for buggy hardware and means that this IRQ
         * has already been handled. -- Tom
         */
        irq = ppc_md.get_irq();

        if (irq != NO_IRQ && irq != NO_IRQ_IGNORE) {
#ifdef CONFIG_IRQSTACKS
                /* Switch to the irq stack to handle this */
                curtp = current_thread_info();
                irqtp = hardirq_ctx[smp_processor_id()];
                if (curtp != irqtp) {
                        struct irq_desc *desc = irq_desc + irq;
                        void *handler = desc->handle_irq;
                        if (handler == NULL)
                                handler = &__do_IRQ;
                        irqtp->task = curtp->task;
                        irqtp->flags = 0;
                        call_handle_irq(irq, desc, irqtp, handler);
                        irqtp->task = NULL;
                        if (irqtp->flags)
                                set_bits(irqtp->flags, &curtp->flags);
                } else
#endif
                        generic_handle_irq(irq);
        } else if (irq != NO_IRQ_IGNORE)
                /* That's not SMP safe ... but who cares ? */
                ppc_spurious_interrupts++;

        irq_exit();
        set_irq_regs(old_regs);

#ifdef CONFIG_PPC_ISERIES
        if (firmware_has_feature(FW_FEATURE_ISERIES) &&
                        get_lppaca()->int_dword.fields.decr_int) {
                get_lppaca()->int_dword.fields.decr_int = 0;
                /* Signal a fake decrementer interrupt */
                timer_interrupt(regs);
        }
#endif
}

void __init init_IRQ(void)
{
        ppc_md.init_IRQ();
#ifdef CONFIG_PPC64
        irq_ctx_init();
#endif
}


#ifdef CONFIG_IRQSTACKS
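/*
 * Per-cpu interrupt stacks: these point at the thread_info sitting at the
 * base of each stack; irq_ctx_init() stamps them with the cpu number and
 * the appropriate preempt count at boot.
 */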
struct thread_info *softirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *hardirq_ctx[NR_CPUS] __read_mostly;

void irq_ctx_init(void)
{
        struct thread_info *tp;
        int i;

        for_each_possible_cpu(i) {
                memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
                tp = softirq_ctx[i];
                tp->cpu = i;
                tp->preempt_count = SOFTIRQ_OFFSET;

                memset((void *)hardirq_ctx[i], 0, THREAD_SIZE);
                tp = hardirq_ctx[i];
                tp->cpu = i;
                tp->preempt_count = HARDIRQ_OFFSET;
        }
}

static inline void do_softirq_onstack(void)
{
        struct thread_info *curtp, *irqtp;

        curtp = current_thread_info();
        irqtp = softirq_ctx[smp_processor_id()];
        irqtp->task = curtp->task;
        call_do_softirq(irqtp);
        irqtp->task = NULL;
}

#else
#define do_softirq_onstack()	__do_softirq()
#endif /* CONFIG_IRQSTACKS */

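/*
 * Arch hook for running pending softirqs: bounce onto the per-cpu softirq
 * stack when interrupt stacks are enabled, otherwise run them in place.
 */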
void do_softirq(void)
{
        unsigned long flags;

        if (in_interrupt())
                return;

        local_irq_save(flags);

        if (local_softirq_pending())
                do_softirq_onstack();

        local_irq_restore(flags);
}
EXPORT_SYMBOL(do_softirq);


/*
 * IRQ controller and virtual interrupts
 */

1da177e4 | 395 | |
0ebfff14 BH |
396 | static LIST_HEAD(irq_hosts); |
397 | static spinlock_t irq_big_lock = SPIN_LOCK_UNLOCKED; | |
8ec8f2e8 BH |
398 | static DEFINE_PER_CPU(unsigned int, irq_radix_reader); |
399 | static unsigned int irq_radix_writer; | |
0ebfff14 BH |
400 | struct irq_map_entry irq_map[NR_IRQS]; |
401 | static unsigned int irq_virq_count = NR_IRQS; | |
402 | static struct irq_host *irq_default_host; | |
1da177e4 | 403 | |
0ebfff14 BH |
404 | struct irq_host *irq_alloc_host(unsigned int revmap_type, |
405 | unsigned int revmap_arg, | |
406 | struct irq_host_ops *ops, | |
407 | irq_hw_number_t inval_irq) | |
1da177e4 | 408 | { |
0ebfff14 BH |
409 | struct irq_host *host; |
410 | unsigned int size = sizeof(struct irq_host); | |
411 | unsigned int i; | |
412 | unsigned int *rmap; | |
413 | unsigned long flags; | |
414 | ||
415 | /* Allocate structure and revmap table if using linear mapping */ | |
416 | if (revmap_type == IRQ_HOST_MAP_LINEAR) | |
417 | size += revmap_arg * sizeof(unsigned int); | |
418 | if (mem_init_done) | |
419 | host = kzalloc(size, GFP_KERNEL); | |
420 | else { | |
421 | host = alloc_bootmem(size); | |
422 | if (host) | |
423 | memset(host, 0, size); | |
424 | } | |
425 | if (host == NULL) | |
426 | return NULL; | |
7d01c880 | 427 | |
0ebfff14 BH |
428 | /* Fill structure */ |
429 | host->revmap_type = revmap_type; | |
430 | host->inval_irq = inval_irq; | |
431 | host->ops = ops; | |
7d01c880 | 432 | |
0ebfff14 BH |
433 | spin_lock_irqsave(&irq_big_lock, flags); |
434 | ||
435 | /* If it's a legacy controller, check for duplicates and | |
436 | * mark it as allocated (we use irq 0 host pointer for that | |
437 | */ | |
438 | if (revmap_type == IRQ_HOST_MAP_LEGACY) { | |
439 | if (irq_map[0].host != NULL) { | |
440 | spin_unlock_irqrestore(&irq_big_lock, flags); | |
441 | /* If we are early boot, we can't free the structure, | |
442 | * too bad... | |
443 | * this will be fixed once slab is made available early | |
444 | * instead of the current cruft | |
445 | */ | |
446 | if (mem_init_done) | |
447 | kfree(host); | |
448 | return NULL; | |
449 | } | |
450 | irq_map[0].host = host; | |
451 | } | |
452 | ||
453 | list_add(&host->link, &irq_hosts); | |
454 | spin_unlock_irqrestore(&irq_big_lock, flags); | |
455 | ||
456 | /* Additional setups per revmap type */ | |
457 | switch(revmap_type) { | |
458 | case IRQ_HOST_MAP_LEGACY: | |
459 | /* 0 is always the invalid number for legacy */ | |
460 | host->inval_irq = 0; | |
461 | /* setup us as the host for all legacy interrupts */ | |
462 | for (i = 1; i < NUM_ISA_INTERRUPTS; i++) { | |
463 | irq_map[i].hwirq = 0; | |
464 | smp_wmb(); | |
465 | irq_map[i].host = host; | |
466 | smp_wmb(); | |
467 | ||
6e99e458 BH |
468 | /* Clear norequest flags */ |
469 | get_irq_desc(i)->status &= ~IRQ_NOREQUEST; | |
0ebfff14 BH |
470 | |
471 | /* Legacy flags are left to default at this point, | |
472 | * one can then use irq_create_mapping() to | |
473 | * explicitely change them | |
474 | */ | |
6e99e458 | 475 | ops->map(host, i, i); |
0ebfff14 BH |
476 | } |
477 | break; | |
478 | case IRQ_HOST_MAP_LINEAR: | |
479 | rmap = (unsigned int *)(host + 1); | |
480 | for (i = 0; i < revmap_arg; i++) | |
481 | rmap[i] = IRQ_NONE; | |
482 | host->revmap_data.linear.size = revmap_arg; | |
483 | smp_wmb(); | |
484 | host->revmap_data.linear.revmap = rmap; | |
485 | break; | |
486 | default: | |
487 | break; | |
488 | } | |
489 | ||
490 | pr_debug("irq: Allocated host of type %d @0x%p\n", revmap_type, host); | |
491 | ||
492 | return host; | |
1da177e4 LT |
493 | } |
494 | ||
0ebfff14 | 495 | struct irq_host *irq_find_host(struct device_node *node) |
1da177e4 | 496 | { |
0ebfff14 BH |
497 | struct irq_host *h, *found = NULL; |
498 | unsigned long flags; | |
499 | ||
500 | /* We might want to match the legacy controller last since | |
501 | * it might potentially be set to match all interrupts in | |
502 | * the absence of a device node. This isn't a problem so far | |
503 | * yet though... | |
504 | */ | |
505 | spin_lock_irqsave(&irq_big_lock, flags); | |
506 | list_for_each_entry(h, &irq_hosts, link) | |
507 | if (h->ops->match == NULL || h->ops->match(h, node)) { | |
508 | found = h; | |
509 | break; | |
510 | } | |
511 | spin_unlock_irqrestore(&irq_big_lock, flags); | |
512 | return found; | |
513 | } | |
514 | EXPORT_SYMBOL_GPL(irq_find_host); | |
515 | ||
516 | void irq_set_default_host(struct irq_host *host) | |
517 | { | |
518 | pr_debug("irq: Default host set to @0x%p\n", host); | |
1da177e4 | 519 | |
0ebfff14 BH |
520 | irq_default_host = host; |
521 | } | |
1da177e4 | 522 | |
0ebfff14 BH |
523 | void irq_set_virq_count(unsigned int count) |
524 | { | |
525 | pr_debug("irq: Trying to set virq count to %d\n", count); | |
fef1c772 | 526 | |
0ebfff14 BH |
527 | BUG_ON(count < NUM_ISA_INTERRUPTS); |
528 | if (count < NR_IRQS) | |
529 | irq_virq_count = count; | |
530 | } | |
531 | ||
8ec8f2e8 BH |
532 | /* radix tree not lockless safe ! we use a brlock-type mecanism |
533 | * for now, until we can use a lockless radix tree | |
534 | */ | |
535 | static void irq_radix_wrlock(unsigned long *flags) | |
536 | { | |
537 | unsigned int cpu, ok; | |
538 | ||
539 | spin_lock_irqsave(&irq_big_lock, *flags); | |
540 | irq_radix_writer = 1; | |
541 | smp_mb(); | |
542 | do { | |
543 | barrier(); | |
544 | ok = 1; | |
545 | for_each_possible_cpu(cpu) { | |
546 | if (per_cpu(irq_radix_reader, cpu)) { | |
547 | ok = 0; | |
548 | break; | |
549 | } | |
550 | } | |
551 | if (!ok) | |
552 | cpu_relax(); | |
553 | } while(!ok); | |
554 | } | |
555 | ||
556 | static void irq_radix_wrunlock(unsigned long flags) | |
557 | { | |
558 | smp_wmb(); | |
559 | irq_radix_writer = 0; | |
560 | spin_unlock_irqrestore(&irq_big_lock, flags); | |
561 | } | |
562 | ||
563 | static void irq_radix_rdlock(unsigned long *flags) | |
564 | { | |
565 | local_irq_save(*flags); | |
566 | __get_cpu_var(irq_radix_reader) = 1; | |
567 | smp_mb(); | |
568 | if (likely(irq_radix_writer == 0)) | |
569 | return; | |
570 | __get_cpu_var(irq_radix_reader) = 0; | |
571 | smp_wmb(); | |
572 | spin_lock(&irq_big_lock); | |
573 | __get_cpu_var(irq_radix_reader) = 1; | |
574 | spin_unlock(&irq_big_lock); | |
575 | } | |
576 | ||
577 | static void irq_radix_rdunlock(unsigned long flags) | |
578 | { | |
579 | __get_cpu_var(irq_radix_reader) = 0; | |
580 | local_irq_restore(flags); | |
581 | } | |
582 | ||
583 | ||
0ebfff14 | 584 | unsigned int irq_create_mapping(struct irq_host *host, |
6e99e458 | 585 | irq_hw_number_t hwirq) |
0ebfff14 BH |
586 | { |
587 | unsigned int virq, hint; | |
588 | ||
6e99e458 | 589 | pr_debug("irq: irq_create_mapping(0x%p, 0x%lx)\n", host, hwirq); |
0ebfff14 BH |
590 | |
591 | /* Look for default host if nececssary */ | |
592 | if (host == NULL) | |
593 | host = irq_default_host; | |
594 | if (host == NULL) { | |
595 | printk(KERN_WARNING "irq_create_mapping called for" | |
596 | " NULL host, hwirq=%lx\n", hwirq); | |
597 | WARN_ON(1); | |
598 | return NO_IRQ; | |
1da177e4 | 599 | } |
0ebfff14 | 600 | pr_debug("irq: -> using host @%p\n", host); |
1da177e4 | 601 | |
0ebfff14 BH |
602 | /* Check if mapping already exist, if it does, call |
603 | * host->ops->map() to update the flags | |
604 | */ | |
605 | virq = irq_find_mapping(host, hwirq); | |
606 | if (virq != IRQ_NONE) { | |
acc900ef IK |
607 | if (host->ops->remap) |
608 | host->ops->remap(host, virq, hwirq); | |
0ebfff14 | 609 | pr_debug("irq: -> existing mapping on virq %d\n", virq); |
0ebfff14 | 610 | return virq; |
1da177e4 LT |
611 | } |
612 | ||
0ebfff14 BH |
613 | /* Get a virtual interrupt number */ |
614 | if (host->revmap_type == IRQ_HOST_MAP_LEGACY) { | |
615 | /* Handle legacy */ | |
616 | virq = (unsigned int)hwirq; | |
617 | if (virq == 0 || virq >= NUM_ISA_INTERRUPTS) | |
618 | return NO_IRQ; | |
619 | return virq; | |
620 | } else { | |
621 | /* Allocate a virtual interrupt number */ | |
622 | hint = hwirq % irq_virq_count; | |
623 | virq = irq_alloc_virt(host, 1, hint); | |
624 | if (virq == NO_IRQ) { | |
625 | pr_debug("irq: -> virq allocation failed\n"); | |
626 | return NO_IRQ; | |
627 | } | |
628 | } | |
629 | pr_debug("irq: -> obtained virq %d\n", virq); | |
630 | ||
6e99e458 BH |
631 | /* Clear IRQ_NOREQUEST flag */ |
632 | get_irq_desc(virq)->status &= ~IRQ_NOREQUEST; | |
0ebfff14 BH |
633 | |
634 | /* map it */ | |
6e99e458 BH |
635 | smp_wmb(); |
636 | irq_map[virq].hwirq = hwirq; | |
637 | smp_mb(); | |
638 | if (host->ops->map(host, virq, hwirq)) { | |
0ebfff14 BH |
639 | pr_debug("irq: -> mapping failed, freeing\n"); |
640 | irq_free_virt(virq, 1); | |
641 | return NO_IRQ; | |
642 | } | |
1da177e4 | 643 | return virq; |
0ebfff14 BH |
644 | } |
645 | EXPORT_SYMBOL_GPL(irq_create_mapping); | |
646 | ||
unsigned int irq_create_of_mapping(struct device_node *controller,
                                   u32 *intspec, unsigned int intsize)
{
        struct irq_host *host;
        irq_hw_number_t hwirq;
        unsigned int type = IRQ_TYPE_NONE;
        unsigned int virq;

        if (controller == NULL)
                host = irq_default_host;
        else
                host = irq_find_host(controller);
        if (host == NULL) {
                printk(KERN_WARNING "irq: no irq host found for %s !\n",
                       controller->full_name);
                return NO_IRQ;
        }

        /* If host has no translation, then we assume interrupt line */
        if (host->ops->xlate == NULL)
                hwirq = intspec[0];
        else {
                if (host->ops->xlate(host, controller, intspec, intsize,
                                     &hwirq, &type))
                        return NO_IRQ;
        }

        /* Create mapping */
        virq = irq_create_mapping(host, hwirq);
        if (virq == NO_IRQ)
                return virq;

        /* Set type if specified and different than the current one */
        if (type != IRQ_TYPE_NONE &&
            type != (get_irq_desc(virq)->status & IRQF_TRIGGER_MASK))
                set_irq_type(virq, type);
        return virq;
}
EXPORT_SYMBOL_GPL(irq_create_of_mapping);

unsigned int irq_of_parse_and_map(struct device_node *dev, int index)
{
        struct of_irq oirq;

        if (of_irq_map_one(dev, index, &oirq))
                return NO_IRQ;

        return irq_create_of_mapping(oirq.controller, oirq.specifier,
                                     oirq.size);
}
EXPORT_SYMBOL_GPL(irq_of_parse_and_map);

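/*
 * Typical driver usage (a sketch only; "np", "my_isr", "dev" stand in for
 * the driver's own device node, handler and cookie):
 *
 *	unsigned int virq = irq_of_parse_and_map(np, 0);
 *
 *	if (virq == NO_IRQ)
 *		return -ENODEV;
 *	if (request_irq(virq, my_isr, 0, "mydev", dev))
 *		return -EBUSY;
 */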
void irq_dispose_mapping(unsigned int virq)
{
        struct irq_host *host;
        irq_hw_number_t hwirq;
        unsigned long flags;

        if (virq == NO_IRQ)
                return;

        host = irq_map[virq].host;
        WARN_ON(host == NULL);
        if (host == NULL)
                return;

        /* Never unmap legacy interrupts */
        if (host->revmap_type == IRQ_HOST_MAP_LEGACY)
                return;

        /* remove chip and handler */
        set_irq_chip_and_handler(virq, NULL, NULL);

        /* Make sure it's completed */
        synchronize_irq(virq);

        /* Tell the PIC about it */
        if (host->ops->unmap)
                host->ops->unmap(host, virq);
        smp_mb();

        /* Clear reverse map */
        hwirq = irq_map[virq].hwirq;
        switch(host->revmap_type) {
        case IRQ_HOST_MAP_LINEAR:
                if (hwirq < host->revmap_data.linear.size)
                        host->revmap_data.linear.revmap[hwirq] = IRQ_NONE;
                break;
        case IRQ_HOST_MAP_TREE:
                /* Check if radix tree allocated yet */
                if (host->revmap_data.tree.gfp_mask == 0)
                        break;
                irq_radix_wrlock(&flags);
                radix_tree_delete(&host->revmap_data.tree, hwirq);
                irq_radix_wrunlock(flags);
                break;
        }

        /* Destroy map */
        smp_mb();
        irq_map[virq].hwirq = host->inval_irq;

        /* Set some flags */
        get_irq_desc(virq)->status |= IRQ_NOREQUEST;

        /* Free it */
        irq_free_virt(virq, 1);
}
EXPORT_SYMBOL_GPL(irq_dispose_mapping);

unsigned int irq_find_mapping(struct irq_host *host,
                              irq_hw_number_t hwirq)
{
        unsigned int i;
        unsigned int hint = hwirq % irq_virq_count;

        /* Look for default host if necessary */
        if (host == NULL)
                host = irq_default_host;
        if (host == NULL)
                return NO_IRQ;

        /* legacy -> bail early */
        if (host->revmap_type == IRQ_HOST_MAP_LEGACY)
                return hwirq;

        /* Slow path does a linear search of the map */
        if (hint < NUM_ISA_INTERRUPTS)
                hint = NUM_ISA_INTERRUPTS;
        i = hint;
        do {
                if (irq_map[i].host == host &&
                    irq_map[i].hwirq == hwirq)
                        return i;
                i++;
                if (i >= irq_virq_count)
                        i = NUM_ISA_INTERRUPTS;
        } while(i != hint);
        return NO_IRQ;
}
EXPORT_SYMBOL_GPL(irq_find_mapping);


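/*
 * Fast reverse map for radix-tree hosts: look the hwirq up in the tree,
 * falling back to the linear search (and populating the tree on a miss)
 * until the tree has been initialised late in boot.
 */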
unsigned int irq_radix_revmap(struct irq_host *host,
                              irq_hw_number_t hwirq)
{
        struct radix_tree_root *tree;
        struct irq_map_entry *ptr;
        unsigned int virq;
        unsigned long flags;

        WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE);

        /* Check if the radix tree exists yet. We test the value of
         * the gfp_mask for that. Sneaky but saves another int in the
         * structure. If not, we fall back to slow mode.
         */
        tree = &host->revmap_data.tree;
        if (tree->gfp_mask == 0)
                return irq_find_mapping(host, hwirq);

        /* Now try to resolve */
        irq_radix_rdlock(&flags);
        ptr = radix_tree_lookup(tree, hwirq);
        irq_radix_rdunlock(flags);

        /* Found it, return */
        if (ptr) {
                virq = ptr - irq_map;
                return virq;
        }

        /* If not there, try to insert it */
        virq = irq_find_mapping(host, hwirq);
        if (virq != NO_IRQ) {
                irq_radix_wrlock(&flags);
                radix_tree_insert(tree, hwirq, &irq_map[virq]);
                irq_radix_wrunlock(flags);
        }
        return virq;
}

829 | unsigned int irq_linear_revmap(struct irq_host *host, |
830 | irq_hw_number_t hwirq) | |
c6622f63 | 831 | { |
0ebfff14 | 832 | unsigned int *revmap; |
c6622f63 | 833 | |
0ebfff14 BH |
834 | WARN_ON(host->revmap_type != IRQ_HOST_MAP_LINEAR); |
835 | ||
836 | /* Check revmap bounds */ | |
837 | if (unlikely(hwirq >= host->revmap_data.linear.size)) | |
838 | return irq_find_mapping(host, hwirq); | |
839 | ||
840 | /* Check if revmap was allocated */ | |
841 | revmap = host->revmap_data.linear.revmap; | |
842 | if (unlikely(revmap == NULL)) | |
843 | return irq_find_mapping(host, hwirq); | |
844 | ||
845 | /* Fill up revmap with slow path if no mapping found */ | |
846 | if (unlikely(revmap[hwirq] == NO_IRQ)) | |
847 | revmap[hwirq] = irq_find_mapping(host, hwirq); | |
848 | ||
849 | return revmap[hwirq]; | |
c6622f63 PM |
850 | } |
851 | ||
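/*
 * Allocate 'count' consecutive virq numbers from the non-legacy range,
 * preferring 'hint' for single allocations; returns NO_IRQ when the space
 * is exhausted.  irq_big_lock is taken internally.
 */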
unsigned int irq_alloc_virt(struct irq_host *host,
                            unsigned int count,
                            unsigned int hint)
{
        unsigned long flags;
        unsigned int i, j, found = NO_IRQ;

        if (count == 0 || count > (irq_virq_count - NUM_ISA_INTERRUPTS))
                return NO_IRQ;

        spin_lock_irqsave(&irq_big_lock, flags);

        /* Use hint for 1 interrupt if any */
        if (count == 1 && hint >= NUM_ISA_INTERRUPTS &&
            hint < irq_virq_count && irq_map[hint].host == NULL) {
                found = hint;
                goto hint_found;
        }

        /* Look for count consecutive numbers in the allocatable
         * (non-legacy) space
         */
        for (i = NUM_ISA_INTERRUPTS, j = 0; i < irq_virq_count; i++) {
                if (irq_map[i].host != NULL)
                        j = 0;
                else
                        j++;

                if (j == count) {
                        found = i - count + 1;
                        break;
                }
        }
        if (found == NO_IRQ) {
                spin_unlock_irqrestore(&irq_big_lock, flags);
                return NO_IRQ;
        }
 hint_found:
        for (i = found; i < (found + count); i++) {
                irq_map[i].hwirq = host->inval_irq;
                smp_wmb();
                irq_map[i].host = host;
        }
        spin_unlock_irqrestore(&irq_big_lock, flags);
        return found;
}

void irq_free_virt(unsigned int virq, unsigned int count)
{
        unsigned long flags;
        unsigned int i;

        WARN_ON(virq < NUM_ISA_INTERRUPTS);
        WARN_ON(count == 0 || (virq + count) > irq_virq_count);

        spin_lock_irqsave(&irq_big_lock, flags);
        for (i = virq; i < (virq + count); i++) {
                struct irq_host *host;

                if (i < NUM_ISA_INTERRUPTS ||
                    (virq + count) > irq_virq_count)
                        continue;

                host = irq_map[i].host;
                irq_map[i].hwirq = host->inval_irq;
                smp_wmb();
                irq_map[i].host = NULL;
        }
        spin_unlock_irqrestore(&irq_big_lock, flags);
}

void irq_early_init(void)
{
        unsigned int i;

        for (i = 0; i < NR_IRQS; i++)
                get_irq_desc(i)->status |= IRQ_NOREQUEST;
}

/* We need to create the radix trees late */
static int irq_late_init(void)
{
        struct irq_host *h;
        unsigned long flags;

        irq_radix_wrlock(&flags);
        list_for_each_entry(h, &irq_hosts, link) {
                if (h->revmap_type == IRQ_HOST_MAP_TREE)
                        INIT_RADIX_TREE(&h->revmap_data.tree, GFP_ATOMIC);
        }
        irq_radix_wrunlock(flags);

        return 0;
}
arch_initcall(irq_late_init);

#endif /* CONFIG_PPC_MERGE */

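/*
 * PCI MSI glue: forward enable/disable to the platform (ppc_md) callbacks
 * when they exist, and stub out the remaining generic MSI entry points.
 */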
#ifdef CONFIG_PCI_MSI
int pci_enable_msi(struct pci_dev * pdev)
{
        if (ppc_md.enable_msi)
                return ppc_md.enable_msi(pdev);
        else
                return -1;
}
EXPORT_SYMBOL(pci_enable_msi);

void pci_disable_msi(struct pci_dev * pdev)
{
        if (ppc_md.disable_msi)
                ppc_md.disable_msi(pdev);
}
EXPORT_SYMBOL(pci_disable_msi);

void pci_scan_msi_device(struct pci_dev *dev) {}
int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec) {return -1;}
void pci_disable_msix(struct pci_dev *dev) {}
void msi_remove_pci_irq_vectors(struct pci_dev *dev) {}
void disable_msi_mode(struct pci_dev *dev, int pos, int type) {}
void pci_no_msi(void) {}
EXPORT_SYMBOL(pci_enable_msix);
EXPORT_SYMBOL(pci_disable_msix);

#endif

#ifdef CONFIG_PPC64
static int __init setup_noirqdistrib(char *str)
{
        distribute_irqs = 0;
        return 1;
}

__setup("noirqdistrib", setup_noirqdistrib);
#endif /* CONFIG_PPC64 */