/*
 * Derived from arch/i386/kernel/irq.c
 *   Copyright (C) 1992 Linus Torvalds
 * Adapted from arch/i386 by Gary Thomas
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Updated and modified by Cort Dougan <cort@fsmlabs.com>
 *   Copyright (C) 1996-2001 Cort Dougan
 * Adapted for Power Macintosh by Paul Mackerras
 *   Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQs should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be cheaper.
 *
 * The MPC8xx has an interrupt mask in the SIU. If a bit is set, the
 * interrupt is _enabled_. As expected, IRQ0 is bit 0 in the 32-bit
 * mask register (of which only 16 are defined), hence the weird shifting
 * and complement of the cached_irq_mask. I want to be able to stuff
 * this right into the SIU SMASK register.
 * Many of the prep/chrp functions are conditionally compiled on CONFIG_8xx
 * to reduce code space and undefined function references.
 */
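
/*
 * Illustrative sketch only (not code from this file): under the MPC8xx
 * scheme described above, enabling IRQn means setting its bit in the
 * cached mask and stuffing the cache straight into the SIU mask register.
 * The register/variable names and the MSB-first bit order below are
 * assumptions for illustration, not definitions from this file:
 *
 *	cached_irq_mask |= 1u << (31 - irq);	// bit 0 is the MSB on PowerPC
 *	out_be32(&siu->sc_simask, cached_irq_mask);
 */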

#include <linux/export.h>
#include <linux/threads.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/profile.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/mutex.h>
#include <linux/bootmem.h>
#include <linux/pci.h>
#include <linux/debugfs.h>
#include <linux/of_irq.h>

#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/cache.h>
#include <asm/ptrace.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/lv1call.h>

#define CREATE_TRACE_POINTS
#include <asm/trace.h>

DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

int __irq_offset_value;

#ifdef CONFIG_PPC32
EXPORT_SYMBOL(__irq_offset_value);
atomic_t ppc_n_lost_interrupts;

#ifdef CONFIG_TAU_INT
extern int tau_initialized;
extern int tau_interrupts(int);
#endif
#endif /* CONFIG_PPC32 */

#ifdef CONFIG_PPC64

#ifndef CONFIG_SPARSE_IRQ
EXPORT_SYMBOL(irq_desc);
#endif

int distribute_irqs = 1;

static inline notrace unsigned long get_hard_enabled(void)
{
	unsigned long enabled;

	__asm__ __volatile__("lbz %0,%1(13)"
	: "=r" (enabled) : "i" (offsetof(struct paca_struct, hard_enabled)));

	return enabled;
}

static inline notrace void set_soft_enabled(unsigned long enable)
{
	__asm__ __volatile__("stb %0,%1(13)"
	: : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled)));
}

static inline notrace void decrementer_check_overflow(void)
{
	u64 now = get_tb_or_rtc();
	u64 *next_tb = &__get_cpu_var(decrementers_next_tb);

	if (now >= *next_tb)
		set_dec(1);
}

notrace void arch_local_irq_restore(unsigned long en)
{
	/*
	 * get_paca()->soft_enabled = en;
	 * Is it ever valid to use local_irq_restore(0) when soft_enabled is 1?
	 * That was allowed before, and in such a case we do need to take care
	 * that gcc will set soft_enabled directly via r13, not choose to use
	 * an intermediate register, lest we're preempted to a different cpu.
	 */
	set_soft_enabled(en);
	if (!en)
		return;

#ifdef CONFIG_PPC_STD_MMU_64
	if (firmware_has_feature(FW_FEATURE_ISERIES)) {
		/*
		 * Do we need to disable preemption here? Not really: in the
		 * unlikely event that we're preempted to a different cpu in
		 * between getting r13, loading its lppaca_ptr, and loading
		 * its any_int, we might call iseries_handle_interrupts without
		 * an interrupt pending on the new cpu, but that's no disaster,
		 * is it? And the business of preempting us off the old cpu
		 * would itself involve a local_irq_restore which handles the
		 * interrupt to that cpu.
		 *
		 * But use "local_paca->lppaca_ptr" instead of "get_lppaca()"
		 * to avoid any preemption checking added into get_paca().
		 */
		if (local_paca->lppaca_ptr->int_dword.any_int)
			iseries_handle_interrupts();
	}
#endif /* CONFIG_PPC_STD_MMU_64 */

	/*
	 * if (get_paca()->hard_enabled) return;
	 * But again we need to take care that gcc gets hard_enabled directly
	 * via r13, not choose to use an intermediate register, lest we're
	 * preempted to a different cpu in between the two instructions.
	 */
	if (get_hard_enabled())
		return;

	/*
	 * Need to hard-enable interrupts here. Since currently disabled,
	 * no need to take further asm precautions against preemption; but
	 * use local_paca instead of get_paca() to avoid preemption checking.
	 */
	local_paca->hard_enabled = en;

	/*
	 * Trigger the decrementer if we have a pending event. Some processors
	 * only trigger on edge transitions of the sign bit. We might also
	 * have disabled interrupts long enough that the decrementer wrapped
	 * to positive.
	 */
	decrementer_check_overflow();

	/*
	 * Force the delivery of pending soft-disabled interrupts on PS3.
	 * Any HV call will have this side effect.
	 */
	if (firmware_has_feature(FW_FEATURE_PS3_LV1)) {
		u64 tmp, tmp2;
		lv1_get_version_info(&tmp, &tmp2);
	}

	__hard_irq_enable();
}
EXPORT_SYMBOL(arch_local_irq_restore);
#endif /* CONFIG_PPC64 */
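
/*
 * Usage sketch (standard kernel API, shown here for context): the function
 * above is the arch hook that runs when code re-enables interrupts through
 * the generic local_irq_save()/local_irq_restore() pair:
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);		// soft-disable: just clears soft_enabled
 *	// ... critical section ...
 *	local_irq_restore(flags);	// ends up in arch_local_irq_restore()
 */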

int arch_show_interrupts(struct seq_file *p, int prec)
{
	int j;

#if defined(CONFIG_PPC32) && defined(CONFIG_TAU_INT)
	if (tau_initialized) {
		seq_printf(p, "%*s: ", prec, "TAU");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", tau_interrupts(j));
		seq_puts(p, "  PowerPC Thermal Assist (cpu temp)\n");
	}
#endif /* CONFIG_PPC32 && CONFIG_TAU_INT */

	seq_printf(p, "%*s: ", prec, "LOC");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs);
	seq_printf(p, "  Local timer interrupts\n");

	seq_printf(p, "%*s: ", prec, "SPU");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).spurious_irqs);
	seq_printf(p, "  Spurious interrupts\n");

	seq_printf(p, "%*s: ", prec, "CNT");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).pmu_irqs);
	seq_printf(p, "  Performance monitoring interrupts\n");

	seq_printf(p, "%*s: ", prec, "MCE");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).mce_exceptions);
	seq_printf(p, "  Machine check exceptions\n");

	return 0;
}

u64 arch_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = per_cpu(irq_stat, cpu).timer_irqs;

	sum += per_cpu(irq_stat, cpu).pmu_irqs;
	sum += per_cpu(irq_stat, cpu).mce_exceptions;
	sum += per_cpu(irq_stat, cpu).spurious_irqs;

	return sum;
}

#ifdef CONFIG_HOTPLUG_CPU
void migrate_irqs(void)
{
	struct irq_desc *desc;
	unsigned int irq;
	static int warned;
	cpumask_var_t mask;
	const struct cpumask *map = cpu_online_mask;

	alloc_cpumask_var(&mask, GFP_KERNEL);

	for_each_irq(irq) {
		struct irq_data *data;
		struct irq_chip *chip;

		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		data = irq_desc_get_irq_data(desc);
		if (irqd_is_per_cpu(data))
			continue;

		chip = irq_data_get_irq_chip(data);

		cpumask_and(mask, data->affinity, map);
		if (cpumask_any(mask) >= nr_cpu_ids) {
			printk("Breaking affinity for irq %i\n", irq);
			cpumask_copy(mask, map);
		}
		if (chip->irq_set_affinity)
			chip->irq_set_affinity(data, mask, true);
		else if (desc->action && !(warned++))
			printk("Cannot set affinity for irq %i\n", irq);
	}

	free_cpumask_var(mask);

	local_irq_enable();
	mdelay(1);
	local_irq_disable();
}
#endif /* CONFIG_HOTPLUG_CPU */

static inline void handle_one_irq(unsigned int irq)
{
	struct thread_info *curtp, *irqtp;
	unsigned long saved_sp_limit;
	struct irq_desc *desc;

	desc = irq_to_desc(irq);
	if (!desc)
		return;

	/* Switch to the irq stack to handle this */
	curtp = current_thread_info();
	irqtp = hardirq_ctx[smp_processor_id()];

	if (curtp == irqtp) {
		/* We're already on the irq stack, just handle it */
		desc->handle_irq(irq, desc);
		return;
	}

	saved_sp_limit = current->thread.ksp_limit;

	irqtp->task = curtp->task;
	irqtp->flags = 0;

	/* Copy the softirq bits in preempt_count so that the
	 * softirq checks work in the hardirq context. */
	irqtp->preempt_count = (irqtp->preempt_count & ~SOFTIRQ_MASK) |
			       (curtp->preempt_count & SOFTIRQ_MASK);

	current->thread.ksp_limit = (unsigned long)irqtp +
		_ALIGN_UP(sizeof(struct thread_info), 16);

	call_handle_irq(irq, desc, irqtp, desc->handle_irq);
	current->thread.ksp_limit = saved_sp_limit;
	irqtp->task = NULL;

	/* Set any flag that may have been set on the
	 * alternate stack
	 */
	if (irqtp->flags)
		set_bits(irqtp->flags, &curtp->flags);
}

static inline void check_stack_overflow(void)
{
#ifdef CONFIG_DEBUG_STACKOVERFLOW
	long sp;

	sp = __get_SP() & (THREAD_SIZE-1);

	/* check for stack overflow: is there less than 2KB free? */
	if (unlikely(sp < (sizeof(struct thread_info) + 2048))) {
		printk("do_IRQ: stack overflow: %ld\n",
			sp - sizeof(struct thread_info));
		dump_stack();
	}
#endif
}

void do_IRQ(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	unsigned int irq;

	trace_irq_entry(regs);

	irq_enter();

	check_stack_overflow();

	irq = ppc_md.get_irq();

	if (irq != NO_IRQ && irq != NO_IRQ_IGNORE)
		handle_one_irq(irq);
	else if (irq != NO_IRQ_IGNORE)
		__get_cpu_var(irq_stat).spurious_irqs++;

	irq_exit();
	set_irq_regs(old_regs);

#ifdef CONFIG_PPC_ISERIES
	if (firmware_has_feature(FW_FEATURE_ISERIES) &&
			get_lppaca()->int_dword.fields.decr_int) {
		get_lppaca()->int_dword.fields.decr_int = 0;
		/* Signal a fake decrementer interrupt */
		timer_interrupt(regs);
	}
#endif

	trace_irq_exit(regs);
}

void __init init_IRQ(void)
{
	if (ppc_md.init_IRQ)
		ppc_md.init_IRQ();

	exc_lvl_ctx_init();

	irq_ctx_init();
}

#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
struct thread_info *critirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *dbgirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *mcheckirq_ctx[NR_CPUS] __read_mostly;

void exc_lvl_ctx_init(void)
{
	struct thread_info *tp;
	int i, cpu_nr;

	for_each_possible_cpu(i) {
#ifdef CONFIG_PPC64
		cpu_nr = i;
#else
		cpu_nr = get_hard_smp_processor_id(i);
#endif
		memset((void *)critirq_ctx[cpu_nr], 0, THREAD_SIZE);
		tp = critirq_ctx[cpu_nr];
		tp->cpu = cpu_nr;
		tp->preempt_count = 0;

#ifdef CONFIG_BOOKE
		memset((void *)dbgirq_ctx[cpu_nr], 0, THREAD_SIZE);
		tp = dbgirq_ctx[cpu_nr];
		tp->cpu = cpu_nr;
		tp->preempt_count = 0;

		memset((void *)mcheckirq_ctx[cpu_nr], 0, THREAD_SIZE);
		tp = mcheckirq_ctx[cpu_nr];
		tp->cpu = cpu_nr;
		tp->preempt_count = HARDIRQ_OFFSET;
#endif
	}
}
#endif

struct thread_info *softirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *hardirq_ctx[NR_CPUS] __read_mostly;

void irq_ctx_init(void)
{
	struct thread_info *tp;
	int i;

	for_each_possible_cpu(i) {
		memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
		tp = softirq_ctx[i];
		tp->cpu = i;
		tp->preempt_count = 0;

		memset((void *)hardirq_ctx[i], 0, THREAD_SIZE);
		tp = hardirq_ctx[i];
		tp->cpu = i;
		tp->preempt_count = HARDIRQ_OFFSET;
	}
}

static inline void do_softirq_onstack(void)
{
	struct thread_info *curtp, *irqtp;
	unsigned long saved_sp_limit = current->thread.ksp_limit;

	curtp = current_thread_info();
	irqtp = softirq_ctx[smp_processor_id()];
	irqtp->task = curtp->task;
	irqtp->flags = 0;

	current->thread.ksp_limit = (unsigned long)irqtp +
		_ALIGN_UP(sizeof(struct thread_info), 16);
	call_do_softirq(irqtp);
	current->thread.ksp_limit = saved_sp_limit;
	irqtp->task = NULL;

	/* Set any flag that may have been set on the
	 * alternate stack
	 */
	if (irqtp->flags)
		set_bits(irqtp->flags, &curtp->flags);
}

void do_softirq(void)
{
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);

	if (local_softirq_pending())
		do_softirq_onstack();

	local_irq_restore(flags);
}

/*
 * IRQ controller and virtual interrupts
 */

static LIST_HEAD(irq_domain_list);
static DEFINE_MUTEX(irq_domain_mutex);
static DEFINE_MUTEX(revmap_trees_mutex);
static unsigned int irq_virq_count = NR_IRQS;
static struct irq_domain *irq_default_host;

irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
{
	return d->hwirq;
}
EXPORT_SYMBOL_GPL(irqd_to_hwirq);

irq_hw_number_t virq_to_hw(unsigned int virq)
{
	struct irq_data *irq_data = irq_get_irq_data(virq);
	return WARN_ON(!irq_data) ? 0 : irq_data->hwirq;
}
EXPORT_SYMBOL_GPL(virq_to_hw);

bool virq_is_host(unsigned int virq, struct irq_domain *host)
{
	struct irq_data *irq_data = irq_get_irq_data(virq);
	return irq_data ? irq_data->domain == host : false;
}
EXPORT_SYMBOL_GPL(virq_is_host);

static int default_irq_host_match(struct irq_domain *h, struct device_node *np)
{
	return h->of_node != NULL && h->of_node == np;
}

struct irq_domain *irq_alloc_host(struct device_node *of_node,
				unsigned int revmap_type,
				unsigned int revmap_arg,
				struct irq_domain_ops *ops,
				irq_hw_number_t inval_irq)
{
	struct irq_domain *host, *h;
	unsigned int size = sizeof(struct irq_domain);
	unsigned int *rmap;
	unsigned int i;

	/* Allocate structure and revmap table if using linear mapping */
	if (revmap_type == IRQ_DOMAIN_MAP_LINEAR)
		size += revmap_arg * sizeof(unsigned int);
	host = kzalloc(size, GFP_KERNEL);
	if (host == NULL)
		return NULL;

	/* Fill structure */
	host->revmap_type = revmap_type;
	host->inval_irq = inval_irq;
	host->ops = ops;
	host->of_node = of_node_get(of_node);

	if (host->ops->match == NULL)
		host->ops->match = default_irq_host_match;

	mutex_lock(&irq_domain_mutex);
	/* Make sure only one legacy controller can be created */
	if (revmap_type == IRQ_DOMAIN_MAP_LEGACY) {
		list_for_each_entry(h, &irq_domain_list, link) {
			if (WARN_ON(h->revmap_type == IRQ_DOMAIN_MAP_LEGACY)) {
				mutex_unlock(&irq_domain_mutex);
				of_node_put(host->of_node);
				kfree(host);
				return NULL;
			}
		}
	}
	list_add(&host->link, &irq_domain_list);
	mutex_unlock(&irq_domain_mutex);

	/* Additional setups per revmap type */
	switch(revmap_type) {
	case IRQ_DOMAIN_MAP_LEGACY:
		/* 0 is always the invalid number for legacy */
		host->inval_irq = 0;
		/* setup us as the host for all legacy interrupts */
		for (i = 1; i < NUM_ISA_INTERRUPTS; i++) {
			struct irq_data *irq_data = irq_get_irq_data(i);

			irq_data->domain = host;
			irq_data->hwirq = i;

			/* Legacy flags are left to default at this point,
			 * one can then use irq_create_mapping() to
			 * explicitly change them
			 */
			ops->map(host, i, i);

			/* Clear norequest flags */
			irq_clear_status_flags(i, IRQ_NOREQUEST);
		}
		break;
	case IRQ_DOMAIN_MAP_LINEAR:
		rmap = (unsigned int *)(host + 1);
		for (i = 0; i < revmap_arg; i++)
			rmap[i] = NO_IRQ;
		host->revmap_data.linear.size = revmap_arg;
		host->revmap_data.linear.revmap = rmap;
		break;
	case IRQ_DOMAIN_MAP_TREE:
		INIT_RADIX_TREE(&host->revmap_data.tree, GFP_KERNEL);
		break;
	default:
		break;
	}

	pr_debug("irq: Allocated host of type %d @0x%p\n", revmap_type, host);

	return host;
}

struct irq_domain *irq_find_host(struct device_node *node)
{
	struct irq_domain *h, *found = NULL;

	/* We might want to match the legacy controller last since
	 * it might potentially be set to match all interrupts in
	 * the absence of a device node. This isn't a problem so far
	 * yet though...
	 */
	mutex_lock(&irq_domain_mutex);
	list_for_each_entry(h, &irq_domain_list, link)
		if (h->ops->match(h, node)) {
			found = h;
			break;
		}
	mutex_unlock(&irq_domain_mutex);
	return found;
}
EXPORT_SYMBOL_GPL(irq_find_host);

void irq_set_default_host(struct irq_domain *host)
{
	pr_debug("irq: Default host set to @0x%p\n", host);

	irq_default_host = host;
}

void irq_set_virq_count(unsigned int count)
{
	pr_debug("irq: Trying to set virq count to %d\n", count);

	BUG_ON(count < NUM_ISA_INTERRUPTS);
	if (count < NR_IRQS)
		irq_virq_count = count;
}

static int irq_setup_virq(struct irq_domain *host, unsigned int virq,
			  irq_hw_number_t hwirq)
{
	struct irq_data *irq_data = irq_get_irq_data(virq);

	irq_data->hwirq = hwirq;
	irq_data->domain = host;
	if (host->ops->map(host, virq, hwirq)) {
		pr_debug("irq: -> mapping failed, freeing\n");
		irq_data->domain = NULL;
		irq_data->hwirq = 0;
		return -1;
	}

	irq_clear_status_flags(virq, IRQ_NOREQUEST);

	return 0;
}

unsigned int irq_create_direct_mapping(struct irq_domain *host)
{
	unsigned int virq;

	if (host == NULL)
		host = irq_default_host;

	BUG_ON(host == NULL);
	WARN_ON(host->revmap_type != IRQ_DOMAIN_MAP_NOMAP);

	virq = irq_alloc_desc_from(1, 0);
	if (virq == NO_IRQ) {
		pr_debug("irq: create_direct virq allocation failed\n");
		return NO_IRQ;
	}
	if (virq >= irq_virq_count) {
		pr_err("ERROR: no free irqs available below %i maximum\n",
			irq_virq_count);
		irq_free_desc(virq);
		return NO_IRQ;
	}

	pr_debug("irq: create_direct obtained virq %d\n", virq);

	if (irq_setup_virq(host, virq, virq)) {
		irq_free_desc(virq);
		return NO_IRQ;
	}

	return virq;
}

unsigned int irq_create_mapping(struct irq_domain *host,
				irq_hw_number_t hwirq)
{
	unsigned int virq, hint;

	pr_debug("irq: irq_create_mapping(0x%p, 0x%lx)\n", host, hwirq);

	/* Look for default host if necessary */
	if (host == NULL)
		host = irq_default_host;
	if (host == NULL) {
		printk(KERN_WARNING "irq_create_mapping called for"
		       " NULL host, hwirq=%lx\n", hwirq);
		WARN_ON(1);
		return NO_IRQ;
	}
	pr_debug("irq: -> using host @%p\n", host);

	/* Check if mapping already exists */
	virq = irq_find_mapping(host, hwirq);
	if (virq != NO_IRQ) {
		pr_debug("irq: -> existing mapping on virq %d\n", virq);
		return virq;
	}

	/* Get a virtual interrupt number */
	if (host->revmap_type == IRQ_DOMAIN_MAP_LEGACY) {
		/* Handle legacy */
		virq = (unsigned int)hwirq;
		if (virq == 0 || virq >= NUM_ISA_INTERRUPTS)
			return NO_IRQ;
		return virq;
	} else {
		/* Allocate a virtual interrupt number */
		hint = hwirq % irq_virq_count;
		if (hint == 0)
			hint = 1;
		virq = irq_alloc_desc_from(hint, 0);
		if (virq == NO_IRQ)
			virq = irq_alloc_desc_from(1, 0);
		if (virq == NO_IRQ) {
			pr_debug("irq: -> virq allocation failed\n");
			return NO_IRQ;
		}
	}

	if (irq_setup_virq(host, virq, hwirq)) {
		if (host->revmap_type != IRQ_DOMAIN_MAP_LEGACY)
			irq_free_desc(virq);
		return NO_IRQ;
	}

	pr_debug("irq: irq %lu on host %s mapped to virtual irq %u\n",
		hwirq, host->of_node ? host->of_node->full_name : "null", virq);

	return virq;
}
EXPORT_SYMBOL_GPL(irq_create_mapping);

unsigned int irq_create_of_mapping(struct device_node *controller,
				   const u32 *intspec, unsigned int intsize)
{
	struct irq_domain *host;
	irq_hw_number_t hwirq;
	unsigned int type = IRQ_TYPE_NONE;
	unsigned int virq;

	if (controller == NULL)
		host = irq_default_host;
	else
		host = irq_find_host(controller);
	if (host == NULL) {
		printk(KERN_WARNING "irq: no irq host found for %s !\n",
		       controller->full_name);
		return NO_IRQ;
	}

	/* If host has no translation, then we assume interrupt line */
	if (host->ops->xlate == NULL)
		hwirq = intspec[0];
	else {
		if (host->ops->xlate(host, controller, intspec, intsize,
				     &hwirq, &type))
			return NO_IRQ;
	}

	/* Create mapping */
	virq = irq_create_mapping(host, hwirq);
	if (virq == NO_IRQ)
		return virq;

	/* Set type if specified and different than the current one */
	if (type != IRQ_TYPE_NONE &&
	    type != (irqd_get_trigger_type(irq_get_irq_data(virq))))
		irq_set_irq_type(virq, type);
	return virq;
}
EXPORT_SYMBOL_GPL(irq_create_of_mapping);

void irq_dispose_mapping(unsigned int virq)
{
	struct irq_data *irq_data = irq_get_irq_data(virq);
	struct irq_domain *host;
	irq_hw_number_t hwirq;

	if (virq == NO_IRQ || !irq_data)
		return;

	host = irq_data->domain;
	if (WARN_ON(host == NULL))
		return;

	/* Never unmap legacy interrupts */
	if (host->revmap_type == IRQ_DOMAIN_MAP_LEGACY)
		return;

	irq_set_status_flags(virq, IRQ_NOREQUEST);

	/* remove chip and handler */
	irq_set_chip_and_handler(virq, NULL, NULL);

	/* Make sure it's completed */
	synchronize_irq(virq);

	/* Tell the PIC about it */
	if (host->ops->unmap)
		host->ops->unmap(host, virq);
	smp_mb();

	/* Clear reverse map */
	hwirq = irq_data->hwirq;
	switch(host->revmap_type) {
	case IRQ_DOMAIN_MAP_LINEAR:
		if (hwirq < host->revmap_data.linear.size)
			host->revmap_data.linear.revmap[hwirq] = NO_IRQ;
		break;
	case IRQ_DOMAIN_MAP_TREE:
		mutex_lock(&revmap_trees_mutex);
		radix_tree_delete(&host->revmap_data.tree, hwirq);
		mutex_unlock(&revmap_trees_mutex);
		break;
	}

	/* Destroy map */
	irq_data->hwirq = host->inval_irq;

	irq_free_desc(virq);
}
EXPORT_SYMBOL_GPL(irq_dispose_mapping);

unsigned int irq_find_mapping(struct irq_domain *host,
			      irq_hw_number_t hwirq)
{
	unsigned int i;
	unsigned int hint = hwirq % irq_virq_count;

	/* Look for default host if necessary */
	if (host == NULL)
		host = irq_default_host;
	if (host == NULL)
		return NO_IRQ;

	/* legacy -> bail early */
	if (host->revmap_type == IRQ_DOMAIN_MAP_LEGACY)
		return hwirq;

	/* Slow path does a linear search of the map */
	if (hint == 0)
		hint = 1;
	i = hint;
	do {
		struct irq_data *data = irq_get_irq_data(i);
		if (data && (data->domain == host) && (data->hwirq == hwirq))
			return i;
		i++;
		if (i >= irq_virq_count)
			i = 1;
	} while (i != hint);
	return NO_IRQ;
}
EXPORT_SYMBOL_GPL(irq_find_mapping);

#ifdef CONFIG_SMP
int irq_choose_cpu(const struct cpumask *mask)
{
	int cpuid;

	if (cpumask_equal(mask, cpu_all_mask)) {
		static int irq_rover;
		static DEFINE_RAW_SPINLOCK(irq_rover_lock);
		unsigned long flags;

		/* Round-robin distribution... */
do_round_robin:
		raw_spin_lock_irqsave(&irq_rover_lock, flags);

		irq_rover = cpumask_next(irq_rover, cpu_online_mask);
		if (irq_rover >= nr_cpu_ids)
			irq_rover = cpumask_first(cpu_online_mask);

		cpuid = irq_rover;

		raw_spin_unlock_irqrestore(&irq_rover_lock, flags);
	} else {
		cpuid = cpumask_first_and(mask, cpu_online_mask);
		if (cpuid >= nr_cpu_ids)
			goto do_round_robin;
	}

	return get_hard_smp_processor_id(cpuid);
}
#else
int irq_choose_cpu(const struct cpumask *mask)
{
	return hard_smp_processor_id();
}
#endif

unsigned int irq_radix_revmap_lookup(struct irq_domain *host,
				     irq_hw_number_t hwirq)
{
	struct irq_data *irq_data;

	if (WARN_ON_ONCE(host->revmap_type != IRQ_DOMAIN_MAP_TREE))
		return irq_find_mapping(host, hwirq);

	/*
	 * Freeing an irq can delete nodes along the path to
	 * do the lookup via call_rcu.
	 */
	rcu_read_lock();
	irq_data = radix_tree_lookup(&host->revmap_data.tree, hwirq);
	rcu_read_unlock();

	/*
	 * If found in radix tree, then fine.
	 * Else fallback to linear lookup - this should not happen in practice
	 * as it means that we failed to insert the node in the radix tree.
	 */
	return irq_data ? irq_data->irq : irq_find_mapping(host, hwirq);
}

void irq_radix_revmap_insert(struct irq_domain *host, unsigned int virq,
			     irq_hw_number_t hwirq)
{
	struct irq_data *irq_data = irq_get_irq_data(virq);

	if (WARN_ON(host->revmap_type != IRQ_DOMAIN_MAP_TREE))
		return;

	if (virq != NO_IRQ) {
		mutex_lock(&revmap_trees_mutex);
		radix_tree_insert(&host->revmap_data.tree, hwirq, irq_data);
		mutex_unlock(&revmap_trees_mutex);
	}
}

unsigned int irq_linear_revmap(struct irq_domain *host,
			       irq_hw_number_t hwirq)
{
	unsigned int *revmap;

	if (WARN_ON_ONCE(host->revmap_type != IRQ_DOMAIN_MAP_LINEAR))
		return irq_find_mapping(host, hwirq);

	/* Check revmap bounds */
	if (unlikely(hwirq >= host->revmap_data.linear.size))
		return irq_find_mapping(host, hwirq);

	/* Check if revmap was allocated */
	revmap = host->revmap_data.linear.revmap;
	if (unlikely(revmap == NULL))
		return irq_find_mapping(host, hwirq);

	/* Fill up revmap with slow path if no mapping found */
	if (unlikely(revmap[hwirq] == NO_IRQ))
		revmap[hwirq] = irq_find_mapping(host, hwirq);

	return revmap[hwirq];
}
980 #ifdef CONFIG_VIRQ_DEBUG
981 static int virq_debug_show(struct seq_file
*m
, void *private)
984 struct irq_desc
*desc
;
986 static const char none
[] = "none";
990 seq_printf(m
, "%-5s %-7s %-15s %-18s %s\n", "virq", "hwirq",
991 "chip name", "chip data", "host name");
993 for (i
= 1; i
< nr_irqs
; i
++) {
994 desc
= irq_to_desc(i
);
998 raw_spin_lock_irqsave(&desc
->lock
, flags
);
1000 if (desc
->action
&& desc
->action
->handler
) {
1001 struct irq_chip
*chip
;
1003 seq_printf(m
, "%5d ", i
);
1004 seq_printf(m
, "0x%05lx ", desc
->irq_data
.hwirq
);
1006 chip
= irq_desc_get_chip(desc
);
1007 if (chip
&& chip
->name
)
1011 seq_printf(m
, "%-15s ", p
);
1013 data
= irq_desc_get_chip_data(desc
);
1014 seq_printf(m
, "0x%16p ", data
);
1016 if (desc
->irq_data
.domain
->of_node
)
1017 p
= desc
->irq_data
.domain
->of_node
->full_name
;
1020 seq_printf(m
, "%s\n", p
);
1023 raw_spin_unlock_irqrestore(&desc
->lock
, flags
);
1029 static int virq_debug_open(struct inode
*inode
, struct file
*file
)
1031 return single_open(file
, virq_debug_show
, inode
->i_private
);
1034 static const struct file_operations virq_debug_fops
= {
1035 .open
= virq_debug_open
,
1037 .llseek
= seq_lseek
,
1038 .release
= single_release
,
1041 static int __init
irq_debugfs_init(void)
1043 if (debugfs_create_file("virq_mapping", S_IRUGO
, powerpc_debugfs_root
,
1044 NULL
, &virq_debug_fops
) == NULL
)
1049 __initcall(irq_debugfs_init
);
1050 #endif /* CONFIG_VIRQ_DEBUG */

#ifdef CONFIG_PPC64
static int __init setup_noirqdistrib(char *str)
{
	distribute_irqs = 0;
	return 1;
}

__setup("noirqdistrib", setup_noirqdistrib);
#endif /* CONFIG_PPC64 */