// SPDX-License-Identifier: GPL-2.0
/*
 *	linux/arch/ia64/kernel/irq.c
 *
 *	Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQs should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 *
 * Copyright (C) Ashok Raj<ashok.raj@intel.com>, Intel Corporation 2004
 *
 * 4/14/2004: Added code to handle cpu migration and do safe irq
 *	      migration without losing interrupts for iosapic
 *	      architecture.
 */

#include <asm/delay.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>

#include <asm/mca.h>
#include <asm/xtp.h>

/*
 * 'what should we do if we get a hw irq event on an illegal vector'.
 * each architecture has to answer this themselves.
 */
void ack_bad_irq(unsigned int irq)
{
	printk(KERN_ERR "Unexpected irq vector 0x%x on CPU %u!\n", irq, smp_processor_id());
}

/*
 * Interrupt statistics:
 */

atomic_t irq_err_count;

/*
 * /proc/interrupts printing:
 */
int arch_show_interrupts(struct seq_file *p, int prec)
{
	seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
	return 0;
}

#ifdef CONFIG_SMP
static char irq_redir [NR_IRQS]; // = { [0 ... NR_IRQS-1] = 1 };

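/*
 * Record the affinity of @irq: point its affinity mask at the CPU whose
 * hardware id is @hwid and note in irq_redir[] whether redirection was
 * requested.
 */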
void set_irq_affinity_info (unsigned int irq, int hwid, int redir)
{
	if (irq < NR_IRQS) {
		cpumask_copy(irq_get_affinity_mask(irq),
			     cpumask_of(cpu_logical_id(hwid)));
		irq_redir[irq] = (char) (redir & 0xff);
	}
}
#endif /* CONFIG_SMP */

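/*
 * Early architecture hook invoked from early_irq_init(): set up the ia64
 * MCA interrupts before regular IRQ initialization runs.
 */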
int __init arch_early_irq_init(void)
{
	ia64_mca_irq_init();
	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
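/*
 * IRQs that migrate_irqs() retargeted away from the dying CPU.  Phase 3 of
 * fixup_irqs() replays each recorded vector via generic_handle_irq() in
 * case an interrupt arrived while its route was being re-programmed.
 */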
unsigned int vectors_in_migration[NR_IRQS];

/*
 * Since cpu_online_mask is already updated, we just need to check for
 * IRQs whose affinity no longer contains any online CPU.
 */
static void migrate_irqs(void)
{
	int irq, new_cpu;

	for (irq=0; irq < NR_IRQS; irq++) {
		struct irq_desc *desc = irq_to_desc(irq);
		struct irq_data *data = irq_desc_get_irq_data(desc);
		struct irq_chip *chip = irq_data_get_irq_chip(data);

		if (irqd_irq_disabled(data))
			continue;

		/*
		 * No handling for now.
		 * TBD: Implement a disable function so we can tell the CPU
		 * not to respond to these local interrupt sources, such as
		 * ITV, CPEI, MCA, etc.
		 */
		if (irqd_is_per_cpu(data))
			continue;

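		/*
		 * The IRQ was targeted only at CPUs that are now offline:
		 * pick any remaining online CPU and move it there.
		 */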
		if (cpumask_any_and(irq_data_get_affinity_mask(data),
				    cpu_online_mask) >= nr_cpu_ids) {
			/*
			 * Save it for phase 3 processing
			 */
			vectors_in_migration[irq] = irq;

			new_cpu = cpumask_any(cpu_online_mask);

			/*
			 * All three are essential, currently WARN_ON.. maybe panic?
			 */
			if (chip && chip->irq_disable &&
				chip->irq_enable && chip->irq_set_affinity) {
				chip->irq_disable(data);
				chip->irq_set_affinity(data,
						       cpumask_of(new_cpu), false);
				chip->irq_enable(data);
			} else {
				WARN_ON((!chip || !chip->irq_disable ||
					!chip->irq_enable ||
					!chip->irq_set_affinity));
			}
		}
	}
}

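/*
 * Called on a CPU that is going offline: mask the per-CPU timer, hand the
 * time-keeper role to another CPU, migrate IRQs to the surviving CPUs,
 * service anything still pending, then raise XTP and disable local
 * interrupts so no further interrupts reach this processor.
 */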
void fixup_irqs(void)
{
	unsigned int irq;
	extern void ia64_process_pending_intr(void);
	extern volatile int time_keeper_id;

	/* Mask ITV to disable timer */
	ia64_set_itv(1 << 16);

	/*
	 * Find a new timesync master
	 */
	if (smp_processor_id() == time_keeper_id) {
		time_keeper_id = cpumask_first(cpu_online_mask);
		printk ("CPU %d is now promoted to time-keeper master\n", time_keeper_id);
	}

	/*
	 * Phase 1: Locate IRQs bound to this cpu and
	 * relocate them for cpu removal.
	 */
	migrate_irqs();

	/*
	 * Phase 2: Perform interrupt processing for all entries reported in
	 * local APIC.
	 */
	ia64_process_pending_intr();

	/*
	 * Phase 3: Now handle any interrupts not captured in the local APIC.
	 * This accounts for cases where a device raised an interrupt while
	 * its RTE was being disabled and re-programmed.
	 */
	for (irq=0; irq < NR_IRQS; irq++) {
		if (vectors_in_migration[irq]) {
			struct pt_regs *old_regs = set_irq_regs(NULL);

			vectors_in_migration[irq]=0;
			generic_handle_irq(irq);
			set_irq_regs(old_regs);
		}
	}

	/*
	 * Now let the processor die. We disable interrupts and call max_xtp()
	 * to ensure no more interrupts are routed to this processor.
	 * The local timer interrupt may still have one pending, which is
	 * handled in timer_interrupt().
	 */
	max_xtp();
	local_irq_disable();
}
#endif