/*
 * arch/x86/kernel/irqinit.c
 *
 * Initialization of the x86 interrupt vector table and legacy (ISA/PIC)
 * interrupt setup.
 */
#include <linux/linkage.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/random.h>
#include <linux/kprobes.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/sysdev.h>
#include <linux/bitops.h>
#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/delay.h>

#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/timer.h>
#include <asm/hw_irq.h>
#include <asm/pgtable.h>
#include <asm/desc.h>
#include <asm/apic.h>
#include <asm/setup.h>
#include <asm/i8259.h>
#include <asm/traps.h>

/*
 * ISA PIC or low IO-APIC triggered (INTA-cycle or APIC) interrupts:
 * (these are usually mapped to vectors 0x30-0x3f)
 */

/*
 * The IO-APIC gives us many more interrupt sources. Most of these
 * are unused but an SMP system is supposed to have enough memory ...
 * sometimes (mostly wrt. hw bugs) we get corrupted vectors all
 * across the spectrum, so we really want to be prepared to get all
 * of these. Plus, more powerful systems might have more than 64
 * IO-APIC registers.
 *
 * (these are usually mapped into the 0x30-0xff vector range)
 */

#ifdef CONFIG_X86_32
/*
 * Note that on a 486, we don't want to do a SIGFPE on an irq13
 * as the irq is unreliable, and exception 16 works correctly
 * (ie as explained in the intel literature). On a 386, you
 * can't use exception 16 due to bad IBM design, so we have to
 * rely on the less exact irq13.
 *
 * Careful.. Not only is IRQ13 unreliable, but it also
 * leads to races. IBM designers who came up with it should
 * be shot.
 */

/*
 * IRQ13 handler for external-FPU error reporting.
 *
 * Writing to port 0xF0 clears the PC/AT coprocessor "busy" latch so
 * further FPU errors can be delivered.  If the FPU irq is being
 * ignored, or the CPU has no hardware FPU, the interrupt was not ours
 * and IRQ_NONE lets a sharing handler (see fpu_irq below) claim it.
 * Otherwise report the error against the faulting user IP.
 */
static irqreturn_t math_error_irq(int cpl, void *dev_id)
{
	outb(0, 0xF0);
	if (ignore_fpu_irq || !boot_cpu_data.hard_math)
		return IRQ_NONE;
	math_error((void __user *)get_irq_regs()->ip);
	return IRQ_HANDLED;
}

/*
 * New motherboards sometimes make IRQ 13 be a PCI interrupt,
 * so allow interrupt sharing.
 */
static struct irqaction fpu_irq = {
	.handler = math_error_irq,
	.name = "fpu",
};
#endif

77 /*
78 * IRQ2 is cascade interrupt to second interrupt controller
79 */
80 static struct irqaction irq2 = {
81 .handler = no_action,
82 .name = "cascade",
83 };
84
85 DEFINE_PER_CPU(vector_irq_t, vector_irq) = {
86 [0 ... NR_VECTORS - 1] = -1,
87 };
88
89 int vector_used_by_percpu_irq(unsigned int vector)
90 {
91 int cpu;
92
93 for_each_online_cpu(cpu) {
94 if (per_cpu(vector_irq, cpu)[vector] != -1)
95 return 1;
96 }
97
98 return 0;
99 }
100
101 void __init init_ISA_irqs(void)
102 {
103 int i;
104
105 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC)
106 init_bsp_APIC();
107 #endif
108 legacy_pic->init(0);
109
110 /*
111 * 16 old-style INTA-cycle interrupts:
112 */
113 for (i = 0; i < legacy_pic->nr_legacy_irqs; i++) {
114 struct irq_desc *desc = irq_to_desc(i);
115
116 desc->status = IRQ_DISABLED;
117 desc->action = NULL;
118 desc->depth = 1;
119
120 set_irq_chip_and_handler_name(i, &i8259A_chip,
121 handle_level_irq, "XT");
122 }
123 }
124
125 void __init init_IRQ(void)
126 {
127 int i;
128
129 /*
130 * On cpu 0, Assign IRQ0_VECTOR..IRQ15_VECTOR's to IRQ 0..15.
131 * If these IRQ's are handled by legacy interrupt-controllers like PIC,
132 * then this configuration will likely be static after the boot. If
133 * these IRQ's are handled by more mordern controllers like IO-APIC,
134 * then this vector space can be freed and re-used dynamically as the
135 * irq's migrate etc.
136 */
137 for (i = 0; i < legacy_pic->nr_legacy_irqs; i++)
138 per_cpu(vector_irq, 0)[IRQ0_VECTOR + i] = i;
139
140 x86_init.irqs.intr_init();
141 }
142
143 /*
144 * Setup the vector to irq mappings.
145 */
146 void setup_vector_irq(int cpu)
147 {
148 #ifndef CONFIG_X86_IO_APIC
149 int irq;
150
151 /*
152 * On most of the platforms, legacy PIC delivers the interrupts on the
153 * boot cpu. But there are certain platforms where PIC interrupts are
154 * delivered to multiple cpu's. If the legacy IRQ is handled by the
155 * legacy PIC, for the new cpu that is coming online, setup the static
156 * legacy vector to irq mapping:
157 */
158 for (irq = 0; irq < legacy_pic->nr_legacy_irqs; irq++)
159 per_cpu(vector_irq, cpu)[IRQ0_VECTOR + irq] = irq;
160 #endif
161
162 __setup_vector_irq(cpu);
163 }
164
165 static void __init smp_intr_init(void)
166 {
167 #ifdef CONFIG_SMP
168 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC)
169 /*
170 * The reschedule interrupt is a CPU-to-CPU reschedule-helper
171 * IPI, driven by wakeup.
172 */
173 alloc_intr_gate(RESCHEDULE_VECTOR, reschedule_interrupt);
174
175 /* IPIs for invalidation */
176 alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+0, invalidate_interrupt0);
177 alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+1, invalidate_interrupt1);
178 alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+2, invalidate_interrupt2);
179 alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+3, invalidate_interrupt3);
180 alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+4, invalidate_interrupt4);
181 alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+5, invalidate_interrupt5);
182 alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+6, invalidate_interrupt6);
183 alloc_intr_gate(INVALIDATE_TLB_VECTOR_START+7, invalidate_interrupt7);
184
185 /* IPI for generic function call */
186 alloc_intr_gate(CALL_FUNCTION_VECTOR, call_function_interrupt);
187
188 /* IPI for generic single function call */
189 alloc_intr_gate(CALL_FUNCTION_SINGLE_VECTOR,
190 call_function_single_interrupt);
191
192 /* Low priority IPI to cleanup after moving an irq */
193 set_intr_gate(IRQ_MOVE_CLEANUP_VECTOR, irq_move_cleanup_interrupt);
194 set_bit(IRQ_MOVE_CLEANUP_VECTOR, used_vectors);
195
196 /* IPI used for rebooting/stopping */
197 alloc_intr_gate(REBOOT_VECTOR, reboot_interrupt);
198 #endif
199 #endif /* CONFIG_SMP */
200 }
201
202 static void __init apic_intr_init(void)
203 {
204 smp_intr_init();
205
206 #ifdef CONFIG_X86_THERMAL_VECTOR
207 alloc_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
208 #endif
209 #ifdef CONFIG_X86_MCE_THRESHOLD
210 alloc_intr_gate(THRESHOLD_APIC_VECTOR, threshold_interrupt);
211 #endif
212 #if defined(CONFIG_X86_MCE) && defined(CONFIG_X86_LOCAL_APIC)
213 alloc_intr_gate(MCE_SELF_VECTOR, mce_self_interrupt);
214 #endif
215
216 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC)
217 /* self generated IPI for local APIC timer */
218 alloc_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt);
219
220 /* IPI for X86 platform specific use */
221 alloc_intr_gate(X86_PLATFORM_IPI_VECTOR, x86_platform_ipi);
222
223 /* IPI vectors for APIC spurious and error interrupts */
224 alloc_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt);
225 alloc_intr_gate(ERROR_APIC_VECTOR, error_interrupt);
226
227 /* Performance monitoring interrupts: */
228 # ifdef CONFIG_PERF_EVENTS
229 alloc_intr_gate(LOCAL_PENDING_VECTOR, perf_pending_interrupt);
230 # endif
231
232 #endif
233 }
234
235 void __init native_init_IRQ(void)
236 {
237 int i;
238
239 /* Execute any quirks before the call gates are initialised: */
240 x86_init.irqs.pre_vector_init();
241
242 apic_intr_init();
243
244 /*
245 * Cover the whole vector space, no vector can escape
246 * us. (some of these will be overridden and become
247 * 'special' SMP interrupts)
248 */
249 for (i = FIRST_EXTERNAL_VECTOR; i < NR_VECTORS; i++) {
250 /* IA32_SYSCALL_VECTOR could be used in trap_init already. */
251 if (!test_bit(i, used_vectors))
252 set_intr_gate(i, interrupt[i-FIRST_EXTERNAL_VECTOR]);
253 }
254
255 if (!acpi_ioapic)
256 setup_irq(2, &irq2);
257
258 #ifdef CONFIG_X86_32
259 /*
260 * External FPU? Set up irq13 if so, for
261 * original braindamaged IBM FERR coupling.
262 */
263 if (boot_cpu_data.hard_math && !cpu_has_fpu)
264 setup_irq(FPU_IRQ, &fpu_irq);
265
266 irq_ctx_init(smp_processor_id());
267 #endif
268 }