/*
 * Common interrupt code for 32 and 64 bit
 */
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
#include <linux/smp.h>
#include <linux/ftrace.h>
#include <linux/delay.h>
#include <linux/export.h>

#include <asm/apic.h>
#include <asm/io_apic.h>
#include <asm/irq.h>
#include <asm/mce.h>
#include <asm/hw_irq.h>
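
/*
 * CREATE_TRACE_POINTS must be defined in exactly one compilation unit
 * before including the trace header: it turns the declarations in
 * asm/trace/irq_vectors.h into the actual tracepoint definitions.
 */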
#define CREATE_TRACE_POINTS
#include <asm/trace/irq_vectors.h>

DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

DEFINE_PER_CPU(struct pt_regs *, irq_regs);
EXPORT_PER_CPU_SYMBOL(irq_regs);

atomic_t irq_err_count;
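
/* irq_err_count: bogus interrupt events; shown as "ERR" in /proc/interrupts. */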

/*
 * 'what should we do if we get a hw irq event on an illegal vector'.
 * each architecture has to answer this themselves.
 */
void ack_bad_irq(unsigned int irq)
{
        if (printk_ratelimit())
                pr_err("unexpected IRQ trap at vector %02x\n", irq);

        /*
         * Currently unexpected vectors happen only on SMP and APIC.
         * We _must_ ack these because every local APIC has only N
         * irq slots per priority level, and a 'hanging, unacked' IRQ
         * holds up an irq slot - in excessive cases (when multiple
         * unexpected vectors occur) that might lock up the APIC
         * completely.
         * But only ack when the APIC is enabled -AK
         */
        ack_APIC_irq();
}

#define irq_stats(x)            (&per_cpu(irq_stat, x))
/*
 * /proc/interrupts printing for arch specific interrupts
 */
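/*
 * Each row is a right-aligned label of width "prec", one "%10u" count
 * per online cpu, then a description.  Illustrative output (counts are
 * made up):
 *
 *      NMI:          0          0   Non-maskable interrupts
 *      LOC:    8450423    7996935   Local timer interrupts
 */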
int arch_show_interrupts(struct seq_file *p, int prec)
{
        int j;

        seq_printf(p, "%*s: ", prec, "NMI");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->__nmi_count);
        seq_puts(p, "  Non-maskable interrupts\n");
#ifdef CONFIG_X86_LOCAL_APIC
        seq_printf(p, "%*s: ", prec, "LOC");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->apic_timer_irqs);
        seq_puts(p, "  Local timer interrupts\n");

        seq_printf(p, "%*s: ", prec, "SPU");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_spurious_count);
        seq_puts(p, "  Spurious interrupts\n");
        seq_printf(p, "%*s: ", prec, "PMI");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->apic_perf_irqs);
        seq_puts(p, "  Performance monitoring interrupts\n");
        seq_printf(p, "%*s: ", prec, "IWI");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->apic_irq_work_irqs);
        seq_puts(p, "  IRQ work interrupts\n");
        seq_printf(p, "%*s: ", prec, "RTR");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->icr_read_retry_count);
        seq_puts(p, "  APIC ICR read retries\n");
        if (x86_platform_ipi_callback) {
                seq_printf(p, "%*s: ", prec, "PLT");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", irq_stats(j)->x86_platform_ipis);
                seq_puts(p, "  Platform interrupts\n");
        }
#endif
        seq_printf(p, "%*s: ", prec, "RES");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count);
        seq_puts(p, "  Rescheduling interrupts\n");
        seq_printf(p, "%*s: ", prec, "CAL");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_call_count);
        seq_puts(p, "  Function call interrupts\n");
        seq_printf(p, "%*s: ", prec, "TLB");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count);
        seq_puts(p, "  TLB shootdowns\n");
#ifdef CONFIG_X86_THERMAL_VECTOR
        seq_printf(p, "%*s: ", prec, "TRM");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_thermal_count);
        seq_puts(p, "  Thermal event interrupts\n");
#endif
#ifdef CONFIG_X86_MCE_THRESHOLD
        seq_printf(p, "%*s: ", prec, "THR");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_threshold_count);
        seq_puts(p, "  Threshold APIC interrupts\n");
#endif
#ifdef CONFIG_X86_MCE_AMD
        seq_printf(p, "%*s: ", prec, "DFR");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->irq_deferred_error_count);
        seq_puts(p, "  Deferred Error APIC interrupts\n");
#endif
#ifdef CONFIG_X86_MCE
        seq_printf(p, "%*s: ", prec, "MCE");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(mce_exception_count, j));
        seq_puts(p, "  Machine check exceptions\n");
        seq_printf(p, "%*s: ", prec, "MCP");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
        seq_puts(p, "  Machine check polls\n");
#endif
#if IS_ENABLED(CONFIG_HYPERV) || defined(CONFIG_XEN)
        if (test_bit(HYPERVISOR_CALLBACK_VECTOR, used_vectors)) {
                seq_printf(p, "%*s: ", prec, "HYP");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ",
                                   irq_stats(j)->irq_hv_callback_count);
                seq_puts(p, "  Hypervisor callback interrupts\n");
        }
#endif
        seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
#if defined(CONFIG_X86_IO_APIC)
        seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
#endif
#ifdef CONFIG_HAVE_KVM
        seq_printf(p, "%*s: ", prec, "PIN");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", irq_stats(j)->kvm_posted_intr_ipis);
        seq_puts(p, "  Posted-interrupt notification event\n");

        seq_printf(p, "%*s: ", prec, "NPI");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ",
                           irq_stats(j)->kvm_posted_intr_nested_ipis);
        seq_puts(p, "  Nested posted-interrupt event\n");

        seq_printf(p, "%*s: ", prec, "PIW");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ",
                           irq_stats(j)->kvm_posted_intr_wakeup_ipis);
        seq_puts(p, "  Posted-interrupt wakeup event\n");
#endif
        return 0;
}
u64 arch_irq_stat_cpu(unsigned int cpu)
{
        u64 sum = irq_stats(cpu)->__nmi_count;

#ifdef CONFIG_X86_LOCAL_APIC
        sum += irq_stats(cpu)->apic_timer_irqs;
        sum += irq_stats(cpu)->irq_spurious_count;
        sum += irq_stats(cpu)->apic_perf_irqs;
        sum += irq_stats(cpu)->apic_irq_work_irqs;
        sum += irq_stats(cpu)->icr_read_retry_count;
        if (x86_platform_ipi_callback)
                sum += irq_stats(cpu)->x86_platform_ipis;
#endif
        sum += irq_stats(cpu)->irq_resched_count;
        sum += irq_stats(cpu)->irq_call_count;
#ifdef CONFIG_X86_THERMAL_VECTOR
        sum += irq_stats(cpu)->irq_thermal_count;
#endif
#ifdef CONFIG_X86_MCE_THRESHOLD
        sum += irq_stats(cpu)->irq_threshold_count;
#endif
#ifdef CONFIG_X86_MCE
        sum += per_cpu(mce_exception_count, cpu);
        sum += per_cpu(mce_poll_count, cpu);
#endif
        return sum;
}
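
/*
 * System-wide events that are not attributed to a particular cpu;
 * currently just the APIC error count reported as "ERR" above.
 */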
u64 arch_irq_stat(void)
{
        u64 sum = atomic_read(&irq_err_count);

        return sum;
}

/*
 * do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
__visible unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);
        struct irq_desc *desc;
        /* high bit used in ret_from_ code */
        unsigned vector = ~regs->orig_ax;
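
        /*
         * The IRQ entry stub pushed the bitwise NOT of the vector number,
         * so ~orig_ax above recovers it; storing the value inverted keeps
         * it negative, which is how the ret_from_ code tells a hardware
         * interrupt frame from a syscall frame.
         */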

        /*
         * NB: Unlike exception entries, IRQ entries do not reliably
         * handle context tracking in the low-level entry code.  This is
         * because syscall entries execute briefly with IRQs on before
         * updating context tracking state, so we can take an IRQ from
         * kernel mode with CONTEXT_USER.  The low-level entry code only
         * updates the context if we came from user mode, so we won't
         * switch to CONTEXT_KERNEL.  We'll fix that once the syscall
         * code is cleaned up enough that we can cleanly defer enabling
         * IRQs.
         */

        entering_irq();

        /* entering_irq() tells RCU that we're not quiescent.  Check it. */
        RCU_LOCKDEP_WARN(!rcu_is_watching(), "IRQ failed to wake up RCU");

        desc = __this_cpu_read(vector_irq[vector]);

        if (!handle_irq(desc, regs)) {
                ack_APIC_irq();

                if (desc != VECTOR_RETRIGGERED) {
                        pr_emerg_ratelimited("%s: %d.%d No irq handler for vector\n",
                                             __func__, smp_processor_id(),
                                             vector);
                } else {
                        __this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
                }
        }

        exiting_irq();

        set_irq_regs(old_regs);
        return 1;
}

#ifdef CONFIG_X86_LOCAL_APIC
/* Function pointer for generic interrupt vector handling */
void (*x86_platform_ipi_callback)(void) = NULL;
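
/*
 * Platform code claims X86_PLATFORM_IPI_VECTOR by installing a callback
 * here; invocations are counted per cpu and show up as the "PLT" line
 * in /proc/interrupts.
 */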
/*
 * Handler for X86_PLATFORM_IPI_VECTOR.
 */
__visible void __irq_entry smp_x86_platform_ipi(struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);

        entering_ack_irq();
        trace_x86_platform_ipi_entry(X86_PLATFORM_IPI_VECTOR);
        inc_irq_stat(x86_platform_ipis);
        if (x86_platform_ipi_callback)
                x86_platform_ipi_callback();
        trace_x86_platform_ipi_exit(X86_PLATFORM_IPI_VECTOR);
        exiting_irq();
        set_irq_regs(old_regs);
}
#endif

#ifdef CONFIG_HAVE_KVM
static void dummy_handler(void) {}
static void (*kvm_posted_intr_wakeup_handler)(void) = dummy_handler;

void kvm_set_posted_intr_wakeup_handler(void (*handler)(void))
{
        if (handler)
                kvm_posted_intr_wakeup_handler = handler;
        else
                kvm_posted_intr_wakeup_handler = dummy_handler;
}
EXPORT_SYMBOL_GPL(kvm_set_posted_intr_wakeup_handler);
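
/*
 * Typical usage (sketch; in this era KVM's VMX code is the only user,
 * installing its wakeup_handler at hardware-setup time):
 *
 *      kvm_set_posted_intr_wakeup_handler(wakeup_handler);
 *      ...
 *      kvm_set_posted_intr_wakeup_handler(NULL);       /* on unload *​/
 *
 * Passing NULL reinstalls dummy_handler, so the wakeup IPI handler below
 * can always call through the pointer without a NULL check.
 */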

/*
 * Handler for POSTED_INTERRUPT_VECTOR.
 */
__visible void smp_kvm_posted_intr_ipi(struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);

        entering_ack_irq();
        inc_irq_stat(kvm_posted_intr_ipis);
        exiting_irq();
        set_irq_regs(old_regs);
}
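
/*
 * When the target vCPU is running, the CPU consumes the posted-interrupt
 * descriptor in hardware, so the notification handler above only needs
 * to ack the vector and bump the "PIN" count; no software dispatch.
 */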

/*
 * Handler for POSTED_INTERRUPT_WAKEUP_VECTOR.
 */
__visible void smp_kvm_posted_intr_wakeup_ipi(struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);

        entering_ack_irq();
        inc_irq_stat(kvm_posted_intr_wakeup_ipis);
        kvm_posted_intr_wakeup_handler();
        exiting_irq();
        set_irq_regs(old_regs);
}

/*
 * Handler for POSTED_INTERRUPT_NESTED_VECTOR.
 */
__visible void smp_kvm_posted_intr_nested_ipi(struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);

        entering_ack_irq();
        inc_irq_stat(kvm_posted_intr_nested_ipis);
        exiting_irq();
        set_irq_regs(old_regs);
}
#endif

#ifdef CONFIG_HOTPLUG_CPU

/*
 * These two declarations are only used in check_irq_vectors_for_cpu_disable()
 * below, which is protected by stop_machine().  Putting them on the stack
 * results in a stack frame overflow.  Dynamically allocating could result in a
 * failure so declare these two cpumasks as global.
 */
static struct cpumask affinity_new, online_new;

/*
 * This cpu is going to be removed and its vectors migrated to the remaining
 * online cpus.  Check to see if there are enough vectors in the remaining
 * online cpus.  This function is protected by stop_machine().
 */
int check_irq_vectors_for_cpu_disable(void)
{
        unsigned int this_cpu, vector, this_count, count;
        struct irq_desc *desc;
        struct irq_data *data;
        int cpu;

        this_cpu = smp_processor_id();
        cpumask_copy(&online_new, cpu_online_mask);
        cpumask_clear_cpu(this_cpu, &online_new);
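
        /* online_new = cpu_online_mask as it will look once this cpu is gone. */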

        this_count = 0;
        for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
                desc = __this_cpu_read(vector_irq[vector]);
                if (IS_ERR_OR_NULL(desc))
                        continue;
                /*
                 * Protect against concurrent action removal, affinity
                 * changes etc.
                 */
                raw_spin_lock(&desc->lock);
                data = irq_desc_get_irq_data(desc);
                cpumask_copy(&affinity_new, irq_data_get_affinity_mask(data));
                cpumask_clear_cpu(this_cpu, &affinity_new);

                /* Do not count inactive or per-cpu irqs. */
                if (!irq_desc_has_action(desc) || irqd_is_per_cpu(data)) {
                        raw_spin_unlock(&desc->lock);
                        continue;
                }

                raw_spin_unlock(&desc->lock);
                /*
                 * A single irq may be mapped to multiple cpu's
                 * vector_irq[] (for example IOAPIC cluster mode).  In
                 * this case we have two possibilities:
                 *
                 * 1) the resulting affinity mask is empty; that is
                 * the down'd cpu is the last cpu in the irq's
                 * affinity mask, or
                 *
                 * 2) the resulting affinity mask is no longer a
                 * subset of the online cpus but the affinity mask is
                 * not zero; that is the down'd cpu is the last online
                 * cpu in a user set affinity mask.
                 */
                if (cpumask_empty(&affinity_new) ||
                    !cpumask_subset(&affinity_new, &online_new))
                        this_count++;
        }

        /* No need to check any further. */
        if (!this_count)
                return 0;

        count = 0;
        for_each_online_cpu(cpu) {
                if (cpu == this_cpu)
                        continue;
                /*
                 * We scan from FIRST_EXTERNAL_VECTOR to first system
                 * vector. If the vector is marked in the used vectors
                 * bitmap or an irq is assigned to it, we don't count
                 * it as available.
                 *
                 * As this is an inaccurate snapshot anyway, we can do
                 * this w/o holding vector_lock.
                 */
                for (vector = FIRST_EXTERNAL_VECTOR;
                     vector < FIRST_SYSTEM_VECTOR; vector++) {
                        if (!test_bit(vector, used_vectors) &&
                            IS_ERR_OR_NULL(per_cpu(vector_irq, cpu)[vector])) {
                                if (++count == this_count)
                                        return 0;
                        }
                }
        }

        if (count < this_count) {
                pr_warn("CPU %d disable failed: CPU has %u vectors assigned and there are only %u available.\n",
                        this_cpu, this_count, count);
                return -ERANGE;
        }
        return 0;
}

/* A cpu has been removed from cpu_online_mask.  Reset irq affinities. */
void fixup_irqs(void)
{
        unsigned int irr, vector;
        struct irq_desc *desc;
        struct irq_data *data;
        struct irq_chip *chip;

        irq_migrate_all_off_this_cpu();

        /*
         * We can remove mdelay() and then send spurious interrupts to
         * new cpu targets for all the irqs that were handled previously by
         * this cpu. While it works, I have seen spurious interrupt messages
         * (nothing wrong but still...).
         *
         * So for now, retain mdelay(1) and check the IRR and then send those
         * interrupts to new targets as this cpu is already offlined...
         */
        mdelay(1);

        /*
         * We can walk the vector array of this cpu without holding
         * vector_lock because the cpu is already marked !online, so
         * nothing else will touch it.
         */
        for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
                if (IS_ERR_OR_NULL(__this_cpu_read(vector_irq[vector])))
                        continue;
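
                /*
                 * APIC_IRR is a 256-bit register exposed as eight 32-bit
                 * words spaced 0x10 apart; pick the word and bit for this
                 * vector to see whether the interrupt is still pending
                 * on this cpu.
                 */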
                irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
                if (irr & (1 << (vector % 32))) {
                        desc = __this_cpu_read(vector_irq[vector]);

                        raw_spin_lock(&desc->lock);
                        data = irq_desc_get_irq_data(desc);
                        chip = irq_data_get_irq_chip(data);
                        if (chip->irq_retrigger) {
                                chip->irq_retrigger(data);
                                __this_cpu_write(vector_irq[vector], VECTOR_RETRIGGERED);
                        }
                        raw_spin_unlock(&desc->lock);
                }
                if (__this_cpu_read(vector_irq[vector]) != VECTOR_RETRIGGERED)
                        __this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
        }
}
#endif