/*
 * Common interrupt code for 32 and 64 bit
 */
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
#include <linux/smp.h>
#include <linux/ftrace.h>
#include <linux/delay.h>
#include <linux/export.h>

#include <asm/apic.h>
#include <asm/io_apic.h>
#include <asm/irq.h>
#include <asm/mce.h>
#include <asm/hw_irq.h>
#include <asm/desc.h>

#define CREATE_TRACE_POINTS
#include <asm/trace/irq_vectors.h>

DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

DEFINE_PER_CPU(struct pt_regs *, irq_regs);
EXPORT_PER_CPU_SYMBOL(irq_regs);
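/*
 * irq_regs caches the interrupted context's register state while an
 * interrupt is being handled; irq-context code such as profiling and
 * watchdog hooks reads it back via get_irq_regs().
 */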

atomic_t irq_err_count;

/* Function pointer for generic interrupt vector handling */
void (*x86_platform_ipi_callback)(void) = NULL;

/*
 * 'what should we do if we get a hw irq event on an illegal vector'.
 * each architecture has to answer this themselves.
 */
void ack_bad_irq(unsigned int irq)
{
	if (printk_ratelimit())
		pr_err("unexpected IRQ trap at vector %02x\n", irq);

	/*
	 * Currently unexpected vectors happen only on SMP and APIC.
	 * We _must_ ack these because every local APIC has only N
	 * irq slots per priority level, and a 'hanging, unacked' IRQ
	 * holds up an irq slot - in excessive cases (when multiple
	 * unexpected vectors occur) that might lock up the APIC
	 * completely.
	 * But only ack when the APIC is enabled -AK
	 */
	ack_APIC_irq();
}

#define irq_stats(x)		(&per_cpu(irq_stat, x))
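/*
 * Usage note: irq_stats(cpu) yields the irq_cpustat_t of @cpu; e.g.
 * irq_stats(cpu)->irq_call_count below is that cpu's running tally of
 * function-call IPIs.
 */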
/*
 * /proc/interrupts printing for arch specific interrupts
 */
int arch_show_interrupts(struct seq_file *p, int prec)
{
	int j;

	seq_printf(p, "%*s: ", prec, "NMI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->__nmi_count);
	seq_puts(p, "  Non-maskable interrupts\n");
#ifdef CONFIG_X86_LOCAL_APIC
	seq_printf(p, "%*s: ", prec, "LOC");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->apic_timer_irqs);
	seq_puts(p, "  Local timer interrupts\n");

	seq_printf(p, "%*s: ", prec, "SPU");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_spurious_count);
	seq_puts(p, "  Spurious interrupts\n");
	seq_printf(p, "%*s: ", prec, "PMI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->apic_perf_irqs);
	seq_puts(p, "  Performance monitoring interrupts\n");
	seq_printf(p, "%*s: ", prec, "IWI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->apic_irq_work_irqs);
	seq_puts(p, "  IRQ work interrupts\n");
	seq_printf(p, "%*s: ", prec, "RTR");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->icr_read_retry_count);
	seq_puts(p, "  APIC ICR read retries\n");
#endif
	if (x86_platform_ipi_callback) {
		seq_printf(p, "%*s: ", prec, "PLT");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", irq_stats(j)->x86_platform_ipis);
		seq_puts(p, "  Platform interrupts\n");
	}
	seq_printf(p, "%*s: ", prec, "RES");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count);
	seq_puts(p, "  Rescheduling interrupts\n");
	seq_printf(p, "%*s: ", prec, "CAL");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_call_count);
	seq_puts(p, "  Function call interrupts\n");
	seq_printf(p, "%*s: ", prec, "TLB");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count);
	seq_puts(p, "  TLB shootdowns\n");

#ifdef CONFIG_X86_THERMAL_VECTOR
	seq_printf(p, "%*s: ", prec, "TRM");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_thermal_count);
	seq_puts(p, "  Thermal event interrupts\n");
#endif
#ifdef CONFIG_X86_MCE_THRESHOLD
	seq_printf(p, "%*s: ", prec, "THR");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_threshold_count);
	seq_puts(p, "  Threshold APIC interrupts\n");
#endif
#ifdef CONFIG_X86_MCE_AMD
	seq_printf(p, "%*s: ", prec, "DFR");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_deferred_error_count);
	seq_puts(p, "  Deferred Error APIC interrupts\n");
#endif
#ifdef CONFIG_X86_MCE
	seq_printf(p, "%*s: ", prec, "MCE");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(mce_exception_count, j));
	seq_puts(p, "  Machine check exceptions\n");
	seq_printf(p, "%*s: ", prec, "MCP");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
	seq_puts(p, "  Machine check polls\n");
#endif
#if IS_ENABLED(CONFIG_HYPERV) || defined(CONFIG_XEN)
	if (test_bit(HYPERVISOR_CALLBACK_VECTOR, used_vectors)) {
		seq_printf(p, "%*s: ", prec, "HYP");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ",
				   irq_stats(j)->irq_hv_callback_count);
		seq_puts(p, "  Hypervisor callback interrupts\n");
	}
#endif
	seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
#if defined(CONFIG_X86_IO_APIC)
	seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
#endif
#ifdef CONFIG_HAVE_KVM
	seq_printf(p, "%*s: ", prec, "PIN");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->kvm_posted_intr_ipis);
	seq_puts(p, "  Posted-interrupt notification event\n");

	seq_printf(p, "%*s: ", prec, "NPI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ",
			   irq_stats(j)->kvm_posted_intr_nested_ipis);
	seq_puts(p, "  Nested posted-interrupt event\n");

	seq_printf(p, "%*s: ", prec, "PIW");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ",
			   irq_stats(j)->kvm_posted_intr_wakeup_ipis);
	seq_puts(p, "  Posted-interrupt wakeup event\n");
#endif
	return 0;
}

/*
 * /proc/stat helpers
 */
u64 arch_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = irq_stats(cpu)->__nmi_count;

#ifdef CONFIG_X86_LOCAL_APIC
	sum += irq_stats(cpu)->apic_timer_irqs;
	sum += irq_stats(cpu)->irq_spurious_count;
	sum += irq_stats(cpu)->apic_perf_irqs;
	sum += irq_stats(cpu)->apic_irq_work_irqs;
	sum += irq_stats(cpu)->icr_read_retry_count;
#endif
	if (x86_platform_ipi_callback)
		sum += irq_stats(cpu)->x86_platform_ipis;
	sum += irq_stats(cpu)->irq_resched_count;
	sum += irq_stats(cpu)->irq_call_count;
#ifdef CONFIG_X86_THERMAL_VECTOR
	sum += irq_stats(cpu)->irq_thermal_count;
#endif
#ifdef CONFIG_X86_MCE_THRESHOLD
	sum += irq_stats(cpu)->irq_threshold_count;
#endif
#ifdef CONFIG_X86_MCE
	sum += per_cpu(mce_exception_count, cpu);
	sum += per_cpu(mce_poll_count, cpu);
#endif
	return sum;
}

u64 arch_irq_stat(void)
{
	u64 sum = atomic_read(&irq_err_count);

	return sum;
}
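
/*
 * Note: fs/proc/stat.c builds its "intr" line from these two hooks,
 * adding arch_irq_stat() once plus arch_irq_stat_cpu() for each cpu.
 */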

/*
 * do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
__visible unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	struct irq_desc *desc;
	/* high bit used in ret_from_ code */
	unsigned vector = ~regs->orig_ax;
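	/*
	 * The low-level entry stub pushed ~vector into orig_ax (keeping
	 * the value negative so it cannot be mistaken for a syscall
	 * number); complementing it again here recovers the vector.
	 */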

	entering_irq();

	/* entering_irq() tells RCU that we're not quiescent.  Check it. */
	RCU_LOCKDEP_WARN(!rcu_is_watching(), "IRQ failed to wake up RCU");

	desc = __this_cpu_read(vector_irq[vector]);

	if (!handle_irq(desc, regs)) {
		ack_APIC_irq();

		if (desc != VECTOR_RETRIGGERED) {
			pr_emerg_ratelimited("%s: %d.%d No irq handler for vector\n",
					     __func__, smp_processor_id(),
					     vector);
		} else {
			__this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
		}
	}

	exiting_irq();

	set_irq_regs(old_regs);
	return 1;
}

/*
 * Handler for X86_PLATFORM_IPI_VECTOR.
 */
void __smp_x86_platform_ipi(void)
{
	inc_irq_stat(x86_platform_ipis);

	if (x86_platform_ipi_callback)
		x86_platform_ipi_callback();
}
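
/*
 * Note: platform code installs the hook by assigning
 * x86_platform_ipi_callback directly; there is no registration API
 * beyond this pointer.
 */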

__visible void __irq_entry smp_x86_platform_ipi(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	entering_ack_irq();
	__smp_x86_platform_ipi();
	exiting_irq();
	set_irq_regs(old_regs);
}

#ifdef CONFIG_HAVE_KVM
static void dummy_handler(void) {}
static void (*kvm_posted_intr_wakeup_handler)(void) = dummy_handler;

void kvm_set_posted_intr_wakeup_handler(void (*handler)(void))
{
	if (handler)
		kvm_posted_intr_wakeup_handler = handler;
	else
		kvm_posted_intr_wakeup_handler = dummy_handler;
}
EXPORT_SYMBOL_GPL(kvm_set_posted_intr_wakeup_handler);
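
/*
 * Usage sketch (handler name illustrative, not from this file): KVM's
 * VMX code registers its wakeup handler once posted interrupts are set
 * up, and clears it again on unload:
 *
 *	kvm_set_posted_intr_wakeup_handler(pi_wakeup_handler);
 *	...
 *	kvm_set_posted_intr_wakeup_handler(NULL);
 */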

/*
 * Handler for POSTED_INTERRUPT_VECTOR.
 */
__visible void smp_kvm_posted_intr_ipi(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	entering_ack_irq();
	inc_irq_stat(kvm_posted_intr_ipis);
	exiting_irq();
	set_irq_regs(old_regs);
}

/*
 * Handler for POSTED_INTERRUPT_WAKEUP_VECTOR.
 */
__visible void smp_kvm_posted_intr_wakeup_ipi(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	entering_ack_irq();
	inc_irq_stat(kvm_posted_intr_wakeup_ipis);
	kvm_posted_intr_wakeup_handler();
	exiting_irq();
	set_irq_regs(old_regs);
}

/*
 * Handler for POSTED_INTERRUPT_NESTED_VECTOR.
 */
__visible void smp_kvm_posted_intr_nested_ipi(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	entering_ack_irq();
	inc_irq_stat(kvm_posted_intr_nested_ipis);
	exiting_irq();
	set_irq_regs(old_regs);
}
#endif

__visible void __irq_entry smp_trace_x86_platform_ipi(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	entering_ack_irq();
	trace_x86_platform_ipi_entry(X86_PLATFORM_IPI_VECTOR);
	__smp_x86_platform_ipi();
	trace_x86_platform_ipi_exit(X86_PLATFORM_IPI_VECTOR);
	exiting_irq();
	set_irq_regs(old_regs);
}

EXPORT_SYMBOL_GPL(vector_used_by_percpu_irq);

#ifdef CONFIG_HOTPLUG_CPU

/* These two declarations are only used in check_irq_vectors_for_cpu_disable()
 * below, which is protected by stop_machine().  Putting them on the stack
 * results in a stack frame overflow.  Dynamically allocating could result in a
 * failure so declare these two cpumasks as global.
 */
static struct cpumask affinity_new, online_new;

/*
 * This cpu is going to be removed and its vectors migrated to the remaining
 * online cpus.  Check to see if there are enough vectors in the remaining cpus.
 * This function is protected by stop_machine().
 */
int check_irq_vectors_for_cpu_disable(void)
{
	unsigned int this_cpu, vector, this_count, count;
	struct irq_desc *desc;
	struct irq_data *data;
	int cpu;

	this_cpu = smp_processor_id();
	cpumask_copy(&online_new, cpu_online_mask);
	cpumask_clear_cpu(this_cpu, &online_new);

	this_count = 0;
	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
		desc = __this_cpu_read(vector_irq[vector]);
		if (IS_ERR_OR_NULL(desc))
			continue;
		/*
		 * Protect against concurrent action removal, affinity
		 * changes etc.
		 */
		raw_spin_lock(&desc->lock);
		data = irq_desc_get_irq_data(desc);
		cpumask_copy(&affinity_new,
			     irq_data_get_affinity_mask(data));
		cpumask_clear_cpu(this_cpu, &affinity_new);

		/* Do not count inactive or per-cpu irqs. */
		if (!irq_desc_has_action(desc) || irqd_is_per_cpu(data)) {
			raw_spin_unlock(&desc->lock);
			continue;
		}

		raw_spin_unlock(&desc->lock);
		/*
		 * A single irq may be mapped to multiple cpu's
		 * vector_irq[] (for example IOAPIC cluster mode).  In
		 * this case we have two possibilities:
		 *
		 * 1) the resulting affinity mask is empty; that is,
		 * the down'd cpu is the last cpu in the irq's
		 * affinity mask, or
		 *
		 * 2) the resulting affinity mask is no longer a
		 * subset of the online cpus but the affinity mask is
		 * not zero; that is, the down'd cpu is the last online
		 * cpu in a user set affinity mask.
		 */
		if (cpumask_empty(&affinity_new) ||
		    !cpumask_subset(&affinity_new, &online_new))
			this_count++;
	}
	/* No need to check any further. */
	if (!this_count)
		return 0;

	count = 0;
	for_each_online_cpu(cpu) {
		if (cpu == this_cpu)
			continue;
		/*
		 * We scan from FIRST_EXTERNAL_VECTOR to first system
		 * vector. If the vector is marked in the used vectors
		 * bitmap or an irq is assigned to it, we don't count
		 * it as available.
		 *
		 * As this is an inaccurate snapshot anyway, we can do
		 * this w/o holding vector_lock.
		 */
		for (vector = FIRST_EXTERNAL_VECTOR;
		     vector < first_system_vector; vector++) {
			if (!test_bit(vector, used_vectors) &&
			    IS_ERR_OR_NULL(per_cpu(vector_irq, cpu)[vector])) {
				if (++count == this_count)
					return 0;
			}
		}
	}

	if (count < this_count) {
		pr_warn("CPU %d disable failed: CPU has %u vectors assigned and there are only %u available.\n",
			this_cpu, this_count, count);
		return -ERANGE;
	}

	return 0;
}
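
/*
 * The hotplug path (e.g. native_cpu_disable()) calls the check above and
 * aborts the offline when it returns -ERANGE.
 */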

/* A cpu has been removed from cpu_online_mask.  Reset irq affinities. */
void fixup_irqs(void)
{
	unsigned int irr, vector;
	struct irq_desc *desc;
	struct irq_data *data;
	struct irq_chip *chip;

	irq_migrate_all_off_this_cpu();

	/*
	 * We can remove mdelay() and then send spurious interrupts to
	 * new cpu targets for all the irqs that were handled previously by
	 * this cpu. While it works, I have seen spurious interrupt messages
	 * (nothing wrong but still...).
	 *
	 * So for now, retain mdelay(1) and check the IRR and then send those
	 * interrupts to new targets as this cpu is already offlined...
	 */
	mdelay(1);

	/*
	 * We can walk the vector array of this cpu without holding
	 * vector_lock because the cpu is already marked !online, so
	 * nothing else will touch it.
	 */
	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
		if (IS_ERR_OR_NULL(__this_cpu_read(vector_irq[vector])))
			continue;

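		/*
		 * The local APIC's 256-bit IRR is exposed as eight 32-bit
		 * registers spaced 0x10 apart, so vector/32 selects the
		 * word and vector%32 the bit within it.
		 */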
		irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
		if (irr & (1 << (vector % 32))) {
			desc = __this_cpu_read(vector_irq[vector]);

			raw_spin_lock(&desc->lock);
			data = irq_desc_get_irq_data(desc);
			chip = irq_data_get_irq_chip(data);
			if (chip->irq_retrigger) {
				chip->irq_retrigger(data);
				__this_cpu_write(vector_irq[vector], VECTOR_RETRIGGERED);
			}
			raw_spin_unlock(&desc->lock);
		}
		if (__this_cpu_read(vector_irq[vector]) != VECTOR_RETRIGGERED)
			__this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
	}
}
#endif