/*
 * Common interrupt code for 32 and 64 bit
 */
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
#include <linux/smp.h>
#include <linux/ftrace.h>
#include <linux/delay.h>
#include <linux/export.h>

#include <asm/apic.h>
#include <asm/io_apic.h>
#include <asm/irq.h>
#include <asm/idle.h>
#include <asm/mce.h>
#include <asm/hw_irq.h>
#include <asm/trace/irq_vectors.h>

atomic_t irq_err_count;

/* Function pointer for generic interrupt vector handling */
void (*x86_platform_ipi_callback)(void) = NULL;
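/*
 * A minimal, illustrative sketch (not part of this file): a platform driver
 * installs its handler by assigning the pointer, e.g. from a hypothetical
 * my_driver_init(). The callback then runs in hard interrupt context from
 * smp_x86_platform_ipi() whenever X86_PLATFORM_IPI_VECTOR fires, so it must
 * not sleep.
 *
 *	static void my_platform_ipi_handler(void)	// hypothetical
 *	{
 *		// acknowledge/handle the platform event; no sleeping here
 *	}
 *
 *	static int __init my_driver_init(void)		// hypothetical
 *	{
 *		x86_platform_ipi_callback = my_platform_ipi_handler;
 *		return 0;
 *	}
 */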
/*
 * 'what should we do if we get a hw irq event on an illegal vector'.
 * each architecture has to answer this themselves.
 */
void ack_bad_irq(unsigned int irq)
{
	if (printk_ratelimit())
		pr_err("unexpected IRQ trap at vector %02x\n", irq);

	/*
	 * Currently unexpected vectors happen only on SMP and APIC.
	 * We _must_ ack these because every local APIC has only N
	 * irq slots per priority level, and a 'hanging, unacked' IRQ
	 * holds up an irq slot - in excessive cases (when multiple
	 * unexpected vectors occur) that might lock up the APIC
	 * completely.
	 * But only ack when the APIC is enabled -AK
	 */
	ack_APIC_irq();
}
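/*
 * Note: ack_bad_irq() is invoked by the generic irq core (handle_bad_irq())
 * when an interrupt arrives on a vector with no valid handler, so the rate
 * limit above keeps a misbehaving device from flooding the log.
 */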
#define irq_stats(x)		(&per_cpu(irq_stat, x))
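/*
 * irq_stat is the per-CPU irq_cpustat_t (declared in <asm/hardirq.h>) that
 * holds the architecture's interrupt counters; irq_stats(x) yields a pointer
 * to CPU x's instance.
 */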
/*
 * /proc/interrupts printing for arch specific interrupts
 */
int arch_show_interrupts(struct seq_file *p, int prec)
{
	int j;

	seq_printf(p, "%*s: ", prec, "NMI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->__nmi_count);
	seq_printf(p, "  Non-maskable interrupts\n");
#ifdef CONFIG_X86_LOCAL_APIC
	seq_printf(p, "%*s: ", prec, "LOC");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->apic_timer_irqs);
	seq_printf(p, "  Local timer interrupts\n");

	seq_printf(p, "%*s: ", prec, "SPU");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_spurious_count);
	seq_printf(p, "  Spurious interrupts\n");
	seq_printf(p, "%*s: ", prec, "PMI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->apic_perf_irqs);
	seq_printf(p, "  Performance monitoring interrupts\n");
	seq_printf(p, "%*s: ", prec, "IWI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->apic_irq_work_irqs);
	seq_printf(p, "  IRQ work interrupts\n");
	seq_printf(p, "%*s: ", prec, "RTR");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->icr_read_retry_count);
	seq_printf(p, "  APIC ICR read retries\n");
#endif
	if (x86_platform_ipi_callback) {
		seq_printf(p, "%*s: ", prec, "PLT");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", irq_stats(j)->x86_platform_ipis);
		seq_printf(p, "  Platform interrupts\n");
	}
	seq_printf(p, "%*s: ", prec, "RES");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count);
	seq_printf(p, "  Rescheduling interrupts\n");
	seq_printf(p, "%*s: ", prec, "CAL");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_call_count -
					irq_stats(j)->irq_tlb_count);
	seq_printf(p, "  Function call interrupts\n");
	seq_printf(p, "%*s: ", prec, "TLB");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count);
	seq_printf(p, "  TLB shootdowns\n");
#ifdef CONFIG_X86_THERMAL_VECTOR
	seq_printf(p, "%*s: ", prec, "TRM");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_thermal_count);
	seq_printf(p, "  Thermal event interrupts\n");
#endif
#ifdef CONFIG_X86_MCE_THRESHOLD
	seq_printf(p, "%*s: ", prec, "THR");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_threshold_count);
	seq_printf(p, "  Threshold APIC interrupts\n");
#endif
#ifdef CONFIG_X86_MCE
	seq_printf(p, "%*s: ", prec, "MCE");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(mce_exception_count, j));
	seq_printf(p, "  Machine check exceptions\n");
	seq_printf(p, "%*s: ", prec, "MCP");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
	seq_printf(p, "  Machine check polls\n");
#endif
	seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
#if defined(CONFIG_X86_IO_APIC)
	seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
#endif
	return 0;
}
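/*
 * For orientation, the rows rendered above appear in /proc/interrupts roughly
 * like the following two-CPU excerpt (the counter values are made up):
 *
 *	NMI:          0          0   Non-maskable interrupts
 *	LOC:     123456     654321   Local timer interrupts
 *	...
 *	ERR:          0
 */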
/*
 * /proc/stat helpers
 */
u64 arch_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = irq_stats(cpu)->__nmi_count;

#ifdef CONFIG_X86_LOCAL_APIC
	sum += irq_stats(cpu)->apic_timer_irqs;
	sum += irq_stats(cpu)->irq_spurious_count;
	sum += irq_stats(cpu)->apic_perf_irqs;
	sum += irq_stats(cpu)->apic_irq_work_irqs;
	sum += irq_stats(cpu)->icr_read_retry_count;
#endif
	if (x86_platform_ipi_callback)
		sum += irq_stats(cpu)->x86_platform_ipis;
	sum += irq_stats(cpu)->irq_resched_count;
	sum += irq_stats(cpu)->irq_call_count;
#ifdef CONFIG_X86_THERMAL_VECTOR
	sum += irq_stats(cpu)->irq_thermal_count;
#endif
#ifdef CONFIG_X86_MCE_THRESHOLD
	sum += irq_stats(cpu)->irq_threshold_count;
#endif
#ifdef CONFIG_X86_MCE
	sum += per_cpu(mce_exception_count, cpu);
	sum += per_cpu(mce_poll_count, cpu);
#endif
	return sum;
}
u64 arch_irq_stat(void)
{
	u64 sum = atomic_read(&irq_err_count);

	return sum;
}
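/*
 * Both helpers above are consumed by fs/proc/stat.c: arch_irq_stat_cpu()
 * contributes to each CPU's column of the "intr" line, and arch_irq_stat()
 * adds the global error count to the total.
 */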
/*
 * do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	/* high bit used in ret_from_ code  */
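	/*
	 * The entry stubs push the vector number onto the stack ones'
	 * complemented, i.e. as a negative value, which is how interrupt
	 * frames are told apart from syscall frames (where orig_ax holds
	 * the non-negative syscall number). The ~ below recovers the
	 * original vector.
	 */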
	unsigned vector = ~regs->orig_ax;
	unsigned irq;

	irq_enter();
	exit_idle();

	irq = __this_cpu_read(vector_irq[vector]);

	if (!handle_irq(irq, regs)) {
		ack_APIC_irq();

		if (printk_ratelimit())
			pr_emerg("%s: %d.%d No irq handler for vector (irq %d)\n",
				__func__, smp_processor_id(), vector, irq);
	}

	irq_exit();

	set_irq_regs(old_regs);
	return 1;
}
/*
 * Handler for X86_PLATFORM_IPI_VECTOR.
 */
void __smp_x86_platform_ipi(void)
{
	inc_irq_stat(x86_platform_ipis);

	if (x86_platform_ipi_callback)
		x86_platform_ipi_callback();
}
void smp_x86_platform_ipi(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	entering_ack_irq();
	__smp_x86_platform_ipi();
	exiting_irq();
	set_irq_regs(old_regs);
}
#ifdef CONFIG_HAVE_KVM
/*
 * Handler for POSTED_INTERRUPT_VECTOR.
 */
void smp_kvm_posted_intr_ipi(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	ack_APIC_irq();

	irq_enter();

	exit_idle();

	inc_irq_stat(kvm_posted_intr_ipis);

	irq_exit();

	set_irq_regs(old_regs);
}
#endif
void smp_trace_x86_platform_ipi(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	entering_ack_irq();
	trace_x86_platform_ipi_entry(X86_PLATFORM_IPI_VECTOR);
	__smp_x86_platform_ipi();
	trace_x86_platform_ipi_exit(X86_PLATFORM_IPI_VECTOR);
	exiting_irq();
	set_irq_regs(old_regs);
}

EXPORT_SYMBOL_GPL(vector_used_by_percpu_irq);
#ifdef CONFIG_HOTPLUG_CPU
/* A cpu has been removed from cpu_online_mask.  Reset irq affinities. */
void fixup_irqs(void)
{
	unsigned int irq, vector;
	static int warned;
	struct irq_desc *desc;
	struct irq_data *data;
	struct irq_chip *chip;

	for_each_irq_desc(irq, desc) {
		int break_affinity = 0;
		int set_affinity = 1;
		const struct cpumask *affinity;

		if (!desc)
			continue;
		if (irq == 2)
			continue;
		/* interrupts are disabled at this point */
		raw_spin_lock(&desc->lock);

		data = irq_desc_get_irq_data(desc);
		affinity = data->affinity;
		if (!irq_has_action(irq) || irqd_is_per_cpu(data) ||
		    cpumask_subset(affinity, cpu_online_mask)) {
			raw_spin_unlock(&desc->lock);
			continue;
		}
		/*
		 * Complete the irq move. This cpu is going down and for
		 * non intr-remapping case, we can't wait till this interrupt
		 * arrives at this cpu before completing the irq move.
		 */
		irq_force_complete_move(irq);

		if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
			break_affinity = 1;
			affinity = cpu_online_mask;
		}
		chip = irq_data_get_irq_chip(data);
		if (!irqd_can_move_in_process_context(data) && chip->irq_mask)
			chip->irq_mask(data);

		if (chip->irq_set_affinity)
			chip->irq_set_affinity(data, affinity, true);
		else if (!(warned++))
			set_affinity = 0;
		/*
		 * We unmask if the irq was not marked masked by the
		 * core code. That respects the lazy irq disable
		 * behaviour.
		 */
		if (!irqd_can_move_in_process_context(data) &&
		    !irqd_irq_masked(data) && chip->irq_unmask)
			chip->irq_unmask(data);

		raw_spin_unlock(&desc->lock);

		if (break_affinity && set_affinity)
			pr_notice("Broke affinity for irq %i\n", irq);
		else if (!set_affinity)
			pr_notice("Cannot set affinity for irq %i\n", irq);
	}
	/*
	 * We can remove mdelay() and then send spurious interrupts to
	 * new cpu targets for all the irqs that were handled previously by
	 * this cpu. While it works, I have seen spurious interrupt messages
	 * (nothing wrong but still...).
	 *
	 * So for now, retain mdelay(1) and check the IRR and then send those
	 * interrupts to new targets as this cpu is already offlined...
	 */
	mdelay(1);
	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
		unsigned int irr;

		if (__this_cpu_read(vector_irq[vector]) < 0)
			continue;
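		/*
		 * The local APIC's 256-bit Interrupt Request Register (IRR)
		 * is exposed as eight 32-bit words spaced 0x10 apart in the
		 * register map, so vector / 32 selects the word and
		 * vector % 32 the bit within it.
		 */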
		irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
		if (irr & (1 << (vector % 32))) {
			irq = __this_cpu_read(vector_irq[vector]);

			desc = irq_to_desc(irq);
			data = irq_desc_get_irq_data(desc);
			chip = irq_data_get_irq_chip(data);
			raw_spin_lock(&desc->lock);
			if (chip->irq_retrigger)
				chip->irq_retrigger(data);
			raw_spin_unlock(&desc->lock);
		}
		__this_cpu_write(vector_irq[vector], -1);
	}
}
#endif
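/*
 * fixup_irqs() runs on the CPU being taken offline, from the CPU-hotplug
 * disable path, with interrupts already disabled. By the time it returns,
 * every irq formerly routed to this CPU either targets an online CPU or has
 * been retriggered so that it fires on its new target.
 */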