/*
 * linux/arch/i386/kernel/irq.c
 *
 * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * This file contains the lowest level x86-specific interrupt
 * entry, irq-stacks and irq statistics code. All the remaining
 * irq logic is done by the generic kernel/irq/ code and
 * by the x86-specific irq controller code. (e.g. i8259.c and
 * io_apic.c.)
 */

#include <asm/uaccess.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/delay.h>

DEFINE_PER_CPU(irq_cpustat_t, irq_stat) ____cacheline_internodealigned_in_smp;
EXPORT_PER_CPU_SYMBOL(irq_stat);

#ifndef CONFIG_X86_LOCAL_APIC
/*
 * 'what should we do if we get a hw irq event on an illegal vector'.
 * each architecture has to answer this themselves.
 */
void ack_bad_irq(unsigned int irq)
{
        printk("unexpected IRQ trap at vector %02x\n", irq);
}
#endif

#ifdef CONFIG_4KSTACKS
/*
 * per-CPU IRQ handling contexts (thread information and stack)
 */
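/*
 * The union is laid out like a regular kernel stack: the thread_info
 * sits at the lowest addresses of the THREAD_SIZE area and the stack
 * grows down towards it from the top.
 */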
union irq_ctx {
        struct thread_info tinfo;
        u32 stack[THREAD_SIZE/sizeof(u32)];
};

static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly;
static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;
#endif

/*
 * do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
fastcall unsigned int do_IRQ(struct pt_regs *regs)
{
        /* high bit used in ret_from_ code */
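        /*
         * entry.S pushes the vector number one's-complemented, so the ~
         * below recovers the IRQ number; the stored value stays negative,
         * which is what lets interrupt frames be told apart from syscall
         * frames by the sign of orig_eax.
         */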
        int irq = ~regs->orig_eax;
        struct irq_desc *desc = irq_desc + irq;
#ifdef CONFIG_4KSTACKS
        union irq_ctx *curctx, *irqctx;
        u32 *isp;
#endif

        if (unlikely((unsigned)irq >= NR_IRQS)) {
                printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
                                        __FUNCTION__, irq);
                BUG();
        }

        irq_enter();
#ifdef CONFIG_DEBUG_STACKOVERFLOW
        /* Debugging check for stack overflow: is there less than 1KB free? */
        {
                long esp;

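                /*
                 * Masking %esp with THREAD_SIZE - 1 gives its offset within
                 * the current thread stack; once that offset drops below
                 * sizeof(struct thread_info) + STACK_WARN we are close to
                 * running into the thread_info at the bottom of the stack.
                 */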
                __asm__ __volatile__("andl %%esp,%0" :
                                        "=r" (esp) : "0" (THREAD_SIZE - 1));
                if (unlikely(esp < (sizeof(struct thread_info) + STACK_WARN))) {
                        printk("do_IRQ: stack overflow: %ld\n",
                                esp - sizeof(struct thread_info));
                        dump_stack();
                }
        }
#endif

#ifdef CONFIG_4KSTACKS

        curctx = (union irq_ctx *) current_thread_info();
        irqctx = hardirq_ctx[smp_processor_id()];

        /*
         * this is where we switch to the IRQ stack. However, if we are
         * already using the IRQ stack (because we interrupted a hardirq
         * handler) we can't do that and just have to keep using the
         * current stack (which is the irq stack already after all)
         */
        if (curctx != irqctx) {
                int arg1, arg2, arg3, ebx;

                /* build the stack frame on the IRQ stack */
                isp = (u32*) ((char*)irqctx + sizeof(*irqctx));
                irqctx->tinfo.task = curctx->tinfo.task;
                irqctx->tinfo.previous_esp = current_stack_pointer;

                /*
                 * Copy the softirq bits in preempt_count so that the
                 * softirq checks work in the hardirq context.
                 */
                irqctx->tinfo.preempt_count =
                        (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
                        (curctx->tinfo.preempt_count & SOFTIRQ_MASK);

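                /*
                 * %ebx carries the new stack pointer: xchgl switches %esp
                 * onto the IRQ stack for the duration of the call and movl
                 * switches it back.  irq, desc and regs are loaded into
                 * %eax, %edx and %ecx, where desc->handle_irq() (reached
                 * indirectly through %edi) expects its arguments under the
                 * kernel's i386 register-parameter calling convention.
                 */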
                asm volatile(
                        "       xchgl   %%ebx,%%esp     \n"
                        "       call    *%%edi          \n"
                        "       movl    %%ebx,%%esp     \n"
                        : "=a" (arg1), "=d" (arg2), "=c" (arg3), "=b" (ebx)
                        :  "0" (irq),   "1" (desc),  "2" (regs),  "3" (isp),
                           "D" (desc->handle_irq)
                        : "memory", "cc"
                );
        } else
#endif
                desc->handle_irq(irq, desc, regs);

        irq_exit();

        return 1;
}

#ifdef CONFIG_4KSTACKS

/*
 * These should really be __section__(".bss.page_aligned") as well, but
 * gcc's 3.0 and earlier don't handle that correctly.
 */
static char softirq_stack[NR_CPUS * THREAD_SIZE]
                __attribute__((__aligned__(THREAD_SIZE)));

static char hardirq_stack[NR_CPUS * THREAD_SIZE]
                __attribute__((__aligned__(THREAD_SIZE)));

/*
 * allocate per-cpu stacks for hardirq and for softirq processing
 */
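/*
 * The hardirq context gets preempt_count preset to HARDIRQ_OFFSET:
 * current_thread_info() follows %esp, so while we run on the hardirq
 * stack it is this copy of preempt_count that in_irq()/in_interrupt()
 * see.  The softirq context starts at zero; softirq accounting is done
 * by __do_softirq() itself.
 */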
void irq_ctx_init(int cpu)
{
        union irq_ctx *irqctx;

        if (hardirq_ctx[cpu])
                return;

        irqctx = (union irq_ctx*) &hardirq_stack[cpu*THREAD_SIZE];
        irqctx->tinfo.task = NULL;
        irqctx->tinfo.exec_domain = NULL;
        irqctx->tinfo.cpu = cpu;
        irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
        irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);

        hardirq_ctx[cpu] = irqctx;

        irqctx = (union irq_ctx*) &softirq_stack[cpu*THREAD_SIZE];
        irqctx->tinfo.task = NULL;
        irqctx->tinfo.exec_domain = NULL;
        irqctx->tinfo.cpu = cpu;
        irqctx->tinfo.preempt_count = 0;
        irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);

        softirq_ctx[cpu] = irqctx;

        printk("CPU %u irqstacks, hard=%p soft=%p\n",
                cpu, hardirq_ctx[cpu], softirq_ctx[cpu]);
}

void irq_ctx_exit(int cpu)
{
        hardirq_ctx[cpu] = NULL;
}

extern asmlinkage void __do_softirq(void);

asmlinkage void do_softirq(void)
{
        unsigned long flags;
        struct thread_info *curctx;
        union irq_ctx *irqctx;
        u32 *isp;

        if (in_interrupt())
                return;

        local_irq_save(flags);

        if (local_softirq_pending()) {
                curctx = current_thread_info();
                irqctx = softirq_ctx[smp_processor_id()];
                irqctx->tinfo.task = curctx->task;
                irqctx->tinfo.previous_esp = current_stack_pointer;

                /* build the stack frame on the softirq stack */
                isp = (u32*) ((char*)irqctx + sizeof(*irqctx));

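                /*
                 * Same trick as in do_IRQ(): %ebx carries the softirq stack
                 * pointer, xchgl moves %esp onto it for the duration of
                 * __do_softirq() and movl restores the original stack.
                 */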
                asm volatile(
                        "       xchgl   %%ebx,%%esp     \n"
                        "       call    __do_softirq    \n"
                        "       movl    %%ebx,%%esp     \n"
                        : "=b"(isp)
                        : "0"(isp)
                        : "memory", "cc", "edx", "ecx", "eax"
                );
                /*
                 * Shouldn't happen, we returned above if in_interrupt():
                 */
                WARN_ON_ONCE(softirq_count());
        }

        local_irq_restore(flags);
}

EXPORT_SYMBOL(do_softirq);
#endif

/*
 * Interrupt statistics:
 */

atomic_t irq_err_count;

/*
 * /proc/interrupts printing:
 */

int show_interrupts(struct seq_file *p, void *v)
{
        int i = *(loff_t *) v, j;
        struct irqaction *action;
        unsigned long flags;

        if (i == 0) {
                seq_printf(p, " ");
                for_each_online_cpu(j)
                        seq_printf(p, "CPU%-8d", j);
                seq_putc(p, '\n');
        }

        if (i < NR_IRQS) {
                spin_lock_irqsave(&irq_desc[i].lock, flags);
                action = irq_desc[i].action;
                if (!action)
                        goto skip;
                seq_printf(p, "%3d: ", i);
#ifndef CONFIG_SMP
                seq_printf(p, "%10u ", kstat_irqs(i));
#else
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
#endif
                seq_printf(p, " %8s", irq_desc[i].chip->name);
                seq_printf(p, "-%s", handle_irq_name(irq_desc[i].handle_irq));
                seq_printf(p, " %s", action->name);

                for (action = action->next; action; action = action->next)
                        seq_printf(p, ", %s", action->name);

                seq_putc(p, '\n');
skip:
                spin_unlock_irqrestore(&irq_desc[i].lock, flags);
        } else if (i == NR_IRQS) {
                seq_printf(p, "NMI: ");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", nmi_count(j));
                seq_putc(p, '\n');
#ifdef CONFIG_X86_LOCAL_APIC
                seq_printf(p, "LOC: ");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ",
                                per_cpu(irq_stat, j).apic_timer_irqs);
                seq_putc(p, '\n');
#endif
                seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
#if defined(CONFIG_X86_IO_APIC)
                seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count));
#endif
        }
        return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
#include <mach_apic.h>

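/*
 * Used on CPU hot-unplug: 'map' holds the CPUs that stay online.  Each
 * IRQ's affinity is re-applied restricted to that mask; if the
 * intersection would be empty, the old affinity is broken and the IRQ
 * is spread over all of 'map' so the outgoing CPU stops receiving it.
 */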
void fixup_irqs(cpumask_t map)
{
        unsigned int irq;
        static int warned;

        for (irq = 0; irq < NR_IRQS; irq++) {
                cpumask_t mask;
                if (irq == 2)
                        continue;

                cpus_and(mask, irq_desc[irq].affinity, map);
                if (any_online_cpu(mask) == NR_CPUS) {
                        printk("Breaking affinity for irq %i\n", irq);
                        mask = map;
                }
                if (irq_desc[irq].chip->set_affinity)
                        irq_desc[irq].chip->set_affinity(irq, mask);
                else if (irq_desc[irq].action && !(warned++))
                        printk("Cannot set affinity for irq %i\n", irq);
        }

#if 0
        barrier();
        /* Ingo Molnar says: "after the IO-APIC masks have been redirected
           [note the nop - the interrupt-enable boundary on x86 is two
           instructions from sti] - to flush out pending hardirqs and
           IPIs. After this point nothing is supposed to reach this CPU." */
        __asm__ __volatile__("sti; nop; cli");
        barrier();
#else
        /* That doesn't seem sufficient.  Give it 1ms. */
        local_irq_enable();
        mdelay(1);
        local_irq_disable();
#endif
}
#endif