arch/i386/kernel/i8259.c (Linux 2.6.12-rc2)
#include <linux/config.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
#include <linux/sysdev.h>
#include <linux/bitops.h>

#include <asm/8253pit.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/timer.h>
#include <asm/pgtable.h>
#include <asm/delay.h>
#include <asm/desc.h>
#include <asm/apic.h>
#include <asm/arch_hooks.h>
#include <asm/i8259.h>

#include <linux/irq.h>

#include <io_ports.h>

/*
 * This is the 'legacy' 8259A Programmable Interrupt Controller,
 * present in the majority of PC/AT boxes, plus some generic
 * x86-specific bits (to the extent that generic specifics make
 * any sense at all).
 * This file should become arch/i386/kernel/irq.c once the old
 * irq.c moves to arch-independent land.
 */

DEFINE_SPINLOCK(i8259A_lock);

static void end_8259A_irq (unsigned int irq)
{
        if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)) &&
                                                        irq_desc[irq].action)
                enable_8259A_irq(irq);
}

#define shutdown_8259A_irq	disable_8259A_irq

static void mask_and_ack_8259A(unsigned int);

unsigned int startup_8259A_irq(unsigned int irq)
{
        enable_8259A_irq(irq);
        return 0; /* never anything pending */
}

static struct hw_interrupt_type i8259A_irq_type = {
        .typename = "XT-PIC",
        .startup = startup_8259A_irq,
        .shutdown = shutdown_8259A_irq,
        .enable = enable_8259A_irq,
        .disable = disable_8259A_irq,
        .ack = mask_and_ack_8259A,
        .end = end_8259A_irq,
};
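
/*
 * Roughly how the generic IRQ layer of this kernel drives these
 * callbacks (stated here for orientation): ->startup() runs when the
 * first handler for the IRQ is installed, ->ack() is called on every
 * interrupt before the action handlers run, ->end() afterwards, and
 * ->shutdown() when the last handler is freed; ->enable()/->disable()
 * back the nested enable_irq()/disable_irq() calls.
 */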

/*
 * 8259A PIC functions to handle ISA devices:
 */

/*
 * This contains the irq mask for both 8259A irq controllers.
 */
unsigned int cached_irq_mask = 0xffff;
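
/*
 * The cached_master_mask and cached_slave_mask used below are byte-wise
 * views of this word; <asm/i8259.h> defines them roughly as (sketch
 * only, shown here for readability):
 *
 *	#define __byte(x,y)		(((unsigned char *)&(y))[x])
 *	#define cached_master_mask	(__byte(0, cached_irq_mask))
 *	#define cached_slave_mask	(__byte(1, cached_irq_mask))
 */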

/*
 * Not all IRQs can be routed through the IO-APIC, e.g. on certain (older)
 * boards the timer interrupt is not really connected to any IO-APIC pin,
 * it's fed to the master 8259A's IR0 line only.
 *
 * Any '1' bit in this mask means the IRQ is routed through the IO-APIC.
 * This 'mixed mode' IRQ handling costs nothing because it's only used
 * at IRQ setup time.
 */
unsigned long io_apic_irqs;
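
/*
 * For reference, the IO-APIC setup code consults this mask with a macro
 * of roughly this shape (it lives in <asm/hw_irq.h>; shown here only as
 * a sketch):
 *
 *	#define IO_APIC_IRQ(x)	(((x) >= 16) || ((1 << (x)) & io_apic_irqs))
 */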

void disable_8259A_irq(unsigned int irq)
{
        unsigned int mask = 1 << irq;
        unsigned long flags;

        spin_lock_irqsave(&i8259A_lock, flags);
        cached_irq_mask |= mask;
        if (irq & 8)
                outb(cached_slave_mask, PIC_SLAVE_IMR);
        else
                outb(cached_master_mask, PIC_MASTER_IMR);
        spin_unlock_irqrestore(&i8259A_lock, flags);
}

void enable_8259A_irq(unsigned int irq)
{
        unsigned int mask = ~(1 << irq);
        unsigned long flags;

        spin_lock_irqsave(&i8259A_lock, flags);
        cached_irq_mask &= mask;
        if (irq & 8)
                outb(cached_slave_mask, PIC_SLAVE_IMR);
        else
                outb(cached_master_mask, PIC_MASTER_IMR);
        spin_unlock_irqrestore(&i8259A_lock, flags);
}

int i8259A_irq_pending(unsigned int irq)
{
        unsigned int mask = 1<<irq;
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&i8259A_lock, flags);
        if (irq < 8)
                ret = inb(PIC_MASTER_CMD) & mask;
        else
                ret = inb(PIC_SLAVE_CMD) & (mask >> 8);
        spin_unlock_irqrestore(&i8259A_lock, flags);

        return ret;
}
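
/*
 * Note: the reads from PIC_MASTER_CMD/PIC_SLAVE_CMD above return the
 * IRR (pending requests).  The 8259A's read-back register defaults to
 * the IRR after initialization, and i8259A_irq_real() below always
 * switches it back to the IRR after temporarily peeking at the ISR.
 */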

void make_8259A_irq(unsigned int irq)
{
        disable_irq_nosync(irq);
        io_apic_irqs &= ~(1<<irq);
        irq_desc[irq].handler = &i8259A_irq_type;
        enable_irq(irq);
}

/*
 * This function is expected to be called rarely.  Switching between
 * 8259A registers is slow.
 * The caller must hold the irq controller spinlock (i8259A_lock)
 * around this call.
 */
static inline int i8259A_irq_real(unsigned int irq)
{
        int value;
        int irqmask = 1<<irq;

        if (irq < 8) {
                outb(0x0B,PIC_MASTER_CMD);	/* ISR register */
                value = inb(PIC_MASTER_CMD) & irqmask;
                outb(0x0A,PIC_MASTER_CMD);	/* back to the IRR register */
                return value;
        }
        outb(0x0B,PIC_SLAVE_CMD);	/* ISR register */
        value = inb(PIC_SLAVE_CMD) & (irqmask >> 8);
        outb(0x0A,PIC_SLAVE_CMD);	/* back to the IRR register */
        return value;
}
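
/*
 * Illustrative call pattern (hypothetical caller), per the locking rule
 * stated above:
 *
 *	spin_lock_irqsave(&i8259A_lock, flags);
 *	in_service = i8259A_irq_real(irq);
 *	spin_unlock_irqrestore(&i8259A_lock, flags);
 */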

/*
 * Careful! The 8259A is a fragile beast, it pretty
 * much _has_ to be done exactly like this (mask it
 * first, _then_ send the EOI), and the order of EOI
 * to the two 8259s is important!
 */
static void mask_and_ack_8259A(unsigned int irq)
{
        unsigned int irqmask = 1 << irq;
        unsigned long flags;

        spin_lock_irqsave(&i8259A_lock, flags);
        /*
         * Lightweight spurious IRQ detection. We do not want
         * to overdo spurious IRQ handling - it's usually a sign
         * of hardware problems, so we only do the checks we can
         * do without slowing down good hardware unnecessarily.
         *
         * Note that IRQ7 and IRQ15 (the two spurious IRQs
         * usually resulting from the 8259A-1|2 PICs) occur
         * even if the IRQ is masked in the 8259A. Thus we
         * can check spurious 8259A IRQs without doing the
         * quite slow i8259A_irq_real() call for every IRQ.
         * This does not cover 100% of spurious interrupts,
         * but should be enough to warn the user that there
         * is something bad going on ...
         */
        if (cached_irq_mask & irqmask)
                goto spurious_8259A_irq;
        cached_irq_mask |= irqmask;

handle_real_irq:
        if (irq & 8) {
                inb(PIC_SLAVE_IMR);	/* DUMMY - (do we need this?) */
                outb(cached_slave_mask, PIC_SLAVE_IMR);
                outb(0x60+(irq&7),PIC_SLAVE_CMD);	/* 'Specific EOI' to slave */
                outb(0x60+PIC_CASCADE_IR,PIC_MASTER_CMD); /* 'Specific EOI' to master-IRQ2 */
        } else {
                inb(PIC_MASTER_IMR);	/* DUMMY - (do we need this?) */
                outb(cached_master_mask, PIC_MASTER_IMR);
                outb(0x60+irq,PIC_MASTER_CMD);	/* 'Specific EOI' to master */
        }
        spin_unlock_irqrestore(&i8259A_lock, flags);
        return;

spurious_8259A_irq:
        /*
         * this is the slow path - should happen rarely.
         */
        if (i8259A_irq_real(irq))
                /*
                 * oops, the IRQ _is_ in service according to the
                 * 8259A - not spurious, go handle it.
                 */
                goto handle_real_irq;

        {
                static int spurious_irq_mask;
                /*
                 * At this point we can be sure the IRQ is spurious,
                 * let's ACK and report it. [once per IRQ]
                 */
                if (!(spurious_irq_mask & irqmask)) {
                        printk(KERN_DEBUG "spurious 8259A interrupt: IRQ%d.\n", irq);
                        spurious_irq_mask |= irqmask;
                }
                atomic_inc(&irq_err_count);
                /*
                 * Theoretically we do not have to handle this IRQ,
                 * but in Linux this does not cause problems and is
                 * simpler for us.
                 */
                goto handle_real_irq;
        }
}
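
/*
 * On the 'Specific EOI' writes above: 0x60 is OCW2 with the EOI and SL
 * bits set, i.e. "specific EOI", and the low three bits select which
 * in-service level to clear.  A slave IRQ needs two EOIs: one to the
 * slave for its own level, then one to the master for the cascade line
 * (IR2), in that order.
 */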

static char irq_trigger[2];
/*
 * ELCR registers (0x4d0, 0x4d1) control edge/level triggering of IRQs.
 */
static void restore_ELCR(char *trigger)
{
        outb(trigger[0], 0x4d0);
        outb(trigger[1], 0x4d1);
}

static void save_ELCR(char *trigger)
{
        /* IRQ 0,1,2,8,13 are marked as reserved */
        trigger[0] = inb(0x4d0) & 0xF8;
        trigger[1] = inb(0x4d1) & 0xDE;
}
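
/*
 * Purely illustrative helper (not used by this file): how a single
 * IRQ's trigger mode could be read from the ELCR.  Bit n of port 0x4d0
 * covers IRQ n, bit n of port 0x4d1 covers IRQ 8+n; a set bit means
 * level-triggered, a clear bit means edge-triggered.
 */
static inline int ELCR_irq_is_level(unsigned int irq)
{
        unsigned int port = 0x4d0 + (irq >> 3);	/* 0x4d0 for IRQ 0-7, 0x4d1 for IRQ 8-15 */

        return (inb(port) >> (irq & 7)) & 1;
}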

static int i8259A_resume(struct sys_device *dev)
{
        init_8259A(0);
        restore_ELCR(irq_trigger);
        return 0;
}

static int i8259A_suspend(struct sys_device *dev, u32 state)
{
        save_ELCR(irq_trigger);
        return 0;
}

static struct sysdev_class i8259_sysdev_class = {
        set_kset_name("i8259"),
        .suspend = i8259A_suspend,
        .resume = i8259A_resume,
};

static struct sys_device device_i8259A = {
        .id	= 0,
        .cls	= &i8259_sysdev_class,
};

static int __init i8259A_init_sysfs(void)
{
        int error = sysdev_class_register(&i8259_sysdev_class);
        if (!error)
                error = sysdev_register(&device_i8259A);
        return error;
}

device_initcall(i8259A_init_sysfs);

void init_8259A(int auto_eoi)
{
        unsigned long flags;

        spin_lock_irqsave(&i8259A_lock, flags);

        outb(0xff, PIC_MASTER_IMR);	/* mask all of 8259A-1 */
        outb(0xff, PIC_SLAVE_IMR);	/* mask all of 8259A-2 */

        /*
         * outb_p - this has to work on a wide range of PC hardware.
         */
        outb_p(0x11, PIC_MASTER_CMD);	/* ICW1: select 8259A-1 init */
        outb_p(0x20 + 0, PIC_MASTER_IMR);	/* ICW2: 8259A-1 IR0-7 mapped to 0x20-0x27 */
        outb_p(1U << PIC_CASCADE_IR, PIC_MASTER_IMR);	/* 8259A-1 (the master) has a slave on IR2 */
        if (auto_eoi)	/* master does Auto EOI */
                outb_p(MASTER_ICW4_DEFAULT | PIC_ICW4_AEOI, PIC_MASTER_IMR);
        else		/* master expects normal EOI */
                outb_p(MASTER_ICW4_DEFAULT, PIC_MASTER_IMR);

        outb_p(0x11, PIC_SLAVE_CMD);	/* ICW1: select 8259A-2 init */
        outb_p(0x20 + 8, PIC_SLAVE_IMR);	/* ICW2: 8259A-2 IR0-7 mapped to 0x28-0x2f */
        outb_p(PIC_CASCADE_IR, PIC_SLAVE_IMR);	/* 8259A-2 is a slave on master's IR2 */
        outb_p(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR); /* (slave's support for AEOI in flat mode is to be investigated) */
        if (auto_eoi)
                /*
                 * in AEOI mode we just have to mask the interrupt
                 * when acking.
                 */
                i8259A_irq_type.ack = disable_8259A_irq;
        else
                i8259A_irq_type.ack = mask_and_ack_8259A;

        udelay(100);		/* wait for 8259A to initialize */

        outb(cached_master_mask, PIC_MASTER_IMR); /* restore master IRQ mask */
        outb(cached_slave_mask, PIC_SLAVE_IMR);	  /* restore slave IRQ mask */

        spin_unlock_irqrestore(&i8259A_lock, flags);
}
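
/*
 * A short legend for the init sequence above (standard 8259A ICW
 * programming, stated here only for readability):
 *   ICW1 0x11 - start initialization, edge-triggered, cascade mode,
 *               ICW4 will follow;
 *   ICW2      - vector base (0x20 for the master, 0x28 for the slave);
 *   ICW3      - cascade wiring: the master gets a bitmask of the IR
 *               line carrying the slave (bit 2), the slave gets its
 *               cascade identity (2);
 *   ICW4      - 8086/88 mode, optionally with the AEOI bit added.
 */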

/*
 * Note that on a 486, we don't want to do a SIGFPE on an irq13
 * as the irq is unreliable, and exception 16 works correctly
 * (i.e. as explained in the intel literature). On a 386, you
 * can't use exception 16 due to bad IBM design, so we have to
 * rely on the less exact irq13.
 *
 * Careful.. Not only is IRQ13 unreliable, but it also leads
 * to races. IBM designers who came up with it should be shot.
 */


static irqreturn_t math_error_irq(int cpl, void *dev_id, struct pt_regs *regs)
{
        extern void math_error(void __user *);
        outb(0,0xF0);
        if (ignore_fpu_irq || !boot_cpu_data.hard_math)
                return IRQ_NONE;
        math_error((void __user *)regs->eip);
        return IRQ_HANDLED;
}

/*
 * New motherboards sometimes make IRQ 13 be a PCI interrupt,
 * so allow interrupt sharing.
 */
static struct irqaction fpu_irq = { math_error_irq, 0, CPU_MASK_NONE, "fpu", NULL, NULL };
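/*
 * The initializer above is positional; in this kernel's struct irqaction
 * the leading fields are (roughly) handler, flags, CPU mask, name,
 * dev_id and next, so this registers math_error_irq with no flags and
 * no dev_id.
 */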

void __init init_ISA_irqs (void)
{
        int i;

#ifdef CONFIG_X86_LOCAL_APIC
        init_bsp_APIC();
#endif
        init_8259A(0);

        for (i = 0; i < NR_IRQS; i++) {
                irq_desc[i].status = IRQ_DISABLED;
                irq_desc[i].action = NULL;
                irq_desc[i].depth = 1;

                if (i < 16) {
                        /*
                         * 16 old-style INTA-cycle interrupts:
                         */
                        irq_desc[i].handler = &i8259A_irq_type;
                } else {
                        /*
                         * 'high' PCI IRQs filled in on demand
                         */
                        irq_desc[i].handler = &no_irq_type;
                }
        }
}

void __init init_IRQ(void)
{
        int i;

        /* all the setup before the call gates are initialised */
        pre_intr_init_hook();

        /*
         * Cover the whole vector space; no vector can escape
         * us. (some of these will be overridden and become
         * 'special' SMP interrupts)
         */
        for (i = 0; i < (NR_VECTORS - FIRST_EXTERNAL_VECTOR); i++) {
                int vector = FIRST_EXTERNAL_VECTOR + i;
                if (i >= NR_IRQS)
                        break;
                if (vector != SYSCALL_VECTOR)
                        set_intr_gate(vector, interrupt[i]);
        }

        /* setup after call gates are initialised (usually add in
         * the architecture specific gates)
         */
        intr_init_hook();

        /*
         * Set the clock to HZ Hz; we already have a valid
         * vector now:
         */
        setup_pit_timer();

        /*
         * External FPU? Set up irq13 if so, for
         * original braindamaged IBM FERR coupling.
         */
        if (boot_cpu_data.hard_math && !cpu_has_fpu)
                setup_irq(FPU_IRQ, &fpu_irq);

        irq_ctx_init(smp_processor_id());
}