/*
 * Intel IO-APIC support for multi-Pentium hosts.
 *
 * Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo
 *
 * Many thanks to Stig Venaas for trying out countless experimental
 * patches and reporting/debugging problems patiently!
 *
 * (c) 1999, Multiple IO-APIC support, developed by
 * Ken-ichi Yaku <yaku@css1.kbnes.nec.co.jp> and
 * Hidemi Kishimoto <kisimoto@css1.kbnes.nec.co.jp>,
 * further tested and cleaned up by Zach Brown <zab@redhat.com>
 * and Ingo Molnar <mingo@redhat.com>
 *
 * Fixes
 * Maciej W. Rozycki	:	Bits for genuine 82489DX APICs;
 *				thanks to Eric Gilmore
 *				and Rolf G. Tews
 *				for testing these extensively
 * Paul Diefenbaugh	:	Added full ACPI support
 */

#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/mc146818rtc.h>
#include <linux/compiler.h>
#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/sysdev.h>
#include <linux/msi.h>
#include <linux/htirq.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/jiffies.h>	/* time_after() */
#include <linux/slab.h>
#ifdef CONFIG_ACPI
#include <acpi/acpi_bus.h>
#endif
#include <linux/bootmem.h>
#include <linux/dmar.h>
#include <linux/hpet.h>

#include <asm/idle.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/cpu.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/acpi.h>
#include <asm/dma.h>
#include <asm/timer.h>
#include <asm/i8259.h>
#include <asm/nmi.h>
#include <asm/msidef.h>
#include <asm/hypertransport.h>
#include <asm/setup.h>
#include <asm/irq_remapping.h>
#include <asm/hpet.h>
#include <asm/hw_irq.h>

#include <asm/apic.h>

#define __apicdebuginit(type) static type __init
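/*
 * Walk the singly-linked list of (apic, pin) pairs hanging off an
 * irq_cfg; one IRQ can drive several IO-APIC pins (e.g. shared
 * ISA-space IRQs, see add_pin_to_irq_node_nopanic() below).
 */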
#define for_each_irq_pin(entry, head) \
	for (entry = head; entry; entry = entry->next)

/*
 * Is the SiS APIC rmw bug present?
 * -1 = don't know, 0 = no, 1 = yes
 */
int sis_apic_bug = -1;

static DEFINE_RAW_SPINLOCK(ioapic_lock);
static DEFINE_RAW_SPINLOCK(vector_lock);

/*
 * # of IRQ routing registers
 */
int nr_ioapic_registers[MAX_IO_APICS];

/* I/O APIC entries */
struct mpc_ioapic mp_ioapics[MAX_IO_APICS];
int nr_ioapics;

/* IO APIC gsi routing info */
struct mp_ioapic_gsi mp_gsi_routing[MAX_IO_APICS];

/* The last gsi number used */
u32 gsi_end;

/* MP IRQ source entries */
struct mpc_intsrc mp_irqs[MAX_IRQ_SOURCES];

/* # of MP IRQ source entries */
int mp_irq_entries;

/* GSI interrupts */
static int nr_irqs_gsi = NR_IRQS_LEGACY;

#if defined (CONFIG_MCA) || defined (CONFIG_EISA)
int mp_bus_id_to_type[MAX_MP_BUSSES];
#endif

DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES);

int skip_ioapic_setup;

void arch_disable_smp_support(void)
{
#ifdef CONFIG_PCI
	noioapicquirk = 1;
	noioapicreroute = -1;
#endif
	skip_ioapic_setup = 1;
}

static int __init parse_noapic(char *str)
{
	/* disable IO-APIC */
	arch_disable_smp_support();
	return 0;
}
early_param("noapic", parse_noapic);

struct irq_pin_list {
	int apic, pin;
	struct irq_pin_list *next;
};

static struct irq_pin_list *get_one_free_irq_2_pin(int node)
{
	struct irq_pin_list *pin;

	pin = kzalloc_node(sizeof(*pin), GFP_ATOMIC, node);

	return pin;
}

/* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */
#ifdef CONFIG_SPARSE_IRQ
static struct irq_cfg irq_cfgx[NR_IRQS_LEGACY];
#else
static struct irq_cfg irq_cfgx[NR_IRQS];
#endif

int __init arch_early_irq_init(void)
{
	struct irq_cfg *cfg;
	struct irq_desc *desc;
	int count;
	int node;
	int i;

	if (!legacy_pic->nr_legacy_irqs) {
		nr_irqs_gsi = 0;
		io_apic_irqs = ~0UL;
	}

	cfg = irq_cfgx;
	count = ARRAY_SIZE(irq_cfgx);
	node = cpu_to_node(boot_cpu_id);

	for (i = 0; i < count; i++) {
		desc = irq_to_desc(i);
		desc->chip_data = &cfg[i];
		zalloc_cpumask_var_node(&cfg[i].domain, GFP_NOWAIT, node);
		zalloc_cpumask_var_node(&cfg[i].old_domain, GFP_NOWAIT, node);
		/*
		 * For legacy IRQs, start with assigning irq0 to irq15 to
		 * IRQ0_VECTOR to IRQ15_VECTOR on cpu 0.
		 */
		if (i < legacy_pic->nr_legacy_irqs) {
			cfg[i].vector = IRQ0_VECTOR + i;
			cpumask_set_cpu(0, cfg[i].domain);
		}
	}

	return 0;
}

#ifdef CONFIG_SPARSE_IRQ
struct irq_cfg *irq_cfg(unsigned int irq)
{
	struct irq_cfg *cfg = NULL;
	struct irq_desc *desc;

	desc = irq_to_desc(irq);
	if (desc)
		cfg = desc->chip_data;

	return cfg;
}

static struct irq_cfg *get_one_free_irq_cfg(int node)
{
	struct irq_cfg *cfg;

	cfg = kzalloc_node(sizeof(*cfg), GFP_ATOMIC, node);
	if (cfg) {
		if (!zalloc_cpumask_var_node(&cfg->domain, GFP_ATOMIC, node)) {
			kfree(cfg);
			cfg = NULL;
		} else if (!zalloc_cpumask_var_node(&cfg->old_domain,
						    GFP_ATOMIC, node)) {
			free_cpumask_var(cfg->domain);
			kfree(cfg);
			cfg = NULL;
		}
	}

	return cfg;
}

int arch_init_chip_data(struct irq_desc *desc, int node)
{
	struct irq_cfg *cfg;

	cfg = desc->chip_data;
	if (!cfg) {
		desc->chip_data = get_one_free_irq_cfg(node);
		if (!desc->chip_data) {
			printk(KERN_ERR "can not alloc irq_cfg\n");
			BUG_ON(1);
		}
	}

	return 0;
}

/* for move_irq_desc */
static void
init_copy_irq_2_pin(struct irq_cfg *old_cfg, struct irq_cfg *cfg, int node)
{
	struct irq_pin_list *old_entry, *head, *tail, *entry;

	cfg->irq_2_pin = NULL;
	old_entry = old_cfg->irq_2_pin;
	if (!old_entry)
		return;

	entry = get_one_free_irq_2_pin(node);
	if (!entry)
		return;

	entry->apic = old_entry->apic;
	entry->pin = old_entry->pin;
	head = entry;
	tail = entry;
	old_entry = old_entry->next;
	while (old_entry) {
		entry = get_one_free_irq_2_pin(node);
		if (!entry) {
			entry = head;
			while (entry) {
				head = entry->next;
				kfree(entry);
				entry = head;
			}
			/* still use the old one */
			return;
		}
		entry->apic = old_entry->apic;
		entry->pin = old_entry->pin;
		tail->next = entry;
		tail = entry;
		old_entry = old_entry->next;
	}

	tail->next = NULL;
	cfg->irq_2_pin = head;
}

static void free_irq_2_pin(struct irq_cfg *old_cfg, struct irq_cfg *cfg)
{
	struct irq_pin_list *entry, *next;

	if (old_cfg->irq_2_pin == cfg->irq_2_pin)
		return;

	entry = old_cfg->irq_2_pin;

	while (entry) {
		next = entry->next;
		kfree(entry);
		entry = next;
	}
	old_cfg->irq_2_pin = NULL;
}

void arch_init_copy_chip_data(struct irq_desc *old_desc,
			      struct irq_desc *desc, int node)
{
	struct irq_cfg *cfg;
	struct irq_cfg *old_cfg;

	cfg = get_one_free_irq_cfg(node);

	if (!cfg)
		return;

	desc->chip_data = cfg;

	old_cfg = old_desc->chip_data;

	memcpy(cfg, old_cfg, sizeof(struct irq_cfg));

	init_copy_irq_2_pin(old_cfg, cfg, node);
}

static void free_irq_cfg(struct irq_cfg *old_cfg)
{
	kfree(old_cfg);
}

void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc)
{
	struct irq_cfg *old_cfg, *cfg;

	old_cfg = old_desc->chip_data;
	cfg = desc->chip_data;

	if (old_cfg == cfg)
		return;

	if (old_cfg) {
		free_irq_2_pin(old_cfg, cfg);
		free_irq_cfg(old_cfg);
		old_desc->chip_data = NULL;
	}
}
/* end for move_irq_desc */

#else
struct irq_cfg *irq_cfg(unsigned int irq)
{
	return irq < nr_irqs ? irq_cfgx + irq : NULL;
}

#endif
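/*
 * Software view of the IO-APIC's indirect register window: write a
 * register number to 'index', then read or write 32 bits through
 * 'data' (the unused[] padding reflects the 0x10-byte MMIO register
 * spacing). 'eoi' is the directed-EOI register on newer parts
 * (version >= 0x20).
 */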
struct io_apic {
	unsigned int index;
	unsigned int unused[3];
	unsigned int data;
	unsigned int unused2[11];
	unsigned int eoi;
};

static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx)
{
	return (void __iomem *) __fix_to_virt(FIX_IO_APIC_BASE_0 + idx)
		+ (mp_ioapics[idx].apicaddr & ~PAGE_MASK);
}

static inline void io_apic_eoi(unsigned int apic, unsigned int vector)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);
	writel(vector, &io_apic->eoi);
}

static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);
	writel(reg, &io_apic->index);
	return readl(&io_apic->data);
}

static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);
	writel(reg, &io_apic->index);
	writel(value, &io_apic->data);
}

/*
 * Re-write a value: to be used for read-modify-write
 * cycles where the read already set up the index register.
 *
 * Older SiS APICs require that we rewrite the index register.
 */
static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);

	if (sis_apic_bug)
		writel(reg, &io_apic->index);
	writel(value, &io_apic->data);
}
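/*
 * Check whether any pin routed to this irq still has its Remote IRR
 * bit set, i.e. a level-triggered interrupt that has been delivered
 * but not yet EOIed.
 */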
static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
{
	struct irq_pin_list *entry;
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	for_each_irq_pin(entry, cfg->irq_2_pin) {
		unsigned int reg;
		int pin;

		pin = entry->pin;
		reg = io_apic_read(entry->apic, 0x10 + pin*2);
		/* Is the remote IRR bit set? */
		if (reg & IO_APIC_REDIR_REMOTE_IRR) {
			raw_spin_unlock_irqrestore(&ioapic_lock, flags);
			return true;
		}
	}
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);

	return false;
}
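/*
 * A redirection-table entry (RTE) is 64 bits wide but the register
 * window is only 32 bits; this union lets one RTE be moved as two
 * 32-bit words (w1 = low dword at register 0x10 + 2*pin, w2 = high
 * dword at 0x11 + 2*pin).
 */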
union entry_union {
	struct { u32 w1, w2; };
	struct IO_APIC_route_entry entry;
};

static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin)
{
	union entry_union eu;
	unsigned long flags;
	raw_spin_lock_irqsave(&ioapic_lock, flags);
	eu.w1 = io_apic_read(apic, 0x10 + 2 * pin);
	eu.w2 = io_apic_read(apic, 0x11 + 2 * pin);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
	return eu.entry;
}

/*
 * When we write a new IO APIC routing entry, we need to write the high
 * word first! If the mask bit in the low word is clear, we will enable
 * the interrupt, and we need to make sure the entry is fully populated
 * before that happens.
 */
static void
__ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
{
	union entry_union eu = {{0, 0}};

	eu.entry = e;
	io_apic_write(apic, 0x11 + 2*pin, eu.w2);
	io_apic_write(apic, 0x10 + 2*pin, eu.w1);
}

void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
{
	unsigned long flags;
	raw_spin_lock_irqsave(&ioapic_lock, flags);
	__ioapic_write_entry(apic, pin, e);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}

/*
 * When we mask an IO APIC routing entry, we need to write the low
 * word first, in order to set the mask bit before we change the
 * high bits!
 */
static void ioapic_mask_entry(int apic, int pin)
{
	unsigned long flags;
	union entry_union eu = { .entry.mask = 1 };

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	io_apic_write(apic, 0x10 + 2*pin, eu.w1);
	io_apic_write(apic, 0x11 + 2*pin, eu.w2);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}

/*
 * The common case is 1:1 IRQ<->pin mappings. Sometimes there are
 * shared ISA-space IRQs, so we have to support them. We are super
 * fast in the common case, and fast for shared ISA-space IRQs.
 */
static int
add_pin_to_irq_node_nopanic(struct irq_cfg *cfg, int node, int apic, int pin)
{
	struct irq_pin_list **last, *entry;

	/* don't allow duplicates */
	last = &cfg->irq_2_pin;
	for_each_irq_pin(entry, cfg->irq_2_pin) {
		if (entry->apic == apic && entry->pin == pin)
			return 0;
		last = &entry->next;
	}

	entry = get_one_free_irq_2_pin(node);
	if (!entry) {
		printk(KERN_ERR "can not alloc irq_pin_list (%d,%d,%d)\n",
		       node, apic, pin);
		return -ENOMEM;
	}
	entry->apic = apic;
	entry->pin = pin;

	*last = entry;
	return 0;
}

static void add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pin)
{
	if (add_pin_to_irq_node_nopanic(cfg, node, apic, pin))
		panic("IO-APIC: failed to add irq-pin. Can not proceed\n");
}

/*
 * Reroute an IRQ to a different pin.
 */
static void __init replace_pin_at_irq_node(struct irq_cfg *cfg, int node,
					   int oldapic, int oldpin,
					   int newapic, int newpin)
{
	struct irq_pin_list *entry;

	for_each_irq_pin(entry, cfg->irq_2_pin) {
		if (entry->apic == oldapic && entry->pin == oldpin) {
			entry->apic = newapic;
			entry->pin = newpin;
			/* every one is different, right? */
			return;
		}
	}

	/* old apic/pin didn't exist, so just add new ones */
	add_pin_to_irq_node(cfg, node, newapic, newpin);
}

static void __io_apic_modify_irq(struct irq_pin_list *entry,
				 int mask_and, int mask_or,
				 void (*final)(struct irq_pin_list *entry))
{
	unsigned int reg, pin;

	pin = entry->pin;
	reg = io_apic_read(entry->apic, 0x10 + pin * 2);
	reg &= mask_and;
	reg |= mask_or;
	io_apic_modify(entry->apic, 0x10 + pin * 2, reg);
	if (final)
		final(entry);
}

static void io_apic_modify_irq(struct irq_cfg *cfg,
			       int mask_and, int mask_or,
			       void (*final)(struct irq_pin_list *entry))
{
	struct irq_pin_list *entry;

	for_each_irq_pin(entry, cfg->irq_2_pin)
		__io_apic_modify_irq(entry, mask_and, mask_or, final);
}

static void __mask_and_edge_IO_APIC_irq(struct irq_pin_list *entry)
{
	__io_apic_modify_irq(entry, ~IO_APIC_REDIR_LEVEL_TRIGGER,
			     IO_APIC_REDIR_MASKED, NULL);
}

static void __unmask_and_level_IO_APIC_irq(struct irq_pin_list *entry)
{
	__io_apic_modify_irq(entry, ~IO_APIC_REDIR_MASKED,
			     IO_APIC_REDIR_LEVEL_TRIGGER, NULL);
}

static void __unmask_IO_APIC_irq(struct irq_cfg *cfg)
{
	io_apic_modify_irq(cfg, ~IO_APIC_REDIR_MASKED, 0, NULL);
}

static void io_apic_sync(struct irq_pin_list *entry)
{
	/*
	 * Synchronize the IO-APIC and the CPU by doing
	 * a dummy read from the IO-APIC
	 */
	struct io_apic __iomem *io_apic;
	io_apic = io_apic_base(entry->apic);
	readl(&io_apic->data);
}

static void __mask_IO_APIC_irq(struct irq_cfg *cfg)
{
	io_apic_modify_irq(cfg, ~0, IO_APIC_REDIR_MASKED, &io_apic_sync);
}

static void mask_IO_APIC_irq_desc(struct irq_desc *desc)
{
	struct irq_cfg *cfg = desc->chip_data;
	unsigned long flags;

	BUG_ON(!cfg);

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	__mask_IO_APIC_irq(cfg);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}

static void unmask_IO_APIC_irq_desc(struct irq_desc *desc)
{
	struct irq_cfg *cfg = desc->chip_data;
	unsigned long flags;

	raw_spin_lock_irqsave(&ioapic_lock, flags);
	__unmask_IO_APIC_irq(cfg);
	raw_spin_unlock_irqrestore(&ioapic_lock, flags);
}

static void mask_IO_APIC_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	mask_IO_APIC_irq_desc(desc);
}

static void unmask_IO_APIC_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	unmask_IO_APIC_irq_desc(desc);
}

static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
{
	struct IO_APIC_route_entry entry;

	/* Check delivery_mode to be sure we're not clearing an SMI pin */
	entry = ioapic_read_entry(apic, pin);
	if (entry.delivery_mode == dest_SMI)
		return;
	/*
	 * Disable it in the IO-APIC irq-routing table:
	 */
	ioapic_mask_entry(apic, pin);
}

static void clear_IO_APIC(void)
{
	int apic, pin;

	for (apic = 0; apic < nr_ioapics; apic++)
		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
			clear_IO_APIC_pin(apic, pin);
}

#ifdef CONFIG_X86_32
/*
 * support for broken MP BIOSs, enables hand-redirection of PIRQ0-7 to
 * specific CPU-side IRQs.
 */

#define MAX_PIRQS 8
static int pirq_entries[MAX_PIRQS] = {
	[0 ... MAX_PIRQS - 1] = -1
};

static int __init ioapic_pirq_setup(char *str)
{
	int i, max;
	int ints[MAX_PIRQS+1];

	get_options(str, ARRAY_SIZE(ints), ints);

	apic_printk(APIC_VERBOSE, KERN_INFO
		    "PIRQ redirection, working around broken MP-BIOS.\n");
	max = MAX_PIRQS;
	if (ints[0] < MAX_PIRQS)
		max = ints[0];

	for (i = 0; i < max; i++) {
		apic_printk(APIC_VERBOSE, KERN_DEBUG
			    "... PIRQ%d -> IRQ %d\n", i, ints[i+1]);
		/*
		 * PIRQs are mapped upside down, usually.
		 */
		pirq_entries[MAX_PIRQS-i-1] = ints[i+1];
	}
	return 1;
}

__setup("pirq=", ioapic_pirq_setup);
#endif /* CONFIG_X86_32 */
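/*
 * alloc_ioapic_entries(), save_IO_APIC_setup(), mask_IO_APIC_setup(),
 * restore_IO_APIC_setup() and free_ioapic_entries() form a small
 * save/restore API for the routing tables. A caller might use it
 * like this (hypothetical sketch, error handling trimmed):
 *
 *	struct IO_APIC_route_entry **entries = alloc_ioapic_entries();
 *	if (entries && !save_IO_APIC_setup(entries)) {
 *		mask_IO_APIC_setup(entries);
 *		// ... reconfigure the interrupt hardware ...
 *		restore_IO_APIC_setup(entries);
 *	}
 *	free_ioapic_entries(entries);
 */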
struct IO_APIC_route_entry **alloc_ioapic_entries(void)
{
	int apic;
	struct IO_APIC_route_entry **ioapic_entries;

	ioapic_entries = kzalloc(sizeof(*ioapic_entries) * nr_ioapics,
				 GFP_ATOMIC);
	if (!ioapic_entries)
		return NULL;

	for (apic = 0; apic < nr_ioapics; apic++) {
		ioapic_entries[apic] =
			kzalloc(sizeof(struct IO_APIC_route_entry) *
				nr_ioapic_registers[apic], GFP_ATOMIC);
		if (!ioapic_entries[apic])
			goto nomem;
	}

	return ioapic_entries;

nomem:
	while (--apic >= 0)
		kfree(ioapic_entries[apic]);
	kfree(ioapic_entries);

	return NULL;
}

/*
 * Save all the IO-APIC RTEs.
 */
int save_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries)
{
	int apic, pin;

	if (!ioapic_entries)
		return -ENOMEM;

	for (apic = 0; apic < nr_ioapics; apic++) {
		if (!ioapic_entries[apic])
			return -ENOMEM;

		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
			ioapic_entries[apic][pin] =
				ioapic_read_entry(apic, pin);
	}

	return 0;
}

/*
 * Mask all IO APIC entries.
 */
void mask_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries)
{
	int apic, pin;

	if (!ioapic_entries)
		return;

	for (apic = 0; apic < nr_ioapics; apic++) {
		if (!ioapic_entries[apic])
			break;

		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
			struct IO_APIC_route_entry entry;

			entry = ioapic_entries[apic][pin];
			if (!entry.mask) {
				entry.mask = 1;
				ioapic_write_entry(apic, pin, entry);
			}
		}
	}
}

/*
 * Restore IO APIC entries which were saved in ioapic_entries.
 */
int restore_IO_APIC_setup(struct IO_APIC_route_entry **ioapic_entries)
{
	int apic, pin;

	if (!ioapic_entries)
		return -ENOMEM;

	for (apic = 0; apic < nr_ioapics; apic++) {
		if (!ioapic_entries[apic])
			return -ENOMEM;

		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
			ioapic_write_entry(apic, pin,
					   ioapic_entries[apic][pin]);
	}
	return 0;
}

void free_ioapic_entries(struct IO_APIC_route_entry **ioapic_entries)
{
	int apic;

	for (apic = 0; apic < nr_ioapics; apic++)
		kfree(ioapic_entries[apic]);

	kfree(ioapic_entries);
}

/*
 * Find the IRQ entry number of a certain pin.
 */
static int find_irq_entry(int apic, int pin, int type)
{
	int i;

	for (i = 0; i < mp_irq_entries; i++)
		if (mp_irqs[i].irqtype == type &&
		    (mp_irqs[i].dstapic == mp_ioapics[apic].apicid ||
		     mp_irqs[i].dstapic == MP_APIC_ALL) &&
		    mp_irqs[i].dstirq == pin)
			return i;

	return -1;
}

/*
 * Find the pin to which IRQ[irq] (ISA) is connected
 */
static int __init find_isa_irq_pin(int irq, int type)
{
	int i;

	for (i = 0; i < mp_irq_entries; i++) {
		int lbus = mp_irqs[i].srcbus;

		if (test_bit(lbus, mp_bus_not_pci) &&
		    (mp_irqs[i].irqtype == type) &&
		    (mp_irqs[i].srcbusirq == irq))

			return mp_irqs[i].dstirq;
	}
	return -1;
}

static int __init find_isa_irq_apic(int irq, int type)
{
	int i;

	for (i = 0; i < mp_irq_entries; i++) {
		int lbus = mp_irqs[i].srcbus;

		if (test_bit(lbus, mp_bus_not_pci) &&
		    (mp_irqs[i].irqtype == type) &&
		    (mp_irqs[i].srcbusirq == irq))
			break;
	}
	if (i < mp_irq_entries) {
		int apic;
		for (apic = 0; apic < nr_ioapics; apic++) {
			if (mp_ioapics[apic].apicid == mp_irqs[i].dstapic)
				return apic;
		}
	}

	return -1;
}

#if defined(CONFIG_EISA) || defined(CONFIG_MCA)
/*
 * EISA Edge/Level control register, ELCR
 */
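/*
 * Each of the two ELCR ports holds the trigger bits for eight IRQs:
 * e.g. IRQ 9 lives in bit 1 of port 0x4d1 (0x4d0 + (9 >> 3)); a set
 * bit means level triggered.
 */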
static int EISA_ELCR(unsigned int irq)
{
	if (irq < legacy_pic->nr_legacy_irqs) {
		unsigned int port = 0x4d0 + (irq >> 3);
		return (inb(port) >> (irq & 7)) & 1;
	}
	apic_printk(APIC_VERBOSE, KERN_INFO
		    "Broken MPtable reports ISA irq %d\n", irq);
	return 0;
}

#endif

/* ISA interrupts are always polarity zero edge triggered,
 * when listed as conforming in the MP table. */

#define default_ISA_trigger(idx)	(0)
#define default_ISA_polarity(idx)	(0)

/* EISA interrupts are always polarity zero and can be edge or level
 * trigger depending on the ELCR value. If an interrupt is listed as
 * EISA conforming in the MP table, that means its trigger type must
 * be read in from the ELCR. */

#define default_EISA_trigger(idx)	(EISA_ELCR(mp_irqs[idx].srcbusirq))
#define default_EISA_polarity(idx)	default_ISA_polarity(idx)

/* PCI interrupts are always polarity one level triggered,
 * when listed as conforming in the MP table. */

#define default_PCI_trigger(idx)	(1)
#define default_PCI_polarity(idx)	(1)

/* MCA interrupts are always polarity zero level triggered,
 * when listed as conforming in the MP table. */

#define default_MCA_trigger(idx)	(1)
#define default_MCA_polarity(idx)	default_ISA_polarity(idx)
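/*
 * MP-table irqflag encoding, as decoded below: bits 0-1 hold the
 * polarity (00 = bus default, 01 = active high, 11 = active low) and
 * bits 2-3 the trigger mode (00 = bus default, 01 = edge, 11 = level);
 * the value 10 is reserved in both fields.
 */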
static int MPBIOS_polarity(int idx)
{
	int bus = mp_irqs[idx].srcbus;
	int polarity;

	/*
	 * Determine IRQ line polarity (high active or low active):
	 */
	switch (mp_irqs[idx].irqflag & 3)
	{
	case 0: /* conforms, i.e. bus-type dependent polarity */
		if (test_bit(bus, mp_bus_not_pci))
			polarity = default_ISA_polarity(idx);
		else
			polarity = default_PCI_polarity(idx);
		break;
	case 1: /* high active */
	{
		polarity = 0;
		break;
	}
	case 2: /* reserved */
	{
		printk(KERN_WARNING "broken BIOS!!\n");
		polarity = 1;
		break;
	}
	case 3: /* low active */
	{
		polarity = 1;
		break;
	}
	default: /* invalid */
	{
		printk(KERN_WARNING "broken BIOS!!\n");
		polarity = 1;
		break;
	}
	}
	return polarity;
}

static int MPBIOS_trigger(int idx)
{
	int bus = mp_irqs[idx].srcbus;
	int trigger;

	/*
	 * Determine IRQ trigger mode (edge or level sensitive):
	 */
	switch ((mp_irqs[idx].irqflag>>2) & 3)
	{
	case 0: /* conforms, i.e. bus-type dependent */
		if (test_bit(bus, mp_bus_not_pci))
			trigger = default_ISA_trigger(idx);
		else
			trigger = default_PCI_trigger(idx);
#if defined(CONFIG_EISA) || defined(CONFIG_MCA)
		switch (mp_bus_id_to_type[bus]) {
		case MP_BUS_ISA: /* ISA pin */
		{
			/* set before the switch */
			break;
		}
		case MP_BUS_EISA: /* EISA pin */
		{
			trigger = default_EISA_trigger(idx);
			break;
		}
		case MP_BUS_PCI: /* PCI pin */
		{
			/* set before the switch */
			break;
		}
		case MP_BUS_MCA: /* MCA pin */
		{
			trigger = default_MCA_trigger(idx);
			break;
		}
		default:
		{
			printk(KERN_WARNING "broken BIOS!!\n");
			trigger = 1;
			break;
		}
		}
#endif
		break;
	case 1: /* edge */
	{
		trigger = 0;
		break;
	}
	case 2: /* reserved */
	{
		printk(KERN_WARNING "broken BIOS!!\n");
		trigger = 1;
		break;
	}
	case 3: /* level */
	{
		trigger = 1;
		break;
	}
	default: /* invalid */
	{
		printk(KERN_WARNING "broken BIOS!!\n");
		trigger = 0;
		break;
	}
	}
	return trigger;
}

static inline int irq_polarity(int idx)
{
	return MPBIOS_polarity(idx);
}

static inline int irq_trigger(int idx)
{
	return MPBIOS_trigger(idx);
}
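/*
 * Map an (mp_irqs index, ioapic, pin) triple to a Linux IRQ number.
 * ISA IRQs keep their bus IRQ number; PCI pins use the GSI directly,
 * except that a GSI below NR_IRQS_LEGACY would collide with the ISA
 * range and is therefore remapped above gsi_end.
 */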
static int pin_2_irq(int idx, int apic, int pin)
{
	int irq;
	int bus = mp_irqs[idx].srcbus;

	/*
	 * Debugging check, we are in big trouble if this message pops up!
	 */
	if (mp_irqs[idx].dstirq != pin)
		printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n");

	if (test_bit(bus, mp_bus_not_pci)) {
		irq = mp_irqs[idx].srcbusirq;
	} else {
		u32 gsi = mp_gsi_routing[apic].gsi_base + pin;

		if (gsi >= NR_IRQS_LEGACY)
			irq = gsi;
		else
			irq = gsi_end + 1 + gsi;
	}

#ifdef CONFIG_X86_32
	/*
	 * PCI IRQ command line redirection. Yes, limits are hardcoded.
	 */
	if ((pin >= 16) && (pin <= 23)) {
		if (pirq_entries[pin-16] != -1) {
			if (!pirq_entries[pin-16]) {
				apic_printk(APIC_VERBOSE, KERN_DEBUG
					    "disabling PIRQ%d\n", pin-16);
			} else {
				irq = pirq_entries[pin-16];
				apic_printk(APIC_VERBOSE, KERN_DEBUG
					    "using PIRQ%d -> IRQ %d\n",
					    pin-16, irq);
			}
		}
	}
#endif

	return irq;
}

/*
 * Find a specific PCI IRQ entry.
 * Not an __init, possibly needed by modules
 */
int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
			       struct io_apic_irq_attr *irq_attr)
{
	int apic, i, best_guess = -1;

	apic_printk(APIC_DEBUG,
		    "querying PCI -> IRQ mapping bus:%d, slot:%d, pin:%d.\n",
		    bus, slot, pin);
	if (test_bit(bus, mp_bus_not_pci)) {
		apic_printk(APIC_VERBOSE,
			    "PCI BIOS passed nonexistent PCI bus %d!\n", bus);
		return -1;
	}
	for (i = 0; i < mp_irq_entries; i++) {
		int lbus = mp_irqs[i].srcbus;

		for (apic = 0; apic < nr_ioapics; apic++)
			if (mp_ioapics[apic].apicid == mp_irqs[i].dstapic ||
			    mp_irqs[i].dstapic == MP_APIC_ALL)
				break;

		if (!test_bit(lbus, mp_bus_not_pci) &&
		    !mp_irqs[i].irqtype &&
		    (bus == lbus) &&
		    (slot == ((mp_irqs[i].srcbusirq >> 2) & 0x1f))) {
			int irq = pin_2_irq(i, apic, mp_irqs[i].dstirq);

			if (!(apic || IO_APIC_IRQ(irq)))
				continue;

			if (pin == (mp_irqs[i].srcbusirq & 3)) {
				set_io_apic_irq_attr(irq_attr, apic,
						     mp_irqs[i].dstirq,
						     irq_trigger(i),
						     irq_polarity(i));
				return irq;
			}
			/*
			 * Use the first all-but-pin matching entry as a
			 * best-guess fuzzy result for broken mptables.
			 */
			if (best_guess < 0) {
				set_io_apic_irq_attr(irq_attr, apic,
						     mp_irqs[i].dstirq,
						     irq_trigger(i),
						     irq_polarity(i));
				best_guess = irq;
			}
		}
	}
	return best_guess;
}
EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);

void lock_vector_lock(void)
{
	/* Used to ensure that the online set of cpus does not change
	 * during assign_irq_vector.
	 */
	raw_spin_lock(&vector_lock);
}

void unlock_vector_lock(void)
{
	raw_spin_unlock(&vector_lock);
}

static int
__assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
{
	/*
	 * NOTE! The local APIC isn't very good at handling
	 * multiple interrupts at the same interrupt level.
	 * As the interrupt level is determined by taking the
	 * vector number and shifting that right by 4, we
	 * want to spread these out a bit so that they don't
	 * all fall in the same interrupt level.
	 *
	 * Also, we've got to be careful not to trash gate
	 * 0x80, because int 0x80 is hm, kind of importantish. ;)
	 */
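	/*
	 * Stepping by 8 places at most two vectors in each 16-vector
	 * priority level (level = vector >> 4): e.g. 0x31, then 0x39,
	 * then on to 0x41 in the next level.
	 */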
	static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START;
	static int current_offset = VECTOR_OFFSET_START % 8;
	unsigned int old_vector;
	int cpu, err;
	cpumask_var_t tmp_mask;

	if (cfg->move_in_progress)
		return -EBUSY;

	if (!alloc_cpumask_var(&tmp_mask, GFP_ATOMIC))
		return -ENOMEM;

	old_vector = cfg->vector;
	if (old_vector) {
		cpumask_and(tmp_mask, mask, cpu_online_mask);
		cpumask_and(tmp_mask, cfg->domain, tmp_mask);
		if (!cpumask_empty(tmp_mask)) {
			free_cpumask_var(tmp_mask);
			return 0;
		}
	}

	/* Only try and allocate irqs on cpus that are present */
	err = -ENOSPC;
	for_each_cpu_and(cpu, mask, cpu_online_mask) {
		int new_cpu;
		int vector, offset;

		apic->vector_allocation_domain(cpu, tmp_mask);

		vector = current_vector;
		offset = current_offset;
next:
		vector += 8;
		if (vector >= first_system_vector) {
			/* If out of vectors on large boxen, must share them. */
			offset = (offset + 1) % 8;
			vector = FIRST_EXTERNAL_VECTOR + offset;
		}
		if (unlikely(current_vector == vector))
			continue;

		if (test_bit(vector, used_vectors))
			goto next;

		for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
			if (per_cpu(vector_irq, new_cpu)[vector] != -1)
				goto next;
		/* Found one! */
		current_vector = vector;
		current_offset = offset;
		if (old_vector) {
			cfg->move_in_progress = 1;
			cpumask_copy(cfg->old_domain, cfg->domain);
		}
		for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
			per_cpu(vector_irq, new_cpu)[vector] = irq;
		cfg->vector = vector;
		cpumask_copy(cfg->domain, tmp_mask);
		err = 0;
		break;
	}
	free_cpumask_var(tmp_mask);
	return err;
}

int assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
{
	int err;
	unsigned long flags;

	raw_spin_lock_irqsave(&vector_lock, flags);
	err = __assign_irq_vector(irq, cfg, mask);
	raw_spin_unlock_irqrestore(&vector_lock, flags);
	return err;
}
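/*
 * Undo __assign_irq_vector(): release the vector on every cpu in the
 * domain, and clean up any half-finished migration recorded in
 * old_domain.
 */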
static void __clear_irq_vector(int irq, struct irq_cfg *cfg)
{
	int cpu, vector;

	BUG_ON(!cfg->vector);

	vector = cfg->vector;
	for_each_cpu_and(cpu, cfg->domain, cpu_online_mask)
		per_cpu(vector_irq, cpu)[vector] = -1;

	cfg->vector = 0;
	cpumask_clear(cfg->domain);

	if (likely(!cfg->move_in_progress))
		return;
	for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) {
		for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
		     vector++) {
			if (per_cpu(vector_irq, cpu)[vector] != irq)
				continue;
			per_cpu(vector_irq, cpu)[vector] = -1;
			break;
		}
	}
	cfg->move_in_progress = 0;
}

void __setup_vector_irq(int cpu)
{
	/* Initialize vector_irq on a new cpu */
	int irq, vector;
	struct irq_cfg *cfg;
	struct irq_desc *desc;

	/*
	 * vector_lock will make sure that we don't run into irq vector
	 * assignments that might be happening on another cpu in parallel,
	 * while we setup our initial vector to irq mappings.
	 */
	raw_spin_lock(&vector_lock);
	/* Mark the inuse vectors */
	for_each_irq_desc(irq, desc) {
		cfg = desc->chip_data;

		/*
		 * If it is a legacy IRQ handled by the legacy PIC, this cpu
		 * will be part of the irq_cfg's domain.
		 */
		if (irq < legacy_pic->nr_legacy_irqs && !IO_APIC_IRQ(irq))
			cpumask_set_cpu(cpu, cfg->domain);

		if (!cpumask_test_cpu(cpu, cfg->domain))
			continue;
		vector = cfg->vector;
		per_cpu(vector_irq, cpu)[vector] = irq;
	}
	/* Mark the free vectors */
	for (vector = 0; vector < NR_VECTORS; ++vector) {
		irq = per_cpu(vector_irq, cpu)[vector];
		if (irq < 0)
			continue;

		cfg = irq_cfg(irq);
		if (!cpumask_test_cpu(cpu, cfg->domain))
			per_cpu(vector_irq, cpu)[vector] = -1;
	}
	raw_spin_unlock(&vector_lock);
}

static struct irq_chip ioapic_chip;
static struct irq_chip ir_ioapic_chip;

#define IOAPIC_AUTO	-1
#define IOAPIC_EDGE	0
#define IOAPIC_LEVEL	1
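/*
 * IOAPIC_AUTO asks IO_APIC_irq_trigger() to look the trigger mode up
 * in the MP table; IOAPIC_EDGE/IOAPIC_LEVEL force it. Level-triggered
 * lines get the fasteoi flow, edge-triggered ones the edge flow (see
 * ioapic_register_intr() below).
 */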

#ifdef CONFIG_X86_32
static inline int IO_APIC_irq_trigger(int irq)
{
	int apic, idx, pin;

	for (apic = 0; apic < nr_ioapics; apic++) {
		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
			idx = find_irq_entry(apic, pin, mp_INT);
			if ((idx != -1) && (irq == pin_2_irq(idx, apic, pin)))
				return irq_trigger(idx);
		}
	}
	/*
	 * nonexistent IRQs are edge default
	 */
	return 0;
}
#else
static inline int IO_APIC_irq_trigger(int irq)
{
	return 1;
}
#endif

static void ioapic_register_intr(int irq, struct irq_desc *desc, unsigned long trigger)
{

	if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
	    trigger == IOAPIC_LEVEL)
		desc->status |= IRQ_LEVEL;
	else
		desc->status &= ~IRQ_LEVEL;

	if (irq_remapped(irq)) {
		desc->status |= IRQ_MOVE_PCNTXT;
		if (trigger)
			set_irq_chip_and_handler_name(irq, &ir_ioapic_chip,
						      handle_fasteoi_irq,
						      "fasteoi");
		else
			set_irq_chip_and_handler_name(irq, &ir_ioapic_chip,
						      handle_edge_irq, "edge");
		return;
	}

	if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
	    trigger == IOAPIC_LEVEL)
		set_irq_chip_and_handler_name(irq, &ioapic_chip,
					      handle_fasteoi_irq,
					      "fasteoi");
	else
		set_irq_chip_and_handler_name(irq, &ioapic_chip,
					      handle_edge_irq, "edge");
}
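/*
 * Fill in one redirection-table entry. With interrupt remapping
 * enabled, the RTE is reinterpreted as an IR_IO_APIC_route_entry that
 * points at an IRTE; otherwise the destination and vector are
 * programmed into the RTE directly.
 */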
int setup_ioapic_entry(int apic_id, int irq,
		       struct IO_APIC_route_entry *entry,
		       unsigned int destination, int trigger,
		       int polarity, int vector, int pin)
{
	/*
	 * add it to the IO-APIC irq-routing table:
	 */
	memset(entry, 0, sizeof(*entry));

	if (intr_remapping_enabled) {
		struct intel_iommu *iommu = map_ioapic_to_ir(apic_id);
		struct irte irte;
		struct IR_IO_APIC_route_entry *ir_entry =
			(struct IR_IO_APIC_route_entry *) entry;
		int index;

		if (!iommu)
			panic("No mapping iommu for ioapic %d\n", apic_id);

		index = alloc_irte(iommu, irq, 1);
		if (index < 0)
			panic("Failed to allocate IRTE for ioapic %d\n", apic_id);

		memset(&irte, 0, sizeof(irte));

		irte.present = 1;
		irte.dst_mode = apic->irq_dest_mode;
		/*
		 * Trigger mode in the IRTE will always be edge, and the
		 * actual level or edge trigger will be setup in the IO-APIC
		 * RTE. This will help simplify level triggered irq migration.
		 * For more details, see the comments above explaining IO-APIC
		 * irq migration in the presence of interrupt-remapping.
		 */
		irte.trigger_mode = 0;
		irte.dlvry_mode = apic->irq_delivery_mode;
		irte.vector = vector;
		irte.dest_id = IRTE_DEST(destination);

		/* Set source-id of interrupt request */
		set_ioapic_sid(&irte, apic_id);

		modify_irte(irq, &irte);

		ir_entry->index2 = (index >> 15) & 0x1;
		ir_entry->zero = 0;
		ir_entry->format = 1;
		ir_entry->index = (index & 0x7fff);
		/*
		 * IO-APIC RTE will be configured with virtual vector.
		 * irq handler will do the explicit EOI to the io-apic.
		 */
		ir_entry->vector = pin;
	} else {
		entry->delivery_mode = apic->irq_delivery_mode;
		entry->dest_mode = apic->irq_dest_mode;
		entry->dest = destination;
		entry->vector = vector;
	}

	entry->mask = 0;	/* enable IRQ */
	entry->trigger = trigger;
	entry->polarity = polarity;

	/* Mask level triggered irqs.
	 * Use IRQ_DELAYED_DISABLE for edge triggered irqs.
	 */
	if (trigger)
		entry->mask = 1;
	return 0;
}

static void setup_IO_APIC_irq(int apic_id, int pin, unsigned int irq, struct irq_desc *desc,
			      int trigger, int polarity)
{
	struct irq_cfg *cfg;
	struct IO_APIC_route_entry entry;
	unsigned int dest;

	if (!IO_APIC_IRQ(irq))
		return;

	cfg = desc->chip_data;

	/*
	 * For legacy irqs, cfg->domain starts with cpu 0 for legacy
	 * controllers like 8259. Now that IO-APIC can handle this irq, update
	 * the cfg->domain.
	 */
	if (irq < legacy_pic->nr_legacy_irqs && cpumask_test_cpu(0, cfg->domain))
		apic->vector_allocation_domain(0, cfg->domain);

	if (assign_irq_vector(irq, cfg, apic->target_cpus()))
		return;

	dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus());

	apic_printk(APIC_VERBOSE, KERN_DEBUG
		    "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> "
		    "IRQ %d Mode:%i Active:%i)\n",
		    apic_id, mp_ioapics[apic_id].apicid, pin, cfg->vector,
		    irq, trigger, polarity);


	if (setup_ioapic_entry(mp_ioapics[apic_id].apicid, irq, &entry,
			       dest, trigger, polarity, cfg->vector, pin)) {
		printk("Failed to setup ioapic entry for ioapic %d, pin %d\n",
		       mp_ioapics[apic_id].apicid, pin);
		__clear_irq_vector(irq, cfg);
		return;
	}

	ioapic_register_intr(irq, desc, trigger);
	if (irq < legacy_pic->nr_legacy_irqs)
		legacy_pic->chip->mask(irq);

	ioapic_write_entry(apic_id, pin, entry);
}
static struct {
	DECLARE_BITMAP(pin_programmed, MP_MAX_IOAPIC_PIN + 1);
} mp_ioapic_routing[MAX_IO_APICS];

static void __init setup_IO_APIC_irqs(void)
{
	int apic_id, pin, idx, irq;
	int notcon = 0;
	struct irq_desc *desc;
	struct irq_cfg *cfg;
	int node = cpu_to_node(boot_cpu_id);

	apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");

	for (apic_id = 0; apic_id < nr_ioapics; apic_id++)
	for (pin = 0; pin < nr_ioapic_registers[apic_id]; pin++) {
		idx = find_irq_entry(apic_id, pin, mp_INT);
		if (idx == -1) {
			if (!notcon) {
				notcon = 1;
				apic_printk(APIC_VERBOSE,
					KERN_DEBUG " %d-%d",
					mp_ioapics[apic_id].apicid, pin);
			} else
				apic_printk(APIC_VERBOSE, " %d-%d",
					mp_ioapics[apic_id].apicid, pin);
			continue;
		}
		if (notcon) {
			apic_printk(APIC_VERBOSE,
				" (apicid-pin) not connected\n");
			notcon = 0;
		}

		irq = pin_2_irq(idx, apic_id, pin);

		if ((apic_id > 0) && (irq > 16))
			continue;

		/*
		 * Skip the timer IRQ if there's a quirk handler
		 * installed and if it returns 1:
		 */
		if (apic->multi_timer_check &&
		    apic->multi_timer_check(apic_id, irq))
			continue;

		desc = irq_to_desc_alloc_node(irq, node);
		if (!desc) {
			printk(KERN_INFO "can not get irq_desc for %d\n", irq);
			continue;
		}
		cfg = desc->chip_data;
		add_pin_to_irq_node(cfg, node, apic_id, pin);
		/*
		 * don't mark it in pin_programmed, so later acpi could
		 * set it correctly when irq < 16
		 */
		setup_IO_APIC_irq(apic_id, pin, irq, desc,
				  irq_trigger(idx), irq_polarity(idx));
	}

	if (notcon)
		apic_printk(APIC_VERBOSE,
			" (apicid-pin) not connected\n");
}

/*
 * Set up the GSIs that are not on the first ioapic but could not be
 * registered via acpi_register_gsi(), like the special SCI on the
 * IBM x3330.
 */
void setup_IO_APIC_irq_extra(u32 gsi)
{
	int apic_id = 0, pin, idx, irq;
	int node = cpu_to_node(boot_cpu_id);
	struct irq_desc *desc;
	struct irq_cfg *cfg;

	/*
	 * Convert 'gsi' to 'ioapic.pin'.
	 */
	apic_id = mp_find_ioapic(gsi);
	if (apic_id < 0)
		return;

	pin = mp_find_ioapic_pin(apic_id, gsi);
	idx = find_irq_entry(apic_id, pin, mp_INT);
	if (idx == -1)
		return;

	irq = pin_2_irq(idx, apic_id, pin);
#ifdef CONFIG_SPARSE_IRQ
	desc = irq_to_desc(irq);
	if (desc)
		return;
#endif
	desc = irq_to_desc_alloc_node(irq, node);
	if (!desc) {
		printk(KERN_INFO "can not get irq_desc for %d\n", irq);
		return;
	}

	cfg = desc->chip_data;
	add_pin_to_irq_node(cfg, node, apic_id, pin);

	if (test_bit(pin, mp_ioapic_routing[apic_id].pin_programmed)) {
		pr_debug("Pin %d-%d already programmed\n",
			 mp_ioapics[apic_id].apicid, pin);
		return;
	}
	set_bit(pin, mp_ioapic_routing[apic_id].pin_programmed);

	setup_IO_APIC_irq(apic_id, pin, irq, desc,
			  irq_trigger(idx), irq_polarity(idx));
}

/*
 * Set up the timer pin, possibly with the 8259A-master behind.
 */
static void __init setup_timer_IRQ0_pin(unsigned int apic_id, unsigned int pin,
					int vector)
{
	struct IO_APIC_route_entry entry;

	if (intr_remapping_enabled)
		return;

	memset(&entry, 0, sizeof(entry));

	/*
	 * We use logical delivery to get the timer IRQ
	 * to the first CPU.
	 */
	entry.dest_mode = apic->irq_dest_mode;
	entry.mask = 0;			/* don't mask IRQ for edge */
	entry.dest = apic->cpu_mask_to_apicid(apic->target_cpus());
	entry.delivery_mode = apic->irq_delivery_mode;
	entry.polarity = 0;
	entry.trigger = 0;
	entry.vector = vector;

	/*
	 * The timer IRQ doesn't have to know that behind the
	 * scene we may have a 8259A-master in AEOI mode ...
	 */
	set_irq_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, "edge");

	/*
	 * Add it to the IO-APIC irq-routing table:
	 */
	ioapic_write_entry(apic_id, pin, entry);
}


__apicdebuginit(void) print_IO_APIC(void)
{
	int apic, i;
	union IO_APIC_reg_00 reg_00;
	union IO_APIC_reg_01 reg_01;
	union IO_APIC_reg_02 reg_02;
	union IO_APIC_reg_03 reg_03;
	unsigned long flags;
	struct irq_cfg *cfg;
	struct irq_desc *desc;
	unsigned int irq;

	printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
	for (i = 0; i < nr_ioapics; i++)
		printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
		       mp_ioapics[i].apicid, nr_ioapic_registers[i]);

	/*
	 * We are a bit conservative about what we expect. We have to
	 * know about every hardware change ASAP.
	 */
	printk(KERN_INFO "testing the IO APIC.......................\n");

	for (apic = 0; apic < nr_ioapics; apic++) {

		raw_spin_lock_irqsave(&ioapic_lock, flags);
		reg_00.raw = io_apic_read(apic, 0);
		reg_01.raw = io_apic_read(apic, 1);
		if (reg_01.bits.version >= 0x10)
			reg_02.raw = io_apic_read(apic, 2);
		if (reg_01.bits.version >= 0x20)
			reg_03.raw = io_apic_read(apic, 3);
		raw_spin_unlock_irqrestore(&ioapic_lock, flags);

		printk("\n");
		printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].apicid);
		printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
		printk(KERN_DEBUG "....... : physical APIC id: %02X\n", reg_00.bits.ID);
		printk(KERN_DEBUG "....... : Delivery Type: %X\n", reg_00.bits.delivery_type);
		printk(KERN_DEBUG "....... : LTS : %X\n", reg_00.bits.LTS);

		printk(KERN_DEBUG ".... register #01: %08X\n", *(int *)&reg_01);
		printk(KERN_DEBUG "....... : max redirection entries: %04X\n", reg_01.bits.entries);

		printk(KERN_DEBUG "....... : PRQ implemented: %X\n", reg_01.bits.PRQ);
		printk(KERN_DEBUG "....... : IO APIC version: %04X\n", reg_01.bits.version);

		/*
		 * Some Intel chipsets with IO APIC VERSION of 0x1? don't have reg_02,
		 * but the value of reg_02 is read as the previous read register
		 * value, so ignore it if reg_02 == reg_01.
		 */
		if (reg_01.bits.version >= 0x10 && reg_02.raw != reg_01.raw) {
			printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw);
			printk(KERN_DEBUG "....... : arbitration: %02X\n", reg_02.bits.arbitration);
		}

		/*
		 * Some Intel chipsets with IO APIC VERSION of 0x2? don't have reg_02
		 * or reg_03, but the value of reg_0[23] is read as the previous read
		 * register value, so ignore it if reg_03 == reg_0[12].
		 */
		if (reg_01.bits.version >= 0x20 && reg_03.raw != reg_02.raw &&
		    reg_03.raw != reg_01.raw) {
			printk(KERN_DEBUG ".... register #03: %08X\n", reg_03.raw);
			printk(KERN_DEBUG "....... : Boot DT : %X\n", reg_03.bits.boot_DT);
		}

		printk(KERN_DEBUG ".... IRQ redirection table:\n");

		printk(KERN_DEBUG " NR Dst Mask Trig IRR Pol"
		       " Stat Dmod Deli Vect:\n");

		for (i = 0; i <= reg_01.bits.entries; i++) {
			struct IO_APIC_route_entry entry;

			entry = ioapic_read_entry(apic, i);

			printk(KERN_DEBUG " %02x %03X ",
				i,
				entry.dest
			);

			printk("%1d %1d %1d %1d %1d %1d %1d %02X\n",
				entry.mask,
				entry.trigger,
				entry.irr,
				entry.polarity,
				entry.delivery_status,
				entry.dest_mode,
				entry.delivery_mode,
				entry.vector
			);
		}
	}
	printk(KERN_DEBUG "IRQ to pin mappings:\n");
	for_each_irq_desc(irq, desc) {
		struct irq_pin_list *entry;

		cfg = desc->chip_data;
		entry = cfg->irq_2_pin;
		if (!entry)
			continue;
		printk(KERN_DEBUG "IRQ%d ", irq);
		for_each_irq_pin(entry, cfg->irq_2_pin)
			printk("-> %d:%d", entry->apic, entry->pin);
		printk("\n");
	}

	printk(KERN_INFO ".................................... done.\n");

	return;
}

__apicdebuginit(void) print_APIC_field(int base)
{
	int i;

	printk(KERN_DEBUG);

	for (i = 0; i < 8; i++)
		printk(KERN_CONT "%08x", apic_read(base + i*0x10));

	printk(KERN_CONT "\n");
}

__apicdebuginit(void) print_local_APIC(void *dummy)
{
	unsigned int i, v, ver, maxlvt;
	u64 icr;

	printk(KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
		smp_processor_id(), hard_smp_processor_id());
	v = apic_read(APIC_ID);
	printk(KERN_INFO "... APIC ID: %08x (%01x)\n", v, read_apic_id());
	v = apic_read(APIC_LVR);
	printk(KERN_INFO "... APIC VERSION: %08x\n", v);
	ver = GET_APIC_VERSION(v);
	maxlvt = lapic_get_maxlvt();

	v = apic_read(APIC_TASKPRI);
	printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);

	if (APIC_INTEGRATED(ver)) {		/* !82489DX */
		if (!APIC_XAPIC(ver)) {
			v = apic_read(APIC_ARBPRI);
			printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v,
			       v & APIC_ARBPRI_MASK);
		}
		v = apic_read(APIC_PROCPRI);
		printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v);
	}

	/*
	 * Remote read supported only in the 82489DX and local APIC for
	 * Pentium processors.
	 */
	if (!APIC_INTEGRATED(ver) || maxlvt == 3) {
		v = apic_read(APIC_RRR);
		printk(KERN_DEBUG "... APIC RRR: %08x\n", v);
	}

	v = apic_read(APIC_LDR);
	printk(KERN_DEBUG "... APIC LDR: %08x\n", v);
	if (!x2apic_enabled()) {
		v = apic_read(APIC_DFR);
		printk(KERN_DEBUG "... APIC DFR: %08x\n", v);
	}
	v = apic_read(APIC_SPIV);
	printk(KERN_DEBUG "... APIC SPIV: %08x\n", v);

	printk(KERN_DEBUG "... APIC ISR field:\n");
	print_APIC_field(APIC_ISR);
	printk(KERN_DEBUG "... APIC TMR field:\n");
	print_APIC_field(APIC_TMR);
	printk(KERN_DEBUG "... APIC IRR field:\n");
	print_APIC_field(APIC_IRR);

	if (APIC_INTEGRATED(ver)) {		/* !82489DX */
		if (maxlvt > 3)		/* Due to the Pentium erratum 3AP. */
			apic_write(APIC_ESR, 0);

		v = apic_read(APIC_ESR);
		printk(KERN_DEBUG "... APIC ESR: %08x\n", v);
	}

	icr = apic_icr_read();
	printk(KERN_DEBUG "... APIC ICR: %08x\n", (u32)icr);
	printk(KERN_DEBUG "... APIC ICR2: %08x\n", (u32)(icr >> 32));

	v = apic_read(APIC_LVTT);
	printk(KERN_DEBUG "... APIC LVTT: %08x\n", v);

	if (maxlvt > 3) {			/* PC is LVT#4. */
		v = apic_read(APIC_LVTPC);
		printk(KERN_DEBUG "... APIC LVTPC: %08x\n", v);
	}
	v = apic_read(APIC_LVT0);
	printk(KERN_DEBUG "... APIC LVT0: %08x\n", v);
	v = apic_read(APIC_LVT1);
	printk(KERN_DEBUG "... APIC LVT1: %08x\n", v);

	if (maxlvt > 2) {			/* ERR is LVT#3. */
		v = apic_read(APIC_LVTERR);
		printk(KERN_DEBUG "... APIC LVTERR: %08x\n", v);
	}

	v = apic_read(APIC_TMICT);
	printk(KERN_DEBUG "... APIC TMICT: %08x\n", v);
	v = apic_read(APIC_TMCCT);
	printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v);
	v = apic_read(APIC_TDCR);
	printk(KERN_DEBUG "... APIC TDCR: %08x\n", v);

	if (boot_cpu_has(X86_FEATURE_EXTAPIC)) {
		v = apic_read(APIC_EFEAT);
		maxlvt = (v >> 16) & 0xff;
		printk(KERN_DEBUG "... APIC EFEAT: %08x\n", v);
		v = apic_read(APIC_ECTRL);
		printk(KERN_DEBUG "... APIC ECTRL: %08x\n", v);
		for (i = 0; i < maxlvt; i++) {
			v = apic_read(APIC_EILVTn(i));
			printk(KERN_DEBUG "... APIC EILVT%d: %08x\n", i, v);
		}
	}
	printk("\n");
}

__apicdebuginit(void) print_local_APICs(int maxcpu)
{
	int cpu;

	if (!maxcpu)
		return;

	preempt_disable();
	for_each_online_cpu(cpu) {
		if (cpu >= maxcpu)
			break;
		smp_call_function_single(cpu, print_local_APIC, NULL, 1);
	}
	preempt_enable();
}

__apicdebuginit(void) print_PIC(void)
{
	unsigned int v;
	unsigned long flags;

	if (!legacy_pic->nr_legacy_irqs)
		return;

	printk(KERN_DEBUG "\nprinting PIC contents\n");

	raw_spin_lock_irqsave(&i8259A_lock, flags);

	v = inb(0xa1) << 8 | inb(0x21);
	printk(KERN_DEBUG "... PIC IMR: %04x\n", v);

	v = inb(0xa0) << 8 | inb(0x20);
	printk(KERN_DEBUG "... PIC IRR: %04x\n", v);

	outb(0x0b, 0xa0);
	outb(0x0b, 0x20);
	v = inb(0xa0) << 8 | inb(0x20);
	outb(0x0a, 0xa0);
	outb(0x0a, 0x20);

	raw_spin_unlock_irqrestore(&i8259A_lock, flags);

	printk(KERN_DEBUG "... PIC ISR: %04x\n", v);

	v = inb(0x4d1) << 8 | inb(0x4d0);
	printk(KERN_DEBUG "... PIC ELCR: %04x\n", v);
}

static int __initdata show_lapic = 1;
static __init int setup_show_lapic(char *arg)
{
	int num = -1;

	if (strcmp(arg, "all") == 0) {
		show_lapic = CONFIG_NR_CPUS;
	} else {
		get_option(&arg, &num);
		if (num >= 0)
			show_lapic = num;
	}

	return 1;
}
__setup("show_lapic=", setup_show_lapic);

__apicdebuginit(int) print_ICs(void)
{
	if (apic_verbosity == APIC_QUIET)
		return 0;

	print_PIC();

	/* don't print out if apic is not there */
	if (!cpu_has_apic && !apic_from_smp_config())
		return 0;

	print_local_APICs(show_lapic);
	print_IO_APIC();

	return 0;
}

fs_initcall(print_ICs);


/* Where, if anywhere, is the i8259 connected in external int mode? */
static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };

void __init enable_IO_APIC(void)
{
	int i8259_apic, i8259_pin;
	int apic;

	if (!legacy_pic->nr_legacy_irqs)
		return;

	for (apic = 0; apic < nr_ioapics; apic++) {
		int pin;
		/* See if any of the pins is in ExtINT mode */
		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
			struct IO_APIC_route_entry entry;
			entry = ioapic_read_entry(apic, pin);

			/* If the interrupt line is enabled and in ExtInt mode
			 * I have found the pin where the i8259 is connected.
			 */
			if ((entry.mask == 0) && (entry.delivery_mode == dest_ExtINT)) {
				ioapic_i8259.apic = apic;
				ioapic_i8259.pin = pin;
				goto found_i8259;
			}
		}
	}
 found_i8259:
	/* Look to see whether the MP table has reported the ExtINT. */
	/* If we could not find the appropriate pin by looking at the ioapic
	 * the i8259 probably is not connected to the ioapic, but give the
	 * mptable a chance anyway.
	 */
	i8259_pin = find_isa_irq_pin(0, mp_ExtINT);
	i8259_apic = find_isa_irq_apic(0, mp_ExtINT);
	/* Trust the MP table if nothing is setup in the hardware */
	if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) {
		printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n");
		ioapic_i8259.pin = i8259_pin;
		ioapic_i8259.apic = i8259_apic;
	}
	/* Complain if the MP table and the hardware disagree */
	if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) &&
	    (i8259_pin >= 0) && (ioapic_i8259.pin >= 0))
	{
		printk(KERN_WARNING "ExtINT in hardware and MP table differ\n");
	}

	/*
	 * Do not trust the IO-APIC being empty at bootup
	 */
	clear_IO_APIC();
}

/*
 * Not an __init, needed by the reboot code
 */
void disable_IO_APIC(void)
{
	/*
	 * Clear the IO-APIC before rebooting:
	 */
	clear_IO_APIC();

	if (!legacy_pic->nr_legacy_irqs)
		return;

	/*
	 * If the i8259 is routed through an IOAPIC
	 * Put that IOAPIC in virtual wire mode
	 * so legacy interrupts can be delivered.
	 *
	 * With interrupt-remapping, for now we will use virtual wire A
	 * mode, as virtual wire B is a little more complex (it needs to
	 * configure both the IOAPIC RTE and the interrupt-remapping table
	 * entry). As this gets called during crash dump, keep this simple
	 * for now.
	 */
	if (ioapic_i8259.pin != -1 && !intr_remapping_enabled) {
		struct IO_APIC_route_entry entry;

		memset(&entry, 0, sizeof(entry));
		entry.mask = 0;			/* Enabled */
		entry.trigger = 0;		/* Edge */
		entry.irr = 0;
		entry.polarity = 0;		/* High */
		entry.delivery_status = 0;
		entry.dest_mode = 0;		/* Physical */
		entry.delivery_mode = dest_ExtINT;	/* ExtInt */
		entry.vector = 0;
		entry.dest = read_apic_id();

		/*
		 * Add it to the IO-APIC irq-routing table:
		 */
		ioapic_write_entry(ioapic_i8259.apic, ioapic_i8259.pin, entry);
	}

	/*
	 * Use virtual wire A mode when interrupt remapping is enabled.
	 */
	if (cpu_has_apic || apic_from_smp_config())
		disconnect_bsp_APIC(!intr_remapping_enabled &&
				    ioapic_i8259.pin != -1);
}
2049
2050 #ifdef CONFIG_X86_32
2051 /*
2052 * function to set the IO-APIC physical IDs based on the
2053 * values stored in the MPC table.
2054 *
2055 * by Matt Domsch <Matt_Domsch@dell.com> Tue Dec 21 12:25:05 CST 1999
2056 */
2057
2058 void __init setup_ioapic_ids_from_mpc(void)
2059 {
2060 union IO_APIC_reg_00 reg_00;
2061 physid_mask_t phys_id_present_map;
2062 int apic_id;
2063 int i;
2064 unsigned char old_id;
2065 unsigned long flags;
2066
2067 if (acpi_ioapic)
2068 return;
2069 /*
2070 * Don't check I/O APIC IDs for xAPIC systems. They have
2071 * no meaning without the serial APIC bus.
2072 */
2073 if (!(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
2074 || APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
2075 return;
2076 /*
2077 * This is broken; anything with a real cpu count has to
2078 * circumvent this idiocy regardless.
2079 */
2080 apic->ioapic_phys_id_map(&phys_cpu_present_map, &phys_id_present_map);
2081
2082 /*
2083 * Set the IOAPIC ID to the value stored in the MPC table.
2084 */
2085 for (apic_id = 0; apic_id < nr_ioapics; apic_id++) {
2086
2087 /* Read the register 0 value */
2088 raw_spin_lock_irqsave(&ioapic_lock, flags);
2089 reg_00.raw = io_apic_read(apic_id, 0);
2090 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
2091
2092 old_id = mp_ioapics[apic_id].apicid;
2093
2094 if (mp_ioapics[apic_id].apicid >= get_physical_broadcast()) {
2095 printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n",
2096 apic_id, mp_ioapics[apic_id].apicid);
2097 printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
2098 reg_00.bits.ID);
2099 mp_ioapics[apic_id].apicid = reg_00.bits.ID;
2100 }
2101
2102 /*
2103 * Sanity check, is the ID really free? Every APIC in a
2104 * system must have a unique ID or we get lots of nice
2105 * 'stuck on smp_invalidate_needed IPI wait' messages.
2106 */
2107 if (apic->check_apicid_used(&phys_id_present_map,
2108 mp_ioapics[apic_id].apicid)) {
2109 printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n",
2110 apic_id, mp_ioapics[apic_id].apicid);
2111 for (i = 0; i < get_physical_broadcast(); i++)
2112 if (!physid_isset(i, phys_id_present_map))
2113 break;
2114 if (i >= get_physical_broadcast())
2115 panic("Max APIC ID exceeded!\n");
2116 printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
2117 i);
2118 physid_set(i, phys_id_present_map);
2119 mp_ioapics[apic_id].apicid = i;
2120 } else {
2121 physid_mask_t tmp;
2122 apic->apicid_to_cpu_present(mp_ioapics[apic_id].apicid, &tmp);
2123 apic_printk(APIC_VERBOSE, "Setting %d in the "
2124 "phys_id_present_map\n",
2125 mp_ioapics[apic_id].apicid);
2126 physids_or(phys_id_present_map, phys_id_present_map, tmp);
2127 }
2128
2129
2130 /*
2131 * We need to adjust the IRQ routing table
2132 * if the ID changed.
2133 */
2134 if (old_id != mp_ioapics[apic_id].apicid)
2135 for (i = 0; i < mp_irq_entries; i++)
2136 if (mp_irqs[i].dstapic == old_id)
2137 mp_irqs[i].dstapic
2138 = mp_ioapics[apic_id].apicid;
2139
2140 /*
2141 * Read the right value from the MPC table and
2142 * write it into the ID register.
2143 */
2144 apic_printk(APIC_VERBOSE, KERN_INFO
2145 "...changing IO-APIC physical APIC ID to %d ...",
2146 mp_ioapics[apic_id].apicid);
2147
2148 reg_00.bits.ID = mp_ioapics[apic_id].apicid;
2149 raw_spin_lock_irqsave(&ioapic_lock, flags);
2150 io_apic_write(apic_id, 0, reg_00.raw);
2151 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
2152
2153 /*
2154 * Sanity check
2155 */
2156 raw_spin_lock_irqsave(&ioapic_lock, flags);
2157 reg_00.raw = io_apic_read(apic_id, 0);
2158 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
2159 if (reg_00.bits.ID != mp_ioapics[apic_id].apicid)
2160 printk("could not set ID!\n");
2161 else
2162 apic_printk(APIC_VERBOSE, " ok.\n");
2163 }
2164 }
2165 #endif
2166
2167 int no_timer_check __initdata;
2168
2169 static int __init notimercheck(char *s)
2170 {
2171 no_timer_check = 1;
2172 return 1;
2173 }
2174 __setup("no_timer_check", notimercheck);
2175
2176 /*
2177 * There is a nasty bug in some older SMP boards whose mptable lies
2178 * about the timer IRQ. We do the following to work around the situation:
2179 *
2180 * - timer IRQ defaults to IO-APIC IRQ
2181 * - if this function detects that timer IRQs are defunct, then we fall
2182 * back to ISA timer IRQs
2183 */
2184 static int __init timer_irq_works(void)
2185 {
2186 unsigned long t1 = jiffies;
2187 unsigned long flags;
2188
2189 if (no_timer_check)
2190 return 1;
2191
2192 local_save_flags(flags);
2193 local_irq_enable();
2194 /* Let ten ticks pass... */
2195 mdelay((10 * 1000) / HZ);
2196 local_irq_restore(flags);
2197
2198 /*
2199 * Expect a few ticks at least, to be sure some possible
2200 * glue logic does not lock up after the first one or
2201 * two ticks in a non-ExtINT mode. Also the local APIC
2202 * might have cached one ExtINT interrupt. Finally, at
2203 * least one tick may be lost due to delays.
2204 */
2205
2206 /* jiffies wrap? */
2207 if (time_after(jiffies, t1 + 4))
2208 return 1;
2209 return 0;
2210 }
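
/*
 * Worked example for the check above (an illustration; assumes HZ=250):
 * mdelay((10 * 1000) / HZ) busy-waits 40ms, i.e. roughly ten timer
 * ticks. The time_after(jiffies, t1 + 4) test then passes once more
 * than four ticks have been counted, which tolerates a cached ExtINT
 * interrupt and several lost ticks while still proving that timer
 * interrupts are being delivered at all.
 */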
2211
2212 /*
2213 * In the SMP+IOAPIC case it might happen that there is an unspecified
2214 * number of pending IRQ events left unhandled. These cases are very
2215 * rare, so we 'resend' these IRQs via IPIs, to the same CPU. It's much
2216 * better to do it this way because then we do not have to be aware of
2217 * 'pending' interrupts in the IRQ path, except at this point.
2218 */
2219 /*
2220 * Edge-triggered handling needs to resend any interrupt
2221 * that was delayed, but this is now handled in the
2222 * device-independent code.
2223 */
2224
2225 /*
2226 * Starting up an edge-triggered IO-APIC interrupt is
2227 * nasty - we need to make sure that we get the edge.
2228 * If it is already asserted for some reason, we need
2229 * to return 1 to indicate that it was pending.
2230 *
2231 * This is not complete - we should be able to fake
2232 * an edge even if it isn't on the 8259A...
2233 */
2234
2235 static unsigned int startup_ioapic_irq(unsigned int irq)
2236 {
2237 int was_pending = 0;
2238 unsigned long flags;
2239 struct irq_cfg *cfg;
2240
2241 raw_spin_lock_irqsave(&ioapic_lock, flags);
2242 if (irq < legacy_pic->nr_legacy_irqs) {
2243 legacy_pic->chip->mask(irq);
2244 if (legacy_pic->irq_pending(irq))
2245 was_pending = 1;
2246 }
2247 cfg = irq_cfg(irq);
2248 __unmask_IO_APIC_irq(cfg);
2249 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
2250
2251 return was_pending;
2252 }
2253
2254 static int ioapic_retrigger_irq(unsigned int irq)
2255 {
2256
2257 struct irq_cfg *cfg = irq_cfg(irq);
2258 unsigned long flags;
2259
2260 raw_spin_lock_irqsave(&vector_lock, flags);
2261 apic->send_IPI_mask(cpumask_of(cpumask_first(cfg->domain)), cfg->vector);
2262 raw_spin_unlock_irqrestore(&vector_lock, flags);
2263
2264 return 1;
2265 }
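
/*
 * Note (descriptive): retriggering above works by sending the irq's
 * current vector as an IPI to the first cpu in its vector domain, so
 * the interrupt fires again exactly as if the IO-APIC had delivered it.
 */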
2266
2267 /*
2268 * Level and edge triggered IO-APIC interrupts need different handling,
2269 * so we use two separate IRQ descriptors. Edge triggered IRQs can be
2270 * handled with the level-triggered descriptor, but that one has slightly
2271 * more overhead. Level-triggered interrupts cannot be handled with the
2272 * edge-triggered handler, without risking IRQ storms and other ugly
2273 * races.
2274 */
2275
2276 #ifdef CONFIG_SMP
2277 void send_cleanup_vector(struct irq_cfg *cfg)
2278 {
2279 cpumask_var_t cleanup_mask;
2280
2281 if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) {
2282 unsigned int i;
2283 for_each_cpu_and(i, cfg->old_domain, cpu_online_mask)
2284 apic->send_IPI_mask(cpumask_of(i), IRQ_MOVE_CLEANUP_VECTOR);
2285 } else {
2286 cpumask_and(cleanup_mask, cfg->old_domain, cpu_online_mask);
2287 apic->send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
2288 free_cpumask_var(cleanup_mask);
2289 }
2290 cfg->move_in_progress = 0;
2291 }
2292
2293 static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq_cfg *cfg)
2294 {
2295 int apic, pin;
2296 struct irq_pin_list *entry;
2297 u8 vector = cfg->vector;
2298
2299 for_each_irq_pin(entry, cfg->irq_2_pin) {
2300 unsigned int reg;
2301
2302 apic = entry->apic;
2303 pin = entry->pin;
2304 /*
2305 * With interrupt-remapping, destination information comes
2306 * from interrupt-remapping table entry.
2307 */
2308 if (!irq_remapped(irq))
2309 io_apic_write(apic, 0x11 + pin*2, dest);
2310 reg = io_apic_read(apic, 0x10 + pin*2);
2311 reg &= ~IO_APIC_REDIR_VECTOR_MASK;
2312 reg |= vector;
2313 io_apic_modify(apic, 0x10 + pin*2, reg);
2314 }
2315 }
2316
2317 /*
2318 * Either sets desc->affinity to a valid value, and returns
2319 * ->cpu_mask_to_apicid of that in dest_id, or returns -1 and
2320 * leaves desc->affinity untouched.
2321 */
2322 unsigned int
2323 set_desc_affinity(struct irq_desc *desc, const struct cpumask *mask,
2324 unsigned int *dest_id)
2325 {
2326 struct irq_cfg *cfg;
2327 unsigned int irq;
2328
2329 if (!cpumask_intersects(mask, cpu_online_mask))
2330 return -1;
2331
2332 irq = desc->irq;
2333 cfg = desc->chip_data;
2334 if (assign_irq_vector(irq, cfg, mask))
2335 return -1;
2336
2337 cpumask_copy(desc->affinity, mask);
2338
2339 *dest_id = apic->cpu_mask_to_apicid_and(desc->affinity, cfg->domain);
2340 return 0;
2341 }
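
/*
 * Typical caller pattern (see set_ioapic_affinity_irq_desc() just
 * below): take ioapic_lock, let set_desc_affinity() validate the mask
 * and compute the destination id, then rewrite the routing entries
 * with __target_IO_APIC_irq().
 */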
2342
2343 static int
2344 set_ioapic_affinity_irq_desc(struct irq_desc *desc, const struct cpumask *mask)
2345 {
2346 struct irq_cfg *cfg;
2347 unsigned long flags;
2348 unsigned int dest;
2349 unsigned int irq;
2350 int ret = -1;
2351
2352 irq = desc->irq;
2353 cfg = desc->chip_data;
2354
2355 raw_spin_lock_irqsave(&ioapic_lock, flags);
2356 ret = set_desc_affinity(desc, mask, &dest);
2357 if (!ret) {
2358 /* Only the high 8 bits are valid. */
2359 dest = SET_APIC_LOGICAL_ID(dest);
2360 __target_IO_APIC_irq(irq, dest, cfg);
2361 }
2362 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
2363
2364 return ret;
2365 }
2366
2367 static int
2368 set_ioapic_affinity_irq(unsigned int irq, const struct cpumask *mask)
2369 {
2370 struct irq_desc *desc;
2371
2372 desc = irq_to_desc(irq);
2373
2374 return set_ioapic_affinity_irq_desc(desc, mask);
2375 }
2376
2377 #ifdef CONFIG_INTR_REMAP
2378
2379 /*
2380 * Migrate the IO-APIC irq in the presence of intr-remapping.
2381 *
2382 * For both level and edge triggered, irq migration is a simple atomic
2383 * update (of vector and cpu destination) of the IRTE and a flush of
2384 * the hardware cache.
2385 * For level triggered, we eliminate the io-apic RTE modification (with
2386 * the updated vector information) by using a virtual vector (the io-apic
2387 * pin number); the real vector used to interrupt the cpu comes from the
2388 * interrupt-remapping table entry.
2389 */
2390 static int
2391 migrate_ioapic_irq_desc(struct irq_desc *desc, const struct cpumask *mask)
2392 {
2393 struct irq_cfg *cfg;
2394 struct irte irte;
2395 unsigned int dest;
2396 unsigned int irq;
2397 int ret = -1;
2398
2399 if (!cpumask_intersects(mask, cpu_online_mask))
2400 return ret;
2401
2402 irq = desc->irq;
2403 if (get_irte(irq, &irte))
2404 return ret;
2405
2406 cfg = desc->chip_data;
2407 if (assign_irq_vector(irq, cfg, mask))
2408 return ret;
2409
2410 dest = apic->cpu_mask_to_apicid_and(cfg->domain, mask);
2411
2412 irte.vector = cfg->vector;
2413 irte.dest_id = IRTE_DEST(dest);
2414
2415 /*
2416 * Modify the IRTE and flush the interrupt entry cache.
2417 */
2418 modify_irte(irq, &irte);
2419
2420 if (cfg->move_in_progress)
2421 send_cleanup_vector(cfg);
2422
2423 cpumask_copy(desc->affinity, mask);
2424
2425 return 0;
2426 }
2427
2428 /*
2429 * Migrate the IRQ destination in process context.
2430 */
2431 static int set_ir_ioapic_affinity_irq_desc(struct irq_desc *desc,
2432 const struct cpumask *mask)
2433 {
2434 return migrate_ioapic_irq_desc(desc, mask);
2435 }
2436 static int set_ir_ioapic_affinity_irq(unsigned int irq,
2437 const struct cpumask *mask)
2438 {
2439 struct irq_desc *desc = irq_to_desc(irq);
2440
2441 return set_ir_ioapic_affinity_irq_desc(desc, mask);
2442 }
2443 #else
2444 static inline int set_ir_ioapic_affinity_irq_desc(struct irq_desc *desc,
2445 const struct cpumask *mask)
2446 {
2447 return 0;
2448 }
2449 #endif
2450
2451 asmlinkage void smp_irq_move_cleanup_interrupt(void)
2452 {
2453 unsigned vector, me;
2454
2455 ack_APIC_irq();
2456 exit_idle();
2457 irq_enter();
2458
2459 me = smp_processor_id();
2460 for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
2461 unsigned int irq;
2462 unsigned int irr;
2463 struct irq_desc *desc;
2464 struct irq_cfg *cfg;
2465 irq = __get_cpu_var(vector_irq)[vector];
2466
2467 if (irq == -1)
2468 continue;
2469
2470 desc = irq_to_desc(irq);
2471 if (!desc)
2472 continue;
2473
2474 cfg = irq_cfg(irq);
2475 raw_spin_lock(&desc->lock);
2476
2477 /*
2478 * Check if the irq migration is in progress. If so, we
2479 * haven't received the cleanup request yet for this irq.
2480 */
2481 if (cfg->move_in_progress)
2482 goto unlock;
2483
2484 if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
2485 goto unlock;
2486
2487 irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
2488 /*
2489 * Check if the vector that needs to be cleaned up is
2490 * registered in the cpu's IRR. If so, then this is not
2491 * the best time to clean it up. Let's clean it up in the
2492 * next attempt by sending another IRQ_MOVE_CLEANUP_VECTOR
2493 * to ourselves.
2494 */
2495 if (irr & (1 << (vector % 32))) {
2496 apic->send_IPI_self(IRQ_MOVE_CLEANUP_VECTOR);
2497 goto unlock;
2498 }
2499 __get_cpu_var(vector_irq)[vector] = -1;
2500 unlock:
2501 raw_spin_unlock(&desc->lock);
2502 }
2503
2504 irq_exit();
2505 }
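
/*
 * Illustrative helper (hypothetical, not used by this file): the IRR
 * test done open-coded in the loop above. The local APIC IRR is 256
 * bits wide, exposed as eight 32-bit registers spaced 0x10 apart, so
 * vector N lives in bit (N % 32) of register APIC_IRR + (N / 32) * 0x10.
 */
static inline int __maybe_unused apic_irr_pending(unsigned int vector)
{
	unsigned long irr = apic_read(APIC_IRR + (vector / 32 * 0x10));

	return !!(irr & (1UL << (vector % 32)));
}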
2506
2507 static void __irq_complete_move(struct irq_desc **descp, unsigned vector)
2508 {
2509 struct irq_desc *desc = *descp;
2510 struct irq_cfg *cfg = desc->chip_data;
2511 unsigned me;
2512
2513 if (likely(!cfg->move_in_progress))
2514 return;
2515
2516 me = smp_processor_id();
2517
2518 if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
2519 send_cleanup_vector(cfg);
2520 }
2521
2522 static void irq_complete_move(struct irq_desc **descp)
2523 {
2524 __irq_complete_move(descp, ~get_irq_regs()->orig_ax);
2525 }
2526
2527 void irq_force_complete_move(int irq)
2528 {
2529 struct irq_desc *desc = irq_to_desc(irq);
2530 struct irq_cfg *cfg = desc->chip_data;
2531
2532 if (!cfg)
2533 return;
2534
2535 __irq_complete_move(&desc, cfg->vector);
2536 }
2537 #else
2538 static inline void irq_complete_move(struct irq_desc **descp) {}
2539 #endif
2540
2541 static void ack_apic_edge(unsigned int irq)
2542 {
2543 struct irq_desc *desc = irq_to_desc(irq);
2544
2545 irq_complete_move(&desc);
2546 move_native_irq(irq);
2547 ack_APIC_irq();
2548 }
2549
2550 atomic_t irq_mis_count;
2551
2552 /*
2553 * IO-APIC versions below 0x20 don't support the EOI register.
2554 * For the record, here is the information about various versions:
2555 * 0Xh 82489DX
2556 * 1Xh I/OAPIC or I/O(x)APIC which are not PCI 2.2 Compliant
2557 * 2Xh I/O(x)APIC which is PCI 2.2 Compliant
2558 * 30h-FFh Reserved
2559 *
2560 * Some of the Intel ICH specs (ICH2 to ICH5) document the io-apic
2561 * version as 0x2. This is an error in the documentation; these ICH
2562 * chips actually use io-apics of version 0x20.
2563 *
2564 * For IO-APICs with an EOI register, we use that to do an explicit EOI.
2565 * Otherwise, we simulate the EOI message manually by changing the trigger
2566 * mode to edge and then back to level, with RTE being masked during this.
2567 */
2568 static void __eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg)
2569 {
2570 struct irq_pin_list *entry;
2571
2572 for_each_irq_pin(entry, cfg->irq_2_pin) {
2573 if (mp_ioapics[entry->apic].apicver >= 0x20) {
2574 /*
2575 * Intr-remapping uses pin number as the virtual vector
2576 * in the RTE. Actual vector is programmed in
2577 * intr-remapping table entry. Hence for the io-apic
2578 * EOI we use the pin number.
2579 */
2580 if (irq_remapped(irq))
2581 io_apic_eoi(entry->apic, entry->pin);
2582 else
2583 io_apic_eoi(entry->apic, cfg->vector);
2584 } else {
2585 __mask_and_edge_IO_APIC_irq(entry);
2586 __unmask_and_level_IO_APIC_irq(entry);
2587 }
2588 }
2589 }
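
/*
 * Note (descriptive): the 0x20 version gate above is the same value
 * io_apic_get_version() reads from IO-APIC register 1 near the end of
 * this file; only version 0x20 and later parts implement the EOI
 * register that io_apic_eoi() writes.
 */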
2590
2591 static void eoi_ioapic_irq(struct irq_desc *desc)
2592 {
2593 struct irq_cfg *cfg;
2594 unsigned long flags;
2595 unsigned int irq;
2596
2597 irq = desc->irq;
2598 cfg = desc->chip_data;
2599
2600 raw_spin_lock_irqsave(&ioapic_lock, flags);
2601 __eoi_ioapic_irq(irq, cfg);
2602 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
2603 }
2604
2605 static void ack_apic_level(unsigned int irq)
2606 {
2607 struct irq_desc *desc = irq_to_desc(irq);
2608 unsigned long v;
2609 int i;
2610 struct irq_cfg *cfg;
2611 int do_unmask_irq = 0;
2612
2613 irq_complete_move(&desc);
2614 #ifdef CONFIG_GENERIC_PENDING_IRQ
2615 /* If we are moving the irq we need to mask it */
2616 if (unlikely(desc->status & IRQ_MOVE_PENDING)) {
2617 do_unmask_irq = 1;
2618 mask_IO_APIC_irq_desc(desc);
2619 }
2620 #endif
2621
2622 /*
2623 * It appears there is an erratum which affects at least version 0x11
2624 * of I/O APIC (that's the 82093AA and cores integrated into various
2625 * chipsets). Under certain conditions a level-triggered interrupt is
2626 * erroneously delivered as an edge-triggered one but the respective IRR
2627 * bit gets set nevertheless. As a result the I/O unit expects an EOI
2628 * message but it will never arrive and further interrupts are blocked
2629 * from the source. The exact reason is so far unknown, but the
2630 * phenomenon was observed when two consecutive interrupt requests
2631 * from a given source get delivered to the same CPU and the source is
2632 * temporarily disabled in between.
2633 *
2634 * A workaround is to simulate an EOI message manually. We achieve it
2635 * by setting the trigger mode to edge and then to level when the edge
2636 * trigger mode gets detected in the TMR of a local APIC for a
2637 * level-triggered interrupt. We mask the source for the time of the
2638 * operation to prevent an edge-triggered interrupt escaping meanwhile.
2639 * The idea is from Manfred Spraul. --macro
2640 *
2641 * Also, in the case when a cpu goes offline, fixup_irqs() will forward
2642 * any unhandled interrupt on the offlined cpu to the new cpu
2643 * destination that is handling the corresponding interrupt. This
2644 * interrupt forwarding is done via IPIs. Hence, in this case a
2645 * level-triggered io-apic interrupt will also be seen as an edge
2646 * interrupt in the IRR. And we can't rely on the cpu's EOI
2647 * being broadcast to the IO-APICs to clear the remote IRR
2648 * corresponding to the level-triggered interrupt. Hence, on IO-APICs
2649 * supporting the EOI register, we do an explicit EOI to clear the
2650 * remote IRR, and on IO-APICs which don't have an EOI register,
2651 * we use the above logic (mask+edge followed by unmask+level) from
2652 * Manfred Spraul to clear the remote IRR.
2653 */
2654 cfg = desc->chip_data;
2655 i = cfg->vector;
2656 v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1));
2657
2658 /*
2659 * We must acknowledge the irq before we move it or the acknowledge will
2660 * not propagate properly.
2661 */
2662 ack_APIC_irq();
2663
2664 /*
2665 * Tail end of clearing remote IRR bit (either by delivering the EOI
2666 * message via io-apic EOI register write or simulating it using
2667 * mask+edge followed by unmask+level logic) manually when the
2668 * level-triggered interrupt is seen as an edge-triggered interrupt
2669 * at the cpu.
2670 */
2671 if (!(v & (1 << (i & 0x1f)))) {
2672 atomic_inc(&irq_mis_count);
2673
2674 eoi_ioapic_irq(desc);
2675 }
2676
2677 /* Now we can move and re-enable the irq */
2678 if (unlikely(do_unmask_irq)) {
2679 /* Only migrate the irq if the ack has been received.
2680 *
2681 * On rare occasions the broadcast level triggered ack gets
2682 * delayed going to ioapics, and if we reprogram the
2683 * vector while Remote IRR is still set the irq will never
2684 * fire again.
2685 *
2686 * To prevent this scenario we read the Remote IRR bit
2687 * of the ioapic. This has two effects.
2688 * - On any sane system the read of the ioapic will
2689 * flush writes (and acks) going to the ioapic from
2690 * this cpu.
2691 * - We get to see if the ACK has actually been delivered.
2692 *
2693 * Based on failed experiments of reprogramming the
2694 * ioapic entry from outside of irq context (starting
2695 * with masking the ioapic entry and then polling until
2696 * Remote IRR was clear before reprogramming the
2697 * ioapic), I don't trust the Remote IRR bit to be
2698 * completely accurate.
2699 *
2700 * However there appears to be no other way to plug
2701 * this race, so if the Remote IRR bit is not
2702 * accurate and is causing problems then it is a hardware bug
2703 * and you can go talk to the chipset vendor about it.
2704 */
2705 cfg = desc->chip_data;
2706 if (!io_apic_level_ack_pending(cfg))
2707 move_masked_irq(irq);
2708 unmask_IO_APIC_irq_desc(desc);
2709 }
2710 }
2711
2712 #ifdef CONFIG_INTR_REMAP
2713 static void ir_ack_apic_edge(unsigned int irq)
2714 {
2715 ack_APIC_irq();
2716 }
2717
2718 static void ir_ack_apic_level(unsigned int irq)
2719 {
2720 struct irq_desc *desc = irq_to_desc(irq);
2721
2722 ack_APIC_irq();
2723 eoi_ioapic_irq(desc);
2724 }
2725 #endif /* CONFIG_INTR_REMAP */
2726
2727 static struct irq_chip ioapic_chip __read_mostly = {
2728 .name = "IO-APIC",
2729 .startup = startup_ioapic_irq,
2730 .mask = mask_IO_APIC_irq,
2731 .unmask = unmask_IO_APIC_irq,
2732 .ack = ack_apic_edge,
2733 .eoi = ack_apic_level,
2734 #ifdef CONFIG_SMP
2735 .set_affinity = set_ioapic_affinity_irq,
2736 #endif
2737 .retrigger = ioapic_retrigger_irq,
2738 };
2739
2740 static struct irq_chip ir_ioapic_chip __read_mostly = {
2741 .name = "IR-IO-APIC",
2742 .startup = startup_ioapic_irq,
2743 .mask = mask_IO_APIC_irq,
2744 .unmask = unmask_IO_APIC_irq,
2745 #ifdef CONFIG_INTR_REMAP
2746 .ack = ir_ack_apic_edge,
2747 .eoi = ir_ack_apic_level,
2748 #ifdef CONFIG_SMP
2749 .set_affinity = set_ir_ioapic_affinity_irq,
2750 #endif
2751 #endif
2752 .retrigger = ioapic_retrigger_irq,
2753 };
2754
2755 static inline void init_IO_APIC_traps(void)
2756 {
2757 int irq;
2758 struct irq_desc *desc;
2759 struct irq_cfg *cfg;
2760
2761 /*
2762 * NOTE! The local APIC isn't very good at handling
2763 * multiple interrupts at the same interrupt level.
2764 * As the interrupt level is determined by taking the
2765 * vector number and shifting that right by 4, we
2766 * want to spread these out a bit so that they don't
2767 * all fall in the same interrupt level.
2768 *
2769 * Also, we've got to be careful not to trash gate
2770 * 0x80, because int 0x80 is hm, kind of importantish. ;)
2771 */
2772 for_each_irq_desc(irq, desc) {
2773 cfg = desc->chip_data;
2774 if (IO_APIC_IRQ(irq) && cfg && !cfg->vector) {
2775 /*
2776 * Hmm.. We don't have an entry for this,
2777 * so default to an old-fashioned 8259
2778 * interrupt if we can..
2779 */
2780 if (irq < legacy_pic->nr_legacy_irqs)
2781 legacy_pic->make_irq(irq);
2782 else
2783 /* Strange. Oh, well.. */
2784 desc->chip = &no_irq_chip;
2785 }
2786 }
2787 }
2788
2789 /*
2790 * The local APIC irq-chip implementation:
2791 */
2792
2793 static void mask_lapic_irq(unsigned int irq)
2794 {
2795 unsigned long v;
2796
2797 v = apic_read(APIC_LVT0);
2798 apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
2799 }
2800
2801 static void unmask_lapic_irq(unsigned int irq)
2802 {
2803 unsigned long v;
2804
2805 v = apic_read(APIC_LVT0);
2806 apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED);
2807 }
2808
2809 static void ack_lapic_irq(unsigned int irq)
2810 {
2811 ack_APIC_irq();
2812 }
2813
2814 static struct irq_chip lapic_chip __read_mostly = {
2815 .name = "local-APIC",
2816 .mask = mask_lapic_irq,
2817 .unmask = unmask_lapic_irq,
2818 .ack = ack_lapic_irq,
2819 };
2820
2821 static void lapic_register_intr(int irq, struct irq_desc *desc)
2822 {
2823 desc->status &= ~IRQ_LEVEL;
2824 set_irq_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq,
2825 "edge");
2826 }
2827
2828 static void __init setup_nmi(void)
2829 {
2830 /*
2831 * Dirty trick to enable the NMI watchdog ...
2832 * We put the 8259A master into AEOI mode and
2833 * unmask LVT0 as NMI on all local APICs.
2834 *
2835 * The idea to use the 8259A in AEOI mode ('8259A Virtual Wire')
2836 * is from Maciej W. Rozycki - so we do not have to EOI from
2837 * the NMI handler or the timer interrupt.
2838 */
2839 apic_printk(APIC_VERBOSE, KERN_INFO "activating NMI Watchdog ...");
2840
2841 enable_NMI_through_LVT0();
2842
2843 apic_printk(APIC_VERBOSE, " done.\n");
2844 }
2845
2846 /*
2847 * This looks a bit hackish but it's about the only way of sending
2848 * a few INTA cycles to 8259As and any associated glue logic. ICR does
2849 * not support the ExtINT mode, unfortunately. We need to send these
2850 * cycles as some i82489DX-based boards have glue logic that keeps the
2851 * 8259A interrupt line asserted until INTA. --macro
2852 */
2853 static inline void __init unlock_ExtINT_logic(void)
2854 {
2855 int apic, pin, i;
2856 struct IO_APIC_route_entry entry0, entry1;
2857 unsigned char save_control, save_freq_select;
2858
2859 pin = find_isa_irq_pin(8, mp_INT);
2860 if (pin == -1) {
2861 WARN_ON_ONCE(1);
2862 return;
2863 }
2864 apic = find_isa_irq_apic(8, mp_INT);
2865 if (apic == -1) {
2866 WARN_ON_ONCE(1);
2867 return;
2868 }
2869
2870 entry0 = ioapic_read_entry(apic, pin);
2871 clear_IO_APIC_pin(apic, pin);
2872
2873 memset(&entry1, 0, sizeof(entry1));
2874
2875 entry1.dest_mode = 0; /* physical delivery */
2876 entry1.mask = 0; /* unmask IRQ now */
2877 entry1.dest = hard_smp_processor_id();
2878 entry1.delivery_mode = dest_ExtINT;
2879 entry1.polarity = entry0.polarity;
2880 entry1.trigger = 0;
2881 entry1.vector = 0;
2882
2883 ioapic_write_entry(apic, pin, entry1);
2884
2885 save_control = CMOS_READ(RTC_CONTROL);
2886 save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
2887 CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6,
2888 RTC_FREQ_SELECT);
2889 CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL);
2890
2891 i = 100;
2892 while (i-- > 0) {
2893 mdelay(10);
2894 if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF)
2895 i -= 10;
2896 }
2897
2898 CMOS_WRITE(save_control, RTC_CONTROL);
2899 CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
2900 clear_IO_APIC_pin(apic, pin);
2901
2902 ioapic_write_entry(apic, pin, entry0);
2903 }
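
/*
 * Note on the RTC trick above (descriptive; rate select 0x6 is the
 * MC146818 divider setting for 1024Hz): RTC_PIE enables the periodic
 * interrupt so a stream of INTA cycles flows through the 8259A, and
 * each mdelay(10) window should observe RTC_PF set; the extra i -= 10
 * simply ends the loop early once those interrupts are seen flowing.
 */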
2904
2905 static int disable_timer_pin_1 __initdata;
2906 /* Actually the next is obsolete, but keep it for paranoid reasons -AK */
2907 static int __init disable_timer_pin_setup(char *arg)
2908 {
2909 disable_timer_pin_1 = 1;
2910 return 0;
2911 }
2912 early_param("disable_timer_pin_1", disable_timer_pin_setup);
2913
2914 int timer_through_8259 __initdata;
2915
2916 /*
2917 * This code may look a bit paranoid, but it's supposed to cooperate with
2918 * a wide range of boards and BIOS bugs. Fortunately only the timer IRQ
2919 * is so screwy. Thanks to Brian Perkins for testing/hacking this beast
2920 * fanatically on his truly buggy board.
2921 *
2922 * FIXME: really need to revamp this for all platforms.
2923 */
2924 static inline void __init check_timer(void)
2925 {
2926 struct irq_desc *desc = irq_to_desc(0);
2927 struct irq_cfg *cfg = desc->chip_data;
2928 int node = cpu_to_node(boot_cpu_id);
2929 int apic1, pin1, apic2, pin2;
2930 unsigned long flags;
2931 int no_pin1 = 0;
2932
2933 local_irq_save(flags);
2934
2935 /*
2936 * get/set the timer IRQ vector:
2937 */
2938 legacy_pic->chip->mask(0);
2939 assign_irq_vector(0, cfg, apic->target_cpus());
2940
2941 /*
2942 * As IRQ0 is to be enabled in the 8259A, the virtual
2943 * wire has to be disabled in the local APIC. Also
2944 * timer interrupts need to be acknowledged manually in
2945 * the 8259A for the i82489DX when using the NMI
2946 * watchdog as that APIC treats NMIs as level-triggered.
2947 * The AEOI mode will finish them in the 8259A
2948 * automatically.
2949 */
2950 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
2951 legacy_pic->init(1);
2952 #ifdef CONFIG_X86_32
2953 {
2954 unsigned int ver;
2955
2956 ver = apic_read(APIC_LVR);
2957 ver = GET_APIC_VERSION(ver);
2958 timer_ack = (nmi_watchdog == NMI_IO_APIC && !APIC_INTEGRATED(ver));
2959 }
2960 #endif
2961
2962 pin1 = find_isa_irq_pin(0, mp_INT);
2963 apic1 = find_isa_irq_apic(0, mp_INT);
2964 pin2 = ioapic_i8259.pin;
2965 apic2 = ioapic_i8259.apic;
2966
2967 apic_printk(APIC_QUIET, KERN_INFO "..TIMER: vector=0x%02X "
2968 "apic1=%d pin1=%d apic2=%d pin2=%d\n",
2969 cfg->vector, apic1, pin1, apic2, pin2);
2970
2971 /*
2972 * Some BIOS writers are clueless and report the ExtINTA
2973 * I/O APIC input from the cascaded 8259A as the timer
2974 * interrupt input. So just in case, if only one pin
2975 * was found above, try it both directly and through the
2976 * 8259A.
2977 */
2978 if (pin1 == -1) {
2979 if (intr_remapping_enabled)
2980 panic("BIOS bug: timer not connected to IO-APIC");
2981 pin1 = pin2;
2982 apic1 = apic2;
2983 no_pin1 = 1;
2984 } else if (pin2 == -1) {
2985 pin2 = pin1;
2986 apic2 = apic1;
2987 }
2988
2989 if (pin1 != -1) {
2990 /*
2991 * Ok, does IRQ0 through the IOAPIC work?
2992 */
2993 if (no_pin1) {
2994 add_pin_to_irq_node(cfg, node, apic1, pin1);
2995 setup_timer_IRQ0_pin(apic1, pin1, cfg->vector);
2996 } else {
2997 /* for an edge trigger, setup_IO_APIC_irq already
2998 * leaves it unmasked, so we only need to unmask
2999 * if it is level-triggered.
3000 * Do we really have a level-triggered timer?
3001 */
3002 int idx;
3003 idx = find_irq_entry(apic1, pin1, mp_INT);
3004 if (idx != -1 && irq_trigger(idx))
3005 unmask_IO_APIC_irq_desc(desc);
3006 }
3007 if (timer_irq_works()) {
3008 if (nmi_watchdog == NMI_IO_APIC) {
3009 setup_nmi();
3010 legacy_pic->chip->unmask(0);
3011 }
3012 if (disable_timer_pin_1 > 0)
3013 clear_IO_APIC_pin(0, pin1);
3014 goto out;
3015 }
3016 if (intr_remapping_enabled)
3017 panic("timer doesn't work through Interrupt-remapped IO-APIC");
3018 local_irq_disable();
3019 clear_IO_APIC_pin(apic1, pin1);
3020 if (!no_pin1)
3021 apic_printk(APIC_QUIET, KERN_ERR "..MP-BIOS bug: "
3022 "8254 timer not connected to IO-APIC\n");
3023
3024 apic_printk(APIC_QUIET, KERN_INFO "...trying to set up timer "
3025 "(IRQ0) through the 8259A ...\n");
3026 apic_printk(APIC_QUIET, KERN_INFO
3027 "..... (found apic %d pin %d) ...\n", apic2, pin2);
3028 /*
3029 * legacy devices should be connected to IO APIC #0
3030 */
3031 replace_pin_at_irq_node(cfg, node, apic1, pin1, apic2, pin2);
3032 setup_timer_IRQ0_pin(apic2, pin2, cfg->vector);
3033 legacy_pic->chip->unmask(0);
3034 if (timer_irq_works()) {
3035 apic_printk(APIC_QUIET, KERN_INFO "....... works.\n");
3036 timer_through_8259 = 1;
3037 if (nmi_watchdog == NMI_IO_APIC) {
3038 legacy_pic->chip->mask(0);
3039 setup_nmi();
3040 legacy_pic->chip->unmask(0);
3041 }
3042 goto out;
3043 }
3044 /*
3045 * Cleanup, just in case ...
3046 */
3047 local_irq_disable();
3048 legacy_pic->chip->mask(0);
3049 clear_IO_APIC_pin(apic2, pin2);
3050 apic_printk(APIC_QUIET, KERN_INFO "....... failed.\n");
3051 }
3052
3053 if (nmi_watchdog == NMI_IO_APIC) {
3054 apic_printk(APIC_QUIET, KERN_WARNING "timer doesn't work "
3055 "through the IO-APIC - disabling NMI Watchdog!\n");
3056 nmi_watchdog = NMI_NONE;
3057 }
3058 #ifdef CONFIG_X86_32
3059 timer_ack = 0;
3060 #endif
3061
3062 apic_printk(APIC_QUIET, KERN_INFO
3063 "...trying to set up timer as Virtual Wire IRQ...\n");
3064
3065 lapic_register_intr(0, desc);
3066 apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector); /* Fixed mode */
3067 legacy_pic->chip->unmask(0);
3068
3069 if (timer_irq_works()) {
3070 apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
3071 goto out;
3072 }
3073 local_irq_disable();
3074 legacy_pic->chip->mask(0);
3075 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector);
3076 apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n");
3077
3078 apic_printk(APIC_QUIET, KERN_INFO
3079 "...trying to set up timer as ExtINT IRQ...\n");
3080
3081 legacy_pic->init(0);
3082 legacy_pic->make_irq(0);
3083 apic_write(APIC_LVT0, APIC_DM_EXTINT);
3084
3085 unlock_ExtINT_logic();
3086
3087 if (timer_irq_works()) {
3088 apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
3089 goto out;
3090 }
3091 local_irq_disable();
3092 apic_printk(APIC_QUIET, KERN_INFO "..... failed :(.\n");
3093 panic("IO-APIC + timer doesn't work! Boot with apic=debug and send a "
3094 "report. Then try booting with the 'noapic' option.\n");
3095 out:
3096 local_irq_restore(flags);
3097 }
3098
3099 /*
3100 * Traditionally ISA IRQ2 is the cascade IRQ, and is not available
3101 * to devices. However there may be an I/O APIC pin available for
3102 * this interrupt regardless. The pin may be left unconnected, but
3103 * typically it will be reused as an ExtINT cascade interrupt for
3104 * the master 8259A. In the MPS case such a pin will normally be
3105 * reported as an ExtINT interrupt in the MP table. With ACPI
3106 * there is no provision for ExtINT interrupts, and in the absence
3107 * of an override it would be treated as an ordinary ISA I/O APIC
3108 * interrupt, that is edge-triggered and unmasked by default. We
3109 * used to do this, but it caused problems on some systems because
3110 * of the NMI watchdog and sometimes IRQ0 of the 8254 timer using
3111 * the same ExtINT cascade interrupt to drive the local APIC of the
3112 * bootstrap processor. Therefore we refrain from routing IRQ2 to
3113 * the I/O APIC in all cases now. No actual device should request
3114 * it anyway. --macro
3115 */
3116 #define PIC_IRQS (1UL << PIC_CASCADE_IR)
3117
3118 void __init setup_IO_APIC(void)
3119 {
3120
3121 /*
3122 * calling enable_IO_APIC() is moved to setup_local_APIC for BP
3123 */
3124 io_apic_irqs = legacy_pic->nr_legacy_irqs ? ~PIC_IRQS : ~0UL;
3125
3126 apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n");
3127 /*
3128 * Set up IO-APIC IRQ routing.
3129 */
3130 x86_init.mpparse.setup_ioapic_ids();
3131
3132 sync_Arb_IDs();
3133 setup_IO_APIC_irqs();
3134 init_IO_APIC_traps();
3135 if (legacy_pic->nr_legacy_irqs)
3136 check_timer();
3137 }
3138
3139 /*
3140 * Called after all the initialization is done. If we didn't find any
3141 * APIC bugs then we can allow the modify fast path.
3142 */
3143
3144 static int __init io_apic_bug_finalize(void)
3145 {
3146 if (sis_apic_bug == -1)
3147 sis_apic_bug = 0;
3148 return 0;
3149 }
3150
3151 late_initcall(io_apic_bug_finalize);
3152
3153 struct sysfs_ioapic_data {
3154 struct sys_device dev;
3155 struct IO_APIC_route_entry entry[0];
3156 };
3157 static struct sysfs_ioapic_data * mp_ioapic_data[MAX_IO_APICS];
3158
3159 static int ioapic_suspend(struct sys_device *dev, pm_message_t state)
3160 {
3161 struct IO_APIC_route_entry *entry;
3162 struct sysfs_ioapic_data *data;
3163 int i;
3164
3165 data = container_of(dev, struct sysfs_ioapic_data, dev);
3166 entry = data->entry;
3167 for (i = 0; i < nr_ioapic_registers[dev->id]; i++, entry++)
3168 *entry = ioapic_read_entry(dev->id, i);
3169
3170 return 0;
3171 }
3172
3173 static int ioapic_resume(struct sys_device *dev)
3174 {
3175 struct IO_APIC_route_entry *entry;
3176 struct sysfs_ioapic_data *data;
3177 unsigned long flags;
3178 union IO_APIC_reg_00 reg_00;
3179 int i;
3180
3181 data = container_of(dev, struct sysfs_ioapic_data, dev);
3182 entry = data->entry;
3183
3184 raw_spin_lock_irqsave(&ioapic_lock, flags);
3185 reg_00.raw = io_apic_read(dev->id, 0);
3186 if (reg_00.bits.ID != mp_ioapics[dev->id].apicid) {
3187 reg_00.bits.ID = mp_ioapics[dev->id].apicid;
3188 io_apic_write(dev->id, 0, reg_00.raw);
3189 }
3190 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
3191 for (i = 0; i < nr_ioapic_registers[dev->id]; i++)
3192 ioapic_write_entry(dev->id, i, entry[i]);
3193
3194 return 0;
3195 }
3196
3197 static struct sysdev_class ioapic_sysdev_class = {
3198 .name = "ioapic",
3199 .suspend = ioapic_suspend,
3200 .resume = ioapic_resume,
3201 };
3202
3203 static int __init ioapic_init_sysfs(void)
3204 {
3205 struct sys_device * dev;
3206 int i, size, error;
3207
3208 error = sysdev_class_register(&ioapic_sysdev_class);
3209 if (error)
3210 return error;
3211
3212 for (i = 0; i < nr_ioapics; i++ ) {
3213 size = sizeof(struct sys_device) + nr_ioapic_registers[i]
3214 * sizeof(struct IO_APIC_route_entry);
3215 mp_ioapic_data[i] = kzalloc(size, GFP_KERNEL);
3216 if (!mp_ioapic_data[i]) {
3217 printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
3218 continue;
3219 }
3220 dev = &mp_ioapic_data[i]->dev;
3221 dev->id = i;
3222 dev->cls = &ioapic_sysdev_class;
3223 error = sysdev_register(dev);
3224 if (error) {
3225 kfree(mp_ioapic_data[i]);
3226 mp_ioapic_data[i] = NULL;
3227 printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
3228 continue;
3229 }
3230 }
3231
3232 return 0;
3233 }
3234
3235 device_initcall(ioapic_init_sysfs);
3236
3237 /*
3238 * Dynamic irq allocate and deallocation
3239 */
3240 unsigned int create_irq_nr(unsigned int irq_want, int node)
3241 {
3242 /* Allocate an unused irq */
3243 unsigned int irq;
3244 unsigned int new;
3245 unsigned long flags;
3246 struct irq_cfg *cfg_new = NULL;
3247 struct irq_desc *desc_new = NULL;
3248
3249 irq = 0;
3250 if (irq_want < nr_irqs_gsi)
3251 irq_want = nr_irqs_gsi;
3252
3253 raw_spin_lock_irqsave(&vector_lock, flags);
3254 for (new = irq_want; new < nr_irqs; new++) {
3255 desc_new = irq_to_desc_alloc_node(new, node);
3256 if (!desc_new) {
3257 printk(KERN_INFO "can not get irq_desc for %d\n", new);
3258 continue;
3259 }
3260 cfg_new = desc_new->chip_data;
3261
3262 if (cfg_new->vector != 0)
3263 continue;
3264
3265 desc_new = move_irq_desc(desc_new, node);
3266 cfg_new = desc_new->chip_data;
3267
3268 if (__assign_irq_vector(new, cfg_new, apic->target_cpus()) == 0)
3269 irq = new;
3270 break;
3271 }
3272 raw_spin_unlock_irqrestore(&vector_lock, flags);
3273
3274 if (irq > 0)
3275 dynamic_irq_init_keep_chip_data(irq);
3276
3277 return irq;
3278 }
3279
3280 int create_irq(void)
3281 {
3282 int node = cpu_to_node(boot_cpu_id);
3283 unsigned int irq_want;
3284 int irq;
3285
3286 irq_want = nr_irqs_gsi;
3287 irq = create_irq_nr(irq_want, node);
3288
3289 if (irq == 0)
3290 irq = -1;
3291
3292 return irq;
3293 }
3294
3295 void destroy_irq(unsigned int irq)
3296 {
3297 unsigned long flags;
3298
3299 dynamic_irq_cleanup_keep_chip_data(irq);
3300
3301 free_irte(irq);
3302 raw_spin_lock_irqsave(&vector_lock, flags);
3303 __clear_irq_vector(irq, get_irq_chip_data(irq));
3304 raw_spin_unlock_irqrestore(&vector_lock, flags);
3305 }
3306
3307 /*
3308 * MSI message composition
3309 */
3310 #ifdef CONFIG_PCI_MSI
3311 static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq,
3312 struct msi_msg *msg, u8 hpet_id)
3313 {
3314 struct irq_cfg *cfg;
3315 int err;
3316 unsigned dest;
3317
3318 if (disable_apic)
3319 return -ENXIO;
3320
3321 cfg = irq_cfg(irq);
3322 err = assign_irq_vector(irq, cfg, apic->target_cpus());
3323 if (err)
3324 return err;
3325
3326 dest = apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus());
3327
3328 if (irq_remapped(irq)) {
3329 struct irte irte;
3330 int ir_index;
3331 u16 sub_handle;
3332
3333 ir_index = map_irq_to_irte_handle(irq, &sub_handle);
3334 BUG_ON(ir_index == -1);
3335
3336 memset(&irte, 0, sizeof(irte));
3337
3338 irte.present = 1;
3339 irte.dst_mode = apic->irq_dest_mode;
3340 irte.trigger_mode = 0; /* edge */
3341 irte.dlvry_mode = apic->irq_delivery_mode;
3342 irte.vector = cfg->vector;
3343 irte.dest_id = IRTE_DEST(dest);
3344
3345 /* Set source-id of interrupt request */
3346 if (pdev)
3347 set_msi_sid(&irte, pdev);
3348 else
3349 set_hpet_sid(&irte, hpet_id);
3350
3351 modify_irte(irq, &irte);
3352
3353 msg->address_hi = MSI_ADDR_BASE_HI;
3354 msg->data = sub_handle;
3355 msg->address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_IR_EXT_INT |
3356 MSI_ADDR_IR_SHV |
3357 MSI_ADDR_IR_INDEX1(ir_index) |
3358 MSI_ADDR_IR_INDEX2(ir_index);
3359 } else {
3360 if (x2apic_enabled())
3361 msg->address_hi = MSI_ADDR_BASE_HI |
3362 MSI_ADDR_EXT_DEST_ID(dest);
3363 else
3364 msg->address_hi = MSI_ADDR_BASE_HI;
3365
3366 msg->address_lo =
3367 MSI_ADDR_BASE_LO |
3368 ((apic->irq_dest_mode == 0) ?
3369 MSI_ADDR_DEST_MODE_PHYSICAL:
3370 MSI_ADDR_DEST_MODE_LOGICAL) |
3371 ((apic->irq_delivery_mode != dest_LowestPrio) ?
3372 MSI_ADDR_REDIRECTION_CPU:
3373 MSI_ADDR_REDIRECTION_LOWPRI) |
3374 MSI_ADDR_DEST_ID(dest);
3375
3376 msg->data =
3377 MSI_DATA_TRIGGER_EDGE |
3378 MSI_DATA_LEVEL_ASSERT |
3379 ((apic->irq_delivery_mode != dest_LowestPrio) ?
3380 MSI_DATA_DELIVERY_FIXED:
3381 MSI_DATA_DELIVERY_LOWPRI) |
3382 MSI_DATA_VECTOR(cfg->vector);
3383 }
3384 return err;
3385 }
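
/*
 * Layout summary for the non-remapped branch above (descriptive only):
 * address_lo carries the destination apic id plus the destination-mode
 * and redirection-hint bits, while data carries the vector and the
 * delivery mode - exactly the fields set_msi_irq_affinity() below
 * patches when an irq is retargeted.
 */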
3386
3387 #ifdef CONFIG_SMP
3388 static int set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask)
3389 {
3390 struct irq_desc *desc = irq_to_desc(irq);
3391 struct irq_cfg *cfg;
3392 struct msi_msg msg;
3393 unsigned int dest;
3394
3395 if (set_desc_affinity(desc, mask, &dest))
3396 return -1;
3397
3398 cfg = desc->chip_data;
3399
3400 read_msi_msg_desc(desc, &msg);
3401
3402 msg.data &= ~MSI_DATA_VECTOR_MASK;
3403 msg.data |= MSI_DATA_VECTOR(cfg->vector);
3404 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
3405 msg.address_lo |= MSI_ADDR_DEST_ID(dest);
3406
3407 write_msi_msg_desc(desc, &msg);
3408
3409 return 0;
3410 }
3411 #ifdef CONFIG_INTR_REMAP
3412 /*
3413 * Migrate the MSI irq to another cpumask. This migration is
3414 * done in the process context using interrupt-remapping hardware.
3415 */
3416 static int
3417 ir_set_msi_irq_affinity(unsigned int irq, const struct cpumask *mask)
3418 {
3419 struct irq_desc *desc = irq_to_desc(irq);
3420 struct irq_cfg *cfg = desc->chip_data;
3421 unsigned int dest;
3422 struct irte irte;
3423
3424 if (get_irte(irq, &irte))
3425 return -1;
3426
3427 if (set_desc_affinity(desc, mask, &dest))
3428 return -1;
3429
3430 irte.vector = cfg->vector;
3431 irte.dest_id = IRTE_DEST(dest);
3432
3433 /*
3434 * atomically update the IRTE with the new destination and vector.
3435 */
3436 modify_irte(irq, &irte);
3437
3438 /*
3439 * After this point, all the interrupts will start arriving
3440 * at the new destination. So, time to cleanup the previous
3441 * vector allocation.
3442 */
3443 if (cfg->move_in_progress)
3444 send_cleanup_vector(cfg);
3445
3446 return 0;
3447 }
3448
3449 #endif
3450 #endif /* CONFIG_SMP */
3451
3452 /*
3453 * IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices,
3454 * which implement the MSI or MSI-X Capability Structure.
3455 */
3456 static struct irq_chip msi_chip = {
3457 .name = "PCI-MSI",
3458 .unmask = unmask_msi_irq,
3459 .mask = mask_msi_irq,
3460 .ack = ack_apic_edge,
3461 #ifdef CONFIG_SMP
3462 .set_affinity = set_msi_irq_affinity,
3463 #endif
3464 .retrigger = ioapic_retrigger_irq,
3465 };
3466
3467 static struct irq_chip msi_ir_chip = {
3468 .name = "IR-PCI-MSI",
3469 .unmask = unmask_msi_irq,
3470 .mask = mask_msi_irq,
3471 #ifdef CONFIG_INTR_REMAP
3472 .ack = ir_ack_apic_edge,
3473 #ifdef CONFIG_SMP
3474 .set_affinity = ir_set_msi_irq_affinity,
3475 #endif
3476 #endif
3477 .retrigger = ioapic_retrigger_irq,
3478 };
3479
3480 /*
3481 * Map the PCI dev to the corresponding remapping hardware unit
3482 * and allocate 'nvec' consecutive interrupt-remapping table entries
3483 * in it.
3484 */
3485 static int msi_alloc_irte(struct pci_dev *dev, int irq, int nvec)
3486 {
3487 struct intel_iommu *iommu;
3488 int index;
3489
3490 iommu = map_dev_to_ir(dev);
3491 if (!iommu) {
3492 printk(KERN_ERR
3493 "Unable to map PCI %s to iommu\n", pci_name(dev));
3494 return -ENOENT;
3495 }
3496
3497 index = alloc_irte(iommu, irq, nvec);
3498 if (index < 0) {
3499 printk(KERN_ERR
3500 "Unable to allocate %d IRTE for PCI %s\n", nvec,
3501 pci_name(dev));
3502 return -ENOSPC;
3503 }
3504 return index;
3505 }
3506
3507 static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, int irq)
3508 {
3509 int ret;
3510 struct msi_msg msg;
3511
3512 ret = msi_compose_msg(dev, irq, &msg, -1);
3513 if (ret < 0)
3514 return ret;
3515
3516 set_irq_msi(irq, msidesc);
3517 write_msi_msg(irq, &msg);
3518
3519 if (irq_remapped(irq)) {
3520 struct irq_desc *desc = irq_to_desc(irq);
3521 /*
3522 * irq migration in process context
3523 */
3524 desc->status |= IRQ_MOVE_PCNTXT;
3525 set_irq_chip_and_handler_name(irq, &msi_ir_chip, handle_edge_irq, "edge");
3526 } else
3527 set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq, "edge");
3528
3529 dev_printk(KERN_DEBUG, &dev->dev, "irq %d for MSI/MSI-X\n", irq);
3530
3531 return 0;
3532 }
3533
3534 int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
3535 {
3536 unsigned int irq;
3537 int ret, sub_handle;
3538 struct msi_desc *msidesc;
3539 unsigned int irq_want;
3540 struct intel_iommu *iommu = NULL;
3541 int index = 0;
3542 int node;
3543
3544 /* x86 doesn't support multiple MSI yet */
3545 if (type == PCI_CAP_ID_MSI && nvec > 1)
3546 return 1;
3547
3548 node = dev_to_node(&dev->dev);
3549 irq_want = nr_irqs_gsi;
3550 sub_handle = 0;
3551 list_for_each_entry(msidesc, &dev->msi_list, list) {
3552 irq = create_irq_nr(irq_want, node);
3553 if (irq == 0)
3554 return -1;
3555 irq_want = irq + 1;
3556 if (!intr_remapping_enabled)
3557 goto no_ir;
3558
3559 if (!sub_handle) {
3560 /*
3561 * allocate the consecutive block of IRTEs
3562 * for 'nvec'
3563 */
3564 index = msi_alloc_irte(dev, irq, nvec);
3565 if (index < 0) {
3566 ret = index;
3567 goto error;
3568 }
3569 } else {
3570 iommu = map_dev_to_ir(dev);
3571 if (!iommu) {
3572 ret = -ENOENT;
3573 goto error;
3574 }
3575 /*
3576 * set up the mapping between the irq and the IRTE
3577 * base index, with the sub_handle pointing to the
3578 * appropriate interrupt-remapping table entry.
3579 */
3580 set_irte_irq(irq, iommu, index, sub_handle);
3581 }
3582 no_ir:
3583 ret = setup_msi_irq(dev, msidesc, irq);
3584 if (ret < 0)
3585 goto error;
3586 sub_handle++;
3587 }
3588 return 0;
3589
3590 error:
3591 destroy_irq(irq);
3592 return ret;
3593 }
3594
3595 void arch_teardown_msi_irq(unsigned int irq)
3596 {
3597 destroy_irq(irq);
3598 }
3599
3600 #if defined (CONFIG_DMAR) || defined (CONFIG_INTR_REMAP)
3601 #ifdef CONFIG_SMP
3602 static int dmar_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
3603 {
3604 struct irq_desc *desc = irq_to_desc(irq);
3605 struct irq_cfg *cfg;
3606 struct msi_msg msg;
3607 unsigned int dest;
3608
3609 if (set_desc_affinity(desc, mask, &dest))
3610 return -1;
3611
3612 cfg = desc->chip_data;
3613
3614 dmar_msi_read(irq, &msg);
3615
3616 msg.data &= ~MSI_DATA_VECTOR_MASK;
3617 msg.data |= MSI_DATA_VECTOR(cfg->vector);
3618 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
3619 msg.address_lo |= MSI_ADDR_DEST_ID(dest);
3620
3621 dmar_msi_write(irq, &msg);
3622
3623 return 0;
3624 }
3625
3626 #endif /* CONFIG_SMP */
3627
3628 static struct irq_chip dmar_msi_type = {
3629 .name = "DMAR_MSI",
3630 .unmask = dmar_msi_unmask,
3631 .mask = dmar_msi_mask,
3632 .ack = ack_apic_edge,
3633 #ifdef CONFIG_SMP
3634 .set_affinity = dmar_msi_set_affinity,
3635 #endif
3636 .retrigger = ioapic_retrigger_irq,
3637 };
3638
3639 int arch_setup_dmar_msi(unsigned int irq)
3640 {
3641 int ret;
3642 struct msi_msg msg;
3643
3644 ret = msi_compose_msg(NULL, irq, &msg, -1);
3645 if (ret < 0)
3646 return ret;
3647 dmar_msi_write(irq, &msg);
3648 set_irq_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq,
3649 "edge");
3650 return 0;
3651 }
3652 #endif
3653
3654 #ifdef CONFIG_HPET_TIMER
3655
3656 #ifdef CONFIG_SMP
3657 static int hpet_msi_set_affinity(unsigned int irq, const struct cpumask *mask)
3658 {
3659 struct irq_desc *desc = irq_to_desc(irq);
3660 struct irq_cfg *cfg;
3661 struct msi_msg msg;
3662 unsigned int dest;
3663
3664 if (set_desc_affinity(desc, mask, &dest))
3665 return -1;
3666
3667 cfg = desc->chip_data;
3668
3669 hpet_msi_read(irq, &msg);
3670
3671 msg.data &= ~MSI_DATA_VECTOR_MASK;
3672 msg.data |= MSI_DATA_VECTOR(cfg->vector);
3673 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
3674 msg.address_lo |= MSI_ADDR_DEST_ID(dest);
3675
3676 hpet_msi_write(irq, &msg);
3677
3678 return 0;
3679 }
3680
3681 #endif /* CONFIG_SMP */
3682
3683 static struct irq_chip ir_hpet_msi_type = {
3684 .name = "IR-HPET_MSI",
3685 .unmask = hpet_msi_unmask,
3686 .mask = hpet_msi_mask,
3687 #ifdef CONFIG_INTR_REMAP
3688 .ack = ir_ack_apic_edge,
3689 #ifdef CONFIG_SMP
3690 .set_affinity = ir_set_msi_irq_affinity,
3691 #endif
3692 #endif
3693 .retrigger = ioapic_retrigger_irq,
3694 };
3695
3696 static struct irq_chip hpet_msi_type = {
3697 .name = "HPET_MSI",
3698 .unmask = hpet_msi_unmask,
3699 .mask = hpet_msi_mask,
3700 .ack = ack_apic_edge,
3701 #ifdef CONFIG_SMP
3702 .set_affinity = hpet_msi_set_affinity,
3703 #endif
3704 .retrigger = ioapic_retrigger_irq,
3705 };
3706
3707 int arch_setup_hpet_msi(unsigned int irq, unsigned int id)
3708 {
3709 int ret;
3710 struct msi_msg msg;
3711 struct irq_desc *desc = irq_to_desc(irq);
3712
3713 if (intr_remapping_enabled) {
3714 struct intel_iommu *iommu = map_hpet_to_ir(id);
3715 int index;
3716
3717 if (!iommu)
3718 return -1;
3719
3720 index = alloc_irte(iommu, irq, 1);
3721 if (index < 0)
3722 return -1;
3723 }
3724
3725 ret = msi_compose_msg(NULL, irq, &msg, id);
3726 if (ret < 0)
3727 return ret;
3728
3729 hpet_msi_write(irq, &msg);
3730 desc->status |= IRQ_MOVE_PCNTXT;
3731 if (irq_remapped(irq))
3732 set_irq_chip_and_handler_name(irq, &ir_hpet_msi_type,
3733 handle_edge_irq, "edge");
3734 else
3735 set_irq_chip_and_handler_name(irq, &hpet_msi_type,
3736 handle_edge_irq, "edge");
3737
3738 return 0;
3739 }
3740 #endif
3741
3742 #endif /* CONFIG_PCI_MSI */
3743 /*
3744 * Hypertransport interrupt support
3745 */
3746 #ifdef CONFIG_HT_IRQ
3747
3748 #ifdef CONFIG_SMP
3749
3750 static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector)
3751 {
3752 struct ht_irq_msg msg;
3753 fetch_ht_irq_msg(irq, &msg);
3754
3755 msg.address_lo &= ~(HT_IRQ_LOW_VECTOR_MASK | HT_IRQ_LOW_DEST_ID_MASK);
3756 msg.address_hi &= ~(HT_IRQ_HIGH_DEST_ID_MASK);
3757
3758 msg.address_lo |= HT_IRQ_LOW_VECTOR(vector) | HT_IRQ_LOW_DEST_ID(dest);
3759 msg.address_hi |= HT_IRQ_HIGH_DEST_ID(dest);
3760
3761 write_ht_irq_msg(irq, &msg);
3762 }
3763
3764 static int set_ht_irq_affinity(unsigned int irq, const struct cpumask *mask)
3765 {
3766 struct irq_desc *desc = irq_to_desc(irq);
3767 struct irq_cfg *cfg;
3768 unsigned int dest;
3769
3770 if (set_desc_affinity(desc, mask, &dest))
3771 return -1;
3772
3773 cfg = desc->chip_data;
3774
3775 target_ht_irq(irq, dest, cfg->vector);
3776
3777 return 0;
3778 }
3779
3780 #endif
3781
3782 static struct irq_chip ht_irq_chip = {
3783 .name = "PCI-HT",
3784 .mask = mask_ht_irq,
3785 .unmask = unmask_ht_irq,
3786 .ack = ack_apic_edge,
3787 #ifdef CONFIG_SMP
3788 .set_affinity = set_ht_irq_affinity,
3789 #endif
3790 .retrigger = ioapic_retrigger_irq,
3791 };
3792
3793 int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
3794 {
3795 struct irq_cfg *cfg;
3796 int err;
3797
3798 if (disable_apic)
3799 return -ENXIO;
3800
3801 cfg = irq_cfg(irq);
3802 err = assign_irq_vector(irq, cfg, apic->target_cpus());
3803 if (!err) {
3804 struct ht_irq_msg msg;
3805 unsigned dest;
3806
3807 dest = apic->cpu_mask_to_apicid_and(cfg->domain,
3808 apic->target_cpus());
3809
3810 msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);
3811
3812 msg.address_lo =
3813 HT_IRQ_LOW_BASE |
3814 HT_IRQ_LOW_DEST_ID(dest) |
3815 HT_IRQ_LOW_VECTOR(cfg->vector) |
3816 ((apic->irq_dest_mode == 0) ?
3817 HT_IRQ_LOW_DM_PHYSICAL :
3818 HT_IRQ_LOW_DM_LOGICAL) |
3819 HT_IRQ_LOW_RQEOI_EDGE |
3820 ((apic->irq_delivery_mode != dest_LowestPrio) ?
3821 HT_IRQ_LOW_MT_FIXED :
3822 HT_IRQ_LOW_MT_ARBITRATED) |
3823 HT_IRQ_LOW_IRQ_MASKED;
3824
3825 write_ht_irq_msg(irq, &msg);
3826
3827 set_irq_chip_and_handler_name(irq, &ht_irq_chip,
3828 handle_edge_irq, "edge");
3829
3830 dev_printk(KERN_DEBUG, &dev->dev, "irq %d for HT\n", irq);
3831 }
3832 return err;
3833 }
3834 #endif /* CONFIG_HT_IRQ */
3835
3836 int __init io_apic_get_redir_entries (int ioapic)
3837 {
3838 union IO_APIC_reg_01 reg_01;
3839 unsigned long flags;
3840
3841 raw_spin_lock_irqsave(&ioapic_lock, flags);
3842 reg_01.raw = io_apic_read(ioapic, 1);
3843 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
3844
3845 /* The register returns the maximum redirection index
3846 * supported, which is one less than the total number of
3847 * redirection entries.
3848 */
3849 return reg_01.bits.entries + 1;
3850 }
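
/*
 * Worked example (illustrative): a standard 24-pin IO-APIC reports
 * reg_01.bits.entries == 0x17 (23, the maximum index), so this
 * function returns 24, the actual number of redirection entries.
 */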
3851
3852 void __init probe_nr_irqs_gsi(void)
3853 {
3854 int nr;
3855
3856 nr = gsi_end + 1 + NR_IRQS_LEGACY;
3857 if (nr > nr_irqs_gsi)
3858 nr_irqs_gsi = nr;
3859
3860 printk(KERN_DEBUG "nr_irqs_gsi: %d\n", nr_irqs_gsi);
3861 }
3862
3863 #ifdef CONFIG_SPARSE_IRQ
3864 int __init arch_probe_nr_irqs(void)
3865 {
3866 int nr;
3867
3868 if (nr_irqs > (NR_VECTORS * nr_cpu_ids))
3869 nr_irqs = NR_VECTORS * nr_cpu_ids;
3870
3871 nr = nr_irqs_gsi + 8 * nr_cpu_ids;
3872 #if defined(CONFIG_PCI_MSI) || defined(CONFIG_HT_IRQ)
3873 /*
3874 * for MSI and HT dynamic irqs
3875 */
3876 nr += nr_irqs_gsi * 16;
3877 #endif
3878 if (nr < nr_irqs)
3879 nr_irqs = nr;
3880
3881 return 0;
3882 }
3883 #endif
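
/*
 * Worked example for the sizing above (illustrative; assumes
 * nr_cpu_ids == 8 and nr_irqs_gsi == 64): the GSI count plus the
 * 8 * nr_cpu_ids slack gives 64 + 64 = 128, the MSI/HT term adds
 * 64 * 16 = 1024, so nr_irqs would be capped at 1152.
 */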
3884
3885 static int __io_apic_set_pci_routing(struct device *dev, int irq,
3886 struct io_apic_irq_attr *irq_attr)
3887 {
3888 struct irq_desc *desc;
3889 struct irq_cfg *cfg;
3890 int node;
3891 int ioapic, pin;
3892 int trigger, polarity;
3893
3894 ioapic = irq_attr->ioapic;
3895 if (!IO_APIC_IRQ(irq)) {
3896 apic_printk(APIC_QUIET, KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n",
3897 ioapic);
3898 return -EINVAL;
3899 }
3900
3901 if (dev)
3902 node = dev_to_node(dev);
3903 else
3904 node = cpu_to_node(boot_cpu_id);
3905
3906 desc = irq_to_desc_alloc_node(irq, node);
3907 if (!desc) {
3908 printk(KERN_INFO "can not get irq_desc %d\n", irq);
3909 return 0;
3910 }
3911
3912 pin = irq_attr->ioapic_pin;
3913 trigger = irq_attr->trigger;
3914 polarity = irq_attr->polarity;
3915
3916 /*
3917 * IRQs < 16 are already in the irq_2_pin[] map
3918 */
3919 if (irq >= legacy_pic->nr_legacy_irqs) {
3920 cfg = desc->chip_data;
3921 if (add_pin_to_irq_node_nopanic(cfg, node, ioapic, pin)) {
3922 printk(KERN_INFO "can not add pin %d for irq %d\n",
3923 pin, irq);
3924 return 0;
3925 }
3926 }
3927
3928 setup_IO_APIC_irq(ioapic, pin, irq, desc, trigger, polarity);
3929
3930 return 0;
3931 }
3932
3933 int io_apic_set_pci_routing(struct device *dev, int irq,
3934 struct io_apic_irq_attr *irq_attr)
3935 {
3936 int ioapic, pin;
3937 /*
3938 * Avoid pin reprogramming. PRTs typically include entries
3939 * with redundant pin->gsi mappings (but unique PCI devices);
3940 * we only program the IOAPIC on the first.
3941 */
3942 ioapic = irq_attr->ioapic;
3943 pin = irq_attr->ioapic_pin;
3944 if (test_bit(pin, mp_ioapic_routing[ioapic].pin_programmed)) {
3945 pr_debug("Pin %d-%d already programmed\n",
3946 mp_ioapics[ioapic].apicid, pin);
3947 return 0;
3948 }
3949 set_bit(pin, mp_ioapic_routing[ioapic].pin_programmed);
3950
3951 return __io_apic_set_pci_routing(dev, irq, irq_attr);
3952 }
3953
3954 u8 __init io_apic_unique_id(u8 id)
3955 {
3956 #ifdef CONFIG_X86_32
3957 if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) &&
3958 !APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
3959 return io_apic_get_unique_id(nr_ioapics, id);
3960 else
3961 return id;
3962 #else
3963 int i;
3964 DECLARE_BITMAP(used, 256);
3965
3966 bitmap_zero(used, 256);
3967 for (i = 0; i < nr_ioapics; i++) {
3968 struct mpc_ioapic *ia = &mp_ioapics[i];
3969 __set_bit(ia->apicid, used);
3970 }
3971 if (!test_bit(id, used))
3972 return id;
3973 return find_first_zero_bit(used, 256);
3974 #endif
3975 }
3976
3977 #ifdef CONFIG_X86_32
3978 int __init io_apic_get_unique_id(int ioapic, int apic_id)
3979 {
3980 union IO_APIC_reg_00 reg_00;
3981 static physid_mask_t apic_id_map = PHYSID_MASK_NONE;
3982 physid_mask_t tmp;
3983 unsigned long flags;
3984 int i = 0;
3985
3986 /*
3987 * The P4 platform supports up to 256 APIC IDs on two separate APIC
3988 * buses (one for LAPICs, one for IOAPICs), where predecessors only
3989 * support up to 16 on one shared APIC bus.
3990 *
3991 * TBD: Expand LAPIC/IOAPIC support on P4-class systems to take full
3992 * advantage of new APIC bus architecture.
3993 */
3994
3995 if (physids_empty(apic_id_map))
3996 apic->ioapic_phys_id_map(&phys_cpu_present_map, &apic_id_map);
3997
3998 raw_spin_lock_irqsave(&ioapic_lock, flags);
3999 reg_00.raw = io_apic_read(ioapic, 0);
4000 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
4001
4002 if (apic_id >= get_physical_broadcast()) {
4003 printk(KERN_WARNING "IOAPIC[%d]: Invalid apic_id %d, trying "
4004 "%d\n", ioapic, apic_id, reg_00.bits.ID);
4005 apic_id = reg_00.bits.ID;
4006 }
4007
4008 /*
4009 * Every APIC in a system must have a unique ID or we get lots of nice
4010 * 'stuck on smp_invalidate_needed IPI wait' messages.
4011 */
4012 if (apic->check_apicid_used(&apic_id_map, apic_id)) {
4013
4014 for (i = 0; i < get_physical_broadcast(); i++) {
4015 if (!apic->check_apicid_used(&apic_id_map, i))
4016 break;
4017 }
4018
4019 if (i == get_physical_broadcast())
4020 panic("Max apic_id exceeded!\n");
4021
4022 printk(KERN_WARNING "IOAPIC[%d]: apic_id %d already used, "
4023 "trying %d\n", ioapic, apic_id, i);
4024
4025 apic_id = i;
4026 }
4027
4028 apic->apicid_to_cpu_present(apic_id, &tmp);
4029 physids_or(apic_id_map, apic_id_map, tmp);
4030
4031 if (reg_00.bits.ID != apic_id) {
4032 reg_00.bits.ID = apic_id;
4033
4034 raw_spin_lock_irqsave(&ioapic_lock, flags);
4035 io_apic_write(ioapic, 0, reg_00.raw);
4036 reg_00.raw = io_apic_read(ioapic, 0);
4037 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
4038
4039 /* Sanity check */
4040 if (reg_00.bits.ID != apic_id) {
4041 printk("IOAPIC[%d]: Unable to change apic_id!\n", ioapic);
4042 return -1;
4043 }
4044 }
4045
4046 apic_printk(APIC_VERBOSE, KERN_INFO
4047 "IOAPIC[%d]: Assigned apic_id %d\n", ioapic, apic_id);
4048
4049 return apic_id;
4050 }
4051 #endif
4052
4053 int __init io_apic_get_version(int ioapic)
4054 {
4055 union IO_APIC_reg_01 reg_01;
4056 unsigned long flags;
4057
4058 raw_spin_lock_irqsave(&ioapic_lock, flags);
4059 reg_01.raw = io_apic_read(ioapic, 1);
4060 raw_spin_unlock_irqrestore(&ioapic_lock, flags);
4061
4062 return reg_01.bits.version;
4063 }
4064
4065 int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity)
4066 {
4067 int ioapic, pin, idx;
4068
4069 if (skip_ioapic_setup)
4070 return -1;
4071
4072 ioapic = mp_find_ioapic(gsi);
4073 if (ioapic < 0)
4074 return -1;
4075
4076 pin = mp_find_ioapic_pin(ioapic, gsi);
4077 if (pin < 0)
4078 return -1;
4079
4080 idx = find_irq_entry(ioapic, pin, mp_INT);
4081 if (idx < 0)
4082 return -1;
4083
4084 *trigger = irq_trigger(idx);
4085 *polarity = irq_polarity(idx);
4086 return 0;
4087 }
4088
4089 /*
4090 * This function currently is only a helper for the i386 smp boot process where
4091 * we need to reprogram the ioredtbls to cater for the cpus which have come online,
4092 * so the mask in all cases should simply be apic->target_cpus().
4093 */
4094 #ifdef CONFIG_SMP
4095 void __init setup_ioapic_dest(void)
4096 {
4097 int pin, ioapic, irq, irq_entry;
4098 struct irq_desc *desc;
4099 const struct cpumask *mask;
4100
4101 if (skip_ioapic_setup == 1)
4102 return;
4103
4104 for (ioapic = 0; ioapic < nr_ioapics; ioapic++)
4105 for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) {
4106 irq_entry = find_irq_entry(ioapic, pin, mp_INT);
4107 if (irq_entry == -1)
4108 continue;
4109 irq = pin_2_irq(irq_entry, ioapic, pin);
4110
4111 if ((ioapic > 0) && (irq > 16))
4112 continue;
4113
4114 desc = irq_to_desc(irq);
4115
4116 /*
4117 * Honour affinities which have been set in early boot
4118 */
4119 if (desc->status &
4120 (IRQ_NO_BALANCING | IRQ_AFFINITY_SET))
4121 mask = desc->affinity;
4122 else
4123 mask = apic->target_cpus();
4124
4125 if (intr_remapping_enabled)
4126 set_ir_ioapic_affinity_irq_desc(desc, mask);
4127 else
4128 set_ioapic_affinity_irq_desc(desc, mask);
4129 }
4130
4131 }
4132 #endif
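
/*
 * Editor's note (assumption, not from the original file): setup_ioapic_dest()
 * is typically called once at the end of SMP bring-up, after all boot-time
 * CPUs are online, e.g. from native_smp_cpus_done().
 */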

#define IOAPIC_RESOURCE_NAME_SIZE 11

static struct resource *ioapic_resources;

static struct resource * __init ioapic_setup_resources(int nr_ioapics)
{
        unsigned long n;
        struct resource *res;
        char *mem;
        int i;

        if (nr_ioapics <= 0)
                return NULL;

        n = IOAPIC_RESOURCE_NAME_SIZE + sizeof(struct resource);
        n *= nr_ioapics;

        mem = alloc_bootmem(n);
        res = (void *)mem;

        mem += sizeof(struct resource) * nr_ioapics;

        for (i = 0; i < nr_ioapics; i++) {
                res[i].name = mem;
                res[i].flags = IORESOURCE_MEM | IORESOURCE_BUSY;
                snprintf(mem, IOAPIC_RESOURCE_NAME_SIZE, "IOAPIC %u", i);
                mem += IOAPIC_RESOURCE_NAME_SIZE;
        }

        ioapic_resources = res;

        return res;
}
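
/*
 * Editor's note: the single bootmem allocation above holds the resource
 * array followed by the name strings, so each res[i].name points into the
 * tail of the same block:
 *
 *      [res 0][res 1]...[res n-1]["IOAPIC 0\0"]["IOAPIC 1\0"]...
 */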

void __init ioapic_init_mappings(void)
{
        unsigned long ioapic_phys, idx = FIX_IO_APIC_BASE_0;
        struct resource *ioapic_res;
        int i;

        ioapic_res = ioapic_setup_resources(nr_ioapics);
        for (i = 0; i < nr_ioapics; i++) {
                if (smp_found_config) {
                        ioapic_phys = mp_ioapics[i].apicaddr;
#ifdef CONFIG_X86_32
                        if (!ioapic_phys) {
                                printk(KERN_ERR
                                       "WARNING: bogus zero IO-APIC "
                                       "address found in MPTABLE, "
                                       "disabling IO/APIC support!\n");
                                smp_found_config = 0;
                                skip_ioapic_setup = 1;
                                goto fake_ioapic_page;
                        }
#endif
                } else {
#ifdef CONFIG_X86_32
fake_ioapic_page:
#endif
                        ioapic_phys = (unsigned long)alloc_bootmem_pages(PAGE_SIZE);
                        ioapic_phys = __pa(ioapic_phys);
                }
                set_fixmap_nocache(idx, ioapic_phys);
                apic_printk(APIC_VERBOSE, "mapped IOAPIC to %08lx (%08lx)\n",
                        __fix_to_virt(idx) + (ioapic_phys & ~PAGE_MASK),
                        ioapic_phys);
                idx++;

                ioapic_res->start = ioapic_phys;
                ioapic_res->end = ioapic_phys + IO_APIC_SLOT_SIZE - 1;
                ioapic_res++;
        }
}

void __init ioapic_insert_resources(void)
{
        int i;
        struct resource *r = ioapic_resources;

        if (!r) {
                if (nr_ioapics > 0)
                        printk(KERN_ERR
                                "IO APIC resources couldn't be allocated.\n");
                return;
        }

        for (i = 0; i < nr_ioapics; i++) {
                insert_resource(&iomem_resource, r);
                r++;
        }
}
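
/*
 * Editor's note: inserting the resources into iomem_resource publishes the
 * I/O APIC register windows in the resource tree (visible in /proc/iomem),
 * and IORESOURCE_BUSY keeps drivers from claiming those ranges.
 */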

int mp_find_ioapic(u32 gsi)
{
        int i = 0;

        /* Find the IOAPIC that manages this GSI. */
        for (i = 0; i < nr_ioapics; i++) {
                if ((gsi >= mp_gsi_routing[i].gsi_base)
                    && (gsi <= mp_gsi_routing[i].gsi_end))
                        return i;
        }

        printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %u\n", gsi);
        return -1;
}

int mp_find_ioapic_pin(int ioapic, u32 gsi)
{
        if (WARN_ON(ioapic == -1))
                return -1;
        if (WARN_ON(gsi > mp_gsi_routing[ioapic].gsi_end))
                return -1;

        return gsi - mp_gsi_routing[ioapic].gsi_base;
}
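
/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * the two lookups above combine naturally when translating a GSI into an
 * (ioapic, pin) pair; the helper name is hypothetical:
 */
static int gsi_to_ioapic_pin(u32 gsi, int *ioapic, int *pin)
{
        *ioapic = mp_find_ioapic(gsi);
        if (*ioapic < 0)
                return -1;

        *pin = mp_find_ioapic_pin(*ioapic, gsi);
        return *pin < 0 ? -1 : 0;
}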

static int bad_ioapic(unsigned long address)
{
        if (nr_ioapics >= MAX_IO_APICS) {
                printk(KERN_WARNING "WARNING: Max # of I/O APICs (%d) exceeded "
                       "(found %d), skipping\n", MAX_IO_APICS, nr_ioapics);
                return 1;
        }
        if (!address) {
                printk(KERN_WARNING "WARNING: Bogus (zero) I/O APIC address"
                       " found in table, skipping!\n");
                return 1;
        }
        return 0;
}

void __init mp_register_ioapic(int id, u32 address, u32 gsi_base)
{
        int idx = 0;
        int entries;

        if (bad_ioapic(address))
                return;

        idx = nr_ioapics;

        mp_ioapics[idx].type = MP_IOAPIC;
        mp_ioapics[idx].flags = MPC_APIC_USABLE;
        mp_ioapics[idx].apicaddr = address;

        set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
        mp_ioapics[idx].apicid = io_apic_unique_id(id);
        mp_ioapics[idx].apicver = io_apic_get_version(idx);

        /*
         * Build basic GSI lookup table to facilitate gsi->io_apic lookups
         * and to prevent reprogramming of IOAPIC pins (PCI GSIs).
         */
        entries = io_apic_get_redir_entries(idx);
        mp_gsi_routing[idx].gsi_base = gsi_base;
        mp_gsi_routing[idx].gsi_end = gsi_base + entries - 1;

        /*
         * The number of IO-APIC IRQ registers (== #pins):
         */
        nr_ioapic_registers[idx] = entries;

        if (mp_gsi_routing[idx].gsi_end > gsi_end)
                gsi_end = mp_gsi_routing[idx].gsi_end;

        printk(KERN_INFO "IOAPIC[%d]: apic_id %d, version %d, address 0x%x, "
               "GSI %d-%d\n", idx, mp_ioapics[idx].apicid,
               mp_ioapics[idx].apicver, mp_ioapics[idx].apicaddr,
               mp_gsi_routing[idx].gsi_base, mp_gsi_routing[idx].gsi_end);

        nr_ioapics++;
}
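
/*
 * Editor's note (assumption, not from the original file): the usual callers
 * are the MP-table and ACPI MADT parsers, which register each I/O APIC they
 * discover along the lines of (argument names illustrative):
 *
 *      mp_register_ioapic(id, address, gsi_base);
 */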

/* Enable IOAPIC early just for system timer */
void __init pre_init_apic_IRQ0(void)
{
        struct irq_cfg *cfg;
        struct irq_desc *desc;

        printk(KERN_INFO "Early APIC setup for system timer0\n");
#ifndef CONFIG_SMP
        phys_cpu_present_map = physid_mask_of_physid(boot_cpu_physical_apicid);
#endif
        /* Make sure the IRQ0 descriptor exists (allocated on node 0) */
        desc = irq_to_desc_alloc_node(0, 0);

        setup_local_APIC();

        /* Route IRQ0 through pin 0 of IOAPIC 0, handled as an edge IRQ */
        cfg = irq_cfg(0);
        add_pin_to_irq_node(cfg, 0, 0, 0);
        set_irq_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, "edge");

        setup_IO_APIC_irq(0, 0, 0, desc, 0, 0);
}