/*
 *	Intel IO-APIC support for multi-Pentium hosts.
 *
 *	Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar, Hajnalka Szabo
 *
 *	Many thanks to Stig Venaas for trying out countless experimental
 *	patches and reporting/debugging problems patiently!
 *
 *	(c) 1999, Multiple IO-APIC support, developed by
 *	Ken-ichi Yaku <yaku@css1.kbnes.nec.co.jp> and
 *	Hidemi Kishimoto <kisimoto@css1.kbnes.nec.co.jp>,
 *	further tested and cleaned up by Zach Brown <zab@redhat.com>
 *	and Ingo Molnar <mingo@redhat.com>
 *
 *	Maciej W. Rozycki	:	Bits for genuine 82489DX APICs;
 *					thanks to Eric Gilmore
 *					for testing these extensively
 *	Paul Diefenbaugh	:	Added full ACPI support
 */
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/mc146818rtc.h>
#include <linux/compiler.h>
#include <linux/acpi.h>
#include <linux/module.h>
#include <linux/sysdev.h>
#include <linux/msi.h>
#include <linux/htirq.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/jiffies.h>	/* time_after() */
#include <acpi/acpi_bus.h>
#include <linux/bootmem.h>
#include <linux/dmar.h>
#include <linux/hpet.h>

#include <asm/proto.h>
#include <asm/timer.h>
#include <asm/i8259.h>
#include <asm/msidef.h>
#include <asm/hypertransport.h>
#include <asm/setup.h>
#include <asm/irq_remapping.h>
#include <asm/uv/uv_hub.h>
#include <asm/uv/uv_irq.h>

#include <mach_apic.h>
#include <mach_apicdef.h>
#define __apicdebuginit(type) static type __init
/*
 *      Is the SiS APIC rmw bug present ?
 *      -1 = don't know, 0 = no, 1 = yes
 */
int sis_apic_bug = -1;
static DEFINE_SPINLOCK(ioapic_lock);
static DEFINE_SPINLOCK(vector_lock);
/*
 * # of IRQ routing registers
 */
int nr_ioapic_registers[MAX_IO_APICS];
/* I/O APIC entries */
struct mp_config_ioapic mp_ioapics[MAX_IO_APICS];

/* MP IRQ source entries */
struct mp_config_intsrc mp_irqs[MAX_IRQ_SOURCES];

/* # of MP IRQ source entries */
int mp_irq_entries;
#if defined (CONFIG_MCA) || defined (CONFIG_EISA)
int mp_bus_id_to_type[MAX_MP_BUSSES];
#endif

DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES);
int skip_ioapic_setup;

static int __init parse_noapic(char *str)
{
	/* disable IO-APIC */
	disable_ioapic_setup();
	return 0;
}
early_param("noapic", parse_noapic);
struct irq_cfg {
	unsigned int irq;
	struct irq_pin_list *irq_2_pin;
	cpumask_t domain;
	cpumask_t old_domain;
	unsigned move_cleanup_count;
	u8 vector;
	u8 move_in_progress : 1;
};
/* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */
static struct irq_cfg irq_cfgx[NR_IRQS] = {
	[0]  = { .irq =  0, .domain = CPU_MASK_ALL, .vector = IRQ0_VECTOR,  },
	[1]  = { .irq =  1, .domain = CPU_MASK_ALL, .vector = IRQ1_VECTOR,  },
	[2]  = { .irq =  2, .domain = CPU_MASK_ALL, .vector = IRQ2_VECTOR,  },
	[3]  = { .irq =  3, .domain = CPU_MASK_ALL, .vector = IRQ3_VECTOR,  },
	[4]  = { .irq =  4, .domain = CPU_MASK_ALL, .vector = IRQ4_VECTOR,  },
	[5]  = { .irq =  5, .domain = CPU_MASK_ALL, .vector = IRQ5_VECTOR,  },
	[6]  = { .irq =  6, .domain = CPU_MASK_ALL, .vector = IRQ6_VECTOR,  },
	[7]  = { .irq =  7, .domain = CPU_MASK_ALL, .vector = IRQ7_VECTOR,  },
	[8]  = { .irq =  8, .domain = CPU_MASK_ALL, .vector = IRQ8_VECTOR,  },
	[9]  = { .irq =  9, .domain = CPU_MASK_ALL, .vector = IRQ9_VECTOR,  },
	[10] = { .irq = 10, .domain = CPU_MASK_ALL, .vector = IRQ10_VECTOR, },
	[11] = { .irq = 11, .domain = CPU_MASK_ALL, .vector = IRQ11_VECTOR, },
	[12] = { .irq = 12, .domain = CPU_MASK_ALL, .vector = IRQ12_VECTOR, },
	[13] = { .irq = 13, .domain = CPU_MASK_ALL, .vector = IRQ13_VECTOR, },
	[14] = { .irq = 14, .domain = CPU_MASK_ALL, .vector = IRQ14_VECTOR, },
	[15] = { .irq = 15, .domain = CPU_MASK_ALL, .vector = IRQ15_VECTOR, },
};

#define for_each_irq_cfg(irq, cfg)		\
	for (irq = 0, cfg = irq_cfgx; irq < nr_irqs; irq++, cfg++)
static struct irq_cfg *irq_cfg(unsigned int irq)
{
	return irq < nr_irqs ? irq_cfgx + irq : NULL;
}

static struct irq_cfg *irq_cfg_alloc(unsigned int irq)
{
	return irq_cfg(irq);
}
/*
 * Rough estimation of how many shared IRQs there are, can be changed
 * anytime.
 */
#define MAX_PLUS_SHARED_IRQS NR_IRQS
#define PIN_MAP_SIZE (MAX_PLUS_SHARED_IRQS + NR_IRQS)

/*
 * This is performance-critical, we want to do it O(1)
 *
 * the indexing order of this array favors 1:1 mappings
 * between pins and IRQs.
 */
struct irq_pin_list {
	int apic, pin;
	struct irq_pin_list *next;
};

static struct irq_pin_list irq_2_pin_head[PIN_MAP_SIZE];
static struct irq_pin_list *irq_2_pin_ptr;
static void __init irq_2_pin_init(void)
{
	struct irq_pin_list *pin = irq_2_pin_head;
	int i;

	for (i = 1; i < PIN_MAP_SIZE; i++)
		pin[i-1].next = &pin[i];

	irq_2_pin_ptr = &pin[0];
}
static struct irq_pin_list *get_one_free_irq_2_pin(void)
{
	struct irq_pin_list *pin = irq_2_pin_ptr;

	if (!pin)
		panic("can not get more irq_2_pin\n");

	irq_2_pin_ptr = pin->next;
	pin->next = NULL;
	return pin;
}
struct io_apic {
	unsigned int index;
	unsigned int unused[3];
	unsigned int data;
};
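/*
 * Layout note (editorial, not from the original source): with four-byte
 * members, 'index' sits at offset 0x00 and the unused[3] padding places
 * 'data' at offset 0x10 -- the IOREGSEL/IOWIN register pair of the
 * IO-APIC's memory-mapped window.
 */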
static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx)
{
	return (void __iomem *) __fix_to_virt(FIX_IO_APIC_BASE_0 + idx)
		+ (mp_ioapics[idx].mp_apicaddr & ~PAGE_MASK);
}
static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);
	writel(reg, &io_apic->index);
	return readl(&io_apic->data);
}
static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);
	writel(reg, &io_apic->index);
	writel(value, &io_apic->data);
}
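/*
 * Note that each register access above is really two MMIO operations
 * through the index/data window, which is why callers serialize all
 * IO-APIC accesses on ioapic_lock.
 */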
/*
 * Re-write a value: to be used for read-modify-write
 * cycles where the read already set up the index register.
 *
 * Older SiS APIC requires we rewrite the index register
 */
static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value)
{
	struct io_apic __iomem *io_apic = io_apic_base(apic);

	if (sis_apic_bug)
		writel(reg, &io_apic->index);
	writel(value, &io_apic->data);
}
static bool io_apic_level_ack_pending(unsigned int irq)
{
	struct irq_pin_list *entry;
	unsigned long flags;
	struct irq_cfg *cfg = irq_cfg(irq);

	spin_lock_irqsave(&ioapic_lock, flags);
	entry = cfg->irq_2_pin;
	for (;;) {
		unsigned int reg;
		int pin;

		if (!entry)
			break;
		pin = entry->pin;
		reg = io_apic_read(entry->apic, 0x10 + pin*2);
		/* Is the remote IRR bit set? */
		if (reg & IO_APIC_REDIR_REMOTE_IRR) {
			spin_unlock_irqrestore(&ioapic_lock, flags);
			return true;
		}
		if (!entry->next)
			break;
		entry = entry->next;
	}
	spin_unlock_irqrestore(&ioapic_lock, flags);

	return false;
}
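/*
 * (Editorial note:) the Remote IRR flag tested above is bit 14 of the
 * low RTE word; on a level-triggered pin it stays set from delivery of
 * the interrupt until the EOI broadcast reaches the IO-APIC.
 */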
union entry_union {
	struct { u32 w1, w2; };
	struct IO_APIC_route_entry entry;
};
static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin)
{
	union entry_union eu;
	unsigned long flags;

	spin_lock_irqsave(&ioapic_lock, flags);
	eu.w1 = io_apic_read(apic, 0x10 + 2 * pin);
	eu.w2 = io_apic_read(apic, 0x11 + 2 * pin);
	spin_unlock_irqrestore(&ioapic_lock, flags);
	return eu.entry;
}
/*
 * When we write a new IO APIC routing entry, we need to write the high
 * word first! If the mask bit in the low word is clear, we will enable
 * the interrupt, and we need to make sure the entry is fully populated
 * before that happens.
 */
static void
__ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
{
	union entry_union eu;

	eu.entry = e;
	io_apic_write(apic, 0x11 + 2*pin, eu.w2);
	io_apic_write(apic, 0x10 + 2*pin, eu.w1);
}
static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
{
	unsigned long flags;

	spin_lock_irqsave(&ioapic_lock, flags);
	__ioapic_write_entry(apic, pin, e);
	spin_unlock_irqrestore(&ioapic_lock, flags);
}
/*
 * When we mask an IO APIC routing entry, we need to write the low
 * word first, in order to set the mask bit before we change the
 * high bits!
 */
static void ioapic_mask_entry(int apic, int pin)
{
	unsigned long flags;
	union entry_union eu = { .entry.mask = 1 };

	spin_lock_irqsave(&ioapic_lock, flags);
	io_apic_write(apic, 0x10 + 2*pin, eu.w1);
	io_apic_write(apic, 0x11 + 2*pin, eu.w2);
	spin_unlock_irqrestore(&ioapic_lock, flags);
}
#ifdef CONFIG_SMP
static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, u8 vector)
{
	int apic, pin;
	struct irq_cfg *cfg;
	struct irq_pin_list *entry;

	cfg = irq_cfg(irq);
	entry = cfg->irq_2_pin;
	for (;;) {
		unsigned int reg;

		if (!entry)
			break;

		apic = entry->apic;
		pin = entry->pin;
#ifdef CONFIG_INTR_REMAP
		/*
		 * With interrupt-remapping, destination information comes
		 * from interrupt-remapping table entry.
		 */
		if (!irq_remapped(irq))
			io_apic_write(apic, 0x11 + pin*2, dest);
#else
		io_apic_write(apic, 0x11 + pin*2, dest);
#endif
		reg = io_apic_read(apic, 0x10 + pin*2);
		reg &= ~IO_APIC_REDIR_VECTOR_MASK;
		reg |= vector;
		io_apic_modify(apic, 0x10 + pin*2, reg);
		if (!entry->next)
			break;
		entry = entry->next;
	}
}
static int assign_irq_vector(int irq, cpumask_t mask);

static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
{
	struct irq_cfg *cfg;
	unsigned long flags;
	unsigned int dest;
	cpumask_t tmp;
	struct irq_desc *desc;

	cpus_and(tmp, mask, cpu_online_map);
	if (cpus_empty(tmp))
		return;

	cfg = irq_cfg(irq);
	if (assign_irq_vector(irq, mask))
		return;

	cpus_and(tmp, cfg->domain, mask);
	dest = cpu_mask_to_apicid(tmp);
	/*
	 * Only the high 8 bits are valid.
	 */
	dest = SET_APIC_LOGICAL_ID(dest);

	desc = irq_to_desc(irq);
	spin_lock_irqsave(&ioapic_lock, flags);
	__target_IO_APIC_irq(irq, dest, cfg->vector);
	desc->affinity = mask;
	spin_unlock_irqrestore(&ioapic_lock, flags);
}
#endif /* CONFIG_SMP */
/*
 * The common case is 1:1 IRQ<->pin mappings. Sometimes there are
 * shared ISA-space IRQs, so we have to support them. We are super
 * fast in the common case, and fast for shared ISA-space IRQs.
 */
static void add_pin_to_irq(unsigned int irq, int apic, int pin)
{
	struct irq_cfg *cfg;
	struct irq_pin_list *entry;

	/* first time to refer irq_cfg, so with new */
	cfg = irq_cfg_alloc(irq);
	entry = cfg->irq_2_pin;
	if (!entry) {
		entry = get_one_free_irq_2_pin();
		cfg->irq_2_pin = entry;
		entry->apic = apic;
		entry->pin = pin;
		return;
	}

	while (entry->next) {
		/* not again, please */
		if (entry->apic == apic && entry->pin == pin)
			return;

		entry = entry->next;
	}

	entry->next = get_one_free_irq_2_pin();
	entry = entry->next;
	entry->apic = apic;
	entry->pin = pin;
}
/*
 * Reroute an IRQ to a different pin.
 */
static void __init replace_pin_at_irq(unsigned int irq,
				      int oldapic, int oldpin,
				      int newapic, int newpin)
{
	struct irq_cfg *cfg = irq_cfg(irq);
	struct irq_pin_list *entry = cfg->irq_2_pin;
	int replaced = 0;

	while (entry) {
		if (entry->apic == oldapic && entry->pin == oldpin) {
			entry->apic = newapic;
			entry->pin = newpin;
			replaced = 1;
			/* every one is different, right? */
			break;
		}
		entry = entry->next;
	}

	/* why? call replace before add? */
	if (!replaced)
		add_pin_to_irq(irq, newapic, newpin);
}
static inline void io_apic_modify_irq(unsigned int irq,
				      int mask_and, int mask_or,
				      void (*final)(struct irq_pin_list *entry))
{
	int pin;
	struct irq_cfg *cfg;
	struct irq_pin_list *entry;

	cfg = irq_cfg(irq);
	for (entry = cfg->irq_2_pin; entry != NULL; entry = entry->next) {
		unsigned int reg;

		pin = entry->pin;
		reg = io_apic_read(entry->apic, 0x10 + pin * 2);
		reg &= mask_and;
		reg |= mask_or;
		io_apic_modify(entry->apic, 0x10 + pin * 2, reg);
		if (final)
			final(entry);
	}
}
static void __unmask_IO_APIC_irq(unsigned int irq)
{
	io_apic_modify_irq(irq, ~IO_APIC_REDIR_MASKED, 0, NULL);
}

#ifdef CONFIG_X86_64
void io_apic_sync(struct irq_pin_list *entry)
{
	/*
	 * Synchronize the IO-APIC and the CPU by doing
	 * a dummy read from the IO-APIC
	 */
	struct io_apic __iomem *io_apic;

	io_apic = io_apic_base(entry->apic);
	readl(&io_apic->data);
}

static void __mask_IO_APIC_irq(unsigned int irq)
{
	io_apic_modify_irq(irq, ~0, IO_APIC_REDIR_MASKED, &io_apic_sync);
}
#else /* CONFIG_X86_32 */
static void __mask_IO_APIC_irq(unsigned int irq)
{
	io_apic_modify_irq(irq, ~0, IO_APIC_REDIR_MASKED, NULL);
}

static void __mask_and_edge_IO_APIC_irq(unsigned int irq)
{
	io_apic_modify_irq(irq, ~IO_APIC_REDIR_LEVEL_TRIGGER,
			   IO_APIC_REDIR_MASKED, NULL);
}

static void __unmask_and_level_IO_APIC_irq(unsigned int irq)
{
	io_apic_modify_irq(irq, ~IO_APIC_REDIR_MASKED,
			   IO_APIC_REDIR_LEVEL_TRIGGER, NULL);
}
#endif /* CONFIG_X86_32 */
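/*
 * (Editorial note:) the edge/level flip helpers above are the building
 * blocks for the 82093AA level/IRR erratum workaround in ack_apic_level()
 * further down: forcing the pin to edge and back to level simulates the
 * EOI message that never arrived.
 */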
static void mask_IO_APIC_irq(unsigned int irq)
{
	unsigned long flags;

	spin_lock_irqsave(&ioapic_lock, flags);
	__mask_IO_APIC_irq(irq);
	spin_unlock_irqrestore(&ioapic_lock, flags);
}

static void unmask_IO_APIC_irq(unsigned int irq)
{
	unsigned long flags;

	spin_lock_irqsave(&ioapic_lock, flags);
	__unmask_IO_APIC_irq(irq);
	spin_unlock_irqrestore(&ioapic_lock, flags);
}
static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
{
	struct IO_APIC_route_entry entry;

	/* Check delivery_mode to be sure we're not clearing an SMI pin */
	entry = ioapic_read_entry(apic, pin);
	if (entry.delivery_mode == dest_SMI)
		return;
	/*
	 * Disable it in the IO-APIC irq-routing table:
	 */
	ioapic_mask_entry(apic, pin);
}
static void clear_IO_APIC(void)
{
	int apic, pin;

	for (apic = 0; apic < nr_ioapics; apic++)
		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
			clear_IO_APIC_pin(apic, pin);
}
#if !defined(CONFIG_SMP) && defined(CONFIG_X86_32)
void send_IPI_self(int vector)
{
	unsigned int cfg;

	/*
	 * Wait for idle.
	 */
	apic_wait_icr_idle();
	cfg = APIC_DM_FIXED | APIC_DEST_SELF | vector | APIC_DEST_LOGICAL;
	/*
	 * Send the IPI. The write to APIC_ICR fires this off.
	 */
	apic_write(APIC_ICR, cfg);
}
#endif /* !CONFIG_SMP && CONFIG_X86_32*/
#ifdef CONFIG_X86_32
/*
 * support for broken MP BIOSs, enables hand-redirection of PIRQ0-7 to
 * specific CPU-side IRQs.
 */

#define MAX_PIRQS 8
static int pirq_entries[MAX_PIRQS];
static int pirqs_enabled;

static int __init ioapic_pirq_setup(char *str)
{
	int i, max;
	int ints[MAX_PIRQS+1];

	get_options(str, ARRAY_SIZE(ints), ints);

	for (i = 0; i < MAX_PIRQS; i++)
		pirq_entries[i] = -1;

	pirqs_enabled = 1;
	apic_printk(APIC_VERBOSE, KERN_INFO
			"PIRQ redirection, working around broken MP-BIOS.\n");
	max = MAX_PIRQS;
	if (ints[0] < MAX_PIRQS)
		max = ints[0];

	for (i = 0; i < max; i++) {
		apic_printk(APIC_VERBOSE, KERN_DEBUG
				"... PIRQ%d -> IRQ %d\n", i, ints[i+1]);
		/*
		 * PIRQs are mapped upside down, usually.
		 */
		pirq_entries[MAX_PIRQS-i-1] = ints[i+1];
	}
	return 1;
}

__setup("pirq=", ioapic_pirq_setup);
#endif /* CONFIG_X86_32 */
#ifdef CONFIG_INTR_REMAP
/* I/O APIC RTE contents at the OS boot up */
static struct IO_APIC_route_entry *early_ioapic_entries[MAX_IO_APICS];

/*
 * Saves and masks all the unmasked IO-APIC RTE's
 */
int save_mask_IO_APIC_setup(void)
{
	union IO_APIC_reg_01 reg_01;
	unsigned long flags;
	int apic, pin;

	/*
	 * The number of IO-APIC IRQ registers (== #pins):
	 */
	for (apic = 0; apic < nr_ioapics; apic++) {
		spin_lock_irqsave(&ioapic_lock, flags);
		reg_01.raw = io_apic_read(apic, 1);
		spin_unlock_irqrestore(&ioapic_lock, flags);
		nr_ioapic_registers[apic] = reg_01.bits.entries+1;
	}

	for (apic = 0; apic < nr_ioapics; apic++) {
		early_ioapic_entries[apic] =
			kzalloc(sizeof(struct IO_APIC_route_entry) *
				nr_ioapic_registers[apic], GFP_KERNEL);
		if (!early_ioapic_entries[apic])
			goto nomem;
	}

	for (apic = 0; apic < nr_ioapics; apic++)
		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
			struct IO_APIC_route_entry entry;

			entry = early_ioapic_entries[apic][pin] =
				ioapic_read_entry(apic, pin);
			if (!entry.mask) {
				entry.mask = 1;
				ioapic_write_entry(apic, pin, entry);
			}
		}

	return 0;

nomem:
	while (apic >= 0)
		kfree(early_ioapic_entries[apic--]);
	memset(early_ioapic_entries, 0,
		ARRAY_SIZE(early_ioapic_entries));

	return -ENOMEM;
}
void restore_IO_APIC_setup(void)
{
	int apic, pin;

	for (apic = 0; apic < nr_ioapics; apic++) {
		if (!early_ioapic_entries[apic])
			break;
		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
			ioapic_write_entry(apic, pin,
					   early_ioapic_entries[apic][pin]);
		kfree(early_ioapic_entries[apic]);
		early_ioapic_entries[apic] = NULL;
	}
}
void reinit_intr_remapped_IO_APIC(int intr_remapping)
{
	/*
	 * for now plain restore of previous settings.
	 * TBD: In the case of OS enabling interrupt-remapping,
	 * IO-APIC RTE's need to be setup to point to interrupt-remapping
	 * table entries. for now, do a plain restore, and wait for
	 * the setup_IO_APIC_irqs() to do proper initialization.
	 */
	restore_IO_APIC_setup();
}
#endif
/*
 * Find the IRQ entry number of a certain pin.
 */
static int find_irq_entry(int apic, int pin, int type)
{
	int i;

	for (i = 0; i < mp_irq_entries; i++)
		if (mp_irqs[i].mp_irqtype == type &&
		    (mp_irqs[i].mp_dstapic == mp_ioapics[apic].mp_apicid ||
		     mp_irqs[i].mp_dstapic == MP_APIC_ALL) &&
		    mp_irqs[i].mp_dstirq == pin)
			return i;

	return -1;
}
/*
 * Find the pin to which IRQ[irq] (ISA) is connected
 */
static int __init find_isa_irq_pin(int irq, int type)
{
	int i;

	for (i = 0; i < mp_irq_entries; i++) {
		int lbus = mp_irqs[i].mp_srcbus;

		if (test_bit(lbus, mp_bus_not_pci) &&
		    (mp_irqs[i].mp_irqtype == type) &&
		    (mp_irqs[i].mp_srcbusirq == irq))

			return mp_irqs[i].mp_dstirq;
	}
	return -1;
}
static int __init find_isa_irq_apic(int irq, int type)
{
	int i;

	for (i = 0; i < mp_irq_entries; i++) {
		int lbus = mp_irqs[i].mp_srcbus;

		if (test_bit(lbus, mp_bus_not_pci) &&
		    (mp_irqs[i].mp_irqtype == type) &&
		    (mp_irqs[i].mp_srcbusirq == irq))
			break;
	}
	if (i < mp_irq_entries) {
		int apic;
		for (apic = 0; apic < nr_ioapics; apic++) {
			if (mp_ioapics[apic].mp_apicid == mp_irqs[i].mp_dstapic)
				return apic;
		}
	}

	return -1;
}
/*
 * Find a specific PCI IRQ entry.
 * Not an __init, possibly needed by modules
 */
static int pin_2_irq(int idx, int apic, int pin);

int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
{
	int apic, i, best_guess = -1;

	apic_printk(APIC_DEBUG, "querying PCI -> IRQ mapping bus:%d, slot:%d, pin:%d.\n",
		    bus, slot, pin);
	if (test_bit(bus, mp_bus_not_pci)) {
		apic_printk(APIC_VERBOSE, "PCI BIOS passed nonexistent PCI bus %d!\n", bus);
		return -1;
	}
	for (i = 0; i < mp_irq_entries; i++) {
		int lbus = mp_irqs[i].mp_srcbus;

		for (apic = 0; apic < nr_ioapics; apic++)
			if (mp_ioapics[apic].mp_apicid == mp_irqs[i].mp_dstapic ||
			    mp_irqs[i].mp_dstapic == MP_APIC_ALL)
				break;

		if (!test_bit(lbus, mp_bus_not_pci) &&
		    !mp_irqs[i].mp_irqtype &&
		    (bus == lbus) &&
		    (slot == ((mp_irqs[i].mp_srcbusirq >> 2) & 0x1f))) {
			int irq = pin_2_irq(i, apic, mp_irqs[i].mp_dstirq);

			if (!(apic || IO_APIC_IRQ(irq)))
				continue;

			if (pin == (mp_irqs[i].mp_srcbusirq & 3))
				return irq;
			/*
			 * Use the first all-but-pin matching entry as a
			 * best-guess fuzzy result for broken mptables.
			 */
			if (best_guess < 0)
				best_guess = irq;
		}
	}
	return best_guess;
}

EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
#if defined(CONFIG_EISA) || defined(CONFIG_MCA)
/*
 * EISA Edge/Level control register, ELCR
 */
static int EISA_ELCR(unsigned int irq)
{
	if (irq < 16) {
		unsigned int port = 0x4d0 + (irq >> 3);
		return (inb(port) >> (irq & 7)) & 1;
	}
	apic_printk(APIC_VERBOSE, KERN_INFO
			"Broken MPtable reports ISA irq %d\n", irq);
	return 0;
}
#endif
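/*
 * Worked example (editorial): for ISA irq 9 the function above reads
 * port 0x4d0 + (9 >> 3) = 0x4d1 and tests bit (9 & 7) = 1; a set bit
 * means the line is level triggered, a clear bit means edge.
 */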
/* ISA interrupts are always polarity zero edge triggered,
 * when listed as conforming in the MP table. */

#define default_ISA_trigger(idx)	(0)
#define default_ISA_polarity(idx)	(0)

/* EISA interrupts are always polarity zero and can be edge or level
 * trigger depending on the ELCR value.  If an interrupt is listed as
 * EISA conforming in the MP table, that means its trigger type must
 * be read in from the ELCR */

#define default_EISA_trigger(idx)	(EISA_ELCR(mp_irqs[idx].mp_srcbusirq))
#define default_EISA_polarity(idx)	default_ISA_polarity(idx)

/* PCI interrupts are always polarity one level triggered,
 * when listed as conforming in the MP table. */

#define default_PCI_trigger(idx)	(1)
#define default_PCI_polarity(idx)	(1)

/* MCA interrupts are always polarity zero level triggered,
 * when listed as conforming in the MP table. */

#define default_MCA_trigger(idx)	(1)
#define default_MCA_polarity(idx)	default_ISA_polarity(idx)
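/*
 * Summary of the conforming defaults above (editorial): ISA is
 * edge/high-active, EISA takes its trigger from the ELCR (polarity as
 * ISA), PCI is level/low-active, and MCA is level with ISA polarity.
 * In these macros 0 means edge / high-active, 1 means level / low-active.
 */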
static int MPBIOS_polarity(int idx)
{
	int bus = mp_irqs[idx].mp_srcbus;
	int polarity;

	/*
	 * Determine IRQ line polarity (high active or low active):
	 */
	switch (mp_irqs[idx].mp_irqflag & 3)
	{
		case 0: /* conforms, ie. bus-type dependent polarity */
			if (test_bit(bus, mp_bus_not_pci))
				polarity = default_ISA_polarity(idx);
			else
				polarity = default_PCI_polarity(idx);
			break;
		case 1: /* high active */
		{
			polarity = 0;
			break;
		}
		case 2: /* reserved */
		{
			printk(KERN_WARNING "broken BIOS!!\n");
			polarity = 1;
			break;
		}
		case 3: /* low active */
		{
			polarity = 1;
			break;
		}
		default: /* invalid */
		{
			printk(KERN_WARNING "broken BIOS!!\n");
			polarity = 1;
			break;
		}
	}
	return polarity;
}
static int MPBIOS_trigger(int idx)
{
	int bus = mp_irqs[idx].mp_srcbus;
	int trigger;

	/*
	 * Determine IRQ trigger mode (edge or level sensitive):
	 */
	switch ((mp_irqs[idx].mp_irqflag>>2) & 3)
	{
		case 0: /* conforms, ie. bus-type dependent */
			if (test_bit(bus, mp_bus_not_pci))
				trigger = default_ISA_trigger(idx);
			else
				trigger = default_PCI_trigger(idx);
#if defined(CONFIG_EISA) || defined(CONFIG_MCA)
			switch (mp_bus_id_to_type[bus]) {
				case MP_BUS_ISA: /* ISA pin */
				{
					/* set before the switch */
					break;
				}
				case MP_BUS_EISA: /* EISA pin */
				{
					trigger = default_EISA_trigger(idx);
					break;
				}
				case MP_BUS_PCI: /* PCI pin */
				{
					/* set before the switch */
					break;
				}
				case MP_BUS_MCA: /* MCA pin */
				{
					trigger = default_MCA_trigger(idx);
					break;
				}
				default:
				{
					printk(KERN_WARNING "broken BIOS!!\n");
					trigger = 1;
					break;
				}
			}
#endif
			break;
		case 1: /* edge */
		{
			trigger = 0;
			break;
		}
		case 2: /* reserved */
		{
			printk(KERN_WARNING "broken BIOS!!\n");
			trigger = 1;
			break;
		}
		case 3: /* level */
		{
			trigger = 1;
			break;
		}
		default: /* invalid */
		{
			printk(KERN_WARNING "broken BIOS!!\n");
			trigger = 0;
			break;
		}
	}
	return trigger;
}
static inline int irq_polarity(int idx)
{
	return MPBIOS_polarity(idx);
}

static inline int irq_trigger(int idx)
{
	return MPBIOS_trigger(idx);
}
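/*
 * (Editorial note:) mp_irqflag packs polarity into bits 0-1 and trigger
 * mode into bits 3-2, as the two switch statements above decode.  E.g.
 * an irqflag of 0x0f means polarity 3 (low active) and trigger 3
 * (level), the usual explicit setting for PCI interrupt lines.
 */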
int (*ioapic_renumber_irq)(int ioapic, int irq);
static int pin_2_irq(int idx, int apic, int pin)
{
	int irq, i;
	int bus = mp_irqs[idx].mp_srcbus;

	/*
	 * Debugging check, we are in big trouble if this message pops up!
	 */
	if (mp_irqs[idx].mp_dstirq != pin)
		printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n");

	if (test_bit(bus, mp_bus_not_pci)) {
		irq = mp_irqs[idx].mp_srcbusirq;
	} else {
		/*
		 * PCI IRQs are mapped in order
		 */
		i = irq = 0;
		while (i < apic)
			irq += nr_ioapic_registers[i++];
		irq += pin;
		/*
		 * For MPS mode, so far only needed by ES7000 platform
		 */
		if (ioapic_renumber_irq)
			irq = ioapic_renumber_irq(apic, irq);
	}

#ifdef CONFIG_X86_32
	/*
	 * PCI IRQ command line redirection. Yes, limits are hardcoded.
	 */
	if ((pin >= 16) && (pin <= 23)) {
		if (pirq_entries[pin-16] != -1) {
			if (!pirq_entries[pin-16]) {
				apic_printk(APIC_VERBOSE, KERN_DEBUG
						"disabling PIRQ%d\n", pin-16);
			} else {
				irq = pirq_entries[pin-16];
				apic_printk(APIC_VERBOSE, KERN_DEBUG
						"using PIRQ%d -> IRQ %d\n",
						pin-16, irq);
			}
		}
	}
#endif

	return irq;
}
void lock_vector_lock(void)
{
	/* Held so that the online set of cpus does not change
	 * during assign_irq_vector.
	 */
	spin_lock(&vector_lock);
}

void unlock_vector_lock(void)
{
	spin_unlock(&vector_lock);
}
static int __assign_irq_vector(int irq, cpumask_t mask)
{
	/*
	 * NOTE! The local APIC isn't very good at handling
	 * multiple interrupts at the same interrupt level.
	 * As the interrupt level is determined by taking the
	 * vector number and shifting that right by 4, we
	 * want to spread these out a bit so that they don't
	 * all fall in the same interrupt level.
	 *
	 * Also, we've got to be careful not to trash gate
	 * 0x80, because int 0x80 is hm, kind of importantish. ;)
	 */
	static int current_vector = FIRST_DEVICE_VECTOR, current_offset = 0;
	unsigned int old_vector;
	int cpu;
	struct irq_cfg *cfg;

	cfg = irq_cfg(irq);

	/* Only try and allocate irqs on cpus that are present */
	cpus_and(mask, mask, cpu_online_map);

	if ((cfg->move_in_progress) || cfg->move_cleanup_count)
		return -EBUSY;

	old_vector = cfg->vector;
	if (old_vector) {
		cpumask_t tmp;
		cpus_and(tmp, cfg->domain, mask);
		if (!cpus_empty(tmp))
			return 0;
	}

	for_each_cpu_mask_nr(cpu, mask) {
		cpumask_t domain, new_mask;
		int new_cpu;
		int vector, offset;

		domain = vector_allocation_domain(cpu);
		cpus_and(new_mask, domain, cpu_online_map);

		vector = current_vector;
		offset = current_offset;
next:
		vector += 8;
		if (vector >= first_system_vector) {
			/* If we run out of vectors on large boxen, must share them. */
			offset = (offset + 1) % 8;
			vector = FIRST_DEVICE_VECTOR + offset;
		}
		if (unlikely(current_vector == vector))
			continue;
#ifdef CONFIG_X86_64
		if (vector == IA32_SYSCALL_VECTOR)
			goto next;
#else
		if (vector == SYSCALL_VECTOR)
			goto next;
#endif
		for_each_cpu_mask_nr(new_cpu, new_mask)
			if (per_cpu(vector_irq, new_cpu)[vector] != -1)
				goto next;
		/* Found one! */
		current_vector = vector;
		current_offset = offset;
		if (old_vector) {
			cfg->move_in_progress = 1;
			cfg->old_domain = cfg->domain;
		}
		for_each_cpu_mask_nr(new_cpu, new_mask)
			per_cpu(vector_irq, new_cpu)[vector] = irq;
		cfg->vector = vector;
		cfg->domain = domain;
		return 0;
	}
	return -ENOSPC;
}
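/*
 * (Editorial note on the arithmetic above:) the local APIC groups 16
 * consecutive vectors into one priority class (class = vector >> 4).
 * Stepping the search by 8 with a rotating offset places at most two
 * vectors into any one class per sweep, instead of filling a class
 * before moving to the next -- which is exactly the spreading the
 * NOTE comment asks for.
 */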
static int assign_irq_vector(int irq, cpumask_t mask)
{
	int err;
	unsigned long flags;

	spin_lock_irqsave(&vector_lock, flags);
	err = __assign_irq_vector(irq, mask);
	spin_unlock_irqrestore(&vector_lock, flags);
	return err;
}
static void __clear_irq_vector(int irq)
{
	struct irq_cfg *cfg;
	cpumask_t mask;
	int cpu, vector;

	cfg = irq_cfg(irq);
	BUG_ON(!cfg->vector);

	vector = cfg->vector;
	cpus_and(mask, cfg->domain, cpu_online_map);
	for_each_cpu_mask_nr(cpu, mask)
		per_cpu(vector_irq, cpu)[vector] = -1;

	cfg->vector = 0;
	cpus_clear(cfg->domain);
}
void __setup_vector_irq(int cpu)
{
	/* Initialize vector_irq on a new cpu */
	/* This function must be called with vector_lock held */
	int irq, vector;
	struct irq_cfg *cfg;

	/* Mark the inuse vectors */
	for_each_irq_cfg(irq, cfg) {
		if (!cpu_isset(cpu, cfg->domain))
			continue;
		vector = cfg->vector;
		per_cpu(vector_irq, cpu)[vector] = irq;
	}
	/* Mark the free vectors */
	for (vector = 0; vector < NR_VECTORS; ++vector) {
		irq = per_cpu(vector_irq, cpu)[vector];
		if (irq < 0)
			continue;

		cfg = irq_cfg(irq);
		if (!cpu_isset(cpu, cfg->domain))
			per_cpu(vector_irq, cpu)[vector] = -1;
	}
}
static struct irq_chip ioapic_chip;
#ifdef CONFIG_INTR_REMAP
static struct irq_chip ir_ioapic_chip;
#endif

#define IOAPIC_AUTO	-1
#define IOAPIC_EDGE	0
#define IOAPIC_LEVEL	1
#ifdef CONFIG_X86_32
static inline int IO_APIC_irq_trigger(int irq)
{
	int apic, idx, pin;

	for (apic = 0; apic < nr_ioapics; apic++) {
		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
			idx = find_irq_entry(apic, pin, mp_INT);
			if ((idx != -1) && (irq == pin_2_irq(idx, apic, pin)))
				return irq_trigger(idx);
		}
	}
	/*
	 * nonexistent IRQs are edge default
	 */
	return 0;
}
#else
static inline int IO_APIC_irq_trigger(int irq)
{
	return 1;
}
#endif
static void ioapic_register_intr(int irq, unsigned long trigger)
{
	struct irq_desc *desc;

	desc = irq_to_desc(irq);

	if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
	    trigger == IOAPIC_LEVEL)
		desc->status |= IRQ_LEVEL;
	else
		desc->status &= ~IRQ_LEVEL;

#ifdef CONFIG_INTR_REMAP
	if (irq_remapped(irq)) {
		desc->status |= IRQ_MOVE_PCNTXT;
		if (trigger)
			set_irq_chip_and_handler_name(irq, &ir_ioapic_chip,
						      handle_fasteoi_irq,
						      "fasteoi");
		else
			set_irq_chip_and_handler_name(irq, &ir_ioapic_chip,
						      handle_edge_irq, "edge");
		return;
	}
#endif
	if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
	    trigger == IOAPIC_LEVEL)
		set_irq_chip_and_handler_name(irq, &ioapic_chip,
					      handle_fasteoi_irq,
					      "fasteoi");
	else
		set_irq_chip_and_handler_name(irq, &ioapic_chip,
					      handle_edge_irq, "edge");
}
static int setup_ioapic_entry(int apic, int irq,
			      struct IO_APIC_route_entry *entry,
			      unsigned int destination, int trigger,
			      int polarity, int vector)
{
	/*
	 * add it to the IO-APIC irq-routing table:
	 */
	memset(entry, 0, sizeof(*entry));

#ifdef CONFIG_INTR_REMAP
	if (intr_remapping_enabled) {
		struct intel_iommu *iommu = map_ioapic_to_ir(apic);
		struct irte irte;
		struct IR_IO_APIC_route_entry *ir_entry =
			(struct IR_IO_APIC_route_entry *) entry;
		int index;

		if (!iommu)
			panic("No mapping iommu for ioapic %d\n", apic);

		index = alloc_irte(iommu, irq, 1);
		if (index < 0)
			panic("Failed to allocate IRTE for ioapic %d\n", apic);

		memset(&irte, 0, sizeof(irte));

		irte.present = 1;
		irte.dst_mode = INT_DEST_MODE;
		irte.trigger_mode = trigger;
		irte.dlvry_mode = INT_DELIVERY_MODE;
		irte.vector = vector;
		irte.dest_id = IRTE_DEST(destination);

		modify_irte(irq, &irte);

		ir_entry->index2 = (index >> 15) & 0x1;
		ir_entry->zero = 0;
		ir_entry->format = 1;
		ir_entry->index = (index & 0x7fff);
	} else
#endif
	{
		entry->delivery_mode = INT_DELIVERY_MODE;
		entry->dest_mode = INT_DEST_MODE;
		entry->dest = destination;
	}

	entry->mask = 0;				/* enable IRQ */
	entry->trigger = trigger;
	entry->polarity = polarity;
	entry->vector = vector;

	/* Mask level triggered irqs.
	 * Use IRQ_DELAYED_DISABLE for edge triggered irqs.
	 */
	if (trigger)
		entry->mask = 1;
	return 0;
}
static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq,
			      int trigger, int polarity)
{
	struct irq_cfg *cfg;
	struct IO_APIC_route_entry entry;
	cpumask_t mask;

	if (!IO_APIC_IRQ(irq))
		return;

	cfg = irq_cfg(irq);

	mask = TARGET_CPUS;
	if (assign_irq_vector(irq, mask))
		return;

	cpus_and(mask, cfg->domain, mask);

	apic_printk(APIC_VERBOSE, KERN_DEBUG
		    "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> "
		    "IRQ %d Mode:%i Active:%i)\n",
		    apic, mp_ioapics[apic].mp_apicid, pin, cfg->vector,
		    irq, trigger, polarity);


	if (setup_ioapic_entry(mp_ioapics[apic].mp_apicid, irq, &entry,
			       cpu_mask_to_apicid(mask), trigger, polarity,
			       cfg->vector)) {
		printk("Failed to setup ioapic entry for ioapic %d, pin %d\n",
		       mp_ioapics[apic].mp_apicid, pin);
		__clear_irq_vector(irq);
		return;
	}

	ioapic_register_intr(irq, trigger);
	if (irq < 16)
		disable_8259A_irq(irq);

	ioapic_write_entry(apic, pin, entry);
}
static void __init setup_IO_APIC_irqs(void)
{
	int apic, pin, idx, irq;
	int notcon = 0;

	apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");

	for (apic = 0; apic < nr_ioapics; apic++) {
		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {

			idx = find_irq_entry(apic, pin, mp_INT);
			if (idx == -1) {
				if (!notcon) {
					notcon = 1;
					apic_printk(APIC_VERBOSE,
						KERN_DEBUG " %d-%d",
						mp_ioapics[apic].mp_apicid,
						pin);
				} else
					apic_printk(APIC_VERBOSE, " %d-%d",
						mp_ioapics[apic].mp_apicid,
						pin);
				continue;
			}
			if (notcon) {
				apic_printk(APIC_VERBOSE,
					" (apicid-pin) not connected\n");
				notcon = 0;
			}

			irq = pin_2_irq(idx, apic, pin);
#ifdef CONFIG_X86_32
			if (multi_timer_check(apic, irq))
				continue;
#endif
			add_pin_to_irq(irq, apic, pin);

			setup_IO_APIC_irq(apic, pin, irq,
					irq_trigger(idx), irq_polarity(idx));
		}
	}

	if (notcon)
		apic_printk(APIC_VERBOSE,
			" (apicid-pin) not connected\n");
}
/*
 * Set up the timer pin, possibly with the 8259A-master behind.
 */
static void __init setup_timer_IRQ0_pin(unsigned int apic, unsigned int pin,
					int vector)
{
	struct IO_APIC_route_entry entry;

#ifdef CONFIG_INTR_REMAP
	if (intr_remapping_enabled)
		return;
#endif

	memset(&entry, 0, sizeof(entry));

	/*
	 * We use logical delivery to get the timer IRQ
	 * to the first CPU.
	 */
	entry.dest_mode = INT_DEST_MODE;
	entry.mask = 1;					/* mask IRQ now */
	entry.dest = cpu_mask_to_apicid(TARGET_CPUS);
	entry.delivery_mode = INT_DELIVERY_MODE;
	entry.polarity = 0;
	entry.trigger = 0;
	entry.vector = vector;

	/*
	 * The timer IRQ doesn't have to know that behind the
	 * scene we may have a 8259A-master in AEOI mode ...
	 */
	set_irq_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, "edge");

	/*
	 * Add it to the IO-APIC irq-routing table:
	 */
	ioapic_write_entry(apic, pin, entry);
}
__apicdebuginit(void) print_IO_APIC(void)
{
	int apic, i;
	union IO_APIC_reg_00 reg_00;
	union IO_APIC_reg_01 reg_01;
	union IO_APIC_reg_02 reg_02;
	union IO_APIC_reg_03 reg_03;
	unsigned long flags;
	struct irq_cfg *cfg;
	unsigned int irq;

	if (apic_verbosity == APIC_QUIET)
		return;

	printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
	for (i = 0; i < nr_ioapics; i++)
		printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
		       mp_ioapics[i].mp_apicid, nr_ioapic_registers[i]);

	/*
	 * We are a bit conservative about what we expect.  We have to
	 * know about every hardware change ASAP.
	 */
	printk(KERN_INFO "testing the IO APIC.......................\n");

	for (apic = 0; apic < nr_ioapics; apic++) {

		spin_lock_irqsave(&ioapic_lock, flags);
		reg_00.raw = io_apic_read(apic, 0);
		reg_01.raw = io_apic_read(apic, 1);
		if (reg_01.bits.version >= 0x10)
			reg_02.raw = io_apic_read(apic, 2);
		if (reg_01.bits.version >= 0x20)
			reg_03.raw = io_apic_read(apic, 3);
		spin_unlock_irqrestore(&ioapic_lock, flags);

		printk("\n");
		printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].mp_apicid);
		printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
		printk(KERN_DEBUG ".......    : physical APIC id: %02X\n", reg_00.bits.ID);
		printk(KERN_DEBUG ".......    : Delivery Type: %X\n", reg_00.bits.delivery_type);
		printk(KERN_DEBUG ".......    : LTS          : %X\n", reg_00.bits.LTS);

		printk(KERN_DEBUG ".... register #01: %08X\n", *(int *)&reg_01);
		printk(KERN_DEBUG ".......     : max redirection entries: %04X\n", reg_01.bits.entries);
		printk(KERN_DEBUG ".......     : PRQ implemented: %X\n", reg_01.bits.PRQ);
		printk(KERN_DEBUG ".......     : IO APIC version: %04X\n", reg_01.bits.version);

		/*
		 * Some Intel chipsets with IO APIC VERSION of 0x1? don't have reg_02,
		 * but the value of reg_02 is read as the previous read register
		 * value, so ignore it if reg_02 == reg_01.
		 */
		if (reg_01.bits.version >= 0x10 && reg_02.raw != reg_01.raw) {
			printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw);
			printk(KERN_DEBUG ".......     : arbitration: %02X\n", reg_02.bits.arbitration);
		}

		/*
		 * Some Intel chipsets with IO APIC VERSION of 0x2? don't have reg_02
		 * or reg_03, but the value of reg_0[23] is read as the previous read
		 * register value, so ignore it if reg_03 == reg_0[12].
		 */
		if (reg_01.bits.version >= 0x20 && reg_03.raw != reg_02.raw &&
		    reg_03.raw != reg_01.raw) {
			printk(KERN_DEBUG ".... register #03: %08X\n", reg_03.raw);
			printk(KERN_DEBUG ".......     : Boot DT    : %X\n", reg_03.bits.boot_DT);
		}

		printk(KERN_DEBUG ".... IRQ redirection table:\n");

		printk(KERN_DEBUG " NR Dst Mask Trig IRR Pol"
				  " Stat Dmod Deli Vect:   \n");

		for (i = 0; i <= reg_01.bits.entries; i++) {
			struct IO_APIC_route_entry entry;

			entry = ioapic_read_entry(apic, i);

			printk(KERN_DEBUG " %02x %03X ",
				i,
				entry.dest
			);

			printk("%1d    %1d    %1d   %1d   %1d    %1d    %1d    %02X\n",
				entry.mask,
				entry.trigger,
				entry.irr,
				entry.polarity,
				entry.delivery_status,
				entry.dest_mode,
				entry.delivery_mode,
				entry.vector
			);
		}
	}
	printk(KERN_DEBUG "IRQ to pin mappings:\n");
	for_each_irq_cfg(irq, cfg) {
		struct irq_pin_list *entry = cfg->irq_2_pin;
		if (!entry)
			continue;
		printk(KERN_DEBUG "IRQ%d ", irq);
		for (;;) {
			printk("-> %d:%d", entry->apic, entry->pin);
			if (!entry->next)
				break;
			entry = entry->next;
		}
		printk("\n");
	}

	printk(KERN_INFO ".................................... done.\n");

	return;
}
__apicdebuginit(void) print_APIC_bitfield(int base)
{
	unsigned int v;
	int i, j;

	if (apic_verbosity == APIC_QUIET)
		return;

	printk(KERN_DEBUG "0123456789abcdef0123456789abcdef\n" KERN_DEBUG);
	for (i = 0; i < 8; i++) {
		v = apic_read(base + i*0x10);
		for (j = 0; j < 32; j++) {
			if (v & (1<<j))
				printk("1");
			else
				printk("0");
		}
		printk("\n");
	}
}
__apicdebuginit(void) print_local_APIC(void *dummy)
{
	unsigned int v, ver, maxlvt;
	u64 icr;

	if (apic_verbosity == APIC_QUIET)
		return;

	printk("\n" KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
		smp_processor_id(), hard_smp_processor_id());
	v = apic_read(APIC_ID);
	printk(KERN_INFO "... APIC ID:      %08x (%01x)\n", v, read_apic_id());
	v = apic_read(APIC_LVR);
	printk(KERN_INFO "... APIC VERSION: %08x\n", v);
	ver = GET_APIC_VERSION(v);
	maxlvt = lapic_get_maxlvt();

	v = apic_read(APIC_TASKPRI);
	printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);

	if (APIC_INTEGRATED(ver)) {			/* !82489DX */
		if (!APIC_XAPIC(ver)) {
			v = apic_read(APIC_ARBPRI);
			printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v,
			       v & APIC_ARBPRI_MASK);
		}
		v = apic_read(APIC_PROCPRI);
		printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v);
	}

	/*
	 * Remote read supported only in the 82489DX and local APIC for
	 * Pentium processors.
	 */
	if (!APIC_INTEGRATED(ver) || maxlvt == 3) {
		v = apic_read(APIC_RRR);
		printk(KERN_DEBUG "... APIC RRR: %08x\n", v);
	}

	v = apic_read(APIC_LDR);
	printk(KERN_DEBUG "... APIC LDR: %08x\n", v);
	if (!x2apic_enabled()) {
		v = apic_read(APIC_DFR);
		printk(KERN_DEBUG "... APIC DFR: %08x\n", v);
	}
	v = apic_read(APIC_SPIV);
	printk(KERN_DEBUG "... APIC SPIV: %08x\n", v);

	printk(KERN_DEBUG "... APIC ISR field:\n");
	print_APIC_bitfield(APIC_ISR);
	printk(KERN_DEBUG "... APIC TMR field:\n");
	print_APIC_bitfield(APIC_TMR);
	printk(KERN_DEBUG "... APIC IRR field:\n");
	print_APIC_bitfield(APIC_IRR);

	if (APIC_INTEGRATED(ver)) {		/* !82489DX */
		if (maxlvt > 3)		/* Due to the Pentium erratum 3AP. */
			apic_write(APIC_ESR, 0);

		v = apic_read(APIC_ESR);
		printk(KERN_DEBUG "... APIC ESR: %08x\n", v);
	}

	icr = apic_icr_read();
	printk(KERN_DEBUG "... APIC ICR: %08x\n", (u32)icr);
	printk(KERN_DEBUG "... APIC ICR2: %08x\n", (u32)(icr >> 32));

	v = apic_read(APIC_LVTT);
	printk(KERN_DEBUG "... APIC LVTT: %08x\n", v);

	if (maxlvt > 3) {			/* PC is LVT#4. */
		v = apic_read(APIC_LVTPC);
		printk(KERN_DEBUG "... APIC LVTPC: %08x\n", v);
	}
	v = apic_read(APIC_LVT0);
	printk(KERN_DEBUG "... APIC LVT0: %08x\n", v);
	v = apic_read(APIC_LVT1);
	printk(KERN_DEBUG "... APIC LVT1: %08x\n", v);

	if (maxlvt > 2) {			/* ERR is LVT#3. */
		v = apic_read(APIC_LVTERR);
		printk(KERN_DEBUG "... APIC LVTERR: %08x\n", v);
	}

	v = apic_read(APIC_TMICT);
	printk(KERN_DEBUG "... APIC TMICT: %08x\n", v);
	v = apic_read(APIC_TMCCT);
	printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v);
	v = apic_read(APIC_TDCR);
	printk(KERN_DEBUG "... APIC TDCR: %08x\n", v);
	printk("\n");
}
__apicdebuginit(void) print_all_local_APICs(void)
{
	int cpu;

	for_each_online_cpu(cpu)
		smp_call_function_single(cpu, print_local_APIC, NULL, 1);
}
__apicdebuginit(void) print_PIC(void)
{
	unsigned int v;
	unsigned long flags;

	if (apic_verbosity == APIC_QUIET)
		return;

	printk(KERN_DEBUG "\nprinting PIC contents\n");

	spin_lock_irqsave(&i8259A_lock, flags);

	v = inb(0xa1) << 8 | inb(0x21);
	printk(KERN_DEBUG "... PIC  IMR: %04x\n", v);

	v = inb(0xa0) << 8 | inb(0x20);
	printk(KERN_DEBUG "... PIC  IRR: %04x\n", v);

	/* Select ISR via OCW3, read it, then switch back to IRR. */
	outb(0x0b, 0xa0);
	outb(0x0b, 0x20);
	v = inb(0xa0) << 8 | inb(0x20);
	outb(0x0a, 0xa0);
	outb(0x0a, 0x20);

	spin_unlock_irqrestore(&i8259A_lock, flags);

	printk(KERN_DEBUG "... PIC  ISR: %04x\n", v);

	v = inb(0x4d1) << 8 | inb(0x4d0);
	printk(KERN_DEBUG "... PIC ELCR: %04x\n", v);
}
__apicdebuginit(int) print_all_ICs(void)
{
	print_PIC();
	print_all_local_APICs();
	print_IO_APIC();

	return 0;
}

fs_initcall(print_all_ICs);
/* Where if anywhere is the i8259 connect in external int mode */
static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };

void __init enable_IO_APIC(void)
{
	union IO_APIC_reg_01 reg_01;
	int i8259_apic, i8259_pin;
	int apic;
	unsigned long flags;

#ifdef CONFIG_X86_32
	int i;
	if (!pirqs_enabled)
		for (i = 0; i < MAX_PIRQS; i++)
			pirq_entries[i] = -1;
#endif

	/*
	 * The number of IO-APIC IRQ registers (== #pins):
	 */
	for (apic = 0; apic < nr_ioapics; apic++) {
		spin_lock_irqsave(&ioapic_lock, flags);
		reg_01.raw = io_apic_read(apic, 1);
		spin_unlock_irqrestore(&ioapic_lock, flags);
		nr_ioapic_registers[apic] = reg_01.bits.entries+1;
	}
	for (apic = 0; apic < nr_ioapics; apic++) {
		int pin;
		/* See if any of the pins is in ExtINT mode */
		for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
			struct IO_APIC_route_entry entry;
			entry = ioapic_read_entry(apic, pin);

			/* If the interrupt line is enabled and in ExtInt mode
			 * I have found the pin where the i8259 is connected.
			 */
			if ((entry.mask == 0) && (entry.delivery_mode == dest_ExtINT)) {
				ioapic_i8259.apic = apic;
				ioapic_i8259.pin  = pin;
				goto found_i8259;
			}
		}
	}
 found_i8259:
	/* Look to see what if the MP table has reported the ExtINT */
	/* If we could not find the appropriate pin by looking at the ioapic
	 * the i8259 probably is not connected the ioapic but give the
	 * mptable a chance anyway.
	 */
	i8259_pin  = find_isa_irq_pin(0, mp_ExtINT);
	i8259_apic = find_isa_irq_apic(0, mp_ExtINT);
	/* Trust the MP table if nothing is setup in the hardware */
	if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) {
		printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n");
		ioapic_i8259.pin  = i8259_pin;
		ioapic_i8259.apic = i8259_apic;
	}
	/* Complain if the MP table and the hardware disagree */
	if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) &&
	    (i8259_pin >= 0) && (ioapic_i8259.pin >= 0))
	{
		printk(KERN_WARNING "ExtINT in hardware and MP table differ\n");
	}

	/*
	 * Do not trust the IO-APIC being empty at bootup
	 */
	clear_IO_APIC();
}
/*
 * Not an __init, needed by the reboot code
 */
void disable_IO_APIC(void)
{
	/*
	 * Clear the IO-APIC before rebooting:
	 */
	clear_IO_APIC();

	/*
	 * If the i8259 is routed through an IOAPIC
	 * Put that IOAPIC in virtual wire mode
	 * so legacy interrupts can be delivered.
	 */
	if (ioapic_i8259.pin != -1) {
		struct IO_APIC_route_entry entry;

		memset(&entry, 0, sizeof(entry));
		entry.mask            = 0; /* Enabled */
		entry.trigger         = 0; /* Edge */
		entry.irr             = 0;
		entry.polarity        = 0; /* High */
		entry.delivery_status = 0;
		entry.dest_mode       = 0; /* Physical */
		entry.delivery_mode   = dest_ExtINT; /* ExtInt */
		entry.vector          = 0;
		entry.dest            = read_apic_id();

		/*
		 * Add it to the IO-APIC irq-routing table:
		 */
		ioapic_write_entry(ioapic_i8259.apic, ioapic_i8259.pin, entry);
	}

	disconnect_bsp_APIC(ioapic_i8259.pin != -1);
}
#ifdef CONFIG_X86_32
/*
 * function to set the IO-APIC physical IDs based on the
 * values stored in the MPC table.
 *
 * by Matt Domsch <Matt_Domsch@dell.com>  Tue Dec 21 12:25:05 CST 1999
 */
static void __init setup_ioapic_ids_from_mpc(void)
{
	union IO_APIC_reg_00 reg_00;
	physid_mask_t phys_id_present_map;
	int apic;
	int i;
	unsigned char old_id;
	unsigned long flags;

	if (x86_quirks->setup_ioapic_ids && x86_quirks->setup_ioapic_ids())
		return;

	/*
	 * Don't check I/O APIC IDs for xAPIC systems.  They have
	 * no meaning without the serial APIC bus.
	 */
	if (!(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
		|| APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
		return;
	/*
	 * This is broken; anything with a real cpu count has to
	 * circumvent this idiocy regardless.
	 */
	phys_id_present_map = ioapic_phys_id_map(phys_cpu_present_map);

	/*
	 * Set the IOAPIC ID to the value stored in the MPC table.
	 */
	for (apic = 0; apic < nr_ioapics; apic++) {

		/* Read the register 0 value */
		spin_lock_irqsave(&ioapic_lock, flags);
		reg_00.raw = io_apic_read(apic, 0);
		spin_unlock_irqrestore(&ioapic_lock, flags);

		old_id = mp_ioapics[apic].mp_apicid;

		if (mp_ioapics[apic].mp_apicid >= get_physical_broadcast()) {
			printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n",
				apic, mp_ioapics[apic].mp_apicid);
			printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
				reg_00.bits.ID);
			mp_ioapics[apic].mp_apicid = reg_00.bits.ID;
		}

		/*
		 * Sanity check, is the ID really free? Every APIC in a
		 * system must have a unique ID or we get lots of nice
		 * 'stuck on smp_invalidate_needed IPI wait' messages.
		 */
		if (check_apicid_used(phys_id_present_map,
					mp_ioapics[apic].mp_apicid)) {
			printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n",
				apic, mp_ioapics[apic].mp_apicid);
			for (i = 0; i < get_physical_broadcast(); i++)
				if (!physid_isset(i, phys_id_present_map))
					break;
			if (i >= get_physical_broadcast())
				panic("Max APIC ID exceeded!\n");
			printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
				i);
			physid_set(i, phys_id_present_map);
			mp_ioapics[apic].mp_apicid = i;
		} else {
			physid_mask_t tmp;
			tmp = apicid_to_cpu_present(mp_ioapics[apic].mp_apicid);
			apic_printk(APIC_VERBOSE, "Setting %d in the "
					"phys_id_present_map\n",
					mp_ioapics[apic].mp_apicid);
			physids_or(phys_id_present_map, phys_id_present_map, tmp);
		}


		/*
		 * We need to adjust the IRQ routing table
		 * if the ID changed.
		 */
		if (old_id != mp_ioapics[apic].mp_apicid)
			for (i = 0; i < mp_irq_entries; i++)
				if (mp_irqs[i].mp_dstapic == old_id)
					mp_irqs[i].mp_dstapic
						= mp_ioapics[apic].mp_apicid;

		/*
		 * Read the right value from the MPC table and
		 * write it into the ID register.
		 */
		apic_printk(APIC_VERBOSE, KERN_INFO
			"...changing IO-APIC physical APIC ID to %d ...",
			mp_ioapics[apic].mp_apicid);

		reg_00.bits.ID = mp_ioapics[apic].mp_apicid;
		spin_lock_irqsave(&ioapic_lock, flags);
		io_apic_write(apic, 0, reg_00.raw);
		spin_unlock_irqrestore(&ioapic_lock, flags);

		/*
		 * Sanity check
		 */
		spin_lock_irqsave(&ioapic_lock, flags);
		reg_00.raw = io_apic_read(apic, 0);
		spin_unlock_irqrestore(&ioapic_lock, flags);
		if (reg_00.bits.ID != mp_ioapics[apic].mp_apicid)
			printk("could not set ID!\n");
		else
			apic_printk(APIC_VERBOSE, " ok.\n");
	}
}
#endif
int no_timer_check __initdata;

static int __init notimercheck(char *s)
{
	no_timer_check = 1;
	return 1;
}
__setup("no_timer_check", notimercheck);
/*
 * There is a nasty bug in some older SMP boards, their mptable lies
 * about the timer IRQ. We do the following to work around the situation:
 *
 *	- timer IRQ defaults to IO-APIC IRQ
 *	- if this function detects that timer IRQs are defunct, then we fall
 *	  back to ISA timer IRQs
 */
static int __init timer_irq_works(void)
{
	unsigned long t1 = jiffies;
	unsigned long flags;

	if (no_timer_check)
		return 1;

	local_save_flags(flags);
	local_irq_enable();
	/* Let ten ticks pass... */
	mdelay((10 * 1000) / HZ);
	local_irq_restore(flags);

	/*
	 * Expect a few ticks at least, to be sure some possible
	 * glue logic does not lock up after one or two first
	 * ticks in a non-ExtINT mode.  Also the local APIC
	 * might have cached one ExtINT interrupt.  Finally, at
	 * least one tick may be lost due to delays.
	 */

	/* jiffies wrap? */
	if (time_after(jiffies, t1 + 4))
		return 1;
	return 0;
}
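/*
 * Worked example (editorial): with HZ=1000 the mdelay above busy-waits
 * roughly 10 ticks' worth of time; requiring jiffies to advance by more
 * than 4 tolerates a few cached or lost ticks while still catching a
 * completely dead timer line.
 */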
/*
 * In the SMP+IOAPIC case it might happen that there are an unspecified
 * number of pending IRQ events unhandled. These cases are very rare,
 * so we 'resend' these IRQs via IPIs, to the same CPU. It's much
 * better to do it this way as thus we do not have to be aware of
 * 'pending' interrupts in the IRQ path, except at this point.
 */
/*
 * Edge triggered needs to resend any interrupt
 * that was delayed but this is now handled in the device
 * independent code.
 */

/*
 * Starting up a edge-triggered IO-APIC interrupt is
 * nasty - we need to make sure that we get the edge.
 * If it is already asserted for some reason, we need
 * return 1 to indicate that is was pending.
 *
 * This is not complete - we should be able to fake
 * an edge even if it isn't on the 8259A...
 */

static unsigned int startup_ioapic_irq(unsigned int irq)
{
	int was_pending = 0;
	unsigned long flags;

	spin_lock_irqsave(&ioapic_lock, flags);
	if (irq < 16) {
		disable_8259A_irq(irq);
		if (i8259A_irq_pending(irq))
			was_pending = 1;
	}
	__unmask_IO_APIC_irq(irq);
	spin_unlock_irqrestore(&ioapic_lock, flags);

	return was_pending;
}
#ifdef CONFIG_X86_64
static int ioapic_retrigger_irq(unsigned int irq)
{

	struct irq_cfg *cfg = irq_cfg(irq);
	unsigned long flags;

	spin_lock_irqsave(&vector_lock, flags);
	send_IPI_mask(cpumask_of_cpu(first_cpu(cfg->domain)), cfg->vector);
	spin_unlock_irqrestore(&vector_lock, flags);

	return 1;
}
#else
static int ioapic_retrigger_irq(unsigned int irq)
{
	send_IPI_self(irq_cfg(irq)->vector);

	return 1;
}
#endif
/*
 * Level and edge triggered IO-APIC interrupts need different handling,
 * so we use two separate IRQ descriptors. Edge triggered IRQs can be
 * handled with the level-triggered descriptor, but that one has slightly
 * more overhead. Level-triggered interrupts cannot be handled with the
 * edge-triggered handler, without risking IRQ storms and other ugly
 * races.
 */
#ifdef CONFIG_SMP

#ifdef CONFIG_INTR_REMAP
static void ir_irq_migration(struct work_struct *work);

static DECLARE_DELAYED_WORK(ir_migration_work, ir_irq_migration);

/*
 * Migrate the IO-APIC irq in the presence of intr-remapping.
 *
 * For edge triggered, irq migration is a simple atomic update (of vector
 * and cpu destination) of IRTE and flush the hardware cache.
 *
 * For level triggered, we need to modify the io-apic RTE as well with the
 * update vector information, along with modifying IRTE with vector and
 * destination. So irq migration for level triggered is a little bit more
 * complex compared to edge triggered migration. But the good news is, we
 * use the same algorithm for level triggered migration as we have today,
 * the only difference being that we now initiate the irq migration from
 * process context instead of the interrupt context.
 *
 * In future, when we do a directed EOI (combined with cpu EOI broadcast
 * suppression) to the IO-APIC, level triggered irq migration will also be
 * as simple as edge triggered migration and we can do the irq migration
 * with a simple atomic update to IO-APIC RTE.
 */
static void migrate_ioapic_irq(int irq, cpumask_t mask)
{
	struct irq_cfg *cfg;
	struct irq_desc *desc;
	cpumask_t tmp, cleanup_mask;
	struct irte irte;
	int modify_ioapic_rte;
	unsigned int dest;
	unsigned long flags;

	cpus_and(tmp, mask, cpu_online_map);
	if (cpus_empty(tmp))
		return;

	if (get_irte(irq, &irte))
		return;

	if (assign_irq_vector(irq, mask))
		return;

	cfg = irq_cfg(irq);
	cpus_and(tmp, cfg->domain, mask);
	dest = cpu_mask_to_apicid(tmp);

	desc = irq_to_desc(irq);
	modify_ioapic_rte = desc->status & IRQ_LEVEL;
	if (modify_ioapic_rte) {
		spin_lock_irqsave(&ioapic_lock, flags);
		__target_IO_APIC_irq(irq, dest, cfg->vector);
		spin_unlock_irqrestore(&ioapic_lock, flags);
	}

	irte.vector = cfg->vector;
	irte.dest_id = IRTE_DEST(dest);

	/*
	 * Modified the IRTE and flushes the Interrupt entry cache.
	 */
	modify_irte(irq, &irte);

	if (cfg->move_in_progress) {
		cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
		cfg->move_cleanup_count = cpus_weight(cleanup_mask);
		send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
		cfg->move_in_progress = 0;
	}

	desc->affinity = mask;
}
static int migrate_irq_remapped_level(int irq)
{
	int ret = -1;
	struct irq_desc *desc = irq_to_desc(irq);

	mask_IO_APIC_irq(irq);

	if (io_apic_level_ack_pending(irq)) {
		/*
		 * Interrupt in progress. Migrating irq now will change the
		 * vector information in the IO-APIC RTE and that will confuse
		 * the EOI broadcast performed by cpu.
		 * So, delay the irq migration to the next instance.
		 */
		schedule_delayed_work(&ir_migration_work, 1);
		goto unmask;
	}

	/* everything is clear. we have right of way */
	migrate_ioapic_irq(irq, desc->pending_mask);

	ret = 0;
	desc->status &= ~IRQ_MOVE_PENDING;
	cpus_clear(desc->pending_mask);

unmask:
	unmask_IO_APIC_irq(irq);
	return ret;
}
static void ir_irq_migration(struct work_struct *work)
{
	int irq;
	struct irq_desc *desc;

	for_each_irq_desc(irq, desc) {
		if (desc->status & IRQ_MOVE_PENDING) {
			unsigned long flags;

			spin_lock_irqsave(&desc->lock, flags);
			if (!desc->chip->set_affinity ||
			    !(desc->status & IRQ_MOVE_PENDING)) {
				desc->status &= ~IRQ_MOVE_PENDING;
				spin_unlock_irqrestore(&desc->lock, flags);
				continue;
			}

			desc->chip->set_affinity(irq, desc->pending_mask);
			spin_unlock_irqrestore(&desc->lock, flags);
		}
	}
}
/*
 * Migrates the IRQ destination in the process context.
 */
static void set_ir_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc->status & IRQ_LEVEL) {
		desc->status |= IRQ_MOVE_PENDING;
		desc->pending_mask = mask;
		migrate_irq_remapped_level(irq);
		return;
	}

	migrate_ioapic_irq(irq, mask);
}
#endif
asmlinkage void smp_irq_move_cleanup_interrupt(void)
{
	unsigned vector, me;

	ack_APIC_irq();
#ifdef CONFIG_X86_64
	exit_idle();
#endif
	irq_enter();

	me = smp_processor_id();
	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
		unsigned int irq;
		struct irq_desc *desc;
		struct irq_cfg *cfg;
		irq = __get_cpu_var(vector_irq)[vector];

		desc = irq_to_desc(irq);
		if (!desc)
			continue;

		cfg = irq_cfg(irq);
		spin_lock(&desc->lock);
		if (!cfg->move_cleanup_count)
			goto unlock;

		if ((vector == cfg->vector) && cpu_isset(me, cfg->domain))
			goto unlock;

		__get_cpu_var(vector_irq)[vector] = -1;
		cfg->move_cleanup_count--;
unlock:
		spin_unlock(&desc->lock);
	}

	irq_exit();
}
static void irq_complete_move(unsigned int irq)
{
	struct irq_cfg *cfg = irq_cfg(irq);
	unsigned vector, me;

	if (likely(!cfg->move_in_progress))
		return;

	vector = ~get_irq_regs()->orig_ax;
	me = smp_processor_id();
	if ((vector == cfg->vector) && cpu_isset(me, cfg->domain)) {
		cpumask_t cleanup_mask;

		cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
		cfg->move_cleanup_count = cpus_weight(cleanup_mask);
		send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
		cfg->move_in_progress = 0;
	}
}
#else
static inline void irq_complete_move(unsigned int irq) {}
#endif
#ifdef CONFIG_INTR_REMAP
static void ack_x2apic_level(unsigned int irq)
{
	ack_x2APIC_irq();
}

static void ack_x2apic_edge(unsigned int irq)
{
	ack_x2APIC_irq();
}
#endif

static void ack_apic_edge(unsigned int irq)
{
	irq_complete_move(irq);
	move_native_irq(irq);
	ack_APIC_irq();
}
atomic_t irq_mis_count;

static void ack_apic_level(unsigned int irq)
{
#ifdef CONFIG_X86_32
	unsigned long v;
	int i;
#endif
	int do_unmask_irq = 0;

	irq_complete_move(irq);
#ifdef CONFIG_GENERIC_PENDING_IRQ
	/* If we are moving the irq we need to mask it */
	if (unlikely(irq_to_desc(irq)->status & IRQ_MOVE_PENDING)) {
		do_unmask_irq = 1;
		mask_IO_APIC_irq(irq);
	}
#endif

#ifdef CONFIG_X86_32
	/*
	 * It appears there is an erratum which affects at least version 0x11
	 * of I/O APIC (that's the 82093AA and cores integrated into various
	 * chipsets).  Under certain conditions a level-triggered interrupt is
	 * erroneously delivered as edge-triggered one but the respective IRR
	 * bit gets set nevertheless.  As a result the I/O unit expects an EOI
	 * message but it will never arrive and further interrupts are blocked
	 * from the source.  The exact reason is so far unknown, but the
	 * phenomenon was observed when two consecutive interrupt requests
	 * from a given source get delivered to the same CPU and the source is
	 * temporarily disabled in between.
	 *
	 * A workaround is to simulate an EOI message manually.  We achieve it
	 * by setting the trigger mode to edge and then to level when the edge
	 * trigger mode gets detected in the TMR of a local APIC for a
	 * level-triggered interrupt.  We mask the source for the time of the
	 * operation to prevent an edge-triggered interrupt escaping meanwhile.
	 * The idea is from Manfred Spraul.  --macro
	 */
	i = irq_cfg(irq)->vector;

	v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1));
#endif

	/*
	 * We must acknowledge the irq before we move it or the acknowledge will
	 * not propagate properly.
	 */
	ack_APIC_irq();

	/* Now we can move and re-enable the irq */
	if (unlikely(do_unmask_irq)) {
		/* Only migrate the irq if the ack has been received.
		 *
		 * On rare occasions the broadcast level triggered ack gets
		 * delayed going to ioapics, and if we reprogram the
		 * vector while Remote IRR is still set the irq will never
		 * fire again.
		 *
		 * To prevent this scenario we read the Remote IRR bit
		 * of the ioapic.  This has two effects.
		 * - On any sane system the read of the ioapic will
		 *   flush writes (and acks) going to the ioapic from
		 *   this cpu.
		 * - We get to see if the ACK has actually been delivered.
		 *
		 * Based on failed experiments of reprogramming the
		 * ioapic entry from outside of irq context starting
		 * with masking the ioapic entry and then polling until
		 * Remote IRR was clear before reprogramming the
		 * ioapic I don't trust the Remote IRR bit to be
		 * completely accurate.
		 *
		 * However there appears to be no other way to plug
		 * this race, so if the Remote IRR bit is not
		 * accurate and is causing problems then it is a hardware bug
		 * and you can go talk to the chipset vendor about it.
		 */
		if (!io_apic_level_ack_pending(irq))
			move_masked_irq(irq);
		unmask_IO_APIC_irq(irq);
	}

#ifdef CONFIG_X86_32
	if (!(v & (1 << (i & 0x1f)))) {
		atomic_inc(&irq_mis_count);
		spin_lock(&ioapic_lock);
		__mask_and_edge_IO_APIC_irq(irq);
		__unmask_and_level_IO_APIC_irq(irq);
		spin_unlock(&ioapic_lock);
	}
#endif
}
static struct irq_chip ioapic_chip __read_mostly = {
	.name		= "IO-APIC",
	.startup	= startup_ioapic_irq,
	.mask		= mask_IO_APIC_irq,
	.unmask		= unmask_IO_APIC_irq,
	.ack		= ack_apic_edge,
	.eoi		= ack_apic_level,
#ifdef CONFIG_SMP
	.set_affinity	= set_ioapic_affinity_irq,
#endif
	.retrigger	= ioapic_retrigger_irq,
};
#ifdef CONFIG_INTR_REMAP
static struct irq_chip ir_ioapic_chip __read_mostly = {
	.name		= "IR-IO-APIC",
	.startup	= startup_ioapic_irq,
	.mask		= mask_IO_APIC_irq,
	.unmask		= unmask_IO_APIC_irq,
	.ack		= ack_x2apic_edge,
	.eoi		= ack_x2apic_level,
#ifdef CONFIG_SMP
	.set_affinity	= set_ir_ioapic_affinity_irq,
#endif
	.retrigger	= ioapic_retrigger_irq,
};
#endif

static inline void init_IO_APIC_traps(void)
{
	int irq;
	struct irq_desc *desc;
	struct irq_cfg *cfg;

	/*
	 * NOTE! The local APIC isn't very good at handling
	 * multiple interrupts at the same interrupt level.
	 * As the interrupt level is determined by taking the
	 * vector number and shifting that right by 4, we
	 * want to spread these out a bit so that they don't
	 * all fall in the same interrupt level.
	 *
	 * Also, we've got to be careful not to trash gate
	 * 0x80, because int 0x80 is hm, kind of importantish. ;)
	 */
	for_each_irq_cfg(irq, cfg) {
		if (IO_APIC_IRQ(irq) && !cfg->vector) {
			/*
			 * Hmm.. We don't have an entry for this,
			 * so default to an old-fashioned 8259
			 * interrupt if we can..
			 */
			if (irq < 16)
				make_8259A_irq(irq);
			else {
				desc = irq_to_desc(irq);
				/* Strange. Oh, well.. */
				desc->chip = &no_irq_chip;
			}
		}
	}
}
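
/*
 * For illustration: with the layout described in the NOTE above, an
 * interrupt "level" is simply vector >> 4, so e.g. vector 0x31 sits at
 * level 3 while vector 0x41 sits at level 4 -- spacing vectors 16 apart
 * keeps them from piling up on one level.
 */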
/*
 * The local APIC irq-chip implementation:
 */

static void mask_lapic_irq(unsigned int irq)
{
	unsigned long v;

	v = apic_read(APIC_LVT0);
	apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
}

static void unmask_lapic_irq(unsigned int irq)
{
	unsigned long v;

	v = apic_read(APIC_LVT0);
	apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED);
}

static void ack_lapic_irq(unsigned int irq)
{
	ack_APIC_irq();
}

static struct irq_chip lapic_chip __read_mostly = {
	.name		= "local-APIC",
	.mask		= mask_lapic_irq,
	.unmask		= unmask_lapic_irq,
	.ack		= ack_lapic_irq,
};

static void lapic_register_intr(int irq)
{
	struct irq_desc *desc;

	desc = irq_to_desc(irq);
	desc->status &= ~IRQ_LEVEL;
	set_irq_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq,
				      "edge");
}

static void __init setup_nmi(void)
{
	/*
	 * Dirty trick to enable the NMI watchdog ...
	 * We put the 8259A master into AEOI mode and
	 * unmask on all local APICs LVT0 as NMI.
	 *
	 * The idea to use the 8259A in AEOI mode ('8259A Virtual Wire')
	 * is from Maciej W. Rozycki - so we do not have to EOI from
	 * the NMI handler or the timer interrupt.
	 */
	apic_printk(APIC_VERBOSE, KERN_INFO "activating NMI Watchdog ...");

	enable_NMI_through_LVT0();

	apic_printk(APIC_VERBOSE, " done.\n");
}

/*
 * This looks a bit hackish but it's about the only way of sending
 * a few INTA cycles to 8259As and any associated glue logic.  ICR does
 * not support the ExtINT mode, unfortunately.  We need to send these
 * cycles as some i82489DX-based boards have glue logic that keeps the
 * 8259A interrupt line asserted until INTA.  --macro
 */
static inline void __init unlock_ExtINT_logic(void)
{
	int apic, pin, i;
	struct IO_APIC_route_entry entry0, entry1;
	unsigned char save_control, save_freq_select;

	pin = find_isa_irq_pin(8, mp_INT);
	if (pin == -1) {
		WARN_ON_ONCE(1);
		return;
	}
	apic = find_isa_irq_apic(8, mp_INT);
	if (apic == -1) {
		WARN_ON_ONCE(1);
		return;
	}

	entry0 = ioapic_read_entry(apic, pin);
	clear_IO_APIC_pin(apic, pin);

	memset(&entry1, 0, sizeof(entry1));

	entry1.dest_mode = 0;			/* physical delivery */
	entry1.mask = 0;			/* unmask IRQ now */
	entry1.dest = hard_smp_processor_id();
	entry1.delivery_mode = dest_ExtINT;
	entry1.polarity = entry0.polarity;
	entry1.trigger = 0;
	entry1.vector = 0;

	ioapic_write_entry(apic, pin, entry1);

	save_control = CMOS_READ(RTC_CONTROL);
	save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
	CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6,
		   RTC_FREQ_SELECT);
	CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL);

	i = 100;
	while (i-- > 0) {
		mdelay(10);
		if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF)
			i -= 10;
	}

	CMOS_WRITE(save_control, RTC_CONTROL);
	CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
	clear_IO_APIC_pin(apic, pin);

	ioapic_write_entry(apic, pin, entry0);
}
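
/*
 * For illustration: the RTC (IRQ8) is used above because it can be made
 * to fire on its own -- programming RTC_FREQ_SELECT and setting RTC_PIE
 * makes the chip raise periodic interrupts, and each one delivered
 * through the temporary ExtINT entry produces the INTA cycles we need.
 * The mdelay() loop just waits until roughly ten periodic ticks (RTC_PF
 * getting set) have been observed before restoring the saved state.
 */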

static int disable_timer_pin_1 __initdata;
/* Actually the next is obsolete, but keep it for paranoid reasons -AK */
static int __init disable_timer_pin_setup(char *arg)
{
	disable_timer_pin_1 = 1;
	return 0;
}
early_param("disable_timer_pin_1", disable_timer_pin_setup);

int timer_through_8259 __initdata;

/*
 * This code may look a bit paranoid, but it's supposed to cooperate with
 * a wide range of boards and BIOS bugs.  Fortunately only the timer IRQ
 * is so screwy.  Thanks to Brian Perkins for testing/hacking this beast
 * fanatically on his truly buggy board.
 *
 * FIXME: really need to revamp this for all platforms.
 */
static inline void __init check_timer(void)
{
	struct irq_cfg *cfg = irq_cfg(0);
	int apic1, pin1, apic2, pin2;
	unsigned long flags;
	unsigned int ver;
	int no_pin1 = 0;

	local_irq_save(flags);

	ver = apic_read(APIC_LVR);
	ver = GET_APIC_VERSION(ver);

	/*
	 * get/set the timer IRQ vector:
	 */
	disable_8259A_irq(0);
	assign_irq_vector(0, TARGET_CPUS);

	/*
	 * As IRQ0 is to be enabled in the 8259A, the virtual
	 * wire has to be disabled in the local APIC.  Also
	 * timer interrupts need to be acknowledged manually in
	 * the 8259A for the i82489DX when using the NMI
	 * watchdog as that APIC treats NMIs as level-triggered.
	 * The AEOI mode will finish them in the 8259A
	 * automatically.
	 */
	apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
	init_8259A(1);
#ifdef CONFIG_X86_32
	timer_ack = (nmi_watchdog == NMI_IO_APIC && !APIC_INTEGRATED(ver));
#endif

	pin1  = find_isa_irq_pin(0, mp_INT);
	apic1 = find_isa_irq_apic(0, mp_INT);
	pin2  = ioapic_i8259.pin;
	apic2 = ioapic_i8259.apic;

	apic_printk(APIC_QUIET, KERN_INFO "..TIMER: vector=0x%02X "
		    "apic1=%d pin1=%d apic2=%d pin2=%d\n",
		    cfg->vector, apic1, pin1, apic2, pin2);

	/*
	 * Some BIOS writers are clueless and report the ExtINTA
	 * I/O APIC input from the cascaded 8259A as the timer
	 * interrupt input.  So just in case, if only one pin
	 * was found above, try it both directly and through the
	 * 8259A.
	 */
	if (pin1 == -1) {
#ifdef CONFIG_INTR_REMAP
		if (intr_remapping_enabled)
			panic("BIOS bug: timer not connected to IO-APIC");
#endif
		pin1  = pin2;
		apic1 = apic2;
		no_pin1 = 1;
	} else if (pin2 == -1) {
		pin2  = pin1;
		apic2 = apic1;
	}

	if (pin1 != -1) {
		/*
		 * Ok, does IRQ0 through the IOAPIC work?
		 */
		if (no_pin1) {
			add_pin_to_irq(0, apic1, pin1);
			setup_timer_IRQ0_pin(apic1, pin1, cfg->vector);
		}
		unmask_IO_APIC_irq(0);
		if (timer_irq_works()) {
			if (nmi_watchdog == NMI_IO_APIC) {
				setup_nmi();
				enable_8259A_irq(0);
			}
			if (disable_timer_pin_1 > 0)
				clear_IO_APIC_pin(0, pin1);
			goto out;
		}
#ifdef CONFIG_INTR_REMAP
		if (intr_remapping_enabled)
			panic("timer doesn't work through Interrupt-remapped IO-APIC");
#endif
		clear_IO_APIC_pin(apic1, pin1);
		if (!no_pin1)
			apic_printk(APIC_QUIET, KERN_ERR "..MP-BIOS bug: "
				    "8254 timer not connected to IO-APIC\n");

		apic_printk(APIC_QUIET, KERN_INFO "...trying to set up timer "
			    "(IRQ0) through the 8259A ...\n");
		apic_printk(APIC_QUIET, KERN_INFO
			    "..... (found apic %d pin %d) ...\n", apic2, pin2);
		/*
		 * legacy devices should be connected to IO APIC #0
		 */
		replace_pin_at_irq(0, apic1, pin1, apic2, pin2);
		setup_timer_IRQ0_pin(apic2, pin2, cfg->vector);
		unmask_IO_APIC_irq(0);
		enable_8259A_irq(0);
		if (timer_irq_works()) {
			apic_printk(APIC_QUIET, KERN_INFO "....... works.\n");
			timer_through_8259 = 1;
			if (nmi_watchdog == NMI_IO_APIC) {
				disable_8259A_irq(0);
				setup_nmi();
				enable_8259A_irq(0);
			}
			goto out;
		}
		/*
		 * Cleanup, just in case ...
		 */
		disable_8259A_irq(0);
		clear_IO_APIC_pin(apic2, pin2);
		apic_printk(APIC_QUIET, KERN_INFO "....... failed.\n");
	}

	if (nmi_watchdog == NMI_IO_APIC) {
		apic_printk(APIC_QUIET, KERN_WARNING "timer doesn't work "
			    "through the IO-APIC - disabling NMI Watchdog!\n");
		nmi_watchdog = NMI_NONE;
	}
#ifdef CONFIG_X86_32
	timer_ack = 0;
#endif

	apic_printk(APIC_QUIET, KERN_INFO
		    "...trying to set up timer as Virtual Wire IRQ...\n");

	lapic_register_intr(0);
	apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector);	/* Fixed mode */
	enable_8259A_irq(0);

	if (timer_irq_works()) {
		apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
		goto out;
	}
	disable_8259A_irq(0);
	apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector);
	apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n");

	apic_printk(APIC_QUIET, KERN_INFO
		    "...trying to set up timer as ExtINT IRQ...\n");

	init_8259A(0);
	make_8259A_irq(0);
	apic_write(APIC_LVT0, APIC_DM_EXTINT);

	unlock_ExtINT_logic();

	if (timer_irq_works()) {
		apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
		goto out;
	}
	apic_printk(APIC_QUIET, KERN_INFO "..... failed :(.\n");
	panic("IO-APIC + timer doesn't work!  Boot with apic=debug and send a "
	      "report.  Then try booting with the 'noapic' option.\n");
out:
	local_irq_restore(flags);
}
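
/*
 * In short, check_timer() above walks a fallback ladder: IRQ0 straight
 * through the IO-APIC pin reported for the timer, then the same IRQ
 * routed via the 8259A cascade pin, then the local APIC LVT0 as a
 * "Virtual Wire" fixed-mode interrupt, and finally LVT0 in ExtINT mode;
 * if none of these produce timer ticks we panic with boot advice.
 */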

/*
 * Traditionally ISA IRQ2 is the cascade IRQ, and is not available
 * to devices.  However there may be an I/O APIC pin available for
 * this interrupt regardless.  The pin may be left unconnected, but
 * typically it will be reused as an ExtINT cascade interrupt for
 * the master 8259A.  In the MPS case such a pin will normally be
 * reported as an ExtINT interrupt in the MP table.  With ACPI
 * there is no provision for ExtINT interrupts, and in the absence
 * of an override it would be treated as an ordinary ISA I/O APIC
 * interrupt, that is edge-triggered and unmasked by default.  We
 * used to do this, but it caused problems on some systems because
 * of the NMI watchdog and sometimes IRQ0 of the 8254 timer using
 * the same ExtINT cascade interrupt to drive the local APIC of the
 * bootstrap processor.  Therefore we refrain from routing IRQ2 to
 * the I/O APIC in all cases now.  No actual device should request
 * it anyway.  --macro
 */

#define PIC_IRQS	(1 << PIC_CASCADE_IR)

void __init setup_IO_APIC(void)
{
#ifdef CONFIG_X86_32
	enable_IO_APIC();
#else
	/*
	 * calling enable_IO_APIC() is moved to setup_local_APIC for BP
	 */
#endif

	io_apic_irqs = ~PIC_IRQS;

	apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n");
	/*
	 * Set up IO-APIC IRQ routing.
	 */
#ifdef CONFIG_X86_32
	if (!acpi_ioapic)
		setup_ioapic_ids_from_mpc();
#endif
	sync_Arb_IDs();
	setup_IO_APIC_irqs();
	init_IO_APIC_traps();
	check_timer();
}

/*
 * Called after all the initialization is done.  If we didn't find any
 * APIC bugs then we can allow the modify fast path
 */

static int __init io_apic_bug_finalize(void)
{
	if (sis_apic_bug == -1)
		sis_apic_bug = 0;
	return 0;
}

late_initcall(io_apic_bug_finalize);

struct sysfs_ioapic_data {
	struct sys_device dev;
	struct IO_APIC_route_entry entry[0];
};
static struct sysfs_ioapic_data *mp_ioapic_data[MAX_IO_APICS];

static int ioapic_suspend(struct sys_device *dev, pm_message_t state)
{
	struct IO_APIC_route_entry *entry;
	struct sysfs_ioapic_data *data;
	int i;

	data = container_of(dev, struct sysfs_ioapic_data, dev);
	entry = data->entry;
	for (i = 0; i < nr_ioapic_registers[dev->id]; i++, entry++)
		*entry = ioapic_read_entry(dev->id, i);

	return 0;
}

static int ioapic_resume(struct sys_device *dev)
{
	struct IO_APIC_route_entry *entry;
	struct sysfs_ioapic_data *data;
	unsigned long flags;
	union IO_APIC_reg_00 reg_00;
	int i;

	data = container_of(dev, struct sysfs_ioapic_data, dev);
	entry = data->entry;

	spin_lock_irqsave(&ioapic_lock, flags);
	reg_00.raw = io_apic_read(dev->id, 0);
	if (reg_00.bits.ID != mp_ioapics[dev->id].mp_apicid) {
		reg_00.bits.ID = mp_ioapics[dev->id].mp_apicid;
		io_apic_write(dev->id, 0, reg_00.raw);
	}
	spin_unlock_irqrestore(&ioapic_lock, flags);
	for (i = 0; i < nr_ioapic_registers[dev->id]; i++)
		ioapic_write_entry(dev->id, i, entry[i]);

	return 0;
}

static struct sysdev_class ioapic_sysdev_class = {
	.name = "ioapic",
	.suspend = ioapic_suspend,
	.resume = ioapic_resume,
};

static int __init ioapic_init_sysfs(void)
{
	struct sys_device *dev;
	int i, size, error;

	error = sysdev_class_register(&ioapic_sysdev_class);
	if (error)
		return error;

	for (i = 0; i < nr_ioapics; i++) {
		size = sizeof(struct sys_device) + nr_ioapic_registers[i]
			* sizeof(struct IO_APIC_route_entry);
		mp_ioapic_data[i] = kzalloc(size, GFP_KERNEL);
		if (!mp_ioapic_data[i]) {
			printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
			continue;
		}
		dev = &mp_ioapic_data[i]->dev;
		dev->id = i;
		dev->cls = &ioapic_sysdev_class;
		error = sysdev_register(dev);
		if (error) {
			kfree(mp_ioapic_data[i]);
			mp_ioapic_data[i] = NULL;
			printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
			continue;
		}
	}

	return 0;
}

device_initcall(ioapic_init_sysfs);

/*
 * Dynamic irq allocation and deallocation
 */
unsigned int create_irq_nr(unsigned int irq_want)
{
	/* Allocate an unused irq */
	unsigned int irq;
	unsigned int new;
	unsigned long flags;
	struct irq_cfg *cfg_new;

	irq_want = nr_irqs - 1;

	irq = 0;
	spin_lock_irqsave(&vector_lock, flags);
	for (new = irq_want; new > 0; new--) {
		if (platform_legacy_irq(new))
			continue;
		cfg_new = irq_cfg(new);
		if (cfg_new && cfg_new->vector != 0)
			continue;
		/* check if need to create one */
		if (!cfg_new)
			cfg_new = irq_cfg_alloc(new);
		if (__assign_irq_vector(new, TARGET_CPUS) == 0)
			irq = new;
		break;
	}
	spin_unlock_irqrestore(&vector_lock, flags);

	if (irq > 0)
		dynamic_irq_init(irq);

	return irq;
}

int create_irq(void)
{
	int irq;

	irq = create_irq_nr(nr_irqs - 1);

	if (irq == 0)
		irq = -1;

	return irq;
}

void destroy_irq(unsigned int irq)
{
	unsigned long flags;

	dynamic_irq_cleanup(irq);

#ifdef CONFIG_INTR_REMAP
	free_irte(irq);
#endif
	spin_lock_irqsave(&vector_lock, flags);
	__clear_irq_vector(irq);
	spin_unlock_irqrestore(&vector_lock, flags);
}
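
/*
 * Usage sketch (illustrative, not part of the original file): a driver
 * that needs a free vector would pair create_irq()/destroy_irq() like
 * this -- example_handler and the "example" name are made up.
 */
#if 0	/* example only, not compiled */
static int example_grab_dynamic_irq(void)
{
	int irq = create_irq();		/* returns -1 if no vector is free */

	if (irq < 0)
		return -ENOSPC;
	if (request_irq(irq, example_handler, 0, "example", NULL)) {
		destroy_irq(irq);	/* give the vector back on failure */
		return -EBUSY;
	}
	return irq;
}
#endif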

/*
 * MSI message composition
 */
#ifdef CONFIG_PCI_MSI
static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
{
	struct irq_cfg *cfg;
	int err;
	unsigned dest;
	cpumask_t tmp;

	tmp = TARGET_CPUS;
	err = assign_irq_vector(irq, tmp);
	if (err)
		return err;

	cfg = irq_cfg(irq);
	cpus_and(tmp, cfg->domain, tmp);
	dest = cpu_mask_to_apicid(tmp);

#ifdef CONFIG_INTR_REMAP
	if (irq_remapped(irq)) {
		struct irte irte;
		int ir_index;
		u16 sub_handle;

		ir_index = map_irq_to_irte_handle(irq, &sub_handle);
		BUG_ON(ir_index == -1);

		memset(&irte, 0, sizeof(irte));

		irte.present = 1;
		irte.dst_mode = INT_DEST_MODE;
		irte.trigger_mode = 0; /* edge */
		irte.dlvry_mode = INT_DELIVERY_MODE;
		irte.vector = cfg->vector;
		irte.dest_id = IRTE_DEST(dest);

		modify_irte(irq, &irte);

		msg->address_hi = MSI_ADDR_BASE_HI;
		msg->data = sub_handle;
		msg->address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_IR_EXT_INT |
				  MSI_ADDR_IR_SHV |
				  MSI_ADDR_IR_INDEX1(ir_index) |
				  MSI_ADDR_IR_INDEX2(ir_index);
	} else
#endif
	{
		msg->address_hi = MSI_ADDR_BASE_HI;
		msg->address_lo =
			MSI_ADDR_BASE_LO |
			((INT_DEST_MODE == 0) ?
				MSI_ADDR_DEST_MODE_PHYSICAL :
				MSI_ADDR_DEST_MODE_LOGICAL) |
			((INT_DELIVERY_MODE != dest_LowestPrio) ?
				MSI_ADDR_REDIRECTION_CPU :
				MSI_ADDR_REDIRECTION_LOWPRI) |
			MSI_ADDR_DEST_ID(dest);

		msg->data =
			MSI_DATA_TRIGGER_EDGE |
			MSI_DATA_LEVEL_ASSERT |
			((INT_DELIVERY_MODE != dest_LowestPrio) ?
				MSI_DATA_DELIVERY_FIXED :
				MSI_DATA_DELIVERY_LOWPRI) |
			MSI_DATA_VECTOR(cfg->vector);
	}
	return err;
}
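
/*
 * For reference: the message composed above follows the x86 MSI format
 * -- an address of the form 0xFEExxxxx with the destination APIC ID in
 * bits 19-12 and the destination/redirection mode bits below it, and a
 * data word carrying the vector in bits 7-0 plus delivery-mode and
 * trigger bits, mirroring the low word of an IO-APIC redirection entry.
 */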

#ifdef CONFIG_SMP
static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
{
	struct irq_cfg *cfg;
	struct msi_msg msg;
	unsigned int dest;
	cpumask_t tmp;
	struct irq_desc *desc;

	cpus_and(tmp, mask, cpu_online_map);
	if (cpus_empty(tmp))
		return;

	if (assign_irq_vector(irq, mask))
		return;

	cfg = irq_cfg(irq);
	cpus_and(tmp, cfg->domain, mask);
	dest = cpu_mask_to_apicid(tmp);

	read_msi_msg(irq, &msg);

	msg.data &= ~MSI_DATA_VECTOR_MASK;
	msg.data |= MSI_DATA_VECTOR(cfg->vector);
	msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
	msg.address_lo |= MSI_ADDR_DEST_ID(dest);

	write_msi_msg(irq, &msg);
	desc = irq_to_desc(irq);
	desc->affinity = mask;
}

#ifdef CONFIG_INTR_REMAP
/*
 * Migrate the MSI irq to another cpumask.  This migration is
 * done in the process context using interrupt-remapping hardware.
 */
static void ir_set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
{
	struct irq_cfg *cfg;
	unsigned int dest;
	cpumask_t tmp, cleanup_mask;
	struct irte irte;
	struct irq_desc *desc;

	cpus_and(tmp, mask, cpu_online_map);
	if (cpus_empty(tmp))
		return;

	if (get_irte(irq, &irte))
		return;

	if (assign_irq_vector(irq, mask))
		return;

	cfg = irq_cfg(irq);
	cpus_and(tmp, cfg->domain, mask);
	dest = cpu_mask_to_apicid(tmp);

	irte.vector = cfg->vector;
	irte.dest_id = IRTE_DEST(dest);

	/*
	 * atomically update the IRTE with the new destination and vector.
	 */
	modify_irte(irq, &irte);

	/*
	 * After this point, all the interrupts will start arriving
	 * at the new destination.  So, time to cleanup the previous
	 * vector allocation.
	 */
	if (cfg->move_in_progress) {
		cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
		cfg->move_cleanup_count = cpus_weight(cleanup_mask);
		send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
		cfg->move_in_progress = 0;
	}

	desc = irq_to_desc(irq);
	desc->affinity = mask;
}
#endif
#endif /* CONFIG_SMP */

/*
 * IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices,
 * which implement the MSI or MSI-X Capability Structure.
 */
static struct irq_chip msi_chip = {
	.name		= "PCI-MSI",
	.unmask		= unmask_msi_irq,
	.mask		= mask_msi_irq,
	.ack		= ack_apic_edge,
#ifdef CONFIG_SMP
	.set_affinity	= set_msi_irq_affinity,
#endif
	.retrigger	= ioapic_retrigger_irq,
};

#ifdef CONFIG_INTR_REMAP
static struct irq_chip msi_ir_chip = {
	.name		= "IR-PCI-MSI",
	.unmask		= unmask_msi_irq,
	.mask		= mask_msi_irq,
	.ack		= ack_x2apic_edge,
#ifdef CONFIG_SMP
	.set_affinity	= ir_set_msi_irq_affinity,
#endif
	.retrigger	= ioapic_retrigger_irq,
};

/*
 * Map the PCI dev to the corresponding remapping hardware unit
 * and allocate 'nvec' consecutive interrupt-remapping table entries
 * in it.
 */
static int msi_alloc_irte(struct pci_dev *dev, int irq, int nvec)
{
	struct intel_iommu *iommu;
	int index;

	iommu = map_dev_to_ir(dev);
	if (!iommu) {
		printk(KERN_ERR
		       "Unable to map PCI %s to iommu\n", pci_name(dev));
		return -ENOENT;
	}

	index = alloc_irte(iommu, irq, nvec);
	if (index < 0) {
		printk(KERN_ERR
		       "Unable to allocate %d IRTE for PCI %s\n", nvec,
		       pci_name(dev));
		return -ENOSPC;
	}
	return index;
}
#endif

static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc, int irq)
{
	int ret;
	struct msi_msg msg;

	ret = msi_compose_msg(dev, irq, &msg);
	if (ret < 0)
		return ret;

	set_irq_msi(irq, desc);
	write_msi_msg(irq, &msg);

#ifdef CONFIG_INTR_REMAP
	if (irq_remapped(irq)) {
		struct irq_desc *desc = irq_to_desc(irq);
		/*
		 * irq migration in process context
		 */
		desc->status |= IRQ_MOVE_PCNTXT;
		set_irq_chip_and_handler_name(irq, &msi_ir_chip, handle_edge_irq, "edge");
	} else
#endif
		set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq, "edge");

	dev_printk(KERN_DEBUG, &dev->dev, "irq %d for MSI/MSI-X\n", irq);

	return 0;
}

static unsigned int build_irq_for_pci_dev(struct pci_dev *dev)
{
	unsigned int irq;

	irq = dev->bus->number;
	irq <<= 8;
	irq |= dev->devfn;
	irq <<= 12;

	return irq;
}
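
/*
 * Worked example (values made up): for a device at bus 0x03, devfn 0x28
 * this yields (0x03 << 8 | 0x28) << 12 = 0x328000, so the irq_want
 * hints derived from it below are unique per device and well away from
 * the legacy IRQ range.
 */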

int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
{
	unsigned int irq;
	int ret;
	unsigned int irq_want;

	irq_want = build_irq_for_pci_dev(dev) + 0x100;

	irq = create_irq_nr(irq_want);
	if (irq == 0)
		return -1;

#ifdef CONFIG_INTR_REMAP
	if (!intr_remapping_enabled)
		goto no_ir;

	ret = msi_alloc_irte(dev, irq, 1);
	if (ret < 0)
		goto error;
no_ir:
#endif
	ret = setup_msi_irq(dev, desc, irq);
	if (ret < 0) {
		destroy_irq(irq);
		return ret;
	}
	return 0;

#ifdef CONFIG_INTR_REMAP
error:
	destroy_irq(irq);
	return ret;
#endif
}

int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
	unsigned int irq;
	int ret, sub_handle;
	struct msi_desc *desc;
	unsigned int irq_want;

#ifdef CONFIG_INTR_REMAP
	struct intel_iommu *iommu = NULL;
	int index = 0;
#endif

	irq_want = build_irq_for_pci_dev(dev) + 0x100;
	sub_handle = 0;
	list_for_each_entry(desc, &dev->msi_list, list) {
		irq = create_irq_nr(irq_want--);
		if (irq == 0)
			return -1;
#ifdef CONFIG_INTR_REMAP
		if (!intr_remapping_enabled)
			goto no_ir;

		if (!sub_handle) {
			/*
			 * allocate the consecutive block of IRTE's
			 * for 'nvec'
			 */
			index = msi_alloc_irte(dev, irq, nvec);
			if (index < 0) {
				ret = index;
				goto error;
			}
		} else {
			iommu = map_dev_to_ir(dev);
			if (!iommu) {
				ret = -ENOENT;
				goto error;
			}
			/*
			 * setup the mapping between the irq and the IRTE
			 * base index, the sub_handle pointing to the
			 * appropriate interrupt remap table entry.
			 */
			set_irte_irq(irq, iommu, index, sub_handle);
		}
no_ir:
#endif
		ret = setup_msi_irq(dev, desc, irq);
		if (ret < 0)
			goto error;
		sub_handle++;
	}
	return 0;

error:
	destroy_irq(irq);
	return ret;
}

void arch_teardown_msi_irq(unsigned int irq)
{
	destroy_irq(irq);
}

#ifdef CONFIG_DMAR
#ifdef CONFIG_SMP
static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask)
{
	struct irq_cfg *cfg;
	struct msi_msg msg;
	unsigned int dest;
	cpumask_t tmp;
	struct irq_desc *desc;

	cpus_and(tmp, mask, cpu_online_map);
	if (cpus_empty(tmp))
		return;

	if (assign_irq_vector(irq, mask))
		return;

	cfg = irq_cfg(irq);
	cpus_and(tmp, cfg->domain, mask);
	dest = cpu_mask_to_apicid(tmp);

	dmar_msi_read(irq, &msg);

	msg.data &= ~MSI_DATA_VECTOR_MASK;
	msg.data |= MSI_DATA_VECTOR(cfg->vector);
	msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
	msg.address_lo |= MSI_ADDR_DEST_ID(dest);

	dmar_msi_write(irq, &msg);
	desc = irq_to_desc(irq);
	desc->affinity = mask;
}
#endif /* CONFIG_SMP */

struct irq_chip dmar_msi_type = {
	.name		= "DMAR_MSI",
	.unmask		= dmar_msi_unmask,
	.mask		= dmar_msi_mask,
	.ack		= ack_apic_edge,
#ifdef CONFIG_SMP
	.set_affinity	= dmar_msi_set_affinity,
#endif
	.retrigger	= ioapic_retrigger_irq,
};

int arch_setup_dmar_msi(unsigned int irq)
{
	int ret;
	struct msi_msg msg;

	ret = msi_compose_msg(NULL, irq, &msg);
	if (ret < 0)
		return ret;
	dmar_msi_write(irq, &msg);
	set_irq_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq,
				      "edge");
	return 0;
}
#endif

#ifdef CONFIG_HPET_TIMER

#ifdef CONFIG_SMP
static void hpet_msi_set_affinity(unsigned int irq, cpumask_t mask)
{
	struct irq_cfg *cfg;
	struct irq_desc *desc;
	struct msi_msg msg;
	unsigned int dest;
	cpumask_t tmp;

	cpus_and(tmp, mask, cpu_online_map);
	if (cpus_empty(tmp))
		return;

	if (assign_irq_vector(irq, mask))
		return;

	cfg = irq_cfg(irq);
	cpus_and(tmp, cfg->domain, mask);
	dest = cpu_mask_to_apicid(tmp);

	hpet_msi_read(irq, &msg);

	msg.data &= ~MSI_DATA_VECTOR_MASK;
	msg.data |= MSI_DATA_VECTOR(cfg->vector);
	msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
	msg.address_lo |= MSI_ADDR_DEST_ID(dest);

	hpet_msi_write(irq, &msg);
	desc = irq_to_desc(irq);
	desc->affinity = mask;
}
#endif /* CONFIG_SMP */

struct irq_chip hpet_msi_type = {
	.name		= "HPET_MSI",
	.unmask		= hpet_msi_unmask,
	.mask		= hpet_msi_mask,
	.ack		= ack_apic_edge,
#ifdef CONFIG_SMP
	.set_affinity	= hpet_msi_set_affinity,
#endif
	.retrigger	= ioapic_retrigger_irq,
};

int arch_setup_hpet_msi(unsigned int irq)
{
	int ret;
	struct msi_msg msg;

	ret = msi_compose_msg(NULL, irq, &msg);
	if (ret < 0)
		return ret;

	hpet_msi_write(irq, &msg);
	set_irq_chip_and_handler_name(irq, &hpet_msi_type, handle_edge_irq,
				      "edge");

	return 0;
}
#endif

#endif /* CONFIG_PCI_MSI */

/*
 * Hypertransport interrupt support
 */
#ifdef CONFIG_HT_IRQ

#ifdef CONFIG_SMP

static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector)
{
	struct ht_irq_msg msg;
	fetch_ht_irq_msg(irq, &msg);

	msg.address_lo &= ~(HT_IRQ_LOW_VECTOR_MASK | HT_IRQ_LOW_DEST_ID_MASK);
	msg.address_hi &= ~(HT_IRQ_HIGH_DEST_ID_MASK);

	msg.address_lo |= HT_IRQ_LOW_VECTOR(vector) | HT_IRQ_LOW_DEST_ID(dest);
	msg.address_hi |= HT_IRQ_HIGH_DEST_ID(dest);

	write_ht_irq_msg(irq, &msg);
}

static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask)
{
	struct irq_cfg *cfg;
	unsigned int dest;
	cpumask_t tmp;
	struct irq_desc *desc;

	cpus_and(tmp, mask, cpu_online_map);
	if (cpus_empty(tmp))
		return;

	if (assign_irq_vector(irq, mask))
		return;

	cfg = irq_cfg(irq);
	cpus_and(tmp, cfg->domain, mask);
	dest = cpu_mask_to_apicid(tmp);

	target_ht_irq(irq, dest, cfg->vector);
	desc = irq_to_desc(irq);
	desc->affinity = mask;
}
#endif

static struct irq_chip ht_irq_chip = {
	.name		= "PCI-HT",
	.mask		= mask_ht_irq,
	.unmask		= unmask_ht_irq,
	.ack		= ack_apic_edge,
#ifdef CONFIG_SMP
	.set_affinity	= set_ht_irq_affinity,
#endif
	.retrigger	= ioapic_retrigger_irq,
};

int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
{
	struct irq_cfg *cfg;
	int err;
	cpumask_t tmp;

	tmp = TARGET_CPUS;
	err = assign_irq_vector(irq, tmp);
	if (!err) {
		struct ht_irq_msg msg;
		unsigned dest;

		cfg = irq_cfg(irq);
		cpus_and(tmp, cfg->domain, tmp);
		dest = cpu_mask_to_apicid(tmp);

		msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);

		msg.address_lo =
			HT_IRQ_LOW_BASE |
			HT_IRQ_LOW_DEST_ID(dest) |
			HT_IRQ_LOW_VECTOR(cfg->vector) |
			((INT_DEST_MODE == 0) ?
				HT_IRQ_LOW_DM_PHYSICAL :
				HT_IRQ_LOW_DM_LOGICAL) |
			HT_IRQ_LOW_RQEOI_EDGE |
			((INT_DELIVERY_MODE != dest_LowestPrio) ?
				HT_IRQ_LOW_MT_FIXED :
				HT_IRQ_LOW_MT_ARBITRATED) |
			HT_IRQ_LOW_IRQ_MASKED;

		write_ht_irq_msg(irq, &msg);

		set_irq_chip_and_handler_name(irq, &ht_irq_chip,
					      handle_edge_irq, "edge");

		dev_printk(KERN_DEBUG, &dev->dev, "irq %d for HT\n", irq);
	}
	return err;
}
#endif /* CONFIG_HT_IRQ */

#ifdef CONFIG_X86_64
/*
 * Re-target the irq to the specified CPU and enable the specified MMR located
 * on the specified blade to allow the sending of MSIs to the specified CPU.
 */
int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
		       unsigned long mmr_offset)
{
	const cpumask_t *eligible_cpu = get_cpu_mask(cpu);
	struct irq_cfg *cfg;
	int mmr_pnode;
	unsigned long mmr_value;
	struct uv_IO_APIC_route_entry *entry;
	unsigned long flags;
	int err;

	err = assign_irq_vector(irq, *eligible_cpu);
	if (err != 0)
		return err;

	spin_lock_irqsave(&vector_lock, flags);
	set_irq_chip_and_handler_name(irq, &uv_irq_chip, handle_percpu_irq,
				      irq_name);
	spin_unlock_irqrestore(&vector_lock, flags);

	cfg = irq_cfg(irq);

	mmr_value = 0;
	entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
	BUG_ON(sizeof(struct uv_IO_APIC_route_entry) != sizeof(unsigned long));

	entry->vector = cfg->vector;
	entry->delivery_mode = INT_DELIVERY_MODE;
	entry->dest_mode = INT_DEST_MODE;
	entry->polarity = 0;
	entry->trigger = 0;
	entry->mask = 0;
	entry->dest = cpu_mask_to_apicid(*eligible_cpu);

	mmr_pnode = uv_blade_to_pnode(mmr_blade);
	uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);

	return irq;
}

/*
 * Disable the specified MMR located on the specified blade so that MSIs are
 * no longer allowed to be sent.
 */
void arch_disable_uv_irq(int mmr_blade, unsigned long mmr_offset)
{
	unsigned long mmr_value;
	struct uv_IO_APIC_route_entry *entry;
	int mmr_pnode;

	mmr_value = 0;
	entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
	BUG_ON(sizeof(struct uv_IO_APIC_route_entry) != sizeof(unsigned long));

	entry->mask = 1;

	mmr_pnode = uv_blade_to_pnode(mmr_blade);
	uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
}
#endif /* CONFIG_X86_64 */

int __init io_apic_get_redir_entries(int ioapic)
{
	union IO_APIC_reg_01 reg_01;
	unsigned long flags;

	spin_lock_irqsave(&ioapic_lock, flags);
	reg_01.raw = io_apic_read(ioapic, 1);
	spin_unlock_irqrestore(&ioapic_lock, flags);

	return reg_01.bits.entries;
}
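
/*
 * For illustration: IO-APIC register 1 packs the version in bits 7-0
 * and the highest redirection entry index in bits 23-16, so a raw read
 * of 0x00170011 means version 0x11 with 0x17 + 1 = 24 pins.
 */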

int __init probe_nr_irqs(void)
{
	int idx;
	int nr = 0;
	int nr_min = NR_IRQS;

	for (idx = 0; idx < nr_ioapics; idx++)
		nr += io_apic_get_redir_entries(idx) + 1;

	/* double it for hotplug and msi and nmi */
	nr <<= 1;

	/* something wrong ? */
	if (nr < nr_min)
		nr = nr_min;

	return nr;
}

/* --------------------------------------------------------------------------
                          ACPI-based IOAPIC Configuration
   -------------------------------------------------------------------------- */

#ifdef CONFIG_ACPI

#ifdef CONFIG_X86_32
int __init io_apic_get_unique_id(int ioapic, int apic_id)
{
	union IO_APIC_reg_00 reg_00;
	static physid_mask_t apic_id_map = PHYSID_MASK_NONE;
	physid_mask_t tmp;
	unsigned long flags;
	int i = 0;

	/*
	 * The P4 platform supports up to 256 APIC IDs on two separate APIC
	 * buses (one for LAPICs, one for IOAPICs), where predecessors only
	 * support up to 16 on one shared APIC bus.
	 *
	 * TBD: Expand LAPIC/IOAPIC support on P4-class systems to take full
	 *      advantage of new APIC bus architecture.
	 */

	if (physids_empty(apic_id_map))
		apic_id_map = ioapic_phys_id_map(phys_cpu_present_map);

	spin_lock_irqsave(&ioapic_lock, flags);
	reg_00.raw = io_apic_read(ioapic, 0);
	spin_unlock_irqrestore(&ioapic_lock, flags);

	if (apic_id >= get_physical_broadcast()) {
		printk(KERN_WARNING "IOAPIC[%d]: Invalid apic_id %d, trying "
			"%d\n", ioapic, apic_id, reg_00.bits.ID);
		apic_id = reg_00.bits.ID;
	}

	/*
	 * Every APIC in a system must have a unique ID or we get lots of nice
	 * 'stuck on smp_invalidate_needed IPI wait' messages.
	 */
	if (check_apicid_used(apic_id_map, apic_id)) {

		for (i = 0; i < get_physical_broadcast(); i++) {
			if (!check_apicid_used(apic_id_map, i))
				break;
		}

		if (i == get_physical_broadcast())
			panic("Max apic_id exceeded!\n");

		printk(KERN_WARNING "IOAPIC[%d]: apic_id %d already used, "
			"trying %d\n", ioapic, apic_id, i);

		apic_id = i;
	}

	tmp = apicid_to_cpu_present(apic_id);
	physids_or(apic_id_map, apic_id_map, tmp);

	if (reg_00.bits.ID != apic_id) {
		reg_00.bits.ID = apic_id;

		spin_lock_irqsave(&ioapic_lock, flags);
		io_apic_write(ioapic, 0, reg_00.raw);
		reg_00.raw = io_apic_read(ioapic, 0);
		spin_unlock_irqrestore(&ioapic_lock, flags);

		/* Sanity check */
		if (reg_00.bits.ID != apic_id) {
			printk("IOAPIC[%d]: Unable to change apic_id!\n", ioapic);
			return -1;
		}
	}

	apic_printk(APIC_VERBOSE, KERN_INFO
			"IOAPIC[%d]: Assigned apic_id %d\n", ioapic, apic_id);

	return apic_id;
}

int __init io_apic_get_version(int ioapic)
{
	union IO_APIC_reg_01 reg_01;
	unsigned long flags;

	spin_lock_irqsave(&ioapic_lock, flags);
	reg_01.raw = io_apic_read(ioapic, 1);
	spin_unlock_irqrestore(&ioapic_lock, flags);

	return reg_01.bits.version;
}
#endif

int io_apic_set_pci_routing(int ioapic, int pin, int irq, int triggering, int polarity)
{
	if (!IO_APIC_IRQ(irq)) {
		apic_printk(APIC_QUIET, KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n",
			ioapic);
		return -EINVAL;
	}

	/*
	 * IRQs < 16 are already in the irq_2_pin[] map
	 */
	if (irq >= 16)
		add_pin_to_irq(irq, ioapic, pin);

	setup_IO_APIC_irq(ioapic, pin, irq, triggering, polarity);

	return 0;
}
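
/*
 * Usage sketch (values made up): the ACPI GSI code would route, say,
 * GSI 20 as a level-triggered, active-low PCI interrupt with
 * io_apic_set_pci_routing(0, 20, 20, 1, 1).
 */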

int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity)
{
	int i;

	if (skip_ioapic_setup)
		return -1;

	for (i = 0; i < mp_irq_entries; i++)
		if (mp_irqs[i].mp_irqtype == mp_INT &&
		    mp_irqs[i].mp_srcbusirq == bus_irq)
			break;
	if (i >= mp_irq_entries)
		return -1;

	*trigger = irq_trigger(i);
	*polarity = irq_polarity(i);
	return 0;
}

#endif /* CONFIG_ACPI */

/*
 * This function currently is only a helper for the i386 smp boot process where
 * we need to reprogram the ioredtbls to cater for the cpus which have come
 * online, so the mask in all cases should simply be TARGET_CPUS
 */
#ifdef CONFIG_SMP
void __init setup_ioapic_dest(void)
{
	int pin, ioapic, irq, irq_entry;
	struct irq_cfg *cfg;

	if (skip_ioapic_setup == 1)
		return;

	for (ioapic = 0; ioapic < nr_ioapics; ioapic++) {
		for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) {
			irq_entry = find_irq_entry(ioapic, pin, mp_INT);
			if (irq_entry == -1)
				continue;
			irq = pin_2_irq(irq_entry, ioapic, pin);

			/* setup_IO_APIC_irqs could fail to get vector for some device
			 * when you have too many devices, because at that time only boot
			 * cpu is online.
			 */
			cfg = irq_cfg(irq);
			if (!cfg->vector)
				setup_IO_APIC_irq(ioapic, pin, irq,
						  irq_trigger(irq_entry),
						  irq_polarity(irq_entry));
#ifdef CONFIG_INTR_REMAP
			else if (intr_remapping_enabled)
				set_ir_ioapic_affinity_irq(irq, TARGET_CPUS);
			else
#endif
				set_ioapic_affinity_irq(irq, TARGET_CPUS);
		}
	}
}
#endif

#define IOAPIC_RESOURCE_NAME_SIZE 11

static struct resource *ioapic_resources;

static struct resource * __init ioapic_setup_resources(void)
{
	unsigned long n;
	struct resource *res;
	char *mem;
	int i;

	if (nr_ioapics <= 0)
		return NULL;

	n = IOAPIC_RESOURCE_NAME_SIZE + sizeof(struct resource);
	n *= nr_ioapics;

	mem = alloc_bootmem(n);
	res = (void *)mem;

	if (mem != NULL) {
		mem += sizeof(struct resource) * nr_ioapics;

		for (i = 0; i < nr_ioapics; i++) {
			res[i].name = mem;
			res[i].flags = IORESOURCE_MEM | IORESOURCE_BUSY;
			sprintf(mem, "IOAPIC %u", i);
			mem += IOAPIC_RESOURCE_NAME_SIZE;
		}
	}

	ioapic_resources = res;

	return res;
}
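
/*
 * For illustration, the single bootmem allocation above is laid out as
 * nr_ioapics 'struct resource' slots followed by nr_ioapics fixed-size
 * name buffers, with each res[i].name pointing into the tail:
 *
 *	[res 0][res 1]...[res n-1]["IOAPIC 0\0"]["IOAPIC 1\0"]...
 */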

void __init ioapic_init_mappings(void)
{
	unsigned long ioapic_phys, idx = FIX_IO_APIC_BASE_0;
	struct resource *ioapic_res;
	int i;

	ioapic_res = ioapic_setup_resources();
	for (i = 0; i < nr_ioapics; i++) {
		if (smp_found_config) {
			ioapic_phys = mp_ioapics[i].mp_apicaddr;
#ifdef CONFIG_X86_32
			if (!ioapic_phys) {
				printk(KERN_ERR
				       "WARNING: bogus zero IO-APIC "
				       "address found in MPTABLE, "
				       "disabling IO/APIC support!\n");
				smp_found_config = 0;
				skip_ioapic_setup = 1;
				goto fake_ioapic_page;
			}
#endif
		} else {
#ifdef CONFIG_X86_32
fake_ioapic_page:
#endif
			ioapic_phys = (unsigned long)
				alloc_bootmem_pages(PAGE_SIZE);
			ioapic_phys = __pa(ioapic_phys);
		}
		set_fixmap_nocache(idx, ioapic_phys);
		apic_printk(APIC_VERBOSE,
			    "mapped IOAPIC to %08lx (%08lx)\n",
			    __fix_to_virt(idx), ioapic_phys);
		idx++;

		if (ioapic_res != NULL) {
			ioapic_res->start = ioapic_phys;
			ioapic_res->end = ioapic_phys + (4 * 1024) - 1;
			ioapic_res++;
		}
	}
}

static int __init ioapic_insert_resources(void)
{
	int i;
	struct resource *r = ioapic_resources;

	if (!r) {
		printk(KERN_ERR
		       "IO APIC resources could not be allocated.\n");
		return -1;
	}

	for (i = 0; i < nr_ioapics; i++) {
		insert_resource(&iomem_resource, r);
		r++;
	}

	return 0;
}

/* Insert the IO APIC resources after PCI initialization has occurred to handle
 * IO APICs that are mapped in on a BAR in PCI space. */
late_initcall(ioapic_insert_resources);