/*
 *	Intel IO-APIC support for multi-Pentium hosts.
 *
 *	Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar, Hajnalka Szabo
 *
 *	Many thanks to Stig Venaas for trying out countless experimental
 *	patches and reporting/debugging problems patiently!
 *
 *	(c) 1999, Multiple IO-APIC support, developed by
 *	Ken-ichi Yaku <yaku@css1.kbnes.nec.co.jp> and
 *	Hidemi Kishimoto <kisimoto@css1.kbnes.nec.co.jp>,
 *	further tested and cleaned up by Zach Brown <zab@redhat.com>
 *	and Ingo Molnar <mingo@redhat.com>
 *
 *	Fixes
 *	Maciej W. Rozycki	:	Bits for genuine 82489DX APICs;
 *					thanks to Eric Gilmore
 *					and Rolf G. Tews
 *					for testing these extensively
 *	Paul Diefenbaugh	:	Added full ACPI support
 */
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/mc146818rtc.h>
#include <linux/acpi.h>
#include <linux/sysdev.h>
#include <linux/msi.h>
#include <linux/htirq.h>
#include <linux/dmar.h>
#include <linux/jiffies.h>
#include <acpi/acpi_bus.h>
#include <linux/bootmem.h>

#include <asm/proto.h>
#include <asm/msidef.h>
#include <asm/hypertransport.h>
#include <asm/irq_remapping.h>

#include <mach_apic.h>
struct irq_cfg {
        cpumask_t domain;
        cpumask_t old_domain;
        unsigned move_cleanup_count;
        u8 vector;
        u8 move_in_progress : 1;
};
/* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */
static struct irq_cfg irq_cfg[NR_IRQS] __read_mostly = {
        [0]  = { .domain = CPU_MASK_ALL, .vector = IRQ0_VECTOR,  },
        [1]  = { .domain = CPU_MASK_ALL, .vector = IRQ1_VECTOR,  },
        [2]  = { .domain = CPU_MASK_ALL, .vector = IRQ2_VECTOR,  },
        [3]  = { .domain = CPU_MASK_ALL, .vector = IRQ3_VECTOR,  },
        [4]  = { .domain = CPU_MASK_ALL, .vector = IRQ4_VECTOR,  },
        [5]  = { .domain = CPU_MASK_ALL, .vector = IRQ5_VECTOR,  },
        [6]  = { .domain = CPU_MASK_ALL, .vector = IRQ6_VECTOR,  },
        [7]  = { .domain = CPU_MASK_ALL, .vector = IRQ7_VECTOR,  },
        [8]  = { .domain = CPU_MASK_ALL, .vector = IRQ8_VECTOR,  },
        [9]  = { .domain = CPU_MASK_ALL, .vector = IRQ9_VECTOR,  },
        [10] = { .domain = CPU_MASK_ALL, .vector = IRQ10_VECTOR, },
        [11] = { .domain = CPU_MASK_ALL, .vector = IRQ11_VECTOR, },
        [12] = { .domain = CPU_MASK_ALL, .vector = IRQ12_VECTOR, },
        [13] = { .domain = CPU_MASK_ALL, .vector = IRQ13_VECTOR, },
        [14] = { .domain = CPU_MASK_ALL, .vector = IRQ14_VECTOR, },
        [15] = { .domain = CPU_MASK_ALL, .vector = IRQ15_VECTOR, },
};
static int assign_irq_vector(int irq, cpumask_t mask);

int first_system_vector = 0xfe;

char system_vectors[NR_VECTORS] = { [0 ... NR_VECTORS-1] = SYS_VECTOR_FREE };

#define __apicdebuginit __init
int sis_apic_bug; /* not actually supported, dummy for compile */

static int no_timer_check;

static int disable_timer_pin_1 __initdata;

int timer_through_8259 __initdata;
/* Where, if anywhere, the i8259 is connected in external int mode */
static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };

static DEFINE_SPINLOCK(ioapic_lock);
DEFINE_SPINLOCK(vector_lock);
/*
 * # of IRQ routing registers
 */
int nr_ioapic_registers[MAX_IO_APICS];

/* I/O APIC RTE contents at OS boot */
struct IO_APIC_route_entry *early_ioapic_entries[MAX_IO_APICS];

/* I/O APIC entries */
struct mp_config_ioapic mp_ioapics[MAX_IO_APICS];

/* MP IRQ source entries */
struct mp_config_intsrc mp_irqs[MAX_IRQ_SOURCES];

/* # of MP IRQ source entries */
int mp_irq_entries;

DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES);

/*
 * Rough estimation of how many shared IRQs there are, can be changed
 * anytime.
 */
#define MAX_PLUS_SHARED_IRQS NR_IRQS
#define PIN_MAP_SIZE (MAX_PLUS_SHARED_IRQS + NR_IRQS)
/*
 * This is performance-critical, we want to do it O(1)
 *
 * the indexing order of this array favors 1:1 mappings
 * between pins and IRQs.
 */

static struct irq_pin_list {
        short apic, pin, next;
} irq_2_pin[PIN_MAP_SIZE];

struct io_apic {
        unsigned int index;
        unsigned int unused[3];
        unsigned int data;
};
static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx)
{
        return (void __iomem *) __fix_to_virt(FIX_IO_APIC_BASE_0 + idx)
                + (mp_ioapics[idx].mp_apicaddr & ~PAGE_MASK);
}
static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg)
{
        struct io_apic __iomem *io_apic = io_apic_base(apic);

        writel(reg, &io_apic->index);
        return readl(&io_apic->data);
}

static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
{
        struct io_apic __iomem *io_apic = io_apic_base(apic);

        writel(reg, &io_apic->index);
        writel(value, &io_apic->data);
}
/*
 * Re-write a value: to be used for read-modify-write
 * cycles where the read already set up the index register.
 */
static inline void io_apic_modify(unsigned int apic, unsigned int value)
{
        struct io_apic __iomem *io_apic = io_apic_base(apic);

        writel(value, &io_apic->data);
}
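
/*
 * Illustration (not compiled): a full read-modify-write cycle built from
 * the accessors above.  io_apic_read() programs the index register, so
 * io_apic_modify() can write the data window without re-selecting it:
 *
 *	unsigned int reg = io_apic_read(apic, 0x10 + pin * 2);
 *	reg |= IO_APIC_REDIR_MASKED;
 *	io_apic_modify(apic, reg);	// index still selects the RTE low word
 */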
static bool io_apic_level_ack_pending(unsigned int irq)
{
        struct irq_pin_list *entry;
        unsigned long flags;

        spin_lock_irqsave(&ioapic_lock, flags);
        entry = irq_2_pin + irq;
        for (;;) {
                unsigned int reg;
                int pin;

                pin = entry->pin;
                if (pin == -1)
                        break;
                reg = io_apic_read(entry->apic, 0x10 + pin*2);
                /* Is the remote IRR bit set? */
                if (reg & IO_APIC_REDIR_REMOTE_IRR) {
                        spin_unlock_irqrestore(&ioapic_lock, flags);
                        return true;
                }
                if (!entry->next)
                        break;
                entry = irq_2_pin + entry->next;
        }
        spin_unlock_irqrestore(&ioapic_lock, flags);

        return false;
}
/*
 * Synchronize the IO-APIC and the CPU by doing
 * a dummy read from the IO-APIC
 */
static inline void io_apic_sync(unsigned int apic)
{
        struct io_apic __iomem *io_apic = io_apic_base(apic);

        readl(&io_apic->data);
}
#define __DO_ACTION(R, ACTION, FINAL)					\
									\
{									\
	int pin;							\
	struct irq_pin_list *entry = irq_2_pin + irq;			\
									\
	BUG_ON(irq >= NR_IRQS);						\
	for (;;) {							\
		unsigned int reg;					\
		pin = entry->pin;					\
		if (pin == -1)						\
			break;						\
		reg = io_apic_read(entry->apic, 0x10 + R + pin*2);	\
		reg ACTION;						\
		io_apic_modify(entry->apic, reg);			\
		FINAL;							\
		if (!entry->next)					\
			break;						\
		entry = irq_2_pin + entry->next;			\
	}								\
}
union entry_union {
        struct { u32 w1, w2; };
        struct IO_APIC_route_entry entry;
};
static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin)
{
        union entry_union eu;
        unsigned long flags;

        spin_lock_irqsave(&ioapic_lock, flags);
        eu.w1 = io_apic_read(apic, 0x10 + 2 * pin);
        eu.w2 = io_apic_read(apic, 0x11 + 2 * pin);
        spin_unlock_irqrestore(&ioapic_lock, flags);

        return eu.entry;
}
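
/*
 * Illustration (not compiled): entry_union overlays the 64-bit RTE on two
 * 32-bit words, matching the register pair 0x10+2*pin (low word) and
 * 0x11+2*pin (high word) read above:
 *
 *	struct IO_APIC_route_entry rte = ioapic_read_entry(apic, pin);
 *	if (rte.mask)
 *		;	// the pin is currently masked
 */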
/*
 * When we write a new IO APIC routing entry, we need to write the high
 * word first! If the mask bit in the low word is clear, we will enable
 * the interrupt, and we need to make sure the entry is fully populated
 * before that happens.
 */
static void
__ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
{
        union entry_union eu;

        eu.entry = e;
        io_apic_write(apic, 0x11 + 2*pin, eu.w2);
        io_apic_write(apic, 0x10 + 2*pin, eu.w1);
}
static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
{
        unsigned long flags;

        spin_lock_irqsave(&ioapic_lock, flags);
        __ioapic_write_entry(apic, pin, e);
        spin_unlock_irqrestore(&ioapic_lock, flags);
}
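
/*
 * Illustration (not compiled): because __ioapic_write_entry() writes the
 * high word first, an unmasked entry can be retargeted safely:
 *
 *	struct IO_APIC_route_entry e = ioapic_read_entry(apic, pin);
 *	e.vector = new_vector;			// hypothetical new vector
 *	ioapic_write_entry(apic, pin, e);	// 0x11 written before 0x10
 */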
/*
 * When we mask an IO APIC routing entry, we need to write the low
 * word first, in order to set the mask bit before we change the
 * high bits!
 */
static void ioapic_mask_entry(int apic, int pin)
{
        unsigned long flags;
        union entry_union eu = { .entry.mask = 1 };

        spin_lock_irqsave(&ioapic_lock, flags);
        io_apic_write(apic, 0x10 + 2*pin, eu.w1);
        io_apic_write(apic, 0x11 + 2*pin, eu.w2);
        spin_unlock_irqrestore(&ioapic_lock, flags);
}
#ifdef CONFIG_SMP
static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, u8 vector)
{
        int apic, pin;
        struct irq_pin_list *entry = irq_2_pin + irq;

        BUG_ON(irq >= NR_IRQS);
        for (;;) {
                unsigned int reg;

                apic = entry->apic;
                pin = entry->pin;
                if (pin == -1)
                        break;
                /*
                 * With interrupt-remapping, destination information comes
                 * from interrupt-remapping table entry.
                 */
                if (!irq_remapped(irq))
                        io_apic_write(apic, 0x11 + pin*2, dest);
                reg = io_apic_read(apic, 0x10 + pin*2);
                reg &= ~IO_APIC_REDIR_VECTOR_MASK;
                reg |= vector;
                io_apic_modify(apic, reg);
                if (!entry->next)
                        break;
                entry = irq_2_pin + entry->next;
        }
}
static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
{
        struct irq_cfg *cfg = irq_cfg + irq;
        unsigned long flags;
        unsigned int dest;
        cpumask_t tmp;

        cpus_and(tmp, mask, cpu_online_map);
        if (cpus_empty(tmp))
                return;

        if (assign_irq_vector(irq, mask))
                return;

        cpus_and(tmp, cfg->domain, mask);
        dest = cpu_mask_to_apicid(tmp);

        /*
         * Only the high 8 bits are valid.
         */
        dest = SET_APIC_LOGICAL_ID(dest);

        spin_lock_irqsave(&ioapic_lock, flags);
        __target_IO_APIC_irq(irq, dest, cfg->vector);
        irq_desc[irq].affinity = mask;
        spin_unlock_irqrestore(&ioapic_lock, flags);
}
#endif /* CONFIG_SMP */
/*
 * The common case is 1:1 IRQ<->pin mappings. Sometimes there are
 * shared ISA-space IRQs, so we have to support them. We are super
 * fast in the common case, and fast for shared ISA-space IRQs.
 */
static void add_pin_to_irq(unsigned int irq, int apic, int pin)
{
        static int first_free_entry = NR_IRQS;
        struct irq_pin_list *entry = irq_2_pin + irq;

        BUG_ON(irq >= NR_IRQS);
        while (entry->next)
                entry = irq_2_pin + entry->next;

        if (entry->pin != -1) {
                entry->next = first_free_entry;
                entry = irq_2_pin + entry->next;
                if (++first_free_entry >= PIN_MAP_SIZE)
                        panic("io_apic.c: ran out of irq_2_pin entries!");
        }
        entry->apic = apic;
        entry->pin = pin;
}
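
/*
 * Illustration (not compiled): after add_pin_to_irq(irq, 0, 16) and
 * add_pin_to_irq(irq, 1, 3) on the same irq, the chain looks like:
 *
 *	irq_2_pin[irq]     = { .apic = 0, .pin = 16, .next = NR_IRQS };
 *	irq_2_pin[NR_IRQS] = { .apic = 1, .pin =  3, .next = 0 };
 *
 * a .next of 0 terminates the walk; overflow slots start at NR_IRQS via
 * first_free_entry.
 */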
/*
 * Reroute an IRQ to a different pin.
 */
static void __init replace_pin_at_irq(unsigned int irq,
                                      int oldapic, int oldpin,
                                      int newapic, int newpin)
{
        struct irq_pin_list *entry = irq_2_pin + irq;

        while (1) {
                if (entry->apic == oldapic && entry->pin == oldpin) {
                        entry->apic = newapic;
                        entry->pin = newpin;
                }
                if (!entry->next)
                        break;
                entry = irq_2_pin + entry->next;
        }
}
#define DO_ACTION(name,R,ACTION, FINAL)					\
									\
	static void name##_IO_APIC_irq (unsigned int irq)		\
	__DO_ACTION(R, ACTION, FINAL)

/* mask = 1 */
DO_ACTION(__mask,	0, |= IO_APIC_REDIR_MASKED, io_apic_sync(entry->apic))

/* mask = 0 */
DO_ACTION(__unmask,	0, &= ~IO_APIC_REDIR_MASKED, )
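
/*
 * Illustration (not compiled): DO_ACTION(__mask, 0, |= IO_APIC_REDIR_MASKED,
 * io_apic_sync(entry->apic)) above expands to roughly:
 *
 *	static void __mask_IO_APIC_irq(unsigned int irq)
 *	{
 *		struct irq_pin_list *entry = irq_2_pin + irq;
 *
 *		for (;;) {
 *			unsigned int reg;
 *			int pin = entry->pin;
 *			if (pin == -1)
 *				break;
 *			reg = io_apic_read(entry->apic, 0x10 + pin*2);
 *			reg |= IO_APIC_REDIR_MASKED;
 *			io_apic_modify(entry->apic, reg);
 *			io_apic_sync(entry->apic);
 *			if (!entry->next)
 *				break;
 *			entry = irq_2_pin + entry->next;
 *		}
 *	}
 */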
static void mask_IO_APIC_irq(unsigned int irq)
{
        unsigned long flags;

        spin_lock_irqsave(&ioapic_lock, flags);
        __mask_IO_APIC_irq(irq);
        spin_unlock_irqrestore(&ioapic_lock, flags);
}

static void unmask_IO_APIC_irq(unsigned int irq)
{
        unsigned long flags;

        spin_lock_irqsave(&ioapic_lock, flags);
        __unmask_IO_APIC_irq(irq);
        spin_unlock_irqrestore(&ioapic_lock, flags);
}
static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
{
        struct IO_APIC_route_entry entry;

        /* Check delivery_mode to be sure we're not clearing an SMI pin */
        entry = ioapic_read_entry(apic, pin);
        if (entry.delivery_mode == dest_SMI)
                return;
        /*
         * Disable it in the IO-APIC irq-routing table:
         */
        ioapic_mask_entry(apic, pin);
}
static void clear_IO_APIC(void)
{
        int apic, pin;

        for (apic = 0; apic < nr_ioapics; apic++)
                for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
                        clear_IO_APIC_pin(apic, pin);
}
/*
 * Saves and masks all the unmasked IO-APIC RTE's
 */
int save_mask_IO_APIC_setup(void)
{
        union IO_APIC_reg_01 reg_01;
        unsigned long flags;
        int apic, pin;

        /*
         * The number of IO-APIC IRQ registers (== #pins):
         */
        for (apic = 0; apic < nr_ioapics; apic++) {
                spin_lock_irqsave(&ioapic_lock, flags);
                reg_01.raw = io_apic_read(apic, 1);
                spin_unlock_irqrestore(&ioapic_lock, flags);
                nr_ioapic_registers[apic] = reg_01.bits.entries+1;
        }

        for (apic = 0; apic < nr_ioapics; apic++) {
                early_ioapic_entries[apic] =
                        kzalloc(sizeof(struct IO_APIC_route_entry) *
                                nr_ioapic_registers[apic], GFP_KERNEL);
                if (!early_ioapic_entries[apic])
                        return -ENOMEM;
        }

        for (apic = 0; apic < nr_ioapics; apic++)
                for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
                        struct IO_APIC_route_entry entry;

                        entry = early_ioapic_entries[apic][pin] =
                                ioapic_read_entry(apic, pin);
                        if (!entry.mask) {
                                entry.mask = 1;
                                ioapic_write_entry(apic, pin, entry);
                        }
                }

        return 0;
}
void restore_IO_APIC_setup(void)
{
        int apic, pin;

        for (apic = 0; apic < nr_ioapics; apic++)
                for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
                        ioapic_write_entry(apic, pin,
                                           early_ioapic_entries[apic][pin]);
}
void reinit_intr_remapped_IO_APIC(int intr_remapping)
{
        /*
         * For now, do a plain restore of the previous settings.
         * TBD: if the OS enables interrupt-remapping, the IO-APIC RTE's
         * need to be set up to point at interrupt-remapping table
         * entries. Until then, restore and wait for setup_IO_APIC_irqs()
         * to do the proper initialization.
         */
        restore_IO_APIC_setup();
}
int skip_ioapic_setup;

static int __init parse_noapic(char *str)
{
        disable_ioapic_setup();
        return 0;
}
early_param("noapic", parse_noapic);
/* Actually the next is obsolete, but keep it for paranoid reasons -AK */
static int __init disable_timer_pin_setup(char *arg)
{
        disable_timer_pin_1 = 1;
        return 1;
}
__setup("disable_timer_pin_1", disable_timer_pin_setup);
/*
 * Find the IRQ entry number of a certain pin.
 */
static int find_irq_entry(int apic, int pin, int type)
{
        int i;

        for (i = 0; i < mp_irq_entries; i++)
                if (mp_irqs[i].mp_irqtype == type &&
                    (mp_irqs[i].mp_dstapic == mp_ioapics[apic].mp_apicid ||
                     mp_irqs[i].mp_dstapic == MP_APIC_ALL) &&
                    mp_irqs[i].mp_dstirq == pin)
                        return i;

        return -1;
}
/*
 * Find the pin to which IRQ[irq] (ISA) is connected
 */
static int __init find_isa_irq_pin(int irq, int type)
{
        int i;

        for (i = 0; i < mp_irq_entries; i++) {
                int lbus = mp_irqs[i].mp_srcbus;

                if (test_bit(lbus, mp_bus_not_pci) &&
                    (mp_irqs[i].mp_irqtype == type) &&
                    (mp_irqs[i].mp_srcbusirq == irq))
                        return mp_irqs[i].mp_dstirq;
        }
        return -1;
}
static int __init find_isa_irq_apic(int irq, int type)
{
        int i;

        for (i = 0; i < mp_irq_entries; i++) {
                int lbus = mp_irqs[i].mp_srcbus;

                if (test_bit(lbus, mp_bus_not_pci) &&
                    (mp_irqs[i].mp_irqtype == type) &&
                    (mp_irqs[i].mp_srcbusirq == irq))
                        break;
        }
        if (i < mp_irq_entries) {
                int apic;

                for (apic = 0; apic < nr_ioapics; apic++) {
                        if (mp_ioapics[apic].mp_apicid == mp_irqs[i].mp_dstapic)
                                return apic;
                }
        }

        return -1;
}
/*
 * Find a specific PCI IRQ entry.
 * Not an __init, possibly needed by modules
 */
static int pin_2_irq(int idx, int apic, int pin);

int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
{
        int apic, i, best_guess = -1;

        apic_printk(APIC_DEBUG, "querying PCI -> IRQ mapping bus:%d, slot:%d, pin:%d.\n",
                    bus, slot, pin);
        if (test_bit(bus, mp_bus_not_pci)) {
                apic_printk(APIC_VERBOSE, "PCI BIOS passed nonexistent PCI bus %d!\n", bus);
                return -1;
        }
        for (i = 0; i < mp_irq_entries; i++) {
                int lbus = mp_irqs[i].mp_srcbus;

                for (apic = 0; apic < nr_ioapics; apic++)
                        if (mp_ioapics[apic].mp_apicid == mp_irqs[i].mp_dstapic ||
                            mp_irqs[i].mp_dstapic == MP_APIC_ALL)
                                break;

                if (!test_bit(lbus, mp_bus_not_pci) &&
                    !mp_irqs[i].mp_irqtype &&
                    (bus == lbus) &&
                    (slot == ((mp_irqs[i].mp_srcbusirq >> 2) & 0x1f))) {
                        int irq = pin_2_irq(i, apic, mp_irqs[i].mp_dstirq);

                        if (!(apic || IO_APIC_IRQ(irq)))
                                continue;

                        if (pin == (mp_irqs[i].mp_srcbusirq & 3))
                                return irq;
                        /*
                         * Use the first all-but-pin matching entry as a
                         * best-guess fuzzy result for broken mptables.
                         */
                        if (best_guess < 0)
                                best_guess = irq;
                }
        }
        BUG_ON(best_guess >= NR_IRQS);
        return best_guess;
}
/* ISA interrupts are always polarity zero edge triggered,
 * when listed as conforming in the MP table. */

#define default_ISA_trigger(idx)	(0)
#define default_ISA_polarity(idx)	(0)

/* PCI interrupts are always polarity one level triggered,
 * when listed as conforming in the MP table. */

#define default_PCI_trigger(idx)	(1)
#define default_PCI_polarity(idx)	(1)
static int MPBIOS_polarity(int idx)
{
        int bus = mp_irqs[idx].mp_srcbus;
        int polarity;

        /*
         * Determine IRQ line polarity (high active or low active):
         */
        switch (mp_irqs[idx].mp_irqflag & 3) {
        case 0: /* conforms, ie. bus-type dependent polarity */
                if (test_bit(bus, mp_bus_not_pci))
                        polarity = default_ISA_polarity(idx);
                else
                        polarity = default_PCI_polarity(idx);
                break;
        case 1: /* high active */
                polarity = 0;
                break;
        case 2: /* reserved */
                printk(KERN_WARNING "broken BIOS!!\n");
                polarity = 1;
                break;
        case 3: /* low active */
                polarity = 1;
                break;
        default: /* invalid */
                printk(KERN_WARNING "broken BIOS!!\n");
                polarity = 1;
                break;
        }
        return polarity;
}
static int MPBIOS_trigger(int idx)
{
        int bus = mp_irqs[idx].mp_srcbus;
        int trigger;

        /*
         * Determine IRQ trigger mode (edge or level sensitive):
         */
        switch ((mp_irqs[idx].mp_irqflag >> 2) & 3) {
        case 0: /* conforms, ie. bus-type dependent */
                if (test_bit(bus, mp_bus_not_pci))
                        trigger = default_ISA_trigger(idx);
                else
                        trigger = default_PCI_trigger(idx);
                break;
        case 1: /* edge */
                trigger = 0;
                break;
        case 2: /* reserved */
                printk(KERN_WARNING "broken BIOS!!\n");
                trigger = 1;
                break;
        case 3: /* level */
                trigger = 1;
                break;
        default: /* invalid */
                printk(KERN_WARNING "broken BIOS!!\n");
                trigger = 0;
                break;
        }
        return trigger;
}
static inline int irq_polarity(int idx)
{
        return MPBIOS_polarity(idx);
}

static inline int irq_trigger(int idx)
{
        return MPBIOS_trigger(idx);
}
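
/*
 * Illustration (not compiled): mp_irqflag packs polarity in bits 0-1 and
 * trigger mode in bits 2-3, which is exactly what the two switches above
 * decode:
 *
 *	int pol  = mp_irqs[idx].mp_irqflag & 3;        // 0=conform, 1=high, 3=low
 *	int trig = (mp_irqs[idx].mp_irqflag >> 2) & 3; // 0=conform, 1=edge, 3=level
 */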
static int pin_2_irq(int idx, int apic, int pin)
{
        int irq, i;
        int bus = mp_irqs[idx].mp_srcbus;

        /*
         * Debugging check, we are in big trouble if this message pops up!
         */
        if (mp_irqs[idx].mp_dstirq != pin)
                printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n");

        if (test_bit(bus, mp_bus_not_pci)) {
                irq = mp_irqs[idx].mp_srcbusirq;
        } else {
                /*
                 * PCI IRQs are mapped in order
                 */
                i = irq = 0;
                while (i < apic)
                        irq += nr_ioapic_registers[i++];
                irq += pin;
        }
        BUG_ON(irq >= NR_IRQS);
        return irq;
}
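
/*
 * Illustration (not compiled): for PCI, pin_2_irq() sums the pin counts of
 * the preceding IO-APICs.  Assuming a hypothetical 24-pin IO-APIC #0, pin 5
 * of IO-APIC #1 becomes:
 *
 *	irq = nr_ioapic_registers[0] + 5;	// 24 + 5 = 29
 */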
static int __assign_irq_vector(int irq, cpumask_t mask)
{
        /*
         * NOTE! The local APIC isn't very good at handling
         * multiple interrupts at the same interrupt level.
         * As the interrupt level is determined by taking the
         * vector number and shifting that right by 4, we
         * want to spread these out a bit so that they don't
         * all fall in the same interrupt level.
         *
         * Also, we've got to be careful not to trash gate
         * 0x80, because int 0x80 is hm, kind of importantish. ;)
         */
        static int current_vector = FIRST_DEVICE_VECTOR, current_offset = 0;
        unsigned int old_vector;
        int cpu;
        struct irq_cfg *cfg;

        BUG_ON((unsigned)irq >= NR_IRQS);
        cfg = &irq_cfg[irq];

        /* Only try and allocate irqs on cpus that are present */
        cpus_and(mask, mask, cpu_online_map);

        if ((cfg->move_in_progress) || cfg->move_cleanup_count)
                return -EBUSY;

        old_vector = cfg->vector;
        if (old_vector) {
                cpumask_t tmp;

                cpus_and(tmp, cfg->domain, mask);
                if (!cpus_empty(tmp))
                        return 0;
        }

        for_each_cpu_mask(cpu, mask) {
                cpumask_t domain, new_mask;
                int new_cpu;
                int vector, offset;

                domain = vector_allocation_domain(cpu);
                cpus_and(new_mask, domain, cpu_online_map);

                vector = current_vector;
                offset = current_offset;
next:
                vector += 8;
                if (vector >= first_system_vector) {
                        /* If we run out of vectors on large boxen, must share them. */
                        offset = (offset + 1) % 8;
                        vector = FIRST_DEVICE_VECTOR + offset;
                }
                if (unlikely(current_vector == vector))
                        continue;
                if (vector == IA32_SYSCALL_VECTOR)
                        goto next;
                for_each_cpu_mask(new_cpu, new_mask)
                        if (per_cpu(vector_irq, new_cpu)[vector] != -1)
                                goto next;
                /* Found one! */
                current_vector = vector;
                current_offset = offset;
                if (old_vector) {
                        cfg->move_in_progress = 1;
                        cfg->old_domain = cfg->domain;
                }
                for_each_cpu_mask(new_cpu, new_mask)
                        per_cpu(vector_irq, new_cpu)[vector] = irq;
                cfg->vector = vector;
                cfg->domain = domain;
                return 0;
        }
        return -ENOSPC;
}
static int assign_irq_vector(int irq, cpumask_t mask)
{
        int err;
        unsigned long flags;

        spin_lock_irqsave(&vector_lock, flags);
        err = __assign_irq_vector(irq, mask);
        spin_unlock_irqrestore(&vector_lock, flags);
        return err;
}
static void __clear_irq_vector(int irq)
{
        struct irq_cfg *cfg;
        cpumask_t mask;
        int cpu, vector;

        BUG_ON((unsigned)irq >= NR_IRQS);
        cfg = &irq_cfg[irq];
        BUG_ON(!cfg->vector);

        vector = cfg->vector;
        cpus_and(mask, cfg->domain, cpu_online_map);
        for_each_cpu_mask(cpu, mask)
                per_cpu(vector_irq, cpu)[vector] = -1;

        cfg->vector = 0;
        cpus_clear(cfg->domain);
}
static void __setup_vector_irq(int cpu)
{
        /* Initialize vector_irq on a new cpu */
        /* This function must be called with vector_lock held */
        int irq, vector;

        /* Mark the inuse vectors */
        for (irq = 0; irq < NR_IRQS; ++irq) {
                if (!cpu_isset(cpu, irq_cfg[irq].domain))
                        continue;
                vector = irq_cfg[irq].vector;
                per_cpu(vector_irq, cpu)[vector] = irq;
        }
        /* Mark the free vectors */
        for (vector = 0; vector < NR_VECTORS; ++vector) {
                irq = per_cpu(vector_irq, cpu)[vector];
                if (irq < 0)
                        continue;
                if (!cpu_isset(cpu, irq_cfg[irq].domain))
                        per_cpu(vector_irq, cpu)[vector] = -1;
        }
}
void setup_vector_irq(int cpu)
{
        spin_lock(&vector_lock);
        __setup_vector_irq(smp_processor_id());
        spin_unlock(&vector_lock);
}
static struct irq_chip ioapic_chip;
#ifdef CONFIG_INTR_REMAP
static struct irq_chip ir_ioapic_chip;
#endif
static void ioapic_register_intr(int irq, unsigned long trigger)
{
        if (trigger)
                irq_desc[irq].status |= IRQ_LEVEL;
        else
                irq_desc[irq].status &= ~IRQ_LEVEL;

#ifdef CONFIG_INTR_REMAP
        if (irq_remapped(irq)) {
                irq_desc[irq].status |= IRQ_MOVE_PCNTXT;
                if (trigger)
                        set_irq_chip_and_handler_name(irq, &ir_ioapic_chip,
                                                      handle_fasteoi_irq,
                                                      "fasteoi");
                else
                        set_irq_chip_and_handler_name(irq, &ir_ioapic_chip,
                                                      handle_edge_irq, "edge");
                return;
        }
#endif
        if (trigger)
                set_irq_chip_and_handler_name(irq, &ioapic_chip,
                                              handle_fasteoi_irq,
                                              "fasteoi");
        else
                set_irq_chip_and_handler_name(irq, &ioapic_chip,
                                              handle_edge_irq, "edge");
}
static int setup_ioapic_entry(int apic, int irq,
                              struct IO_APIC_route_entry *entry,
                              unsigned int destination, int trigger,
                              int polarity, int vector)
{
        /*
         * add it to the IO-APIC irq-routing table:
         */
        memset(entry, 0, sizeof(*entry));

#ifdef CONFIG_INTR_REMAP
        if (intr_remapping_enabled) {
                struct intel_iommu *iommu = map_ioapic_to_ir(apic);
                struct irte irte;
                struct IR_IO_APIC_route_entry *ir_entry =
                        (struct IR_IO_APIC_route_entry *) entry;
                int index;

                if (!iommu)
                        panic("No mapping iommu for ioapic %d\n", apic);

                index = alloc_irte(iommu, irq, 1);
                if (index < 0)
                        panic("Failed to allocate IRTE for ioapic %d\n", apic);

                memset(&irte, 0, sizeof(irte));

                irte.present = 1;
                irte.dst_mode = INT_DEST_MODE;
                irte.trigger_mode = trigger;
                irte.dlvry_mode = INT_DELIVERY_MODE;
                irte.vector = vector;
                irte.dest_id = IRTE_DEST(destination);

                modify_irte(irq, &irte);

                ir_entry->index2 = (index >> 15) & 0x1;
                ir_entry->zero = 0;
                ir_entry->format = 1;
                ir_entry->index = (index & 0x7fff);
        } else
#endif
        {
                entry->delivery_mode = INT_DELIVERY_MODE;
                entry->dest_mode = INT_DEST_MODE;
                entry->dest = destination;
        }

        entry->mask = 0;				/* enable IRQ */
        entry->trigger = trigger;
        entry->polarity = polarity;
        entry->vector = vector;

        /* Mask level triggered irqs.
         * Use IRQ_DELAYED_DISABLE for edge triggered irqs.
         */
        if (trigger)
                entry->mask = 1;
        return 0;
}
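
/*
 * Illustration (not compiled): in the remapped format above, the IRTE
 * handle is split across the RTE - bit 15 goes to index2 and the low 15
 * bits to index, so a hypothetical handle of 0x8003 is stored as:
 *
 *	ir_entry->index2 = 1;	// (0x8003 >> 15) & 0x1
 *	ir_entry->index  = 3;	// 0x8003 & 0x7fff
 */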
static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq,
                              int trigger, int polarity)
{
        struct irq_cfg *cfg = irq_cfg + irq;
        struct IO_APIC_route_entry entry;
        cpumask_t mask;

        if (!IO_APIC_IRQ(irq))
                return;

        mask = TARGET_CPUS;
        if (assign_irq_vector(irq, mask))
                return;

        cpus_and(mask, cfg->domain, mask);

        apic_printk(APIC_VERBOSE, KERN_DEBUG
                    "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> "
                    "IRQ %d Mode:%i Active:%i)\n",
                    apic, mp_ioapics[apic].mp_apicid, pin, cfg->vector,
                    irq, trigger, polarity);

        if (setup_ioapic_entry(mp_ioapics[apic].mp_apicid, irq, &entry,
                               cpu_mask_to_apicid(mask), trigger, polarity,
                               cfg->vector)) {
                printk("Failed to setup ioapic entry for ioapic %d, pin %d\n",
                       mp_ioapics[apic].mp_apicid, pin);
                __clear_irq_vector(irq);
                return;
        }

        ioapic_register_intr(irq, trigger);
        if (irq < 16)
                disable_8259A_irq(irq);

        ioapic_write_entry(apic, pin, entry);
}
static void __init setup_IO_APIC_irqs(void)
{
        int apic, pin, idx, irq, first_notcon = 1;

        apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");

        for (apic = 0; apic < nr_ioapics; apic++) {
                for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {

                        idx = find_irq_entry(apic, pin, mp_INT);
                        if (idx == -1) {
                                if (first_notcon) {
                                        apic_printk(APIC_VERBOSE, KERN_DEBUG " IO-APIC (apicid-pin) %d-%d", mp_ioapics[apic].mp_apicid, pin);
                                        first_notcon = 0;
                                } else
                                        apic_printk(APIC_VERBOSE, ", %d-%d", mp_ioapics[apic].mp_apicid, pin);
                                continue;
                        }
                        if (!first_notcon) {
                                apic_printk(APIC_VERBOSE, " not connected.\n");
                                first_notcon = 1;
                        }

                        irq = pin_2_irq(idx, apic, pin);
                        add_pin_to_irq(irq, apic, pin);

                        setup_IO_APIC_irq(apic, pin, irq,
                                          irq_trigger(idx), irq_polarity(idx));
                }
        }

        if (!first_notcon)
                apic_printk(APIC_VERBOSE, " not connected.\n");
}
/*
 * Set up the timer pin, possibly with the 8259A-master behind.
 */
static void __init setup_timer_IRQ0_pin(unsigned int apic, unsigned int pin,
                                        int vector)
{
        struct IO_APIC_route_entry entry;

        if (intr_remapping_enabled)
                return;

        memset(&entry, 0, sizeof(entry));

        /*
         * We use logical delivery to get the timer IRQ
         * to the first CPU.
         */
        entry.dest_mode = INT_DEST_MODE;
        entry.mask = 1;					/* mask IRQ now */
        entry.dest = cpu_mask_to_apicid(TARGET_CPUS);
        entry.delivery_mode = INT_DELIVERY_MODE;
        entry.polarity = 0;
        entry.trigger = 0;
        entry.vector = vector;

        /*
         * The timer IRQ doesn't have to know that behind the
         * scene we may have a 8259A-master in AEOI mode ...
         */
        set_irq_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, "edge");

        /*
         * Add it to the IO-APIC irq-routing table:
         */
        ioapic_write_entry(apic, pin, entry);
}
void __apicdebuginit print_IO_APIC(void)
{
        int apic, i;
        union IO_APIC_reg_00 reg_00;
        union IO_APIC_reg_01 reg_01;
        union IO_APIC_reg_02 reg_02;
        unsigned long flags;

        if (apic_verbosity == APIC_QUIET)
                return;

        printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
        for (i = 0; i < nr_ioapics; i++)
                printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
                       mp_ioapics[i].mp_apicid, nr_ioapic_registers[i]);

        /*
         * We are a bit conservative about what we expect.  We have to
         * know about every hardware change ASAP.
         */
        printk(KERN_INFO "testing the IO APIC.......................\n");

        for (apic = 0; apic < nr_ioapics; apic++) {

        spin_lock_irqsave(&ioapic_lock, flags);
        reg_00.raw = io_apic_read(apic, 0);
        reg_01.raw = io_apic_read(apic, 1);
        if (reg_01.bits.version >= 0x10)
                reg_02.raw = io_apic_read(apic, 2);
        spin_unlock_irqrestore(&ioapic_lock, flags);

        printk("\n");
        printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].mp_apicid);
        printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
        printk(KERN_DEBUG ".......    : physical APIC id: %02X\n", reg_00.bits.ID);

        printk(KERN_DEBUG ".... register #01: %08X\n", *(int *)&reg_01);
        printk(KERN_DEBUG ".......     : max redirection entries: %04X\n", reg_01.bits.entries);

        printk(KERN_DEBUG ".......     : PRQ implemented: %X\n", reg_01.bits.PRQ);
        printk(KERN_DEBUG ".......     : IO APIC version: %04X\n", reg_01.bits.version);

        if (reg_01.bits.version >= 0x10) {
                printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw);
                printk(KERN_DEBUG ".......     : arbitration: %02X\n", reg_02.bits.arbitration);
        }

        printk(KERN_DEBUG ".... IRQ redirection table:\n");

        printk(KERN_DEBUG " NR Dst Mask Trig IRR Pol"
                          " Stat Dmod Deli Vect:   \n");

        for (i = 0; i <= reg_01.bits.entries; i++) {
                struct IO_APIC_route_entry entry;

                entry = ioapic_read_entry(apic, i);

                printk(KERN_DEBUG " %02x %03X ",
                        i,
                        entry.dest
                );

                printk("%1d    %1d    %1d   %1d   %1d    %1d    %1d    %02X\n",
                        entry.mask,
                        entry.trigger,
                        entry.irr,
                        entry.polarity,
                        entry.delivery_status,
                        entry.dest_mode,
                        entry.delivery_mode,
                        entry.vector
                );
        }
        }
        printk(KERN_DEBUG "IRQ to pin mappings:\n");
        for (i = 0; i < NR_IRQS; i++) {
                struct irq_pin_list *entry = irq_2_pin + i;

                if (entry->pin < 0)
                        continue;
                printk(KERN_DEBUG "IRQ%d ", i);
                for (;;) {
                        printk("-> %d:%d", entry->apic, entry->pin);
                        if (!entry->next)
                                break;
                        entry = irq_2_pin + entry->next;
                }
                printk("\n");
        }

        printk(KERN_INFO ".................................... done.\n");

        return;
}
static __apicdebuginit void print_APIC_bitfield(int base)
{
        unsigned int v;
        int i, j;

        if (apic_verbosity == APIC_QUIET)
                return;

        printk(KERN_DEBUG "0123456789abcdef0123456789abcdef\n" KERN_DEBUG);
        for (i = 0; i < 8; i++) {
                v = apic_read(base + i*0x10);
                for (j = 0; j < 32; j++) {
                        if (v & (1<<j))
                                printk("1");
                        else
                                printk("0");
                }
                printk("\n");
        }
}
void __apicdebuginit print_local_APIC(void *dummy)
{
        unsigned int v, ver, maxlvt;
        unsigned long icr;

        if (apic_verbosity == APIC_QUIET)
                return;

        printk("\n" KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
                smp_processor_id(), hard_smp_processor_id());
        v = apic_read(APIC_ID);
        printk(KERN_INFO "... APIC ID:      %08x (%01x)\n", v, read_apic_id());
        v = apic_read(APIC_LVR);
        printk(KERN_INFO "... APIC VERSION: %08x\n", v);
        ver = GET_APIC_VERSION(v);
        maxlvt = lapic_get_maxlvt();

        v = apic_read(APIC_TASKPRI);
        printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);

        v = apic_read(APIC_ARBPRI);
        printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v,
                v & APIC_ARBPRI_MASK);
        v = apic_read(APIC_PROCPRI);
        printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v);

        v = apic_read(APIC_EOI);
        printk(KERN_DEBUG "... APIC EOI: %08x\n", v);
        v = apic_read(APIC_RRR);
        printk(KERN_DEBUG "... APIC RRR: %08x\n", v);
        v = apic_read(APIC_LDR);
        printk(KERN_DEBUG "... APIC LDR: %08x\n", v);
        v = apic_read(APIC_DFR);
        printk(KERN_DEBUG "... APIC DFR: %08x\n", v);
        v = apic_read(APIC_SPIV);
        printk(KERN_DEBUG "... APIC SPIV: %08x\n", v);

        printk(KERN_DEBUG "... APIC ISR field:\n");
        print_APIC_bitfield(APIC_ISR);
        printk(KERN_DEBUG "... APIC TMR field:\n");
        print_APIC_bitfield(APIC_TMR);
        printk(KERN_DEBUG "... APIC IRR field:\n");
        print_APIC_bitfield(APIC_IRR);

        v = apic_read(APIC_ESR);
        printk(KERN_DEBUG "... APIC ESR: %08x\n", v);

        icr = apic_icr_read();
        printk(KERN_DEBUG "... APIC ICR: %08x\n", (u32)icr);
        printk(KERN_DEBUG "... APIC ICR2: %08x\n", (u32)(icr >> 32));

        v = apic_read(APIC_LVTT);
        printk(KERN_DEBUG "... APIC LVTT: %08x\n", v);

        if (maxlvt > 3) {			/* PC is LVT#4. */
                v = apic_read(APIC_LVTPC);
                printk(KERN_DEBUG "... APIC LVTPC: %08x\n", v);
        }
        v = apic_read(APIC_LVT0);
        printk(KERN_DEBUG "... APIC LVT0: %08x\n", v);
        v = apic_read(APIC_LVT1);
        printk(KERN_DEBUG "... APIC LVT1: %08x\n", v);

        if (maxlvt > 2) {			/* ERR is LVT#3. */
                v = apic_read(APIC_LVTERR);
                printk(KERN_DEBUG "... APIC LVTERR: %08x\n", v);
        }

        v = apic_read(APIC_TMICT);
        printk(KERN_DEBUG "... APIC TMICT: %08x\n", v);
        v = apic_read(APIC_TMCCT);
        printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v);
        v = apic_read(APIC_TDCR);
        printk(KERN_DEBUG "... APIC TDCR: %08x\n", v);
        printk("\n");
}
void print_all_local_APICs(void)
{
        on_each_cpu(print_local_APIC, NULL, 1);
}
void __apicdebuginit print_PIC(void)
{
        unsigned int v;
        unsigned long flags;

        if (apic_verbosity == APIC_QUIET)
                return;

        printk(KERN_DEBUG "\nprinting PIC contents\n");

        spin_lock_irqsave(&i8259A_lock, flags);

        v = inb(0xa1) << 8 | inb(0x21);
        printk(KERN_DEBUG "... PIC  IMR: %04x\n", v);

        v = inb(0xa0) << 8 | inb(0x20);
        printk(KERN_DEBUG "... PIC  IRR: %04x\n", v);

        outb(0x0b, 0xa0);
        outb(0x0b, 0x20);
        v = inb(0xa0) << 8 | inb(0x20);
        outb(0x0a, 0xa0);
        outb(0x0a, 0x20);

        spin_unlock_irqrestore(&i8259A_lock, flags);

        printk(KERN_DEBUG "... PIC  ISR: %04x\n", v);

        v = inb(0x4d1) << 8 | inb(0x4d0);
        printk(KERN_DEBUG "... PIC ELCR: %04x\n", v);
}
void __init enable_IO_APIC(void)
{
        union IO_APIC_reg_01 reg_01;
        int i8259_apic, i8259_pin;
        int i, apic;
        unsigned long flags;

        for (i = 0; i < PIN_MAP_SIZE; i++) {
                irq_2_pin[i].pin = -1;
                irq_2_pin[i].next = 0;
        }

        /*
         * The number of IO-APIC IRQ registers (== #pins):
         */
        for (apic = 0; apic < nr_ioapics; apic++) {
                spin_lock_irqsave(&ioapic_lock, flags);
                reg_01.raw = io_apic_read(apic, 1);
                spin_unlock_irqrestore(&ioapic_lock, flags);
                nr_ioapic_registers[apic] = reg_01.bits.entries+1;
        }
        for (apic = 0; apic < nr_ioapics; apic++) {
                int pin;
                /* See if any of the pins is in ExtINT mode */
                for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
                        struct IO_APIC_route_entry entry;
                        entry = ioapic_read_entry(apic, pin);

                        /* If the interrupt line is enabled and in ExtInt mode
                         * I have found the pin where the i8259 is connected.
                         */
                        if ((entry.mask == 0) && (entry.delivery_mode == dest_ExtINT)) {
                                ioapic_i8259.apic = apic;
                                ioapic_i8259.pin  = pin;
                                goto found_i8259;
                        }
                }
        }
 found_i8259:
        /* See whether the MP table has reported the ExtINT */
        i8259_pin  = find_isa_irq_pin(0, mp_ExtINT);
        i8259_apic = find_isa_irq_apic(0, mp_ExtINT);
        /* Trust the MP table if nothing is setup in the hardware */
        if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) {
                printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n");
                ioapic_i8259.pin  = i8259_pin;
                ioapic_i8259.apic = i8259_apic;
        }
        /* Complain if the MP table and the hardware disagree */
        if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) &&
            (i8259_pin >= 0) && (ioapic_i8259.pin >= 0))
                printk(KERN_WARNING "ExtINT in hardware and MP table differ\n");

        /*
         * Do not trust the IO-APIC being empty at bootup
         */
        clear_IO_APIC();
}
/*
 * Not an __init, needed by the reboot code
 */
void disable_IO_APIC(void)
{
        /*
         * Clear the IO-APIC before rebooting:
         */
        clear_IO_APIC();

        /*
         * If the i8259 is routed through an IOAPIC
         * Put that IOAPIC in virtual wire mode
         * so legacy interrupts can be delivered.
         */
        if (ioapic_i8259.pin != -1) {
                struct IO_APIC_route_entry entry;

                memset(&entry, 0, sizeof(entry));
                entry.mask            = 0; /* Enabled */
                entry.trigger         = 0; /* Edge */
                entry.irr             = 0;
                entry.polarity        = 0; /* High */
                entry.delivery_status = 0;
                entry.dest_mode       = 0; /* Physical */
                entry.delivery_mode   = dest_ExtINT; /* ExtInt */
                entry.vector          = 0;
                entry.dest            = read_apic_id();

                /*
                 * Add it to the IO-APIC irq-routing table:
                 */
                ioapic_write_entry(ioapic_i8259.apic, ioapic_i8259.pin, entry);
        }

        disconnect_bsp_APIC(ioapic_i8259.pin != -1);
}
/*
 * There is a nasty bug in some older SMP boards, their mptable lies
 * about the timer IRQ. We do the following to work around the situation:
 *
 *	- timer IRQ defaults to IO-APIC IRQ
 *	- if this function detects that timer IRQs are defunct, then we fall
 *	  back to ISA timer IRQs
 */
static int __init timer_irq_works(void)
{
        unsigned long t1 = jiffies;
        unsigned long flags;

        local_save_flags(flags);
        local_irq_enable();
        /* Let ten ticks pass... */
        mdelay((10 * 1000) / HZ);
        local_irq_restore(flags);

        /*
         * Expect a few ticks at least, to be sure some possible
         * glue logic does not lock up after one or two first
         * ticks in a non-ExtINT mode.  Also the local APIC
         * might have cached one ExtINT interrupt.  Finally, at
         * least one tick may be lost due to delays.
         */

        /* jiffies wrap? */
        if (time_after(jiffies, t1 + 4))
                return 1;
        return 0;
}
/*
 * In the SMP+IOAPIC case it might happen that there are an unspecified
 * number of pending IRQ events unhandled. These cases are very rare,
 * so we 'resend' these IRQs via IPIs, to the same CPU. It's much
 * better to do it this way as thus we do not have to be aware of
 * 'pending' interrupts in the IRQ path, except at this point.
 */
/*
 * Edge triggered needs to resend any interrupt
 * that was delayed but this is now handled in the device
 * independent code.
 */

/*
 * Starting up an edge-triggered IO-APIC interrupt is
 * nasty - we need to make sure that we get the edge.
 * If it is already asserted for some reason, we need to
 * return 1 to indicate that it was pending.
 *
 * This is not complete - we should be able to fake
 * an edge even if it isn't on the 8259A...
 */

static unsigned int startup_ioapic_irq(unsigned int irq)
{
        int was_pending = 0;
        unsigned long flags;

        spin_lock_irqsave(&ioapic_lock, flags);
        if (irq < 16) {
                disable_8259A_irq(irq);
                if (i8259A_irq_pending(irq))
                        was_pending = 1;
        }
        __unmask_IO_APIC_irq(irq);
        spin_unlock_irqrestore(&ioapic_lock, flags);

        return was_pending;
}
static int ioapic_retrigger_irq(unsigned int irq)
{
        struct irq_cfg *cfg = &irq_cfg[irq];
        cpumask_t mask;
        unsigned long flags;

        spin_lock_irqsave(&vector_lock, flags);
        mask = cpumask_of_cpu(first_cpu(cfg->domain));
        send_IPI_mask(mask, cfg->vector);
        spin_unlock_irqrestore(&vector_lock, flags);

        return 1;
}
/*
 * Level and edge triggered IO-APIC interrupts need different handling,
 * so we use two separate IRQ descriptors. Edge triggered IRQs can be
 * handled with the level-triggered descriptor, but that one has slightly
 * more overhead. Level-triggered interrupts cannot be handled with the
 * edge-triggered handler, without risking IRQ storms and other ugly
 * races.
 */

#ifdef CONFIG_SMP

#ifdef CONFIG_INTR_REMAP
static void ir_irq_migration(struct work_struct *work);

static DECLARE_DELAYED_WORK(ir_migration_work, ir_irq_migration);
/*
 * Migrate the IO-APIC irq in the presence of intr-remapping.
 *
 * For edge triggered, irq migration is a simple atomic update (of vector
 * and cpu destination) of the IRTE and a flush of the hardware cache.
 *
 * For level triggered, we need to modify the io-apic RTE as well with the
 * updated vector information, along with modifying the IRTE with vector and
 * destination. So irq migration for level triggered is a little bit more
 * complex compared to edge triggered migration. But the good news is, we use
 * the same algorithm for level triggered migration as we have today, the only
 * difference being that we now initiate the irq migration from process
 * context instead of the interrupt context.
 *
 * In future, when we do a directed EOI (combined with cpu EOI broadcast
 * suppression) to the IO-APIC, level triggered irq migration will also be
 * as simple as edge triggered migration and we can do the irq migration
 * with a simple atomic update to the IO-APIC RTE.
 */
static void migrate_ioapic_irq(int irq, cpumask_t mask)
{
        struct irq_cfg *cfg = irq_cfg + irq;
        struct irq_desc *desc = irq_desc + irq;
        cpumask_t tmp, cleanup_mask;
        struct irte irte;
        int modify_ioapic_rte = desc->status & IRQ_LEVEL;
        unsigned int dest;
        unsigned long flags;

        cpus_and(tmp, mask, cpu_online_map);
        if (cpus_empty(tmp))
                return;

        if (get_irte(irq, &irte))
                return;

        if (assign_irq_vector(irq, mask))
                return;

        cpus_and(tmp, cfg->domain, mask);
        dest = cpu_mask_to_apicid(tmp);

        if (modify_ioapic_rte) {
                spin_lock_irqsave(&ioapic_lock, flags);
                __target_IO_APIC_irq(irq, dest, cfg->vector);
                spin_unlock_irqrestore(&ioapic_lock, flags);
        }

        irte.vector = cfg->vector;
        irte.dest_id = IRTE_DEST(dest);

        /*
         * Modify the IRTE and flush the interrupt entry cache.
         */
        modify_irte(irq, &irte);

        if (cfg->move_in_progress) {
                cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
                cfg->move_cleanup_count = cpus_weight(cleanup_mask);
                send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
                cfg->move_in_progress = 0;
        }

        irq_desc[irq].affinity = mask;
}
static int migrate_irq_remapped_level(int irq)
{
        int ret = -1;

        mask_IO_APIC_irq(irq);

        if (io_apic_level_ack_pending(irq)) {
                /*
                 * Interrupt in progress. Migrating irq now will change the
                 * vector information in the IO-APIC RTE and that will confuse
                 * the EOI broadcast performed by cpu.
                 * So, delay the irq migration to the next instance.
                 */
                schedule_delayed_work(&ir_migration_work, 1);
                goto unmask;
        }

        /* everything is clear. we have right of way */
        migrate_ioapic_irq(irq, irq_desc[irq].pending_mask);

        ret = 0;
        irq_desc[irq].status &= ~IRQ_MOVE_PENDING;
        cpus_clear(irq_desc[irq].pending_mask);

unmask:
        unmask_IO_APIC_irq(irq);
        return ret;
}
static void ir_irq_migration(struct work_struct *work)
{
        int irq;

        for (irq = 0; irq < NR_IRQS; irq++) {
                struct irq_desc *desc = irq_desc + irq;
                if (desc->status & IRQ_MOVE_PENDING) {
                        unsigned long flags;

                        spin_lock_irqsave(&desc->lock, flags);
                        if (!desc->chip->set_affinity ||
                            !(desc->status & IRQ_MOVE_PENDING)) {
                                desc->status &= ~IRQ_MOVE_PENDING;
                                spin_unlock_irqrestore(&desc->lock, flags);
                                continue;
                        }

                        desc->chip->set_affinity(irq,
                                                 irq_desc[irq].pending_mask);
                        spin_unlock_irqrestore(&desc->lock, flags);
                }
        }
}
/*
 * Migrates the IRQ destination in the process context.
 */
static void set_ir_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
{
        if (irq_desc[irq].status & IRQ_LEVEL) {
                irq_desc[irq].status |= IRQ_MOVE_PENDING;
                irq_desc[irq].pending_mask = mask;
                migrate_irq_remapped_level(irq);
                return;
        }

        migrate_ioapic_irq(irq, mask);
}
#endif
asmlinkage void smp_irq_move_cleanup_interrupt(void)
{
        unsigned vector, me;

        ack_APIC_irq();
        exit_idle();
        irq_enter();

        me = smp_processor_id();
        for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
                unsigned int irq;
                struct irq_desc *desc;
                struct irq_cfg *cfg;

                irq = __get_cpu_var(vector_irq)[vector];
                if (irq >= NR_IRQS)
                        continue;

                desc = irq_desc + irq;
                cfg = irq_cfg + irq;
                spin_lock(&desc->lock);
                if (!cfg->move_cleanup_count)
                        goto unlock;

                if ((vector == cfg->vector) && cpu_isset(me, cfg->domain))
                        goto unlock;

                __get_cpu_var(vector_irq)[vector] = -1;
                cfg->move_cleanup_count--;
unlock:
                spin_unlock(&desc->lock);
        }

        irq_exit();
}
static void irq_complete_move(unsigned int irq)
{
        struct irq_cfg *cfg = irq_cfg + irq;
        unsigned vector, me;

        if (likely(!cfg->move_in_progress))
                return;

        vector = ~get_irq_regs()->orig_ax;
        me = smp_processor_id();
        if ((vector == cfg->vector) && cpu_isset(me, cfg->domain)) {
                cpumask_t cleanup_mask;

                cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
                cfg->move_cleanup_count = cpus_weight(cleanup_mask);
                send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
                cfg->move_in_progress = 0;
        }
}
#else
static inline void irq_complete_move(unsigned int irq) {}
#endif
#ifdef CONFIG_INTR_REMAP
static void ack_x2apic_level(unsigned int irq)
{
        ack_x2APIC_irq();
}

static void ack_x2apic_edge(unsigned int irq)
{
        ack_x2APIC_irq();
}
#endif
)
1754 irq_complete_move(irq
);
1755 move_native_irq(irq
);
static void ack_apic_level(unsigned int irq)
{
        int do_unmask_irq = 0;

        irq_complete_move(irq);
#ifdef CONFIG_GENERIC_PENDING_IRQ
        /* If we are moving the irq we need to mask it */
        if (unlikely(irq_desc[irq].status & IRQ_MOVE_PENDING)) {
                do_unmask_irq = 1;
                mask_IO_APIC_irq(irq);
        }
#endif

        /*
         * We must acknowledge the irq before we move it or the acknowledge will
         * not propagate properly.
         */
        ack_APIC_irq();

        /* Now we can move and re-enable the irq */
        if (unlikely(do_unmask_irq)) {
                /* Only migrate the irq if the ack has been received.
                 *
                 * On rare occasions the broadcast level triggered ack gets
                 * delayed going to ioapics, and if we reprogram the
                 * vector while Remote IRR is still set the irq will never
                 * fire again.
                 *
                 * To prevent this scenario we read the Remote IRR bit
                 * of the ioapic.  This has two effects.
                 * - On any sane system the read of the ioapic will
                 *   flush writes (and acks) going to the ioapic from
                 *   this cpu.
                 * - We get to see if the ACK has actually been delivered.
                 *
                 * Based on failed experiments of reprogramming the
                 * ioapic entry from outside of irq context starting
                 * with masking the ioapic entry and then polling until
                 * Remote IRR was clear before reprogramming the
                 * ioapic I don't trust the Remote IRR bit to be
                 * completely accurate.
                 *
                 * However there appears to be no other way to plug
                 * this race, so if the Remote IRR bit is not
                 * accurate and is causing problems then it is a hardware bug
                 * and you can go talk to the chipset vendor about it.
                 */
                if (!io_apic_level_ack_pending(irq))
                        move_masked_irq(irq);
                unmask_IO_APIC_irq(irq);
        }
}
static struct irq_chip ioapic_chip __read_mostly = {
        .name		= "IO-APIC",
        .startup	= startup_ioapic_irq,
        .mask		= mask_IO_APIC_irq,
        .unmask		= unmask_IO_APIC_irq,
        .ack		= ack_apic_edge,
        .eoi		= ack_apic_level,
#ifdef CONFIG_SMP
        .set_affinity	= set_ioapic_affinity_irq,
#endif
        .retrigger	= ioapic_retrigger_irq,
};
#ifdef CONFIG_INTR_REMAP
static struct irq_chip ir_ioapic_chip __read_mostly = {
        .name		= "IR-IO-APIC",
        .startup	= startup_ioapic_irq,
        .mask		= mask_IO_APIC_irq,
        .unmask		= unmask_IO_APIC_irq,
        .ack		= ack_x2apic_edge,
        .eoi		= ack_x2apic_level,
#ifdef CONFIG_SMP
        .set_affinity	= set_ir_ioapic_affinity_irq,
#endif
        .retrigger	= ioapic_retrigger_irq,
};
#endif
static inline void init_IO_APIC_traps(void)
{
        int irq;

        /*
         * NOTE! The local APIC isn't very good at handling
         * multiple interrupts at the same interrupt level.
         * As the interrupt level is determined by taking the
         * vector number and shifting that right by 4, we
         * want to spread these out a bit so that they don't
         * all fall in the same interrupt level.
         *
         * Also, we've got to be careful not to trash gate
         * 0x80, because int 0x80 is hm, kind of importantish. ;)
         */
        for (irq = 0; irq < NR_IRQS; irq++) {
                if (IO_APIC_IRQ(irq) && !irq_cfg[irq].vector) {
                        /*
                         * Hmm.. We don't have an entry for this,
                         * so default to an old-fashioned 8259
                         * interrupt if we can..
                         */
                        if (irq < 16)
                                make_8259A_irq(irq);
                        else
                                /* Strange. Oh, well.. */
                                irq_desc[irq].chip = &no_irq_chip;
                }
        }
}
static void unmask_lapic_irq(unsigned int irq)
{
        unsigned long v;

        v = apic_read(APIC_LVT0);
        apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED);
}

static void mask_lapic_irq(unsigned int irq)
{
        unsigned long v;

        v = apic_read(APIC_LVT0);
        apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
}

static void ack_lapic_irq(unsigned int irq)
{
        ack_APIC_irq();
}
static struct irq_chip lapic_chip __read_mostly = {
        .name		= "local-APIC",
        .mask		= mask_lapic_irq,
        .unmask		= unmask_lapic_irq,
        .ack		= ack_lapic_irq,
};
static void lapic_register_intr(int irq)
{
        irq_desc[irq].status &= ~IRQ_LEVEL;
        set_irq_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq,
                                      "edge");
}
static void __init setup_nmi(void)
{
        /*
         * Dirty trick to enable the NMI watchdog ...
         * We put the 8259A master into AEOI mode and
         * unmask on all local APICs LVT0 as NMI.
         *
         * The idea to use the 8259A in AEOI mode ('8259A Virtual Wire')
         * is from Maciej W. Rozycki - so we do not have to EOI from
         * the NMI handler or the timer interrupt.
         */
        printk(KERN_INFO "activating NMI Watchdog ...");

        enable_NMI_through_LVT0();

        printk(" done.\n");
}
/*
 * This looks a bit hackish but it's about the only way of sending
 * a few INTA cycles to 8259As and any associated glue logic.  ICR does
 * not support the ExtINT mode, unfortunately.  We need to send these
 * cycles as some i82489DX-based boards have glue logic that keeps the
 * 8259A interrupt line asserted until INTA.  --macro
 */
static inline void __init unlock_ExtINT_logic(void)
{
        int apic, pin, i;
        struct IO_APIC_route_entry entry0, entry1;
        unsigned char save_control, save_freq_select;

        pin  = find_isa_irq_pin(8, mp_INT);
        apic = find_isa_irq_apic(8, mp_INT);
        if (pin == -1)
                return;

        entry0 = ioapic_read_entry(apic, pin);

        clear_IO_APIC_pin(apic, pin);

        memset(&entry1, 0, sizeof(entry1));

        entry1.dest_mode = 0;			/* physical delivery */
        entry1.mask = 0;			/* unmask IRQ now */
        entry1.dest = hard_smp_processor_id();
        entry1.delivery_mode = dest_ExtINT;
        entry1.polarity = entry0.polarity;
        entry1.trigger = 0;
        entry1.vector = 0;

        ioapic_write_entry(apic, pin, entry1);

        save_control = CMOS_READ(RTC_CONTROL);
        save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
        CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6,
                   RTC_FREQ_SELECT);
        CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL);

        i = 100;
        while (i-- > 0) {
                mdelay(10);
                if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF)
                        i -= 10;
        }

        CMOS_WRITE(save_control, RTC_CONTROL);
        CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
        clear_IO_APIC_pin(apic, pin);

        ioapic_write_entry(apic, pin, entry0);
}
/*
 * This code may look a bit paranoid, but it's supposed to cooperate with
 * a wide range of boards and BIOS bugs.  Fortunately only the timer IRQ
 * is so screwy.  Thanks to Brian Perkins for testing/hacking this beast
 * fanatically on his truly buggy board.
 *
 * FIXME: really need to revamp this for modern platforms only.
 */
static inline void __init check_timer(void)
{
        struct irq_cfg *cfg = irq_cfg + 0;
        int apic1, pin1, apic2, pin2;
        unsigned long flags;
        int no_pin1 = 0;

        local_irq_save(flags);

        /*
         * get/set the timer IRQ vector:
         */
        disable_8259A_irq(0);
        assign_irq_vector(0, TARGET_CPUS);

        /*
         * As IRQ0 is to be enabled in the 8259A, the virtual
         * wire has to be disabled in the local APIC.
         */
        apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
        init_8259A(1);

        pin1  = find_isa_irq_pin(0, mp_INT);
        apic1 = find_isa_irq_apic(0, mp_INT);
        pin2  = ioapic_i8259.pin;
        apic2 = ioapic_i8259.apic;

        apic_printk(APIC_VERBOSE, KERN_INFO "..TIMER: vector=0x%02X apic1=%d pin1=%d apic2=%d pin2=%d\n",
                    cfg->vector, apic1, pin1, apic2, pin2);

        /*
         * Some BIOS writers are clueless and report the ExtINTA
         * I/O APIC input from the cascaded 8259A as the timer
         * interrupt input.  So just in case, if only one pin
         * was found above, try it both directly and through the
         * 8259A.
         */
        if (pin1 == -1) {
                if (intr_remapping_enabled)
                        panic("BIOS bug: timer not connected to IO-APIC");
                pin1 = pin2;
                apic1 = apic2;
                no_pin1 = 1;
        } else if (pin2 == -1) {
                pin2 = pin1;
                apic2 = apic1;
        }

        if (pin1 != -1) {
                /*
                 * Ok, does IRQ0 through the IOAPIC work?
                 */
                if (no_pin1) {
                        add_pin_to_irq(0, apic1, pin1);
                        setup_timer_IRQ0_pin(apic1, pin1, cfg->vector);
                }
                unmask_IO_APIC_irq(0);
                if (!no_timer_check && timer_irq_works()) {
                        if (nmi_watchdog == NMI_IO_APIC) {
                                setup_nmi();
                                enable_8259A_irq(0);
                        }
                        if (disable_timer_pin_1 > 0)
                                clear_IO_APIC_pin(0, pin1);
                        goto out;
                }
                if (intr_remapping_enabled)
                        panic("timer doesn't work through Interrupt-remapped IO-APIC");
                clear_IO_APIC_pin(apic1, pin1);
                if (!no_pin1)
                        apic_printk(APIC_QUIET, KERN_ERR "..MP-BIOS bug: "
                                    "8254 timer not connected to IO-APIC\n");

                apic_printk(APIC_VERBOSE, KERN_INFO
                            "...trying to set up timer (IRQ0) "
                            "through the 8259A ... ");
                apic_printk(APIC_VERBOSE, "\n..... (found apic %d pin %d) ...",
                            apic2, pin2);
                /*
                 * legacy devices should be connected to IO APIC #0
                 */
                replace_pin_at_irq(0, apic1, pin1, apic2, pin2);
                setup_timer_IRQ0_pin(apic2, pin2, cfg->vector);
                unmask_IO_APIC_irq(0);
                enable_8259A_irq(0);
                if (timer_irq_works()) {
                        apic_printk(APIC_VERBOSE, " works.\n");
                        timer_through_8259 = 1;
                        if (nmi_watchdog == NMI_IO_APIC) {
                                disable_8259A_irq(0);
                                setup_nmi();
                                enable_8259A_irq(0);
                        }
                        goto out;
                }
                /*
                 * Cleanup, just in case ...
                 */
                disable_8259A_irq(0);
                clear_IO_APIC_pin(apic2, pin2);
                apic_printk(APIC_VERBOSE, " failed.\n");
        }

        if (nmi_watchdog == NMI_IO_APIC) {
                printk(KERN_WARNING "timer doesn't work through the IO-APIC - disabling NMI Watchdog!\n");
                nmi_watchdog = NMI_NONE;
        }

        apic_printk(APIC_VERBOSE, KERN_INFO "...trying to set up timer as Virtual Wire IRQ...");

        lapic_register_intr(0);
        apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector);	/* Fixed mode */
        enable_8259A_irq(0);

        if (timer_irq_works()) {
                apic_printk(APIC_VERBOSE, " works.\n");
                goto out;
        }
        disable_8259A_irq(0);
        apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector);
        apic_printk(APIC_VERBOSE, " failed.\n");

        apic_printk(APIC_VERBOSE, KERN_INFO "...trying to set up timer as ExtINT IRQ...");

        init_8259A(0);
        make_8259A_irq(0);
        apic_write(APIC_LVT0, APIC_DM_EXTINT);

        unlock_ExtINT_logic();

        if (timer_irq_works()) {
                apic_printk(APIC_VERBOSE, " works.\n");
                goto out;
        }
        apic_printk(APIC_VERBOSE, " failed :(.\n");
        panic("IO-APIC + timer doesn't work! Try using the 'noapic' kernel parameter\n");
out:
        local_irq_restore(flags);
}
static int __init notimercheck(char *s)
{
        no_timer_check = 1;
        return 1;
}
__setup("no_timer_check", notimercheck);
/*
 * Traditionally ISA IRQ2 is the cascade IRQ, and is not available
 * to devices.  However there may be an I/O APIC pin available for
 * this interrupt regardless.  The pin may be left unconnected, but
 * typically it will be reused as an ExtINT cascade interrupt for
 * the master 8259A.  In the MPS case such a pin will normally be
 * reported as an ExtINT interrupt in the MP table.  With ACPI
 * there is no provision for ExtINT interrupts, and in the absence
 * of an override it would be treated as an ordinary ISA I/O APIC
 * interrupt, that is edge-triggered and unmasked by default.  We
 * used to do this, but it caused problems on some systems because
 * of the NMI watchdog and sometimes IRQ0 of the 8254 timer using
 * the same ExtINT cascade interrupt to drive the local APIC of the
 * bootstrap processor.  Therefore we refrain from routing IRQ2 to
 * the I/O APIC in all cases now.  No actual device should request
 * it anyway.  --macro
 */
#define PIC_IRQS	(1<<2)
void __init setup_IO_APIC(void)
{
        /*
         * calling enable_IO_APIC() is moved to setup_local_APIC for BP
         */

        io_apic_irqs = ~PIC_IRQS;

        apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n");

        sync_Arb_IDs();
        setup_IO_APIC_irqs();
        init_IO_APIC_traps();
        check_timer();
}
struct sysfs_ioapic_data {
        struct sys_device dev;
        struct IO_APIC_route_entry entry[0];
};
static struct sysfs_ioapic_data *mp_ioapic_data[MAX_IO_APICS];
static int ioapic_suspend(struct sys_device *dev, pm_message_t state)
{
        struct IO_APIC_route_entry *entry;
        struct sysfs_ioapic_data *data;
        int i;

        data = container_of(dev, struct sysfs_ioapic_data, dev);
        entry = data->entry;
        for (i = 0; i < nr_ioapic_registers[dev->id]; i++, entry++)
                *entry = ioapic_read_entry(dev->id, i);

        return 0;
}
static int ioapic_resume(struct sys_device *dev)
{
        struct IO_APIC_route_entry *entry;
        struct sysfs_ioapic_data *data;
        unsigned long flags;
        union IO_APIC_reg_00 reg_00;
        int i;

        data = container_of(dev, struct sysfs_ioapic_data, dev);
        entry = data->entry;

        spin_lock_irqsave(&ioapic_lock, flags);
        reg_00.raw = io_apic_read(dev->id, 0);
        if (reg_00.bits.ID != mp_ioapics[dev->id].mp_apicid) {
                reg_00.bits.ID = mp_ioapics[dev->id].mp_apicid;
                io_apic_write(dev->id, 0, reg_00.raw);
        }
        spin_unlock_irqrestore(&ioapic_lock, flags);
        for (i = 0; i < nr_ioapic_registers[dev->id]; i++)
                ioapic_write_entry(dev->id, i, entry[i]);

        return 0;
}
static struct sysdev_class ioapic_sysdev_class = {
        .name		= "ioapic",
        .suspend	= ioapic_suspend,
        .resume		= ioapic_resume,
};
static int __init ioapic_init_sysfs(void)
{
        struct sys_device *dev;
        int i, size, error;

        error = sysdev_class_register(&ioapic_sysdev_class);
        if (error)
                return error;

        for (i = 0; i < nr_ioapics; i++) {
                size = sizeof(struct sys_device) + nr_ioapic_registers[i]
                        * sizeof(struct IO_APIC_route_entry);
                mp_ioapic_data[i] = kzalloc(size, GFP_KERNEL);
                if (!mp_ioapic_data[i]) {
                        printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
                        continue;
                }
                dev = &mp_ioapic_data[i]->dev;
                dev->id = i;
                dev->cls = &ioapic_sysdev_class;
                error = sysdev_register(dev);
                if (error) {
                        kfree(mp_ioapic_data[i]);
                        mp_ioapic_data[i] = NULL;
                        printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
                        continue;
                }
        }

        return 0;
}

device_initcall(ioapic_init_sysfs);
/*
 * Dynamic irq allocation and deallocation
 */
int create_irq(void)
{
        /* Allocate an unused irq */
        int irq;
        int new;
        unsigned long flags;

        irq = -ENOSPC;
        spin_lock_irqsave(&vector_lock, flags);
        for (new = (NR_IRQS - 1); new >= 0; new--) {
                if (platform_legacy_irq(new))
                        continue;
                if (irq_cfg[new].vector != 0)
                        continue;
                if (__assign_irq_vector(new, TARGET_CPUS) == 0)
                        irq = new;
                break;
        }
        spin_unlock_irqrestore(&vector_lock, flags);

        if (irq >= 0)
                dynamic_irq_init(irq);
        return irq;
}
)
2286 unsigned long flags
;
2288 dynamic_irq_cleanup(irq
);
2290 #ifdef CONFIG_INTR_REMAP
2293 spin_lock_irqsave(&vector_lock
, flags
);
2294 __clear_irq_vector(irq
);
2295 spin_unlock_irqrestore(&vector_lock
, flags
);
/*
 * MSI message composition
 */
#ifdef CONFIG_PCI_MSI
static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
{
        struct irq_cfg *cfg = irq_cfg + irq;
        int err;
        unsigned dest;
        cpumask_t tmp;

        tmp = TARGET_CPUS;
        err = assign_irq_vector(irq, tmp);
        if (err)
                return err;

        cpus_and(tmp, cfg->domain, tmp);
        dest = cpu_mask_to_apicid(tmp);

#ifdef CONFIG_INTR_REMAP
        if (irq_remapped(irq)) {
                struct irte irte;
                int ir_index;
                u16 sub_handle;

                ir_index = map_irq_to_irte_handle(irq, &sub_handle);
                BUG_ON(ir_index == -1);

                memset(&irte, 0, sizeof(irte));

                irte.present = 1;
                irte.dst_mode = INT_DEST_MODE;
                irte.trigger_mode = 0; /* edge */
                irte.dlvry_mode = INT_DELIVERY_MODE;
                irte.vector = cfg->vector;
                irte.dest_id = IRTE_DEST(dest);

                modify_irte(irq, &irte);

                msg->address_hi = MSI_ADDR_BASE_HI;
                msg->data = sub_handle;
                msg->address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_IR_EXT_INT |
                                  MSI_ADDR_IR_SHV |
                                  MSI_ADDR_IR_INDEX1(ir_index) |
                                  MSI_ADDR_IR_INDEX2(ir_index);
        } else
#endif
        {
                msg->address_hi = MSI_ADDR_BASE_HI;
                msg->address_lo =
                        MSI_ADDR_BASE_LO |
                        ((INT_DEST_MODE == 0) ?
                                MSI_ADDR_DEST_MODE_PHYSICAL:
                                MSI_ADDR_DEST_MODE_LOGICAL) |
                        ((INT_DELIVERY_MODE != dest_LowestPrio) ?
                                MSI_ADDR_REDIRECTION_CPU:
                                MSI_ADDR_REDIRECTION_LOWPRI) |
                        MSI_ADDR_DEST_ID(dest);

                msg->data =
                        MSI_DATA_TRIGGER_EDGE |
                        MSI_DATA_LEVEL_ASSERT |
                        ((INT_DELIVERY_MODE != dest_LowestPrio) ?
                                MSI_DATA_DELIVERY_FIXED:
                                MSI_DATA_DELIVERY_LOWPRI) |
                        MSI_DATA_VECTOR(cfg->vector);
        }
        return err;
}
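
/*
 * Illustration (not compiled): in the non-remapped branch above, a message
 * for a hypothetical vector 0x51 and APIC ID 0x03 in physical mode with
 * fixed delivery works out to:
 *
 *	msg->address_hi = MSI_ADDR_BASE_HI;
 *	msg->address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_DEST_MODE_PHYSICAL |
 *			  MSI_ADDR_REDIRECTION_CPU | MSI_ADDR_DEST_ID(0x03);
 *	msg->data	= MSI_DATA_TRIGGER_EDGE | MSI_DATA_LEVEL_ASSERT |
 *			  MSI_DATA_DELIVERY_FIXED | MSI_DATA_VECTOR(0x51);
 */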
#ifdef CONFIG_SMP
static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
{
        struct irq_cfg *cfg = irq_cfg + irq;
        struct msi_msg msg;
        unsigned int dest;
        cpumask_t tmp;

        cpus_and(tmp, mask, cpu_online_map);
        if (cpus_empty(tmp))
                return;

        if (assign_irq_vector(irq, mask))
                return;

        cpus_and(tmp, cfg->domain, mask);
        dest = cpu_mask_to_apicid(tmp);

        read_msi_msg(irq, &msg);

        msg.data &= ~MSI_DATA_VECTOR_MASK;
        msg.data |= MSI_DATA_VECTOR(cfg->vector);
        msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
        msg.address_lo |= MSI_ADDR_DEST_ID(dest);

        write_msi_msg(irq, &msg);
        irq_desc[irq].affinity = mask;
}

#ifdef CONFIG_INTR_REMAP
/*
 * Migrate the MSI irq to another cpumask. This migration is
 * done in the process context using interrupt-remapping hardware.
 */
static void ir_set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
{
	struct irq_cfg *cfg = irq_cfg + irq;
	unsigned int dest;
	cpumask_t tmp, cleanup_mask;
	struct irte irte;

	cpus_and(tmp, mask, cpu_online_map);
	if (cpus_empty(tmp))
		return;

	if (get_irte(irq, &irte))
		return;

	if (assign_irq_vector(irq, mask))
		return;

	cpus_and(tmp, cfg->domain, mask);
	dest = cpu_mask_to_apicid(tmp);

	irte.vector = cfg->vector;
	irte.dest_id = IRTE_DEST(dest);

	/*
	 * Atomically update the IRTE with the new destination and vector.
	 */
	modify_irte(irq, &irte);

	/*
	 * After this point, all the interrupts will start arriving
	 * at the new destination. So, time to clean up the previous
	 * vector allocation.
	 */
	if (cfg->move_in_progress) {
		cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
		cfg->move_cleanup_count = cpus_weight(cleanup_mask);
		send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
		cfg->move_in_progress = 0;
	}

	irq_desc[irq].affinity = mask;
}
#endif
#endif /* CONFIG_SMP */

/*
 * IRQ chip for MSI PCI/PCI-X/PCI-Express devices,
 * which implement the MSI or MSI-X capability structure.
 */
static struct irq_chip msi_chip = {
	.name		= "PCI-MSI",
	.unmask		= unmask_msi_irq,
	.mask		= mask_msi_irq,
	.ack		= ack_apic_edge,
#ifdef CONFIG_SMP
	.set_affinity	= set_msi_irq_affinity,
#endif
	.retrigger	= ioapic_retrigger_irq,
};

#ifdef CONFIG_INTR_REMAP
static struct irq_chip msi_ir_chip = {
	.name		= "IR-PCI-MSI",
	.unmask		= unmask_msi_irq,
	.mask		= mask_msi_irq,
	.ack		= ack_x2apic_edge,
#ifdef CONFIG_SMP
	.set_affinity	= ir_set_msi_irq_affinity,
#endif
	.retrigger	= ioapic_retrigger_irq,
};

/*
 * Map the PCI dev to the corresponding remapping hardware unit
 * and allocate 'nvec' consecutive interrupt-remapping table entries
 * in it.
 */
static int msi_alloc_irte(struct pci_dev *dev, int irq, int nvec)
{
	struct intel_iommu *iommu;
	int index;

	iommu = map_dev_to_ir(dev);
	if (!iommu) {
		printk(KERN_ERR
		       "Unable to map PCI %s to iommu\n", pci_name(dev));
		return -ENOENT;
	}

	index = alloc_irte(iommu, irq, nvec);
	if (index < 0) {
		printk(KERN_ERR
		       "Unable to allocate %d IRTE for PCI %s\n", nvec,
		       pci_name(dev));
		return -ENOSPC;
	}
	return index;
}
#endif

static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc, int irq)
{
	struct msi_msg msg;
	int ret;

	ret = msi_compose_msg(dev, irq, &msg);
	if (ret < 0)
		return ret;

	set_irq_msi(irq, desc);
	write_msi_msg(irq, &msg);

#ifdef CONFIG_INTR_REMAP
	if (irq_remapped(irq)) {
		struct irq_desc *desc = irq_desc + irq;
		/*
		 * irq migration in process context
		 */
		desc->status |= IRQ_MOVE_PCNTXT;
		set_irq_chip_and_handler_name(irq, &msi_ir_chip,
					      handle_edge_irq, "edge");
	} else
#endif
		set_irq_chip_and_handler_name(irq, &msi_chip,
					      handle_edge_irq, "edge");

	return 0;
}

int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
{
	int irq, ret;

	irq = create_irq();
	if (irq < 0)
		return irq;

#ifdef CONFIG_INTR_REMAP
	if (!intr_remapping_enabled)
		goto no_ir;

	ret = msi_alloc_irte(dev, irq, 1);
	if (ret < 0)
		goto error;
no_ir:
#endif
	ret = setup_msi_irq(dev, desc, irq);
	if (ret < 0) {
		destroy_irq(irq);
		return ret;
	}
	return 0;

#ifdef CONFIG_INTR_REMAP
error:
	destroy_irq(irq);
	return ret;
#endif
}

int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
	int irq, ret, sub_handle;
	struct msi_desc *desc;
#ifdef CONFIG_INTR_REMAP
	struct intel_iommu *iommu = NULL;
	int index = 0;
#endif

	sub_handle = 0;
	list_for_each_entry(desc, &dev->msi_list, list) {
		irq = create_irq();
		if (irq < 0)
			return irq;
#ifdef CONFIG_INTR_REMAP
		if (!intr_remapping_enabled)
			goto no_ir;

		if (!sub_handle) {
			/*
			 * allocate the consecutive block of IRTE's
			 * for 'nvec'
			 */
			index = msi_alloc_irte(dev, irq, nvec);
			if (index < 0) {
				ret = index;
				goto error;
			}
		} else {
			iommu = map_dev_to_ir(dev);
			if (!iommu) {
				ret = -ENOENT;
				goto error;
			}
			/*
			 * setup the mapping between the irq and the IRTE
			 * base index, the sub_handle pointing to the
			 * appropriate interrupt remap table entry.
			 */
			set_irte_irq(irq, iommu, index, sub_handle);
		}
no_ir:
#endif
		ret = setup_msi_irq(dev, desc, irq);
		if (ret < 0)
			goto error;
		sub_handle++;
	}
	return 0;

error:
	destroy_irq(irq);
	return ret;
}

void arch_teardown_msi_irq(unsigned int irq)
{
	destroy_irq(irq);
}

#ifdef CONFIG_DMAR
#ifdef CONFIG_SMP
static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask)
{
	struct irq_cfg *cfg = irq_cfg + irq;
	struct msi_msg msg;
	unsigned int dest;
	cpumask_t tmp;

	cpus_and(tmp, mask, cpu_online_map);
	if (cpus_empty(tmp))
		return;

	if (assign_irq_vector(irq, mask))
		return;

	cpus_and(tmp, cfg->domain, mask);
	dest = cpu_mask_to_apicid(tmp);

	dmar_msi_read(irq, &msg);

	msg.data &= ~MSI_DATA_VECTOR_MASK;
	msg.data |= MSI_DATA_VECTOR(cfg->vector);
	msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
	msg.address_lo |= MSI_ADDR_DEST_ID(dest);

	dmar_msi_write(irq, &msg);
	irq_desc[irq].affinity = mask;
}
#endif /* CONFIG_SMP */

struct irq_chip dmar_msi_type = {
	.name		= "DMAR_MSI",
	.unmask		= dmar_msi_unmask,
	.mask		= dmar_msi_mask,
	.ack		= ack_apic_edge,
#ifdef CONFIG_SMP
	.set_affinity	= dmar_msi_set_affinity,
#endif
	.retrigger	= ioapic_retrigger_irq,
};

int arch_setup_dmar_msi(unsigned int irq)
{
	int ret;
	struct msi_msg msg;

	ret = msi_compose_msg(NULL, irq, &msg);
	if (ret < 0)
		return ret;
	dmar_msi_write(irq, &msg);
	set_irq_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq,
				      "edge");
	return 0;
}
#endif /* CONFIG_DMAR */

#endif /* CONFIG_PCI_MSI */

/*
 * Hypertransport interrupt support
 */
#ifdef CONFIG_HT_IRQ

#ifdef CONFIG_SMP

static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector)
{
	struct ht_irq_msg msg;
	fetch_ht_irq_msg(irq, &msg);

	msg.address_lo &= ~(HT_IRQ_LOW_VECTOR_MASK | HT_IRQ_LOW_DEST_ID_MASK);
	msg.address_hi &= ~(HT_IRQ_HIGH_DEST_ID_MASK);

	msg.address_lo |= HT_IRQ_LOW_VECTOR(vector) | HT_IRQ_LOW_DEST_ID(dest);
	msg.address_hi |= HT_IRQ_HIGH_DEST_ID(dest);

	write_ht_irq_msg(irq, &msg);
}

static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask)
{
	struct irq_cfg *cfg = irq_cfg + irq;
	unsigned int dest;
	cpumask_t tmp;

	cpus_and(tmp, mask, cpu_online_map);
	if (cpus_empty(tmp))
		return;

	if (assign_irq_vector(irq, mask))
		return;

	cpus_and(tmp, cfg->domain, mask);
	dest = cpu_mask_to_apicid(tmp);

	target_ht_irq(irq, dest, cfg->vector);
	irq_desc[irq].affinity = mask;
}
#endif

static struct irq_chip ht_irq_chip = {
	.name		= "PCI-HT",
	.mask		= mask_ht_irq,
	.unmask		= unmask_ht_irq,
	.ack		= ack_apic_edge,
#ifdef CONFIG_SMP
	.set_affinity	= set_ht_irq_affinity,
#endif
	.retrigger	= ioapic_retrigger_irq,
};

int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
{
	struct irq_cfg *cfg = irq_cfg + irq;
	int err;
	cpumask_t tmp;

	tmp = TARGET_CPUS;
	err = assign_irq_vector(irq, tmp);
	if (!err) {
		struct ht_irq_msg msg;
		unsigned dest;

		cpus_and(tmp, cfg->domain, tmp);
		dest = cpu_mask_to_apicid(tmp);

		msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);

		msg.address_lo =
			HT_IRQ_LOW_BASE |
			HT_IRQ_LOW_DEST_ID(dest) |
			HT_IRQ_LOW_VECTOR(cfg->vector) |
			((INT_DEST_MODE == 0) ?
				HT_IRQ_LOW_DM_PHYSICAL :
				HT_IRQ_LOW_DM_LOGICAL) |
			HT_IRQ_LOW_RQEOI_EDGE |
			((INT_DELIVERY_MODE != dest_LowestPrio) ?
				HT_IRQ_LOW_MT_FIXED :
				HT_IRQ_LOW_MT_ARBITRATED) |
			HT_IRQ_LOW_IRQ_MASKED;

		write_ht_irq_msg(irq, &msg);

		set_irq_chip_and_handler_name(irq, &ht_irq_chip,
					      handle_edge_irq, "edge");
	}
	return err;
}
#endif /* CONFIG_HT_IRQ */

/* --------------------------------------------------------------------------
			ACPI-based IOAPIC Configuration
   -------------------------------------------------------------------------- */

#ifdef CONFIG_ACPI

#define IO_APIC_MAX_ID		0xFE

int __init io_apic_get_redir_entries(int ioapic)
{
	union IO_APIC_reg_01 reg_01;
	unsigned long flags;

	spin_lock_irqsave(&ioapic_lock, flags);
	reg_01.raw = io_apic_read(ioapic, 1);
	spin_unlock_irqrestore(&ioapic_lock, flags);

	return reg_01.bits.entries;
}
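
/*
 * Illustrative sketch, not part of the original file: the register read
 * above reports the index of the highest redirection entry, so the number
 * of usable pins is the returned value plus one; callers that fill
 * nr_ioapic_registers[] apply this +1.  The helper name is hypothetical.
 */
static inline int example_ioapic_pin_count(int ioapic)
{
	return io_apic_get_redir_entries(ioapic) + 1;
}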

int io_apic_set_pci_routing(int ioapic, int pin, int irq,
			    int triggering, int polarity)
{
	if (!IO_APIC_IRQ(irq)) {
		apic_printk(APIC_QUIET, KERN_ERR
			    "IOAPIC[%d]: Invalid reference to IRQ 0\n",
			    ioapic);
		return -EINVAL;
	}

	/*
	 * IRQs < 16 are already in the irq_2_pin[] map
	 */
	if (irq >= 16)
		add_pin_to_irq(irq, ioapic, pin);

	setup_IO_APIC_irq(ioapic, pin, irq, triggering, polarity);

	return 0;
}

int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity)
{
	int i;

	if (skip_ioapic_setup)
		return -1;

	for (i = 0; i < mp_irq_entries; i++)
		if (mp_irqs[i].mp_irqtype == mp_INT &&
		    mp_irqs[i].mp_srcbusirq == bus_irq)
			break;
	if (i >= mp_irq_entries)
		return -1;

	*trigger = irq_trigger(i);
	*polarity = irq_polarity(i);
	return 0;
}
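
/*
 * Illustrative sketch, not part of the original file: how an ACPI caller
 * might look up the MP-table override for ISA IRQ 9 before programming the
 * matching IO-APIC pin.  The helper name is hypothetical.
 */
static inline void example_query_isa_override(void)
{
	int trigger, polarity;

	if (acpi_get_override_irq(9, &trigger, &polarity) == 0)
		printk(KERN_DEBUG "ISA IRQ 9: trigger=%d polarity=%d\n",
		       trigger, polarity);
}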

#endif /* CONFIG_ACPI */

/*
 * This function is currently only a helper for the i386 SMP boot process,
 * where we need to reprogram the ioredtbls to cater for the cpus which
 * have come online, so the mask in all cases should simply be TARGET_CPUS.
 */
#ifdef CONFIG_SMP
void __init setup_ioapic_dest(void)
{
	int pin, ioapic, irq, irq_entry;

	if (skip_ioapic_setup == 1)
		return;

	for (ioapic = 0; ioapic < nr_ioapics; ioapic++) {
		for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) {
			irq_entry = find_irq_entry(ioapic, pin, mp_INT);
			if (irq_entry == -1)
				continue;
			irq = pin_2_irq(irq_entry, ioapic, pin);

			/* setup_IO_APIC_irqs could fail to get a vector for
			 * some device when you have too many devices, because
			 * at that time only the boot cpu is online.
			 */
			if (!irq_cfg[irq].vector)
				setup_IO_APIC_irq(ioapic, pin, irq,
						  irq_trigger(irq_entry),
						  irq_polarity(irq_entry));
#ifdef CONFIG_INTR_REMAP
			else if (intr_remapping_enabled)
				set_ir_ioapic_affinity_irq(irq, TARGET_CPUS);
#endif
			else
				set_ioapic_affinity_irq(irq, TARGET_CPUS);
		}
	}
}
#endif

#define IOAPIC_RESOURCE_NAME_SIZE 11

static struct resource *ioapic_resources;

static struct resource * __init ioapic_setup_resources(void)
{
	unsigned long n;
	struct resource *res;
	char *mem;
	int i;

	if (nr_ioapics <= 0)
		return NULL;

	/* one struct resource plus one fixed-size name per IO-APIC */
	n = IOAPIC_RESOURCE_NAME_SIZE + sizeof(struct resource);
	n *= nr_ioapics;

	mem = alloc_bootmem(n);
	res = (void *)mem;

	if (mem != NULL) {
		mem += sizeof(struct resource) * nr_ioapics;

		for (i = 0; i < nr_ioapics; i++) {
			res[i].name = mem;
			res[i].flags = IORESOURCE_MEM | IORESOURCE_BUSY;
			sprintf(mem, "IOAPIC %u", i);
			mem += IOAPIC_RESOURCE_NAME_SIZE;
		}
	}

	ioapic_resources = res;

	return res;
}
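
/*
 * Illustrative sketch, not part of the original file: the bootmem block
 * allocated above holds nr_ioapics struct resources followed by nr_ioapics
 * fixed-size name strings, so IO-APIC i's name can be located like this.
 * The helper name is hypothetical.
 */
static inline char *example_ioapic_res_name(struct resource *res, int i)
{
	char *names = (char *)(res + nr_ioapics);

	return names + i * IOAPIC_RESOURCE_NAME_SIZE;
}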

void __init ioapic_init_mappings(void)
{
	unsigned long ioapic_phys, idx = FIX_IO_APIC_BASE_0;
	struct resource *ioapic_res;
	int i;

	ioapic_res = ioapic_setup_resources();
	for (i = 0; i < nr_ioapics; i++) {
		if (smp_found_config) {
			ioapic_phys = mp_ioapics[i].mp_apicaddr;
		} else {
			ioapic_phys = (unsigned long)
				alloc_bootmem_pages(PAGE_SIZE);
			ioapic_phys = __pa(ioapic_phys);
		}
		set_fixmap_nocache(idx, ioapic_phys);
		apic_printk(APIC_VERBOSE,
			    "mapped IOAPIC to %016lx (%016lx)\n",
			    __fix_to_virt(idx), ioapic_phys);
		idx++;

		if (ioapic_res != NULL) {
			ioapic_res->start = ioapic_phys;
			ioapic_res->end = ioapic_phys + (4 * 1024) - 1;
			ioapic_res++;
		}
	}
}

static int __init ioapic_insert_resources(void)
{
	int i;
	struct resource *r = ioapic_resources;

	if (!r) {
		printk(KERN_ERR
		       "IO APIC resources could not be allocated.\n");
		return -1;
	}

	for (i = 0; i < nr_ioapics; i++) {
		insert_resource(&iomem_resource, r);
		r++;
	}

	return 0;
}

/* Insert the IO APIC resources after PCI initialization has occurred to
 * handle IO APICs that are mapped in on a BAR in PCI space. */
late_initcall(ioapic_insert_resources);