]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - arch/x86/kernel/io_apic.c
x86: apic - fix unused vars warning in calibrate_APIC_clock
[mirror_ubuntu-artful-kernel.git] / arch / x86 / kernel / io_apic.c
CommitLineData
1da177e4
LT
1/*
2 * Intel IO-APIC support for multi-Pentium hosts.
3 *
4 * Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar, Hajnalka Szabo
5 *
6 * Many thanks to Stig Venaas for trying out countless experimental
7 * patches and reporting/debugging problems patiently!
8 *
9 * (c) 1999, Multiple IO-APIC support, developed by
10 * Ken-ichi Yaku <yaku@css1.kbnes.nec.co.jp> and
11 * Hidemi Kishimoto <kisimoto@css1.kbnes.nec.co.jp>,
12 * further tested and cleaned up by Zach Brown <zab@redhat.com>
13 * and Ingo Molnar <mingo@redhat.com>
14 *
15 * Fixes
16 * Maciej W. Rozycki : Bits for genuine 82489DX APICs;
17 * thanks to Eric Gilmore
18 * and Rolf G. Tews
19 * for testing these extensively
20 * Paul Diefenbaugh : Added full ACPI support
21 */
22
23#include <linux/mm.h>
1da177e4
LT
24#include <linux/interrupt.h>
25#include <linux/init.h>
26#include <linux/delay.h>
27#include <linux/sched.h>
d4057bdb 28#include <linux/pci.h>
1da177e4
LT
29#include <linux/mc146818rtc.h>
30#include <linux/compiler.h>
31#include <linux/acpi.h>
129f6946 32#include <linux/module.h>
1da177e4 33#include <linux/sysdev.h>
3b7d1921 34#include <linux/msi.h>
95d77884 35#include <linux/htirq.h>
7dfb7103 36#include <linux/freezer.h>
f26d6a2b 37#include <linux/kthread.h>
54168ed7 38#include <linux/jiffies.h> /* time_after() */
d4057bdb
YL
39#ifdef CONFIG_ACPI
40#include <acpi/acpi_bus.h>
41#endif
42#include <linux/bootmem.h>
43#include <linux/dmar.h>
58ac1e76 44#include <linux/hpet.h>
54d5d424 45
d4057bdb 46#include <asm/idle.h>
1da177e4
LT
47#include <asm/io.h>
48#include <asm/smp.h>
49#include <asm/desc.h>
d4057bdb
YL
50#include <asm/proto.h>
51#include <asm/acpi.h>
52#include <asm/dma.h>
1da177e4 53#include <asm/timer.h>
306e440d 54#include <asm/i8259.h>
3e4ff115 55#include <asm/nmi.h>
2d3fcc1c 56#include <asm/msidef.h>
8b955b0d 57#include <asm/hypertransport.h>
a4dbc34d 58#include <asm/setup.h>
d4057bdb 59#include <asm/irq_remapping.h>
58ac1e76 60#include <asm/hpet.h>
1da177e4 61
497c9a19 62#include <mach_ipi.h>
1da177e4 63#include <mach_apic.h>
874c4fe3 64#include <mach_apicdef.h>
1da177e4 65
32f71aff
MR
66#define __apicdebuginit(type) static type __init
67
1da177e4 68/*
54168ed7
IM
69 * Is the SiS APIC rmw bug present ?
70 * -1 = don't know, 0 = no, 1 = yes
1da177e4
LT
71 */
72int sis_apic_bug = -1;
73
efa2559f
YL
74static DEFINE_SPINLOCK(ioapic_lock);
75static DEFINE_SPINLOCK(vector_lock);
76
1da177e4
LT
77/*
78 * # of IRQ routing registers
79 */
80int nr_ioapic_registers[MAX_IO_APICS];
81
9f640ccb 82/* I/O APIC entries */
ec2cd0a2 83struct mp_config_ioapic mp_ioapics[MAX_IO_APICS];
9f640ccb
AS
84int nr_ioapics;
85
584f734d 86/* MP IRQ source entries */
2fddb6e2 87struct mp_config_intsrc mp_irqs[MAX_IRQ_SOURCES];
584f734d
AS
88
89/* # of MP IRQ source entries */
90int mp_irq_entries;
91
8732fc4b
AS
92#if defined (CONFIG_MCA) || defined (CONFIG_EISA)
93int mp_bus_id_to_type[MAX_MP_BUSSES];
94#endif
95
96DECLARE_BITMAP(mp_bus_not_pci, MAX_MP_BUSSES);
97
efa2559f
YL
98int skip_ioapic_setup;
99
54168ed7 100static int __init parse_noapic(char *str)
efa2559f
YL
101{
102 /* disable IO-APIC */
103 disable_ioapic_setup();
104 return 0;
105}
106early_param("noapic", parse_noapic);
66759a01 107
da51a821 108struct irq_cfg;
0f978f45 109struct irq_pin_list;
a1420f39 110struct irq_cfg {
da51a821 111 unsigned int irq;
8f09cd20 112#ifdef CONFIG_HAVE_SPARSE_IRQ
da51a821 113 struct irq_cfg *next;
8f09cd20 114#endif
0f978f45 115 struct irq_pin_list *irq_2_pin;
497c9a19
YL
116 cpumask_t domain;
117 cpumask_t old_domain;
118 unsigned move_cleanup_count;
a1420f39 119 u8 vector;
497c9a19 120 u8 move_in_progress : 1;
a1420f39
YL
121};
122
a1420f39
YL
123/* irq_cfg is indexed by the sum of all RTEs in all I/O APICs. */
124static struct irq_cfg irq_cfg_legacy[] __initdata = {
497c9a19
YL
125 [0] = { .irq = 0, .domain = CPU_MASK_ALL, .vector = IRQ0_VECTOR, },
126 [1] = { .irq = 1, .domain = CPU_MASK_ALL, .vector = IRQ1_VECTOR, },
127 [2] = { .irq = 2, .domain = CPU_MASK_ALL, .vector = IRQ2_VECTOR, },
128 [3] = { .irq = 3, .domain = CPU_MASK_ALL, .vector = IRQ3_VECTOR, },
129 [4] = { .irq = 4, .domain = CPU_MASK_ALL, .vector = IRQ4_VECTOR, },
130 [5] = { .irq = 5, .domain = CPU_MASK_ALL, .vector = IRQ5_VECTOR, },
131 [6] = { .irq = 6, .domain = CPU_MASK_ALL, .vector = IRQ6_VECTOR, },
132 [7] = { .irq = 7, .domain = CPU_MASK_ALL, .vector = IRQ7_VECTOR, },
133 [8] = { .irq = 8, .domain = CPU_MASK_ALL, .vector = IRQ8_VECTOR, },
134 [9] = { .irq = 9, .domain = CPU_MASK_ALL, .vector = IRQ9_VECTOR, },
135 [10] = { .irq = 10, .domain = CPU_MASK_ALL, .vector = IRQ10_VECTOR, },
136 [11] = { .irq = 11, .domain = CPU_MASK_ALL, .vector = IRQ11_VECTOR, },
137 [12] = { .irq = 12, .domain = CPU_MASK_ALL, .vector = IRQ12_VECTOR, },
138 [13] = { .irq = 13, .domain = CPU_MASK_ALL, .vector = IRQ13_VECTOR, },
139 [14] = { .irq = 14, .domain = CPU_MASK_ALL, .vector = IRQ14_VECTOR, },
140 [15] = { .irq = 15, .domain = CPU_MASK_ALL, .vector = IRQ15_VECTOR, },
a1420f39
YL
141};
142
da51a821 143static struct irq_cfg irq_cfg_init = { .irq = -1U, };
da51a821
YL
144
145static void init_one_irq_cfg(struct irq_cfg *cfg)
146{
147 memcpy(cfg, &irq_cfg_init, sizeof(struct irq_cfg));
148}
149
150static struct irq_cfg *irq_cfgx;
e89eb438 151
ac54a6c9 152#ifdef CONFIG_HAVE_SPARSE_IRQ
e89eb438
YL
153/*
154 * Protect the irq_cfgx_free freelist:
155 */
156static DEFINE_SPINLOCK(irq_cfg_lock);
157
da51a821 158static struct irq_cfg *irq_cfgx_free;
8f09cd20 159#endif
ac54a6c9 160
a1420f39
YL
161static void __init init_work(void *data)
162{
da51a821
YL
163 struct dyn_array *da = data;
164 struct irq_cfg *cfg;
165 int legacy_count;
166 int i;
167
168 cfg = *da->name;
a1420f39 169
da51a821 170 memcpy(cfg, irq_cfg_legacy, sizeof(irq_cfg_legacy));
a1420f39 171
676f4a92 172 legacy_count = ARRAY_SIZE(irq_cfg_legacy);
da51a821
YL
173 for (i = legacy_count; i < *da->nr; i++)
174 init_one_irq_cfg(&cfg[i]);
a1420f39 175
8f09cd20 176#ifdef CONFIG_HAVE_SPARSE_IRQ
da51a821
YL
177 for (i = 1; i < *da->nr; i++)
178 cfg[i-1].next = &cfg[i];
a1420f39 179
da51a821
YL
180 irq_cfgx_free = &irq_cfgx[legacy_count];
181 irq_cfgx[legacy_count - 1].next = NULL;
8f09cd20
YL
182#endif
183}
184
185#ifdef CONFIG_HAVE_SPARSE_IRQ
186/* need to be biger than size of irq_cfg_legacy */
187static int nr_irq_cfg = 32;
188
189static int __init parse_nr_irq_cfg(char *arg)
190{
191 if (arg) {
192 nr_irq_cfg = simple_strtoul(arg, NULL, 0);
193 if (nr_irq_cfg < 32)
194 nr_irq_cfg = 32;
195 }
196 return 0;
a1420f39
YL
197}
198
8f09cd20
YL
199early_param("nr_irq_cfg", parse_nr_irq_cfg);
200
201#define for_each_irq_cfg(irqX, cfg) \
202 for (cfg = irq_cfgx, irqX = cfg->irq; cfg; cfg = cfg->next, irqX = cfg ? cfg->irq : -1U)
203
da51a821
YL
204
205DEFINE_DYN_ARRAY(irq_cfgx, sizeof(struct irq_cfg), nr_irq_cfg, PAGE_SIZE, init_work);
a1420f39
YL
206
207static struct irq_cfg *irq_cfg(unsigned int irq)
208{
da51a821
YL
209 struct irq_cfg *cfg;
210
211 cfg = irq_cfgx;
212 while (cfg) {
213 if (cfg->irq == irq)
214 return cfg;
215
216 cfg = cfg->next;
217 }
218
219 return NULL;
220}
221
222static struct irq_cfg *irq_cfg_alloc(unsigned int irq)
223{
224 struct irq_cfg *cfg, *cfg_pri;
e89eb438 225 unsigned long flags;
da51a821 226 int count = 0;
e89eb438 227 int i;
da51a821
YL
228
229 cfg_pri = cfg = irq_cfgx;
230 while (cfg) {
231 if (cfg->irq == irq)
232 return cfg;
233
234 cfg_pri = cfg;
235 cfg = cfg->next;
236 count++;
237 }
238
e89eb438 239 spin_lock_irqsave(&irq_cfg_lock, flags);
da51a821
YL
240 if (!irq_cfgx_free) {
241 unsigned long phys;
242 unsigned long total_bytes;
243 /*
244 * we run out of pre-allocate ones, allocate more
245 */
246 printk(KERN_DEBUG "try to get more irq_cfg %d\n", nr_irq_cfg);
247
248 total_bytes = sizeof(struct irq_cfg) * nr_irq_cfg;
249 if (after_bootmem)
250 cfg = kzalloc(total_bytes, GFP_ATOMIC);
251 else
252 cfg = __alloc_bootmem_nopanic(total_bytes, PAGE_SIZE, 0);
a1420f39 253
da51a821
YL
254 if (!cfg)
255 panic("please boot with nr_irq_cfg= %d\n", count * 2);
256
257 phys = __pa(cfg);
258 printk(KERN_DEBUG "irq_irq ==> [%#lx - %#lx]\n", phys, phys + total_bytes);
259
260 for (i = 0; i < nr_irq_cfg; i++)
261 init_one_irq_cfg(&cfg[i]);
262
263 for (i = 1; i < nr_irq_cfg; i++)
264 cfg[i-1].next = &cfg[i];
265
266 irq_cfgx_free = cfg;
267 }
268
269 cfg = irq_cfgx_free;
270 irq_cfgx_free = irq_cfgx_free->next;
271 cfg->next = NULL;
272 if (cfg_pri)
273 cfg_pri->next = cfg;
274 else
275 irq_cfgx = cfg;
276 cfg->irq = irq;
e89eb438
YL
277
278 spin_unlock_irqrestore(&irq_cfg_lock, flags);
279
da51a821 280 printk(KERN_DEBUG "found new irq_cfg for irq %d\n", cfg->irq);
da51a821
YL
281#ifdef CONFIG_HAVE_SPARSE_IRQ_DEBUG
282 {
283 /* dump the results */
284 struct irq_cfg *cfg;
285 unsigned long phys;
286 unsigned long bytes = sizeof(struct irq_cfg);
287
288 printk(KERN_DEBUG "=========================== %d\n", irq);
289 printk(KERN_DEBUG "irq_cfg dump after get that for %d\n", irq);
290 for_each_irq_cfg(cfg) {
291 phys = __pa(cfg);
292 printk(KERN_DEBUG "irq_cfg %d ==> [%#lx - %#lx]\n", cfg->irq, phys, phys + bytes);
293 }
294 printk(KERN_DEBUG "===========================\n");
295 }
296#endif
297 return cfg;
a1420f39 298}
8f09cd20
YL
299#else
300
301#define for_each_irq_cfg(irq, cfg) \
302 for (irq = 0, cfg = &irq_cfgx[irq]; irq < nr_irqs; irq++, cfg = &irq_cfgx[irq])
303
304DEFINE_DYN_ARRAY(irq_cfgx, sizeof(struct irq_cfg), nr_irqs, PAGE_SIZE, init_work);
a1420f39 305
8f09cd20
YL
306struct irq_cfg *irq_cfg(unsigned int irq)
307{
308 if (irq < nr_irqs)
309 return &irq_cfgx[irq];
310
311 return NULL;
312}
313struct irq_cfg *irq_cfg_alloc(unsigned int irq)
314{
315 return irq_cfg(irq);
316}
317
318#endif
1da177e4
LT
319/*
320 * This is performance-critical, we want to do it O(1)
321 *
322 * the indexing order of this array favors 1:1 mappings
323 * between pins and IRQs.
324 */
325
0f978f45
YL
326struct irq_pin_list {
327 int apic, pin;
328 struct irq_pin_list *next;
329};
330
331static struct irq_pin_list *irq_2_pin_head;
332/* fill one page ? */
333static int nr_irq_2_pin = 0x100;
334static struct irq_pin_list *irq_2_pin_ptr;
335static void __init irq_2_pin_init_work(void *data)
336{
337 struct dyn_array *da = data;
338 struct irq_pin_list *pin;
339 int i;
340
341 pin = *da->name;
342
343 for (i = 1; i < *da->nr; i++)
344 pin[i-1].next = &pin[i];
345
346 irq_2_pin_ptr = &pin[0];
347}
348DEFINE_DYN_ARRAY(irq_2_pin_head, sizeof(struct irq_pin_list), nr_irq_2_pin, PAGE_SIZE, irq_2_pin_init_work);
349
350static struct irq_pin_list *get_one_free_irq_2_pin(void)
351{
352 struct irq_pin_list *pin;
353 int i;
354
355 pin = irq_2_pin_ptr;
356
357 if (pin) {
358 irq_2_pin_ptr = pin->next;
359 pin->next = NULL;
360 return pin;
361 }
362
363 /*
364 * we run out of pre-allocate ones, allocate more
365 */
366 printk(KERN_DEBUG "try to get more irq_2_pin %d\n", nr_irq_2_pin);
367
368 if (after_bootmem)
369 pin = kzalloc(sizeof(struct irq_pin_list)*nr_irq_2_pin,
370 GFP_ATOMIC);
371 else
372 pin = __alloc_bootmem_nopanic(sizeof(struct irq_pin_list) *
373 nr_irq_2_pin, PAGE_SIZE, 0);
374
375 if (!pin)
376 panic("can not get more irq_2_pin\n");
301e6190 377
0f978f45
YL
378 for (i = 1; i < nr_irq_2_pin; i++)
379 pin[i-1].next = &pin[i];
380
381 irq_2_pin_ptr = pin->next;
382 pin->next = NULL;
383
384 return pin;
385}
1da177e4 386
130fe05d
LT
387struct io_apic {
388 unsigned int index;
389 unsigned int unused[3];
390 unsigned int data;
391};
392
393static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx)
394{
395 return (void __iomem *) __fix_to_virt(FIX_IO_APIC_BASE_0 + idx)
ec2cd0a2 396 + (mp_ioapics[idx].mp_apicaddr & ~PAGE_MASK);
130fe05d
LT
397}
398
399static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg)
400{
401 struct io_apic __iomem *io_apic = io_apic_base(apic);
402 writel(reg, &io_apic->index);
403 return readl(&io_apic->data);
404}
405
406static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
407{
408 struct io_apic __iomem *io_apic = io_apic_base(apic);
409 writel(reg, &io_apic->index);
410 writel(value, &io_apic->data);
411}
412
413/*
414 * Re-write a value: to be used for read-modify-write
415 * cycles where the read already set up the index register.
416 *
417 * Older SiS APIC requires we rewrite the index register
418 */
419static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value)
420{
54168ed7
IM
421 struct io_apic __iomem *io_apic = io_apic_base(apic);
422 if (sis_apic_bug)
423 writel(reg, &io_apic->index);
130fe05d
LT
424 writel(value, &io_apic->data);
425}
426
047c8fdb
YL
427static bool io_apic_level_ack_pending(unsigned int irq)
428{
429 struct irq_pin_list *entry;
430 unsigned long flags;
431 struct irq_cfg *cfg = irq_cfg(irq);
432
433 spin_lock_irqsave(&ioapic_lock, flags);
434 entry = cfg->irq_2_pin;
435 for (;;) {
436 unsigned int reg;
437 int pin;
438
439 if (!entry)
440 break;
441 pin = entry->pin;
442 reg = io_apic_read(entry->apic, 0x10 + pin*2);
443 /* Is the remote IRR bit set? */
444 if (reg & IO_APIC_REDIR_REMOTE_IRR) {
445 spin_unlock_irqrestore(&ioapic_lock, flags);
446 return true;
447 }
448 if (!entry->next)
449 break;
450 entry = entry->next;
451 }
452 spin_unlock_irqrestore(&ioapic_lock, flags);
453
454 return false;
455}
047c8fdb 456
cf4c6a2f
AK
457union entry_union {
458 struct { u32 w1, w2; };
459 struct IO_APIC_route_entry entry;
460};
461
462static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin)
463{
464 union entry_union eu;
465 unsigned long flags;
466 spin_lock_irqsave(&ioapic_lock, flags);
467 eu.w1 = io_apic_read(apic, 0x10 + 2 * pin);
468 eu.w2 = io_apic_read(apic, 0x11 + 2 * pin);
469 spin_unlock_irqrestore(&ioapic_lock, flags);
470 return eu.entry;
471}
472
f9dadfa7
LT
473/*
474 * When we write a new IO APIC routing entry, we need to write the high
475 * word first! If the mask bit in the low word is clear, we will enable
476 * the interrupt, and we need to make sure the entry is fully populated
477 * before that happens.
478 */
d15512f4
AK
479static void
480__ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
cf4c6a2f 481{
cf4c6a2f
AK
482 union entry_union eu;
483 eu.entry = e;
f9dadfa7
LT
484 io_apic_write(apic, 0x11 + 2*pin, eu.w2);
485 io_apic_write(apic, 0x10 + 2*pin, eu.w1);
d15512f4
AK
486}
487
488static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
489{
490 unsigned long flags;
491 spin_lock_irqsave(&ioapic_lock, flags);
492 __ioapic_write_entry(apic, pin, e);
f9dadfa7
LT
493 spin_unlock_irqrestore(&ioapic_lock, flags);
494}
495
496/*
497 * When we mask an IO APIC routing entry, we need to write the low
498 * word first, in order to set the mask bit before we change the
499 * high bits!
500 */
501static void ioapic_mask_entry(int apic, int pin)
502{
503 unsigned long flags;
504 union entry_union eu = { .entry.mask = 1 };
505
cf4c6a2f
AK
506 spin_lock_irqsave(&ioapic_lock, flags);
507 io_apic_write(apic, 0x10 + 2*pin, eu.w1);
508 io_apic_write(apic, 0x11 + 2*pin, eu.w2);
509 spin_unlock_irqrestore(&ioapic_lock, flags);
510}
511
497c9a19
YL
512#ifdef CONFIG_SMP
513static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, u8 vector)
514{
515 int apic, pin;
516 struct irq_cfg *cfg;
517 struct irq_pin_list *entry;
518
519 cfg = irq_cfg(irq);
520 entry = cfg->irq_2_pin;
521 for (;;) {
522 unsigned int reg;
523
524 if (!entry)
525 break;
526
527 apic = entry->apic;
528 pin = entry->pin;
54168ed7
IM
529#ifdef CONFIG_INTR_REMAP
530 /*
531 * With interrupt-remapping, destination information comes
532 * from interrupt-remapping table entry.
533 */
534 if (!irq_remapped(irq))
535 io_apic_write(apic, 0x11 + pin*2, dest);
536#else
497c9a19 537 io_apic_write(apic, 0x11 + pin*2, dest);
54168ed7 538#endif
497c9a19
YL
539 reg = io_apic_read(apic, 0x10 + pin*2);
540 reg &= ~IO_APIC_REDIR_VECTOR_MASK;
541 reg |= vector;
54168ed7 542 io_apic_modify(apic, 0x10 + pin*2, reg);
497c9a19
YL
543 if (!entry->next)
544 break;
545 entry = entry->next;
546 }
547}
efa2559f
YL
548
549static int assign_irq_vector(int irq, cpumask_t mask);
550
497c9a19
YL
551static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
552{
553 struct irq_cfg *cfg;
554 unsigned long flags;
555 unsigned int dest;
556 cpumask_t tmp;
54168ed7 557 struct irq_desc *desc;
497c9a19 558
497c9a19
YL
559 cpus_and(tmp, mask, cpu_online_map);
560 if (cpus_empty(tmp))
561 return;
562
047c8fdb 563 cfg = irq_cfg(irq);
497c9a19
YL
564 if (assign_irq_vector(irq, mask))
565 return;
566
567 cpus_and(tmp, cfg->domain, mask);
497c9a19
YL
568 dest = cpu_mask_to_apicid(tmp);
569 /*
570 * Only the high 8 bits are valid.
571 */
572 dest = SET_APIC_LOGICAL_ID(dest);
573
54168ed7 574 desc = irq_to_desc(irq);
497c9a19
YL
575 spin_lock_irqsave(&ioapic_lock, flags);
576 __target_IO_APIC_irq(irq, dest, cfg->vector);
54168ed7 577 desc->affinity = mask;
497c9a19
YL
578 spin_unlock_irqrestore(&ioapic_lock, flags);
579}
497c9a19
YL
580#endif /* CONFIG_SMP */
581
1da177e4
LT
582/*
583 * The common case is 1:1 IRQ<->pin mappings. Sometimes there are
584 * shared ISA-space IRQs, so we have to support them. We are super
585 * fast in the common case, and fast for shared ISA-space IRQs.
586 */
587static void add_pin_to_irq(unsigned int irq, int apic, int pin)
588{
0f978f45
YL
589 struct irq_cfg *cfg;
590 struct irq_pin_list *entry;
591
592 /* first time to refer irq_cfg, so with new */
593 cfg = irq_cfg_alloc(irq);
594 entry = cfg->irq_2_pin;
595 if (!entry) {
596 entry = get_one_free_irq_2_pin();
597 cfg->irq_2_pin = entry;
598 entry->apic = apic;
599 entry->pin = pin;
600 printk(KERN_DEBUG " 0 add_pin_to_irq: irq %d --> apic %d pin %d\n", irq, apic, pin);
601 return;
602 }
1da177e4 603
0f978f45
YL
604 while (entry->next) {
605 /* not again, please */
606 if (entry->apic == apic && entry->pin == pin)
607 return;
1da177e4 608
0f978f45 609 entry = entry->next;
1da177e4 610 }
0f978f45
YL
611
612 entry->next = get_one_free_irq_2_pin();
613 entry = entry->next;
1da177e4
LT
614 entry->apic = apic;
615 entry->pin = pin;
0f978f45 616 printk(KERN_DEBUG " x add_pin_to_irq: irq %d --> apic %d pin %d\n", irq, apic, pin);
1da177e4
LT
617}
618
619/*
620 * Reroute an IRQ to a different pin.
621 */
622static void __init replace_pin_at_irq(unsigned int irq,
623 int oldapic, int oldpin,
624 int newapic, int newpin)
625{
0f978f45
YL
626 struct irq_cfg *cfg = irq_cfg(irq);
627 struct irq_pin_list *entry = cfg->irq_2_pin;
628 int replaced = 0;
1da177e4 629
0f978f45 630 while (entry) {
1da177e4
LT
631 if (entry->apic == oldapic && entry->pin == oldpin) {
632 entry->apic = newapic;
633 entry->pin = newpin;
0f978f45
YL
634 replaced = 1;
635 /* every one is different, right? */
1da177e4 636 break;
0f978f45
YL
637 }
638 entry = entry->next;
1da177e4 639 }
0f978f45
YL
640
641 /* why? call replace before add? */
642 if (!replaced)
643 add_pin_to_irq(irq, newapic, newpin);
1da177e4
LT
644}
645
87783be4
CG
646static inline void io_apic_modify_irq(unsigned int irq,
647 int mask_and, int mask_or,
648 void (*final)(struct irq_pin_list *entry))
649{
650 int pin;
651 struct irq_cfg *cfg;
652 struct irq_pin_list *entry;
047c8fdb 653
87783be4
CG
654 cfg = irq_cfg(irq);
655 for (entry = cfg->irq_2_pin; entry != NULL; entry = entry->next) {
656 unsigned int reg;
657 pin = entry->pin;
658 reg = io_apic_read(entry->apic, 0x10 + pin * 2);
659 reg &= mask_and;
660 reg |= mask_or;
661 io_apic_modify(entry->apic, 0x10 + pin * 2, reg);
662 if (final)
663 final(entry);
664 }
665}
047c8fdb 666
87783be4
CG
667static void __unmask_IO_APIC_irq(unsigned int irq)
668{
669 io_apic_modify_irq(irq, ~IO_APIC_REDIR_MASKED, 0, NULL);
670}
047c8fdb 671
4e738e2f 672#ifdef CONFIG_X86_64
87783be4 673void io_apic_sync(struct irq_pin_list *entry)
1da177e4 674{
87783be4
CG
675 /*
676 * Synchronize the IO-APIC and the CPU by doing
677 * a dummy read from the IO-APIC
678 */
679 struct io_apic __iomem *io_apic;
680 io_apic = io_apic_base(entry->apic);
4e738e2f 681 readl(&io_apic->data);
1da177e4
LT
682}
683
87783be4
CG
684static void __mask_IO_APIC_irq(unsigned int irq)
685{
686 io_apic_modify_irq(irq, ~0, IO_APIC_REDIR_MASKED, &io_apic_sync);
687}
688#else /* CONFIG_X86_32 */
689static void __mask_IO_APIC_irq(unsigned int irq)
690{
691 io_apic_modify_irq(irq, ~0, IO_APIC_REDIR_MASKED, NULL);
692}
1da177e4 693
87783be4
CG
694static void __mask_and_edge_IO_APIC_irq(unsigned int irq)
695{
696 io_apic_modify_irq(irq, ~IO_APIC_REDIR_LEVEL_TRIGGER,
697 IO_APIC_REDIR_MASKED, NULL);
698}
1da177e4 699
87783be4
CG
700static void __unmask_and_level_IO_APIC_irq(unsigned int irq)
701{
702 io_apic_modify_irq(irq, ~IO_APIC_REDIR_MASKED,
703 IO_APIC_REDIR_LEVEL_TRIGGER, NULL);
704}
705#endif /* CONFIG_X86_32 */
047c8fdb 706
54168ed7 707static void mask_IO_APIC_irq (unsigned int irq)
1da177e4
LT
708{
709 unsigned long flags;
710
711 spin_lock_irqsave(&ioapic_lock, flags);
712 __mask_IO_APIC_irq(irq);
713 spin_unlock_irqrestore(&ioapic_lock, flags);
714}
715
54168ed7 716static void unmask_IO_APIC_irq (unsigned int irq)
1da177e4
LT
717{
718 unsigned long flags;
719
720 spin_lock_irqsave(&ioapic_lock, flags);
721 __unmask_IO_APIC_irq(irq);
722 spin_unlock_irqrestore(&ioapic_lock, flags);
723}
724
725static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
726{
727 struct IO_APIC_route_entry entry;
36062448 728
1da177e4 729 /* Check delivery_mode to be sure we're not clearing an SMI pin */
cf4c6a2f 730 entry = ioapic_read_entry(apic, pin);
1da177e4
LT
731 if (entry.delivery_mode == dest_SMI)
732 return;
1da177e4
LT
733 /*
734 * Disable it in the IO-APIC irq-routing table:
735 */
f9dadfa7 736 ioapic_mask_entry(apic, pin);
1da177e4
LT
737}
738
54168ed7 739static void clear_IO_APIC (void)
1da177e4
LT
740{
741 int apic, pin;
742
743 for (apic = 0; apic < nr_ioapics; apic++)
744 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
745 clear_IO_APIC_pin(apic, pin);
746}
747
54168ed7 748#if !defined(CONFIG_SMP) && defined(CONFIG_X86_32)
75604d7f 749void send_IPI_self(int vector)
1da177e4
LT
750{
751 unsigned int cfg;
752
753 /*
754 * Wait for idle.
755 */
756 apic_wait_icr_idle();
757 cfg = APIC_DM_FIXED | APIC_DEST_SELF | vector | APIC_DEST_LOGICAL;
758 /*
759 * Send the IPI. The write to APIC_ICR fires this off.
760 */
593f4a78 761 apic_write(APIC_ICR, cfg);
1da177e4 762}
54168ed7 763#endif /* !CONFIG_SMP && CONFIG_X86_32*/
1da177e4 764
54168ed7 765#ifdef CONFIG_X86_32
1da177e4
LT
766/*
767 * support for broken MP BIOSs, enables hand-redirection of PIRQ0-7 to
768 * specific CPU-side IRQs.
769 */
770
771#define MAX_PIRQS 8
772static int pirq_entries [MAX_PIRQS];
773static int pirqs_enabled;
1da177e4 774
1da177e4
LT
775static int __init ioapic_pirq_setup(char *str)
776{
777 int i, max;
778 int ints[MAX_PIRQS+1];
779
780 get_options(str, ARRAY_SIZE(ints), ints);
781
782 for (i = 0; i < MAX_PIRQS; i++)
783 pirq_entries[i] = -1;
784
785 pirqs_enabled = 1;
786 apic_printk(APIC_VERBOSE, KERN_INFO
787 "PIRQ redirection, working around broken MP-BIOS.\n");
788 max = MAX_PIRQS;
789 if (ints[0] < MAX_PIRQS)
790 max = ints[0];
791
792 for (i = 0; i < max; i++) {
793 apic_printk(APIC_VERBOSE, KERN_DEBUG
794 "... PIRQ%d -> IRQ %d\n", i, ints[i+1]);
795 /*
796 * PIRQs are mapped upside down, usually.
797 */
798 pirq_entries[MAX_PIRQS-i-1] = ints[i+1];
799 }
800 return 1;
801}
802
803__setup("pirq=", ioapic_pirq_setup);
54168ed7
IM
804#endif /* CONFIG_X86_32 */
805
806#ifdef CONFIG_INTR_REMAP
807/* I/O APIC RTE contents at the OS boot up */
808static struct IO_APIC_route_entry *early_ioapic_entries[MAX_IO_APICS];
809
810/*
811 * Saves and masks all the unmasked IO-APIC RTE's
812 */
813int save_mask_IO_APIC_setup(void)
814{
815 union IO_APIC_reg_01 reg_01;
816 unsigned long flags;
817 int apic, pin;
818
819 /*
820 * The number of IO-APIC IRQ registers (== #pins):
821 */
822 for (apic = 0; apic < nr_ioapics; apic++) {
823 spin_lock_irqsave(&ioapic_lock, flags);
824 reg_01.raw = io_apic_read(apic, 1);
825 spin_unlock_irqrestore(&ioapic_lock, flags);
826 nr_ioapic_registers[apic] = reg_01.bits.entries+1;
827 }
828
829 for (apic = 0; apic < nr_ioapics; apic++) {
830 early_ioapic_entries[apic] =
831 kzalloc(sizeof(struct IO_APIC_route_entry) *
832 nr_ioapic_registers[apic], GFP_KERNEL);
833 if (!early_ioapic_entries[apic])
834 return -ENOMEM;
835 }
836
837 for (apic = 0; apic < nr_ioapics; apic++)
838 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
839 struct IO_APIC_route_entry entry;
840
841 entry = early_ioapic_entries[apic][pin] =
842 ioapic_read_entry(apic, pin);
843 if (!entry.mask) {
844 entry.mask = 1;
845 ioapic_write_entry(apic, pin, entry);
846 }
847 }
848 return 0;
849}
850
851void restore_IO_APIC_setup(void)
852{
853 int apic, pin;
854
855 for (apic = 0; apic < nr_ioapics; apic++)
856 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
857 ioapic_write_entry(apic, pin,
858 early_ioapic_entries[apic][pin]);
859}
860
861void reinit_intr_remapped_IO_APIC(int intr_remapping)
862{
863 /*
864 * for now plain restore of previous settings.
865 * TBD: In the case of OS enabling interrupt-remapping,
866 * IO-APIC RTE's need to be setup to point to interrupt-remapping
867 * table entries. for now, do a plain restore, and wait for
868 * the setup_IO_APIC_irqs() to do proper initialization.
869 */
870 restore_IO_APIC_setup();
871}
872#endif
1da177e4
LT
873
874/*
875 * Find the IRQ entry number of a certain pin.
876 */
877static int find_irq_entry(int apic, int pin, int type)
878{
879 int i;
880
881 for (i = 0; i < mp_irq_entries; i++)
2fddb6e2
AS
882 if (mp_irqs[i].mp_irqtype == type &&
883 (mp_irqs[i].mp_dstapic == mp_ioapics[apic].mp_apicid ||
884 mp_irqs[i].mp_dstapic == MP_APIC_ALL) &&
885 mp_irqs[i].mp_dstirq == pin)
1da177e4
LT
886 return i;
887
888 return -1;
889}
890
891/*
892 * Find the pin to which IRQ[irq] (ISA) is connected
893 */
fcfd636a 894static int __init find_isa_irq_pin(int irq, int type)
1da177e4
LT
895{
896 int i;
897
898 for (i = 0; i < mp_irq_entries; i++) {
2fddb6e2 899 int lbus = mp_irqs[i].mp_srcbus;
1da177e4 900
d27e2b8e 901 if (test_bit(lbus, mp_bus_not_pci) &&
2fddb6e2
AS
902 (mp_irqs[i].mp_irqtype == type) &&
903 (mp_irqs[i].mp_srcbusirq == irq))
1da177e4 904
2fddb6e2 905 return mp_irqs[i].mp_dstirq;
1da177e4
LT
906 }
907 return -1;
908}
909
fcfd636a
EB
910static int __init find_isa_irq_apic(int irq, int type)
911{
912 int i;
913
914 for (i = 0; i < mp_irq_entries; i++) {
2fddb6e2 915 int lbus = mp_irqs[i].mp_srcbus;
fcfd636a 916
73b2961b 917 if (test_bit(lbus, mp_bus_not_pci) &&
2fddb6e2
AS
918 (mp_irqs[i].mp_irqtype == type) &&
919 (mp_irqs[i].mp_srcbusirq == irq))
fcfd636a
EB
920 break;
921 }
922 if (i < mp_irq_entries) {
923 int apic;
54168ed7 924 for(apic = 0; apic < nr_ioapics; apic++) {
2fddb6e2 925 if (mp_ioapics[apic].mp_apicid == mp_irqs[i].mp_dstapic)
fcfd636a
EB
926 return apic;
927 }
928 }
929
930 return -1;
931}
932
1da177e4
LT
933/*
934 * Find a specific PCI IRQ entry.
935 * Not an __init, possibly needed by modules
936 */
937static int pin_2_irq(int idx, int apic, int pin);
938
939int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
940{
941 int apic, i, best_guess = -1;
942
54168ed7
IM
943 apic_printk(APIC_DEBUG, "querying PCI -> IRQ mapping bus:%d, slot:%d, pin:%d.\n",
944 bus, slot, pin);
ce6444d3 945 if (test_bit(bus, mp_bus_not_pci)) {
54168ed7 946 apic_printk(APIC_VERBOSE, "PCI BIOS passed nonexistent PCI bus %d!\n", bus);
1da177e4
LT
947 return -1;
948 }
949 for (i = 0; i < mp_irq_entries; i++) {
2fddb6e2 950 int lbus = mp_irqs[i].mp_srcbus;
1da177e4
LT
951
952 for (apic = 0; apic < nr_ioapics; apic++)
2fddb6e2
AS
953 if (mp_ioapics[apic].mp_apicid == mp_irqs[i].mp_dstapic ||
954 mp_irqs[i].mp_dstapic == MP_APIC_ALL)
1da177e4
LT
955 break;
956
47cab822 957 if (!test_bit(lbus, mp_bus_not_pci) &&
2fddb6e2 958 !mp_irqs[i].mp_irqtype &&
1da177e4 959 (bus == lbus) &&
2fddb6e2 960 (slot == ((mp_irqs[i].mp_srcbusirq >> 2) & 0x1f))) {
54168ed7 961 int irq = pin_2_irq(i,apic,mp_irqs[i].mp_dstirq);
1da177e4
LT
962
963 if (!(apic || IO_APIC_IRQ(irq)))
964 continue;
965
2fddb6e2 966 if (pin == (mp_irqs[i].mp_srcbusirq & 3))
1da177e4
LT
967 return irq;
968 /*
969 * Use the first all-but-pin matching entry as a
970 * best-guess fuzzy result for broken mptables.
971 */
972 if (best_guess < 0)
973 best_guess = irq;
974 }
975 }
976 return best_guess;
977}
54168ed7 978
129f6946 979EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
1da177e4 980
c0a282c2 981#if defined(CONFIG_EISA) || defined(CONFIG_MCA)
1da177e4
LT
982/*
983 * EISA Edge/Level control register, ELCR
984 */
985static int EISA_ELCR(unsigned int irq)
986{
987 if (irq < 16) {
988 unsigned int port = 0x4d0 + (irq >> 3);
989 return (inb(port) >> (irq & 7)) & 1;
990 }
991 apic_printk(APIC_VERBOSE, KERN_INFO
992 "Broken MPtable reports ISA irq %d\n", irq);
993 return 0;
994}
54168ed7 995
c0a282c2 996#endif
1da177e4 997
6728801d
AS
998/* ISA interrupts are always polarity zero edge triggered,
999 * when listed as conforming in the MP table. */
1000
1001#define default_ISA_trigger(idx) (0)
1002#define default_ISA_polarity(idx) (0)
1003
1da177e4
LT
1004/* EISA interrupts are always polarity zero and can be edge or level
1005 * trigger depending on the ELCR value. If an interrupt is listed as
1006 * EISA conforming in the MP table, that means its trigger type must
1007 * be read in from the ELCR */
1008
2fddb6e2 1009#define default_EISA_trigger(idx) (EISA_ELCR(mp_irqs[idx].mp_srcbusirq))
6728801d 1010#define default_EISA_polarity(idx) default_ISA_polarity(idx)
1da177e4
LT
1011
1012/* PCI interrupts are always polarity one level triggered,
1013 * when listed as conforming in the MP table. */
1014
1015#define default_PCI_trigger(idx) (1)
1016#define default_PCI_polarity(idx) (1)
1017
1018/* MCA interrupts are always polarity zero level triggered,
1019 * when listed as conforming in the MP table. */
1020
1021#define default_MCA_trigger(idx) (1)
6728801d 1022#define default_MCA_polarity(idx) default_ISA_polarity(idx)
1da177e4 1023
61fd47e0 1024static int MPBIOS_polarity(int idx)
1da177e4 1025{
2fddb6e2 1026 int bus = mp_irqs[idx].mp_srcbus;
1da177e4
LT
1027 int polarity;
1028
1029 /*
1030 * Determine IRQ line polarity (high active or low active):
1031 */
54168ed7 1032 switch (mp_irqs[idx].mp_irqflag & 3)
36062448 1033 {
54168ed7
IM
1034 case 0: /* conforms, ie. bus-type dependent polarity */
1035 if (test_bit(bus, mp_bus_not_pci))
1036 polarity = default_ISA_polarity(idx);
1037 else
1038 polarity = default_PCI_polarity(idx);
1039 break;
1040 case 1: /* high active */
1041 {
1042 polarity = 0;
1043 break;
1044 }
1045 case 2: /* reserved */
1046 {
1047 printk(KERN_WARNING "broken BIOS!!\n");
1048 polarity = 1;
1049 break;
1050 }
1051 case 3: /* low active */
1052 {
1053 polarity = 1;
1054 break;
1055 }
1056 default: /* invalid */
1057 {
1058 printk(KERN_WARNING "broken BIOS!!\n");
1059 polarity = 1;
1060 break;
1061 }
1da177e4
LT
1062 }
1063 return polarity;
1064}
1065
1066static int MPBIOS_trigger(int idx)
1067{
2fddb6e2 1068 int bus = mp_irqs[idx].mp_srcbus;
1da177e4
LT
1069 int trigger;
1070
1071 /*
1072 * Determine IRQ trigger mode (edge or level sensitive):
1073 */
54168ed7 1074 switch ((mp_irqs[idx].mp_irqflag>>2) & 3)
1da177e4 1075 {
54168ed7
IM
1076 case 0: /* conforms, ie. bus-type dependent */
1077 if (test_bit(bus, mp_bus_not_pci))
1078 trigger = default_ISA_trigger(idx);
1079 else
1080 trigger = default_PCI_trigger(idx);
c0a282c2 1081#if defined(CONFIG_EISA) || defined(CONFIG_MCA)
54168ed7
IM
1082 switch (mp_bus_id_to_type[bus]) {
1083 case MP_BUS_ISA: /* ISA pin */
1084 {
1085 /* set before the switch */
1086 break;
1087 }
1088 case MP_BUS_EISA: /* EISA pin */
1089 {
1090 trigger = default_EISA_trigger(idx);
1091 break;
1092 }
1093 case MP_BUS_PCI: /* PCI pin */
1094 {
1095 /* set before the switch */
1096 break;
1097 }
1098 case MP_BUS_MCA: /* MCA pin */
1099 {
1100 trigger = default_MCA_trigger(idx);
1101 break;
1102 }
1103 default:
1104 {
1105 printk(KERN_WARNING "broken BIOS!!\n");
1106 trigger = 1;
1107 break;
1108 }
1109 }
1110#endif
1da177e4 1111 break;
54168ed7 1112 case 1: /* edge */
1da177e4 1113 {
54168ed7 1114 trigger = 0;
1da177e4
LT
1115 break;
1116 }
54168ed7 1117 case 2: /* reserved */
1da177e4 1118 {
54168ed7
IM
1119 printk(KERN_WARNING "broken BIOS!!\n");
1120 trigger = 1;
1da177e4
LT
1121 break;
1122 }
54168ed7 1123 case 3: /* level */
1da177e4 1124 {
54168ed7 1125 trigger = 1;
1da177e4
LT
1126 break;
1127 }
54168ed7 1128 default: /* invalid */
1da177e4
LT
1129 {
1130 printk(KERN_WARNING "broken BIOS!!\n");
54168ed7 1131 trigger = 0;
1da177e4
LT
1132 break;
1133 }
1134 }
1135 return trigger;
1136}
1137
1138static inline int irq_polarity(int idx)
1139{
1140 return MPBIOS_polarity(idx);
1141}
1142
1143static inline int irq_trigger(int idx)
1144{
1145 return MPBIOS_trigger(idx);
1146}
1147
efa2559f 1148int (*ioapic_renumber_irq)(int ioapic, int irq);
1da177e4
LT
1149static int pin_2_irq(int idx, int apic, int pin)
1150{
1151 int irq, i;
2fddb6e2 1152 int bus = mp_irqs[idx].mp_srcbus;
1da177e4
LT
1153
1154 /*
1155 * Debugging check, we are in big trouble if this message pops up!
1156 */
2fddb6e2 1157 if (mp_irqs[idx].mp_dstirq != pin)
1da177e4
LT
1158 printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n");
1159
54168ed7 1160 if (test_bit(bus, mp_bus_not_pci)) {
2fddb6e2 1161 irq = mp_irqs[idx].mp_srcbusirq;
54168ed7 1162 } else {
643befed
AS
1163 /*
1164 * PCI IRQs are mapped in order
1165 */
1166 i = irq = 0;
1167 while (i < apic)
1168 irq += nr_ioapic_registers[i++];
1169 irq += pin;
54168ed7
IM
1170 /*
1171 * For MPS mode, so far only needed by ES7000 platform
1172 */
1173 if (ioapic_renumber_irq)
1174 irq = ioapic_renumber_irq(apic, irq);
1da177e4
LT
1175 }
1176
54168ed7 1177#ifdef CONFIG_X86_32
1da177e4
LT
1178 /*
1179 * PCI IRQ command line redirection. Yes, limits are hardcoded.
1180 */
1181 if ((pin >= 16) && (pin <= 23)) {
1182 if (pirq_entries[pin-16] != -1) {
1183 if (!pirq_entries[pin-16]) {
1184 apic_printk(APIC_VERBOSE, KERN_DEBUG
1185 "disabling PIRQ%d\n", pin-16);
1186 } else {
1187 irq = pirq_entries[pin-16];
1188 apic_printk(APIC_VERBOSE, KERN_DEBUG
1189 "using PIRQ%d -> IRQ %d\n",
1190 pin-16, irq);
1191 }
1192 }
1193 }
54168ed7
IM
1194#endif
1195
1da177e4
LT
1196 return irq;
1197}
1198
497c9a19
YL
1199void lock_vector_lock(void)
1200{
1201 /* Used to the online set of cpus does not change
1202 * during assign_irq_vector.
1203 */
1204 spin_lock(&vector_lock);
1205}
1da177e4 1206
497c9a19 1207void unlock_vector_lock(void)
1da177e4 1208{
497c9a19
YL
1209 spin_unlock(&vector_lock);
1210}
1da177e4 1211
497c9a19
YL
1212static int __assign_irq_vector(int irq, cpumask_t mask)
1213{
047c8fdb
YL
1214 /*
1215 * NOTE! The local APIC isn't very good at handling
1216 * multiple interrupts at the same interrupt level.
1217 * As the interrupt level is determined by taking the
1218 * vector number and shifting that right by 4, we
1219 * want to spread these out a bit so that they don't
1220 * all fall in the same interrupt level.
1221 *
1222 * Also, we've got to be careful not to trash gate
1223 * 0x80, because int 0x80 is hm, kind of importantish. ;)
1224 */
54168ed7
IM
1225 static int current_vector = FIRST_DEVICE_VECTOR, current_offset = 0;
1226 unsigned int old_vector;
1227 int cpu;
1228 struct irq_cfg *cfg;
ace80ab7 1229
54168ed7 1230 cfg = irq_cfg(irq);
8339f000 1231
54168ed7
IM
1232 /* Only try and allocate irqs on cpus that are present */
1233 cpus_and(mask, mask, cpu_online_map);
ace80ab7 1234
54168ed7
IM
1235 if ((cfg->move_in_progress) || cfg->move_cleanup_count)
1236 return -EBUSY;
0a1ad60d 1237
54168ed7
IM
1238 old_vector = cfg->vector;
1239 if (old_vector) {
1240 cpumask_t tmp;
1241 cpus_and(tmp, cfg->domain, mask);
1242 if (!cpus_empty(tmp))
1243 return 0;
1244 }
497c9a19 1245
54168ed7
IM
1246 for_each_cpu_mask_nr(cpu, mask) {
1247 cpumask_t domain, new_mask;
1248 int new_cpu;
1249 int vector, offset;
497c9a19 1250
54168ed7
IM
1251 domain = vector_allocation_domain(cpu);
1252 cpus_and(new_mask, domain, cpu_online_map);
497c9a19 1253
54168ed7
IM
1254 vector = current_vector;
1255 offset = current_offset;
497c9a19 1256next:
54168ed7
IM
1257 vector += 8;
1258 if (vector >= first_system_vector) {
1259 /* If we run out of vectors on large boxen, must share them. */
1260 offset = (offset + 1) % 8;
1261 vector = FIRST_DEVICE_VECTOR + offset;
1262 }
1263 if (unlikely(current_vector == vector))
1264 continue;
047c8fdb 1265#ifdef CONFIG_X86_64
54168ed7
IM
1266 if (vector == IA32_SYSCALL_VECTOR)
1267 goto next;
047c8fdb 1268#else
54168ed7
IM
1269 if (vector == SYSCALL_VECTOR)
1270 goto next;
047c8fdb 1271#endif
54168ed7
IM
1272 for_each_cpu_mask_nr(new_cpu, new_mask)
1273 if (per_cpu(vector_irq, new_cpu)[vector] != -1)
1274 goto next;
1275 /* Found one! */
1276 current_vector = vector;
1277 current_offset = offset;
1278 if (old_vector) {
1279 cfg->move_in_progress = 1;
1280 cfg->old_domain = cfg->domain;
7a959cff 1281 }
54168ed7
IM
1282 for_each_cpu_mask_nr(new_cpu, new_mask)
1283 per_cpu(vector_irq, new_cpu)[vector] = irq;
1284 cfg->vector = vector;
1285 cfg->domain = domain;
1286 return 0;
1287 }
1288 return -ENOSPC;
497c9a19
YL
1289}
1290
1291static int assign_irq_vector(int irq, cpumask_t mask)
1292{
1293 int err;
ace80ab7 1294 unsigned long flags;
ace80ab7
EB
1295
1296 spin_lock_irqsave(&vector_lock, flags);
497c9a19 1297 err = __assign_irq_vector(irq, mask);
26a3c49c 1298 spin_unlock_irqrestore(&vector_lock, flags);
497c9a19
YL
1299 return err;
1300}
1301
1302static void __clear_irq_vector(int irq)
1303{
1304 struct irq_cfg *cfg;
1305 cpumask_t mask;
1306 int cpu, vector;
1307
1308 cfg = irq_cfg(irq);
1309 BUG_ON(!cfg->vector);
1310
1311 vector = cfg->vector;
1312 cpus_and(mask, cfg->domain, cpu_online_map);
1313 for_each_cpu_mask_nr(cpu, mask)
1314 per_cpu(vector_irq, cpu)[vector] = -1;
1315
1316 cfg->vector = 0;
1317 cpus_clear(cfg->domain);
1318}
1319
1320void __setup_vector_irq(int cpu)
1321{
1322 /* Initialize vector_irq on a new cpu */
1323 /* This function must be called with vector_lock held */
1324 int irq, vector;
1325 struct irq_cfg *cfg;
1326
1327 /* Mark the inuse vectors */
8f09cd20 1328 for_each_irq_cfg(irq, cfg) {
497c9a19
YL
1329 if (!cpu_isset(cpu, cfg->domain))
1330 continue;
1331 vector = cfg->vector;
497c9a19
YL
1332 per_cpu(vector_irq, cpu)[vector] = irq;
1333 }
1334 /* Mark the free vectors */
1335 for (vector = 0; vector < NR_VECTORS; ++vector) {
1336 irq = per_cpu(vector_irq, cpu)[vector];
1337 if (irq < 0)
1338 continue;
1339
1340 cfg = irq_cfg(irq);
1341 if (!cpu_isset(cpu, cfg->domain))
1342 per_cpu(vector_irq, cpu)[vector] = -1;
54168ed7 1343 }
1da177e4 1344}
3fde6900 1345
f5b9ed7a 1346static struct irq_chip ioapic_chip;
54168ed7
IM
1347#ifdef CONFIG_INTR_REMAP
1348static struct irq_chip ir_ioapic_chip;
1349#endif
1da177e4 1350
54168ed7
IM
1351#define IOAPIC_AUTO -1
1352#define IOAPIC_EDGE 0
1353#define IOAPIC_LEVEL 1
1da177e4 1354
047c8fdb 1355#ifdef CONFIG_X86_32
1d025192
YL
1356static inline int IO_APIC_irq_trigger(int irq)
1357{
54168ed7 1358 int apic, idx, pin;
1d025192 1359
54168ed7
IM
1360 for (apic = 0; apic < nr_ioapics; apic++) {
1361 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
1362 idx = find_irq_entry(apic, pin, mp_INT);
1363 if ((idx != -1) && (irq == pin_2_irq(idx, apic, pin)))
1364 return irq_trigger(idx);
1365 }
1366 }
1367 /*
1368 * nonexistent IRQs are edge default
1369 */
1370 return 0;
1d025192 1371}
047c8fdb
YL
1372#else
1373static inline int IO_APIC_irq_trigger(int irq)
1374{
54168ed7 1375 return 1;
047c8fdb
YL
1376}
1377#endif
1d025192 1378
497c9a19 1379static void ioapic_register_intr(int irq, unsigned long trigger)
1da177e4 1380{
08678b08
YL
1381 struct irq_desc *desc;
1382
199751d7
YL
1383 /* first time to use this irq_desc */
1384 if (irq < 16)
1385 desc = irq_to_desc(irq);
1386 else
1387 desc = irq_to_desc_alloc(irq);
1388
6ebcc00e 1389 if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
047c8fdb 1390 trigger == IOAPIC_LEVEL)
08678b08 1391 desc->status |= IRQ_LEVEL;
047c8fdb
YL
1392 else
1393 desc->status &= ~IRQ_LEVEL;
1394
54168ed7
IM
1395#ifdef CONFIG_INTR_REMAP
1396 if (irq_remapped(irq)) {
1397 desc->status |= IRQ_MOVE_PCNTXT;
1398 if (trigger)
1399 set_irq_chip_and_handler_name(irq, &ir_ioapic_chip,
1400 handle_fasteoi_irq,
1401 "fasteoi");
1402 else
1403 set_irq_chip_and_handler_name(irq, &ir_ioapic_chip,
1404 handle_edge_irq, "edge");
1405 return;
1406 }
1407#endif
047c8fdb
YL
1408 if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
1409 trigger == IOAPIC_LEVEL)
a460e745 1410 set_irq_chip_and_handler_name(irq, &ioapic_chip,
54168ed7
IM
1411 handle_fasteoi_irq,
1412 "fasteoi");
047c8fdb 1413 else
a460e745 1414 set_irq_chip_and_handler_name(irq, &ioapic_chip,
54168ed7 1415 handle_edge_irq, "edge");
1da177e4
LT
1416}
1417
497c9a19
YL
1418static int setup_ioapic_entry(int apic, int irq,
1419 struct IO_APIC_route_entry *entry,
1420 unsigned int destination, int trigger,
1421 int polarity, int vector)
1da177e4 1422{
497c9a19
YL
1423 /*
1424 * add it to the IO-APIC irq-routing table:
1425 */
1426 memset(entry,0,sizeof(*entry));
1427
54168ed7
IM
1428#ifdef CONFIG_INTR_REMAP
1429 if (intr_remapping_enabled) {
1430 struct intel_iommu *iommu = map_ioapic_to_ir(apic);
1431 struct irte irte;
1432 struct IR_IO_APIC_route_entry *ir_entry =
1433 (struct IR_IO_APIC_route_entry *) entry;
1434 int index;
1435
1436 if (!iommu)
1437 panic("No mapping iommu for ioapic %d\n", apic);
1438
1439 index = alloc_irte(iommu, irq, 1);
1440 if (index < 0)
1441 panic("Failed to allocate IRTE for ioapic %d\n", apic);
1442
1443 memset(&irte, 0, sizeof(irte));
1444
1445 irte.present = 1;
1446 irte.dst_mode = INT_DEST_MODE;
1447 irte.trigger_mode = trigger;
1448 irte.dlvry_mode = INT_DELIVERY_MODE;
1449 irte.vector = vector;
1450 irte.dest_id = IRTE_DEST(destination);
1451
1452 modify_irte(irq, &irte);
1453
1454 ir_entry->index2 = (index >> 15) & 0x1;
1455 ir_entry->zero = 0;
1456 ir_entry->format = 1;
1457 ir_entry->index = (index & 0x7fff);
1458 } else
1459#endif
1460 {
1461 entry->delivery_mode = INT_DELIVERY_MODE;
1462 entry->dest_mode = INT_DEST_MODE;
1463 entry->dest = destination;
1464 }
497c9a19 1465
54168ed7 1466 entry->mask = 0; /* enable IRQ */
497c9a19
YL
1467 entry->trigger = trigger;
1468 entry->polarity = polarity;
1469 entry->vector = vector;
1470
1471 /* Mask level triggered irqs.
1472 * Use IRQ_DELAYED_DISABLE for edge triggered irqs.
1473 */
1474 if (trigger)
1475 entry->mask = 1;
497c9a19
YL
1476 return 0;
1477}
1478
1479static void setup_IO_APIC_irq(int apic, int pin, unsigned int irq,
54168ed7 1480 int trigger, int polarity)
497c9a19
YL
1481{
1482 struct irq_cfg *cfg;
1da177e4 1483 struct IO_APIC_route_entry entry;
497c9a19
YL
1484 cpumask_t mask;
1485
1486 if (!IO_APIC_IRQ(irq))
1487 return;
1488
1489 cfg = irq_cfg(irq);
1490
1491 mask = TARGET_CPUS;
1492 if (assign_irq_vector(irq, mask))
1493 return;
1494
1495 cpus_and(mask, cfg->domain, mask);
1496
1497 apic_printk(APIC_VERBOSE,KERN_DEBUG
1498 "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> "
1499 "IRQ %d Mode:%i Active:%i)\n",
1500 apic, mp_ioapics[apic].mp_apicid, pin, cfg->vector,
1501 irq, trigger, polarity);
1502
1503
1504 if (setup_ioapic_entry(mp_ioapics[apic].mp_apicid, irq, &entry,
1505 cpu_mask_to_apicid(mask), trigger, polarity,
1506 cfg->vector)) {
1507 printk("Failed to setup ioapic entry for ioapic %d, pin %d\n",
1508 mp_ioapics[apic].mp_apicid, pin);
1509 __clear_irq_vector(irq);
1510 return;
1511 }
1512
1513 ioapic_register_intr(irq, trigger);
1514 if (irq < 16)
1515 disable_8259A_irq(irq);
1516
1517 ioapic_write_entry(apic, pin, entry);
1518}
1519
1520static void __init setup_IO_APIC_irqs(void)
1521{
3c2cbd24
CG
1522 int apic, pin, idx, irq;
1523 int notcon = 0;
1da177e4
LT
1524
1525 apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
1526
1527 for (apic = 0; apic < nr_ioapics; apic++) {
3c2cbd24 1528 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
20d225b9 1529
3c2cbd24
CG
1530 idx = find_irq_entry(apic, pin, mp_INT);
1531 if (idx == -1) {
2a554fb1 1532 if (!notcon) {
3c2cbd24 1533 notcon = 1;
2a554fb1
CG
1534 apic_printk(APIC_VERBOSE,
1535 KERN_DEBUG " %d-%d",
1536 mp_ioapics[apic].mp_apicid,
1537 pin);
1538 } else
1539 apic_printk(APIC_VERBOSE, " %d-%d",
1540 mp_ioapics[apic].mp_apicid,
1541 pin);
3c2cbd24
CG
1542 continue;
1543 }
1544
1545 irq = pin_2_irq(idx, apic, pin);
54168ed7 1546#ifdef CONFIG_X86_32
3c2cbd24
CG
1547 if (multi_timer_check(apic, irq))
1548 continue;
54168ed7 1549#endif
3c2cbd24 1550 add_pin_to_irq(irq, apic, pin);
36062448 1551
3c2cbd24
CG
1552 setup_IO_APIC_irq(apic, pin, irq,
1553 irq_trigger(idx), irq_polarity(idx));
1554 }
1555 if (notcon) {
1556 apic_printk(APIC_VERBOSE,
2a554fb1 1557 " (apicid-pin) not connected\n");
3c2cbd24
CG
1558 notcon = 0;
1559 }
1da177e4
LT
1560 }
1561
3c2cbd24
CG
1562 if (notcon)
1563 apic_printk(APIC_VERBOSE,
2a554fb1 1564 " (apicid-pin) not connected\n");
1da177e4
LT
1565}
1566
1567/*
f7633ce5 1568 * Set up the timer pin, possibly with the 8259A-master behind.
1da177e4 1569 */
f7633ce5
MR
1570static void __init setup_timer_IRQ0_pin(unsigned int apic, unsigned int pin,
1571 int vector)
1da177e4
LT
1572{
1573 struct IO_APIC_route_entry entry;
1da177e4 1574
54168ed7
IM
1575#ifdef CONFIG_INTR_REMAP
1576 if (intr_remapping_enabled)
1577 return;
1578#endif
1579
36062448 1580 memset(&entry, 0, sizeof(entry));
1da177e4
LT
1581
1582 /*
1583 * We use logical delivery to get the timer IRQ
1584 * to the first CPU.
1585 */
1586 entry.dest_mode = INT_DEST_MODE;
03be7505 1587 entry.mask = 1; /* mask IRQ now */
d83e94ac 1588 entry.dest = cpu_mask_to_apicid(TARGET_CPUS);
1da177e4
LT
1589 entry.delivery_mode = INT_DELIVERY_MODE;
1590 entry.polarity = 0;
1591 entry.trigger = 0;
1592 entry.vector = vector;
1593
1594 /*
1595 * The timer IRQ doesn't have to know that behind the
f7633ce5 1596 * scene we may have a 8259A-master in AEOI mode ...
1da177e4 1597 */
54168ed7 1598 set_irq_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq, "edge");
1da177e4
LT
1599
1600 /*
1601 * Add it to the IO-APIC irq-routing table:
1602 */
cf4c6a2f 1603 ioapic_write_entry(apic, pin, entry);
1da177e4
LT
1604}
1605
32f71aff
MR
1606
1607__apicdebuginit(void) print_IO_APIC(void)
1da177e4
LT
1608{
1609 int apic, i;
1610 union IO_APIC_reg_00 reg_00;
1611 union IO_APIC_reg_01 reg_01;
1612 union IO_APIC_reg_02 reg_02;
1613 union IO_APIC_reg_03 reg_03;
1614 unsigned long flags;
0f978f45 1615 struct irq_cfg *cfg;
8f09cd20 1616 unsigned int irq;
1da177e4
LT
1617
1618 if (apic_verbosity == APIC_QUIET)
1619 return;
1620
36062448 1621 printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
1da177e4
LT
1622 for (i = 0; i < nr_ioapics; i++)
1623 printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
ec2cd0a2 1624 mp_ioapics[i].mp_apicid, nr_ioapic_registers[i]);
1da177e4
LT
1625
1626 /*
1627 * We are a bit conservative about what we expect. We have to
1628 * know about every hardware change ASAP.
1629 */
1630 printk(KERN_INFO "testing the IO APIC.......................\n");
1631
1632 for (apic = 0; apic < nr_ioapics; apic++) {
1633
1634 spin_lock_irqsave(&ioapic_lock, flags);
1635 reg_00.raw = io_apic_read(apic, 0);
1636 reg_01.raw = io_apic_read(apic, 1);
1637 if (reg_01.bits.version >= 0x10)
1638 reg_02.raw = io_apic_read(apic, 2);
54168ed7
IM
1639 if (reg_01.bits.version >= 0x20)
1640 reg_03.raw = io_apic_read(apic, 3);
1da177e4
LT
1641 spin_unlock_irqrestore(&ioapic_lock, flags);
1642
54168ed7 1643 printk("\n");
ec2cd0a2 1644 printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].mp_apicid);
1da177e4
LT
1645 printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
1646 printk(KERN_DEBUG "....... : physical APIC id: %02X\n", reg_00.bits.ID);
1647 printk(KERN_DEBUG "....... : Delivery Type: %X\n", reg_00.bits.delivery_type);
1648 printk(KERN_DEBUG "....... : LTS : %X\n", reg_00.bits.LTS);
1da177e4 1649
54168ed7 1650 printk(KERN_DEBUG ".... register #01: %08X\n", *(int *)&reg_01);
1da177e4 1651 printk(KERN_DEBUG "....... : max redirection entries: %04X\n", reg_01.bits.entries);
1da177e4
LT
1652
1653 printk(KERN_DEBUG "....... : PRQ implemented: %X\n", reg_01.bits.PRQ);
1654 printk(KERN_DEBUG "....... : IO APIC version: %04X\n", reg_01.bits.version);
1da177e4
LT
1655
1656 /*
1657 * Some Intel chipsets with IO APIC VERSION of 0x1? don't have reg_02,
1658 * but the value of reg_02 is read as the previous read register
1659 * value, so ignore it if reg_02 == reg_01.
1660 */
1661 if (reg_01.bits.version >= 0x10 && reg_02.raw != reg_01.raw) {
1662 printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw);
1663 printk(KERN_DEBUG "....... : arbitration: %02X\n", reg_02.bits.arbitration);
1da177e4
LT
1664 }
1665
1666 /*
1667 * Some Intel chipsets with IO APIC VERSION of 0x2? don't have reg_02
1668 * or reg_03, but the value of reg_0[23] is read as the previous read
1669 * register value, so ignore it if reg_03 == reg_0[12].
1670 */
1671 if (reg_01.bits.version >= 0x20 && reg_03.raw != reg_02.raw &&
1672 reg_03.raw != reg_01.raw) {
1673 printk(KERN_DEBUG ".... register #03: %08X\n", reg_03.raw);
1674 printk(KERN_DEBUG "....... : Boot DT : %X\n", reg_03.bits.boot_DT);
1da177e4
LT
1675 }
1676
1677 printk(KERN_DEBUG ".... IRQ redirection table:\n");
1678
d83e94ac
YL
1679 printk(KERN_DEBUG " NR Dst Mask Trig IRR Pol"
1680 " Stat Dmod Deli Vect: \n");
1da177e4
LT
1681
1682 for (i = 0; i <= reg_01.bits.entries; i++) {
1683 struct IO_APIC_route_entry entry;
1684
cf4c6a2f 1685 entry = ioapic_read_entry(apic, i);
1da177e4 1686
54168ed7
IM
1687 printk(KERN_DEBUG " %02x %03X ",
1688 i,
1689 entry.dest
1690 );
1da177e4
LT
1691
1692 printk("%1d %1d %1d %1d %1d %1d %1d %02X\n",
1693 entry.mask,
1694 entry.trigger,
1695 entry.irr,
1696 entry.polarity,
1697 entry.delivery_status,
1698 entry.dest_mode,
1699 entry.delivery_mode,
1700 entry.vector
1701 );
1702 }
1703 }
1da177e4 1704 printk(KERN_DEBUG "IRQ to pin mappings:\n");
8f09cd20 1705 for_each_irq_cfg(irq, cfg) {
0f978f45
YL
1706 struct irq_pin_list *entry = cfg->irq_2_pin;
1707 if (!entry)
1da177e4 1708 continue;
8f09cd20 1709 printk(KERN_DEBUG "IRQ%d ", irq);
1da177e4
LT
1710 for (;;) {
1711 printk("-> %d:%d", entry->apic, entry->pin);
1712 if (!entry->next)
1713 break;
0f978f45 1714 entry = entry->next;
1da177e4
LT
1715 }
1716 printk("\n");
1717 }
1718
1719 printk(KERN_INFO ".................................... done.\n");
1720
1721 return;
1722}
1723
32f71aff 1724__apicdebuginit(void) print_APIC_bitfield(int base)
1da177e4
LT
1725{
1726 unsigned int v;
1727 int i, j;
1728
1729 if (apic_verbosity == APIC_QUIET)
1730 return;
1731
1732 printk(KERN_DEBUG "0123456789abcdef0123456789abcdef\n" KERN_DEBUG);
1733 for (i = 0; i < 8; i++) {
1734 v = apic_read(base + i*0x10);
1735 for (j = 0; j < 32; j++) {
1736 if (v & (1<<j))
1737 printk("1");
1738 else
1739 printk("0");
1740 }
1741 printk("\n");
1742 }
1743}
1744
32f71aff 1745__apicdebuginit(void) print_local_APIC(void *dummy)
1da177e4
LT
1746{
1747 unsigned int v, ver, maxlvt;
7ab6af7a 1748 u64 icr;
1da177e4
LT
1749
1750 if (apic_verbosity == APIC_QUIET)
1751 return;
1752
1753 printk("\n" KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
1754 smp_processor_id(), hard_smp_processor_id());
66823114 1755 v = apic_read(APIC_ID);
54168ed7 1756 printk(KERN_INFO "... APIC ID: %08x (%01x)\n", v, read_apic_id());
1da177e4
LT
1757 v = apic_read(APIC_LVR);
1758 printk(KERN_INFO "... APIC VERSION: %08x\n", v);
1759 ver = GET_APIC_VERSION(v);
e05d723f 1760 maxlvt = lapic_get_maxlvt();
1da177e4
LT
1761
1762 v = apic_read(APIC_TASKPRI);
1763 printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);
1764
54168ed7 1765 if (APIC_INTEGRATED(ver)) { /* !82489DX */
a11b5abe
YL
1766 if (!APIC_XAPIC(ver)) {
1767 v = apic_read(APIC_ARBPRI);
1768 printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v,
1769 v & APIC_ARBPRI_MASK);
1770 }
1da177e4
LT
1771 v = apic_read(APIC_PROCPRI);
1772 printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v);
1773 }
1774
a11b5abe
YL
1775 /*
1776 * Remote read supported only in the 82489DX and local APIC for
1777 * Pentium processors.
1778 */
1779 if (!APIC_INTEGRATED(ver) || maxlvt == 3) {
1780 v = apic_read(APIC_RRR);
1781 printk(KERN_DEBUG "... APIC RRR: %08x\n", v);
1782 }
1783
1da177e4
LT
1784 v = apic_read(APIC_LDR);
1785 printk(KERN_DEBUG "... APIC LDR: %08x\n", v);
a11b5abe
YL
1786 if (!x2apic_enabled()) {
1787 v = apic_read(APIC_DFR);
1788 printk(KERN_DEBUG "... APIC DFR: %08x\n", v);
1789 }
1da177e4
LT
1790 v = apic_read(APIC_SPIV);
1791 printk(KERN_DEBUG "... APIC SPIV: %08x\n", v);
1792
1793 printk(KERN_DEBUG "... APIC ISR field:\n");
1794 print_APIC_bitfield(APIC_ISR);
1795 printk(KERN_DEBUG "... APIC TMR field:\n");
1796 print_APIC_bitfield(APIC_TMR);
1797 printk(KERN_DEBUG "... APIC IRR field:\n");
1798 print_APIC_bitfield(APIC_IRR);
1799
54168ed7
IM
1800 if (APIC_INTEGRATED(ver)) { /* !82489DX */
1801 if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */
1da177e4 1802 apic_write(APIC_ESR, 0);
54168ed7 1803
1da177e4
LT
1804 v = apic_read(APIC_ESR);
1805 printk(KERN_DEBUG "... APIC ESR: %08x\n", v);
1806 }
1807
7ab6af7a 1808 icr = apic_icr_read();
0c425cec
IM
1809 printk(KERN_DEBUG "... APIC ICR: %08x\n", (u32)icr);
1810 printk(KERN_DEBUG "... APIC ICR2: %08x\n", (u32)(icr >> 32));
1da177e4
LT
1811
1812 v = apic_read(APIC_LVTT);
1813 printk(KERN_DEBUG "... APIC LVTT: %08x\n", v);
1814
1815 if (maxlvt > 3) { /* PC is LVT#4. */
1816 v = apic_read(APIC_LVTPC);
1817 printk(KERN_DEBUG "... APIC LVTPC: %08x\n", v);
1818 }
1819 v = apic_read(APIC_LVT0);
1820 printk(KERN_DEBUG "... APIC LVT0: %08x\n", v);
1821 v = apic_read(APIC_LVT1);
1822 printk(KERN_DEBUG "... APIC LVT1: %08x\n", v);
1823
1824 if (maxlvt > 2) { /* ERR is LVT#3. */
1825 v = apic_read(APIC_LVTERR);
1826 printk(KERN_DEBUG "... APIC LVTERR: %08x\n", v);
1827 }
1828
1829 v = apic_read(APIC_TMICT);
1830 printk(KERN_DEBUG "... APIC TMICT: %08x\n", v);
1831 v = apic_read(APIC_TMCCT);
1832 printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v);
1833 v = apic_read(APIC_TDCR);
1834 printk(KERN_DEBUG "... APIC TDCR: %08x\n", v);
1835 printk("\n");
1836}
1837
32f71aff 1838__apicdebuginit(void) print_all_local_APICs(void)
1da177e4 1839{
ffd5aae7
YL
1840 int cpu;
1841
1842 preempt_disable();
1843 for_each_online_cpu(cpu)
1844 smp_call_function_single(cpu, print_local_APIC, NULL, 1);
1845 preempt_enable();
1da177e4
LT
1846}
1847
32f71aff 1848__apicdebuginit(void) print_PIC(void)
1da177e4 1849{
1da177e4
LT
1850 unsigned int v;
1851 unsigned long flags;
1852
1853 if (apic_verbosity == APIC_QUIET)
1854 return;
1855
1856 printk(KERN_DEBUG "\nprinting PIC contents\n");
1857
1858 spin_lock_irqsave(&i8259A_lock, flags);
1859
1860 v = inb(0xa1) << 8 | inb(0x21);
1861 printk(KERN_DEBUG "... PIC IMR: %04x\n", v);
1862
1863 v = inb(0xa0) << 8 | inb(0x20);
1864 printk(KERN_DEBUG "... PIC IRR: %04x\n", v);
1865
54168ed7
IM
1866 outb(0x0b,0xa0);
1867 outb(0x0b,0x20);
1da177e4 1868 v = inb(0xa0) << 8 | inb(0x20);
54168ed7
IM
1869 outb(0x0a,0xa0);
1870 outb(0x0a,0x20);
1da177e4
LT
1871
1872 spin_unlock_irqrestore(&i8259A_lock, flags);
1873
1874 printk(KERN_DEBUG "... PIC ISR: %04x\n", v);
1875
1876 v = inb(0x4d1) << 8 | inb(0x4d0);
1877 printk(KERN_DEBUG "... PIC ELCR: %04x\n", v);
1878}
1879
32f71aff
MR
1880__apicdebuginit(int) print_all_ICs(void)
1881{
1882 print_PIC();
1883 print_all_local_APICs();
1884 print_IO_APIC();
1885
1886 return 0;
1887}
1888
1889fs_initcall(print_all_ICs);
1890
1da177e4 1891
efa2559f
YL
1892/* Where if anywhere is the i8259 connect in external int mode */
1893static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
1894
54168ed7 1895void __init enable_IO_APIC(void)
1da177e4
LT
1896{
1897 union IO_APIC_reg_01 reg_01;
fcfd636a 1898 int i8259_apic, i8259_pin;
54168ed7 1899 int apic;
1da177e4
LT
1900 unsigned long flags;
1901
54168ed7
IM
1902#ifdef CONFIG_X86_32
1903 int i;
1da177e4
LT
1904 if (!pirqs_enabled)
1905 for (i = 0; i < MAX_PIRQS; i++)
1906 pirq_entries[i] = -1;
54168ed7 1907#endif
1da177e4
LT
1908
1909 /*
1910 * The number of IO-APIC IRQ registers (== #pins):
1911 */
fcfd636a 1912 for (apic = 0; apic < nr_ioapics; apic++) {
1da177e4 1913 spin_lock_irqsave(&ioapic_lock, flags);
fcfd636a 1914 reg_01.raw = io_apic_read(apic, 1);
1da177e4 1915 spin_unlock_irqrestore(&ioapic_lock, flags);
fcfd636a
EB
1916 nr_ioapic_registers[apic] = reg_01.bits.entries+1;
1917 }
54168ed7 1918 for(apic = 0; apic < nr_ioapics; apic++) {
fcfd636a
EB
1919 int pin;
1920 /* See if any of the pins is in ExtINT mode */
1008fddc 1921 for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
fcfd636a 1922 struct IO_APIC_route_entry entry;
cf4c6a2f 1923 entry = ioapic_read_entry(apic, pin);
fcfd636a 1924
fcfd636a
EB
1925 /* If the interrupt line is enabled and in ExtInt mode
1926 * I have found the pin where the i8259 is connected.
1927 */
1928 if ((entry.mask == 0) && (entry.delivery_mode == dest_ExtINT)) {
1929 ioapic_i8259.apic = apic;
1930 ioapic_i8259.pin = pin;
1931 goto found_i8259;
1932 }
1933 }
1934 }
1935 found_i8259:
 1936 /* Look to see if the MP table has reported the ExtINT */
 1937 /* If we could not find the appropriate pin by looking at the ioapic,
 1938 * the i8259 is probably not connected to the ioapic, but give the
 1939 * mptable a chance anyway.
 1940 */
1941 i8259_pin = find_isa_irq_pin(0, mp_ExtINT);
1942 i8259_apic = find_isa_irq_apic(0, mp_ExtINT);
1943 /* Trust the MP table if nothing is setup in the hardware */
1944 if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) {
1945 printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n");
1946 ioapic_i8259.pin = i8259_pin;
1947 ioapic_i8259.apic = i8259_apic;
1948 }
1949 /* Complain if the MP table and the hardware disagree */
1950 if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) &&
1951 (i8259_pin >= 0) && (ioapic_i8259.pin >= 0))
1952 {
1953 printk(KERN_WARNING "ExtINT in hardware and MP table differ\n");
1da177e4
LT
1954 }
1955
1956 /*
1957 * Do not trust the IO-APIC being empty at bootup
1958 */
1959 clear_IO_APIC();
1960}
1961
1962/*
1963 * Not an __init, needed by the reboot code
1964 */
1965void disable_IO_APIC(void)
1966{
1967 /*
1968 * Clear the IO-APIC before rebooting:
1969 */
1970 clear_IO_APIC();
1971
650927ef 1972 /*
0b968d23 1973 * If the i8259 is routed through an IOAPIC,
650927ef 1974 * put that IOAPIC in virtual wire mode
0b968d23 1975 * so legacy interrupts can be delivered.
650927ef 1976 */
fcfd636a 1977 if (ioapic_i8259.pin != -1) {
650927ef 1978 struct IO_APIC_route_entry entry;
650927ef
EB
1979
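 /*
  * What follows builds, in effect, the classic virtual-wire setup: an
  * unmasked, edge-triggered, active-high ExtINT entry whose physical
  * destination is this CPU (normally the BSP at reboot time), so the
  * 8259A can keep delivering legacy interrupts.
  */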
1980 memset(&entry, 0, sizeof(entry));
1981 entry.mask = 0; /* Enabled */
1982 entry.trigger = 0; /* Edge */
1983 entry.irr = 0;
1984 entry.polarity = 0; /* High */
1985 entry.delivery_status = 0;
1986 entry.dest_mode = 0; /* Physical */
fcfd636a 1987 entry.delivery_mode = dest_ExtINT; /* ExtInt */
650927ef 1988 entry.vector = 0;
54168ed7 1989 entry.dest = read_apic_id();
650927ef
EB
1990
1991 /*
1992 * Add it to the IO-APIC irq-routing table:
1993 */
cf4c6a2f 1994 ioapic_write_entry(ioapic_i8259.apic, ioapic_i8259.pin, entry);
650927ef 1995 }
54168ed7 1996
fcfd636a 1997 disconnect_bsp_APIC(ioapic_i8259.pin != -1);
1da177e4
LT
1998}
1999
54168ed7 2000#ifdef CONFIG_X86_32
1da177e4
LT
2001/*
2002 * function to set the IO-APIC physical IDs based on the
2003 * values stored in the MPC table.
2004 *
2005 * by Matt Domsch <Matt_Domsch@dell.com> Tue Dec 21 12:25:05 CST 1999
2006 */
2007
1da177e4
LT
2008static void __init setup_ioapic_ids_from_mpc(void)
2009{
2010 union IO_APIC_reg_00 reg_00;
2011 physid_mask_t phys_id_present_map;
2012 int apic;
2013 int i;
2014 unsigned char old_id;
2015 unsigned long flags;
2016
a4dbc34d 2017 if (x86_quirks->setup_ioapic_ids && x86_quirks->setup_ioapic_ids())
d49c4288 2018 return;
d49c4288 2019
ca05fea6
NP
2020 /*
2021 * Don't check I/O APIC IDs for xAPIC systems. They have
2022 * no meaning without the serial APIC bus.
2023 */
7c5c1e42
SL
2024 if (!(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
2025 || APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
ca05fea6 2026 return;
1da177e4
LT
2027 /*
2028 * This is broken; anything with a real cpu count has to
2029 * circumvent this idiocy regardless.
2030 */
2031 phys_id_present_map = ioapic_phys_id_map(phys_cpu_present_map);
2032
2033 /*
2034 * Set the IOAPIC ID to the value stored in the MPC table.
2035 */
2036 for (apic = 0; apic < nr_ioapics; apic++) {
2037
2038 /* Read the register 0 value */
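 /*
  * Note: register 0 of an IO-APIC holds its ID; on the classic 82093AA
  * that is the 4-bit field in bits 24-27 which reg_00.bits.ID maps to.
  */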
2039 spin_lock_irqsave(&ioapic_lock, flags);
2040 reg_00.raw = io_apic_read(apic, 0);
2041 spin_unlock_irqrestore(&ioapic_lock, flags);
36062448 2042
ec2cd0a2 2043 old_id = mp_ioapics[apic].mp_apicid;
1da177e4 2044
ec2cd0a2 2045 if (mp_ioapics[apic].mp_apicid >= get_physical_broadcast()) {
1da177e4 2046 printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n",
ec2cd0a2 2047 apic, mp_ioapics[apic].mp_apicid);
1da177e4
LT
2048 printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
2049 reg_00.bits.ID);
ec2cd0a2 2050 mp_ioapics[apic].mp_apicid = reg_00.bits.ID;
1da177e4
LT
2051 }
2052
1da177e4
LT
2053 /*
2054 * Sanity check, is the ID really free? Every APIC in a
2055 * system must have a unique ID or we get lots of nice
2056 * 'stuck on smp_invalidate_needed IPI wait' messages.
2057 */
2058 if (check_apicid_used(phys_id_present_map,
ec2cd0a2 2059 mp_ioapics[apic].mp_apicid)) {
1da177e4 2060 printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n",
ec2cd0a2 2061 apic, mp_ioapics[apic].mp_apicid);
1da177e4
LT
2062 for (i = 0; i < get_physical_broadcast(); i++)
2063 if (!physid_isset(i, phys_id_present_map))
2064 break;
2065 if (i >= get_physical_broadcast())
2066 panic("Max APIC ID exceeded!\n");
2067 printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
2068 i);
2069 physid_set(i, phys_id_present_map);
ec2cd0a2 2070 mp_ioapics[apic].mp_apicid = i;
1da177e4
LT
2071 } else {
2072 physid_mask_t tmp;
ec2cd0a2 2073 tmp = apicid_to_cpu_present(mp_ioapics[apic].mp_apicid);
1da177e4
LT
2074 apic_printk(APIC_VERBOSE, "Setting %d in the "
2075 "phys_id_present_map\n",
ec2cd0a2 2076 mp_ioapics[apic].mp_apicid);
1da177e4
LT
2077 physids_or(phys_id_present_map, phys_id_present_map, tmp);
2078 }
2079
2080
2081 /*
2082 * We need to adjust the IRQ routing table
2083 * if the ID changed.
2084 */
ec2cd0a2 2085 if (old_id != mp_ioapics[apic].mp_apicid)
1da177e4 2086 for (i = 0; i < mp_irq_entries; i++)
2fddb6e2
AS
2087 if (mp_irqs[i].mp_dstapic == old_id)
2088 mp_irqs[i].mp_dstapic
ec2cd0a2 2089 = mp_ioapics[apic].mp_apicid;
1da177e4
LT
2090
2091 /*
2092 * Read the right value from the MPC table and
2093 * write it into the ID register.
36062448 2094 */
1da177e4
LT
2095 apic_printk(APIC_VERBOSE, KERN_INFO
2096 "...changing IO-APIC physical APIC ID to %d ...",
ec2cd0a2 2097 mp_ioapics[apic].mp_apicid);
1da177e4 2098
ec2cd0a2 2099 reg_00.bits.ID = mp_ioapics[apic].mp_apicid;
1da177e4 2100 spin_lock_irqsave(&ioapic_lock, flags);
a2d332fa
YL
2101 io_apic_write(apic, 0, reg_00.raw);
2102 spin_unlock_irqrestore(&ioapic_lock, flags);
1da177e4
LT
2103
2104 /*
2105 * Sanity check
2106 */
2107 spin_lock_irqsave(&ioapic_lock, flags);
2108 reg_00.raw = io_apic_read(apic, 0);
2109 spin_unlock_irqrestore(&ioapic_lock, flags);
ec2cd0a2 2110 if (reg_00.bits.ID != mp_ioapics[apic].mp_apicid)
1da177e4
LT
2111 printk("could not set ID!\n");
2112 else
2113 apic_printk(APIC_VERBOSE, " ok.\n");
2114 }
2115}
54168ed7 2116#endif
1da177e4 2117
7ce0bcfd 2118int no_timer_check __initdata;
8542b200
ZA
2119
2120static int __init notimercheck(char *s)
2121{
2122 no_timer_check = 1;
2123 return 1;
2124}
2125__setup("no_timer_check", notimercheck);
2126
1da177e4
LT
2127/*
2128 * There is a nasty bug in some older SMP boards, their mptable lies
2129 * about the timer IRQ. We do the following to work around the situation:
2130 *
2131 * - timer IRQ defaults to IO-APIC IRQ
2132 * - if this function detects that timer IRQs are defunct, then we fall
2133 * back to ISA timer IRQs
2134 */
f0a7a5c9 2135static int __init timer_irq_works(void)
1da177e4
LT
2136{
2137 unsigned long t1 = jiffies;
4aae0702 2138 unsigned long flags;
1da177e4 2139
8542b200
ZA
2140 if (no_timer_check)
2141 return 1;
2142
4aae0702 2143 local_save_flags(flags);
1da177e4
LT
2144 local_irq_enable();
2145 /* Let ten ticks pass... */
2146 mdelay((10 * 1000) / HZ);
4aae0702 2147 local_irq_restore(flags);
1da177e4
LT
2148
2149 /*
2150 * Expect a few ticks at least, to be sure some possible
2151 * glue logic does not lock up after one or two first
2152 * ticks in a non-ExtINT mode. Also the local APIC
2153 * might have cached one ExtINT interrupt. Finally, at
2154 * least one tick may be lost due to delays.
2155 */
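 /*
  * Note: the mdelay above always spans 10 tick periods whatever HZ is
  * (10 * 1000 / HZ milliseconds), so seeing jiffies advance by more
  * than 4 means at least half of the expected ticks actually arrived.
  */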
54168ed7
IM
2156
2157 /* jiffies wrap? */
1d16b53e 2158 if (time_after(jiffies, t1 + 4))
1da177e4 2159 return 1;
1da177e4
LT
2160 return 0;
2161}
2162
2163/*
2164 * In the SMP+IOAPIC case it might happen that there are an unspecified
2165 * number of pending IRQ events unhandled. These cases are very rare,
2166 * so we 'resend' these IRQs via IPIs, to the same CPU. It's much
 2167 * better to do it this way since we then do not have to be aware of
2168 * 'pending' interrupts in the IRQ path, except at this point.
2169 */
2170/*
 2171 * Edge-triggered interrupts need to resend any interrupt
 2172 * that was delayed, but this is now handled in the device-
 2173 * independent code.
2174 */
2175
2176/*
 2177 * Starting up an edge-triggered IO-APIC interrupt is
 2178 * nasty - we need to make sure that we get the edge.
 2179 * If it is already asserted for some reason, we need to
 2180 * return 1 to indicate that it was pending.
2181 *
2182 * This is not complete - we should be able to fake
2183 * an edge even if it isn't on the 8259A...
2184 */
54168ed7 2185
f5b9ed7a 2186static unsigned int startup_ioapic_irq(unsigned int irq)
1da177e4
LT
2187{
2188 int was_pending = 0;
2189 unsigned long flags;
2190
2191 spin_lock_irqsave(&ioapic_lock, flags);
2192 if (irq < 16) {
2193 disable_8259A_irq(irq);
2194 if (i8259A_irq_pending(irq))
2195 was_pending = 1;
2196 }
2197 __unmask_IO_APIC_irq(irq);
2198 spin_unlock_irqrestore(&ioapic_lock, flags);
2199
2200 return was_pending;
2201}
2202
54168ed7 2203#ifdef CONFIG_X86_64
ace80ab7 2204static int ioapic_retrigger_irq(unsigned int irq)
1da177e4 2205{
54168ed7
IM
2206
2207 struct irq_cfg *cfg = irq_cfg(irq);
2208 unsigned long flags;
2209
2210 spin_lock_irqsave(&vector_lock, flags);
2211 send_IPI_mask(cpumask_of_cpu(first_cpu(cfg->domain)), cfg->vector);
2212 spin_unlock_irqrestore(&vector_lock, flags);
c0ad90a3
IM
2213
2214 return 1;
2215}
54168ed7
IM
2216#else
2217static int ioapic_retrigger_irq(unsigned int irq)
497c9a19 2218{
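	/* On 32-bit, re-raising the irq is just an IPI-to-self with its vector. */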
54168ed7 2219 send_IPI_self(irq_cfg(irq)->vector);
497c9a19 2220
54168ed7
IM
2221 return 1;
2222}
2223#endif
497c9a19 2224
54168ed7
IM
2225/*
2226 * Level and edge triggered IO-APIC interrupts need different handling,
2227 * so we use two separate IRQ descriptors. Edge triggered IRQs can be
2228 * handled with the level-triggered descriptor, but that one has slightly
2229 * more overhead. Level-triggered interrupts cannot be handled with the
2230 * edge-triggered handler, without risking IRQ storms and other ugly
2231 * races.
2232 */
497c9a19 2233
54168ed7 2234#ifdef CONFIG_SMP
497c9a19 2235
54168ed7
IM
2236#ifdef CONFIG_INTR_REMAP
2237static void ir_irq_migration(struct work_struct *work);
497c9a19 2238
54168ed7 2239static DECLARE_DELAYED_WORK(ir_migration_work, ir_irq_migration);
497c9a19 2240
54168ed7
IM
2241/*
2242 * Migrate the IO-APIC irq in the presence of intr-remapping.
2243 *
 2244 * For edge triggered, irq migration is a simple atomic update (of vector
 2245 * and cpu destination) of the IRTE and a flush of the hardware cache.
 2246 *
 2247 * For level triggered, we need to modify the io-apic RTE as well with the
 2248 * updated vector information, along with modifying the IRTE with vector and
 2249 * destination. So irq migration for level triggered is a little more complex
 2250 * compared to edge triggered migration. But the good news is, we use the same
 2251 * algorithm for level triggered migration as we have today, the only
 2252 * difference being that we now initiate the irq migration from process
 2253 * context instead of interrupt context.
 2254 *
 2255 * In the future, when we do a directed EOI (combined with cpu EOI broadcast
 2256 * suppression) to the IO-APIC, level triggered irq migration will also be
 2257 * as simple as edge triggered migration and we can do the irq migration
2258 * with a simple atomic update to IO-APIC RTE.
2259 */
2260static void migrate_ioapic_irq(int irq, cpumask_t mask)
497c9a19 2261{
54168ed7
IM
2262 struct irq_cfg *cfg;
2263 struct irq_desc *desc;
2264 cpumask_t tmp, cleanup_mask;
2265 struct irte irte;
2266 int modify_ioapic_rte;
2267 unsigned int dest;
2268 unsigned long flags;
497c9a19 2269
54168ed7
IM
2270 cpus_and(tmp, mask, cpu_online_map);
2271 if (cpus_empty(tmp))
497c9a19
YL
2272 return;
2273
54168ed7
IM
2274 if (get_irte(irq, &irte))
2275 return;
497c9a19 2276
54168ed7
IM
2277 if (assign_irq_vector(irq, mask))
2278 return;
2279
2280 cfg = irq_cfg(irq);
2281 cpus_and(tmp, cfg->domain, mask);
2282 dest = cpu_mask_to_apicid(tmp);
2283
2284 desc = irq_to_desc(irq);
2285 modify_ioapic_rte = desc->status & IRQ_LEVEL;
2286 if (modify_ioapic_rte) {
2287 spin_lock_irqsave(&ioapic_lock, flags);
2288 __target_IO_APIC_irq(irq, dest, cfg->vector);
2289 spin_unlock_irqrestore(&ioapic_lock, flags);
2290 }
2291
2292 irte.vector = cfg->vector;
2293 irte.dest_id = IRTE_DEST(dest);
2294
2295 /*
 2296 * Modify the IRTE and flush the Interrupt entry cache.
2297 */
2298 modify_irte(irq, &irte);
2299
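 /*
  * If a previous move was still in flight, nudge the cpus of the old
  * vector domain with the cleanup IPI so the stale vector gets
  * released once it is quiescent.
  */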
2300 if (cfg->move_in_progress) {
2301 cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
2302 cfg->move_cleanup_count = cpus_weight(cleanup_mask);
2303 send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
2304 cfg->move_in_progress = 0;
2305 }
2306
2307 desc->affinity = mask;
2308}
2309
2310static int migrate_irq_remapped_level(int irq)
2311{
2312 int ret = -1;
2313 struct irq_desc *desc = irq_to_desc(irq);
2314
2315 mask_IO_APIC_irq(irq);
2316
2317 if (io_apic_level_ack_pending(irq)) {
2318 /*
2319 * Interrupt in progress. Migrating irq now will change the
2320 * vector information in the IO-APIC RTE and that will confuse
 2321 * the EOI broadcast performed by the cpu.
2322 * So, delay the irq migration to the next instance.
2323 */
2324 schedule_delayed_work(&ir_migration_work, 1);
2325 goto unmask;
2326 }
2327
 2328 /* everything is clear. we have right of way */
2329 migrate_ioapic_irq(irq, desc->pending_mask);
2330
2331 ret = 0;
2332 desc->status &= ~IRQ_MOVE_PENDING;
2333 cpus_clear(desc->pending_mask);
2334
2335unmask:
2336 unmask_IO_APIC_irq(irq);
2337 return ret;
2338}
2339
2340static void ir_irq_migration(struct work_struct *work)
2341{
2342 unsigned int irq;
2343 struct irq_desc *desc;
2344
2345 for_each_irq_desc(irq, desc) {
2346 if (desc->status & IRQ_MOVE_PENDING) {
2347 unsigned long flags;
2348
2349 spin_lock_irqsave(&desc->lock, flags);
2350 if (!desc->chip->set_affinity ||
2351 !(desc->status & IRQ_MOVE_PENDING)) {
2352 desc->status &= ~IRQ_MOVE_PENDING;
2353 spin_unlock_irqrestore(&desc->lock, flags);
2354 continue;
2355 }
2356
2357 desc->chip->set_affinity(irq, desc->pending_mask);
2358 spin_unlock_irqrestore(&desc->lock, flags);
2359 }
2360 }
2361}
2362
2363/*
2364 * Migrates the IRQ destination in the process context.
2365 */
2366static void set_ir_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
2367{
2368 struct irq_desc *desc = irq_to_desc(irq);
2369
2370 if (desc->status & IRQ_LEVEL) {
2371 desc->status |= IRQ_MOVE_PENDING;
2372 desc->pending_mask = mask;
2373 migrate_irq_remapped_level(irq);
2374 return;
2375 }
2376
2377 migrate_ioapic_irq(irq, mask);
2378}
2379#endif
2380
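/*
 * Runs in response to IRQ_MOVE_CLEANUP_VECTOR: walk this CPU's
 * vector_irq[] table and release any vector belonging to an irq that
 * has completed its move to a different vector/cpu pair.
 */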
2381asmlinkage void smp_irq_move_cleanup_interrupt(void)
2382{
2383 unsigned vector, me;
2384 ack_APIC_irq();
2385#ifdef CONFIG_X86_64
2386 exit_idle();
2387#endif
2388 irq_enter();
2389
2390 me = smp_processor_id();
2391 for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
2392 unsigned int irq;
2393 struct irq_desc *desc;
2394 struct irq_cfg *cfg;
2395 irq = __get_cpu_var(vector_irq)[vector];
2396
2397 desc = irq_to_desc(irq);
2398 if (!desc)
2399 continue;
2400
2401 cfg = irq_cfg(irq);
2402 spin_lock(&desc->lock);
2403 if (!cfg->move_cleanup_count)
2404 goto unlock;
2405
2406 if ((vector == cfg->vector) && cpu_isset(me, cfg->domain))
2407 goto unlock;
2408
2409 __get_cpu_var(vector_irq)[vector] = -1;
2410 cfg->move_cleanup_count--;
2411unlock:
2412 spin_unlock(&desc->lock);
2413 }
2414
2415 irq_exit();
2416}
2417
2418static void irq_complete_move(unsigned int irq)
2419{
2420 struct irq_cfg *cfg = irq_cfg(irq);
2421 unsigned vector, me;
2422
2423 if (likely(!cfg->move_in_progress))
2424 return;
2425
2426 vector = ~get_irq_regs()->orig_ax;
2427 me = smp_processor_id();
2428 if ((vector == cfg->vector) && cpu_isset(me, cfg->domain)) {
2429 cpumask_t cleanup_mask;
2430
2431 cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
2432 cfg->move_cleanup_count = cpus_weight(cleanup_mask);
2433 send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
497c9a19
YL
2434 cfg->move_in_progress = 0;
2435 }
2436}
2437#else
2438static inline void irq_complete_move(unsigned int irq) {}
2439#endif
54168ed7
IM
2440#ifdef CONFIG_INTR_REMAP
2441static void ack_x2apic_level(unsigned int irq)
2442{
2443 ack_x2APIC_irq();
2444}
2445
2446static void ack_x2apic_edge(unsigned int irq)
2447{
2448 ack_x2APIC_irq();
2449}
2450#endif
497c9a19 2451
1d025192
YL
2452static void ack_apic_edge(unsigned int irq)
2453{
2454 irq_complete_move(irq);
2455 move_native_irq(irq);
2456 ack_APIC_irq();
2457}
2458
3eb2cce8
YL
2459#ifdef CONFIG_X86_32
2460atomic_t irq_mis_count;
2461#endif
2462
047c8fdb
YL
2463static void ack_apic_level(unsigned int irq)
2464{
3eb2cce8
YL
2465#ifdef CONFIG_X86_32
2466 unsigned long v;
2467 int i;
2468#endif
54168ed7 2469 int do_unmask_irq = 0;
047c8fdb 2470
54168ed7 2471 irq_complete_move(irq);
047c8fdb 2472#ifdef CONFIG_GENERIC_PENDING_IRQ
54168ed7
IM
2473 /* If we are moving the irq we need to mask it */
2474 if (unlikely(irq_to_desc(irq)->status & IRQ_MOVE_PENDING)) {
2475 do_unmask_irq = 1;
2476 mask_IO_APIC_irq(irq);
2477 }
047c8fdb
YL
2478#endif
2479
3eb2cce8
YL
2480#ifdef CONFIG_X86_32
2481 /*
2482 * It appears there is an erratum which affects at least version 0x11
2483 * of I/O APIC (that's the 82093AA and cores integrated into various
2484 * chipsets). Under certain conditions a level-triggered interrupt is
 2485 * erroneously delivered as an edge-triggered one but the respective IRR
2486 * bit gets set nevertheless. As a result the I/O unit expects an EOI
2487 * message but it will never arrive and further interrupts are blocked
2488 * from the source. The exact reason is so far unknown, but the
2489 * phenomenon was observed when two consecutive interrupt requests
2490 * from a given source get delivered to the same CPU and the source is
2491 * temporarily disabled in between.
2492 *
2493 * A workaround is to simulate an EOI message manually. We achieve it
2494 * by setting the trigger mode to edge and then to level when the edge
2495 * trigger mode gets detected in the TMR of a local APIC for a
2496 * level-triggered interrupt. We mask the source for the time of the
2497 * operation to prevent an edge-triggered interrupt escaping meanwhile.
2498 * The idea is from Manfred Spraul. --macro
2499 */
2500 i = irq_cfg(irq)->vector;
2501
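 /*
  * The TMR is a bank of 32-bit registers spaced 0x10 apart, one bit
  * per vector, so (i & ~0x1f) >> 1 is (i / 32) * 0x10 - the offset of
  * the register that holds vector i.
  */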
2502 v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1));
2503#endif
2504
54168ed7
IM
2505 /*
2506 * We must acknowledge the irq before we move it or the acknowledge will
2507 * not propagate properly.
2508 */
2509 ack_APIC_irq();
2510
 2511 /* Now we can move and re-enable the irq */
2512 if (unlikely(do_unmask_irq)) {
2513 /* Only migrate the irq if the ack has been received.
2514 *
2515 * On rare occasions the broadcast level triggered ack gets
2516 * delayed going to ioapics, and if we reprogram the
2517 * vector while Remote IRR is still set the irq will never
2518 * fire again.
2519 *
2520 * To prevent this scenario we read the Remote IRR bit
2521 * of the ioapic. This has two effects.
2522 * - On any sane system the read of the ioapic will
2523 * flush writes (and acks) going to the ioapic from
2524 * this cpu.
2525 * - We get to see if the ACK has actually been delivered.
2526 *
2527 * Based on failed experiments of reprogramming the
2528 * ioapic entry from outside of irq context starting
2529 * with masking the ioapic entry and then polling until
2530 * Remote IRR was clear before reprogramming the
 2531 * ioapic, I don't trust the Remote IRR bit to be
 2532 * completely accurate.
2533 *
2534 * However there appears to be no other way to plug
2535 * this race, so if the Remote IRR bit is not
2536 * accurate and is causing problems then it is a hardware bug
2537 * and you can go talk to the chipset vendor about it.
2538 */
2539 if (!io_apic_level_ack_pending(irq))
2540 move_masked_irq(irq);
2541 unmask_IO_APIC_irq(irq);
2542 }
1d025192 2543
3eb2cce8 2544#ifdef CONFIG_X86_32
1d025192
YL
2545 if (!(v & (1 << (i & 0x1f)))) {
2546 atomic_inc(&irq_mis_count);
2547 spin_lock(&ioapic_lock);
2548 __mask_and_edge_IO_APIC_irq(irq);
2549 __unmask_and_level_IO_APIC_irq(irq);
2550 spin_unlock(&ioapic_lock);
2551 }
047c8fdb 2552#endif
3eb2cce8 2553}
1d025192 2554
f5b9ed7a
IM
2555static struct irq_chip ioapic_chip __read_mostly = {
2556 .name = "IO-APIC",
ace80ab7
EB
2557 .startup = startup_ioapic_irq,
2558 .mask = mask_IO_APIC_irq,
2559 .unmask = unmask_IO_APIC_irq,
1d025192
YL
2560 .ack = ack_apic_edge,
2561 .eoi = ack_apic_level,
54d5d424 2562#ifdef CONFIG_SMP
ace80ab7 2563 .set_affinity = set_ioapic_affinity_irq,
54d5d424 2564#endif
ace80ab7 2565 .retrigger = ioapic_retrigger_irq,
1da177e4
LT
2566};
2567
54168ed7
IM
2568#ifdef CONFIG_INTR_REMAP
2569static struct irq_chip ir_ioapic_chip __read_mostly = {
2570 .name = "IR-IO-APIC",
2571 .startup = startup_ioapic_irq,
2572 .mask = mask_IO_APIC_irq,
2573 .unmask = unmask_IO_APIC_irq,
2574 .ack = ack_x2apic_edge,
2575 .eoi = ack_x2apic_level,
2576#ifdef CONFIG_SMP
2577 .set_affinity = set_ir_ioapic_affinity_irq,
2578#endif
2579 .retrigger = ioapic_retrigger_irq,
2580};
2581#endif
1da177e4
LT
2582
2583static inline void init_IO_APIC_traps(void)
2584{
2585 int irq;
08678b08 2586 struct irq_desc *desc;
da51a821 2587 struct irq_cfg *cfg;
1da177e4
LT
2588
2589 /*
2590 * NOTE! The local APIC isn't very good at handling
2591 * multiple interrupts at the same interrupt level.
2592 * As the interrupt level is determined by taking the
2593 * vector number and shifting that right by 4, we
2594 * want to spread these out a bit so that they don't
2595 * all fall in the same interrupt level.
2596 *
2597 * Also, we've got to be careful not to trash gate
2598 * 0x80, because int 0x80 is hm, kind of importantish. ;)
2599 */
8f09cd20 2600 for_each_irq_cfg(irq, cfg) {
da51a821 2601 if (IO_APIC_IRQ(irq) && !cfg->vector) {
1da177e4
LT
2602 /*
2603 * Hmm.. We don't have an entry for this,
2604 * so default to an old-fashioned 8259
2605 * interrupt if we can..
2606 */
2607 if (irq < 16)
2608 make_8259A_irq(irq);
08678b08
YL
2609 else {
2610 desc = irq_to_desc(irq);
1da177e4 2611 /* Strange. Oh, well.. */
08678b08
YL
2612 desc->chip = &no_irq_chip;
2613 }
1da177e4
LT
2614 }
2615 }
2616}
2617
f5b9ed7a
IM
2618/*
2619 * The local APIC irq-chip implementation:
2620 */
1da177e4 2621
36062448 2622static void mask_lapic_irq(unsigned int irq)
1da177e4
LT
2623{
2624 unsigned long v;
2625
2626 v = apic_read(APIC_LVT0);
593f4a78 2627 apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
1da177e4
LT
2628}
2629
36062448 2630static void unmask_lapic_irq(unsigned int irq)
1da177e4 2631{
f5b9ed7a 2632 unsigned long v;
1da177e4 2633
f5b9ed7a 2634 v = apic_read(APIC_LVT0);
593f4a78 2635 apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED);
f5b9ed7a 2636}
1da177e4 2637
54168ed7 2638static void ack_lapic_irq (unsigned int irq)
1d025192
YL
2639{
2640 ack_APIC_irq();
2641}
2642
f5b9ed7a 2643static struct irq_chip lapic_chip __read_mostly = {
9a1c6192 2644 .name = "local-APIC",
f5b9ed7a
IM
2645 .mask = mask_lapic_irq,
2646 .unmask = unmask_lapic_irq,
c88ac1df 2647 .ack = ack_lapic_irq,
1da177e4
LT
2648};
2649
497c9a19 2650static void lapic_register_intr(int irq)
c88ac1df 2651{
08678b08
YL
2652 struct irq_desc *desc;
2653
2654 desc = irq_to_desc(irq);
2655 desc->status &= ~IRQ_LEVEL;
c88ac1df
MR
2656 set_irq_chip_and_handler_name(irq, &lapic_chip, handle_edge_irq,
2657 "edge");
c88ac1df
MR
2658}
2659
e9427101 2660static void __init setup_nmi(void)
1da177e4
LT
2661{
2662 /*
36062448 2663 * Dirty trick to enable the NMI watchdog ...
1da177e4
LT
2664 * We put the 8259A master into AEOI mode and
2665 * unmask on all local APICs LVT0 as NMI.
2666 *
2667 * The idea to use the 8259A in AEOI mode ('8259A Virtual Wire')
2668 * is from Maciej W. Rozycki - so we do not have to EOI from
2669 * the NMI handler or the timer interrupt.
36062448 2670 */
1da177e4
LT
2671 apic_printk(APIC_VERBOSE, KERN_INFO "activating NMI Watchdog ...");
2672
e9427101 2673 enable_NMI_through_LVT0();
1da177e4
LT
2674
2675 apic_printk(APIC_VERBOSE, " done.\n");
2676}
2677
2678/*
 2679 * This looks a bit hackish but it's about the only way of sending
2680 * a few INTA cycles to 8259As and any associated glue logic. ICR does
2681 * not support the ExtINT mode, unfortunately. We need to send these
2682 * cycles as some i82489DX-based boards have glue logic that keeps the
2683 * 8259A interrupt line asserted until INTA. --macro
2684 */
28acf285 2685static inline void __init unlock_ExtINT_logic(void)
1da177e4 2686{
fcfd636a 2687 int apic, pin, i;
1da177e4
LT
2688 struct IO_APIC_route_entry entry0, entry1;
2689 unsigned char save_control, save_freq_select;
1da177e4 2690
fcfd636a 2691 pin = find_isa_irq_pin(8, mp_INT);
956fb531
AB
2692 if (pin == -1) {
2693 WARN_ON_ONCE(1);
2694 return;
2695 }
fcfd636a 2696 apic = find_isa_irq_apic(8, mp_INT);
956fb531
AB
2697 if (apic == -1) {
2698 WARN_ON_ONCE(1);
1da177e4 2699 return;
956fb531 2700 }
1da177e4 2701
cf4c6a2f 2702 entry0 = ioapic_read_entry(apic, pin);
fcfd636a 2703 clear_IO_APIC_pin(apic, pin);
1da177e4
LT
2704
2705 memset(&entry1, 0, sizeof(entry1));
2706
2707 entry1.dest_mode = 0; /* physical delivery */
2708 entry1.mask = 0; /* unmask IRQ now */
d83e94ac 2709 entry1.dest = hard_smp_processor_id();
1da177e4
LT
2710 entry1.delivery_mode = dest_ExtINT;
2711 entry1.polarity = entry0.polarity;
2712 entry1.trigger = 0;
2713 entry1.vector = 0;
2714
cf4c6a2f 2715 ioapic_write_entry(apic, pin, entry1);
1da177e4
LT
2716
2717 save_control = CMOS_READ(RTC_CONTROL);
2718 save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
2719 CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6,
2720 RTC_FREQ_SELECT);
2721 CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL);
2722
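 /*
  * Rate select 6 programs the RTC for 1024 Hz periodic interrupts
  * (per the MC146818 divider table). The loop below polls for up to a
  * second (100 * 10 ms), crediting 10 extra units per observed
  * periodic flag, so roughly ten RTC ticks end it early.
  */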
2723 i = 100;
2724 while (i-- > 0) {
2725 mdelay(10);
2726 if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF)
2727 i -= 10;
2728 }
2729
2730 CMOS_WRITE(save_control, RTC_CONTROL);
2731 CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
fcfd636a 2732 clear_IO_APIC_pin(apic, pin);
1da177e4 2733
cf4c6a2f 2734 ioapic_write_entry(apic, pin, entry0);
1da177e4
LT
2735}
2736
efa2559f 2737static int disable_timer_pin_1 __initdata;
047c8fdb 2738/* Actually the next is obsolete, but keep it for paranoid reasons -AK */
54168ed7 2739static int __init disable_timer_pin_setup(char *arg)
efa2559f
YL
2740{
2741 disable_timer_pin_1 = 1;
2742 return 0;
2743}
54168ed7 2744early_param("disable_timer_pin_1", disable_timer_pin_setup);
efa2559f
YL
2745
2746int timer_through_8259 __initdata;
2747
1da177e4
LT
2748/*
2749 * This code may look a bit paranoid, but it's supposed to cooperate with
2750 * a wide range of boards and BIOS bugs. Fortunately only the timer IRQ
2751 * is so screwy. Thanks to Brian Perkins for testing/hacking this beast
2752 * fanatically on his truly buggy board.
54168ed7
IM
2753 *
2754 * FIXME: really need to revamp this for all platforms.
1da177e4 2755 */
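/*
 * The fallback ladder tried below: IRQ0 through the IO-APIC pin
 * reported for the timer, then through the 8259A cascade pin, then as
 * a local APIC virtual-wire IRQ, and finally as a plain ExtINT.
 */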
8542b200 2756static inline void __init check_timer(void)
1da177e4 2757{
497c9a19 2758 struct irq_cfg *cfg = irq_cfg(0);
fcfd636a 2759 int apic1, pin1, apic2, pin2;
4aae0702 2760 unsigned long flags;
047c8fdb
YL
2761 unsigned int ver;
2762 int no_pin1 = 0;
4aae0702
IM
2763
2764 local_irq_save(flags);
d4d25dec 2765
54168ed7
IM
2766 ver = apic_read(APIC_LVR);
2767 ver = GET_APIC_VERSION(ver);
6e908947 2768
1da177e4
LT
2769 /*
2770 * get/set the timer IRQ vector:
2771 */
2772 disable_8259A_irq(0);
497c9a19 2773 assign_irq_vector(0, TARGET_CPUS);
1da177e4
LT
2774
2775 /*
d11d5794
MR
2776 * As IRQ0 is to be enabled in the 8259A, the virtual
2777 * wire has to be disabled in the local APIC. Also
2778 * timer interrupts need to be acknowledged manually in
2779 * the 8259A for the i82489DX when using the NMI
2780 * watchdog as that APIC treats NMIs as level-triggered.
2781 * The AEOI mode will finish them in the 8259A
2782 * automatically.
1da177e4 2783 */
593f4a78 2784 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
1da177e4 2785 init_8259A(1);
54168ed7 2786#ifdef CONFIG_X86_32
d11d5794 2787 timer_ack = (nmi_watchdog == NMI_IO_APIC && !APIC_INTEGRATED(ver));
54168ed7 2788#endif
1da177e4 2789
fcfd636a
EB
2790 pin1 = find_isa_irq_pin(0, mp_INT);
2791 apic1 = find_isa_irq_apic(0, mp_INT);
2792 pin2 = ioapic_i8259.pin;
2793 apic2 = ioapic_i8259.apic;
1da177e4 2794
49a66a0b
MR
2795 apic_printk(APIC_QUIET, KERN_INFO "..TIMER: vector=0x%02X "
2796 "apic1=%d pin1=%d apic2=%d pin2=%d\n",
497c9a19 2797 cfg->vector, apic1, pin1, apic2, pin2);
1da177e4 2798
691874fa
MR
2799 /*
2800 * Some BIOS writers are clueless and report the ExtINTA
2801 * I/O APIC input from the cascaded 8259A as the timer
2802 * interrupt input. So just in case, if only one pin
2803 * was found above, try it both directly and through the
2804 * 8259A.
2805 */
2806 if (pin1 == -1) {
54168ed7
IM
2807#ifdef CONFIG_INTR_REMAP
2808 if (intr_remapping_enabled)
2809 panic("BIOS bug: timer not connected to IO-APIC");
2810#endif
691874fa
MR
2811 pin1 = pin2;
2812 apic1 = apic2;
2813 no_pin1 = 1;
2814 } else if (pin2 == -1) {
2815 pin2 = pin1;
2816 apic2 = apic1;
2817 }
2818
1da177e4
LT
2819 if (pin1 != -1) {
2820 /*
2821 * Ok, does IRQ0 through the IOAPIC work?
2822 */
691874fa
MR
2823 if (no_pin1) {
2824 add_pin_to_irq(0, apic1, pin1);
497c9a19 2825 setup_timer_IRQ0_pin(apic1, pin1, cfg->vector);
691874fa 2826 }
1da177e4
LT
2827 unmask_IO_APIC_irq(0);
2828 if (timer_irq_works()) {
2829 if (nmi_watchdog == NMI_IO_APIC) {
1da177e4
LT
2830 setup_nmi();
2831 enable_8259A_irq(0);
1da177e4 2832 }
66759a01
CE
2833 if (disable_timer_pin_1 > 0)
2834 clear_IO_APIC_pin(0, pin1);
4aae0702 2835 goto out;
1da177e4 2836 }
54168ed7
IM
2837#ifdef CONFIG_INTR_REMAP
2838 if (intr_remapping_enabled)
2839 panic("timer doesn't work through Interrupt-remapped IO-APIC");
2840#endif
fcfd636a 2841 clear_IO_APIC_pin(apic1, pin1);
691874fa 2842 if (!no_pin1)
49a66a0b
MR
2843 apic_printk(APIC_QUIET, KERN_ERR "..MP-BIOS bug: "
2844 "8254 timer not connected to IO-APIC\n");
1da177e4 2845
49a66a0b
MR
2846 apic_printk(APIC_QUIET, KERN_INFO "...trying to set up timer "
2847 "(IRQ0) through the 8259A ...\n");
2848 apic_printk(APIC_QUIET, KERN_INFO
2849 "..... (found apic %d pin %d) ...\n", apic2, pin2);
1da177e4
LT
2850 /*
2851 * legacy devices should be connected to IO APIC #0
2852 */
691874fa 2853 replace_pin_at_irq(0, apic1, pin1, apic2, pin2);
497c9a19 2854 setup_timer_IRQ0_pin(apic2, pin2, cfg->vector);
24742ece 2855 unmask_IO_APIC_irq(0);
ecd29476 2856 enable_8259A_irq(0);
1da177e4 2857 if (timer_irq_works()) {
49a66a0b 2858 apic_printk(APIC_QUIET, KERN_INFO "....... works.\n");
35542c5e 2859 timer_through_8259 = 1;
1da177e4 2860 if (nmi_watchdog == NMI_IO_APIC) {
60134ebe 2861 disable_8259A_irq(0);
1da177e4 2862 setup_nmi();
60134ebe 2863 enable_8259A_irq(0);
1da177e4 2864 }
4aae0702 2865 goto out;
1da177e4
LT
2866 }
2867 /*
2868 * Cleanup, just in case ...
2869 */
ecd29476 2870 disable_8259A_irq(0);
fcfd636a 2871 clear_IO_APIC_pin(apic2, pin2);
49a66a0b 2872 apic_printk(APIC_QUIET, KERN_INFO "....... failed.\n");
1da177e4 2873 }
1da177e4
LT
2874
2875 if (nmi_watchdog == NMI_IO_APIC) {
49a66a0b
MR
2876 apic_printk(APIC_QUIET, KERN_WARNING "timer doesn't work "
2877 "through the IO-APIC - disabling NMI Watchdog!\n");
067fa0ff 2878 nmi_watchdog = NMI_NONE;
1da177e4 2879 }
54168ed7 2880#ifdef CONFIG_X86_32
d11d5794 2881 timer_ack = 0;
54168ed7 2882#endif
1da177e4 2883
49a66a0b
MR
2884 apic_printk(APIC_QUIET, KERN_INFO
2885 "...trying to set up timer as Virtual Wire IRQ...\n");
1da177e4 2886
497c9a19
YL
2887 lapic_register_intr(0);
2888 apic_write(APIC_LVT0, APIC_DM_FIXED | cfg->vector); /* Fixed mode */
1da177e4
LT
2889 enable_8259A_irq(0);
2890
2891 if (timer_irq_works()) {
49a66a0b 2892 apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
4aae0702 2893 goto out;
1da177e4 2894 }
e67465f1 2895 disable_8259A_irq(0);
497c9a19 2896 apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector);
49a66a0b 2897 apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n");
1da177e4 2898
49a66a0b
MR
2899 apic_printk(APIC_QUIET, KERN_INFO
2900 "...trying to set up timer as ExtINT IRQ...\n");
1da177e4 2901
1da177e4
LT
2902 init_8259A(0);
2903 make_8259A_irq(0);
593f4a78 2904 apic_write(APIC_LVT0, APIC_DM_EXTINT);
1da177e4
LT
2905
2906 unlock_ExtINT_logic();
2907
2908 if (timer_irq_works()) {
49a66a0b 2909 apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
4aae0702 2910 goto out;
1da177e4 2911 }
49a66a0b 2912 apic_printk(APIC_QUIET, KERN_INFO "..... failed :(.\n");
1da177e4 2913 panic("IO-APIC + timer doesn't work! Boot with apic=debug and send a "
49a66a0b 2914 "report. Then try booting with the 'noapic' option.\n");
4aae0702
IM
2915out:
2916 local_irq_restore(flags);
1da177e4
LT
2917}
2918
2919/*
af174783
MR
2920 * Traditionally ISA IRQ2 is the cascade IRQ, and is not available
2921 * to devices. However there may be an I/O APIC pin available for
2922 * this interrupt regardless. The pin may be left unconnected, but
2923 * typically it will be reused as an ExtINT cascade interrupt for
2924 * the master 8259A. In the MPS case such a pin will normally be
2925 * reported as an ExtINT interrupt in the MP table. With ACPI
2926 * there is no provision for ExtINT interrupts, and in the absence
2927 * of an override it would be treated as an ordinary ISA I/O APIC
2928 * interrupt, that is edge-triggered and unmasked by default. We
2929 * used to do this, but it caused problems on some systems because
2930 * of the NMI watchdog and sometimes IRQ0 of the 8254 timer using
2931 * the same ExtINT cascade interrupt to drive the local APIC of the
2932 * bootstrap processor. Therefore we refrain from routing IRQ2 to
2933 * the I/O APIC in all cases now. No actual device should request
2934 * it anyway. --macro
1da177e4
LT
2935 */
2936#define PIC_IRQS (1 << PIC_CASCADE_IR)
2937
2938void __init setup_IO_APIC(void)
2939{
54168ed7
IM
2940
2941#ifdef CONFIG_X86_32
1da177e4 2942 enable_IO_APIC();
54168ed7
IM
2943#else
2944 /*
2945 * calling enable_IO_APIC() is moved to setup_local_APIC for BP
2946 */
2947#endif
1da177e4 2948
af174783 2949 io_apic_irqs = ~PIC_IRQS;
1da177e4 2950
54168ed7
IM
2951 apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n");
2952 /*
2953 * Set up IO-APIC IRQ routing.
2954 */
2955#ifdef CONFIG_X86_32
2956 if (!acpi_ioapic)
2957 setup_ioapic_ids_from_mpc();
2958#endif
1da177e4
LT
2959 sync_Arb_IDs();
2960 setup_IO_APIC_irqs();
2961 init_IO_APIC_traps();
1e4c85f9 2962 check_timer();
1da177e4
LT
2963}
2964
2965/*
54168ed7
IM
 2966 * Called after all the initialization is done. If we didn't find any
 2967 * APIC bugs then we can allow the modify fast path.
1da177e4 2968 */
36062448 2969
1da177e4
LT
2970static int __init io_apic_bug_finalize(void)
2971{
54168ed7
IM
2972 if (sis_apic_bug == -1)
2973 sis_apic_bug = 0;
2974 return 0;
1da177e4
LT
2975}
2976
2977late_initcall(io_apic_bug_finalize);
2978
2979struct sysfs_ioapic_data {
2980 struct sys_device dev;
2981 struct IO_APIC_route_entry entry[0];
2982};
54168ed7 2983static struct sysfs_ioapic_data *mp_ioapic_data[MAX_IO_APICS];
1da177e4 2984
438510f6 2985static int ioapic_suspend(struct sys_device *dev, pm_message_t state)
1da177e4
LT
2986{
2987 struct IO_APIC_route_entry *entry;
2988 struct sysfs_ioapic_data *data;
1da177e4 2989 int i;
36062448 2990
1da177e4
LT
2991 data = container_of(dev, struct sysfs_ioapic_data, dev);
2992 entry = data->entry;
54168ed7
IM
 2993 for (i = 0; i < nr_ioapic_registers[dev->id]; i++, entry++)
2994 *entry = ioapic_read_entry(dev->id, i);
1da177e4
LT
2995
2996 return 0;
2997}
2998
2999static int ioapic_resume(struct sys_device *dev)
3000{
3001 struct IO_APIC_route_entry *entry;
3002 struct sysfs_ioapic_data *data;
3003 unsigned long flags;
3004 union IO_APIC_reg_00 reg_00;
3005 int i;
36062448 3006
1da177e4
LT
3007 data = container_of(dev, struct sysfs_ioapic_data, dev);
3008 entry = data->entry;
3009
3010 spin_lock_irqsave(&ioapic_lock, flags);
3011 reg_00.raw = io_apic_read(dev->id, 0);
ec2cd0a2
AS
3012 if (reg_00.bits.ID != mp_ioapics[dev->id].mp_apicid) {
3013 reg_00.bits.ID = mp_ioapics[dev->id].mp_apicid;
1da177e4
LT
3014 io_apic_write(dev->id, 0, reg_00.raw);
3015 }
1da177e4 3016 spin_unlock_irqrestore(&ioapic_lock, flags);
36062448 3017 for (i = 0; i < nr_ioapic_registers[dev->id]; i++)
cf4c6a2f 3018 ioapic_write_entry(dev->id, i, entry[i]);
1da177e4
LT
3019
3020 return 0;
3021}
3022
3023static struct sysdev_class ioapic_sysdev_class = {
af5ca3f4 3024 .name = "ioapic",
1da177e4
LT
3025 .suspend = ioapic_suspend,
3026 .resume = ioapic_resume,
3027};
3028
3029static int __init ioapic_init_sysfs(void)
3030{
54168ed7
IM
3031 struct sys_device * dev;
3032 int i, size, error;
1da177e4
LT
3033
3034 error = sysdev_class_register(&ioapic_sysdev_class);
3035 if (error)
3036 return error;
3037
54168ed7 3038 for (i = 0; i < nr_ioapics; i++ ) {
36062448 3039 size = sizeof(struct sys_device) + nr_ioapic_registers[i]
1da177e4 3040 * sizeof(struct IO_APIC_route_entry);
25556c16 3041 mp_ioapic_data[i] = kzalloc(size, GFP_KERNEL);
1da177e4
LT
3042 if (!mp_ioapic_data[i]) {
3043 printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
3044 continue;
3045 }
1da177e4 3046 dev = &mp_ioapic_data[i]->dev;
36062448 3047 dev->id = i;
1da177e4
LT
3048 dev->cls = &ioapic_sysdev_class;
3049 error = sysdev_register(dev);
3050 if (error) {
3051 kfree(mp_ioapic_data[i]);
3052 mp_ioapic_data[i] = NULL;
3053 printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
3054 continue;
3055 }
3056 }
3057
3058 return 0;
3059}
3060
3061device_initcall(ioapic_init_sysfs);
3062
3fc471ed 3063/*
95d77884 3064 * Dynamic irq allocation and deallocation
3fc471ed 3065 */
199751d7 3066unsigned int create_irq_nr(unsigned int irq_want)
3fc471ed 3067{
ace80ab7 3068 /* Allocate an unused irq */
54168ed7
IM
3069 unsigned int irq;
3070 unsigned int new;
3fc471ed 3071 unsigned long flags;
da51a821 3072 struct irq_cfg *cfg_new;
3fc471ed 3073
497c9a19 3074#ifndef CONFIG_HAVE_SPARSE_IRQ
199751d7 3075 irq_want = nr_irqs - 1;
497c9a19 3076#endif
199751d7
YL
3077
3078 irq = 0;
ace80ab7 3079 spin_lock_irqsave(&vector_lock, flags);
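	/*
	 * Scan downward from irq_want for an irq that is neither a legacy
	 * 8259A line nor already has a vector, allocating its irq_cfg on
	 * demand before trying to assign a vector to it.
	 */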
54168ed7 3080 for (new = irq_want; new > 0; new--) {
ace80ab7
EB
3081 if (platform_legacy_irq(new))
3082 continue;
da51a821
YL
3083 cfg_new = irq_cfg(new);
3084 if (cfg_new && cfg_new->vector != 0)
ace80ab7 3085 continue;
047c8fdb 3086 /* check if need to create one */
da51a821
YL
3087 if (!cfg_new)
3088 cfg_new = irq_cfg_alloc(new);
497c9a19 3089 if (__assign_irq_vector(new, TARGET_CPUS) == 0)
ace80ab7
EB
3090 irq = new;
3091 break;
3092 }
3093 spin_unlock_irqrestore(&vector_lock, flags);
3fc471ed 3094
199751d7 3095 if (irq > 0) {
3fc471ed
EB
3096 dynamic_irq_init(irq);
3097 }
3098 return irq;
3099}
3100
199751d7
YL
3101int create_irq(void)
3102{
54168ed7
IM
3103 int irq;
3104
3105 irq = create_irq_nr(nr_irqs - 1);
3106
3107 if (irq == 0)
3108 irq = -1;
3109
3110 return irq;
199751d7
YL
3111}
3112
3fc471ed
EB
3113void destroy_irq(unsigned int irq)
3114{
3115 unsigned long flags;
3fc471ed
EB
3116
3117 dynamic_irq_cleanup(irq);
3118
54168ed7
IM
3119#ifdef CONFIG_INTR_REMAP
3120 free_irte(irq);
3121#endif
3fc471ed 3122 spin_lock_irqsave(&vector_lock, flags);
497c9a19 3123 __clear_irq_vector(irq);
3fc471ed
EB
3124 spin_unlock_irqrestore(&vector_lock, flags);
3125}
3fc471ed 3126
2d3fcc1c 3127/*
27b46d76 3128 * MSI message composition
2d3fcc1c
EB
3129 */
3130#ifdef CONFIG_PCI_MSI
3b7d1921 3131static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
2d3fcc1c 3132{
497c9a19
YL
3133 struct irq_cfg *cfg;
3134 int err;
2d3fcc1c 3135 unsigned dest;
497c9a19 3136 cpumask_t tmp;
2d3fcc1c 3137
497c9a19
YL
3138 tmp = TARGET_CPUS;
3139 err = assign_irq_vector(irq, tmp);
3140 if (err)
3141 return err;
2d3fcc1c 3142
497c9a19
YL
3143 cfg = irq_cfg(irq);
3144 cpus_and(tmp, cfg->domain, tmp);
3145 dest = cpu_mask_to_apicid(tmp);
3146
54168ed7
IM
3147#ifdef CONFIG_INTR_REMAP
3148 if (irq_remapped(irq)) {
3149 struct irte irte;
3150 int ir_index;
3151 u16 sub_handle;
3152
3153 ir_index = map_irq_to_irte_handle(irq, &sub_handle);
3154 BUG_ON(ir_index == -1);
3155
3156 memset (&irte, 0, sizeof(irte));
3157
3158 irte.present = 1;
3159 irte.dst_mode = INT_DEST_MODE;
3160 irte.trigger_mode = 0; /* edge */
3161 irte.dlvry_mode = INT_DELIVERY_MODE;
3162 irte.vector = cfg->vector;
3163 irte.dest_id = IRTE_DEST(dest);
3164
3165 modify_irte(irq, &irte);
3166
3167 msg->address_hi = MSI_ADDR_BASE_HI;
3168 msg->data = sub_handle;
3169 msg->address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_IR_EXT_INT |
3170 MSI_ADDR_IR_SHV |
3171 MSI_ADDR_IR_INDEX1(ir_index) |
3172 MSI_ADDR_IR_INDEX2(ir_index);
3173 } else
3174#endif
3175 {
3176 msg->address_hi = MSI_ADDR_BASE_HI;
3177 msg->address_lo =
3178 MSI_ADDR_BASE_LO |
3179 ((INT_DEST_MODE == 0) ?
3180 MSI_ADDR_DEST_MODE_PHYSICAL:
3181 MSI_ADDR_DEST_MODE_LOGICAL) |
3182 ((INT_DELIVERY_MODE != dest_LowestPrio) ?
3183 MSI_ADDR_REDIRECTION_CPU:
3184 MSI_ADDR_REDIRECTION_LOWPRI) |
3185 MSI_ADDR_DEST_ID(dest);
497c9a19 3186
54168ed7
IM
3187 msg->data =
3188 MSI_DATA_TRIGGER_EDGE |
3189 MSI_DATA_LEVEL_ASSERT |
3190 ((INT_DELIVERY_MODE != dest_LowestPrio) ?
3191 MSI_DATA_DELIVERY_FIXED:
3192 MSI_DATA_DELIVERY_LOWPRI) |
3193 MSI_DATA_VECTOR(cfg->vector);
3194 }
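	/*
	 * Note: the non-remapped message targets the 0xFEExxxxx MSI window;
	 * address_lo carries the destination APIC ID in bits 12-19 and the
	 * low byte of msg->data is the vector, e.g. dest 0x01 yields
	 * address 0xfee01000.
	 */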
497c9a19 3195 return err;
2d3fcc1c
EB
3196}
3197
3b7d1921
EB
3198#ifdef CONFIG_SMP
3199static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
2d3fcc1c 3200{
497c9a19 3201 struct irq_cfg *cfg;
3b7d1921
EB
3202 struct msi_msg msg;
3203 unsigned int dest;
3204 cpumask_t tmp;
54168ed7 3205 struct irq_desc *desc;
3b7d1921
EB
3206
3207 cpus_and(tmp, mask, cpu_online_map);
3208 if (cpus_empty(tmp))
497c9a19 3209 return;
2d3fcc1c 3210
497c9a19 3211 if (assign_irq_vector(irq, mask))
3b7d1921 3212 return;
2d3fcc1c 3213
497c9a19
YL
3214 cfg = irq_cfg(irq);
3215 cpus_and(tmp, cfg->domain, mask);
3216 dest = cpu_mask_to_apicid(tmp);
3b7d1921
EB
3217
3218 read_msi_msg(irq, &msg);
3219
3220 msg.data &= ~MSI_DATA_VECTOR_MASK;
497c9a19 3221 msg.data |= MSI_DATA_VECTOR(cfg->vector);
3b7d1921
EB
3222 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
3223 msg.address_lo |= MSI_ADDR_DEST_ID(dest);
3224
3225 write_msi_msg(irq, &msg);
54168ed7
IM
3226 desc = irq_to_desc(irq);
3227 desc->affinity = mask;
2d3fcc1c 3228}
54168ed7
IM
3229
3230#ifdef CONFIG_INTR_REMAP
3231/*
3232 * Migrate the MSI irq to another cpumask. This migration is
3233 * done in the process context using interrupt-remapping hardware.
3234 */
3235static void ir_set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
3236{
3237 struct irq_cfg *cfg;
3238 unsigned int dest;
3239 cpumask_t tmp, cleanup_mask;
3240 struct irte irte;
3241 struct irq_desc *desc;
3242
3243 cpus_and(tmp, mask, cpu_online_map);
3244 if (cpus_empty(tmp))
3245 return;
3246
3247 if (get_irte(irq, &irte))
3248 return;
3249
3250 if (assign_irq_vector(irq, mask))
3251 return;
3252
3253 cfg = irq_cfg(irq);
3254 cpus_and(tmp, cfg->domain, mask);
3255 dest = cpu_mask_to_apicid(tmp);
3256
3257 irte.vector = cfg->vector;
3258 irte.dest_id = IRTE_DEST(dest);
3259
3260 /*
3261 * atomically update the IRTE with the new destination and vector.
3262 */
3263 modify_irte(irq, &irte);
3264
3265 /*
3266 * After this point, all the interrupts will start arriving
 3267 * at the new destination. So, time to clean up the previous
3268 * vector allocation.
3269 */
3270 if (cfg->move_in_progress) {
3271 cpus_and(cleanup_mask, cfg->old_domain, cpu_online_map);
3272 cfg->move_cleanup_count = cpus_weight(cleanup_mask);
3273 send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
3274 cfg->move_in_progress = 0;
3275 }
3276
3277 desc = irq_to_desc(irq);
3278 desc->affinity = mask;
3279}
3280#endif
3b7d1921 3281#endif /* CONFIG_SMP */
2d3fcc1c 3282
3b7d1921
EB
3283/*
3284 * IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices,
3285 * which implement the MSI or MSI-X Capability Structure.
3286 */
3287static struct irq_chip msi_chip = {
3288 .name = "PCI-MSI",
3289 .unmask = unmask_msi_irq,
3290 .mask = mask_msi_irq,
1d025192 3291 .ack = ack_apic_edge,
3b7d1921
EB
3292#ifdef CONFIG_SMP
3293 .set_affinity = set_msi_irq_affinity,
3294#endif
3295 .retrigger = ioapic_retrigger_irq,
2d3fcc1c
EB
3296};
3297
54168ed7
IM
3298#ifdef CONFIG_INTR_REMAP
3299static struct irq_chip msi_ir_chip = {
3300 .name = "IR-PCI-MSI",
3301 .unmask = unmask_msi_irq,
3302 .mask = mask_msi_irq,
3303 .ack = ack_x2apic_edge,
3304#ifdef CONFIG_SMP
3305 .set_affinity = ir_set_msi_irq_affinity,
3306#endif
3307 .retrigger = ioapic_retrigger_irq,
3308};
3309
3310/*
3311 * Map the PCI dev to the corresponding remapping hardware unit
3312 * and allocate 'nvec' consecutive interrupt-remapping table entries
3313 * in it.
3314 */
3315static int msi_alloc_irte(struct pci_dev *dev, int irq, int nvec)
3316{
3317 struct intel_iommu *iommu;
3318 int index;
3319
3320 iommu = map_dev_to_ir(dev);
3321 if (!iommu) {
3322 printk(KERN_ERR
3323 "Unable to map PCI %s to iommu\n", pci_name(dev));
3324 return -ENOENT;
3325 }
3326
3327 index = alloc_irte(iommu, irq, nvec);
3328 if (index < 0) {
3329 printk(KERN_ERR
3330 "Unable to allocate %d IRTE for PCI %s\n", nvec,
3331 pci_name(dev));
3332 return -ENOSPC;
3333 }
3334 return index;
3335}
3336#endif
1d025192
YL
3337
3338static int setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc, int irq)
3339{
3340 int ret;
3341 struct msi_msg msg;
3342
3343 ret = msi_compose_msg(dev, irq, &msg);
3344 if (ret < 0)
3345 return ret;
3346
3347 set_irq_msi(irq, desc);
3348 write_msi_msg(irq, &msg);
3349
54168ed7
IM
3350#ifdef CONFIG_INTR_REMAP
3351 if (irq_remapped(irq)) {
3352 struct irq_desc *desc = irq_to_desc(irq);
3353 /*
3354 * irq migration in process context
3355 */
3356 desc->status |= IRQ_MOVE_PCNTXT;
3357 set_irq_chip_and_handler_name(irq, &msi_ir_chip, handle_edge_irq, "edge");
3358 } else
3359#endif
3360 set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq, "edge");
1d025192
YL
3361
3362 return 0;
3363}
3364
199751d7
YL
3365static unsigned int build_irq_for_pci_dev(struct pci_dev *dev)
3366{
3367 unsigned int irq;
3368
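	/*
	 * Packs bus << 20 | devfn << 12 as a per-device hint, e.g. bus 0x02,
	 * devfn 0x08 gives 0x00208000; the callers add 0x100 and
	 * create_irq_nr() then searches downward from that value.
	 */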
3369 irq = dev->bus->number;
3370 irq <<= 8;
3371 irq |= dev->devfn;
3372 irq <<= 12;
3373
3374 return irq;
3375}
3376
f7feaca7 3377int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc)
3b7d1921 3378{
54168ed7
IM
3379 unsigned int irq;
3380 int ret;
199751d7
YL
3381 unsigned int irq_want;
3382
3383 irq_want = build_irq_for_pci_dev(dev) + 0x100;
3384
3385 irq = create_irq_nr(irq_want);
199751d7
YL
3386 if (irq == 0)
3387 return -1;
f7feaca7 3388
54168ed7
IM
3389#ifdef CONFIG_INTR_REMAP
3390 if (!intr_remapping_enabled)
3391 goto no_ir;
3392
3393 ret = msi_alloc_irte(dev, irq, 1);
3394 if (ret < 0)
3395 goto error;
3396no_ir:
3397#endif
1d025192 3398 ret = setup_msi_irq(dev, desc, irq);
f7feaca7
EB
3399 if (ret < 0) {
3400 destroy_irq(irq);
3b7d1921 3401 return ret;
54168ed7 3402 }
7fe3730d 3403 return 0;
54168ed7
IM
3404
3405#ifdef CONFIG_INTR_REMAP
3406error:
3407 destroy_irq(irq);
3408 return ret;
3409#endif
3b7d1921
EB
3410}
3411
047c8fdb
YL
3412int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
3413{
54168ed7
IM
3414 unsigned int irq;
3415 int ret, sub_handle;
3416 struct msi_desc *desc;
3417 unsigned int irq_want;
3418
3419#ifdef CONFIG_INTR_REMAP
 3420 struct intel_iommu *iommu = NULL;
3421 int index = 0;
3422#endif
3423
3424 irq_want = build_irq_for_pci_dev(dev) + 0x100;
3425 sub_handle = 0;
3426 list_for_each_entry(desc, &dev->msi_list, list) {
3427 irq = create_irq_nr(irq_want--);
3428 if (irq == 0)
3429 return -1;
3430#ifdef CONFIG_INTR_REMAP
3431 if (!intr_remapping_enabled)
3432 goto no_ir;
3433
3434 if (!sub_handle) {
3435 /*
 3436 * allocate the consecutive block of IRTEs
3437 * for 'nvec'
3438 */
3439 index = msi_alloc_irte(dev, irq, nvec);
3440 if (index < 0) {
3441 ret = index;
3442 goto error;
3443 }
3444 } else {
3445 iommu = map_dev_to_ir(dev);
3446 if (!iommu) {
3447 ret = -ENOENT;
3448 goto error;
3449 }
3450 /*
 3451 * set up the mapping between the irq and the IRTE
3452 * base index, the sub_handle pointing to the
3453 * appropriate interrupt remap table entry.
3454 */
3455 set_irte_irq(irq, iommu, index, sub_handle);
3456 }
3457no_ir:
3458#endif
3459 ret = setup_msi_irq(dev, desc, irq);
3460 if (ret < 0)
3461 goto error;
3462 sub_handle++;
3463 }
3464 return 0;
047c8fdb
YL
3465
3466error:
54168ed7
IM
3467 destroy_irq(irq);
3468 return ret;
047c8fdb
YL
3469}
3470
3b7d1921
EB
3471void arch_teardown_msi_irq(unsigned int irq)
3472{
f7feaca7 3473 destroy_irq(irq);
3b7d1921
EB
3474}
3475
54168ed7
IM
3476#ifdef CONFIG_DMAR
3477#ifdef CONFIG_SMP
3478static void dmar_msi_set_affinity(unsigned int irq, cpumask_t mask)
3479{
3480 struct irq_cfg *cfg;
3481 struct msi_msg msg;
3482 unsigned int dest;
3483 cpumask_t tmp;
3484 struct irq_desc *desc;
3485
3486 cpus_and(tmp, mask, cpu_online_map);
3487 if (cpus_empty(tmp))
3488 return;
3489
3490 if (assign_irq_vector(irq, mask))
3491 return;
3492
3493 cfg = irq_cfg(irq);
3494 cpus_and(tmp, cfg->domain, mask);
3495 dest = cpu_mask_to_apicid(tmp);
3496
3497 dmar_msi_read(irq, &msg);
3498
3499 msg.data &= ~MSI_DATA_VECTOR_MASK;
3500 msg.data |= MSI_DATA_VECTOR(cfg->vector);
3501 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
3502 msg.address_lo |= MSI_ADDR_DEST_ID(dest);
3503
3504 dmar_msi_write(irq, &msg);
3505 desc = irq_to_desc(irq);
3506 desc->affinity = mask;
3507}
3508#endif /* CONFIG_SMP */
3509
3510struct irq_chip dmar_msi_type = {
3511 .name = "DMAR_MSI",
3512 .unmask = dmar_msi_unmask,
3513 .mask = dmar_msi_mask,
3514 .ack = ack_apic_edge,
3515#ifdef CONFIG_SMP
3516 .set_affinity = dmar_msi_set_affinity,
3517#endif
3518 .retrigger = ioapic_retrigger_irq,
3519};
3520
3521int arch_setup_dmar_msi(unsigned int irq)
3522{
3523 int ret;
3524 struct msi_msg msg;
2d3fcc1c 3525
54168ed7
IM
3526 ret = msi_compose_msg(NULL, irq, &msg);
3527 if (ret < 0)
3528 return ret;
3529 dmar_msi_write(irq, &msg);
3530 set_irq_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq,
3531 "edge");
3532 return 0;
3533}
3534#endif
3535
58ac1e76 3536#ifdef CONFIG_HPET_TIMER
3537
3538#ifdef CONFIG_SMP
3539static void hpet_msi_set_affinity(unsigned int irq, cpumask_t mask)
3540{
3541 struct irq_cfg *cfg;
3542 struct irq_desc *desc;
3543 struct msi_msg msg;
3544 unsigned int dest;
3545 cpumask_t tmp;
3546
3547 cpus_and(tmp, mask, cpu_online_map);
3548 if (cpus_empty(tmp))
3549 return;
3550
3551 if (assign_irq_vector(irq, mask))
3552 return;
3553
3554 cfg = irq_cfg(irq);
3555 cpus_and(tmp, cfg->domain, mask);
3556 dest = cpu_mask_to_apicid(tmp);
3557
3558 hpet_msi_read(irq, &msg);
3559
3560 msg.data &= ~MSI_DATA_VECTOR_MASK;
3561 msg.data |= MSI_DATA_VECTOR(cfg->vector);
3562 msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
3563 msg.address_lo |= MSI_ADDR_DEST_ID(dest);
3564
3565 hpet_msi_write(irq, &msg);
3566 desc = irq_to_desc(irq);
3567 desc->affinity = mask;
3568}
3569#endif /* CONFIG_SMP */
3570
3571struct irq_chip hpet_msi_type = {
3572 .name = "HPET_MSI",
3573 .unmask = hpet_msi_unmask,
3574 .mask = hpet_msi_mask,
3575 .ack = ack_apic_edge,
3576#ifdef CONFIG_SMP
3577 .set_affinity = hpet_msi_set_affinity,
3578#endif
3579 .retrigger = ioapic_retrigger_irq,
3580};
3581
3582int arch_setup_hpet_msi(unsigned int irq)
3583{
3584 int ret;
3585 struct msi_msg msg;
3586
3587 ret = msi_compose_msg(NULL, irq, &msg);
3588 if (ret < 0)
3589 return ret;
3590
3591 hpet_msi_write(irq, &msg);
3592 set_irq_chip_and_handler_name(irq, &hpet_msi_type, handle_edge_irq,
3593 "edge");
3594 return 0;
3595}
3596#endif
3597
54168ed7 3598#endif /* CONFIG_PCI_MSI */
8b955b0d
EB
3599/*
3600 * Hypertransport interrupt support
3601 */
3602#ifdef CONFIG_HT_IRQ
3603
3604#ifdef CONFIG_SMP
3605
497c9a19 3606static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector)
8b955b0d 3607{
ec68307c
EB
3608 struct ht_irq_msg msg;
3609 fetch_ht_irq_msg(irq, &msg);
8b955b0d 3610
497c9a19 3611 msg.address_lo &= ~(HT_IRQ_LOW_VECTOR_MASK | HT_IRQ_LOW_DEST_ID_MASK);
ec68307c 3612 msg.address_hi &= ~(HT_IRQ_HIGH_DEST_ID_MASK);
8b955b0d 3613
497c9a19 3614 msg.address_lo |= HT_IRQ_LOW_VECTOR(vector) | HT_IRQ_LOW_DEST_ID(dest);
ec68307c 3615 msg.address_hi |= HT_IRQ_HIGH_DEST_ID(dest);
8b955b0d 3616
ec68307c 3617 write_ht_irq_msg(irq, &msg);
8b955b0d
EB
3618}
3619
3620static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask)
3621{
497c9a19 3622 struct irq_cfg *cfg;
8b955b0d
EB
3623 unsigned int dest;
3624 cpumask_t tmp;
54168ed7 3625 struct irq_desc *desc;
8b955b0d
EB
3626
3627 cpus_and(tmp, mask, cpu_online_map);
3628 if (cpus_empty(tmp))
497c9a19 3629 return;
8b955b0d 3630
497c9a19
YL
3631 if (assign_irq_vector(irq, mask))
3632 return;
8b955b0d 3633
497c9a19
YL
3634 cfg = irq_cfg(irq);
3635 cpus_and(tmp, cfg->domain, mask);
3636 dest = cpu_mask_to_apicid(tmp);
8b955b0d 3637
497c9a19 3638 target_ht_irq(irq, dest, cfg->vector);
54168ed7
IM
3639 desc = irq_to_desc(irq);
3640 desc->affinity = mask;
8b955b0d
EB
3641}
3642#endif
3643
c37e108d 3644static struct irq_chip ht_irq_chip = {
8b955b0d
EB
3645 .name = "PCI-HT",
3646 .mask = mask_ht_irq,
3647 .unmask = unmask_ht_irq,
1d025192 3648 .ack = ack_apic_edge,
8b955b0d
EB
3649#ifdef CONFIG_SMP
3650 .set_affinity = set_ht_irq_affinity,
3651#endif
3652 .retrigger = ioapic_retrigger_irq,
3653};
3654
3655int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
3656{
497c9a19
YL
3657 struct irq_cfg *cfg;
3658 int err;
3659 cpumask_t tmp;
8b955b0d 3660
497c9a19
YL
3661 tmp = TARGET_CPUS;
3662 err = assign_irq_vector(irq, tmp);
54168ed7 3663 if (!err) {
ec68307c 3664 struct ht_irq_msg msg;
8b955b0d 3665 unsigned dest;
8b955b0d 3666
497c9a19
YL
3667 cfg = irq_cfg(irq);
3668 cpus_and(tmp, cfg->domain, tmp);
8b955b0d
EB
3669 dest = cpu_mask_to_apicid(tmp);
3670
ec68307c 3671 msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);
8b955b0d 3672
ec68307c
EB
3673 msg.address_lo =
3674 HT_IRQ_LOW_BASE |
8b955b0d 3675 HT_IRQ_LOW_DEST_ID(dest) |
497c9a19 3676 HT_IRQ_LOW_VECTOR(cfg->vector) |
8b955b0d
EB
3677 ((INT_DEST_MODE == 0) ?
3678 HT_IRQ_LOW_DM_PHYSICAL :
3679 HT_IRQ_LOW_DM_LOGICAL) |
3680 HT_IRQ_LOW_RQEOI_EDGE |
3681 ((INT_DELIVERY_MODE != dest_LowestPrio) ?
3682 HT_IRQ_LOW_MT_FIXED :
3683 HT_IRQ_LOW_MT_ARBITRATED) |
3684 HT_IRQ_LOW_IRQ_MASKED;
3685
ec68307c 3686 write_ht_irq_msg(irq, &msg);
8b955b0d 3687
a460e745
IM
3688 set_irq_chip_and_handler_name(irq, &ht_irq_chip,
3689 handle_edge_irq, "edge");
8b955b0d 3690 }
497c9a19 3691 return err;
8b955b0d
EB
3692}
3693#endif /* CONFIG_HT_IRQ */
3694
9d6a4d08
YL
3695int __init io_apic_get_redir_entries (int ioapic)
3696{
3697 union IO_APIC_reg_01 reg_01;
3698 unsigned long flags;
3699
3700 spin_lock_irqsave(&ioapic_lock, flags);
3701 reg_01.raw = io_apic_read(ioapic, 1);
3702 spin_unlock_irqrestore(&ioapic_lock, flags);
3703
3704 return reg_01.bits.entries;
3705}
3706
3707int __init probe_nr_irqs(void)
3708{
3709 int idx;
3710 int nr = 0;
052c0bff
YL
3711#ifndef CONFIG_XEN
3712 int nr_min = 32;
3713#else
3714 int nr_min = NR_IRQS;
3715#endif
9d6a4d08
YL
3716
3717 for (idx = 0; idx < nr_ioapics; idx++)
052c0bff 3718 nr += io_apic_get_redir_entries(idx) + 1;
9d6a4d08
YL
3719
3720 /* double it for hotplug and msi and nmi */
3721 nr <<= 1;
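	/*
	 * e.g. a single 24-pin IO-APIC gives nr = 24, doubled to 48, which
	 * already clears the non-Xen minimum of 32.
	 */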
3722
3723 /* something wrong ? */
052c0bff
YL
3724 if (nr < nr_min)
3725 nr = nr_min;
9d6a4d08
YL
3726
3727 return nr;
3728}
3729
1da177e4 3730/* --------------------------------------------------------------------------
54168ed7 3731 ACPI-based IOAPIC Configuration
1da177e4
LT
3732 -------------------------------------------------------------------------- */
3733
888ba6c6 3734#ifdef CONFIG_ACPI
1da177e4 3735
54168ed7 3736#ifdef CONFIG_X86_32
36062448 3737int __init io_apic_get_unique_id(int ioapic, int apic_id)
1da177e4
LT
3738{
3739 union IO_APIC_reg_00 reg_00;
3740 static physid_mask_t apic_id_map = PHYSID_MASK_NONE;
3741 physid_mask_t tmp;
3742 unsigned long flags;
3743 int i = 0;
3744
3745 /*
36062448
PC
3746 * The P4 platform supports up to 256 APIC IDs on two separate APIC
3747 * buses (one for LAPICs, one for IOAPICs), where predecessors only
1da177e4 3748 * supports up to 16 on one shared APIC bus.
36062448 3749 *
1da177e4
LT
3750 * TBD: Expand LAPIC/IOAPIC support on P4-class systems to take full
3751 * advantage of new APIC bus architecture.
3752 */
3753
3754 if (physids_empty(apic_id_map))
3755 apic_id_map = ioapic_phys_id_map(phys_cpu_present_map);
3756
3757 spin_lock_irqsave(&ioapic_lock, flags);
3758 reg_00.raw = io_apic_read(ioapic, 0);
3759 spin_unlock_irqrestore(&ioapic_lock, flags);
3760
3761 if (apic_id >= get_physical_broadcast()) {
3762 printk(KERN_WARNING "IOAPIC[%d]: Invalid apic_id %d, trying "
3763 "%d\n", ioapic, apic_id, reg_00.bits.ID);
3764 apic_id = reg_00.bits.ID;
3765 }
3766
3767 /*
36062448 3768 * Every APIC in a system must have a unique ID or we get lots of nice
1da177e4
LT
3769 * 'stuck on smp_invalidate_needed IPI wait' messages.
3770 */
3771 if (check_apicid_used(apic_id_map, apic_id)) {
3772
3773 for (i = 0; i < get_physical_broadcast(); i++) {
3774 if (!check_apicid_used(apic_id_map, i))
3775 break;
3776 }
3777
3778 if (i == get_physical_broadcast())
3779 panic("Max apic_id exceeded!\n");
3780
3781 printk(KERN_WARNING "IOAPIC[%d]: apic_id %d already used, "
3782 "trying %d\n", ioapic, apic_id, i);
3783
3784 apic_id = i;
36062448 3785 }
1da177e4
LT
3786
3787 tmp = apicid_to_cpu_present(apic_id);
3788 physids_or(apic_id_map, apic_id_map, tmp);
3789
3790 if (reg_00.bits.ID != apic_id) {
3791 reg_00.bits.ID = apic_id;
3792
3793 spin_lock_irqsave(&ioapic_lock, flags);
3794 io_apic_write(ioapic, 0, reg_00.raw);
3795 reg_00.raw = io_apic_read(ioapic, 0);
3796 spin_unlock_irqrestore(&ioapic_lock, flags);
3797
3798 /* Sanity check */
6070f9ec
AD
3799 if (reg_00.bits.ID != apic_id) {
3800 printk("IOAPIC[%d]: Unable to change apic_id!\n", ioapic);
3801 return -1;
3802 }
1da177e4
LT
3803 }
3804
3805 apic_printk(APIC_VERBOSE, KERN_INFO
3806 "IOAPIC[%d]: Assigned apic_id %d\n", ioapic, apic_id);
3807
3808 return apic_id;
3809}
3810
36062448 3811int __init io_apic_get_version(int ioapic)
1da177e4
LT
3812{
3813 union IO_APIC_reg_01 reg_01;
3814 unsigned long flags;
3815
3816 spin_lock_irqsave(&ioapic_lock, flags);
3817 reg_01.raw = io_apic_read(ioapic, 1);
3818 spin_unlock_irqrestore(&ioapic_lock, flags);
3819
3820 return reg_01.bits.version;
3821}
54168ed7 3822#endif
1da177e4 3823
54168ed7 3824int io_apic_set_pci_routing (int ioapic, int pin, int irq, int triggering, int polarity)
1da177e4 3825{
1da177e4 3826 if (!IO_APIC_IRQ(irq)) {
54168ed7 3827 apic_printk(APIC_QUIET,KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n",
1da177e4
LT
3828 ioapic);
3829 return -EINVAL;
3830 }
3831
1da177e4
LT
3832 /*
3833 * IRQs < 16 are already in the irq_2_pin[] map
3834 */
3835 if (irq >= 16)
3836 add_pin_to_irq(irq, ioapic, pin);
3837
497c9a19 3838 setup_IO_APIC_irq(ioapic, pin, irq, triggering, polarity);
1da177e4
LT
3839
3840 return 0;
3841}
3842
54168ed7 3843
61fd47e0
SL
3844int acpi_get_override_irq(int bus_irq, int *trigger, int *polarity)
3845{
3846 int i;
3847
3848 if (skip_ioapic_setup)
3849 return -1;
3850
3851 for (i = 0; i < mp_irq_entries; i++)
2fddb6e2
AS
3852 if (mp_irqs[i].mp_irqtype == mp_INT &&
3853 mp_irqs[i].mp_srcbusirq == bus_irq)
61fd47e0
SL
3854 break;
3855 if (i >= mp_irq_entries)
3856 return -1;
3857
3858 *trigger = irq_trigger(i);
3859 *polarity = irq_polarity(i);
3860 return 0;
3861}
3862
888ba6c6 3863#endif /* CONFIG_ACPI */
1a3f239d 3864
497c9a19
YL
3865/*
3866 * This function currently is only a helper for the i386 smp boot process where
3867 * we need to reprogram the ioredtbls to cater for the cpus which have come online
3868 * so mask in all cases should simply be TARGET_CPUS
3869 */
3870#ifdef CONFIG_SMP
3871void __init setup_ioapic_dest(void)
3872{
3873 int pin, ioapic, irq, irq_entry;
3874 struct irq_cfg *cfg;
497c9a19
YL
3875
3876 if (skip_ioapic_setup == 1)
3877 return;
3878
3879 for (ioapic = 0; ioapic < nr_ioapics; ioapic++) {
3880 for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) {
3881 irq_entry = find_irq_entry(ioapic, pin, mp_INT);
3882 if (irq_entry == -1)
3883 continue;
3884 irq = pin_2_irq(irq_entry, ioapic, pin);
3885
3886 /* setup_IO_APIC_irqs could fail to get vector for some device
3887 * when you have too many devices, because at that time only boot
3888 * cpu is online.
3889 */
3890 cfg = irq_cfg(irq);
3891 if (!cfg->vector)
3892 setup_IO_APIC_irq(ioapic, pin, irq,
3893 irq_trigger(irq_entry),
3894 irq_polarity(irq_entry));
54168ed7
IM
3895#ifdef CONFIG_INTR_REMAP
3896 else if (intr_remapping_enabled)
3897 set_ir_ioapic_affinity_irq(irq, TARGET_CPUS);
3898#endif
3899 else
497c9a19 3900 set_ioapic_affinity_irq(irq, TARGET_CPUS);
497c9a19
YL
3901 }
3902
3903 }
3904}
3905#endif
3906
54168ed7
IM
3907#define IOAPIC_RESOURCE_NAME_SIZE 11
3908
3909static struct resource *ioapic_resources;
3910
3911static struct resource * __init ioapic_setup_resources(void)
3912{
3913 unsigned long n;
3914 struct resource *res;
3915 char *mem;
3916 int i;
3917
3918 if (nr_ioapics <= 0)
3919 return NULL;
3920
3921 n = IOAPIC_RESOURCE_NAME_SIZE + sizeof(struct resource);
3922 n *= nr_ioapics;
3923
3924 mem = alloc_bootmem(n);
3925 res = (void *)mem;
3926
3927 if (mem != NULL) {
3928 mem += sizeof(struct resource) * nr_ioapics;
3929
3930 for (i = 0; i < nr_ioapics; i++) {
3931 res[i].name = mem;
3932 res[i].flags = IORESOURCE_MEM | IORESOURCE_BUSY;
3933 sprintf(mem, "IOAPIC %u", i);
3934 mem += IOAPIC_RESOURCE_NAME_SIZE;
3935 }
3936 }
3937
3938 ioapic_resources = res;
3939
3940 return res;
3941}
54168ed7 3942
f3294a33
YL
3943void __init ioapic_init_mappings(void)
3944{
3945 unsigned long ioapic_phys, idx = FIX_IO_APIC_BASE_0;
3946 int i;
54168ed7 3947 struct resource *ioapic_res;
f3294a33 3948
54168ed7 3949 ioapic_res = ioapic_setup_resources();
f3294a33
YL
3950 for (i = 0; i < nr_ioapics; i++) {
3951 if (smp_found_config) {
3952 ioapic_phys = mp_ioapics[i].mp_apicaddr;
54168ed7
IM
3953#ifdef CONFIG_X86_32
3954 if (!ioapic_phys) {
3955 printk(KERN_ERR
3956 "WARNING: bogus zero IO-APIC "
3957 "address found in MPTABLE, "
3958 "disabling IO/APIC support!\n");
3959 smp_found_config = 0;
3960 skip_ioapic_setup = 1;
3961 goto fake_ioapic_page;
3962 }
3963#endif
f3294a33 3964 } else {
54168ed7 3965#ifdef CONFIG_X86_32
f3294a33 3966fake_ioapic_page:
54168ed7 3967#endif
f3294a33 3968 ioapic_phys = (unsigned long)
54168ed7 3969 alloc_bootmem_pages(PAGE_SIZE);
f3294a33
YL
3970 ioapic_phys = __pa(ioapic_phys);
3971 }
3972 set_fixmap_nocache(idx, ioapic_phys);
54168ed7
IM
3973 apic_printk(APIC_VERBOSE,
3974 "mapped IOAPIC to %08lx (%08lx)\n",
3975 __fix_to_virt(idx), ioapic_phys);
f3294a33 3976 idx++;
54168ed7 3977
54168ed7
IM
3978 if (ioapic_res != NULL) {
3979 ioapic_res->start = ioapic_phys;
3980 ioapic_res->end = ioapic_phys + (4 * 1024) - 1;
3981 ioapic_res++;
3982 }
f3294a33
YL
3983 }
3984}
3985
54168ed7
IM
3986static int __init ioapic_insert_resources(void)
3987{
3988 int i;
3989 struct resource *r = ioapic_resources;
3990
3991 if (!r) {
3992 printk(KERN_ERR
3993 "IO APIC resources could be not be allocated.\n");
3994 return -1;
3995 }
3996
3997 for (i = 0; i < nr_ioapics; i++) {
3998 insert_resource(&iomem_resource, r);
3999 r++;
4000 }
4001
4002 return 0;
4003}
4004
4005/* Insert the IO APIC resources after PCI initialization has occured to handle
4006 * IO APICS that are mapped in on a BAR in PCI space. */
4007late_initcall(ioapic_insert_resources);