arch/x86_64/kernel/mpparse.c
1 /*
2 * Intel Multiprocessor Specification 1.1 and 1.4
3 * compliant MP-table parsing routines.
4 *
5 * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
6 * (c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
7 *
8 * Fixes
9 * Erich Boleyn : MP v1.4 and additional changes.
10 * Alan Cox : Added EBDA scanning
11 * Ingo Molnar : various cleanups and rewrites
12 * Maciej W. Rozycki: Bits for default MP configurations
13 * Paul Diefenbaugh: Added full ACPI support
14 */
15
16 #include <linux/mm.h>
17 #include <linux/init.h>
18 #include <linux/delay.h>
19 #include <linux/config.h>
20 #include <linux/bootmem.h>
21 #include <linux/smp_lock.h>
22 #include <linux/kernel_stat.h>
23 #include <linux/mc146818rtc.h>
24 #include <linux/acpi.h>
25 #include <linux/module.h>
26
27 #include <asm/smp.h>
28 #include <asm/mtrr.h>
29 #include <asm/mpspec.h>
30 #include <asm/pgalloc.h>
31 #include <asm/io_apic.h>
32 #include <asm/proto.h>
33 #include <asm/acpi.h>
34
35 /* Have we found an MP table */
36 int smp_found_config;
37 unsigned int __initdata maxcpus = NR_CPUS;
38
39 int acpi_found_madt;
40
41 /*
42 * Various Linux-internal data structures created from the
43 * MP-table.
44 */
45 unsigned char apic_version [MAX_APICS];
46 unsigned char mp_bus_id_to_type [MAX_MP_BUSSES] = { [0 ... MAX_MP_BUSSES-1] = -1 };
47 int mp_bus_id_to_pci_bus [MAX_MP_BUSSES] = { [0 ... MAX_MP_BUSSES-1] = -1 };
48
49 static int mp_current_pci_id = 0;
50 /* I/O APIC entries */
51 struct mpc_config_ioapic mp_ioapics[MAX_IO_APICS];
52
53 /* MP IRQ source entries */
54 struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES];
55 
56 /* # of MP IRQ source entries */
57 int mp_irq_entries;
58
59 int nr_ioapics;
60 int pic_mode;
61 unsigned long mp_lapic_addr = 0;
62
63
64
65 /* Processor that is doing the boot up */
66 unsigned int boot_cpu_id = -1U;
67 /* Internal processor count */
68 unsigned int num_processors __initdata = 0;
69
70 unsigned disabled_cpus __initdata;
71
72 /* Bitmask of physically existing CPUs */
73 physid_mask_t phys_cpu_present_map = PHYSID_MASK_NONE;
74
75 /* ACPI MADT entry parsing functions */
76 #ifdef CONFIG_ACPI
77 extern struct acpi_boot_flags acpi_boot;
78 #ifdef CONFIG_X86_LOCAL_APIC
79 extern int acpi_parse_lapic (acpi_table_entry_header *header);
80 extern int acpi_parse_lapic_addr_ovr (acpi_table_entry_header *header);
81 extern int acpi_parse_lapic_nmi (acpi_table_entry_header *header);
82 #endif /*CONFIG_X86_LOCAL_APIC*/
83 #ifdef CONFIG_X86_IO_APIC
84 extern int acpi_parse_ioapic (acpi_table_entry_header *header);
85 #endif /*CONFIG_X86_IO_APIC*/
86 #endif /*CONFIG_ACPI*/
87
88 u8 bios_cpu_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
89
90
91 /*
92 * Intel MP BIOS table parsing routines:
93 */
94
95 /*
96 * Checksum an MP configuration block: all bytes of a valid table sum to zero (mod 256).
97 */
98
99 static int __init mpf_checksum(unsigned char *mp, int len)
100 {
101 int sum = 0;
102
103 while (len--)
104 sum += *mp++;
105
106 return sum & 0xFF;
107 }
108
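/*
 * Record one processor entry from the MP table: note the boot CPU,
 * mark its APIC ID as present, remember the APIC version and hand
 * out a logical CPU number (the BSP is always CPU 0).
 */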
109 static void __init MP_processor_info (struct mpc_config_processor *m)
110 {
111 int cpu;
112 unsigned char ver;
113 static int found_bsp=0;
114
115 if (!(m->mpc_cpuflag & CPU_ENABLED)) {
116 disabled_cpus++;
117 return;
118 }
119
120 printk(KERN_INFO "Processor #%d %d:%d APIC version %d\n",
121 m->mpc_apicid,
122 (m->mpc_cpufeature & CPU_FAMILY_MASK)>>8,
123 (m->mpc_cpufeature & CPU_MODEL_MASK)>>4,
124 m->mpc_apicver);
125
126 if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) {
127 Dprintk(" Bootup CPU\n");
128 boot_cpu_id = m->mpc_apicid;
129 }
130 if (num_processors >= NR_CPUS) {
131 printk(KERN_WARNING "WARNING: NR_CPUS limit of %i reached."
132 " Processor ignored.\n", NR_CPUS);
133 return;
134 }
135
136 cpu = num_processors++;
137
138 #if MAX_APICS < 255
139 if ((int)m->mpc_apicid > MAX_APICS) {
140 printk(KERN_ERR "Processor #%d INVALID. (Max ID: %d).\n",
141 m->mpc_apicid, MAX_APICS);
142 return;
143 }
144 #endif
145 ver = m->mpc_apicver;
146
147 physid_set(m->mpc_apicid, phys_cpu_present_map);
148 /*
149 * Validate version
150 */
151 if (ver == 0x0) {
152 printk(KERN_ERR "BIOS bug, APIC version is 0 for CPU#%d! fixing up to 0x10. (tell your hw vendor)\n", m->mpc_apicid);
153 ver = 0x10;
154 }
155 apic_version[m->mpc_apicid] = ver;
156 if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) {
157 /*
158 * bios_cpu_apicid is required to have processors listed
159 * in the same order as logical CPU numbers, so the first
160 * entry is the BSP, and so on.
161 */
162 cpu = 0;
163
164 bios_cpu_apicid[0] = m->mpc_apicid;
165 x86_cpu_to_apicid[0] = m->mpc_apicid;
166 found_bsp = 1;
167 } else
168 cpu = num_processors - found_bsp;
169 bios_cpu_apicid[cpu] = m->mpc_apicid;
170 x86_cpu_to_apicid[cpu] = m->mpc_apicid;
171
172 cpu_set(cpu, cpu_possible_map);
173 cpu_set(cpu, cpu_present_map);
174 }
175
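/*
 * Record the type (ISA/EISA/PCI/MCA) of each bus listed in the MP
 * table; interrupt polarity and trigger defaults are derived from
 * the bus type later on.
 */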
176 static void __init MP_bus_info (struct mpc_config_bus *m)
177 {
178 char str[7];
179
180 memcpy(str, m->mpc_bustype, 6);
181 str[6] = 0;
182 Dprintk("Bus #%d is %s\n", m->mpc_busid, str);
183
184 if (strncmp(str, "ISA", 3) == 0) {
185 mp_bus_id_to_type[m->mpc_busid] = MP_BUS_ISA;
186 } else if (strncmp(str, "EISA", 4) == 0) {
187 mp_bus_id_to_type[m->mpc_busid] = MP_BUS_EISA;
188 } else if (strncmp(str, "PCI", 3) == 0) {
189 mp_bus_id_to_type[m->mpc_busid] = MP_BUS_PCI;
190 mp_bus_id_to_pci_bus[m->mpc_busid] = mp_current_pci_id;
191 mp_current_pci_id++;
192 } else if (strncmp(str, "MCA", 3) == 0) {
193 mp_bus_id_to_type[m->mpc_busid] = MP_BUS_MCA;
194 } else {
195 printk(KERN_ERR "Unknown bustype %s\n", str);
196 }
197 }
198
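/*
 * Record a usable I/O APIC entry from the MP table, after checking
 * the count against MAX_IO_APICS and rejecting a zero base address.
 */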
199 static void __init MP_ioapic_info (struct mpc_config_ioapic *m)
200 {
201 if (!(m->mpc_flags & MPC_APIC_USABLE))
202 return;
203
204 printk("I/O APIC #%d Version %d at 0x%X.\n",
205 m->mpc_apicid, m->mpc_apicver, m->mpc_apicaddr);
206 if (nr_ioapics >= MAX_IO_APICS) {
207 printk(KERN_ERR "Max # of I/O APICs (%d) exceeded (found %d).\n",
208 MAX_IO_APICS, nr_ioapics);
209 panic("Recompile kernel with bigger MAX_IO_APICS!.\n");
210 }
211 if (!m->mpc_apicaddr) {
212 printk(KERN_ERR "WARNING: bogus zero I/O APIC address"
213 " found in MP table, skipping!\n");
214 return;
215 }
216 mp_ioapics[nr_ioapics] = *m;
217 nr_ioapics++;
218 }
219
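/*
 * Copy one interrupt source entry (bus IRQ -> I/O APIC pin routing)
 * into mp_irqs[]; running out of MAX_IRQ_SOURCES slots is fatal.
 */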
220 static void __init MP_intsrc_info (struct mpc_config_intsrc *m)
221 {
222 mp_irqs [mp_irq_entries] = *m;
223 Dprintk("Int: type %d, pol %d, trig %d, bus %d,"
224 " IRQ %02x, APIC ID %x, APIC INT %02x\n",
225 m->mpc_irqtype, m->mpc_irqflag & 3,
226 (m->mpc_irqflag >> 2) & 3, m->mpc_srcbus,
227 m->mpc_srcbusirq, m->mpc_dstapic, m->mpc_dstirq);
228 if (++mp_irq_entries >= MAX_IRQ_SOURCES)
229 panic("Max # of irq sources exceeded!!\n");
230 }
231
232 static void __init MP_lintsrc_info (struct mpc_config_lintsrc *m)
233 {
234 Dprintk("Lint: type %d, pol %d, trig %d, bus %d,"
235 " IRQ %02x, APIC ID %x, APIC LINT %02x\n",
236 m->mpc_irqtype, m->mpc_irqflag & 3,
237 (m->mpc_irqflag >> 2) &3, m->mpc_srcbusid,
238 m->mpc_srcbusirq, m->mpc_destapic, m->mpc_destapiclint);
239 /*
240 * Well it seems all SMP boards in existence
241 * use ExtINT/LVT1 == LINT0 and
242 * NMI/LVT2 == LINT1 - the following check
243 * will show us if this assumption is false.
244 * Until then we do not have to add baggage.
245 */
246 if ((m->mpc_irqtype == mp_ExtINT) &&
247 (m->mpc_destapiclint != 0))
248 BUG();
249 if ((m->mpc_irqtype == mp_NMI) &&
250 (m->mpc_destapiclint != 1))
251 BUG();
252 }
253
254 /*
255 * Read/parse the MPC
256 */
257
258 static int __init smp_read_mpc(struct mp_config_table *mpc)
259 {
260 char str[16];
261 int count=sizeof(*mpc);
262 unsigned char *mpt=((unsigned char *)mpc)+count;
263
264 if (memcmp(mpc->mpc_signature,MPC_SIGNATURE,4)) {
265 printk("SMP mptable: bad signature [%c%c%c%c]!\n",
266 mpc->mpc_signature[0],
267 mpc->mpc_signature[1],
268 mpc->mpc_signature[2],
269 mpc->mpc_signature[3]);
270 return 0;
271 }
272 if (mpf_checksum((unsigned char *)mpc,mpc->mpc_length)) {
273 printk("SMP mptable: checksum error!\n");
274 return 0;
275 }
276 if (mpc->mpc_spec!=0x01 && mpc->mpc_spec!=0x04) {
277 printk(KERN_ERR "SMP mptable: bad table version (%d)!!\n",
278 mpc->mpc_spec);
279 return 0;
280 }
281 if (!mpc->mpc_lapic) {
282 printk(KERN_ERR "SMP mptable: null local APIC address!\n");
283 return 0;
284 }
285 memcpy(str,mpc->mpc_oem,8);
286 str[8]=0;
287 printk(KERN_INFO "OEM ID: %s ",str);
288
289 memcpy(str,mpc->mpc_productid,12);
290 str[12]=0;
291 printk(KERN_INFO "Product ID: %s ",str);
292
293 printk(KERN_INFO "APIC at: 0x%X\n",mpc->mpc_lapic);
294
295 /* save the local APIC address, it might be non-default */
296 if (!acpi_lapic)
297 mp_lapic_addr = mpc->mpc_lapic;
298
299 /*
300 * Now process the configuration blocks.
301 */
302 while (count < mpc->mpc_length) {
303 switch(*mpt) {
304 case MP_PROCESSOR:
305 {
306 struct mpc_config_processor *m=
307 (struct mpc_config_processor *)mpt;
308 if (!acpi_lapic)
309 MP_processor_info(m);
310 mpt += sizeof(*m);
311 count += sizeof(*m);
312 break;
313 }
314 case MP_BUS:
315 {
316 struct mpc_config_bus *m=
317 (struct mpc_config_bus *)mpt;
318 MP_bus_info(m);
319 mpt += sizeof(*m);
320 count += sizeof(*m);
321 break;
322 }
323 case MP_IOAPIC:
324 {
325 struct mpc_config_ioapic *m=
326 (struct mpc_config_ioapic *)mpt;
327 MP_ioapic_info(m);
328 mpt+=sizeof(*m);
329 count+=sizeof(*m);
330 break;
331 }
332 case MP_INTSRC:
333 {
334 struct mpc_config_intsrc *m=
335 (struct mpc_config_intsrc *)mpt;
336
337 MP_intsrc_info(m);
338 mpt+=sizeof(*m);
339 count+=sizeof(*m);
340 break;
341 }
342 case MP_LINTSRC:
343 {
344 struct mpc_config_lintsrc *m=
345 (struct mpc_config_lintsrc *)mpt;
346 MP_lintsrc_info(m);
347 mpt+=sizeof(*m);
348 count+=sizeof(*m);
349 break;
350 }
351 }
352 }
353 clustered_apic_check();
354 if (!num_processors)
355 printk(KERN_ERR "SMP mptable: no processors registered!\n");
356 return num_processors;
357 }
358
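/*
 * The EISA Edge/Level Control Registers live at I/O ports 0x4d0
 * (IRQs 0-7) and 0x4d1 (IRQs 8-15); a set bit means the IRQ is
 * level triggered, a clear bit means edge triggered.
 */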
359 static int __init ELCR_trigger(unsigned int irq)
360 {
361 unsigned int port;
362
363 port = 0x4d0 + (irq >> 3);
364 return (inb(port) >> (irq & 7)) & 1;
365 }
366
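/*
 * Synthesize interrupt source entries for the 16 legacy IRQs when
 * the BIOS only provided a default configuration, consulting the
 * ELCR for trigger modes on ISA/PCI systems where it looks sane.
 */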
367 static void __init construct_default_ioirq_mptable(int mpc_default_type)
368 {
369 struct mpc_config_intsrc intsrc;
370 int i;
371 int ELCR_fallback = 0;
372
373 intsrc.mpc_type = MP_INTSRC;
374 intsrc.mpc_irqflag = 0; /* conforming */
375 intsrc.mpc_srcbus = 0;
376 intsrc.mpc_dstapic = mp_ioapics[0].mpc_apicid;
377
378 intsrc.mpc_irqtype = mp_INT;
379
380 /*
381 * A default config type of 5 means an ISA/PCI system with no IRQ entries
382 * in the MP table. To prevent the PCI interrupts from being set up
383 * incorrectly, we try to use the ELCR. The sanity check to see if
384 * there is good ELCR data is very simple - IRQ0, 1, 2 and 13 can
385 * never be level sensitive, so we simply see if the ELCR agrees.
386 * If it does, we assume it's valid.
387 */
388 if (mpc_default_type == 5) {
389 printk(KERN_INFO "ISA/PCI bus type with no IRQ information... falling back to ELCR\n");
390
391 if (ELCR_trigger(0) || ELCR_trigger(1) || ELCR_trigger(2) || ELCR_trigger(13))
392 printk(KERN_ERR "ELCR contains invalid data... not using ELCR\n");
393 else {
394 printk(KERN_INFO "Using ELCR to identify PCI interrupts\n");
395 ELCR_fallback = 1;
396 }
397 }
398
399 for (i = 0; i < 16; i++) {
400 switch (mpc_default_type) {
401 case 2:
402 if (i == 0 || i == 13)
403 continue; /* IRQ0 & IRQ13 not connected */
404 /* fall through */
405 default:
406 if (i == 2)
407 continue; /* IRQ2 is never connected */
408 }
409
410 if (ELCR_fallback) {
411 /*
412 * If the ELCR indicates a level-sensitive interrupt, we
413 * copy that information over to the MP table in the
414 * irqflag field (level sensitive, active high polarity).
415 */
416 if (ELCR_trigger(i))
417 intsrc.mpc_irqflag = 13;
418 else
419 intsrc.mpc_irqflag = 0;
420 }
421
422 intsrc.mpc_srcbusirq = i;
423 intsrc.mpc_dstirq = i ? i : 2; /* IRQ0 to INTIN2 */
424 MP_intsrc_info(&intsrc);
425 }
426
427 intsrc.mpc_irqtype = mp_ExtINT;
428 intsrc.mpc_srcbusirq = 0;
429 intsrc.mpc_dstirq = 0; /* 8259A to INTIN0 */
430 MP_intsrc_info(&intsrc);
431 }
432
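/*
 * Build a complete synthetic MP configuration for one of the MP-spec
 * default types: two CPUs, one or two buses, a single I/O APIC at
 * 0xFEC00000 and the standard local interrupt (LINT) entries.
 */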
433 static inline void __init construct_default_ISA_mptable(int mpc_default_type)
434 {
435 struct mpc_config_processor processor;
436 struct mpc_config_bus bus;
437 struct mpc_config_ioapic ioapic;
438 struct mpc_config_lintsrc lintsrc;
439 int linttypes[2] = { mp_ExtINT, mp_NMI };
440 int i;
441
442 /*
443 * local APIC has default address
444 */
445 mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
446
447 /*
448 * 2 CPUs, numbered 0 & 1.
449 */
450 processor.mpc_type = MP_PROCESSOR;
451 /* Either an integrated APIC or a discrete 82489DX. */
452 processor.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01;
453 processor.mpc_cpuflag = CPU_ENABLED;
454 processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) |
455 (boot_cpu_data.x86_model << 4) |
456 boot_cpu_data.x86_mask;
457 processor.mpc_featureflag = boot_cpu_data.x86_capability[0];
458 processor.mpc_reserved[0] = 0;
459 processor.mpc_reserved[1] = 0;
460 for (i = 0; i < 2; i++) {
461 processor.mpc_apicid = i;
462 MP_processor_info(&processor);
463 }
464
465 bus.mpc_type = MP_BUS;
466 bus.mpc_busid = 0;
467 switch (mpc_default_type) {
468 default:
469 printk(KERN_ERR "???\nUnknown standard configuration %d\n",
470 mpc_default_type);
471 /* fall through */
472 case 1:
473 case 5:
474 memcpy(bus.mpc_bustype, "ISA ", 6);
475 break;
476 case 2:
477 case 6:
478 case 3:
479 memcpy(bus.mpc_bustype, "EISA ", 6);
480 break;
481 case 4:
482 case 7:
483 memcpy(bus.mpc_bustype, "MCA ", 6);
484 }
485 MP_bus_info(&bus);
486 if (mpc_default_type > 4) {
487 bus.mpc_busid = 1;
488 memcpy(bus.mpc_bustype, "PCI ", 6);
489 MP_bus_info(&bus);
490 }
491
492 ioapic.mpc_type = MP_IOAPIC;
493 ioapic.mpc_apicid = 2;
494 ioapic.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01;
495 ioapic.mpc_flags = MPC_APIC_USABLE;
496 ioapic.mpc_apicaddr = 0xFEC00000;
497 MP_ioapic_info(&ioapic);
498
499 /*
500 * We set up most of the low 16 IO-APIC pins according to MPS rules.
501 */
502 construct_default_ioirq_mptable(mpc_default_type);
503
504 lintsrc.mpc_type = MP_LINTSRC;
505 lintsrc.mpc_irqflag = 0; /* conforming */
506 lintsrc.mpc_srcbusid = 0;
507 lintsrc.mpc_srcbusirq = 0;
508 lintsrc.mpc_destapic = MP_APIC_ALL;
509 for (i = 0; i < 2; i++) {
510 lintsrc.mpc_irqtype = linttypes[i];
511 lintsrc.mpc_destapiclint = i;
512 MP_lintsrc_info(&lintsrc);
513 }
514 }
515
516 static struct intel_mp_floating *mpf_found;
517
518 /*
519 * Read the SMP configuration from the MP floating pointer structure found by find_smp_config().
520 */
521 void __init get_smp_config (void)
522 {
523 struct intel_mp_floating *mpf = mpf_found;
524
525 /*
526 * ACPI supports both logical (e.g. Hyper-Threading) and physical
527 * processors, whereas MPS only supports physical.
528 */
529 if (acpi_lapic && acpi_ioapic) {
530 printk(KERN_INFO "Using ACPI (MADT) for SMP configuration information\n");
531 return;
532 }
533 else if (acpi_lapic)
534 printk(KERN_INFO "Using ACPI for processor (LAPIC) configuration information\n");
535
536 printk("Intel MultiProcessor Specification v1.%d\n", mpf->mpf_specification);
537 if (mpf->mpf_feature2 & (1<<7)) {
538 printk(KERN_INFO " IMCR and PIC compatibility mode.\n");
539 pic_mode = 1;
540 } else {
541 printk(KERN_INFO " Virtual Wire compatibility mode.\n");
542 pic_mode = 0;
543 }
544
545 /*
546 * Now see if we need to read further.
547 */
548 if (mpf->mpf_feature1 != 0) {
549
550 printk(KERN_INFO "Default MP configuration #%d\n", mpf->mpf_feature1);
551 construct_default_ISA_mptable(mpf->mpf_feature1);
552
553 } else if (mpf->mpf_physptr) {
554
555 /*
556 * Read the physical hardware table. Anything here will
557 * override the defaults.
558 */
559 if (!smp_read_mpc(phys_to_virt(mpf->mpf_physptr))) {
560 smp_found_config = 0;
561 printk(KERN_ERR "BIOS bug, MP table errors detected!...\n");
562 printk(KERN_ERR "... disabling SMP support. (tell your hw vendor)\n");
563 return;
564 }
565 /*
566 * If there are no explicit MP IRQ entries, then we are
567 * broken. We set up most of the low 16 IO-APIC pins to
568 * ISA defaults and hope it will work.
569 */
570 if (!mp_irq_entries) {
571 struct mpc_config_bus bus;
572
573 printk(KERN_ERR "BIOS bug, no explicit IRQ entries, using default mptable. (tell your hw vendor)\n");
574
575 bus.mpc_type = MP_BUS;
576 bus.mpc_busid = 0;
577 memcpy(bus.mpc_bustype, "ISA ", 6);
578 MP_bus_info(&bus);
579
580 construct_default_ioirq_mptable(0);
581 }
582
583 } else
584 BUG();
585
586 printk(KERN_INFO "Processors: %d\n", num_processors);
587 /*
588 * Only use the first configuration found.
589 */
590 }
591
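/*
 * Scan a physical memory range, on 16-byte boundaries, for the
 * 16-byte MP floating pointer structure: "_MP_" signature, valid
 * checksum and spec revision 1 or 4.
 */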
592 static int __init smp_scan_config (unsigned long base, unsigned long length)
593 {
594 extern void __bad_mpf_size(void);
595 unsigned int *bp = phys_to_virt(base);
596 struct intel_mp_floating *mpf;
597
598 Dprintk("Scan SMP from %p for %ld bytes.\n", bp,length);
599 if (sizeof(*mpf) != 16)
600 __bad_mpf_size();
601
602 while (length > 0) {
603 mpf = (struct intel_mp_floating *)bp;
604 if ((*bp == SMP_MAGIC_IDENT) &&
605 (mpf->mpf_length == 1) &&
606 !mpf_checksum((unsigned char *)bp, 16) &&
607 ((mpf->mpf_specification == 1)
608 || (mpf->mpf_specification == 4)) ) {
609
610 smp_found_config = 1;
611 reserve_bootmem_generic(virt_to_phys(mpf), PAGE_SIZE);
612 if (mpf->mpf_physptr)
613 reserve_bootmem_generic(mpf->mpf_physptr, PAGE_SIZE);
614 mpf_found = mpf;
615 return 1;
616 }
617 bp += 4;
618 length -= 16;
619 }
620 return 0;
621 }
622
623 void __init find_intel_smp (void)
624 {
625 unsigned int address;
626
627 /*
628 * FIXME: Linux assumes you have 640K of base ram..
629 * this continues the error...
630 *
631 * 1) Scan the bottom 1K for a signature
632 * 2) Scan the top 1K of base RAM
633 * 3) Scan the 64K of bios
634 */
635 if (smp_scan_config(0x0,0x400) ||
636 smp_scan_config(639*0x400,0x400) ||
637 smp_scan_config(0xF0000,0x10000))
638 return;
639 /*
640 * If it is an SMP machine we should know now, unless the
641 * configuration is in an EISA/MCA bus machine with an
642 * extended bios data area.
643 *
644 * There is a real-mode segmented pointer to the 4K EBDA
645 * area at 0x40E; calculate its linear address and scan it here.
646 *
647 * NOTE! There are Linux loaders that will corrupt the EBDA
648 * area, and as such this kind of SMP config may be less
649 * trustworthy, simply because the SMP table may have been
650 * stomped on during early boot. These loaders are buggy and
651 * should be fixed.
652 */
653
654 address = *(unsigned short *)phys_to_virt(0x40E);
655 address <<= 4;
656 if (smp_scan_config(address, 0x1000))
657 return;
658
659 /* If we have come this far, we did not find an MP table */
660 printk(KERN_INFO "No mptable found.\n");
661 }
662
663 /*
664 * Find the Intel MP configuration table, if any.
665 */
666 void __init find_smp_config (void)
667 {
668 #ifdef CONFIG_X86_LOCAL_APIC
669 find_intel_smp();
670 #endif
671 }
672
673
674 /* --------------------------------------------------------------------------
675 ACPI-based MP Configuration
676 -------------------------------------------------------------------------- */
677
678 #ifdef CONFIG_ACPI
679
680 void __init mp_register_lapic_address (
681 u64 address)
682 {
683 mp_lapic_addr = (unsigned long) address;
684
685 set_fixmap_nocache(FIX_APIC_BASE, mp_lapic_addr);
686
687 if (boot_cpu_id == -1U)
688 boot_cpu_id = GET_APIC_ID(apic_read(APIC_ID));
689
690 Dprintk("Boot CPU = %d\n", boot_cpu_physical_apicid);
691 }
692
693
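/*
 * Register one local APIC from an ACPI (MADT) LAPIC entry by
 * synthesizing an MP-table processor entry and feeding it to
 * MP_processor_info().
 */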
694 void __init mp_register_lapic (
695 u8 id,
696 u8 enabled)
697 {
698 struct mpc_config_processor processor;
699 int boot_cpu = 0;
700
701 if (id >= MAX_APICS) {
702 printk(KERN_WARNING "Processor #%d invalid (max %d)\n",
703 id, MAX_APICS);
704 return;
705 }
706
707 if (id == boot_cpu_physical_apicid)
708 boot_cpu = 1;
709
710 processor.mpc_type = MP_PROCESSOR;
711 processor.mpc_apicid = id;
712 processor.mpc_apicver = GET_APIC_VERSION(apic_read(APIC_LVR));
713 processor.mpc_cpuflag = (enabled ? CPU_ENABLED : 0);
714 processor.mpc_cpuflag |= (boot_cpu ? CPU_BOOTPROCESSOR : 0);
715 processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) |
716 (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_mask;
717 processor.mpc_featureflag = boot_cpu_data.x86_capability[0];
718 processor.mpc_reserved[0] = 0;
719 processor.mpc_reserved[1] = 0;
720
721 MP_processor_info(&processor);
722 }
723
724 #ifdef CONFIG_X86_IO_APIC
725
726 #define MP_ISA_BUS 0
727 #define MP_MAX_IOAPIC_PIN 127
728
729 static struct mp_ioapic_routing {
730 int apic_id;
731 int gsi_start;
732 int gsi_end;
733 u32 pin_programmed[4];
734 } mp_ioapic_routing[MAX_IO_APICS];
735
736
737 static int mp_find_ioapic (
738 int gsi)
739 {
740 int i = 0;
741
742 /* Find the IOAPIC that manages this GSI. */
743 for (i = 0; i < nr_ioapics; i++) {
744 if ((gsi >= mp_ioapic_routing[i].gsi_start)
745 && (gsi <= mp_ioapic_routing[i].gsi_end))
746 return i;
747 }
748
749 printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi);
750
751 return -1;
752 }
753
754
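/*
 * Register an I/O APIC from an ACPI (MADT) entry and record the GSI
 * range it serves, so later lookups can map a GSI to a chip and pin.
 */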
755 void __init mp_register_ioapic (
756 u8 id,
757 u32 address,
758 u32 gsi_base)
759 {
760 int idx = 0;
761
762 if (nr_ioapics >= MAX_IO_APICS) {
763 printk(KERN_ERR "ERROR: Max # of I/O APICs (%d) exceeded "
764 "(found %d)\n", MAX_IO_APICS, nr_ioapics);
765 panic("Recompile kernel with bigger MAX_IO_APICS!\n");
766 }
767 if (!address) {
768 printk(KERN_ERR "WARNING: Bogus (zero) I/O APIC address"
769 " found in MADT table, skipping!\n");
770 return;
771 }
772
773 idx = nr_ioapics++;
774
775 mp_ioapics[idx].mpc_type = MP_IOAPIC;
776 mp_ioapics[idx].mpc_flags = MPC_APIC_USABLE;
777 mp_ioapics[idx].mpc_apicaddr = address;
778
779 set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
780 mp_ioapics[idx].mpc_apicid = id;
781 mp_ioapics[idx].mpc_apicver = io_apic_get_version(idx);
782
783 /*
784 * Build basic IRQ lookup table to facilitate gsi->io_apic lookups
785 * and to prevent reprogramming of IOAPIC pins (PCI IRQs).
786 */
787 mp_ioapic_routing[idx].apic_id = mp_ioapics[idx].mpc_apicid;
788 mp_ioapic_routing[idx].gsi_start = gsi_base;
789 mp_ioapic_routing[idx].gsi_end = gsi_base +
790 io_apic_get_redir_entries(idx);
791
792 printk(KERN_INFO "IOAPIC[%d]: apic_id %d, version %d, address 0x%x, "
793 "GSI %d-%d\n", idx, mp_ioapics[idx].mpc_apicid,
794 mp_ioapics[idx].mpc_apicver, mp_ioapics[idx].mpc_apicaddr,
795 mp_ioapic_routing[idx].gsi_start,
796 mp_ioapic_routing[idx].gsi_end);
797
798 return;
799 }
800
801
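/*
 * Handle an ACPI (MADT) interrupt source override: translate the GSI
 * to an I/O APIC pin and record the ISA IRQ's polarity/trigger as an
 * MP interrupt source entry.
 */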
802 void __init mp_override_legacy_irq (
803 u8 bus_irq,
804 u8 polarity,
805 u8 trigger,
806 u32 gsi)
807 {
808 struct mpc_config_intsrc intsrc;
809 int ioapic = -1;
810 int pin = -1;
811
812 /*
813 * Convert 'gsi' to 'ioapic.pin'.
814 */
815 ioapic = mp_find_ioapic(gsi);
816 if (ioapic < 0)
817 return;
818 pin = gsi - mp_ioapic_routing[ioapic].gsi_start;
819
820 /*
821 * TBD: This check is for faulty timer entries, where the override
822 * erroneously sets the trigger to level, resulting in a HUGE
823 * increase of timer interrupts!
824 */
825 if ((bus_irq == 0) && (trigger == 3))
826 trigger = 1;
827
828 intsrc.mpc_type = MP_INTSRC;
829 intsrc.mpc_irqtype = mp_INT;
830 intsrc.mpc_irqflag = (trigger << 2) | polarity;
831 intsrc.mpc_srcbus = MP_ISA_BUS;
832 intsrc.mpc_srcbusirq = bus_irq; /* IRQ */
833 intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid; /* APIC ID */
834 intsrc.mpc_dstirq = pin; /* INTIN# */
835
836 Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, %d-%d\n",
837 intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3,
838 (intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus,
839 intsrc.mpc_srcbusirq, intsrc.mpc_dstapic, intsrc.mpc_dstirq);
840
841 mp_irqs[mp_irq_entries] = intsrc;
842 if (++mp_irq_entries == MAX_IRQ_SOURCES)
843 panic("Max # of irq sources exceeded!\n");
844
845 return;
846 }
847
848
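/*
 * Register identity-mapped interrupt source entries for the 16
 * legacy ISA IRQs that were not already covered by override entries.
 */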
849 void __init mp_config_acpi_legacy_irqs (void)
850 {
851 struct mpc_config_intsrc intsrc;
852 int i = 0;
853 int ioapic = -1;
854
855 /*
856 * Fabricate the legacy ISA bus (bus #0, MP_ISA_BUS).
857 */
858 mp_bus_id_to_type[MP_ISA_BUS] = MP_BUS_ISA;
859 Dprintk("Bus #%d is ISA\n", MP_ISA_BUS);
860
861 /*
862 * Locate the IOAPIC that manages the ISA IRQs (0-15).
863 */
864 ioapic = mp_find_ioapic(0);
865 if (ioapic < 0)
866 return;
867
868 intsrc.mpc_type = MP_INTSRC;
869 intsrc.mpc_irqflag = 0; /* Conforming */
870 intsrc.mpc_srcbus = MP_ISA_BUS;
871 intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid;
872
873 /*
874 * Use the default configuration for IRQs 0-15, unless
875 * overridden by (MADT) interrupt source override entries.
876 */
877 for (i = 0; i < 16; i++) {
878 int idx;
879
880 for (idx = 0; idx < mp_irq_entries; idx++) {
881 struct mpc_config_intsrc *irq = mp_irqs + idx;
882
883 /* Do we already have a mapping for this ISA IRQ? */
884 if (irq->mpc_srcbus == MP_ISA_BUS && irq->mpc_srcbusirq == i)
885 break;
886
887 /* Do we already have a mapping for this IOAPIC pin? */
888 if ((irq->mpc_dstapic == intsrc.mpc_dstapic) &&
889 (irq->mpc_dstirq == i))
890 break;
891 }
892
893 if (idx != mp_irq_entries) {
894 printk(KERN_DEBUG "ACPI: IRQ%d used by override.\n", i);
895 continue; /* IRQ already used */
896 }
897
898 intsrc.mpc_irqtype = mp_INT;
899 intsrc.mpc_srcbusirq = i; /* Identity mapped */
900 intsrc.mpc_dstirq = i;
901
902 Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, "
903 "%d-%d\n", intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3,
904 (intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus,
905 intsrc.mpc_srcbusirq, intsrc.mpc_dstapic,
906 intsrc.mpc_dstirq);
907
908 mp_irqs[mp_irq_entries] = intsrc;
909 if (++mp_irq_entries == MAX_IRQ_SOURCES)
910 panic("Max # of irq sources exceeded!\n");
911 }
912
913 return;
914 }
915
916 #define MAX_GSI_NUM 4096
917
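/*
 * Route a GSI to its I/O APIC pin, programming each pin only once,
 * and return the IRQ number the rest of the kernel should use for
 * that GSI.
 */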
918 int mp_register_gsi(u32 gsi, int edge_level, int active_high_low)
919 {
920 int ioapic = -1;
921 int ioapic_pin = 0;
922 int idx, bit = 0;
923 static int pci_irq = 16;
924 /*
925 * Mapping from Global System Interrupts, which
926 * represent all possible interrupts, to the IRQs
927 * assigned to actual devices.
928 */
929 static int gsi_to_irq[MAX_GSI_NUM];
930
931 if (acpi_irq_model != ACPI_IRQ_MODEL_IOAPIC)
932 return gsi;
933
934 /* Don't set up the ACPI SCI because it's already set up */
935 if (acpi_fadt.sci_int == gsi)
936 return gsi;
937
938 ioapic = mp_find_ioapic(gsi);
939 if (ioapic < 0) {
940 printk(KERN_WARNING "No IOAPIC for GSI %u\n", gsi);
941 return gsi;
942 }
943
944 ioapic_pin = gsi - mp_ioapic_routing[ioapic].gsi_start;
945
946 /*
947 * Avoid pin reprogramming. PRTs typically include entries
948 * with redundant pin->gsi mappings (but unique PCI devices);
949 * we only program the IOAPIC on the first.
950 */
951 bit = ioapic_pin % 32;
952 idx = (ioapic_pin < 32) ? 0 : (ioapic_pin / 32);
953 if (idx > 3) {
954 printk(KERN_ERR "Invalid reference to IOAPIC pin "
955 "%d-%d\n", mp_ioapic_routing[ioapic].apic_id,
956 ioapic_pin);
957 return gsi;
958 }
959 if ((1<<bit) & mp_ioapic_routing[ioapic].pin_programmed[idx]) {
960 Dprintk(KERN_DEBUG "Pin %d-%d already programmed\n",
961 mp_ioapic_routing[ioapic].apic_id, ioapic_pin);
962 return gsi_to_irq[gsi];
963 }
964
965 mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1<<bit);
966
967 if (edge_level) {
968 /*
969 * For PCI devices assign IRQs in order, avoiding gaps
970 * due to unused I/O APIC pins.
971 */
972 int irq = gsi;
973 if (gsi < MAX_GSI_NUM) {
974 if (gsi > 15)
975 gsi = pci_irq++;
976 /*
977 * Don't assign IRQ used by ACPI SCI
978 */
979 if (gsi == acpi_fadt.sci_int)
980 gsi = pci_irq++;
981 gsi_to_irq[irq] = gsi;
982 } else {
983 printk(KERN_ERR "GSI %u is too high\n", gsi);
984 return gsi;
985 }
986 }
987
988 io_apic_set_pci_routing(ioapic, ioapic_pin, gsi,
989 edge_level == ACPI_EDGE_SENSITIVE ? 0 : 1,
990 active_high_low == ACPI_ACTIVE_HIGH ? 0 : 1);
991 return gsi;
992 }
993
994 #endif /*CONFIG_X86_IO_APIC*/
995 #endif /*CONFIG_ACPI*/