arch/i386/kernel/acpi/boot.c
1 /*
2 * boot.c - Architecture-Specific Low-Level ACPI Boot Support
3 *
4 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
5 * Copyright (C) 2001 Jun Nakajima <jun.nakajima@intel.com>
6 *
7 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 *
23 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
24 */
25
26 #include <linux/init.h>
27 #include <linux/config.h>
28 #include <linux/acpi.h>
29 #include <linux/efi.h>
30 #include <linux/irq.h>
31 #include <linux/module.h>
32 #include <linux/dmi.h>
33
34 #include <asm/pgtable.h>
35 #include <asm/io_apic.h>
36 #include <asm/apic.h>
37 #include <asm/io.h>
38 #include <asm/irq.h>
39 #include <asm/mpspec.h>
40
41 #ifdef CONFIG_X86_64
42
43 static inline void acpi_madt_oem_check(char *oem_id, char *oem_table_id)
44 {
45 }
46 extern void __init clustered_apic_check(void);
47 static inline int ioapic_setup_disabled(void)
48 {
49 return 0;
50 }
51
52 #include <asm/proto.h>
53
54 #else /* X86 */
55
56 #ifdef CONFIG_X86_LOCAL_APIC
57 #include <mach_apic.h>
58 #include <mach_mpparse.h>
59 #endif /* CONFIG_X86_LOCAL_APIC */
60
61 #endif /* X86 */
62
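/*
 * A MADT subtable is bad if the pointer is NULL, if the entry would run
 * past the end of the mapped table, or if the length recorded in its
 * header does not match the expected structure size.
 */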
63 #define BAD_MADT_ENTRY(entry, end) ( \
64 (!entry) || (unsigned long)entry + sizeof(*entry) > end || \
65 ((acpi_table_entry_header *)entry)->length != sizeof(*entry))
66
67 #define PREFIX "ACPI: "
68
69 int acpi_noirq __initdata; /* skip ACPI IRQ initialization */
70 int acpi_pci_disabled __initdata; /* skip ACPI PCI scan and IRQ initialization */
71 int acpi_ht __initdata = 1; /* enable HT */
72
73 int acpi_lapic;
74 int acpi_ioapic;
75 int acpi_strict;
76 EXPORT_SYMBOL(acpi_strict);
77
78 acpi_interrupt_flags acpi_sci_flags __initdata;
79 int acpi_sci_override_gsi __initdata;
80 int acpi_skip_timer_override __initdata;
81
82 #ifdef CONFIG_X86_LOCAL_APIC
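/*
 * Defaults to APIC_DEFAULT_PHYS_BASE (0xfee00000); the MADT, or a
 * LAPIC_ADDR_OVR entry, may override this below.
 */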
83 static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
84 #endif
85
86 #ifndef __HAVE_ARCH_CMPXCHG
87 #warning ACPI uses CMPXCHG, i486 and later hardware
88 #endif
89
90 #define MAX_MADT_ENTRIES 256
91 u8 x86_acpiid_to_apicid[MAX_MADT_ENTRIES] =
92 {[0 ... MAX_MADT_ENTRIES - 1] = 0xff };
93 EXPORT_SYMBOL(x86_acpiid_to_apicid);
94
95 /* --------------------------------------------------------------------------
96 Boot-time Configuration
97 -------------------------------------------------------------------------- */
98
99 /*
100 * The default interrupt routing model is PIC (8259). This gets
101 * overridden if IOAPICs are enumerated (below).
102 */
103 enum acpi_irq_model_id acpi_irq_model = ACPI_IRQ_MODEL_PIC;
104
105 #ifdef CONFIG_X86_64
106
107 /* rely on all ACPI tables being in the direct mapping */
108 char *__acpi_map_table(unsigned long phys_addr, unsigned long size)
109 {
110 if (!phys_addr || !size)
111 return NULL;
112
113 if (phys_addr < (end_pfn_map << PAGE_SHIFT))
114 return __va(phys_addr);
115
116 return NULL;
117 }
118
119 #else
120
121 /*
122 * Temporarily use the virtual area starting from FIX_IO_APIC_BASE_END
123 * to map the target physical address. The problem is that set_fixmap()
124 * provides only a single page, and a single page may not be
125 * sufficient.
126 * By using this area, we can map up to MAX_IO_APICS pages temporarily,
127 * i.e. until the next __acpi_map_table() call.
128 *
129 * Important Safety Note: The fixed I/O APIC page numbers are *subtracted*
130 * from the fixed base. That's why we start at FIX_IO_APIC_BASE_END and
131 * count idx down while incrementing the phys address.
132 */
133 char *__acpi_map_table(unsigned long phys, unsigned long size)
134 {
135 unsigned long base, offset, mapped_size;
136 int idx;
137
138 if (phys + size < 8 * 1024 * 1024)
139 return __va(phys);
140
141 offset = phys & (PAGE_SIZE - 1);
142 mapped_size = PAGE_SIZE - offset;
143 set_fixmap(FIX_ACPI_END, phys);
144 base = fix_to_virt(FIX_ACPI_END);
145
146 /*
147 * Most tables fit in the page mapped above; map more pages only if needed.
148 */
149 idx = FIX_ACPI_END;
150 while (mapped_size < size) {
151 if (--idx < FIX_ACPI_BEGIN)
152 return NULL; /* cannot handle this */
153 phys += PAGE_SIZE;
154 set_fixmap(idx, phys);
155 mapped_size += PAGE_SIZE;
156 }
157
158 return ((unsigned char *)base + offset);
159 }
160 #endif
161
162 #ifdef CONFIG_PCI_MMCONFIG
163 /* The physical address of the MMCONFIG aperture. Set from ACPI tables. */
164 struct acpi_table_mcfg_config *pci_mmcfg_config;
165 int pci_mmcfg_config_num;
166
167 int __init acpi_parse_mcfg(unsigned long phys_addr, unsigned long size)
168 {
169 struct acpi_table_mcfg *mcfg;
170 unsigned long i;
171 int config_size;
172
173 if (!phys_addr || !size)
174 return -EINVAL;
175
176 mcfg = (struct acpi_table_mcfg *)__acpi_map_table(phys_addr, size);
177 if (!mcfg) {
178 printk(KERN_WARNING PREFIX "Unable to map MCFG\n");
179 return -ENODEV;
180 }
181
182 /* how many config structures do we have */
183 pci_mmcfg_config_num = 0;
184 i = size - sizeof(struct acpi_table_mcfg);
185 while (i >= sizeof(struct acpi_table_mcfg_config)) {
186 ++pci_mmcfg_config_num;
187 i -= sizeof(struct acpi_table_mcfg_config);
188 }
189 if (pci_mmcfg_config_num == 0) {
190 printk(KERN_ERR PREFIX "MMCONFIG has no entries\n");
191 return -ENODEV;
192 }
193
194 config_size = pci_mmcfg_config_num * sizeof(*pci_mmcfg_config);
195 pci_mmcfg_config = kmalloc(config_size, GFP_KERNEL);
196 if (!pci_mmcfg_config) {
197 printk(KERN_WARNING PREFIX
198 "No memory for MCFG config tables\n");
199 return -ENOMEM;
200 }
201
202 memcpy(pci_mmcfg_config, &mcfg->config, config_size);
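/*
 * base_reserved holds the upper 32 bits of the 64-bit MMCONFIG base
 * address; a non-zero value means the aperture lies above 4 GB and is
 * rejected here.
 */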
203 for (i = 0; i < pci_mmcfg_config_num; ++i) {
204 if (mcfg->config[i].base_reserved) {
205 printk(KERN_ERR PREFIX
206 "MMCONFIG not in low 4GB of memory\n");
207 return -ENODEV;
208 }
209 }
210
211 return 0;
212 }
213 #endif /* CONFIG_PCI_MMCONFIG */
214
215 #ifdef CONFIG_X86_LOCAL_APIC
216 static int __init acpi_parse_madt(unsigned long phys_addr, unsigned long size)
217 {
218 struct acpi_table_madt *madt = NULL;
219
220 if (!phys_addr || !size)
221 return -EINVAL;
222
223 madt = (struct acpi_table_madt *)__acpi_map_table(phys_addr, size);
224 if (!madt) {
225 printk(KERN_WARNING PREFIX "Unable to map MADT\n");
226 return -ENODEV;
227 }
228
229 if (madt->lapic_address) {
230 acpi_lapic_addr = (u64) madt->lapic_address;
231
232 printk(KERN_DEBUG PREFIX "Local APIC address 0x%08x\n",
233 madt->lapic_address);
234 }
235
236 acpi_madt_oem_check(madt->header.oem_id, madt->header.oem_table_id);
237
238 return 0;
239 }
240
241 static int __init
242 acpi_parse_lapic(acpi_table_entry_header * header, const unsigned long end)
243 {
244 struct acpi_table_lapic *processor = NULL;
245
246 processor = (struct acpi_table_lapic *)header;
247
248 if (BAD_MADT_ENTRY(processor, end))
249 return -EINVAL;
250
251 acpi_table_print_madt_entry(header);
252
253 /* no utility in registering a disabled processor */
254 if (processor->flags.enabled == 0)
255 return 0;
256
257 x86_acpiid_to_apicid[processor->acpi_id] = processor->id;
258
259 mp_register_lapic(processor->id, /* APIC ID */
260 processor->flags.enabled); /* Enabled? */
261
262 return 0;
263 }
264
265 static int __init
266 acpi_parse_lapic_addr_ovr(acpi_table_entry_header * header,
267 const unsigned long end)
268 {
269 struct acpi_table_lapic_addr_ovr *lapic_addr_ovr = NULL;
270
271 lapic_addr_ovr = (struct acpi_table_lapic_addr_ovr *)header;
272
273 if (BAD_MADT_ENTRY(lapic_addr_ovr, end))
274 return -EINVAL;
275
276 acpi_lapic_addr = lapic_addr_ovr->address;
277
278 return 0;
279 }
280
281 static int __init
282 acpi_parse_lapic_nmi(acpi_table_entry_header * header, const unsigned long end)
283 {
284 struct acpi_table_lapic_nmi *lapic_nmi = NULL;
285
286 lapic_nmi = (struct acpi_table_lapic_nmi *)header;
287
288 if (BAD_MADT_ENTRY(lapic_nmi, end))
289 return -EINVAL;
290
291 acpi_table_print_madt_entry(header);
292
293 if (lapic_nmi->lint != 1)
294 printk(KERN_WARNING PREFIX "NMI not connected to LINT 1!\n");
295
296 return 0;
297 }
298
299 #endif /*CONFIG_X86_LOCAL_APIC */
300
301 #ifdef CONFIG_X86_IO_APIC
302
303 static int __init
304 acpi_parse_ioapic(acpi_table_entry_header * header, const unsigned long end)
305 {
306 struct acpi_table_ioapic *ioapic = NULL;
307
308 ioapic = (struct acpi_table_ioapic *)header;
309
310 if (BAD_MADT_ENTRY(ioapic, end))
311 return -EINVAL;
312
313 acpi_table_print_madt_entry(header);
314
315 mp_register_ioapic(ioapic->id,
316 ioapic->address, ioapic->global_irq_base);
317
318 return 0;
319 }
320
321 /*
322 * Parse Interrupt Source Override for the ACPI SCI
323 */
324 static void acpi_sci_ioapic_setup(u32 gsi, u16 polarity, u16 trigger)
325 {
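/*
 * MADT INTI flag encoding: 0 = conforms to bus default, 1 = active
 * high / edge, 3 = active low / level. The SCI's bus default is level,
 * active low.
 */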
326 if (trigger == 0) /* compatible SCI trigger is level */
327 trigger = 3;
328
329 if (polarity == 0) /* compatible SCI polarity is low */
330 polarity = 3;
331
332 /* Command-line override via acpi_sci= */
333 if (acpi_sci_flags.trigger)
334 trigger = acpi_sci_flags.trigger;
335
336 if (acpi_sci_flags.polarity)
337 polarity = acpi_sci_flags.polarity;
338
339 /*
340 * mp_config_acpi_legacy_irqs() has already set up IRQs < 16.
341 * If the GSI is < 16, this will update its flags;
342 * otherwise it will create a new mp_irqs[] entry.
343 */
344 mp_override_legacy_irq(gsi, polarity, trigger, gsi);
345
346 /*
347 * Stash the override to indicate we've been here,
348 * and for the later update of acpi_fadt.
349 */
350 acpi_sci_override_gsi = gsi;
351 return;
352 }
353
354 static int __init
355 acpi_parse_int_src_ovr(acpi_table_entry_header * header,
356 const unsigned long end)
357 {
358 struct acpi_table_int_src_ovr *intsrc = NULL;
359
360 intsrc = (struct acpi_table_int_src_ovr *)header;
361
362 if (BAD_MADT_ENTRY(intsrc, end))
363 return -EINVAL;
364
365 acpi_table_print_madt_entry(header);
366
367 if (intsrc->bus_irq == acpi_fadt.sci_int) {
368 acpi_sci_ioapic_setup(intsrc->global_irq,
369 intsrc->flags.polarity,
370 intsrc->flags.trigger);
371 return 0;
372 }
373
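/*
 * "acpi_skip_timer_override" ignores the common BIOS override that
 * routes the timer (IRQ0) to I/O APIC pin 2; it is meant for boards
 * whose BIOS supplies a bogus override.
 */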
374 if (acpi_skip_timer_override &&
375 intsrc->bus_irq == 0 && intsrc->global_irq == 2) {
376 printk(PREFIX "BIOS IRQ0 pin2 override ignored.\n");
377 return 0;
378 }
379
380 mp_override_legacy_irq(intsrc->bus_irq,
381 intsrc->flags.polarity,
382 intsrc->flags.trigger, intsrc->global_irq);
383
384 return 0;
385 }
386
387 static int __init
388 acpi_parse_nmi_src(acpi_table_entry_header * header, const unsigned long end)
389 {
390 struct acpi_table_nmi_src *nmi_src = NULL;
391
392 nmi_src = (struct acpi_table_nmi_src *)header;
393
394 if (BAD_MADT_ENTRY(nmi_src, end))
395 return -EINVAL;
396
397 acpi_table_print_madt_entry(header);
398
399 /* TBD: Support nmi_src entries? */
400
401 return 0;
402 }
403
404 #endif /* CONFIG_X86_IO_APIC */
405
406 /*
407 * acpi_pic_sci_set_trigger()
408 *
409 * Use the ELCR to set the PIC-mode trigger type for the SCI.
410 *
411 * If a PIC-mode SCI is not recognized or gives spurious IRQ7s,
412 * it may require edge trigger -- use "acpi_sci=edge".
413 *
414 * Ports 0x4d0-0x4d1 are ELCR1 and ELCR2, the Edge/Level Control Registers
415 * for the 8259 PIC. bit[n] = 1 means irq[n] is level, otherwise edge.
416 * ELCR1 covers IRQs 0-7 (IRQ 0, 1, 2 must be 0).
417 * ELCR2 covers IRQs 8-15 (IRQ 8, 13 must be 0).
418 */
419
420 void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger)
421 {
422 unsigned int mask = 1 << irq;
423 unsigned int old, new;
424
425 /* Read the old ELCR mask */
426 old = inb(0x4d0) | (inb(0x4d1) << 8);
427
428 /*
429 * If we use ACPI to set PCI IRQs, then we should clear the ELCR,
430 * since we will set it correctly as we enable the PCI IRQ
431 * routing.
432 */
433 new = acpi_noirq ? old : 0;
434
435 /*
436 * Update the SCI information in the ELCR; it isn't in the
437 * PCI routing tables.
438 */
439 switch (trigger) {
440 case 1: /* Edge - clear */
441 new &= ~mask;
442 break;
443 case 3: /* Level - set */
444 new |= mask;
445 break;
446 }
447
448 if (old == new)
449 return;
450
451 printk(PREFIX "setting ELCR to %04x (from %04x)\n", new, old);
452 outb(new, 0x4d0);
453 outb(new >> 8, 0x4d1);
454 }
455
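/*
 * Translate a GSI to a kernel IRQ number. Normally the mapping is 1:1;
 * when PCI interrupts are vector-based (use_pci_vector()), non-legacy
 * GSIs are translated through IO_APIC_VECTOR() instead.
 */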
456 int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
457 {
458 #ifdef CONFIG_X86_IO_APIC
459 if (use_pci_vector() && !platform_legacy_irq(gsi))
460 *irq = IO_APIC_VECTOR(gsi);
461 else
462 #endif
463 *irq = gsi;
464 return 0;
465 }
466
467 /*
468 * success: return IRQ number (>=0)
469 * failure: return < 0
470 */
471 int acpi_register_gsi(u32 gsi, int edge_level, int active_high_low)
472 {
473 unsigned int irq;
474 unsigned int plat_gsi = gsi;
475
476 #ifdef CONFIG_PCI
477 /*
478 * Make sure all (legacy) PCI IRQs are set as level-triggered.
479 */
480 if (acpi_irq_model == ACPI_IRQ_MODEL_PIC) {
481 extern void eisa_set_level_irq(unsigned int irq);
482
483 if (edge_level == ACPI_LEVEL_SENSITIVE)
484 eisa_set_level_irq(gsi);
485 }
486 #endif
487
488 #ifdef CONFIG_X86_IO_APIC
489 if (acpi_irq_model == ACPI_IRQ_MODEL_IOAPIC) {
490 plat_gsi = mp_register_gsi(gsi, edge_level, active_high_low);
491 }
492 #endif
493 acpi_gsi_to_irq(plat_gsi, &irq);
494 return irq;
495 }
496
497 EXPORT_SYMBOL(acpi_register_gsi);
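/*
 * A typical (hypothetical) caller, e.g. ACPI PCI interrupt routing,
 * would do something like:
 *
 *	irq = acpi_register_gsi(gsi, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW);
 *	if (irq >= 0)
 *		dev->irq = irq;
 */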
498
499 /*
500 * ACPI based hotplug support for CPU
501 */
502 #ifdef CONFIG_ACPI_HOTPLUG_CPU
503 int acpi_map_lsapic(acpi_handle handle, int *pcpu)
504 {
505 /* TBD */
506 return -EINVAL;
507 }
508
509 EXPORT_SYMBOL(acpi_map_lsapic);
510
511 int acpi_unmap_lsapic(int cpu)
512 {
513 /* TBD */
514 return -EINVAL;
515 }
516
517 EXPORT_SYMBOL(acpi_unmap_lsapic);
518 #endif /* CONFIG_ACPI_HOTPLUG_CPU */
519
520 int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base)
521 {
522 /* TBD */
523 return -EINVAL;
524 }
525
526 EXPORT_SYMBOL(acpi_register_ioapic);
527
528 int acpi_unregister_ioapic(acpi_handle handle, u32 gsi_base)
529 {
530 /* TBD */
531 return -EINVAL;
532 }
533
534 EXPORT_SYMBOL(acpi_unregister_ioapic);
535
536 static unsigned long __init
537 acpi_scan_rsdp(unsigned long start, unsigned long length)
538 {
539 unsigned long offset = 0;
540 unsigned long sig_len = sizeof("RSD PTR ") - 1;
541
542 /*
543 * Scan all 16-byte boundaries of the physical memory region for the
544 * RSDP signature.
545 */
546 for (offset = 0; offset < length; offset += 16) {
547 if (strncmp((char *)(start + offset), "RSD PTR ", sig_len))
548 continue;
549 return (start + offset);
550 }
551
552 return 0;
553 }
554
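/*
 * Parse the Simple Boot Flag (BOOT) table; it records the CMOS index of
 * the boot flag register, saved here in sbf_port.
 */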
555 static int __init acpi_parse_sbf(unsigned long phys_addr, unsigned long size)
556 {
557 struct acpi_table_sbf *sb;
558
559 if (!phys_addr || !size)
560 return -EINVAL;
561
562 sb = (struct acpi_table_sbf *)__acpi_map_table(phys_addr, size);
563 if (!sb) {
564 printk(KERN_WARNING PREFIX "Unable to map SBF\n");
565 return -ENODEV;
566 }
567
568 sbf_port = sb->sbf_cmos; /* Save CMOS port */
569
570 return 0;
571 }
572
573 #ifdef CONFIG_HPET_TIMER
574
575 static int __init acpi_parse_hpet(unsigned long phys, unsigned long size)
576 {
577 struct acpi_table_hpet *hpet_tbl;
578
579 if (!phys || !size)
580 return -EINVAL;
581
582 hpet_tbl = (struct acpi_table_hpet *)__acpi_map_table(phys, size);
583 if (!hpet_tbl) {
584 printk(KERN_WARNING PREFIX "Unable to map HPET\n");
585 return -ENODEV;
586 }
587
588 if (hpet_tbl->addr.space_id != ACPI_SPACE_MEM) {
589 printk(KERN_WARNING PREFIX "HPET timers must be located in "
590 "memory.\n");
591 return -1;
592 }
593 #ifdef CONFIG_X86_64
594 vxtime.hpet_address = hpet_tbl->addr.addrl |
595 ((long)hpet_tbl->addr.addrh << 32);
596
597 printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n",
598 hpet_tbl->id, vxtime.hpet_address);
599 #else /* X86 */
600 {
601 extern unsigned long hpet_address;
602
603 hpet_address = hpet_tbl->addr.addrl;
604 printk(KERN_INFO PREFIX "HPET id: %#x base: %#lx\n",
605 hpet_tbl->id, hpet_address);
606 }
607 #endif /* X86 */
608
609 return 0;
610 }
611 #else
612 #define acpi_parse_hpet NULL
613 #endif
614
615 #ifdef CONFIG_X86_PM_TIMER
616 extern u32 pmtmr_ioport;
617 #endif
618
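/*
 * The FADT supplies the SCI interrupt (needed for the MADT INT_SRC_OVR
 * pass) and, with CONFIG_X86_PM_TIMER, the PM timer I/O port. FADT
 * rev 2+ provides the extended X_PM_TMR_BLK; older tables only have the
 * 32-bit V1 field.
 */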
619 static int __init acpi_parse_fadt(unsigned long phys, unsigned long size)
620 {
621 struct fadt_descriptor_rev2 *fadt = NULL;
622
623 fadt = (struct fadt_descriptor_rev2 *)__acpi_map_table(phys, size);
624 if (!fadt) {
625 printk(KERN_WARNING PREFIX "Unable to map FADT\n");
626 return 0;
627 }
628 /* initialize sci_int early for INT_SRC_OVR MADT parsing */
629 acpi_fadt.sci_int = fadt->sci_int;
630
631 /* initialize rev and apic_phys_dest_mode for x86_64 genapic */
632 acpi_fadt.revision = fadt->revision;
633 acpi_fadt.force_apic_physical_destination_mode =
634 fadt->force_apic_physical_destination_mode;
635
636 #ifdef CONFIG_X86_PM_TIMER
637 /* detect the location of the ACPI PM Timer */
638 if (fadt->revision >= FADT2_REVISION_ID) {
639 /* FADT rev. 2 */
640 if (fadt->xpm_tmr_blk.address_space_id !=
641 ACPI_ADR_SPACE_SYSTEM_IO)
642 return 0;
643
644 pmtmr_ioport = fadt->xpm_tmr_blk.address;
645 } else {
646 /* FADT rev. 1 */
647 pmtmr_ioport = fadt->V1_pm_tmr_blk;
648 }
649 if (pmtmr_ioport)
650 printk(KERN_INFO PREFIX "PM-Timer IO Port: %#x\n",
651 pmtmr_ioport);
652 #endif
653 return 0;
654 }
655
656 unsigned long __init acpi_find_rsdp(void)
657 {
658 unsigned long rsdp_phys = 0;
659
660 if (efi_enabled) {
661 if (efi.acpi20)
662 return __pa(efi.acpi20);
663 else if (efi.acpi)
664 return __pa(efi.acpi);
665 }
666 /*
667 * Scan memory looking for the RSDP signature. First search EBDA (low
668 * memory) paragraphs and then search upper memory (E0000-FFFFF).
669 */
670 rsdp_phys = acpi_scan_rsdp(0, 0x400);
671 if (!rsdp_phys)
672 rsdp_phys = acpi_scan_rsdp(0xE0000, 0x20000);
673
674 return rsdp_phys;
675 }
676
677 #ifdef CONFIG_X86_LOCAL_APIC
678 /*
679 * Parse LAPIC entries in MADT
680 * returns 0 on success, < 0 on error
681 */
682 static int __init acpi_parse_madt_lapic_entries(void)
683 {
684 int count;
685
686 /*
687 * Note that the LAPIC address is obtained from the MADT (32-bit value)
688 * and (optionally) overridden by a LAPIC_ADDR_OVR entry (64-bit value).
689 */
690
691 count =
692 acpi_table_parse_madt(ACPI_MADT_LAPIC_ADDR_OVR,
693 acpi_parse_lapic_addr_ovr, 0);
694 if (count < 0) {
695 printk(KERN_ERR PREFIX
696 "Error parsing LAPIC address override entry\n");
697 return count;
698 }
699
700 mp_register_lapic_address(acpi_lapic_addr);
701
702 count = acpi_table_parse_madt(ACPI_MADT_LAPIC, acpi_parse_lapic,
703 MAX_APICS);
704 if (!count) {
705 printk(KERN_ERR PREFIX "No LAPIC entries present\n");
706 /* TBD: Cleanup to allow fallback to MPS */
707 return -ENODEV;
708 } else if (count < 0) {
709 printk(KERN_ERR PREFIX "Error parsing LAPIC entry\n");
710 /* TBD: Cleanup to allow fallback to MPS */
711 return count;
712 }
713
714 count =
715 acpi_table_parse_madt(ACPI_MADT_LAPIC_NMI, acpi_parse_lapic_nmi, 0);
716 if (count < 0) {
717 printk(KERN_ERR PREFIX "Error parsing LAPIC NMI entry\n");
718 /* TBD: Cleanup to allow fallback to MPS */
719 return count;
720 }
721 return 0;
722 }
723 #endif /* CONFIG_X86_LOCAL_APIC */
724
725 #ifdef CONFIG_X86_IO_APIC
726 /*
727 * Parse IOAPIC related entries in MADT
728 * returns 0 on success, < 0 on error
729 */
730 static int __init acpi_parse_madt_ioapic_entries(void)
731 {
732 int count;
733
734 /*
735 * The ACPI interpreter is required to complete interrupt setup,
736 * so if it is off, don't enumerate the IO-APICs with ACPI.
737 * If MPS is present, it will handle them;
738 * otherwise the system will stay in PIC mode.
739 */
740 if (acpi_disabled || acpi_noirq) {
741 return -ENODEV;
742 }
743
744 /*
745 * if "noapic" boot option, don't look for IO-APICs
746 */
747 if (skip_ioapic_setup) {
748 printk(KERN_INFO PREFIX "Skipping IOAPIC probe "
749 "due to 'noapic' option.\n");
750 return -ENODEV;
751 }
752
753 count =
754 acpi_table_parse_madt(ACPI_MADT_IOAPIC, acpi_parse_ioapic,
755 MAX_IO_APICS);
756 if (!count) {
757 printk(KERN_ERR PREFIX "No IOAPIC entries present\n");
758 return -ENODEV;
759 } else if (count < 0) {
760 printk(KERN_ERR PREFIX "Error parsing IOAPIC entry\n");
761 return count;
762 }
763
764 count =
765 acpi_table_parse_madt(ACPI_MADT_INT_SRC_OVR, acpi_parse_int_src_ovr,
766 NR_IRQ_VECTORS);
767 if (count < 0) {
768 printk(KERN_ERR PREFIX
769 "Error parsing interrupt source overrides entry\n");
770 /* TBD: Cleanup to allow fallback to MPS */
771 return count;
772 }
773
774 /*
775 * If the BIOS did not supply an INT_SRC_OVR for the SCI,
776 * pretend we got one so we can set the SCI flags.
777 */
778 if (!acpi_sci_override_gsi)
779 acpi_sci_ioapic_setup(acpi_fadt.sci_int, 0, 0);
780
781 /* Fill in identity legacy mappings where there is no override */
782 mp_config_acpi_legacy_irqs();
783
784 count =
785 acpi_table_parse_madt(ACPI_MADT_NMI_SRC, acpi_parse_nmi_src,
786 NR_IRQ_VECTORS);
787 if (count < 0) {
788 printk(KERN_ERR PREFIX "Error parsing NMI SRC entry\n");
789 /* TBD: Cleanup to allow fallback to MPS */
790 return count;
791 }
792
793 return 0;
794 }
795 #else
796 static inline int acpi_parse_madt_ioapic_entries(void)
797 {
798 return -1;
799 }
800 #endif /* !CONFIG_X86_IO_APIC */
801
802 static void __init acpi_process_madt(void)
803 {
804 #ifdef CONFIG_X86_LOCAL_APIC
805 int count, error;
806
807 count = acpi_table_parse(ACPI_APIC, acpi_parse_madt);
808 if (count >= 1) {
809
810 /*
811 * Parse MADT LAPIC entries
812 */
813 error = acpi_parse_madt_lapic_entries();
814 if (!error) {
815 acpi_lapic = 1;
816
817 #ifdef CONFIG_X86_GENERICARCH
818 generic_bigsmp_probe();
819 #endif
820 /*
821 * Parse MADT IO-APIC entries
822 */
823 error = acpi_parse_madt_ioapic_entries();
824 if (!error) {
825 acpi_irq_model = ACPI_IRQ_MODEL_IOAPIC;
826 acpi_irq_balance_set(NULL);
827 acpi_ioapic = 1;
828
829 smp_found_config = 1;
830 clustered_apic_check();
831 }
832 }
833 if (error == -EINVAL) {
834 /*
835 * Dell Precision Workstation 410, 610 come here.
836 */
837 printk(KERN_ERR PREFIX
838 "Invalid BIOS MADT, disabling ACPI\n");
839 disable_acpi();
840 }
841 }
842 #endif
843 return;
844 }
845
846 extern int acpi_force;
847
848 #ifdef __i386__
849
850 static int __init disable_acpi_irq(struct dmi_system_id *d)
851 {
852 if (!acpi_force) {
853 printk(KERN_NOTICE "%s detected: force use of acpi=noirq\n",
854 d->ident);
855 acpi_noirq_set();
856 }
857 return 0;
858 }
859
860 static int __init disable_acpi_pci(struct dmi_system_id *d)
861 {
862 if (!acpi_force) {
863 printk(KERN_NOTICE "%s detected: force use of pci=noacpi\n",
864 d->ident);
865 acpi_disable_pci();
866 }
867 return 0;
868 }
869
870 static int __init dmi_disable_acpi(struct dmi_system_id *d)
871 {
872 if (!acpi_force) {
873 printk(KERN_NOTICE "%s detected: acpi off\n", d->ident);
874 disable_acpi();
875 } else {
876 printk(KERN_NOTICE
877 "Warning: DMI blacklist says broken, but acpi forced\n");
878 }
879 return 0;
880 }
881
882 /*
883 * Limit ACPI to CPU enumeration for HT
884 */
885 static int __init force_acpi_ht(struct dmi_system_id *d)
886 {
887 if (!acpi_force) {
888 printk(KERN_NOTICE "%s detected: force use of acpi=ht\n",
889 d->ident);
890 disable_acpi();
891 acpi_ht = 1;
892 } else {
893 printk(KERN_NOTICE
894 "Warning: acpi=force overrules DMI blacklist: acpi=ht\n");
895 }
896 return 0;
897 }
898
899 /*
900 * If your system is blacklisted here, but you find that acpi=force
901 * works for you, please contact acpi-devel@sourceforge.net
902 */
903 static struct dmi_system_id __initdata acpi_dmi_table[] = {
904 /*
905 * Boxes that need ACPI disabled
906 */
907 {
908 .callback = dmi_disable_acpi,
909 .ident = "IBM Thinkpad",
910 .matches = {
911 DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
912 DMI_MATCH(DMI_BOARD_NAME, "2629H1G"),
913 },
914 },
915
916 /*
917 * Boxes that need acpi=ht
918 */
919 {
920 .callback = force_acpi_ht,
921 .ident = "FSC Primergy T850",
922 .matches = {
923 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
924 DMI_MATCH(DMI_PRODUCT_NAME, "PRIMERGY T850"),
925 },
926 },
927 {
928 .callback = force_acpi_ht,
929 .ident = "DELL GX240",
930 .matches = {
931 DMI_MATCH(DMI_BOARD_VENDOR, "Dell Computer Corporation"),
932 DMI_MATCH(DMI_BOARD_NAME, "OptiPlex GX240"),
933 },
934 },
935 {
936 .callback = force_acpi_ht,
937 .ident = "HP VISUALIZE NT Workstation",
938 .matches = {
939 DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"),
940 DMI_MATCH(DMI_PRODUCT_NAME, "HP VISUALIZE NT Workstation"),
941 },
942 },
943 {
944 .callback = force_acpi_ht,
945 .ident = "Compaq Workstation W8000",
946 .matches = {
947 DMI_MATCH(DMI_SYS_VENDOR, "Compaq"),
948 DMI_MATCH(DMI_PRODUCT_NAME, "Workstation W8000"),
949 },
950 },
951 {
952 .callback = force_acpi_ht,
953 .ident = "ASUS P4B266",
954 .matches = {
955 DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
956 DMI_MATCH(DMI_BOARD_NAME, "P4B266"),
957 },
958 },
959 {
960 .callback = force_acpi_ht,
961 .ident = "ASUS P2B-DS",
962 .matches = {
963 DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
964 DMI_MATCH(DMI_BOARD_NAME, "P2B-DS"),
965 },
966 },
967 {
968 .callback = force_acpi_ht,
969 .ident = "ASUS CUR-DLS",
970 .matches = {
971 DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
972 DMI_MATCH(DMI_BOARD_NAME, "CUR-DLS"),
973 },
974 },
975 {
976 .callback = force_acpi_ht,
977 .ident = "ABIT i440BX-W83977",
978 .matches = {
979 DMI_MATCH(DMI_BOARD_VENDOR, "ABIT <http://www.abit.com>"),
980 DMI_MATCH(DMI_BOARD_NAME, "i440BX-W83977 (BP6)"),
981 },
982 },
983 {
984 .callback = force_acpi_ht,
985 .ident = "IBM Bladecenter",
986 .matches = {
987 DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
988 DMI_MATCH(DMI_BOARD_NAME, "IBM eServer BladeCenter HS20"),
989 },
990 },
991 {
992 .callback = force_acpi_ht,
993 .ident = "IBM eServer xSeries 360",
994 .matches = {
995 DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
996 DMI_MATCH(DMI_BOARD_NAME, "eServer xSeries 360"),
997 },
998 },
999 {
1000 .callback = force_acpi_ht,
1001 .ident = "IBM eserver xSeries 330",
1002 .matches = {
1003 DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
1004 DMI_MATCH(DMI_BOARD_NAME, "eserver xSeries 330"),
1005 },
1006 },
1007 {
1008 .callback = force_acpi_ht,
1009 .ident = "IBM eserver xSeries 440",
1010 .matches = {
1011 DMI_MATCH(DMI_BOARD_VENDOR, "IBM"),
1012 DMI_MATCH(DMI_PRODUCT_NAME, "eserver xSeries 440"),
1013 },
1014 },
1015
1016 /*
1017 * Boxes that need ACPI PCI IRQ routing disabled
1018 */
1019 {
1020 .callback = disable_acpi_irq,
1021 .ident = "ASUS A7V",
1022 .matches = {
1023 DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC"),
1024 DMI_MATCH(DMI_BOARD_NAME, "<A7V>"),
1025 /* newer BIOS, Revision 1011, does work */
1026 DMI_MATCH(DMI_BIOS_VERSION,
1027 "ASUS A7V ACPI BIOS Revision 1007"),
1028 },
1029 },
1030
1031 /*
1032 * Boxes that need ACPI PCI IRQ routing and PCI scan disabled
1033 */
1034 { /* _BBN 0 bug */
1035 .callback = disable_acpi_pci,
1036 .ident = "ASUS PR-DLS",
1037 .matches = {
1038 DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
1039 DMI_MATCH(DMI_BOARD_NAME, "PR-DLS"),
1040 DMI_MATCH(DMI_BIOS_VERSION,
1041 "ASUS PR-DLS ACPI BIOS Revision 1010"),
1042 DMI_MATCH(DMI_BIOS_DATE, "03/21/2003")
1043 },
1044 },
1045 {
1046 .callback = disable_acpi_pci,
1047 .ident = "Acer TravelMate 36x Laptop",
1048 .matches = {
1049 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
1050 DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 360"),
1051 },
1052 },
1053 {}
1054 };
1055
1056 #endif /* __i386__ */
1057
1058 /*
1059 * acpi_boot_table_init() and acpi_boot_init()
1060 * called from setup_arch(), always.
1061 * 1. checksums all tables
1062 * 2. enumerates lapics
1063 * 3. enumerates io-apics
1064 *
1065 * acpi_table_init() is separate to allow reading SRAT without
1066 * other side effects.
1067 *
1068 * side effects of acpi_boot_init:
1069 * acpi_lapic = 1 if LAPIC found
1070 * acpi_ioapic = 1 if IOAPIC found
1071 * if (acpi_lapic && acpi_ioapic) smp_found_config = 1;
1072 * if acpi_blacklisted() acpi_disabled = 1;
1073 * acpi_irq_model=...
1074 * ...
1075 *
1076 * return value: (currently ignored)
1077 * 0: success
1078 * !0: failure
1079 */
1080
1081 int __init acpi_boot_table_init(void)
1082 {
1083 int error;
1084
1085 #ifdef __i386__
1086 dmi_check_system(acpi_dmi_table);
1087 #endif
1088
1089 /*
1090 * If acpi_disabled, bail out
1091 * One exception: acpi=ht continues far enough to enumerate LAPICs
1092 */
1093 if (acpi_disabled && !acpi_ht)
1094 return 1;
1095
1096 /*
1097 * Initialize the ACPI boot-time table parser.
1098 */
1099 error = acpi_table_init();
1100 if (error) {
1101 disable_acpi();
1102 return error;
1103 }
1104 #ifdef __i386__
1105 check_acpi_pci();
1106 #endif
1107
1108 acpi_table_parse(ACPI_BOOT, acpi_parse_sbf);
1109
1110 /*
1111 * blacklist may disable ACPI entirely
1112 */
1113 error = acpi_blacklisted();
1114 if (error) {
1115 if (acpi_force) {
1116 printk(KERN_WARNING PREFIX "acpi=force override\n");
1117 } else {
1118 printk(KERN_WARNING PREFIX "Disabling ACPI support\n");
1119 disable_acpi();
1120 return error;
1121 }
1122 }
1123
1124 return 0;
1125 }
1126
1127 int __init acpi_boot_init(void)
1128 {
1129 /*
1130 * If acpi_disabled, bail out
1131 * One exception: acpi=ht continues far enough to enumerate LAPICs
1132 */
1133 if (acpi_disabled && !acpi_ht)
1134 return 1;
1135
1136 acpi_table_parse(ACPI_BOOT, acpi_parse_sbf);
1137
1138 /*
1139 * set sci_int and PM timer address
1140 */
1141 acpi_table_parse(ACPI_FADT, acpi_parse_fadt);
1142
1143 /*
1144 * Process the Multiple APIC Description Table (MADT), if present
1145 */
1146 acpi_process_madt();
1147
1148 acpi_table_parse(ACPI_HPET, acpi_parse_hpet);
1149
1150 return 0;
1151 }