/*
 *  linux/arch/x86_64/kernel/setup.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Nov 2001 Dave Jones <davej@suse.de>
 *  Forked from i386 setup code.
 *
 *  $Id$
 */

/*
 * This file handles the architecture-dependent parts of initialization
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/tty.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/config.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <asm/processor.h>
#include <linux/console.h>
#include <linux/seq_file.h>
#include <linux/crash_dump.h>
#include <linux/root_dev.h>
#include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/kallsyms.h>
#include <linux/edd.h>
#include <linux/mmzone.h>
#include <linux/kexec.h>
#include <linux/cpufreq.h>
#include <linux/dmi.h>
#include <linux/dma-mapping.h>

#include <asm/mtrr.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/msr.h>
#include <asm/desc.h>
#include <video/edid.h>
#include <asm/e820.h>
#include <asm/dma.h>
#include <asm/mpspec.h>
#include <asm/mmu_context.h>
#include <asm/bootsetup.h>
#include <asm/proto.h>
#include <asm/setup.h>
#include <asm/mach_apic.h>
#include <asm/numa.h>
#include <asm/swiotlb.h>
#include <asm/sections.h>
#include <asm/gart-mapping.h>
/*
 * Machine setup..
 */

struct cpuinfo_x86 boot_cpu_data __read_mostly;

unsigned long mmu_cr4_features;

int acpi_disabled;
EXPORT_SYMBOL(acpi_disabled);
#ifdef CONFIG_ACPI
extern int __initdata acpi_ht;
extern acpi_interrupt_flags acpi_sci_flags;
int __initdata acpi_force = 0;
#endif

int acpi_numa __initdata;

/* Boot loader ID as an integer, for the benefit of proc_dointvec */
int bootloader_type;

unsigned long saved_video_mode;

/*
 * Setup options
 */
struct screen_info screen_info;
struct sys_desc_table_struct {
        unsigned short length;
        unsigned char table[0];
};

struct edid_info edid_info;
struct e820map e820;

extern int root_mountflags;

char command_line[COMMAND_LINE_SIZE];

struct resource standard_io_resources[] = {
        { .name = "dma1", .start = 0x00, .end = 0x1f,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "pic1", .start = 0x20, .end = 0x21,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "timer0", .start = 0x40, .end = 0x43,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "timer1", .start = 0x50, .end = 0x53,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "keyboard", .start = 0x60, .end = 0x6f,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "dma page reg", .start = 0x80, .end = 0x8f,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "pic2", .start = 0xa0, .end = 0xa1,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "dma2", .start = 0xc0, .end = 0xdf,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "fpu", .start = 0xf0, .end = 0xff,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO }
};

#define STANDARD_IO_RESOURCES \
        (sizeof standard_io_resources / sizeof standard_io_resources[0])

#define IORESOURCE_RAM (IORESOURCE_BUSY | IORESOURCE_MEM)

struct resource data_resource = {
        .name = "Kernel data",
        .start = 0,
        .end = 0,
        .flags = IORESOURCE_RAM,
};
struct resource code_resource = {
        .name = "Kernel code",
        .start = 0,
        .end = 0,
        .flags = IORESOURCE_RAM,
};

#define IORESOURCE_ROM (IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM)

static struct resource system_rom_resource = {
        .name = "System ROM",
        .start = 0xf0000,
        .end = 0xfffff,
        .flags = IORESOURCE_ROM,
};

static struct resource extension_rom_resource = {
        .name = "Extension ROM",
        .start = 0xe0000,
        .end = 0xeffff,
        .flags = IORESOURCE_ROM,
};

static struct resource adapter_rom_resources[] = {
        { .name = "Adapter ROM", .start = 0xc8000, .end = 0,
                .flags = IORESOURCE_ROM },
        { .name = "Adapter ROM", .start = 0, .end = 0,
                .flags = IORESOURCE_ROM },
        { .name = "Adapter ROM", .start = 0, .end = 0,
                .flags = IORESOURCE_ROM },
        { .name = "Adapter ROM", .start = 0, .end = 0,
                .flags = IORESOURCE_ROM },
        { .name = "Adapter ROM", .start = 0, .end = 0,
                .flags = IORESOURCE_ROM },
        { .name = "Adapter ROM", .start = 0, .end = 0,
                .flags = IORESOURCE_ROM }
};

#define ADAPTER_ROM_RESOURCES \
        (sizeof adapter_rom_resources / sizeof adapter_rom_resources[0])

static struct resource video_rom_resource = {
        .name = "Video ROM",
        .start = 0xc0000,
        .end = 0xc7fff,
        .flags = IORESOURCE_ROM,
};

static struct resource video_ram_resource = {
        .name = "Video RAM area",
        .start = 0xa0000,
        .end = 0xbffff,
        .flags = IORESOURCE_RAM,
};

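/*
 * Legacy BIOS option-ROM format: a valid ROM image begins with the
 * 0xaa55 signature word, byte 2 gives the image length in 512-byte
 * units, and all bytes of the image sum to zero modulo 256.
 */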
#define romsignature(x) (*(unsigned short *)(x) == 0xaa55)

static int __init romchecksum(unsigned char *rom, unsigned long length)
{
        unsigned char *p, sum = 0;

        for (p = rom; p < rom + length; p++)
                sum += *p;
        return sum == 0;
}

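/*
 * Scan the legacy ISA ROM window: the video BIOS at 0xc0000-0xc7fff,
 * adapter ROMs on 2K boundaries up to the extension ROM segment at
 * 0xe0000, and the system BIOS ROM at 0xf0000-0xfffff. Every ROM found
 * is registered in the iomem resource tree.
 */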
static void __init probe_roms(void)
{
        unsigned long start, length, upper;
        unsigned char *rom;
        int i;

        /* video rom */
        upper = adapter_rom_resources[0].start;
        for (start = video_rom_resource.start; start < upper; start += 2048) {
                rom = isa_bus_to_virt(start);
                if (!romsignature(rom))
                        continue;

                video_rom_resource.start = start;

                /* 0 < length <= 0x7f * 512, historically */
                length = rom[2] * 512;

                /* if checksum okay, trust length byte */
                if (length && romchecksum(rom, length))
                        video_rom_resource.end = start + length - 1;

                request_resource(&iomem_resource, &video_rom_resource);
                break;
        }

        start = (video_rom_resource.end + 1 + 2047) & ~2047UL;
        if (start < upper)
                start = upper;

        /* system rom */
        request_resource(&iomem_resource, &system_rom_resource);
        upper = system_rom_resource.start;

        /* check for extension rom (ignore length byte!) */
        rom = isa_bus_to_virt(extension_rom_resource.start);
        if (romsignature(rom)) {
                length = extension_rom_resource.end - extension_rom_resource.start + 1;
                if (romchecksum(rom, length)) {
                        request_resource(&iomem_resource, &extension_rom_resource);
                        upper = extension_rom_resource.start;
                }
        }

        /* check for adapter roms on 2k boundaries */
        for (i = 0; i < ADAPTER_ROM_RESOURCES && start < upper; start += 2048) {
                rom = isa_bus_to_virt(start);
                if (!romsignature(rom))
                        continue;

                /* 0 < length <= 0x7f * 512, historically */
                length = rom[2] * 512;

                /* but accept any length that fits if checksum okay */
                if (!length || start + length > upper || !romchecksum(rom, length))
                        continue;

                adapter_rom_resources[i].start = start;
                adapter_rom_resources[i].end = start + length - 1;
                request_resource(&iomem_resource, &adapter_rom_resources[i]);

                start = adapter_rom_resources[i++].end & ~2047UL;
        }
}

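/*
 * Pick off the early options (memory layout, ACPI, APIC, ...) while
 * copying the boot command line into command_line; the copy is handed
 * back for the usual __setup()-based parsing later in boot.
 */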
static __init void parse_cmdline_early(char **cmdline_p)
{
        char c = ' ', *to = command_line, *from = COMMAND_LINE;
        int len = 0;
        int userdef = 0;

        for (;;) {
                if (c != ' ')
                        goto next_char;

#ifdef CONFIG_SMP
                /*
                 * If the BIOS enumerates physical processors before logical,
                 * maxcpus=N at enumeration-time can be used to disable HT.
                 */
                else if (!memcmp(from, "maxcpus=", 8)) {
                        extern unsigned int maxcpus;

                        maxcpus = simple_strtoul(from + 8, NULL, 0);
                }
#endif
#ifdef CONFIG_ACPI
                /* "acpi=off" disables both ACPI table parsing and interpreter init */
                if (!memcmp(from, "acpi=off", 8))
                        disable_acpi();

                if (!memcmp(from, "acpi=force", 10)) {
                        /* add later when we do DMI horrors: */
                        acpi_force = 1;
                        acpi_disabled = 0;
                }

                /* acpi=ht just means: do ACPI MADT parsing
                   at bootup, but don't enable the full ACPI interpreter */
                if (!memcmp(from, "acpi=ht", 7)) {
                        if (!acpi_force)
                                disable_acpi();
                        acpi_ht = 1;
                }
                else if (!memcmp(from, "pci=noacpi", 10))
                        acpi_disable_pci();
                else if (!memcmp(from, "acpi=noirq", 10))
                        acpi_noirq_set();

                else if (!memcmp(from, "acpi_sci=edge", 13))
                        acpi_sci_flags.trigger = 1;
                else if (!memcmp(from, "acpi_sci=level", 14))
                        acpi_sci_flags.trigger = 3;
                else if (!memcmp(from, "acpi_sci=high", 13))
                        acpi_sci_flags.polarity = 1;
                else if (!memcmp(from, "acpi_sci=low", 12))
                        acpi_sci_flags.polarity = 3;

                /* acpi=strict disables out-of-spec workarounds */
                else if (!memcmp(from, "acpi=strict", 11)) {
                        acpi_strict = 1;
                }
#ifdef CONFIG_X86_IO_APIC
                else if (!memcmp(from, "acpi_skip_timer_override", 24))
                        acpi_skip_timer_override = 1;
#endif
#endif

                if (!memcmp(from, "disable_timer_pin_1", 19))
                        disable_timer_pin_1 = 1;
                if (!memcmp(from, "enable_timer_pin_1", 18))
                        disable_timer_pin_1 = -1;

                if (!memcmp(from, "nolapic", 7) ||
                    !memcmp(from, "disableapic", 11))
                        disable_apic = 1;

                /* Don't confuse with noapictimer */
                if (!memcmp(from, "noapic", 6) &&
                    (from[6] == ' ' || from[6] == 0))
                        skip_ioapic_setup = 1;

                /* Make sure to not confuse with apic= */
                if (!memcmp(from, "apic", 4) &&
                    (from[4] == ' ' || from[4] == 0)) {
                        skip_ioapic_setup = 0;
                        ioapic_force = 1;
                }

                if (!memcmp(from, "mem=", 4))
                        parse_memopt(from+4, &from);

                if (!memcmp(from, "memmap=", 7)) {
                        /* the exactmap option is for a user-defined memory map */
                        if (!memcmp(from+7, "exactmap", 8)) {
#ifdef CONFIG_CRASH_DUMP
                                /* If we are doing a crash dump, we
                                 * still need to know the real memory
                                 * size before the original memory map
                                 * is reset.
                                 */
                                saved_max_pfn = e820_end_of_ram();
#endif
                                from += 8+7;
                                end_pfn_map = 0;
                                e820.nr_map = 0;
                                userdef = 1;
                        }
                        else {
                                parse_memmapopt(from+7, &from);
                                userdef = 1;
                        }
                }

#ifdef CONFIG_NUMA
                if (!memcmp(from, "numa=", 5))
                        numa_setup(from+5);
#endif

                if (!memcmp(from, "iommu=", 6)) {
                        iommu_setup(from+6);
                }

                if (!memcmp(from, "oops=panic", 10))
                        panic_on_oops = 1;

                if (!memcmp(from, "noexec=", 7))
                        nonx_setup(from + 7);

#ifdef CONFIG_KEXEC
                /* crashkernel=size@addr (e.g. crashkernel=64M@16M) specifies
                 * the location to reserve for a crash kernel. By reserving
                 * this memory we guarantee that linux never sets it up as a
                 * DMA target. Useful for holding code to do something
                 * appropriate after a kernel panic.
                 */
                else if (!memcmp(from, "crashkernel=", 12)) {
                        unsigned long size, base;
                        size = memparse(from+12, &from);
                        if (*from == '@') {
                                base = memparse(from+1, &from);
                                /* FIXME: Do I want a sanity check
                                 * to validate the memory range?
                                 */
                                crashk_res.start = base;
                                crashk_res.end = base + size - 1;
                        }
                }
#endif

#ifdef CONFIG_PROC_VMCORE
                /* elfcorehdr= specifies the location of the elf core header
                 * stored by the crashed kernel. This option will be passed
                 * by the kexec loader to the capture kernel.
                 */
                else if (!memcmp(from, "elfcorehdr=", 11))
                        elfcorehdr_addr = memparse(from+11, &from);
#endif

#ifdef CONFIG_HOTPLUG_CPU
                else if (!memcmp(from, "additional_cpus=", 16))
                        setup_additional_cpus(from+16);
#endif

        next_char:
                c = *(from++);
                if (!c)
                        break;
                if (COMMAND_LINE_SIZE <= ++len)
                        break;
                *(to++) = c;
        }
        if (userdef) {
                printk(KERN_INFO "user-defined physical RAM map:\n");
                e820_print_map("user");
        }
        *to = '\0';
        *cmdline_p = command_line;
}

#ifndef CONFIG_NUMA
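/*
 * Flat (non-NUMA) bootmem bootstrap: find a free e820 area big enough
 * for the bootmem bitmap, mark all usable RAM free in it, then reserve
 * the bitmap pages themselves.
 */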
static void __init
contig_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
{
        unsigned long bootmap_size, bootmap;

        bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
        bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size);
        if (bootmap == -1L)
                panic("Cannot find bootmem map of size %ld\n", bootmap_size);
        bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, end_pfn);
        e820_bootmem_free(NODE_DATA(0), 0, end_pfn << PAGE_SHIFT);
        reserve_bootmem(bootmap, bootmap_size);
}
#endif

/* Use inline assembly to define this because the nops are defined
   as inline assembly strings in the include files and we cannot
   easily get them into C strings otherwise. */
asm("\t.data\nk8nops: "
    K8_NOP1 K8_NOP2 K8_NOP3 K8_NOP4 K8_NOP5 K8_NOP6
    K8_NOP7 K8_NOP8);

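/*
 * k8nops holds the 1- through 8-byte K8 NOPs back to back, so the
 * i-byte NOP starts at offset 0+1+2+...+(i-1); k8_nops[i] indexes it
 * via those prefix sums.
 */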
extern unsigned char k8nops[];
static unsigned char *k8_nops[ASM_NOP_MAX+1] = {
        NULL,
        k8nops,
        k8nops + 1,
        k8nops + 1 + 2,
        k8nops + 1 + 2 + 3,
        k8nops + 1 + 2 + 3 + 4,
        k8nops + 1 + 2 + 3 + 4 + 5,
        k8nops + 1 + 2 + 3 + 4 + 5 + 6,
        k8nops + 1 + 2 + 3 + 4 + 5 + 6 + 7,
};

extern char __vsyscall_0;

/* Replace instructions with better alternatives for this CPU type.

   This runs before SMP is initialized to avoid SMP problems with
   self-modifying code. This implies that asymmetric systems where
   APs have fewer capabilities than the boot processor are not handled.
   In this case boot with "noreplacement". */
void apply_alternatives(void *start, void *end)
{
        struct alt_instr *a;
        int diff, i, k;
        for (a = start; (void *)a < end; a++) {
                u8 *instr;

                if (!boot_cpu_has(a->cpuid))
                        continue;

                BUG_ON(a->replacementlen > a->instrlen);
                instr = a->instr;
                /* vsyscall code is not mapped yet. Resolve it manually. */
                if (instr >= (u8 *)VSYSCALL_START && instr < (u8 *)VSYSCALL_END)
                        instr = __va(instr - (u8 *)VSYSCALL_START + (u8 *)__pa_symbol(&__vsyscall_0));
                __inline_memcpy(instr, a->replacement, a->replacementlen);
                diff = a->instrlen - a->replacementlen;

                /* Pad the rest with nops */
                for (i = a->replacementlen; diff > 0; diff -= k, i += k) {
                        k = diff;
                        if (k > ASM_NOP_MAX)
                                k = ASM_NOP_MAX;
                        __inline_memcpy(instr + i, k8_nops[k], k);
                }
        }
}

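/*
 * The records walked above live in the .altinstructions section,
 * emitted by the alternative() family of macros; "noreplacement" on
 * the command line keeps the original instructions instead.
 */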
static int no_replacement __initdata = 0;

void __init alternative_instructions(void)
{
        extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
        if (no_replacement)
                return;
        apply_alternatives(__alt_instructions, __alt_instructions_end);
}

static int __init noreplacement_setup(char *s)
{
        no_replacement = 1;
        return 0;
}

__setup("noreplacement", noreplacement_setup);

#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
struct edd edd;
#ifdef CONFIG_EDD_MODULE
EXPORT_SYMBOL(edd);
#endif
/**
 * copy_edd() - Copy the BIOS EDD information
 *              from boot_params into a safe place.
 *
 */
static inline void copy_edd(void)
{
        memcpy(edd.mbr_signature, EDD_MBR_SIGNATURE, sizeof(edd.mbr_signature));
        memcpy(edd.edd_info, EDD_BUF, sizeof(edd.edd_info));
        edd.mbr_signature_nr = EDD_MBR_SIG_NR;
        edd.edd_info_nr = EDD_NR;
}
#else
static inline void copy_edd(void)
{
}
#endif

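/*
 * 0x40E is a word in the BIOS Data Area holding the real-mode segment
 * of the Extended BIOS Data Area; shifting the segment left by four
 * yields the physical base address of the EBDA.
 */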
#define EBDA_ADDR_POINTER 0x40E
static void __init reserve_ebda_region(void)
{
        unsigned int addr;
        /*
         * there is a real-mode segmented pointer pointing to the
         * 4K EBDA area at 0x40E
         */
        addr = *(unsigned short *)phys_to_virt(EBDA_ADDR_POINTER);
        addr <<= 4;
        if (addr)
                reserve_bootmem_generic(addr, PAGE_SIZE);
}

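/*
 * setup_arch() runs on the boot CPU, roughly in this order: capture the
 * boot parameters, build the e820 memory map, parse early command-line
 * options, set up the direct mapping and the bootmem allocator, reserve
 * the special low-memory regions, then bring up the ACPI/APIC tables
 * and claim the standard I/O and ROM resources.
 */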
void __init setup_arch(char **cmdline_p)
{
        unsigned long kernel_end;

        ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);
        screen_info = SCREEN_INFO;
        edid_info = EDID_INFO;
        saved_video_mode = SAVED_VIDEO_MODE;
        bootloader_type = LOADER_TYPE;

#ifdef CONFIG_BLK_DEV_RAM
        rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
        rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
        rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
#endif
        setup_memory_region();
        copy_edd();

        if (!MOUNT_ROOT_RDONLY)
                root_mountflags &= ~MS_RDONLY;
        init_mm.start_code = (unsigned long) &_text;
        init_mm.end_code = (unsigned long) &_etext;
        init_mm.end_data = (unsigned long) &_edata;
        init_mm.brk = (unsigned long) &_end;

        code_resource.start = virt_to_phys(&_text);
        code_resource.end = virt_to_phys(&_etext)-1;
        data_resource.start = virt_to_phys(&_etext);
        data_resource.end = virt_to_phys(&_edata)-1;

        parse_cmdline_early(cmdline_p);

        early_identify_cpu(&boot_cpu_data);

        /*
         * partially used pages are not usable - thus
         * we are rounding upwards:
         */
        end_pfn = e820_end_of_ram();

        check_efer();

        init_memory_mapping(0, (end_pfn_map << PAGE_SHIFT));

        zap_low_mappings(0);

#ifdef CONFIG_ACPI
        /*
         * Initialize the ACPI boot-time table parser (gets the RSDP and SDT).
         * Call this early for SRAT node setup.
         */
        acpi_boot_table_init();
#endif

#ifdef CONFIG_ACPI_NUMA
        /*
         * Parse SRAT to discover nodes.
         */
        acpi_numa_init();
#endif

#ifdef CONFIG_NUMA
        numa_initmem_init(0, end_pfn);
#else
        contig_initmem_init(0, end_pfn);
#endif

        /* Reserve direct mapping */
        reserve_bootmem_generic(table_start << PAGE_SHIFT,
                                (table_end - table_start) << PAGE_SHIFT);

        /* reserve kernel */
        kernel_end = round_up(__pa_symbol(&_end), PAGE_SIZE);
        reserve_bootmem_generic(HIGH_MEMORY, kernel_end - HIGH_MEMORY);

        /*
         * reserve physical page 0 - it's a special BIOS page on many boxes,
         * enabling clean reboots, SMP operation, laptop functions.
         */
        reserve_bootmem_generic(0, PAGE_SIZE);

        /* reserve ebda region */
        reserve_ebda_region();

#ifdef CONFIG_SMP
        /*
         * But first pinch a few for the stack/trampoline stuff
         * FIXME: Don't need the extra page at 4K, but need to fix
         * trampoline before removing it. (see the GDT stuff)
         */
        reserve_bootmem_generic(PAGE_SIZE, PAGE_SIZE);

        /* Reserve SMP trampoline */
        reserve_bootmem_generic(SMP_TRAMPOLINE_BASE, PAGE_SIZE);
#endif

#ifdef CONFIG_ACPI_SLEEP
        /*
         * Reserve low memory region for sleep support.
         */
        acpi_reserve_bootmem();
#endif
#ifdef CONFIG_X86_LOCAL_APIC
        /*
         * Find and reserve possible boot-time SMP configuration:
         */
        find_smp_config();
#endif
#ifdef CONFIG_BLK_DEV_INITRD
        if (LOADER_TYPE && INITRD_START) {
                if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) {
                        reserve_bootmem_generic(INITRD_START, INITRD_SIZE);
                        initrd_start =
                                INITRD_START ? INITRD_START + PAGE_OFFSET : 0;
                        initrd_end = initrd_start+INITRD_SIZE;
                }
                else {
                        printk(KERN_ERR "initrd extends beyond end of memory "
                               "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
                               (unsigned long)(INITRD_START + INITRD_SIZE),
                               (unsigned long)(end_pfn << PAGE_SHIFT));
                        initrd_start = 0;
                }
        }
#endif
#ifdef CONFIG_KEXEC
        if (crashk_res.start != crashk_res.end) {
                reserve_bootmem(crashk_res.start,
                                crashk_res.end - crashk_res.start + 1);
        }
#endif

        paging_init();

        check_ioapic();

#ifdef CONFIG_ACPI
        /*
         * Read APIC and some other early information from ACPI tables.
         */
        acpi_boot_init();
#endif

        init_cpu_to_node();

#ifdef CONFIG_X86_LOCAL_APIC
        /*
         * get boot-time SMP configuration:
         */
        if (smp_found_config)
                get_smp_config();
        init_apic_mappings();
#endif

        /*
         * Request address space for all standard RAM and ROM resources
         * and also for regions reported as reserved by the e820.
         */
        probe_roms();
        e820_reserve_resources();

        request_resource(&iomem_resource, &video_ram_resource);

        {
                unsigned i;
                /* request I/O space for devices used on all i[345]86 PCs */
                for (i = 0; i < STANDARD_IO_RESOURCES; i++)
                        request_resource(&ioport_resource, &standard_io_resources[i]);
        }

        e820_setup_gap();

#ifdef CONFIG_GART_IOMMU
        iommu_hole_init();
#endif

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
        conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
        conswitchp = &dummy_con;
#endif
#endif
}

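/*
 * CPUID leaves 0x80000002-0x80000004 each return 16 bytes of the
 * 48-character processor brand string, written here straight into
 * x86_model_id.
 */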
static int __cpuinit get_model_name(struct cpuinfo_x86 *c)
{
        unsigned int *v;

        if (c->extended_cpuid_level < 0x80000004)
                return 0;

        v = (unsigned int *) c->x86_model_id;
        cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
        cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
        cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
        c->x86_model_id[48] = 0;
        return 1;
}


static void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
{
        unsigned int n, dummy, eax, ebx, ecx, edx;

        n = c->extended_cpuid_level;

        if (n >= 0x80000005) {
                cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
                printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
                       edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
                c->x86_cache_size = (ecx>>24) + (edx>>24);
                /* On K8 L1 TLB is inclusive, so don't count it */
                c->x86_tlbsize = 0;
        }

        if (n >= 0x80000006) {
                cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
                ecx = cpuid_ecx(0x80000006);
                c->x86_cache_size = ecx >> 16;
                c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);

                printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
                       c->x86_cache_size, ecx & 0xFF);
        }

        if (n >= 0x80000007)
                cpuid(0x80000007, &dummy, &dummy, &dummy, &c->x86_power);
        if (n >= 0x80000008) {
                cpuid(0x80000008, &eax, &dummy, &dummy, &dummy);
                c->x86_virt_bits = (eax >> 8) & 0xff;
                c->x86_phys_bits = eax & 0xff;
        }
}

#ifdef CONFIG_NUMA
static int nearby_node(int apicid)
{
        int i;
        for (i = apicid - 1; i >= 0; i--) {
                int node = apicid_to_node[i];
                if (node != NUMA_NO_NODE && node_online(node))
                        return node;
        }
        for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
                int node = apicid_to_node[i];
                if (node != NUMA_NO_NODE && node_online(node))
                        return node;
        }
        return first_node(node_online_map); /* Shouldn't happen */
}
#endif

/*
 * On an AMD dual-core setup the lower bits of the APIC ID distinguish
 * the cores. Assumes the number of cores is a power of two; e.g. with
 * two cores per package, APIC ID 5 is package 2, core 1.
 */
static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
        int cpu = smp_processor_id();
        unsigned bits;
#ifdef CONFIG_NUMA
        int node = 0;
        unsigned apicid = phys_proc_id[cpu];
#endif

        bits = 0;
        while ((1 << bits) < c->x86_max_cores)
                bits++;

        /* Low order bits define the core id (index of core in socket) */
        cpu_core_id[cpu] = phys_proc_id[cpu] & ((1 << bits)-1);
        /* Convert the APIC ID into the socket ID */
        phys_proc_id[cpu] >>= bits;

#ifdef CONFIG_NUMA
        node = phys_proc_id[cpu];
        if (apicid_to_node[apicid] != NUMA_NO_NODE)
                node = apicid_to_node[apicid];
        if (!node_online(node)) {
                /* Two possibilities here:
                   - The CPU is missing memory and no node was created.
                     In that case try picking one from a nearby CPU.
                   - The APIC IDs differ from the HyperTransport node IDs
                     which the K8 northbridge parsing fills in.
                     Assume they are all increased by a constant offset,
                     but in the same order as the HT nodeids.
                     If that doesn't result in a usable node fall back to
                     the path for the previous case. */
                int ht_nodeid = apicid - (phys_proc_id[0] << bits);
                if (ht_nodeid >= 0 &&
                    apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
                        node = apicid_to_node[ht_nodeid];
                /* Pick a nearby node */
                if (!node_online(node))
                        node = nearby_node(apicid);
        }
        numa_set_node(cpu, node);

        printk(KERN_INFO "CPU %d(%d) -> Node %d -> Core %d\n",
               cpu, c->x86_max_cores, node, cpu_core_id[cpu]);
#endif
#endif
}

static int __init init_amd(struct cpuinfo_x86 *c)
{
        int r;
        unsigned level;

#ifdef CONFIG_SMP
        unsigned long value;

        /*
         * Disable TLB flush filter by setting HWCR.FFDIS on K8
         * bit 6 of msr C001_0015
         *
         * Errata 63 for SH-B3 steppings
         * Errata 122 for all steppings (F+ have it disabled by default)
         */
        if (c->x86 == 15) {
                rdmsrl(MSR_K8_HWCR, value);
                value |= 1 << 6;
                wrmsrl(MSR_K8_HWCR, value);
        }
#endif

        /* Bit 31 in normal CPUID is used for a nonstandard 3DNow ID;
           3DNow is identified by bit 31 in extended CPUID (1*32+31) anyway */
        clear_bit(0*32+31, &c->x86_capability);

        /* On C+ stepping K8 rep microcode works well for copy/memset */
        level = cpuid_eax(1);
        if (c->x86 == 15 && ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58))
                set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);

        r = get_model_name(c);
        if (!r) {
                switch (c->x86) {
                case 15:
                        /* Should distinguish models here, but this is only
                           a fallback anyway. */
                        strcpy(c->x86_model_id, "Hammer");
                        break;
                }
        }
        display_cacheinfo(c);

        /* c->x86_power is 8000_0007 edx. Bit 8 is constant TSC */
        if (c->x86_power & (1<<8))
                set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);

        if (c->extended_cpuid_level >= 0x80000008) {
                c->x86_max_cores = (cpuid_ecx(0x80000008) & 0xff) + 1;
                if (c->x86_max_cores & (c->x86_max_cores - 1))
                        c->x86_max_cores = 1;

                amd_detect_cmp(c);
        }

        return r;
}

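/*
 * CPUID leaf 1, EBX[23:16] gives the number of logical processors per
 * physical package; dividing out x86_max_cores leaves the number of HT
 * siblings per core, from which the package and core IDs are derived
 * below.
 */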
static void __cpuinit detect_ht(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
        u32 eax, ebx, ecx, edx;
        int index_msb, core_bits;
        int cpu = smp_processor_id();

        cpuid(1, &eax, &ebx, &ecx, &edx);

        c->apicid = phys_pkg_id(0);

        if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY))
                return;

        smp_num_siblings = (ebx & 0xff0000) >> 16;

        if (smp_num_siblings == 1) {
                printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
        } else if (smp_num_siblings > 1) {

                if (smp_num_siblings > NR_CPUS) {
                        printk(KERN_WARNING "CPU: Unsupported number of siblings %d\n",
                               smp_num_siblings);
                        smp_num_siblings = 1;
                        return;
                }

                index_msb = get_count_order(smp_num_siblings);
                phys_proc_id[cpu] = phys_pkg_id(index_msb);

                printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
                       phys_proc_id[cpu]);

                smp_num_siblings = smp_num_siblings / c->x86_max_cores;

                index_msb = get_count_order(smp_num_siblings);

                core_bits = get_count_order(c->x86_max_cores);

                cpu_core_id[cpu] = phys_pkg_id(index_msb) &
                                   ((1 << core_bits) - 1);

                if (c->x86_max_cores > 1)
                        printk(KERN_INFO "CPU: Processor Core ID: %d\n",
                               cpu_core_id[cpu]);
        }
#endif
}

/*
 * Find out the number of processor cores on the die.
 */
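/* CPUID leaf 4 (deterministic cache parameters): EAX[31:26] holds the
   maximum core ID in the package, so adding one yields the core count. */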
static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
{
        unsigned int eax;

        if (c->cpuid_level < 4)
                return 1;

        __asm__("cpuid"
                : "=a" (eax)
                : "0" (4), "c" (0)
                : "bx", "dx");

        if (eax & 0x1f)
                return ((eax >> 26) + 1);
        else
                return 1;
}

static void srat_detect_node(void)
{
#ifdef CONFIG_NUMA
        unsigned node;
        int cpu = smp_processor_id();

        /* For now, don't do the funky fallback heuristics that the
           AMD version employs. */
        node = apicid_to_node[hard_smp_processor_id()];
        if (node == NUMA_NO_NODE)
                node = 0;
        numa_set_node(cpu, node);

        if (acpi_numa > 0)
                printk(KERN_INFO "CPU %d -> Node %d\n", cpu, node);
#endif
}

static void __cpuinit init_intel(struct cpuinfo_x86 *c)
{
        /* Cache sizes */
        unsigned n;

        init_intel_cacheinfo(c);
        n = c->extended_cpuid_level;
        if (n >= 0x80000008) {
                unsigned eax = cpuid_eax(0x80000008);
                c->x86_virt_bits = (eax >> 8) & 0xff;
                c->x86_phys_bits = eax & 0xff;
                /* CPUID workaround for Intel 0F34 CPU */
                if (c->x86_vendor == X86_VENDOR_INTEL &&
                    c->x86 == 0xF && c->x86_model == 0x3 &&
                    c->x86_mask == 0x4)
                        c->x86_phys_bits = 36;
        }

        if (c->x86 == 15)
                c->x86_cache_alignment = c->x86_clflush_size * 2;
        if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
            (c->x86 == 0x6 && c->x86_model >= 0x0e))
                set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
        set_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
        c->x86_max_cores = intel_num_cpu_cores(c);

        srat_detect_node();
}

static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
{
        char *v = c->x86_vendor_id;

        if (!strcmp(v, "AuthenticAMD"))
                c->x86_vendor = X86_VENDOR_AMD;
        else if (!strcmp(v, "GenuineIntel"))
                c->x86_vendor = X86_VENDOR_INTEL;
        else
                c->x86_vendor = X86_VENDOR_UNKNOWN;
}

struct cpu_model_info {
        int vendor;
        int family;
        char *model_names[16];
};

/* Do some early cpuid on the boot CPU to get the parameters that are
   needed before check_bugs. Everything advanced is in identify_cpu
   below. */
void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
{
        u32 tfms;

        c->loops_per_jiffy = loops_per_jiffy;
        c->x86_cache_size = -1;
        c->x86_vendor = X86_VENDOR_UNKNOWN;
        c->x86_model = c->x86_mask = 0; /* So far unknown... */
        c->x86_vendor_id[0] = '\0';     /* Unset */
        c->x86_model_id[0] = '\0';      /* Unset */
        c->x86_clflush_size = 64;
        c->x86_cache_alignment = c->x86_clflush_size;
        c->x86_max_cores = 1;
        c->extended_cpuid_level = 0;
        memset(&c->x86_capability, 0, sizeof c->x86_capability);

        /* Get vendor name */
        cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
              (unsigned int *)&c->x86_vendor_id[0],
              (unsigned int *)&c->x86_vendor_id[8],
              (unsigned int *)&c->x86_vendor_id[4]);

        get_cpu_vendor(c);

        /* Initialize the standard set of capabilities */
        /* Note that the vendor-specific code below might override */

        /* Intel-defined flags: level 0x00000001 */
        if (c->cpuid_level >= 0x00000001) {
                __u32 misc;
                cpuid(0x00000001, &tfms, &misc, &c->x86_capability[4],
                      &c->x86_capability[0]);
                c->x86 = (tfms >> 8) & 0xf;
                c->x86_model = (tfms >> 4) & 0xf;
                c->x86_mask = tfms & 0xf;
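                /*
                 * Family 0xf CPUs encode the rest in the extended
                 * CPUID fields: family += EAX[27:20], and for family
                 * >= 6 the model gets EAX[19:16] as its high nibble.
                 */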
                if (c->x86 == 0xf)
                        c->x86 += (tfms >> 20) & 0xff;
                if (c->x86 >= 0x6)
                        c->x86_model += ((tfms >> 16) & 0xF) << 4;
                if (c->x86_capability[0] & (1<<19))
                        c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
        } else {
                /* Have CPUID level 0 only - unheard of */
                c->x86 = 4;
        }

#ifdef CONFIG_SMP
        phys_proc_id[smp_processor_id()] = (cpuid_ebx(1) >> 24) & 0xff;
#endif
}

/*
 * This does the hard work of actually picking apart the CPU stuff...
 */
void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
{
        int i;
        u32 xlvl;

        early_identify_cpu(c);

        /* AMD-defined flags: level 0x80000001 */
        xlvl = cpuid_eax(0x80000000);
        c->extended_cpuid_level = xlvl;
        if ((xlvl & 0xffff0000) == 0x80000000) {
                if (xlvl >= 0x80000001) {
                        c->x86_capability[1] = cpuid_edx(0x80000001);
                        c->x86_capability[6] = cpuid_ecx(0x80000001);
                }
                if (xlvl >= 0x80000004)
                        get_model_name(c); /* Default name */
        }

        /* Transmeta-defined flags: level 0x80860001 */
        xlvl = cpuid_eax(0x80860000);
        if ((xlvl & 0xffff0000) == 0x80860000) {
                /* Don't set extended_cpuid_level here for now, to avoid confusion. */
                if (xlvl >= 0x80860001)
                        c->x86_capability[2] = cpuid_edx(0x80860001);
        }

        /*
         * Vendor-specific initialization. In this section we
         * canonicalize the feature flags, meaning if there are
         * features a certain CPU supports which CPUID doesn't
         * tell us, CPUID claiming incorrect flags, or other bugs,
         * we handle them here.
         *
         * At the end of this section, c->x86_capability had better
         * indicate the features this CPU genuinely supports!
         */
        switch (c->x86_vendor) {
        case X86_VENDOR_AMD:
                init_amd(c);
                break;

        case X86_VENDOR_INTEL:
                init_intel(c);
                break;

        case X86_VENDOR_UNKNOWN:
        default:
                display_cacheinfo(c);
                break;
        }

        select_idle_routine(c);
        detect_ht(c);

        /*
         * On SMP, boot_cpu_data holds the common feature set between
         * all CPUs; so make sure that we indicate which features are
         * common between the CPUs. The first time this routine gets
         * executed, c == &boot_cpu_data.
         */
        if (c != &boot_cpu_data) {
                /* AND the already accumulated flags with these */
                for (i = 0; i < NCAPINTS; i++)
                        boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
        }

#ifdef CONFIG_X86_MCE
        mcheck_init(c);
#endif
        if (c == &boot_cpu_data)
                mtrr_bp_init();
        else
                mtrr_ap_init();
#ifdef CONFIG_NUMA
        numa_add_cpu(smp_processor_id());
#endif
}


void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
{
        if (c->x86_model_id[0])
                printk("%s", c->x86_model_id);

        if (c->x86_mask || c->cpuid_level >= 0)
                printk(" stepping %02x\n", c->x86_mask);
        else
                printk("\n");
}

/*
 * Get CPU information for use by procfs.
 */

static int show_cpuinfo(struct seq_file *m, void *v)
{
        struct cpuinfo_x86 *c = v;

        /*
         * These flag bits must match the definitions in <asm/cpufeature.h>.
         * NULL means this bit is undefined or reserved; either way it doesn't
         * have meaning as far as Linux is concerned. Note that it's important
         * to realize there is a difference between this table and CPUID -- if
         * applications want to get the raw CPUID data, they should access
         * /dev/cpu/<cpu_nr>/cpuid instead.
         */
        static char *x86_cap_flags[] = {
                /* Intel-defined */
                "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
                "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
                "pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx",
                "fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", NULL,

                /* AMD-defined */
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, "nx", NULL, "mmxext", NULL,
                NULL, "fxsr_opt", "rdtscp", NULL, NULL, "lm", "3dnowext", "3dnow",

                /* Transmeta-defined */
                "recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

                /* Other (Linux-defined) */
                "cxmmx", NULL, "cyrix_arr", "centaur_mcr", NULL,
                "constant_tsc", NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

                /* Intel-defined (#2) */
                "pni", NULL, NULL, "monitor", "ds_cpl", "vmx", NULL, "est",
                "tm2", NULL, "cid", NULL, NULL, "cx16", "xtpr", NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

                /* VIA/Cyrix/Centaur-defined */
                NULL, NULL, "rng", "rng_en", NULL, NULL, "ace", "ace_en",
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,

                /* AMD-defined (#2) */
                "lahf_lm", "cmp_legacy", "svm", NULL, "cr8_legacy", NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
                NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
        };
        static char *x86_power_flags[] = {
                "ts",   /* temperature sensor */
                "fid",  /* frequency id control */
                "vid",  /* voltage id control */
                "ttp",  /* thermal trip */
                "tm",
                "stc",
                NULL,
                /* nothing */   /* constant_tsc - moved to flags */
        };


#ifdef CONFIG_SMP
        if (!cpu_online(c-cpu_data))
                return 0;
#endif

        seq_printf(m, "processor\t: %u\n"
                   "vendor_id\t: %s\n"
                   "cpu family\t: %d\n"
                   "model\t\t: %d\n"
                   "model name\t: %s\n",
                   (unsigned)(c-cpu_data),
                   c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
                   c->x86,
                   (int)c->x86_model,
                   c->x86_model_id[0] ? c->x86_model_id : "unknown");

        if (c->x86_mask || c->cpuid_level >= 0)
                seq_printf(m, "stepping\t: %d\n", c->x86_mask);
        else
                seq_printf(m, "stepping\t: unknown\n");

        if (cpu_has(c, X86_FEATURE_TSC)) {
                unsigned int freq = cpufreq_quick_get((unsigned)(c-cpu_data));
                if (!freq)
                        freq = cpu_khz;
                seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
                           freq / 1000, (freq % 1000));
        }

        /* Cache size */
        if (c->x86_cache_size >= 0)
                seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);

#ifdef CONFIG_SMP
        if (smp_num_siblings * c->x86_max_cores > 1) {
                int cpu = c - cpu_data;
                seq_printf(m, "physical id\t: %d\n", phys_proc_id[cpu]);
                seq_printf(m, "siblings\t: %d\n", cpus_weight(cpu_core_map[cpu]));
                seq_printf(m, "core id\t\t: %d\n", cpu_core_id[cpu]);
                seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
        }
#endif

        seq_printf(m,
                   "fpu\t\t: yes\n"
                   "fpu_exception\t: yes\n"
                   "cpuid level\t: %d\n"
                   "wp\t\t: yes\n"
                   "flags\t\t:",
                   c->cpuid_level);

        {
                int i;
                for (i = 0; i < 32*NCAPINTS; i++)
                        if (test_bit(i, &c->x86_capability) &&
                            x86_cap_flags[i] != NULL)
                                seq_printf(m, " %s", x86_cap_flags[i]);
        }

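        /* BogoMIPS = loops_per_jiffy * HZ / 500000; the two divisions
           below print the integer part and two decimal places. */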
        seq_printf(m, "\nbogomips\t: %lu.%02lu\n",
                   c->loops_per_jiffy/(500000/HZ),
                   (c->loops_per_jiffy/(5000/HZ)) % 100);

        if (c->x86_tlbsize > 0)
                seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize);
        seq_printf(m, "clflush size\t: %d\n", c->x86_clflush_size);
        seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment);

        seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n",
                   c->x86_phys_bits, c->x86_virt_bits);

        seq_printf(m, "power management:");
        {
                unsigned i;
                for (i = 0; i < 32; i++)
                        if (c->x86_power & (1 << i)) {
                                if (i < ARRAY_SIZE(x86_power_flags) &&
                                    x86_power_flags[i])
                                        seq_printf(m, "%s%s",
                                                   x86_power_flags[i][0] ? " " : "",
                                                   x86_power_flags[i]);
                                else
                                        seq_printf(m, " [%d]", i);
                        }
        }

        seq_printf(m, "\n\n");

        return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
        return *pos < NR_CPUS ? cpu_data + *pos : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
        ++*pos;
        return c_start(m, pos);
}

static void c_stop(struct seq_file *m, void *v)
{
}

struct seq_operations cpuinfo_op = {
        .start = c_start,
        .next = c_next,
        .stop = c_stop,
        .show = show_cpuinfo,
};

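/*
 * DMI scanning is deferred to an initcall rather than done directly in
 * setup_arch(), presumably so that the BIOS tables can be ioremap()ed
 * once the mapping infrastructure is fully up.
 */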
static int __init run_dmi_scan(void)
{
        dmi_scan_machine();
        return 0;
}
core_initcall(run_dmi_scan);