/*
 * Copyright (C) 1995  Linus Torvalds
 *
 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *
 * Memory region support
 *	David Parsons <orc@pell.chi.il.us>, July-August 1999
 *
 * Added E820 sanitization routine (removes overlapping memory regions);
 *	Brian Moyle <bmoyle@mvista.com>, February 2001
 *
 * Moved CPU detection code to cpu/${cpu}.c
 *	Patrick Mochel <mochel@osdl.org>, March 2002
 *
 * Provisions for empty E820 memory regions (reported by certain BIOSes).
 * Alex Achenbach <xela@slit.de>, December 2002.
 *
 */

/*
 * This file handles the architecture-dependent parts of initialization
 */

#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/screen_info.h>
#include <linux/ioport.h>
#include <linux/acpi.h>
#include <linux/sfi.h>
#include <linux/apm_bios.h>
#include <linux/initrd.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/seq_file.h>
#include <linux/console.h>
#include <linux/root_dev.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/efi.h>
#include <linux/init.h>
#include <linux/edd.h>
#include <linux/iscsi_ibft.h>
#include <linux/nodemask.h>
#include <linux/kexec.h>
#include <linux/dmi.h>
#include <linux/pfn.h>
#include <linux/pci.h>
#include <asm/pci-direct.h>
#include <linux/init_ohci1394_dma.h>
#include <linux/kvm_para.h>
#include <linux/dma-contiguous.h>

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/delay.h>

#include <linux/kallsyms.h>
#include <linux/cpufreq.h>
#include <linux/dma-mapping.h>
#include <linux/ctype.h>
#include <linux/uaccess.h>

#include <linux/percpu.h>
#include <linux/crash_dump.h>
#include <linux/tboot.h>
#include <linux/jiffies.h>

#include <video/edid.h>

#include <asm/mtrr.h>
#include <asm/apic.h>
#include <asm/realmode.h>
#include <asm/e820.h>
#include <asm/mpspec.h>
#include <asm/setup.h>
#include <asm/efi.h>
#include <asm/timer.h>
#include <asm/i8259.h>
#include <asm/sections.h>
#include <asm/io_apic.h>
#include <asm/ist.h>
#include <asm/setup_arch.h>
#include <asm/bios_ebda.h>
#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/bugs.h>
#include <asm/kasan.h>

#include <asm/vsyscall.h>
#include <asm/cpu.h>
#include <asm/desc.h>
#include <asm/dma.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>

#include <asm/paravirt.h>
#include <asm/hypervisor.h>
#include <asm/olpc_ofw.h>

#include <asm/percpu.h>
#include <asm/topology.h>
#include <asm/apicdef.h>
#include <asm/amd_nb.h>
#include <asm/mce.h>
#include <asm/alternative.h>
#include <asm/prom.h>

/*
 * max_low_pfn_mapped: highest direct mapped pfn under 4GB
 * max_pfn_mapped:     highest direct mapped pfn over 4GB
 *
 * The direct mapping only covers E820_RAM regions, so the ranges and gaps are
 * represented by pfn_mapped
 */
unsigned long max_low_pfn_mapped;
unsigned long max_pfn_mapped;

#ifdef CONFIG_DMI
RESERVE_BRK(dmi_alloc, 65536);
#endif


static __initdata unsigned long _brk_start = (unsigned long)__brk_base;
unsigned long _brk_end = (unsigned long)__brk_base;

#ifdef CONFIG_X86_64
int default_cpu_present_to_apicid(int mps_cpu)
{
	return __default_cpu_present_to_apicid(mps_cpu);
}

int default_check_phys_apicid_present(int phys_apicid)
{
	return __default_check_phys_apicid_present(phys_apicid);
}
#endif

struct boot_params boot_params;

/*
 * Machine setup..
 */
static struct resource data_resource = {
	.name	= "Kernel data",
	.start	= 0,
	.end	= 0,
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
};

static struct resource code_resource = {
	.name	= "Kernel code",
	.start	= 0,
	.end	= 0,
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
};

static struct resource bss_resource = {
	.name	= "Kernel bss",
	.start	= 0,
	.end	= 0,
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
};


#ifdef CONFIG_X86_32
/* cpu data as detected by the assembly code in head.S */
struct cpuinfo_x86 new_cpu_data = {
	.wp_works_ok = -1,
};
/* common cpu data for all cpus */
struct cpuinfo_x86 boot_cpu_data __read_mostly = {
	.wp_works_ok = -1,
};
EXPORT_SYMBOL(boot_cpu_data);

unsigned int def_to_bigsmp;

/* for MCA, but anyone else can use it if they want */
unsigned int machine_id;
unsigned int machine_submodel_id;
unsigned int BIOS_revision;

struct apm_info apm_info;
EXPORT_SYMBOL(apm_info);

#if defined(CONFIG_X86_SPEEDSTEP_SMI) || \
	defined(CONFIG_X86_SPEEDSTEP_SMI_MODULE)
struct ist_info ist_info;
EXPORT_SYMBOL(ist_info);
#else
struct ist_info ist_info;
#endif

#else
struct cpuinfo_x86 boot_cpu_data __read_mostly = {
	.x86_phys_bits = MAX_PHYSMEM_BITS,
};
EXPORT_SYMBOL(boot_cpu_data);
#endif


#if !defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
__visible unsigned long mmu_cr4_features;
#else
__visible unsigned long mmu_cr4_features = X86_CR4_PAE;
#endif

/* Boot loader ID and version as integers, for the benefit of proc_dointvec */
int bootloader_type, bootloader_version;

/*
 * Setup options
 */
struct screen_info screen_info;
EXPORT_SYMBOL(screen_info);
struct edid_info edid_info;
EXPORT_SYMBOL_GPL(edid_info);

extern int root_mountflags;

unsigned long saved_video_mode;

#define RAMDISK_IMAGE_START_MASK	0x07FF
#define RAMDISK_PROMPT_FLAG		0x8000
#define RAMDISK_LOAD_FLAG		0x4000

static char __initdata command_line[COMMAND_LINE_SIZE];
#ifdef CONFIG_CMDLINE_BOOL
static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
#endif

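/*
 * EDD (BIOS Enhanced Disk Drive) disk signatures and parameters are
 * collected by the real-mode boot code and passed in boot_params;
 * copy_edd() snapshots them into the global 'edd' so they remain
 * available later in boot, independent of boot_params.
 */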
#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
struct edd edd;
#ifdef CONFIG_EDD_MODULE
EXPORT_SYMBOL(edd);
#endif
/**
 * copy_edd() - Copy the BIOS EDD information
 *              from boot_params into a safe place.
 *
 */
static inline void __init copy_edd(void)
{
	memcpy(edd.mbr_signature, boot_params.edd_mbr_sig_buffer,
	       sizeof(edd.mbr_signature));
	memcpy(edd.edd_info, boot_params.eddbuf, sizeof(edd.edd_info));
	edd.mbr_signature_nr = boot_params.edd_mbr_sig_buf_entries;
	edd.edd_info_nr = boot_params.eddbuf_entries;
}
#else
static inline void __init copy_edd(void)
{
}
#endif

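/*
 * extend_brk() hands out zeroed, aligned chunks from the early "brk"
 * region that follows the kernel image.  Space must be set aside at
 * compile time with RESERVE_BRK() (see the dmi_alloc reservation near
 * the top of this file).  A minimal usage sketch, with a hypothetical
 * reservation name:
 *
 *	RESERVE_BRK(my_early_buf, PAGE_SIZE);
 *	...
 *	void *buf = extend_brk(256, 64);	// 256 bytes, 64-byte aligned
 *
 * Once reserve_brk() clears _brk_start, further calls trip the BUG_ON().
 */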
void * __init extend_brk(size_t size, size_t align)
{
	size_t mask = align - 1;
	void *ret;

	BUG_ON(_brk_start == 0);
	BUG_ON(align & mask);

	_brk_end = (_brk_end + mask) & ~mask;
	BUG_ON((char *)(_brk_end + size) > __brk_limit);

	ret = (void *)_brk_end;
	_brk_end += size;

	memset(ret, 0, size);

	return ret;
}

#ifdef CONFIG_X86_32
static void __init cleanup_highmap(void)
{
}
#endif

static void __init reserve_brk(void)
{
	if (_brk_end > _brk_start)
		memblock_reserve(__pa_symbol(_brk_start),
				 _brk_end - _brk_start);

	/*
	 * Mark brk area as locked down and no longer taking any
	 * new allocations.
	 */
	_brk_start = 0;
}

u64 relocated_ramdisk;

#ifdef CONFIG_BLK_DEV_INITRD

static u64 __init get_ramdisk_image(void)
{
	u64 ramdisk_image = boot_params.hdr.ramdisk_image;

	ramdisk_image |= (u64)boot_params.ext_ramdisk_image << 32;

	return ramdisk_image;
}
static u64 __init get_ramdisk_size(void)
{
	u64 ramdisk_size = boot_params.hdr.ramdisk_size;

	ramdisk_size |= (u64)boot_params.ext_ramdisk_size << 32;

	return ramdisk_size;
}
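
/*
 * Worked example for the assembly of the 64-bit values above: an initrd
 * loaded at physical 0x123456000 arrives as hdr.ramdisk_image =
 * 0x23456000 (low 32 bits) with ext_ramdisk_image = 0x1 (high 32 bits);
 * the size is split the same way.
 */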

#define MAX_MAP_CHUNK	(NR_FIX_BTMAPS << PAGE_SHIFT)
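/*
 * relocate_initrd() copies in MAX_MAP_CHUNK pieces because
 * early_memremap() can only map a limited number of boot-time fixmap
 * pages at once (NR_FIX_BTMAPS); each chunk of the source is mapped,
 * copied into the already direct-mapped destination, then unmapped.
 */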
static void __init relocate_initrd(void)
{
	/* Assume only end is not page aligned */
	u64 ramdisk_image = get_ramdisk_image();
	u64 ramdisk_size  = get_ramdisk_size();
	u64 area_size     = PAGE_ALIGN(ramdisk_size);
	unsigned long slop, clen, mapaddr;
	char *p, *q;

	/* We need to move the initrd down into directly mapped memory */
	relocated_ramdisk = memblock_find_in_range(0, PFN_PHYS(max_pfn_mapped),
						   area_size, PAGE_SIZE);

	if (!relocated_ramdisk)
		panic("Cannot find place for new RAMDISK of size %lld\n",
		      ramdisk_size);

	/*
	 * Note: this includes all the memory currently occupied by
	 * the initrd; we rely on that fact to keep the data intact.
	 */
	memblock_reserve(relocated_ramdisk, area_size);
	initrd_start = relocated_ramdisk + PAGE_OFFSET;
	initrd_end   = initrd_start + ramdisk_size;
	printk(KERN_INFO "Allocated new RAMDISK: [mem %#010llx-%#010llx]\n",
	       relocated_ramdisk, relocated_ramdisk + ramdisk_size - 1);

	q = (char *)initrd_start;

	/* Copy the initrd */
	while (ramdisk_size) {
		slop = ramdisk_image & ~PAGE_MASK;
		clen = ramdisk_size;
		if (clen > MAX_MAP_CHUNK-slop)
			clen = MAX_MAP_CHUNK-slop;
		mapaddr = ramdisk_image & PAGE_MASK;
		p = early_memremap(mapaddr, clen+slop);
		memcpy(q, p+slop, clen);
		early_iounmap(p, clen+slop);
		q += clen;
		ramdisk_image += clen;
		ramdisk_size  -= clen;
	}

	ramdisk_image = get_ramdisk_image();
	ramdisk_size  = get_ramdisk_size();
	printk(KERN_INFO "Move RAMDISK from [mem %#010llx-%#010llx] to"
		" [mem %#010llx-%#010llx]\n",
		ramdisk_image, ramdisk_image + ramdisk_size - 1,
		relocated_ramdisk, relocated_ramdisk + ramdisk_size - 1);
}

static void __init early_reserve_initrd(void)
{
	/* Assume only end is not page aligned */
	u64 ramdisk_image = get_ramdisk_image();
	u64 ramdisk_size  = get_ramdisk_size();
	u64 ramdisk_end   = PAGE_ALIGN(ramdisk_image + ramdisk_size);

	if (!boot_params.hdr.type_of_loader ||
	    !ramdisk_image || !ramdisk_size)
		return;		/* No initrd provided by bootloader */

	memblock_reserve(ramdisk_image, ramdisk_end - ramdisk_image);
}
static void __init reserve_initrd(void)
{
	/* Assume only end is not page aligned */
	u64 ramdisk_image = get_ramdisk_image();
	u64 ramdisk_size  = get_ramdisk_size();
	u64 ramdisk_end   = PAGE_ALIGN(ramdisk_image + ramdisk_size);
	u64 mapped_size;

	if (!boot_params.hdr.type_of_loader ||
	    !ramdisk_image || !ramdisk_size)
		return;		/* No initrd provided by bootloader */

	initrd_start = 0;

	mapped_size = memblock_mem_size(max_pfn_mapped);
	if (ramdisk_size >= (mapped_size>>1))
		panic("initrd too large to handle, "
		      "disabling initrd (%lld needed, %lld available)\n",
		      ramdisk_size, mapped_size>>1);

	printk(KERN_INFO "RAMDISK: [mem %#010llx-%#010llx]\n", ramdisk_image,
	       ramdisk_end - 1);

	if (pfn_range_is_mapped(PFN_DOWN(ramdisk_image),
				PFN_DOWN(ramdisk_end))) {
		/* All are mapped, easy case */
		initrd_start = ramdisk_image + PAGE_OFFSET;
		initrd_end = initrd_start + ramdisk_size;
		return;
	}

	relocate_initrd();

	memblock_free(ramdisk_image, ramdisk_end - ramdisk_image);
}
#else
static void __init early_reserve_initrd(void)
{
}
static void __init reserve_initrd(void)
{
}
#endif /* CONFIG_BLK_DEV_INITRD */

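/*
 * boot_params.hdr.setup_data is the physical address of a singly linked
 * list of extra boot data blobs.  Each node begins with this header
 * (from <asm/bootparam.h>):
 *
 *	struct setup_data {
 *		__u64 next;	// phys addr of next node, 0 ends the list
 *		__u32 type;	// SETUP_E820_EXT, SETUP_DTB, SETUP_EFI, ...
 *		__u32 len;	// length of the payload in data[]
 *		__u8  data[0];	// payload
 *	};
 *
 * The nodes live at arbitrary physical addresses, so each one is mapped
 * with early_memremap() only while it is being examined.
 */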
static void __init parse_setup_data(void)
{
	struct setup_data *data;
	u64 pa_data, pa_next;

	pa_data = boot_params.hdr.setup_data;
	while (pa_data) {
		u32 data_len, data_type;

		data = early_memremap(pa_data, sizeof(*data));
		data_len = data->len + sizeof(struct setup_data);
		data_type = data->type;
		pa_next = data->next;
		early_iounmap(data, sizeof(*data));

		switch (data_type) {
		case SETUP_E820_EXT:
			parse_e820_ext(pa_data, data_len);
			break;
		case SETUP_DTB:
			add_dtb(pa_data);
			break;
		case SETUP_EFI:
			parse_efi_setup(pa_data, data_len);
			break;
		default:
			break;
		}
		pa_data = pa_next;
	}
}

static void __init e820_reserve_setup_data(void)
{
	struct setup_data *data;
	u64 pa_data;
	int found = 0;

	pa_data = boot_params.hdr.setup_data;
	while (pa_data) {
		data = early_memremap(pa_data, sizeof(*data));
		e820_update_range(pa_data, sizeof(*data)+data->len,
				  E820_RAM, E820_RESERVED_KERN);
		found = 1;
		pa_data = data->next;
		early_iounmap(data, sizeof(*data));
	}
	if (!found)
		return;

	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
	memcpy(&e820_saved, &e820, sizeof(struct e820map));
	printk(KERN_INFO "extended physical RAM map:\n");
	e820_print_map("reserve setup_data");
}

static void __init memblock_x86_reserve_range_setup_data(void)
{
	struct setup_data *data;
	u64 pa_data;

	pa_data = boot_params.hdr.setup_data;
	while (pa_data) {
		data = early_memremap(pa_data, sizeof(*data));
		memblock_reserve(pa_data, sizeof(*data) + data->len);
		pa_data = data->next;
		early_iounmap(data, sizeof(*data));
	}
}

/*
 * --------- Crashkernel reservation ------------------------------
 */

#ifdef CONFIG_KEXEC

/*
 * Keep the crash kernel below this limit.  On 32-bit, earlier kernels
 * would limit the crash kernel to the low 512 MiB due to mapping
 * restrictions.  On 64-bit, old kexec-tools need the crash kernel to be
 * below 896 MiB.
 */
#ifdef CONFIG_X86_32
# define CRASH_KERNEL_ADDR_LOW_MAX	(512 << 20)
# define CRASH_KERNEL_ADDR_HIGH_MAX	(512 << 20)
#else
# define CRASH_KERNEL_ADDR_LOW_MAX	(896UL<<20)
# define CRASH_KERNEL_ADDR_HIGH_MAX	MAXMEM
#endif

static void __init reserve_crashkernel_low(void)
{
#ifdef CONFIG_X86_64
	const unsigned long long alignment = 16<<20;	/* 16M */
	unsigned long long low_base = 0, low_size = 0;
	unsigned long total_low_mem;
	unsigned long long base;
	bool auto_set = false;
	int ret;

	total_low_mem = memblock_mem_size(1UL<<(32-PAGE_SHIFT));
	/* crashkernel=Y,low */
	ret = parse_crashkernel_low(boot_command_line, total_low_mem,
				    &low_size, &base);
	if (ret != 0) {
		/*
		 * Two parts from lib/swiotlb.c:
		 * - swiotlb size: user-specified with swiotlb= or default.
		 * - swiotlb overflow buffer: currently hardcoded to 32k.
		 * An extra 8M is added for other buffers that may need
		 * to stay low too.
		 */
		low_size = swiotlb_size_or_default() + (8UL<<20);
		auto_set = true;
	} else {
		/* passed with crashkernel=0,low ? */
		if (!low_size)
			return;
	}

	low_base = memblock_find_in_range(low_size, (1ULL<<32),
					  low_size, alignment);

	if (!low_base) {
		if (!auto_set)
			pr_info("crashkernel low reservation failed - No suitable area found.\n");

		return;
	}

	memblock_reserve(low_base, low_size);
	pr_info("Reserving %ldMB of low memory at %ldMB for crashkernel (System low RAM: %ldMB)\n",
		(unsigned long)(low_size >> 20),
		(unsigned long)(low_base >> 20),
		(unsigned long)(total_low_mem >> 20));
	crashk_low_res.start = low_base;
	crashk_low_res.end   = low_base + low_size - 1;
	insert_resource(&iomem_resource, &crashk_low_res);
#endif
}

static void __init reserve_crashkernel(void)
{
	const unsigned long long alignment = 16<<20;	/* 16M */
	unsigned long long total_mem;
	unsigned long long crash_size, crash_base;
	bool high = false;
	int ret;

	total_mem = memblock_phys_mem_size();

	/* crashkernel=XM */
	ret = parse_crashkernel(boot_command_line, total_mem,
				&crash_size, &crash_base);
	if (ret != 0 || crash_size <= 0) {
		/* crashkernel=X,high */
		ret = parse_crashkernel_high(boot_command_line, total_mem,
					     &crash_size, &crash_base);
		if (ret != 0 || crash_size <= 0)
			return;
		high = true;
	}

	/* 0 means: find the address automatically */
	if (crash_base <= 0) {
		/*
		 * kexec wants the bzImage below CRASH_KERNEL_ADDR_MAX.
		 */
		crash_base = memblock_find_in_range(alignment,
					high ? CRASH_KERNEL_ADDR_HIGH_MAX :
					       CRASH_KERNEL_ADDR_LOW_MAX,
					crash_size, alignment);

		if (!crash_base) {
			pr_info("crashkernel reservation failed - No suitable area found.\n");
			return;
		}

	} else {
		unsigned long long start;

		start = memblock_find_in_range(crash_base,
					       crash_base + crash_size,
					       crash_size, 1<<20);
		if (start != crash_base) {
			pr_info("crashkernel reservation failed - memory is in use.\n");
			return;
		}
	}
	memblock_reserve(crash_base, crash_size);

	printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
			 "for crashkernel (System RAM: %ldMB)\n",
			(unsigned long)(crash_size >> 20),
			(unsigned long)(crash_base >> 20),
			(unsigned long)(total_mem >> 20));

	crashk_res.start = crash_base;
	crashk_res.end   = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);

	if (crash_base >= (1ULL<<32))
		reserve_crashkernel_low();
}
#else
static void __init reserve_crashkernel(void)
{
}
#endif
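
/*
 * Command-line forms handled above (see the crashkernel= entry in the
 * kernel parameters documentation):
 *
 *	crashkernel=256M	auto-placed 256 MiB reservation
 *	crashkernel=256M@16M	256 MiB at a fixed 16 MiB base
 *	crashkernel=512M,high	allow the base above 4 GiB; a low
 *				reservation for DMA/swiotlb is then added
 *				automatically (or set via crashkernel=Y,low)
 */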

static struct resource standard_io_resources[] = {
	{ .name = "dma1", .start = 0x00, .end = 0x1f,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "pic1", .start = 0x20, .end = 0x21,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "timer0", .start = 0x40, .end = 0x43,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "timer1", .start = 0x50, .end = 0x53,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "keyboard", .start = 0x60, .end = 0x60,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "keyboard", .start = 0x64, .end = 0x64,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "dma page reg", .start = 0x80, .end = 0x8f,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "pic2", .start = 0xa0, .end = 0xa1,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "dma2", .start = 0xc0, .end = 0xdf,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "fpu", .start = 0xf0, .end = 0xff,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO }
};

void __init reserve_standard_io_resources(void)
{
	int i;

	/* request I/O space for devices used on all i[345]86 PCs */
	for (i = 0; i < ARRAY_SIZE(standard_io_resources); i++)
		request_resource(&ioport_resource, &standard_io_resources[i]);

}

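/*
 * The iBFT (iSCSI Boot Firmware Table) describes the iSCSI device the
 * system booted from.  Reserve the table early so the memory is not
 * handed to the allocator before drivers (e.g. iscsi_ibft) can read it.
 */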
static __init void reserve_ibft_region(void)
{
	unsigned long addr, size = 0;

	addr = find_ibft_region(&size);

	if (size)
		memblock_reserve(addr, size);
}

static bool __init snb_gfx_workaround_needed(void)
{
#ifdef CONFIG_PCI
	int i;
	u16 vendor, devid;
	static const __initconst u16 snb_ids[] = {
		0x0102,
		0x0112,
		0x0122,
		0x0106,
		0x0116,
		0x0126,
		0x010a,
	};

	/* Assume no if something weird is going on with PCI */
	if (!early_pci_allowed())
		return false;

	vendor = read_pci_config_16(0, 2, 0, PCI_VENDOR_ID);
	if (vendor != 0x8086)
		return false;

	devid = read_pci_config_16(0, 2, 0, PCI_DEVICE_ID);
	for (i = 0; i < ARRAY_SIZE(snb_ids); i++)
		if (devid == snb_ids[i])
			return true;
#endif

	return false;
}

/*
 * Sandy Bridge graphics has trouble with certain ranges, exclude
 * them from allocation.
 */
static void __init trim_snb_memory(void)
{
	static const __initconst unsigned long bad_pages[] = {
		0x20050000,
		0x20110000,
		0x20130000,
		0x20138000,
		0x40004000,
	};
	int i;

	if (!snb_gfx_workaround_needed())
		return;

	printk(KERN_DEBUG "reserving inaccessible SNB gfx pages\n");

	/*
	 * Reserve all memory below the 1 MB mark that has not
	 * already been reserved.
	 */
	memblock_reserve(0, 1<<20);

	for (i = 0; i < ARRAY_SIZE(bad_pages); i++) {
		if (memblock_reserve(bad_pages[i], PAGE_SIZE))
			printk(KERN_WARNING "failed to reserve 0x%08lx\n",
			       bad_pages[i]);
	}
}

/*
 * Here we put platform-specific memory range workarounds, i.e.
 * memory known to be corrupt or otherwise in need of reservation on
 * specific platforms.
 *
 * If this gets used more widely it could use a real dispatch mechanism.
 */
static void __init trim_platform_memory_ranges(void)
{
	trim_snb_memory();
}

static void __init trim_bios_range(void)
{
	/*
	 * A special case is the first 4 KiB of memory: this is a
	 * BIOS-owned area, not kernel RAM, but generally not listed as
	 * such in the E820 table.
	 *
	 * This typically reserves additional memory (64 KiB by default)
	 * since some BIOSes are known to corrupt low memory.  See the
	 * Kconfig help text for X86_RESERVE_LOW.
	 */
	e820_update_range(0, PAGE_SIZE, E820_RAM, E820_RESERVED);

	/*
	 * Special case: some BIOSes report the PC BIOS area
	 * (640 KiB -> 1 MiB) as RAM even though it is not.
	 * Take it out.
	 */
	e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);

	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
}

/* called before trim_bios_range() to spare extra sanitize */
static void __init e820_add_kernel_range(void)
{
	u64 start = __pa_symbol(_text);
	u64 size = __pa_symbol(_end) - start;

	/*
	 * Complain if .text, .data and .bss are not marked as E820_RAM and
	 * attempt to fix it by adding the range.  We may have a confused
	 * BIOS, or the user may have used memmap=exactmap or memmap=xxM$yyM
	 * to exclude the kernel range.  If we really are running on top of
	 * non-RAM, we will crash later anyway.
	 */
	if (e820_all_mapped(start, start + size, E820_RAM))
		return;

	pr_warn(".text .data .bss are not marked as E820_RAM!\n");
	e820_remove_range(start, size, E820_RAM, 0);
	e820_add_region(start, size, E820_RAM);
}

static unsigned reserve_low = CONFIG_X86_RESERVE_LOW << 10;

static int __init parse_reservelow(char *p)
{
	unsigned long long size;

	if (!p)
		return -EINVAL;

	size = memparse(p, &p);

	if (size < 4096)
		size = 4096;

	if (size > 640*1024)
		size = 640*1024;

	reserve_low = size;

	return 0;
}

early_param("reservelow", parse_reservelow);
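
/*
 * Example: booting with "reservelow=64k" keeps the first 64 KiB of RAM
 * away from the allocator; the parser above clamps the value to the
 * [4 KiB, 640 KiB] range.
 */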

static void __init trim_low_memory_range(void)
{
	memblock_reserve(0, ALIGN(reserve_low, PAGE_SIZE));
}

/*
 * Dump out kernel offset information on panic.
 */
static int
dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p)
{
	if (kaslr_enabled()) {
		pr_emerg("Kernel Offset: 0x%lx from 0x%lx (relocation range: 0x%lx-0x%lx)\n",
			 (unsigned long)&_text - __START_KERNEL,
			 __START_KERNEL,
			 __START_KERNEL_map,
			 MODULES_VADDR-1);
	} else {
		pr_emerg("Kernel Offset: disabled\n");
	}

	return 0;
}
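
/*
 * Worked example: with KASLR active, if _text ended up at
 * 0xffffffff8c000000 while the link-time base __START_KERNEL is
 * 0xffffffff81000000, the notifier above reports an offset of
 * 0xb000000.  (Illustrative addresses only.)
 */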

/*
 * Determine if we were loaded by an EFI loader.  If so, then we have also been
 * passed the efi memmap, systab, etc., so we should use these data structures
 * for initialization.  Note, the efi init code path is determined by the
 * global efi_enabled.  This allows the same kernel image to be used on
 * existing systems (with a traditional BIOS) as well as on EFI systems.
 */
/*
 * setup_arch - architecture-specific boot-time initializations
 *
 * Note: On x86_64, fixmaps are ready for use even before this is called.
 */

void __init setup_arch(char **cmdline_p)
{
	memblock_reserve(__pa_symbol(_text),
			 (unsigned long)__bss_stop - (unsigned long)_text);

	early_reserve_initrd();

	/*
	 * At this point everything still needed from the boot loader
	 * or BIOS or kernel text should be early reserved or marked not
	 * RAM in e820. All other memory is free game.
	 */

#ifdef CONFIG_X86_32
	memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data));

	/*
	 * copy kernel address range established so far and switch
	 * to the proper swapper page table
	 */
	clone_pgd_range(swapper_pg_dir + KERNEL_PGD_BOUNDARY,
			initial_page_table + KERNEL_PGD_BOUNDARY,
			KERNEL_PGD_PTRS);

	load_cr3(swapper_pg_dir);
	/*
	 * Note: Quark X1000 CPUs advertise PGE incorrectly and require
	 * a cr3 based tlb flush, so the following __flush_tlb_all()
	 * will not flush anything because the cpu quirk which clears
	 * X86_FEATURE_PGE has not been invoked yet. Though due to the
	 * load_cr3() above the TLB has been flushed already. The
	 * quirk is invoked before subsequent calls to __flush_tlb_all()
	 * so proper operation is guaranteed.
	 */
	__flush_tlb_all();
#else
	printk(KERN_INFO "Command line: %s\n", boot_command_line);
#endif

	/*
	 * If we have OLPC OFW, we might end up relocating the fixmap due to
	 * reserve_top(), so do this before touching the ioremap area.
	 */
	olpc_ofw_detect();

	early_trap_init();
	early_cpu_init();
	early_ioremap_init();

	setup_olpc_ofw_pgd();

	ROOT_DEV = old_decode_dev(boot_params.hdr.root_dev);
	screen_info = boot_params.screen_info;
	edid_info = boot_params.edid_info;
#ifdef CONFIG_X86_32
	apm_info.bios = boot_params.apm_bios_info;
	ist_info = boot_params.ist_info;
	if (boot_params.sys_desc_table.length != 0) {
		machine_id = boot_params.sys_desc_table.table[0];
		machine_submodel_id = boot_params.sys_desc_table.table[1];
		BIOS_revision = boot_params.sys_desc_table.table[2];
	}
#endif
	saved_video_mode = boot_params.hdr.vid_mode;
	bootloader_type = boot_params.hdr.type_of_loader;
	if ((bootloader_type >> 4) == 0xe) {
		bootloader_type &= 0xf;
		bootloader_type |= (boot_params.hdr.ext_loader_type+0x10) << 4;
	}
	bootloader_version  = bootloader_type & 0xf;
	bootloader_version |= boot_params.hdr.ext_loader_ver << 4;
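	/*
	 * Worked example of the extension scheme above: a boot loader
	 * passing type_of_loader = 0xE4 and ext_loader_type = 0x05 ends
	 * up with loader ID 0x15 (0x05 + 0x10) while 0x4 stays as the
	 * low nibble of the version.
	 */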

#ifdef CONFIG_BLK_DEV_RAM
	rd_image_start = boot_params.hdr.ram_size & RAMDISK_IMAGE_START_MASK;
	rd_prompt = ((boot_params.hdr.ram_size & RAMDISK_PROMPT_FLAG) != 0);
	rd_doload = ((boot_params.hdr.ram_size & RAMDISK_LOAD_FLAG) != 0);
#endif
#ifdef CONFIG_EFI
	if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,
		     EFI32_LOADER_SIGNATURE, 4)) {
		set_bit(EFI_BOOT, &efi.flags);
	} else if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,
		     EFI64_LOADER_SIGNATURE, 4)) {
		set_bit(EFI_BOOT, &efi.flags);
		set_bit(EFI_64BIT, &efi.flags);
	}

	if (efi_enabled(EFI_BOOT))
		efi_memblock_x86_reserve_range();
#endif

	x86_init.oem.arch_setup();

	iomem_resource.end = (1ULL << boot_cpu_data.x86_phys_bits) - 1;
	setup_memory_map();
	parse_setup_data();

	copy_edd();

	if (!boot_params.hdr.root_flags)
		root_mountflags &= ~MS_RDONLY;
	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code = (unsigned long) _etext;
	init_mm.end_data = (unsigned long) _edata;
	init_mm.brk = _brk_end;

	mpx_mm_init(&init_mm);

	code_resource.start = __pa_symbol(_text);
	code_resource.end = __pa_symbol(_etext)-1;
	data_resource.start = __pa_symbol(_etext);
	data_resource.end = __pa_symbol(_edata)-1;
	bss_resource.start = __pa_symbol(__bss_start);
	bss_resource.end = __pa_symbol(__bss_stop)-1;

#ifdef CONFIG_CMDLINE_BOOL
#ifdef CONFIG_CMDLINE_OVERRIDE
	strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
#else
	if (builtin_cmdline[0]) {
		/* append boot loader cmdline to builtin */
		strlcat(builtin_cmdline, " ", COMMAND_LINE_SIZE);
		strlcat(builtin_cmdline, boot_command_line, COMMAND_LINE_SIZE);
		strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
	}
#endif
#endif

	strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = command_line;

	/*
	 * x86_configure_nx() is called before parse_early_param() to detect
	 * whether hardware doesn't support NX (so that the early EHCI debug
	 * console setup can safely call set_fixmap()). It may then be called
	 * again from within noexec_setup() during parsing early parameters
	 * to honor the respective command line option.
	 */
	x86_configure_nx();

	parse_early_param();

	x86_report_nx();

	/* after early param, so a panic can already reach the serial console */
	memblock_x86_reserve_range_setup_data();

	if (acpi_mps_check()) {
#ifdef CONFIG_X86_LOCAL_APIC
		disable_apic = 1;
#endif
		setup_clear_cpu_cap(X86_FEATURE_APIC);
	}

#ifdef CONFIG_PCI
	if (pci_early_dump_regs)
		early_dump_pci_devices();
#endif

	/* update the e820_saved too */
	e820_reserve_setup_data();
	finish_e820_parsing();

	if (efi_enabled(EFI_BOOT))
		efi_init();

	dmi_scan_machine();
	dmi_memdev_walk();
	dmi_set_dump_stack_arch_desc();

	/*
	 * VMware detection requires dmi to be available, so this
	 * needs to be done after dmi_scan_machine(), for the boot CPU.
	 */
	init_hypervisor_platform();

	x86_init.resources.probe_roms();

	/* after parse_early_param, so could debug it */
	insert_resource(&iomem_resource, &code_resource);
	insert_resource(&iomem_resource, &data_resource);
	insert_resource(&iomem_resource, &bss_resource);

	e820_add_kernel_range();
	trim_bios_range();
#ifdef CONFIG_X86_32
	if (ppro_with_ram_bug()) {
		e820_update_range(0x70000000ULL, 0x40000ULL, E820_RAM,
				  E820_RESERVED);
		sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
		printk(KERN_INFO "fixed physical RAM map:\n");
		e820_print_map("bad_ppro");
	}
#else
	early_gart_iommu_check();
#endif

	/*
	 * partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	max_pfn = e820_end_of_ram_pfn();

	/* update e820 for memory not covered by WB MTRRs */
	mtrr_bp_init();
	if (mtrr_trim_uncached_memory(max_pfn))
		max_pfn = e820_end_of_ram_pfn();

#ifdef CONFIG_X86_32
	/* max_low_pfn gets updated here */
	find_low_pfn_range();
#else
	check_x2apic();

	/* How many end-of-memory variables you have, grandma! */
	/* need this before calling reserve_initrd */
	if (max_pfn > (1UL<<(32 - PAGE_SHIFT)))
		max_low_pfn = e820_end_of_low_ram_pfn();
	else
		max_low_pfn = max_pfn;

	high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1;
#endif

	/*
	 * Find and reserve possible boot-time SMP configuration:
	 */
	find_smp_config();

	reserve_ibft_region();

	early_alloc_pgt_buf();

	/*
	 * Need to conclude brk before memblock_x86_fill(); it could use
	 * memblock_find_in_range(), which could overlap with the brk area.
	 */
	reserve_brk();

	cleanup_highmap();

	memblock_set_current_limit(ISA_END_ADDRESS);
	memblock_x86_fill();

	/*
	 * The EFI specification says that boot service code won't be called
	 * after ExitBootServices(). This is, in fact, a lie.
	 */
	if (efi_enabled(EFI_MEMMAP))
		efi_reserve_boot_services();

	/* preallocate 4k for mptable mpc */
	early_reserve_e820_mpc_new();

#ifdef CONFIG_X86_CHECK_BIOS_CORRUPTION
	setup_bios_corruption_check();
#endif

#ifdef CONFIG_X86_32
	printk(KERN_DEBUG "initial memory mapped: [mem 0x00000000-%#010lx]\n",
	       (max_pfn_mapped<<PAGE_SHIFT) - 1);
#endif

	reserve_real_mode();

	trim_platform_memory_ranges();
	trim_low_memory_range();

	init_mem_mapping();

	early_trap_pf_init();

	setup_real_mode();

	memblock_set_current_limit(get_max_mapped());

	/*
	 * NOTE: On x86-32, only from this point on, fixmaps are ready for use.
	 */

#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
	if (init_ohci1394_dma_early)
		init_ohci1394_dma_on_all_controllers();
#endif
	/* Allocate bigger log buffer */
	setup_log_buf(1);

	reserve_initrd();

#if defined(CONFIG_ACPI) && defined(CONFIG_BLK_DEV_INITRD)
	acpi_initrd_override((void *)initrd_start, initrd_end - initrd_start);
#endif

	vsmp_init();

	io_delay_init();

	/*
	 * Parse the ACPI tables for possible boot-time SMP configuration.
	 */
	acpi_boot_table_init();

	early_acpi_boot_init();

	initmem_init();
	dma_contiguous_reserve(max_pfn_mapped << PAGE_SHIFT);

	/*
	 * Reserve memory for crash kernel after SRAT is parsed so that it
	 * won't consume hotpluggable memory.
	 */
	reserve_crashkernel();

	memblock_find_dma_reserve();

#ifdef CONFIG_KVM_GUEST
	kvmclock_init();
#endif

	x86_init.paging.pagetable_init();

	kasan_init();

	if (boot_cpu_data.cpuid_level >= 0) {
		/* A CPU has %cr4 if and only if it has CPUID */
		mmu_cr4_features = __read_cr4();
		if (trampoline_cr4_features)
			*trampoline_cr4_features = mmu_cr4_features;
	}

#ifdef CONFIG_X86_32
	/* sync back kernel address range */
	clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY,
			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
			KERNEL_PGD_PTRS);
#endif

	tboot_probe();

	map_vsyscall();

	generic_apic_probe();

	early_quirks();

	/*
	 * Read APIC and some other early information from ACPI tables.
	 */
	acpi_boot_init();
	sfi_init();
	x86_dtb_init();

	/*
	 * get boot-time SMP configuration:
	 */
	if (smp_found_config)
		get_smp_config();

	prefill_possible_map();

	init_cpu_to_node();

	init_apic_mappings();
	if (x86_io_apic_ops.init)
		x86_io_apic_ops.init();

	kvm_guest_init();

	e820_reserve_resources();
	e820_mark_nosave_regions(max_low_pfn);

	x86_init.resources.reserve_resources();

	e820_setup_gap();

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	if (!efi_enabled(EFI_BOOT) || (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY))
		conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
	x86_init.oem.banner();

	x86_init.timers.wallclock_init();

	mcheck_init();

	arch_init_ideal_nops();

	register_refined_jiffies(CLOCK_TICK_RATE);

#ifdef CONFIG_EFI
	if (efi_enabled(EFI_BOOT))
		efi_apply_memmap_quirks();
#endif
}

#ifdef CONFIG_X86_32

static struct resource video_ram_resource = {
	.name	= "Video RAM area",
	.start	= 0xa0000,
	.end	= 0xbffff,
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
};

void __init i386_reserve_resources(void)
{
	request_resource(&iomem_resource, &video_ram_resource);
	reserve_standard_io_resources();
}

#endif /* CONFIG_X86_32 */

static struct notifier_block kernel_offset_notifier = {
	.notifier_call = dump_kernel_offset
};

static int __init register_kernel_offset_dumper(void)
{
	atomic_notifier_chain_register(&panic_notifier_list,
					&kernel_offset_notifier);
	return 0;
}
__initcall(register_kernel_offset_dumper);