arch/x86/kernel/setup.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (C) 1995 Linus Torvalds
4 *
5 * This file contains the setup_arch() code, which handles the architecture-dependent
6 * parts of early kernel initialization.
7 */
8 #include <linux/console.h>
9 #include <linux/crash_dump.h>
10 #include <linux/dmi.h>
11 #include <linux/efi.h>
12 #include <linux/init_ohci1394_dma.h>
13 #include <linux/initrd.h>
14 #include <linux/iscsi_ibft.h>
15 #include <linux/memblock.h>
16 #include <linux/pci.h>
17 #include <linux/root_dev.h>
18 #include <linux/sfi.h>
19 #include <linux/hugetlb.h>
20 #include <linux/tboot.h>
21 #include <linux/usb/xhci-dbgp.h>
22 #include <linux/static_call.h>
23
24 #include <uapi/linux/mount.h>
25
26 #include <xen/xen.h>
27
28 #include <asm/apic.h>
29 #include <asm/numa.h>
30 #include <asm/bios_ebda.h>
31 #include <asm/bugs.h>
32 #include <asm/cpu.h>
33 #include <asm/efi.h>
34 #include <asm/gart.h>
35 #include <asm/hypervisor.h>
36 #include <asm/io_apic.h>
37 #include <asm/kasan.h>
38 #include <asm/kaslr.h>
39 #include <asm/mce.h>
40 #include <asm/mtrr.h>
41 #include <asm/realmode.h>
42 #include <asm/olpc_ofw.h>
43 #include <asm/pci-direct.h>
44 #include <asm/prom.h>
45 #include <asm/proto.h>
46 #include <asm/unwind.h>
47 #include <asm/vsyscall.h>
48 #include <linux/vmalloc.h>
49
50 /*
51 * max_low_pfn_mapped: highest directly mapped pfn < 4 GB
52 * max_pfn_mapped: highest directly mapped pfn > 4 GB
53 *
54 * The direct mapping only covers E820_TYPE_RAM regions, so the ranges and gaps are
55 * represented by pfn_mapped[].
56 */
57 unsigned long max_low_pfn_mapped;
58 unsigned long max_pfn_mapped;
59
60 #ifdef CONFIG_DMI
61 RESERVE_BRK(dmi_alloc, 65536);
62 #endif
63
64
65 /*
66 * Range of the brk area. The size of the brk area is determined
67 * at link time, with the RESERVE_BRK*() facility reserving additional
68 * chunks.
69 */
70 unsigned long _brk_start = (unsigned long)__brk_base;
71 unsigned long _brk_end = (unsigned long)__brk_base;
72
73 struct boot_params boot_params;
74
75 /*
76 * These are the four main kernel memory regions; we put them into
77 * the resource tree so that kdump tools and other debugging tools
78 * can recover them:
79 */
80
81 static struct resource rodata_resource = {
82 .name = "Kernel rodata",
83 .start = 0,
84 .end = 0,
85 .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
86 };
87
88 static struct resource data_resource = {
89 .name = "Kernel data",
90 .start = 0,
91 .end = 0,
92 .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
93 };
94
95 static struct resource code_resource = {
96 .name = "Kernel code",
97 .start = 0,
98 .end = 0,
99 .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
100 };
101
102 static struct resource bss_resource = {
103 .name = "Kernel bss",
104 .start = 0,
105 .end = 0,
106 .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
107 };
108
109
110 #ifdef CONFIG_X86_32
111 /* CPU data as detected by the assembly code in head_32.S */
112 struct cpuinfo_x86 new_cpu_data;
113
114 /* Common CPU data for all CPUs */
115 struct cpuinfo_x86 boot_cpu_data __read_mostly;
116 EXPORT_SYMBOL(boot_cpu_data);
117
118 unsigned int def_to_bigsmp;
119
120 /* For MCA, but anyone else can use it if they want */
121 unsigned int machine_id;
122 unsigned int machine_submodel_id;
123 unsigned int BIOS_revision;
124
125 struct apm_info apm_info;
126 EXPORT_SYMBOL(apm_info);
127
128 #if defined(CONFIG_X86_SPEEDSTEP_SMI) || \
129 defined(CONFIG_X86_SPEEDSTEP_SMI_MODULE)
130 struct ist_info ist_info;
131 EXPORT_SYMBOL(ist_info);
132 #else
133 struct ist_info ist_info;
134 #endif
135
136 #else
137 struct cpuinfo_x86 boot_cpu_data __read_mostly;
138 EXPORT_SYMBOL(boot_cpu_data);
139 #endif
140
141
142 #if !defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
143 __visible unsigned long mmu_cr4_features __ro_after_init;
144 #else
145 __visible unsigned long mmu_cr4_features __ro_after_init = X86_CR4_PAE;
146 #endif
147
148 /* Boot loader ID and version as integers, for the benefit of proc_dointvec */
149 int bootloader_type, bootloader_version;
150
151 /*
152 * Setup options
153 */
154 struct screen_info screen_info;
155 EXPORT_SYMBOL(screen_info);
156 struct edid_info edid_info;
157 EXPORT_SYMBOL_GPL(edid_info);
158
159 extern int root_mountflags;
160
161 unsigned long saved_video_mode;
162
163 #define RAMDISK_IMAGE_START_MASK 0x07FF
164 #define RAMDISK_PROMPT_FLAG 0x8000
165 #define RAMDISK_LOAD_FLAG 0x4000
166
167 static char __initdata command_line[COMMAND_LINE_SIZE];
168 #ifdef CONFIG_CMDLINE_BOOL
169 static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
170 #endif
171
172 #if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
173 struct edd edd;
174 #ifdef CONFIG_EDD_MODULE
175 EXPORT_SYMBOL(edd);
176 #endif
177 /**
178 * copy_edd() - Copy the BIOS EDD information
179 * from boot_params into a safe place.
180 *
181 */
182 static inline void __init copy_edd(void)
183 {
184 memcpy(edd.mbr_signature, boot_params.edd_mbr_sig_buffer,
185 sizeof(edd.mbr_signature));
186 memcpy(edd.edd_info, boot_params.eddbuf, sizeof(edd.edd_info));
187 edd.mbr_signature_nr = boot_params.edd_mbr_sig_buf_entries;
188 edd.edd_info_nr = boot_params.eddbuf_entries;
189 }
190 #else
191 static inline void __init copy_edd(void)
192 {
193 }
194 #endif
195
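/*
 * Bump-allocate @size zeroed bytes from the early brk area, aligned to
 * @align (which must be a power of two). Only valid until reserve_brk()
 * clears _brk_start; the BUG_ON() below catches later callers.
 */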
196 void * __init extend_brk(size_t size, size_t align)
197 {
198 size_t mask = align - 1;
199 void *ret;
200
201 BUG_ON(_brk_start == 0);
202 BUG_ON(align & mask);
203
204 _brk_end = (_brk_end + mask) & ~mask;
205 BUG_ON((char *)(_brk_end + size) > __brk_limit);
206
207 ret = (void *)_brk_end;
208 _brk_end += size;
209
210 memset(ret, 0, size);
211
212 return ret;
213 }
214
215 #ifdef CONFIG_X86_32
216 static void __init cleanup_highmap(void)
217 {
218 }
219 #endif
220
221 static void __init reserve_brk(void)
222 {
223 if (_brk_end > _brk_start)
224 memblock_reserve(__pa_symbol(_brk_start),
225 _brk_end - _brk_start);
226
227 /* Mark brk area as locked down and no longer taking any
228 new allocations */
229 _brk_start = 0;
230 }
231
232 u64 relocated_ramdisk;
233
234 #ifdef CONFIG_BLK_DEV_INITRD
235
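/*
 * The boot protocol stores the initrd address/size as a 32-bit value in
 * the setup header, with the upper 32 bits in the ext_ramdisk_* fields;
 * combine them here. If the bootloader passed nothing, fall back to
 * phys_initrd_start/phys_initrd_size set up elsewhere (e.g. by the
 * generic initrd code).
 */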
236 static u64 __init get_ramdisk_image(void)
237 {
238 u64 ramdisk_image = boot_params.hdr.ramdisk_image;
239
240 ramdisk_image |= (u64)boot_params.ext_ramdisk_image << 32;
241
242 if (ramdisk_image == 0)
243 ramdisk_image = phys_initrd_start;
244
245 return ramdisk_image;
246 }
247 static u64 __init get_ramdisk_size(void)
248 {
249 u64 ramdisk_size = boot_params.hdr.ramdisk_size;
250
251 ramdisk_size |= (u64)boot_params.ext_ramdisk_size << 32;
252
253 if (ramdisk_size == 0)
254 ramdisk_size = phys_initrd_size;
255
256 return ramdisk_size;
257 }
258
259 static void __init relocate_initrd(void)
260 {
261 /* Assume only end is not page aligned */
262 u64 ramdisk_image = get_ramdisk_image();
263 u64 ramdisk_size = get_ramdisk_size();
264 u64 area_size = PAGE_ALIGN(ramdisk_size);
265
266 /* We need to move the initrd down into directly mapped mem */
267 relocated_ramdisk = memblock_phys_alloc_range(area_size, PAGE_SIZE, 0,
268 PFN_PHYS(max_pfn_mapped));
269 if (!relocated_ramdisk)
270 panic("Cannot find place for new RAMDISK of size %lld\n",
271 ramdisk_size);
272
273 initrd_start = relocated_ramdisk + PAGE_OFFSET;
274 initrd_end = initrd_start + ramdisk_size;
275 printk(KERN_INFO "Allocated new RAMDISK: [mem %#010llx-%#010llx]\n",
276 relocated_ramdisk, relocated_ramdisk + ramdisk_size - 1);
277
278 copy_from_early_mem((void *)initrd_start, ramdisk_image, ramdisk_size);
279
280 printk(KERN_INFO "Move RAMDISK from [mem %#010llx-%#010llx] to"
281 " [mem %#010llx-%#010llx]\n",
282 ramdisk_image, ramdisk_image + ramdisk_size - 1,
283 relocated_ramdisk, relocated_ramdisk + ramdisk_size - 1);
284 }
285
286 static void __init early_reserve_initrd(void)
287 {
288 /* Assume only end is not page aligned */
289 u64 ramdisk_image = get_ramdisk_image();
290 u64 ramdisk_size = get_ramdisk_size();
291 u64 ramdisk_end = PAGE_ALIGN(ramdisk_image + ramdisk_size);
292
293 if (!boot_params.hdr.type_of_loader ||
294 !ramdisk_image || !ramdisk_size)
295 return; /* No initrd provided by bootloader */
296
297 memblock_reserve(ramdisk_image, ramdisk_end - ramdisk_image);
298 }
299
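/*
 * Unlike early_reserve_initrd(), which only protects the bootloader-provided
 * image from early memblock allocations, this decides where the initrd will
 * actually live: if the original range is already covered by the direct
 * mapping it is used in place, otherwise relocate_initrd() copies it into
 * directly mapped memory and the original range is freed.
 */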
300 static void __init reserve_initrd(void)
301 {
302 /* Assume only end is not page aligned */
303 u64 ramdisk_image = get_ramdisk_image();
304 u64 ramdisk_size = get_ramdisk_size();
305 u64 ramdisk_end = PAGE_ALIGN(ramdisk_image + ramdisk_size);
306
307 if (!boot_params.hdr.type_of_loader ||
308 !ramdisk_image || !ramdisk_size)
309 return; /* No initrd provided by bootloader */
310
311 initrd_start = 0;
312
313 printk(KERN_INFO "RAMDISK: [mem %#010llx-%#010llx]\n", ramdisk_image,
314 ramdisk_end - 1);
315
316 if (pfn_range_is_mapped(PFN_DOWN(ramdisk_image),
317 PFN_DOWN(ramdisk_end))) {
318 /* All are mapped, easy case */
319 initrd_start = ramdisk_image + PAGE_OFFSET;
320 initrd_end = initrd_start + ramdisk_size;
321 return;
322 }
323
324 relocate_initrd();
325
326 memblock_free(ramdisk_image, ramdisk_end - ramdisk_image);
327 }
328
329 #else
330 static void __init early_reserve_initrd(void)
331 {
332 }
333 static void __init reserve_initrd(void)
334 {
335 }
336 #endif /* CONFIG_BLK_DEV_INITRD */
337
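/*
 * Walk the singly linked list of setup_data blobs passed by the bootloader
 * (boot_params.hdr.setup_data) and dispatch the types handled here. Each
 * header is mapped with early_memremap() since it may lie outside the
 * currently mapped ranges.
 */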
338 static void __init parse_setup_data(void)
339 {
340 struct setup_data *data;
341 u64 pa_data, pa_next;
342
343 pa_data = boot_params.hdr.setup_data;
344 while (pa_data) {
345 u32 data_len, data_type;
346
347 data = early_memremap(pa_data, sizeof(*data));
348 data_len = data->len + sizeof(struct setup_data);
349 data_type = data->type;
350 pa_next = data->next;
351 early_memunmap(data, sizeof(*data));
352
353 switch (data_type) {
354 case SETUP_E820_EXT:
355 e820__memory_setup_extended(pa_data, data_len);
356 break;
357 case SETUP_DTB:
358 add_dtb(pa_data);
359 break;
360 case SETUP_EFI:
361 parse_efi_setup(pa_data, data_len);
362 break;
363 default:
364 break;
365 }
366 pa_data = pa_next;
367 }
368 }
369
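/*
 * Reserve every setup_data node (header plus payload) in memblock so the
 * blobs are not handed out as free memory, and do the same for the target
 * of any SETUP_INDIRECT entry.
 */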
370 static void __init memblock_x86_reserve_range_setup_data(void)
371 {
372 struct setup_data *data;
373 u64 pa_data;
374
375 pa_data = boot_params.hdr.setup_data;
376 while (pa_data) {
377 data = early_memremap(pa_data, sizeof(*data));
378 memblock_reserve(pa_data, sizeof(*data) + data->len);
379
380 if (data->type == SETUP_INDIRECT &&
381 ((struct setup_indirect *)data->data)->type != SETUP_INDIRECT)
382 memblock_reserve(((struct setup_indirect *)data->data)->addr,
383 ((struct setup_indirect *)data->data)->len);
384
385 pa_data = data->next;
386 early_memunmap(data, sizeof(*data));
387 }
388 }
389
390 /*
391 * --------- Crashkernel reservation ------------------------------
392 */
393
394 #ifdef CONFIG_KEXEC_CORE
395
396 /* 16M alignment for crash kernel regions */
397 #define CRASH_ALIGN SZ_16M
398
399 /*
400 * Keep the crash kernel below this limit.
401 *
402 * Earlier 32-bit kernels would limit the crash kernel to the low 512 MB range
403 * due to mapping restrictions.
404 *
405 * 64-bit kdump kernels need to be restricted to be under 64 TB, which is
406 * the upper limit of system RAM in 4-level paging mode. Since the kdump
407 * jump could be from 5-level paging to 4-level paging, the jump will fail if
408 * the kernel is put above 64 TB, and during the 1st kernel bootup there's
409 * no good way to detect the paging mode of the target kernel which will be
410 * loaded for dumping.
411 */
412 #ifdef CONFIG_X86_32
413 # define CRASH_ADDR_LOW_MAX SZ_512M
414 # define CRASH_ADDR_HIGH_MAX SZ_512M
415 #else
416 # define CRASH_ADDR_LOW_MAX SZ_4G
417 # define CRASH_ADDR_HIGH_MAX SZ_64T
418 #endif
419
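/*
 * On x86_64, when the main crashkernel region ends up above 4 GB, reserve an
 * additional region below 4 GB for the crash kernel's DMA/swiotlb buffers.
 * The size comes from "crashkernel=...,low" on the command line, or a
 * default derived from the swiotlb size (see the comment below).
 */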
420 static int __init reserve_crashkernel_low(void)
421 {
422 #ifdef CONFIG_X86_64
423 unsigned long long base, low_base = 0, low_size = 0;
424 unsigned long total_low_mem;
425 int ret;
426
427 total_low_mem = memblock_mem_size(1UL << (32 - PAGE_SHIFT));
428
429 /* crashkernel=Y,low */
430 ret = parse_crashkernel_low(boot_command_line, total_low_mem, &low_size, &base);
431 if (ret) {
432 /*
433 * two parts from kernel/dma/swiotlb.c:
434 * -swiotlb size: user-specified with swiotlb= or default.
435 *
436 * -swiotlb overflow buffer: now hardcoded to 32k. We round it
437 * to 8M for other buffers that may need to stay low too. Also
438 * make sure we allocate enough extra low memory so that we
439 * don't run out of DMA buffers for 32-bit devices.
440 */
441 low_size = max(swiotlb_size_or_default() + (8UL << 20), 256UL << 20);
442 } else {
443 /* passed with crashkernel=0,low ? */
444 if (!low_size)
445 return 0;
446 }
447
448 low_base = memblock_find_in_range(0, 1ULL << 32, low_size, CRASH_ALIGN);
449 if (!low_base) {
450 pr_err("Cannot reserve %ldMB crashkernel low memory, please try smaller size.\n",
451 (unsigned long)(low_size >> 20));
452 return -ENOMEM;
453 }
454
455 ret = memblock_reserve(low_base, low_size);
456 if (ret) {
457 pr_err("%s: Error reserving crashkernel low memblock.\n", __func__);
458 return ret;
459 }
460
461 pr_info("Reserving %ldMB of low memory at %ldMB for crashkernel (System low RAM: %ldMB)\n",
462 (unsigned long)(low_size >> 20),
463 (unsigned long)(low_base >> 20),
464 (unsigned long)(total_low_mem >> 20));
465
466 crashk_low_res.start = low_base;
467 crashk_low_res.end = low_base + low_size - 1;
468 insert_resource(&iomem_resource, &crashk_low_res);
469 #endif
470 return 0;
471 }
472
473 static void __init reserve_crashkernel(void)
474 {
475 unsigned long long crash_size, crash_base, total_mem;
476 bool high = false;
477 int ret;
478
479 total_mem = memblock_phys_mem_size();
480
481 /* crashkernel=XM */
482 ret = parse_crashkernel(boot_command_line, total_mem, &crash_size, &crash_base);
483 if (ret != 0 || crash_size <= 0) {
484 /* crashkernel=X,high */
485 ret = parse_crashkernel_high(boot_command_line, total_mem,
486 &crash_size, &crash_base);
487 if (ret != 0 || crash_size <= 0)
488 return;
489 high = true;
490 }
491
492 if (xen_pv_domain()) {
493 pr_info("Ignoring crashkernel for a Xen PV domain\n");
494 return;
495 }
496
497 /* 0 means: find the address automatically */
498 if (!crash_base) {
499 /*
500 * Set CRASH_ADDR_LOW_MAX upper bound for crash memory,
501 * crashkernel=x,high reserves memory over 4G, also allocates
502 * 256M extra low memory for DMA buffers and swiotlb.
503 * But the extra memory is not required for all machines.
504 * So try low memory first and fall back to high memory
505 * unless "crashkernel=size[KMG],high" is specified.
506 */
507 if (!high)
508 crash_base = memblock_find_in_range(CRASH_ALIGN,
509 CRASH_ADDR_LOW_MAX,
510 crash_size, CRASH_ALIGN);
511 if (!crash_base)
512 crash_base = memblock_find_in_range(CRASH_ALIGN,
513 CRASH_ADDR_HIGH_MAX,
514 crash_size, CRASH_ALIGN);
515 if (!crash_base) {
516 pr_info("crashkernel reservation failed - No suitable area found.\n");
517 return;
518 }
519 } else {
520 unsigned long long start;
521
522 start = memblock_find_in_range(crash_base,
523 crash_base + crash_size,
524 crash_size, 1 << 20);
525 if (start != crash_base) {
526 pr_info("crashkernel reservation failed - memory is in use.\n");
527 return;
528 }
529 }
530 ret = memblock_reserve(crash_base, crash_size);
531 if (ret) {
532 pr_err("%s: Error reserving crashkernel memblock.\n", __func__);
533 return;
534 }
535
536 if (crash_base >= (1ULL << 32) && reserve_crashkernel_low()) {
537 memblock_free(crash_base, crash_size);
538 return;
539 }
540
541 pr_info("Reserving %ldMB of memory at %ldMB for crashkernel (System RAM: %ldMB)\n",
542 (unsigned long)(crash_size >> 20),
543 (unsigned long)(crash_base >> 20),
544 (unsigned long)(total_mem >> 20));
545
546 crashk_res.start = crash_base;
547 crashk_res.end = crash_base + crash_size - 1;
548 insert_resource(&iomem_resource, &crashk_res);
549 }
550 #else
551 static void __init reserve_crashkernel(void)
552 {
553 }
554 #endif
555
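/*
 * Fixed legacy PC/AT I/O port ranges: DMA controllers, PICs, the PIT,
 * the keyboard controller, DMA page registers and the FPU error port.
 */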
556 static struct resource standard_io_resources[] = {
557 { .name = "dma1", .start = 0x00, .end = 0x1f,
558 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
559 { .name = "pic1", .start = 0x20, .end = 0x21,
560 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
561 { .name = "timer0", .start = 0x40, .end = 0x43,
562 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
563 { .name = "timer1", .start = 0x50, .end = 0x53,
564 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
565 { .name = "keyboard", .start = 0x60, .end = 0x60,
566 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
567 { .name = "keyboard", .start = 0x64, .end = 0x64,
568 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
569 { .name = "dma page reg", .start = 0x80, .end = 0x8f,
570 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
571 { .name = "pic2", .start = 0xa0, .end = 0xa1,
572 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
573 { .name = "dma2", .start = 0xc0, .end = 0xdf,
574 .flags = IORESOURCE_BUSY | IORESOURCE_IO },
575 { .name = "fpu", .start = 0xf0, .end = 0xff,
576 .flags = IORESOURCE_BUSY | IORESOURCE_IO }
577 };
578
579 void __init reserve_standard_io_resources(void)
580 {
581 int i;
582
583 /* request I/O space for devices used on all i[345]86 PCs */
584 for (i = 0; i < ARRAY_SIZE(standard_io_resources); i++)
585 request_resource(&ioport_resource, &standard_io_resources[i]);
586
587 }
588
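/*
 * Reserve the iSCSI Boot Firmware Table (iBFT), if the firmware provided
 * one, so it is not reused before the ibft driver gets to parse it.
 */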
589 static __init void reserve_ibft_region(void)
590 {
591 unsigned long addr, size = 0;
592
593 addr = find_ibft_region(&size);
594
595 if (size)
596 memblock_reserve(addr, size);
597 }
598
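/*
 * Check via early PCI config space whether the integrated graphics device
 * at 00:02.0 is one of the affected Sandy Bridge GPU IDs listed below.
 */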
599 static bool __init snb_gfx_workaround_needed(void)
600 {
601 #ifdef CONFIG_PCI
602 int i;
603 u16 vendor, devid;
604 static const __initconst u16 snb_ids[] = {
605 0x0102,
606 0x0112,
607 0x0122,
608 0x0106,
609 0x0116,
610 0x0126,
611 0x010a,
612 };
613
614 /* Assume no if something weird is going on with PCI */
615 if (!early_pci_allowed())
616 return false;
617
618 vendor = read_pci_config_16(0, 2, 0, PCI_VENDOR_ID);
619 if (vendor != 0x8086)
620 return false;
621
622 devid = read_pci_config_16(0, 2, 0, PCI_DEVICE_ID);
623 for (i = 0; i < ARRAY_SIZE(snb_ids); i++)
624 if (devid == snb_ids[i])
625 return true;
626 #endif
627
628 return false;
629 }
630
631 /*
632 * Sandy Bridge graphics has trouble with certain ranges, exclude
633 * them from allocation.
634 */
635 static void __init trim_snb_memory(void)
636 {
637 static const __initconst unsigned long bad_pages[] = {
638 0x20050000,
639 0x20110000,
640 0x20130000,
641 0x20138000,
642 0x40004000,
643 };
644 int i;
645
646 if (!snb_gfx_workaround_needed())
647 return;
648
649 printk(KERN_DEBUG "reserving inaccessible SNB gfx pages\n");
650
651 /*
652 * Reserve all memory below the 1 MB mark that has not
653 * already been reserved.
654 */
655 memblock_reserve(0, 1<<20);
656
657 for (i = 0; i < ARRAY_SIZE(bad_pages); i++) {
658 if (memblock_reserve(bad_pages[i], PAGE_SIZE))
659 printk(KERN_WARNING "failed to reserve 0x%08lx\n",
660 bad_pages[i]);
661 }
662 }
663
664 /*
665 * Here we put platform-specific memory range workarounds, i.e.
666 * memory known to be corrupt or otherwise in need of being reserved on
667 * specific platforms.
668 *
669 * If this gets used more widely it could use a real dispatch mechanism.
670 */
671 static void __init trim_platform_memory_ranges(void)
672 {
673 trim_snb_memory();
674 }
675
676 static void __init trim_bios_range(void)
677 {
678 /*
679 * A special case is the first 4 KiB of memory:
680 * this is a BIOS-owned area, not kernel RAM, but generally
681 * not listed as such in the E820 table.
682 *
683 * This typically reserves additional memory (64KiB by default)
684 * since some BIOSes are known to corrupt low memory. See the
685 * Kconfig help text for X86_RESERVE_LOW.
686 */
687 e820__range_update(0, PAGE_SIZE, E820_TYPE_RAM, E820_TYPE_RESERVED);
688
689 /*
690 * Special case: some BIOSes report the PC BIOS
691 * area (640 KiB -> 1 MiB) as RAM even though it is not.
692 * Take it out.
693 */
694 e820__range_remove(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_TYPE_RAM, 1);
695
696 e820__update_table(e820_table);
697 }
698
699 /* Called before trim_bios_range() to spare an extra e820 table sanitization pass */
700 static void __init e820_add_kernel_range(void)
701 {
702 u64 start = __pa_symbol(_text);
703 u64 size = __pa_symbol(_end) - start;
704
705 /*
706 * Complain if .text, .data and .bss are not marked as E820_TYPE_RAM and
707 * attempt to fix it by adding the range. We may have a confused BIOS,
708 * or the user may have used memmap=exactmap or memmap=xxM$yyM to
709 * exclude the kernel range. If we really are running on top of non-RAM,
710 * we will crash later anyway.
711 */
712 if (e820__mapped_all(start, start + size, E820_TYPE_RAM))
713 return;
714
715 pr_warn(".text .data .bss are not marked as E820_TYPE_RAM!\n");
716 e820__range_remove(start, size, E820_TYPE_RAM, 0);
717 e820__range_add(start, size, E820_TYPE_RAM);
718 }
719
720 static unsigned reserve_low = CONFIG_X86_RESERVE_LOW << 10;
721
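/*
 * "reservelow=" overrides the CONFIG_X86_RESERVE_LOW default above; the
 * value is clamped to the 4 KiB .. 640 KiB range (e.g. "reservelow=64k").
 */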
722 static int __init parse_reservelow(char *p)
723 {
724 unsigned long long size;
725
726 if (!p)
727 return -EINVAL;
728
729 size = memparse(p, &p);
730
731 if (size < 4096)
732 size = 4096;
733
734 if (size > 640*1024)
735 size = 640*1024;
736
737 reserve_low = size;
738
739 return 0;
740 }
741
742 early_param("reservelow", parse_reservelow);
743
744 static void __init trim_low_memory_range(void)
745 {
746 memblock_reserve(0, ALIGN(reserve_low, PAGE_SIZE));
747 }
748
749 /*
750 * Dump out kernel offset information on panic.
751 */
752 static int
753 dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p)
754 {
755 if (kaslr_enabled()) {
756 pr_emerg("Kernel Offset: 0x%lx from 0x%lx (relocation range: 0x%lx-0x%lx)\n",
757 kaslr_offset(),
758 __START_KERNEL,
759 __START_KERNEL_map,
760 MODULES_VADDR-1);
761 } else {
762 pr_emerg("Kernel Offset: disabled\n");
763 }
764
765 return 0;
766 }
767
768 /*
769 * Determine if we were loaded by an EFI loader. If so, then we have also been
770 * passed the efi memmap, systab, etc., so we should use these data structures
771 * for initialization. Note, the efi init code path is determined by the
772 * global efi_enabled. This allows the same kernel image to be used on existing
773 * systems (with a traditional BIOS) as well as on EFI systems.
774 */
775 /*
776 * setup_arch - architecture-specific boot-time initializations
777 *
778 * Note: On x86_64, fixmaps are ready for use even before this is called.
779 */
780
781 void __init setup_arch(char **cmdline_p)
782 {
783 /*
784 * Reserve the memory occupied by the kernel between _text and
785 * __end_of_kernel_reserve symbols. Any kernel sections after the
786 * __end_of_kernel_reserve symbol must be explicitly reserved with a
787 * separate memblock_reserve() or they will be discarded.
788 */
789 memblock_reserve(__pa_symbol(_text),
790 (unsigned long)__end_of_kernel_reserve - (unsigned long)_text);
791
792 /*
793 * Make sure page 0 is always reserved because on systems with
794 * L1TF its contents can be leaked to user processes.
795 */
796 memblock_reserve(0, PAGE_SIZE);
797
798 early_reserve_initrd();
799
800 /*
801 * At this point everything still needed from the boot loader
802 * or BIOS or kernel text should be early reserved or marked not
803 * RAM in e820. All other memory is free game.
804 */
805
806 #ifdef CONFIG_X86_32
807 memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data));
808
809 /*
810 * copy kernel address range established so far and switch
811 * to the proper swapper page table
812 */
813 clone_pgd_range(swapper_pg_dir + KERNEL_PGD_BOUNDARY,
814 initial_page_table + KERNEL_PGD_BOUNDARY,
815 KERNEL_PGD_PTRS);
816
817 load_cr3(swapper_pg_dir);
818 /*
819 * Note: Quark X1000 CPUs advertise PGE incorrectly and require
820 * a cr3 based tlb flush, so the following __flush_tlb_all()
821 * will not flush anything because the CPU quirk which clears
822 * X86_FEATURE_PGE has not been invoked yet. Though due to the
823 * load_cr3() above the TLB has been flushed already. The
824 * quirk is invoked before subsequent calls to __flush_tlb_all()
825 * so proper operation is guaranteed.
826 */
827 __flush_tlb_all();
828 #else
829 printk(KERN_INFO "Command line: %s\n", boot_command_line);
830 boot_cpu_data.x86_phys_bits = MAX_PHYSMEM_BITS;
831 #endif
832
833 /*
834 * If we have OLPC OFW, we might end up relocating the fixmap due to
835 * reserve_top(), so do this before touching the ioremap area.
836 */
837 olpc_ofw_detect();
838
839 idt_setup_early_traps();
840 early_cpu_init();
841 arch_init_ideal_nops();
842 jump_label_init();
843 static_call_init();
844 early_ioremap_init();
845
846 setup_olpc_ofw_pgd();
847
848 ROOT_DEV = old_decode_dev(boot_params.hdr.root_dev);
849 screen_info = boot_params.screen_info;
850 edid_info = boot_params.edid_info;
851 #ifdef CONFIG_X86_32
852 apm_info.bios = boot_params.apm_bios_info;
853 ist_info = boot_params.ist_info;
854 #endif
855 saved_video_mode = boot_params.hdr.vid_mode;
856 bootloader_type = boot_params.hdr.type_of_loader;
857 if ((bootloader_type >> 4) == 0xe) {
858 bootloader_type &= 0xf;
859 bootloader_type |= (boot_params.hdr.ext_loader_type+0x10) << 4;
860 }
861 bootloader_version = bootloader_type & 0xf;
862 bootloader_version |= boot_params.hdr.ext_loader_ver << 4;
863
864 #ifdef CONFIG_BLK_DEV_RAM
865 rd_image_start = boot_params.hdr.ram_size & RAMDISK_IMAGE_START_MASK;
866 #endif
867 #ifdef CONFIG_EFI
868 if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,
869 EFI32_LOADER_SIGNATURE, 4)) {
870 set_bit(EFI_BOOT, &efi.flags);
871 } else if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,
872 EFI64_LOADER_SIGNATURE, 4)) {
873 set_bit(EFI_BOOT, &efi.flags);
874 set_bit(EFI_64BIT, &efi.flags);
875 }
876 #endif
877
878 x86_init.oem.arch_setup();
879
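/*
 * Clamp the iomem resource tree to the CPU's physical address width
 * before the e820 map is parsed below.
 */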
880 iomem_resource.end = (1ULL << boot_cpu_data.x86_phys_bits) - 1;
881 e820__memory_setup();
882 parse_setup_data();
883
884 copy_edd();
885
886 if (!boot_params.hdr.root_flags)
887 root_mountflags &= ~MS_RDONLY;
888 init_mm.start_code = (unsigned long) _text;
889 init_mm.end_code = (unsigned long) _etext;
890 init_mm.end_data = (unsigned long) _edata;
891 init_mm.brk = _brk_end;
892
893 code_resource.start = __pa_symbol(_text);
894 code_resource.end = __pa_symbol(_etext)-1;
895 rodata_resource.start = __pa_symbol(__start_rodata);
896 rodata_resource.end = __pa_symbol(__end_rodata)-1;
897 data_resource.start = __pa_symbol(_sdata);
898 data_resource.end = __pa_symbol(_edata)-1;
899 bss_resource.start = __pa_symbol(__bss_start);
900 bss_resource.end = __pa_symbol(__bss_stop)-1;
901
902 #ifdef CONFIG_CMDLINE_BOOL
903 #ifdef CONFIG_CMDLINE_OVERRIDE
904 strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
905 #else
906 if (builtin_cmdline[0]) {
907 /* append boot loader cmdline to builtin */
908 strlcat(builtin_cmdline, " ", COMMAND_LINE_SIZE);
909 strlcat(builtin_cmdline, boot_command_line, COMMAND_LINE_SIZE);
910 strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
911 }
912 #endif
913 #endif
914
915 strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
916 *cmdline_p = command_line;
917
918 /*
919 * x86_configure_nx() is called before parse_early_param() to detect
920 * whether hardware doesn't support NX (so that the early EHCI debug
921 * console setup can safely call set_fixmap()). It may then be called
922 * again from within noexec_setup() during parsing early parameters
923 * to honor the respective command line option.
924 */
925 x86_configure_nx();
926
927 parse_early_param();
928
929 if (efi_enabled(EFI_BOOT))
930 efi_memblock_x86_reserve_range();
931 #ifdef CONFIG_MEMORY_HOTPLUG
932 /*
933 * Memory used by the kernel cannot be hot-removed because Linux
934 * cannot migrate the kernel pages. When memory hotplug is
935 * enabled, we should prevent memblock from allocating memory
936 * for the kernel.
937 *
938 * ACPI SRAT records all hotpluggable memory ranges. But before
939 * SRAT is parsed, we don't know about it.
940 *
941 * The kernel image is loaded into memory at very early time. We
942 * cannot prevent this anyway. So on NUMA system, we set any
943 * node the kernel resides in as un-hotpluggable.
944 *
945 * Since on modern servers, one node could have double-digit
946 * gigabytes of memory, we can assume the memory around the kernel
947 * image is also un-hotpluggable. So before SRAT is parsed, just
948 * allocate memory near the kernel image to do our best to keep
949 * the kernel away from hotpluggable memory.
950 */
951 if (movable_node_is_enabled())
952 memblock_set_bottom_up(true);
953 #endif
954
955 x86_report_nx();
956
957 /* after early param, so could get panic from serial */
958 memblock_x86_reserve_range_setup_data();
959
960 if (acpi_mps_check()) {
961 #ifdef CONFIG_X86_LOCAL_APIC
962 disable_apic = 1;
963 #endif
964 setup_clear_cpu_cap(X86_FEATURE_APIC);
965 }
966
967 e820__reserve_setup_data();
968 e820__finish_early_params();
969
970 if (efi_enabled(EFI_BOOT))
971 efi_init();
972
973 dmi_setup();
974
975 /*
976 * VMware detection requires dmi to be available, so this
977 * needs to be done after dmi_setup(), for the boot CPU.
978 */
979 init_hypervisor_platform();
980
981 tsc_early_init();
982 x86_init.resources.probe_roms();
983
984 /* after parse_early_param, so could debug it */
985 insert_resource(&iomem_resource, &code_resource);
986 insert_resource(&iomem_resource, &rodata_resource);
987 insert_resource(&iomem_resource, &data_resource);
988 insert_resource(&iomem_resource, &bss_resource);
989
990 e820_add_kernel_range();
991 trim_bios_range();
992 #ifdef CONFIG_X86_32
993 if (ppro_with_ram_bug()) {
994 e820__range_update(0x70000000ULL, 0x40000ULL, E820_TYPE_RAM,
995 E820_TYPE_RESERVED);
996 e820__update_table(e820_table);
997 printk(KERN_INFO "fixed physical RAM map:\n");
998 e820__print_table("bad_ppro");
999 }
1000 #else
1001 early_gart_iommu_check();
1002 #endif
1003
1004 /*
1005 * partially used pages are not usable - thus
1006 * we are rounding upwards:
1007 */
1008 max_pfn = e820__end_of_ram_pfn();
1009
1010 /* update e820 for memory not covered by WB MTRRs */
1011 mtrr_bp_init();
1012 if (mtrr_trim_uncached_memory(max_pfn))
1013 max_pfn = e820__end_of_ram_pfn();
1014
1015 max_possible_pfn = max_pfn;
1016
1017 /*
1018 * This call is required when the CPU does not support PAT. If
1019 * mtrr_bp_init() invoked it already via pat_init() the call has no
1020 * effect.
1021 */
1022 init_cache_modes();
1023
1024 /*
1025 * Define random base addresses for memory sections after max_pfn is
1026 * defined and before each memory section base is used.
1027 */
1028 kernel_randomize_memory();
1029
1030 #ifdef CONFIG_X86_32
1031 /* max_low_pfn gets updated here */
1032 find_low_pfn_range();
1033 #else
1034 check_x2apic();
1035
1036 /* How many end-of-memory variables you have, grandma! */
1037 /* need this before calling reserve_initrd */
1038 if (max_pfn > (1UL<<(32 - PAGE_SHIFT)))
1039 max_low_pfn = e820__end_of_low_ram_pfn();
1040 else
1041 max_low_pfn = max_pfn;
1042
1043 high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1;
1044 #endif
1045
1046 /*
1047 * Find and reserve possible boot-time SMP configuration:
1048 */
1049 find_smp_config();
1050
1051 reserve_ibft_region();
1052
1053 early_alloc_pgt_buf();
1054
1055 /*
1056 * Need to conclude brk before e820__memblock_setup():
1057 * it could use memblock_find_in_range(), which could overlap
1058 * with the brk area.
1059 */
1060 reserve_brk();
1061
1062 cleanup_highmap();
1063
1064 memblock_set_current_limit(ISA_END_ADDRESS);
1065 e820__memblock_setup();
1066
1067 reserve_bios_regions();
1068
1069 efi_fake_memmap();
1070 efi_find_mirror();
1071 efi_esrt_init();
1072 efi_mokvar_table_init();
1073
1074 /*
1075 * The EFI specification says that boot service code won't be
1076 * called after ExitBootServices(). This is, in fact, a lie.
1077 */
1078 efi_reserve_boot_services();
1079
1080 /* preallocate 4k for mptable mpc */
1081 e820__memblock_alloc_reserved_mpc_new();
1082
1083 #ifdef CONFIG_X86_CHECK_BIOS_CORRUPTION
1084 setup_bios_corruption_check();
1085 #endif
1086
1087 #ifdef CONFIG_X86_32
1088 printk(KERN_DEBUG "initial memory mapped: [mem 0x00000000-%#010lx]\n",
1089 (max_pfn_mapped<<PAGE_SHIFT) - 1);
1090 #endif
1091
1092 reserve_real_mode();
1093
1094 trim_platform_memory_ranges();
1095 trim_low_memory_range();
1096
1097 init_mem_mapping();
1098
1099 idt_setup_early_pf();
1100
1101 /*
1102 * Update mmu_cr4_features (and, indirectly, trampoline_cr4_features)
1103 * with the current CR4 value. This may not be necessary, but
1104 * auditing all the early-boot CR4 manipulation would be needed to
1105 * rule it out.
1106 *
1107 * Mask off features that don't work outside long mode (just
1108 * PCIDE for now).
1109 */
1110 mmu_cr4_features = __read_cr4() & ~X86_CR4_PCIDE;
1111
1112 memblock_set_current_limit(get_max_mapped());
1113
1114 /*
1115 * NOTE: On x86-32, only from this point on, fixmaps are ready for use.
1116 */
1117
1118 #ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
1119 if (init_ohci1394_dma_early)
1120 init_ohci1394_dma_on_all_controllers();
1121 #endif
1122 /* Allocate bigger log buffer */
1123 setup_log_buf(1);
1124
1125 if (efi_enabled(EFI_BOOT)) {
1126 switch (boot_params.secure_boot) {
1127 case efi_secureboot_mode_disabled:
1128 pr_info("Secure boot disabled\n");
1129 break;
1130 case efi_secureboot_mode_enabled:
1131 pr_info("Secure boot enabled\n");
1132 break;
1133 default:
1134 pr_info("Secure boot could not be determined\n");
1135 break;
1136 }
1137 }
1138
1139 reserve_initrd();
1140
1141 acpi_table_upgrade();
1142
1143 vsmp_init();
1144
1145 io_delay_init();
1146
1147 early_platform_quirks();
1148
1149 /*
1150 * Parse the ACPI tables for possible boot-time SMP configuration.
1151 */
1152 acpi_boot_table_init();
1153
1154 early_acpi_boot_init();
1155
1156 initmem_init();
1157 dma_contiguous_reserve(max_pfn_mapped << PAGE_SHIFT);
1158
1159 if (boot_cpu_has(X86_FEATURE_GBPAGES))
1160 hugetlb_cma_reserve(PUD_SHIFT - PAGE_SHIFT);
1161
1162 /*
1163 * Reserve memory for crash kernel after SRAT is parsed so that it
1164 * won't consume hotpluggable memory.
1165 */
1166 reserve_crashkernel();
1167
1168 memblock_find_dma_reserve();
1169
1170 if (!early_xdbc_setup_hardware())
1171 early_xdbc_register_console();
1172
1173 x86_init.paging.pagetable_init();
1174
1175 kasan_init();
1176
1177 /*
1178 * Sync back kernel address range.
1179 *
1180 * FIXME: Can the later sync in setup_cpu_entry_areas() replace
1181 * this call?
1182 */
1183 sync_initial_page_table();
1184
1185 tboot_probe();
1186
1187 map_vsyscall();
1188
1189 generic_apic_probe();
1190
1191 early_quirks();
1192
1193 /*
1194 * Read APIC and some other early information from ACPI tables.
1195 */
1196 acpi_boot_init();
1197 sfi_init();
1198 x86_dtb_init();
1199
1200 /*
1201 * get boot-time SMP configuration:
1202 */
1203 get_smp_config();
1204
1205 /*
1206 * Systems w/o ACPI and mptables might not have the local APIC mapped
1207 * yet, but prefill_possible_map() might need to access it.
1208 */
1209 init_apic_mappings();
1210
1211 prefill_possible_map();
1212
1213 init_cpu_to_node();
1214
1215 io_apic_init_mappings();
1216
1217 x86_init.hyper.guest_late_init();
1218
1219 e820__reserve_resources();
1220 e820__register_nosave_regions(max_pfn);
1221
1222 x86_init.resources.reserve_resources();
1223
1224 e820__setup_pci_gap();
1225
1226 #ifdef CONFIG_VT
1227 #if defined(CONFIG_VGA_CONSOLE)
1228 if (!efi_enabled(EFI_BOOT) || (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY))
1229 conswitchp = &vga_con;
1230 #endif
1231 #endif
1232 x86_init.oem.banner();
1233
1234 x86_init.timers.wallclock_init();
1235
1236 mcheck_init();
1237
1238 register_refined_jiffies(CLOCK_TICK_RATE);
1239
1240 #ifdef CONFIG_EFI
1241 if (efi_enabled(EFI_BOOT))
1242 efi_apply_memmap_quirks();
1243 #endif
1244
1245 unwind_init();
1246 }
1247
1248 #ifdef CONFIG_X86_32
1249
1250 static struct resource video_ram_resource = {
1251 .name = "Video RAM area",
1252 .start = 0xa0000,
1253 .end = 0xbffff,
1254 .flags = IORESOURCE_BUSY | IORESOURCE_MEM
1255 };
1256
1257 void __init i386_reserve_resources(void)
1258 {
1259 request_resource(&iomem_resource, &video_ram_resource);
1260 reserve_standard_io_resources();
1261 }
1262
1263 #endif /* CONFIG_X86_32 */
1264
1265 static struct notifier_block kernel_offset_notifier = {
1266 .notifier_call = dump_kernel_offset
1267 };
1268
1269 static int __init register_kernel_offset_dumper(void)
1270 {
1271 atomic_notifier_chain_register(&panic_notifier_list,
1272 &kernel_offset_notifier);
1273 return 0;
1274 }
1275 __initcall(register_kernel_offset_dumper);