// SPDX-License-Identifier: GPL-2.0
/*
 *  prepare to run common code
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 */

#define DISABLE_BRANCH_PROFILING

/* cpu_feature_enabled() cannot be used this early */
#define USE_EARLY_PGTABLE_L5

#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/percpu.h>
#include <linux/start_kernel.h>
#include <linux/io.h>
#include <linux/memblock.h>
#include <linux/mem_encrypt.h>
#include <linux/pgtable.h>

#include <asm/processor.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/setup.h>
#include <asm/desc.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/kdebug.h>
#include <asm/e820/api.h>
#include <asm/bios_ebda.h>
#include <asm/bootparam_utils.h>
#include <asm/microcode.h>
#include <asm/kasan.h>
#include <asm/fixmap.h>
#include <asm/realmode.h>
#include <asm/extable.h>
#include <asm/trapnr.h>
#include <asm/sev-es.h>

/*
 * Manage page tables very early on.
 */
extern pmd_t early_dynamic_pgts[EARLY_DYNAMIC_PAGE_TABLES][PTRS_PER_PMD];
static unsigned int __initdata next_early_pgt;
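
/*
 * Flags used for the PMD entries created on demand by early_make_pgtable().
 * Note that _PAGE_GLOBAL and _PAGE_NX are cleared: these mappings are
 * temporary, and NX cannot be relied upon this early in boot.
 */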
pmdval_t early_pmd_flags = __PAGE_KERNEL_LARGE & ~(_PAGE_GLOBAL | _PAGE_NX);

#ifdef CONFIG_X86_5LEVEL
unsigned int __pgtable_l5_enabled __ro_after_init;
unsigned int pgdir_shift __ro_after_init = 39;
EXPORT_SYMBOL(pgdir_shift);
unsigned int ptrs_per_p4d __ro_after_init = 1;
EXPORT_SYMBOL(ptrs_per_p4d);
#endif

#ifdef CONFIG_DYNAMIC_MEMORY_LAYOUT
unsigned long page_offset_base __ro_after_init = __PAGE_OFFSET_BASE_L4;
EXPORT_SYMBOL(page_offset_base);
unsigned long vmalloc_base __ro_after_init = __VMALLOC_BASE_L4;
EXPORT_SYMBOL(vmalloc_base);
unsigned long vmemmap_base __ro_after_init = __VMEMMAP_BASE_L4;
EXPORT_SYMBOL(vmemmap_base);
#endif

/*
 * GDT used on the boot CPU before switching to virtual addresses.
 */
static struct desc_struct startup_gdt[GDT_ENTRIES] = {
	[GDT_ENTRY_KERNEL32_CS]         = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
	[GDT_ENTRY_KERNEL_CS]           = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
	[GDT_ENTRY_KERNEL_DS]           = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
};

/*
 * Address needs to be set at runtime because it references the startup_gdt
 * while the kernel still uses a direct mapping.
 */
static struct desc_ptr startup_gdt_descr = {
	.size = sizeof(startup_gdt),
	.address = 0,
};

#define __head	__section(".head.text")
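
/*
 * Helpers used while the kernel still runs from its physical load address:
 * they rebase the link-time (virtual) address of a global object onto the
 * current physical location of the image so it can be dereferenced before
 * the switch to the final kernel mapping.
 */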
static void __head *fixup_pointer(void *ptr, unsigned long physaddr)
{
	return ptr - (void *)_text + (void *)physaddr;
}

static unsigned long __head *fixup_long(void *ptr, unsigned long physaddr)
{
	return fixup_pointer(ptr, physaddr);
}

#ifdef CONFIG_X86_5LEVEL
static unsigned int __head *fixup_int(void *ptr, unsigned long physaddr)
{
	return fixup_pointer(ptr, physaddr);
}

static bool __head check_la57_support(unsigned long physaddr)
{
	/*
	 * 5-level paging is detected and enabled at kernel decompression
	 * stage. Only check if it has been enabled there.
	 */
	if (!(native_read_cr4() & X86_CR4_LA57))
		return false;

	*fixup_int(&__pgtable_l5_enabled, physaddr) = 1;
	*fixup_int(&pgdir_shift, physaddr) = 48;
	*fixup_int(&ptrs_per_p4d, physaddr) = 512;
	*fixup_long(&page_offset_base, physaddr) = __PAGE_OFFSET_BASE_L5;
	*fixup_long(&vmalloc_base, physaddr) = __VMALLOC_BASE_L5;
	*fixup_long(&vmemmap_base, physaddr) = __VMEMMAP_BASE_L5;

	return true;
}
#else
static bool __head check_la57_support(unsigned long physaddr)
{
	return false;
}
#endif

/* Code in __startup_64() can be relocated during execution, but the compiler
 * doesn't have to generate PC-relative relocations when accessing globals from
 * that function. Clang actually does not generate them, which leads to
 * boot-time crashes. To work around this problem, every global pointer must
 * be adjusted using fixup_pointer().
 */
unsigned long __head __startup_64(unsigned long physaddr,
				  struct boot_params *bp)
{
	unsigned long vaddr, vaddr_end;
	unsigned long load_delta, *p;
	unsigned long pgtable_flags;
	pgdval_t *pgd;
	p4dval_t *p4d;
	pudval_t *pud;
	pmdval_t *pmd, pmd_entry;
	pteval_t *mask_ptr;
	bool la57;
	int i;
	unsigned int *next_pgt_ptr;

	la57 = check_la57_support(physaddr);

	/* Is the address too large? */
	if (physaddr >> MAX_PHYSMEM_BITS)
		for (;;);

	/*
	 * Compute the delta between the address I am compiled to run at
	 * and the address I am actually running at.
	 */
	load_delta = physaddr - (unsigned long)(_text - __START_KERNEL_map);

	/* Is the address not 2M aligned? */
	if (load_delta & ~PMD_PAGE_MASK)
		for (;;);

	/* Activate Secure Memory Encryption (SME) if supported and enabled */
	sme_enable(bp);

	/* Include the SME encryption mask in the fixup value */
	load_delta += sme_get_me_mask();

	/* Fixup the physical addresses in the page table */

	pgd = fixup_pointer(&early_top_pgt, physaddr);
	p = pgd + pgd_index(__START_KERNEL_map);

	if (la57)
		*p = (unsigned long)level4_kernel_pgt;
	else
		*p = (unsigned long)level3_kernel_pgt;
	*p += _PAGE_TABLE_NOENC - __START_KERNEL_map + load_delta;

	if (la57) {
		p4d = fixup_pointer(&level4_kernel_pgt, physaddr);
		p4d[511] += load_delta;
	}

	pud = fixup_pointer(&level3_kernel_pgt, physaddr);
	pud[510] += load_delta;
	pud[511] += load_delta;

	pmd = fixup_pointer(level2_fixmap_pgt, physaddr);
	for (i = FIXMAP_PMD_TOP; i > FIXMAP_PMD_TOP - FIXMAP_PMD_NUM; i--)
		pmd[i] += load_delta;

	/*
	 * Set up the identity mapping for the switchover.  These
	 * entries should *NOT* have the global bit set!  This also
	 * creates a bunch of nonsense entries but that is fine --
	 * it avoids problems around wraparound.
	 */

	next_pgt_ptr = fixup_pointer(&next_early_pgt, physaddr);
	pud = fixup_pointer(early_dynamic_pgts[(*next_pgt_ptr)++], physaddr);
	pmd = fixup_pointer(early_dynamic_pgts[(*next_pgt_ptr)++], physaddr);

	pgtable_flags = _KERNPG_TABLE_NOENC + sme_get_me_mask();
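
	/*
	 * Two consecutive entries are installed at each level below, so the
	 * temporary identity mapping still covers the whole image even if it
	 * straddles a PGD/P4D/PUD boundary.
	 */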
	if (la57) {
		p4d = fixup_pointer(early_dynamic_pgts[(*next_pgt_ptr)++],
				    physaddr);

		i = (physaddr >> PGDIR_SHIFT) % PTRS_PER_PGD;
		pgd[i + 0] = (pgdval_t)p4d + pgtable_flags;
		pgd[i + 1] = (pgdval_t)p4d + pgtable_flags;

		i = physaddr >> P4D_SHIFT;
		p4d[(i + 0) % PTRS_PER_P4D] = (pgdval_t)pud + pgtable_flags;
		p4d[(i + 1) % PTRS_PER_P4D] = (pgdval_t)pud + pgtable_flags;
	} else {
		i = (physaddr >> PGDIR_SHIFT) % PTRS_PER_PGD;
		pgd[i + 0] = (pgdval_t)pud + pgtable_flags;
		pgd[i + 1] = (pgdval_t)pud + pgtable_flags;
	}

	i = physaddr >> PUD_SHIFT;
	pud[(i + 0) % PTRS_PER_PUD] = (pudval_t)pmd + pgtable_flags;
	pud[(i + 1) % PTRS_PER_PUD] = (pudval_t)pmd + pgtable_flags;

	pmd_entry = __PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL;
	/* Filter out unsupported __PAGE_KERNEL_* bits: */
	mask_ptr = fixup_pointer(&__supported_pte_mask, physaddr);
	pmd_entry &= *mask_ptr;
	pmd_entry += sme_get_me_mask();
	pmd_entry += physaddr;
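
	/* Cover the whole [_text, _end) image range with 2M pages at its physical location. */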
	for (i = 0; i < DIV_ROUND_UP(_end - _text, PMD_SIZE); i++) {
		int idx = i + (physaddr >> PMD_SHIFT);

		pmd[idx % PTRS_PER_PMD] = pmd_entry + i * PMD_SIZE;
	}

	/*
	 * Fixup the kernel text+data virtual addresses. Note that
	 * we might write invalid pmds, when the kernel is relocated
	 * cleanup_highmap() fixes this up along with the mappings
	 * beyond _end.
	 *
	 * Only the region occupied by the kernel image has so far
	 * been checked against the table of usable memory regions
	 * provided by the firmware, so invalidate pages outside that
	 * region. A page table entry that maps to a reserved area of
	 * memory would allow processor speculation into that area,
	 * and on some hardware (particularly the UV platform) even
	 * speculative access to some reserved areas is caught as an
	 * error, causing the BIOS to halt the system.
	 */

	pmd = fixup_pointer(level2_kernel_pgt, physaddr);

	/* invalidate pages before the kernel image */
	for (i = 0; i < pmd_index((unsigned long)_text); i++)
		pmd[i] &= ~_PAGE_PRESENT;

	/* fixup pages that are part of the kernel image */
	for (; i <= pmd_index((unsigned long)_end); i++)
		if (pmd[i] & _PAGE_PRESENT)
			pmd[i] += load_delta;

	/* invalidate pages after the kernel image */
	for (; i < PTRS_PER_PMD; i++)
		pmd[i] &= ~_PAGE_PRESENT;

	/*
	 * Fixup phys_base - remove the memory encryption mask to obtain
	 * the true physical address.
	 */
	*fixup_long(&phys_base, physaddr) += load_delta - sme_get_me_mask();

	/* Encrypt the kernel and related (if SME is active) */
	sme_encrypt_kernel(bp);

	/*
	 * Clear the memory encryption mask from the .bss..decrypted section.
	 * The bss section will be memset to zero later in the initialization so
	 * there is no need to zero it after changing the memory encryption
	 * attribute.
	 */
	if (mem_encrypt_active()) {
		vaddr = (unsigned long)__start_bss_decrypted;
		vaddr_end = (unsigned long)__end_bss_decrypted;
		for (; vaddr < vaddr_end; vaddr += PMD_SIZE) {
			i = pmd_index(vaddr);
			pmd[i] -= sme_get_me_mask();
		}
	}

	/*
	 * Return the SME encryption mask (if SME is active) to be used as a
	 * modifier for the initial pgdir entry programmed into CR3.
	 */
	return sme_get_me_mask();
}

unsigned long __startup_secondary_64(void)
{
	/*
	 * Return the SME encryption mask (if SME is active) to be used as a
	 * modifier for the initial pgdir entry programmed into CR3.
	 */
	return sme_get_me_mask();
}
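
/*
 * From here on the kernel runs at its virtual addresses. The early page
 * tables only cover the kernel image itself; any other access faults, and
 * the early #PF handler fills in the missing mapping on demand using the
 * helpers below.
 */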

/* Wipe all early page tables except for the kernel symbol map */
static void __init reset_early_page_tables(void)
{
	memset(early_top_pgt, 0, sizeof(pgd_t)*(PTRS_PER_PGD-1));
	next_early_pgt = 0;
	write_cr3(__sme_pa_nodebug(early_top_pgt));
}

/* Create a new PMD entry */
bool __init __early_make_pgtable(unsigned long address, pmdval_t pmd)
{
	unsigned long physaddr = address - __PAGE_OFFSET;
	pgdval_t pgd, *pgd_p;
	p4dval_t p4d, *p4d_p;
	pudval_t pud, *pud_p;
	pmdval_t *pmd_p;

	/* Invalid address or early pgt is done ? */
	if (physaddr >= MAXMEM || read_cr3_pa() != __pa_nodebug(early_top_pgt))
		return false;
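
	/*
	 * Walk the paging hierarchy for 'address', allocating any missing
	 * intermediate table from early_dynamic_pgts[], and finally install
	 * the new PMD entry. If the small pool of early page tables runs
	 * out, everything is wiped and the walk restarts.
	 */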
again:
	pgd_p = &early_top_pgt[pgd_index(address)].pgd;
	pgd = *pgd_p;

	/*
	 * The use of __START_KERNEL_map rather than __PAGE_OFFSET here is
	 * critical -- __PAGE_OFFSET would point us back into the dynamic
	 * range and we might end up looping forever...
	 */
	if (!pgtable_l5_enabled())
		p4d_p = pgd_p;
	else if (pgd)
		p4d_p = (p4dval_t *)((pgd & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
	else {
		if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
			reset_early_page_tables();
			goto again;
		}

		p4d_p = (p4dval_t *)early_dynamic_pgts[next_early_pgt++];
		memset(p4d_p, 0, sizeof(*p4d_p) * PTRS_PER_P4D);
		*pgd_p = (pgdval_t)p4d_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
	}
	p4d_p += p4d_index(address);
	p4d = *p4d_p;

	if (p4d)
		pud_p = (pudval_t *)((p4d & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
	else {
		if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
			reset_early_page_tables();
			goto again;
		}

		pud_p = (pudval_t *)early_dynamic_pgts[next_early_pgt++];
		memset(pud_p, 0, sizeof(*pud_p) * PTRS_PER_PUD);
		*p4d_p = (p4dval_t)pud_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
	}
	pud_p += pud_index(address);
	pud = *pud_p;

	if (pud)
		pmd_p = (pmdval_t *)((pud & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
	else {
		if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
			reset_early_page_tables();
			goto again;
		}

		pmd_p = (pmdval_t *)early_dynamic_pgts[next_early_pgt++];
		memset(pmd_p, 0, sizeof(*pmd_p) * PTRS_PER_PMD);
		*pud_p = (pudval_t)pmd_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
	}
	pmd_p[pmd_index(address)] = pmd;

	return true;
}

static bool __init early_make_pgtable(unsigned long address)
{
	unsigned long physaddr = address - __PAGE_OFFSET;
	pmdval_t pmd;

	pmd = (physaddr & PMD_MASK) + early_pmd_flags;

	return __early_make_pgtable(address, pmd);
}
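
/*
 * Common handler for the exceptions routed here from the early IDT: an early
 * page fault is satisfied by creating the missing 2M mapping on demand; with
 * SEV-ES, a #VC exception is forwarded to the boot GHCB handler; anything
 * else goes through the early exception fixup path.
 */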
void __init do_early_exception(struct pt_regs *regs, int trapnr)
{
	if (trapnr == X86_TRAP_PF &&
	    early_make_pgtable(native_read_cr2()))
		return;

	if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT) &&
	    trapnr == X86_TRAP_VC && handle_vc_boot_ghcb(regs))
		return;

	early_fixup_exception(regs, trapnr);
}

/* Don't add a printk in there. printk relies on the PDA which is not
   initialized yet. */
static void __init clear_bss(void)
{
	memset(__bss_start, 0,
	       (unsigned long) __bss_stop - (unsigned long) __bss_start);
}
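
/*
 * The boot command line pointer is split across the legacy 32-bit
 * hdr.cmd_line_ptr field and ext_cmd_line_ptr, which carries the upper
 * 32 bits; combine them into a single 64-bit address.
 */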
static unsigned long get_cmd_line_ptr(void)
{
	unsigned long cmd_line_ptr = boot_params.hdr.cmd_line_ptr;

	cmd_line_ptr |= (u64)boot_params.ext_cmd_line_ptr << 32;

	return cmd_line_ptr;
}

static void __init copy_bootdata(char *real_mode_data)
{
	char *command_line;
	unsigned long cmd_line_ptr;

	/*
	 * If SME is active, this will create decrypted mappings of the
	 * boot data in advance of the copy operations.
	 */
	sme_map_bootdata(real_mode_data);

	memcpy(&boot_params, real_mode_data, sizeof(boot_params));
	sanitize_boot_params(&boot_params);
	cmd_line_ptr = get_cmd_line_ptr();
	if (cmd_line_ptr) {
		command_line = __va(cmd_line_ptr);
		memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
	}

	/*
	 * The old boot data is no longer needed and won't be reserved,
	 * freeing up that memory for use by the system. If SME is active,
	 * we need to remove the mappings that were created so that the
	 * memory doesn't remain mapped as decrypted.
	 */
	sme_unmap_bootdata(real_mode_data);
}

asmlinkage __visible void __init x86_64_start_kernel(char *real_mode_data)
{
	/*
	 * Build-time sanity checks on the kernel image and module
	 * area mappings. (these are purely build-time and produce no code)
	 */
	BUILD_BUG_ON(MODULES_VADDR < __START_KERNEL_map);
	BUILD_BUG_ON(MODULES_VADDR - __START_KERNEL_map < KERNEL_IMAGE_SIZE);
	BUILD_BUG_ON(MODULES_LEN + KERNEL_IMAGE_SIZE > 2*PUD_SIZE);
	BUILD_BUG_ON((__START_KERNEL_map & ~PMD_MASK) != 0);
	BUILD_BUG_ON((MODULES_VADDR & ~PMD_MASK) != 0);
	BUILD_BUG_ON(!(MODULES_VADDR > __START_KERNEL));
	MAYBE_BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) ==
				(__START_KERNEL & PGDIR_MASK)));
	BUILD_BUG_ON(__fix_to_virt(__end_of_fixed_addresses) <= MODULES_END);

	cr4_init_shadow();

	/* Kill off the identity-map trampoline */
	reset_early_page_tables();

	clear_bss();

	clear_page(init_top_pgt);

	/*
	 * SME support may update early_pmd_flags to include the memory
	 * encryption mask, so it needs to be called before anything
	 * that may generate a page fault.
	 */
	sme_early_init();

	kasan_early_init();

	idt_setup_early_handler();

	copy_bootdata(__va(real_mode_data));

	/*
	 * Load microcode early on BSP.
	 */
	load_ucode_bsp();

	/* set init_top_pgt kernel high mapping */
	init_top_pgt[511] = early_top_pgt[511];

	x86_64_start_reservations(real_mode_data);
}

void __init x86_64_start_reservations(char *real_mode_data)
{
	/* version is always not zero if it is copied */
	if (!boot_params.hdr.version)
		copy_bootdata(__va(real_mode_data));

	x86_early_init_platform_quirks();

	switch (boot_params.hdr.hardware_subarch) {
	case X86_SUBARCH_INTEL_MID:
		x86_intel_mid_early_setup();
		break;
	default:
		break;
	}

	start_kernel();
}

/*
 * Data structures and code used for IDT setup in head_64.S. The bringup-IDT is
 * used until the idt_table takes over. On the boot CPU this happens in
 * x86_64_start_kernel(), on secondary CPUs in start_secondary(). In both cases
 * this happens in the functions called from head_64.S.
 *
 * The idt_table can't be used that early because all the code modifying it is
 * in idt.c and can be instrumented by tracing or KASAN, which both don't work
 * during early CPU bringup. Also the idt_table has the runtime vectors
 * configured which require certain CPU state to be setup already (like TSS),
 * which also hasn't happened yet in early CPU bringup.
 */
static gate_desc bringup_idt_table[NUM_EXCEPTION_VECTORS] __page_aligned_data;

static struct desc_ptr bringup_idt_descr = {
	.size		= (NUM_EXCEPTION_VECTORS * sizeof(gate_desc)) - 1,
	.address	= 0, /* Set at runtime */
};

static void set_bringup_idt_handler(gate_desc *idt, int n, void *handler)
{
#ifdef CONFIG_AMD_MEM_ENCRYPT
	struct idt_data data;
	gate_desc desc;

	init_idt_data(&data, n, handler);
	idt_init_desc(&desc, &data);
	native_write_idt_entry(idt, n, &desc);
#endif
}

/* This runs while still in the direct mapping */
static void startup_64_load_idt(unsigned long physbase)
{
	struct desc_ptr *desc = fixup_pointer(&bringup_idt_descr, physbase);
	gate_desc *idt = fixup_pointer(bringup_idt_table, physbase);

	if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT)) {
		void *handler;

		/* VMM Communication Exception */
		handler = fixup_pointer(vc_no_ghcb, physbase);
		set_bringup_idt_handler(idt, X86_TRAP_VC, handler);
	}

	desc->address = (unsigned long)idt;
	native_load_idt(desc);
}

/* This is used when running on kernel addresses */
void early_setup_idt(void)
{
	/* VMM Communication Exception */
	if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT))
		set_bringup_idt_handler(bringup_idt_table, X86_TRAP_VC, vc_boot_ghcb);

	bringup_idt_descr.address = (unsigned long)bringup_idt_table;
	native_load_idt(&bringup_idt_descr);
}

/*
 * Setup boot CPU state needed before kernel switches to virtual addresses.
 */
void __head startup_64_setup_env(unsigned long physbase)
{
	/* Load GDT */
	startup_gdt_descr.address = (unsigned long)fixup_pointer(startup_gdt, physbase);
	native_load_gdt(&startup_gdt_descr);

	/* New GDT is live - reload data segment registers */
	asm volatile("movl %%eax, %%ds\n"
		     "movl %%eax, %%ss\n"
		     "movl %%eax, %%es\n" : : "a"(__KERNEL_DS) : "memory");

	startup_64_load_idt(physbase);
}