// SPDX-License-Identifier: GPL-2.0
/*
 *  prepare to run common code
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 */

#define DISABLE_BRANCH_PROFILING

/* cpu_feature_enabled() cannot be used this early */
#define USE_EARLY_PGTABLE_L5

#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/percpu.h>
#include <linux/start_kernel.h>
#include <linux/io.h>
#include <linux/memblock.h>
#include <linux/mem_encrypt.h>
#include <linux/pgtable.h>

#include <asm/processor.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/setup.h>
#include <asm/desc.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/kdebug.h>
#include <asm/e820/api.h>
#include <asm/bios_ebda.h>
#include <asm/bootparam_utils.h>
#include <asm/microcode.h>
#include <asm/kasan.h>
#include <asm/fixmap.h>
#include <asm/realmode.h>
#include <asm/extable.h>
#include <asm/trapnr.h>
#include <asm/sev.h>

/*
 * Manage page tables very early on.
 */
extern pmd_t early_dynamic_pgts[EARLY_DYNAMIC_PAGE_TABLES][PTRS_PER_PMD];
static unsigned int __initdata next_early_pgt;
pmdval_t early_pmd_flags = __PAGE_KERNEL_LARGE & ~(_PAGE_GLOBAL | _PAGE_NX);

#ifdef CONFIG_X86_5LEVEL
unsigned int __pgtable_l5_enabled __ro_after_init;
unsigned int pgdir_shift __ro_after_init = 39;
EXPORT_SYMBOL(pgdir_shift);
unsigned int ptrs_per_p4d __ro_after_init = 1;
EXPORT_SYMBOL(ptrs_per_p4d);
#endif

#ifdef CONFIG_DYNAMIC_MEMORY_LAYOUT
unsigned long page_offset_base __ro_after_init = __PAGE_OFFSET_BASE_L4;
EXPORT_SYMBOL(page_offset_base);
unsigned long vmalloc_base __ro_after_init = __VMALLOC_BASE_L4;
EXPORT_SYMBOL(vmalloc_base);
unsigned long vmemmap_base __ro_after_init = __VMEMMAP_BASE_L4;
EXPORT_SYMBOL(vmemmap_base);
#endif

/*
 * GDT used on the boot CPU before switching to virtual addresses.
 */
static struct desc_struct startup_gdt[GDT_ENTRIES] = {
	[GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
	[GDT_ENTRY_KERNEL_CS]   = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
	[GDT_ENTRY_KERNEL_DS]   = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
};

/*
 * Address needs to be set at runtime because it references the startup_gdt
 * while the kernel still uses a direct mapping.
 */
static struct desc_ptr startup_gdt_descr = {
	.size = sizeof(startup_gdt),
	.address = 0,
};

#define __head	__section(".head.text")

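/*
 * fixup_pointer() rebases a global's link-time address into the physical
 * window where the image currently sits; the kernel's high virtual mapping
 * is not usable yet at this point. Illustrative example (values are
 * hypothetical): with the image loaded at physical address 32M, a global
 * linked at _text + 0x100 is rebased to 32M + 0x100.
 */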
static void __head *fixup_pointer(void *ptr, unsigned long physaddr)
{
	return ptr - (void *)_text + (void *)physaddr;
}

static unsigned long __head *fixup_long(void *ptr, unsigned long physaddr)
{
	return fixup_pointer(ptr, physaddr);
}

#ifdef CONFIG_X86_5LEVEL
static unsigned int __head *fixup_int(void *ptr, unsigned long physaddr)
{
	return fixup_pointer(ptr, physaddr);
}

static bool __head check_la57_support(unsigned long physaddr)
{
	/*
	 * 5-level paging is detected and enabled at kernel decompression
	 * stage. Only check if it has been enabled there.
	 */
	if (!(native_read_cr4() & X86_CR4_LA57))
		return false;

	*fixup_int(&__pgtable_l5_enabled, physaddr) = 1;
	*fixup_int(&pgdir_shift, physaddr) = 48;
	*fixup_int(&ptrs_per_p4d, physaddr) = 512;
	*fixup_long(&page_offset_base, physaddr) = __PAGE_OFFSET_BASE_L5;
	*fixup_long(&vmalloc_base, physaddr) = __VMALLOC_BASE_L5;
	*fixup_long(&vmemmap_base, physaddr) = __VMEMMAP_BASE_L5;

	return true;
}
#else
static bool __head check_la57_support(unsigned long physaddr)
{
	return false;
}
#endif
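
/*
 * When LA57 is enabled, the fixups above switch the paging geometry from
 * the 4-level defaults (pgdir_shift == 39, ptrs_per_p4d == 1) to the
 * 5-level values (48 and 512) and move page_offset_base, vmalloc_base and
 * vmemmap_base to their 5-level locations.
 */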

/* Code in __startup_64() can be relocated during execution, but the compiler
 * doesn't have to generate PC-relative relocations when accessing globals from
 * that function. Clang actually does not generate them, which leads to
 * boot-time crashes. To work around this problem, every global pointer must
 * be adjusted using fixup_pointer().
 */
unsigned long __head __startup_64(unsigned long physaddr,
				  struct boot_params *bp)
{
	unsigned long vaddr, vaddr_end;
	unsigned long load_delta, *p;
	unsigned long pgtable_flags;
	pgdval_t *pgd;
	p4dval_t *p4d;
	pudval_t *pud;
	pmdval_t *pmd, pmd_entry;
	pteval_t *mask_ptr;
	bool la57;
	int i;
	unsigned int *next_pgt_ptr;

	la57 = check_la57_support(physaddr);

	/* Is the address too large? */
	if (physaddr >> MAX_PHYSMEM_BITS)
		for (;;);

	/*
	 * Compute the delta between the address I am compiled to run at
	 * and the address I am actually running at.
	 */
	load_delta = physaddr - (unsigned long)(_text - __START_KERNEL_map);

	/* Is the address not 2M aligned? */
	if (load_delta & ~PMD_PAGE_MASK)
		for (;;);

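	/*
	 * Illustrative example: if _text was linked to run at
	 * __START_KERNEL_map + 16M but the image actually sits at
	 * physical 32M, load_delta is 16M (plus the SME mask added
	 * below, when active), and every page-table entry that was
	 * populated with a link-time physical address must be shifted
	 * by that amount.
	 */
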
	/* Activate Secure Memory Encryption (SME) if supported and enabled */
	sme_enable(bp);

	/* Include the SME encryption mask in the fixup value */
	load_delta += sme_get_me_mask();

	/* Fixup the physical addresses in the page table */

	pgd = fixup_pointer(&early_top_pgt, physaddr);
	p = pgd + pgd_index(__START_KERNEL_map);
	if (la57)
		*p = (unsigned long)level4_kernel_pgt;
	else
		*p = (unsigned long)level3_kernel_pgt;
	*p += _PAGE_TABLE_NOENC - __START_KERNEL_map + load_delta;

	if (la57) {
		p4d = fixup_pointer(&level4_kernel_pgt, physaddr);
		p4d[511] += load_delta;
	}

	pud = fixup_pointer(&level3_kernel_pgt, physaddr);
	pud[510] += load_delta;
	pud[511] += load_delta;

	pmd = fixup_pointer(level2_fixmap_pgt, physaddr);
	for (i = FIXMAP_PMD_TOP; i > FIXMAP_PMD_TOP - FIXMAP_PMD_NUM; i--)
		pmd[i] += load_delta;

	/*
	 * Set up the identity mapping for the switchover.  These
	 * entries should *NOT* have the global bit set!  This also
	 * creates a bunch of nonsense entries but that is fine --
	 * it avoids problems around wraparound.
	 */

	next_pgt_ptr = fixup_pointer(&next_early_pgt, physaddr);
	pud = fixup_pointer(early_dynamic_pgts[(*next_pgt_ptr)++], physaddr);
	pmd = fixup_pointer(early_dynamic_pgts[(*next_pgt_ptr)++], physaddr);

	pgtable_flags = _KERNPG_TABLE_NOENC + sme_get_me_mask();

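	/*
	 * Note that two consecutive entries are populated at each level
	 * below, which keeps the identity mapping intact even if the
	 * image happens to straddle an entry boundary at that level.
	 */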
	if (la57) {
		p4d = fixup_pointer(early_dynamic_pgts[(*next_pgt_ptr)++],
				    physaddr);

		i = (physaddr >> PGDIR_SHIFT) % PTRS_PER_PGD;
		pgd[i + 0] = (pgdval_t)p4d + pgtable_flags;
		pgd[i + 1] = (pgdval_t)p4d + pgtable_flags;

		i = physaddr >> P4D_SHIFT;
		p4d[(i + 0) % PTRS_PER_P4D] = (pgdval_t)pud + pgtable_flags;
		p4d[(i + 1) % PTRS_PER_P4D] = (pgdval_t)pud + pgtable_flags;
	} else {
		i = (physaddr >> PGDIR_SHIFT) % PTRS_PER_PGD;
		pgd[i + 0] = (pgdval_t)pud + pgtable_flags;
		pgd[i + 1] = (pgdval_t)pud + pgtable_flags;
	}

	i = physaddr >> PUD_SHIFT;
	pud[(i + 0) % PTRS_PER_PUD] = (pudval_t)pmd + pgtable_flags;
	pud[(i + 1) % PTRS_PER_PUD] = (pudval_t)pmd + pgtable_flags;

	pmd_entry = __PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL;
	/* Filter out unsupported __PAGE_KERNEL_* bits: */
	mask_ptr = fixup_pointer(&__supported_pte_mask, physaddr);
	pmd_entry &= *mask_ptr;
	pmd_entry += sme_get_me_mask();
	pmd_entry += physaddr;

	for (i = 0; i < DIV_ROUND_UP(_end - _text, PMD_SIZE); i++) {
		int idx = i + (physaddr >> PMD_SHIFT);

		pmd[idx % PTRS_PER_PMD] = pmd_entry + i * PMD_SIZE;
	}
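
	/*
	 * The loop above identity-maps the [_text, _end) range with 2M
	 * pages: each iteration installs one PMD entry pointing at the
	 * physical 2M page that part of the image currently occupies.
	 */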

	/*
	 * Fixup the kernel text+data virtual addresses. Note that
	 * we might write invalid pmds when the kernel is relocated;
	 * cleanup_highmap() fixes this up along with the mappings
	 * beyond _end.
	 *
	 * Only the region occupied by the kernel image has so far
	 * been checked against the table of usable memory regions
	 * provided by the firmware, so invalidate pages outside that
	 * region. A page table entry that maps to a reserved area of
	 * memory would allow processor speculation into that area,
	 * and on some hardware (particularly the UV platform) even
	 * speculative access to some reserved areas is caught as an
	 * error, causing the BIOS to halt the system.
	 */

	pmd = fixup_pointer(level2_kernel_pgt, physaddr);

	/* invalidate pages before the kernel image */
	for (i = 0; i < pmd_index((unsigned long)_text); i++)
		pmd[i] &= ~_PAGE_PRESENT;

	/* fixup pages that are part of the kernel image */
	for (; i <= pmd_index((unsigned long)_end); i++)
		if (pmd[i] & _PAGE_PRESENT)
			pmd[i] += load_delta;

	/* invalidate pages after the kernel image */
	for (; i < PTRS_PER_PMD; i++)
		pmd[i] &= ~_PAGE_PRESENT;

	/*
	 * Fixup phys_base - remove the memory encryption mask to obtain
	 * the true physical address.
	 */
	*fixup_long(&phys_base, physaddr) += load_delta - sme_get_me_mask();

	/* Encrypt the kernel and related data (if SME is active) */
	sme_encrypt_kernel(bp);

	/*
	 * Clear the memory encryption mask from the .bss..decrypted section.
	 * The bss section will be memset to zero later in the initialization so
	 * there is no need to zero it after changing the memory encryption
	 * attribute.
	 */
	if (mem_encrypt_active()) {
		vaddr = (unsigned long)__start_bss_decrypted;
		vaddr_end = (unsigned long)__end_bss_decrypted;
		for (; vaddr < vaddr_end; vaddr += PMD_SIZE) {
			i = pmd_index(vaddr);
			pmd[i] -= sme_get_me_mask();
		}
	}

	/*
	 * Return the SME encryption mask (if SME is active) to be used as a
	 * modifier for the initial pgdir entry programmed into CR3.
	 */
	return sme_get_me_mask();
}

unsigned long __startup_secondary_64(void)
{
	/*
	 * Return the SME encryption mask (if SME is active) to be used as a
	 * modifier for the initial pgdir entry programmed into CR3.
	 */
	return sme_get_me_mask();
}

/* Wipe all early page tables except for the kernel symbol map */
static void __init reset_early_page_tables(void)
{
	memset(early_top_pgt, 0, sizeof(pgd_t)*(PTRS_PER_PGD-1));
	next_early_pgt = 0;
	write_cr3(__sme_pa_nodebug(early_top_pgt));
}
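
/*
 * Note that the memset above spares the top PGD slot: it holds the
 * kernel's high mapping at __START_KERNEL_map, which must survive the
 * wipe so that the currently executing kernel text stays mapped.
 */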

/* Create a new PMD entry */
bool __init __early_make_pgtable(unsigned long address, pmdval_t pmd)
{
	unsigned long physaddr = address - __PAGE_OFFSET;
	pgdval_t pgd, *pgd_p;
	p4dval_t p4d, *p4d_p;
	pudval_t pud, *pud_p;
	pmdval_t *pmd_p;

	/* Invalid address or early pgt is done? */
	if (physaddr >= MAXMEM || read_cr3_pa() != __pa_nodebug(early_top_pgt))
		return false;

again:
	pgd_p = &early_top_pgt[pgd_index(address)].pgd;
	pgd = *pgd_p;

	/*
	 * The use of __START_KERNEL_map rather than __PAGE_OFFSET here is
	 * critical -- __PAGE_OFFSET would point us back into the dynamic
	 * range and we might end up looping forever...
	 */
	if (!pgtable_l5_enabled())
		p4d_p = pgd_p;
	else if (pgd)
		p4d_p = (p4dval_t *)((pgd & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
	else {
		if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
			reset_early_page_tables();
			goto again;
		}

		p4d_p = (p4dval_t *)early_dynamic_pgts[next_early_pgt++];
		memset(p4d_p, 0, sizeof(*p4d_p) * PTRS_PER_P4D);
		*pgd_p = (pgdval_t)p4d_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
	}
	p4d_p += p4d_index(address);
	p4d = *p4d_p;

	if (p4d)
		pud_p = (pudval_t *)((p4d & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
	else {
		if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
			reset_early_page_tables();
			goto again;
		}

		pud_p = (pudval_t *)early_dynamic_pgts[next_early_pgt++];
		memset(pud_p, 0, sizeof(*pud_p) * PTRS_PER_PUD);
		*p4d_p = (p4dval_t)pud_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
	}
	pud_p += pud_index(address);
	pud = *pud_p;

	if (pud)
		pmd_p = (pmdval_t *)((pud & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
	else {
		if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
			reset_early_page_tables();
			goto again;
		}

		pmd_p = (pmdval_t *)early_dynamic_pgts[next_early_pgt++];
		memset(pmd_p, 0, sizeof(*pmd_p) * PTRS_PER_PMD);
		*pud_p = (pudval_t)pmd_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
	}
	pmd_p[pmd_index(address)] = pmd;

	return true;
}
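
/*
 * The intermediate tables above are carved out of the fixed
 * early_dynamic_pgts pool. When the pool is exhausted, the walker wipes
 * the early page tables and starts over; that is safe because every
 * mapping is rebuilt on demand by the early page-fault handler.
 */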

static bool __init early_make_pgtable(unsigned long address)
{
	unsigned long physaddr = address - __PAGE_OFFSET;
	pmdval_t pmd;

	pmd = (physaddr & PMD_MASK) + early_pmd_flags;

	return __early_make_pgtable(address, pmd);
}

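/*
 * Early exceptions end up here via the handlers installed by
 * idt_setup_early_handler(): a page fault is satisfied by creating a 2M
 * mapping on the fly, a #VC exception is forwarded to the SEV-ES boot
 * GHCB handler when configured, and everything else goes through the
 * generic early fixup path.
 */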
void __init do_early_exception(struct pt_regs *regs, int trapnr)
{
	if (trapnr == X86_TRAP_PF &&
	    early_make_pgtable(native_read_cr2()))
		return;

	if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT) &&
	    trapnr == X86_TRAP_VC && handle_vc_boot_ghcb(regs))
		return;

	early_fixup_exception(regs, trapnr);
}

/*
 * Don't add a printk in there. printk relies on the PDA which is not
 * initialized yet.
 */
static void __init clear_bss(void)
{
	memset(__bss_start, 0,
	       (unsigned long) __bss_stop - (unsigned long) __bss_start);
}

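/*
 * The boot-protocol command-line pointer is split across two 32-bit
 * fields: cmd_line_ptr in the setup header carries the low half and
 * boot_params.ext_cmd_line_ptr the high half; get_cmd_line_ptr()
 * stitches them back together.
 */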
static unsigned long get_cmd_line_ptr(void)
{
	unsigned long cmd_line_ptr = boot_params.hdr.cmd_line_ptr;

	cmd_line_ptr |= (u64)boot_params.ext_cmd_line_ptr << 32;

	return cmd_line_ptr;
}

static void __init copy_bootdata(char *real_mode_data)
{
	char *command_line;
	unsigned long cmd_line_ptr;

	/*
	 * If SME is active, this will create decrypted mappings of the
	 * boot data in advance of the copy operations.
	 */
	sme_map_bootdata(real_mode_data);

	memcpy(&boot_params, real_mode_data, sizeof(boot_params));
	sanitize_boot_params(&boot_params);
	cmd_line_ptr = get_cmd_line_ptr();
	if (cmd_line_ptr) {
		command_line = __va(cmd_line_ptr);
		memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
	}

	/*
	 * The old boot data is no longer needed and won't be reserved,
	 * freeing up that memory for use by the system. If SME is active,
	 * we need to remove the mappings that were created so that the
	 * memory doesn't remain mapped as decrypted.
	 */
	sme_unmap_bootdata(real_mode_data);
}

asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
{
	/*
	 * Build-time sanity checks on the kernel image and module
	 * area mappings. (these are purely build-time and produce no code)
	 */
	BUILD_BUG_ON(MODULES_VADDR < __START_KERNEL_map);
	BUILD_BUG_ON(MODULES_VADDR - __START_KERNEL_map < KERNEL_IMAGE_SIZE);
	BUILD_BUG_ON(MODULES_LEN + KERNEL_IMAGE_SIZE > 2*PUD_SIZE);
	BUILD_BUG_ON((__START_KERNEL_map & ~PMD_MASK) != 0);
	BUILD_BUG_ON((MODULES_VADDR & ~PMD_MASK) != 0);
	BUILD_BUG_ON(!(MODULES_VADDR > __START_KERNEL));
	MAYBE_BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) ==
				(__START_KERNEL & PGDIR_MASK)));
	BUILD_BUG_ON(__fix_to_virt(__end_of_fixed_addresses) <= MODULES_END);

	cr4_init_shadow();

	/* Kill off the identity-map trampoline */
	reset_early_page_tables();

	clear_bss();

	clear_page(init_top_pgt);

	/*
	 * SME support may update early_pmd_flags to include the memory
	 * encryption mask, so it needs to be called before anything
	 * that may generate a page fault.
	 */
	sme_early_init();

	kasan_early_init();

	idt_setup_early_handler();

	copy_bootdata(__va(real_mode_data));

	/*
	 * Load microcode early on BSP.
	 */
	load_ucode_bsp();

	/* Set init_top_pgt kernel high mapping */
	init_top_pgt[511] = early_top_pgt[511];

	x86_64_start_reservations(real_mode_data);
}

void __init x86_64_start_reservations(char *real_mode_data)
{
	/* The version is always non-zero if the boot data was already copied */
	if (!boot_params.hdr.version)
		copy_bootdata(__va(real_mode_data));

	x86_early_init_platform_quirks();

	switch (boot_params.hdr.hardware_subarch) {
	case X86_SUBARCH_INTEL_MID:
		x86_intel_mid_early_setup();
		break;
	default:
		break;
	}

	start_kernel();
}

/*
 * Data structures and code used for IDT setup in head_64.S. The bringup-IDT is
 * used until the idt_table takes over. On the boot CPU this happens in
 * x86_64_start_kernel(), on secondary CPUs in start_secondary(). In both cases
 * this happens in the functions called from head_64.S.
 *
 * The idt_table can't be used that early because all the code modifying it is
 * in idt.c and can be instrumented by tracing or KASAN, which both don't work
 * during early CPU bringup. Also the idt_table has the runtime vectors
 * configured which require certain CPU state to be setup already (like TSS),
 * which also hasn't happened yet in early CPU bringup.
 */
static gate_desc bringup_idt_table[NUM_EXCEPTION_VECTORS] __page_aligned_data;

static struct desc_ptr bringup_idt_descr = {
	.size    = (NUM_EXCEPTION_VECTORS * sizeof(gate_desc)) - 1,
	.address = 0, /* Set at runtime */
};

static void set_bringup_idt_handler(gate_desc *idt, int n, void *handler)
{
#ifdef CONFIG_AMD_MEM_ENCRYPT
	struct idt_data data;
	gate_desc desc;

	init_idt_data(&data, n, handler);
	idt_init_desc(&desc, &data);
	native_write_idt_entry(idt, n, &desc);
#endif
}
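
/*
 * Without CONFIG_AMD_MEM_ENCRYPT the helper above compiles to an empty
 * stub: the only vector ever installed in the bringup IDT is the #VC
 * handler needed for SEV-ES guests.
 */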

/* This runs while still in the direct mapping */
static void startup_64_load_idt(unsigned long physbase)
{
	struct desc_ptr *desc = fixup_pointer(&bringup_idt_descr, physbase);
	gate_desc *idt = fixup_pointer(bringup_idt_table, physbase);

	if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT)) {
		void *handler;

		/* VMM Communication Exception */
		handler = fixup_pointer(vc_no_ghcb, physbase);
		set_bringup_idt_handler(idt, X86_TRAP_VC, handler);
	}

	desc->address = (unsigned long)idt;
	native_load_idt(desc);
}

/* This is used when running on kernel addresses */
void early_setup_idt(void)
{
	/* VMM Communication Exception */
	if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT))
		set_bringup_idt_handler(bringup_idt_table, X86_TRAP_VC, vc_boot_ghcb);

	bringup_idt_descr.address = (unsigned long)bringup_idt_table;
	native_load_idt(&bringup_idt_descr);
}

/*
 * Setup boot CPU state needed before kernel switches to virtual addresses.
 */
void __head startup_64_setup_env(unsigned long physbase)
{
	/* Load GDT */
	startup_gdt_descr.address = (unsigned long)fixup_pointer(startup_gdt, physbase);
	native_load_gdt(&startup_gdt_descr);

	/* New GDT is live - reload data segment registers */
	asm volatile("movl %%eax, %%ds\n"
		     "movl %%eax, %%ss\n"
		     "movl %%eax, %%es\n" : : "a"(__KERNEL_DS) : "memory");

	startup_64_load_idt(physbase);
}