/*
 *  prepare to run common code
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 */

#define DISABLE_BRANCH_PROFILING
#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/percpu.h>
#include <linux/start_kernel.h>
#include <linux/memblock.h>
#include <linux/mem_encrypt.h>

#include <asm/processor.h>
#include <asm/proto.h>
#include <asm/setup.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/kdebug.h>
#include <asm/e820/api.h>
#include <asm/bios_ebda.h>
#include <asm/bootparam_utils.h>
#include <asm/microcode.h>
#include <asm/kasan.h>

/*
 * Manage page tables very early on.
 */
extern pgd_t early_top_pgt[PTRS_PER_PGD];
extern pmd_t early_dynamic_pgts[EARLY_DYNAMIC_PAGE_TABLES][PTRS_PER_PMD];
static unsigned int __initdata next_early_pgt;
pmdval_t early_pmd_flags = __PAGE_KERNEL_LARGE & ~(_PAGE_GLOBAL | _PAGE_NX);

#define __head __section(.head.text)
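
/*
 * The code below runs while the CPU is still using the boot-time identity
 * mapping, before the switch to the kernel's final page tables.  Pointers
 * to kernel symbols are link-time virtual addresses, so they have to be
 * translated to the physical address the kernel was actually loaded at
 * before they can be dereferenced.
 */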
static void __head *fixup_pointer(void *ptr, unsigned long physaddr)
{
	return ptr - (void *)_text + (void *)physaddr;
}
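
/*
 * Called from the startup_64() assembly entry code while still executing
 * from the identity mapping: adjust the statically built page tables for
 * the address the kernel was actually loaded at, build a temporary
 * identity mapping of the kernel image for the CR3 switch, and return the
 * SME encryption mask to be OR'ed into the initial CR3 value.
 */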
unsigned long __head __startup_64(unsigned long physaddr)
{
	unsigned long load_delta, *p;
	unsigned long pgtable_flags;
	pgdval_t *pgd;
	p4dval_t *p4d;
	pudval_t *pud;
	pmdval_t *pmd, pmd_entry;
	int i;

	/* Is the address too large? */
	if (physaddr >> MAX_PHYSMEM_BITS)
		for (;;);

	/*
	 * Compute the delta between the address I am compiled to run at
	 * and the address I am actually running at.
	 */
	load_delta = physaddr - (unsigned long)(_text - __START_KERNEL_map);

	/* Is the address not 2M aligned? */
	if (load_delta & ~PMD_PAGE_MASK)
		for (;;);

	/* Activate Secure Memory Encryption (SME) if supported and enabled */
	sme_enable();

	/* Include the SME encryption mask in the fixup value */
	load_delta += sme_get_me_mask();

	/* Fixup the physical addresses in the page table */

	pgd = fixup_pointer(&early_top_pgt, physaddr);
	pgd[pgd_index(__START_KERNEL_map)] += load_delta;

	if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
		p4d = fixup_pointer(&level4_kernel_pgt, physaddr);
		p4d[511] += load_delta;
	}

	pud = fixup_pointer(&level3_kernel_pgt, physaddr);
	pud[510] += load_delta;
	pud[511] += load_delta;

	pmd = fixup_pointer(level2_fixmap_pgt, physaddr);
	pmd[506] += load_delta;

	/*
	 * Set up the identity mapping for the switchover.  These
	 * entries should *NOT* have the global bit set!  This also
	 * creates a bunch of nonsense entries but that is fine --
	 * it avoids problems around wraparound.
	 */

	pud = fixup_pointer(early_dynamic_pgts[next_early_pgt++], physaddr);
	pmd = fixup_pointer(early_dynamic_pgts[next_early_pgt++], physaddr);
	pgtable_flags = _KERNPG_TABLE + sme_get_me_mask();

	if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
		p4d = fixup_pointer(early_dynamic_pgts[next_early_pgt++], physaddr);

		i = (physaddr >> PGDIR_SHIFT) % PTRS_PER_PGD;
		pgd[i + 0] = (pgdval_t)p4d + pgtable_flags;
		pgd[i + 1] = (pgdval_t)p4d + pgtable_flags;

		i = (physaddr >> P4D_SHIFT) % PTRS_PER_P4D;
		p4d[i + 0] = (pgdval_t)pud + pgtable_flags;
		p4d[i + 1] = (pgdval_t)pud + pgtable_flags;
	} else {
		i = (physaddr >> PGDIR_SHIFT) % PTRS_PER_PGD;
		pgd[i + 0] = (pgdval_t)pud + pgtable_flags;
		pgd[i + 1] = (pgdval_t)pud + pgtable_flags;
	}

	i = (physaddr >> PUD_SHIFT) % PTRS_PER_PUD;
	pud[i + 0] = (pudval_t)pmd + pgtable_flags;
	pud[i + 1] = (pudval_t)pmd + pgtable_flags;

	pmd_entry = __PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL;
	pmd_entry += sme_get_me_mask();
	pmd_entry += physaddr;
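
	/*
	 * Map the whole kernel image (_text .. _end) at its physical load
	 * address with 2MB pages, filling the pmd allocated above.  These
	 * entries deliberately lack _PAGE_GLOBAL so they are flushed once
	 * the switch to the final kernel mapping is complete.
	 */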
	for (i = 0; i < DIV_ROUND_UP(_end - _text, PMD_SIZE); i++) {
		int idx = i + (physaddr >> PMD_SHIFT) % PTRS_PER_PMD;
		pmd[idx] = pmd_entry + i * PMD_SIZE;
	}

	/*
	 * Fixup the kernel text+data virtual addresses. Note that
	 * we might write invalid pmds, when the kernel is relocated
	 * cleanup_highmap() fixes this up along with the mappings
	 * beyond _end.
	 */

	pmd = fixup_pointer(level2_kernel_pgt, physaddr);
	for (i = 0; i < PTRS_PER_PMD; i++) {
		if (pmd[i] & _PAGE_PRESENT)
			pmd[i] += load_delta;
	}

	/*
	 * Fixup phys_base - remove the memory encryption mask to obtain
	 * the true physical address.
	 */
	p = fixup_pointer(&phys_base, physaddr);
	*p += load_delta - sme_get_me_mask();

	/* Encrypt the kernel (if SME is active) */
	sme_encrypt_kernel();

	/*
	 * Return the SME encryption mask (if SME is active) to be used as a
	 * modifier for the initial pgdir entry programmed into CR3.
	 */
	return sme_get_me_mask();
}
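
/*
 * Secondary CPUs start on page tables that the boot CPU has already fixed
 * up; the only thing they need from here is the SME mask for their initial
 * CR3 value.
 */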
unsigned long __startup_secondary_64(void)
{
	/*
	 * Return the SME encryption mask (if SME is active) to be used as a
	 * modifier for the initial pgdir entry programmed into CR3.
	 */
	return sme_get_me_mask();
}

/* Wipe all early page tables except for the kernel symbol map */
static void __init reset_early_page_tables(void)
{
	memset(early_top_pgt, 0, sizeof(pgd_t)*(PTRS_PER_PGD-1));
	next_early_pgt = 0;
	write_cr3(__pa_nodebug(early_top_pgt));
}
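
/*
 * Called from the early page-fault path: build the pgd/p4d/pud chain
 * needed to map 'address' with a 2MB pmd entry, allocating intermediate
 * tables from the early_dynamic_pgts pool.  If the pool runs dry, all
 * dynamically created mappings are wiped and the walk starts over.
 */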
/* Create a new PMD entry */
int __init early_make_pgtable(unsigned long address)
{
	unsigned long physaddr = address - __PAGE_OFFSET;
	pgdval_t pgd, *pgd_p;
	p4dval_t p4d, *p4d_p;
	pudval_t pud, *pud_p;
	pmdval_t pmd, *pmd_p;

	/* Invalid address or early pgt is done ? */
	if (physaddr >= MAXMEM || read_cr3_pa() != __pa_nodebug(early_top_pgt))
		return -1;

again:
	pgd_p = &early_top_pgt[pgd_index(address)].pgd;
	pgd = *pgd_p;

	/*
	 * The use of __START_KERNEL_map rather than __PAGE_OFFSET here is
	 * critical -- __PAGE_OFFSET would point us back into the dynamic
	 * range and we might end up looping forever...
	 */
	if (!IS_ENABLED(CONFIG_X86_5LEVEL))
		p4d_p = pgd_p;
	else if (pgd)
		p4d_p = (p4dval_t *)((pgd & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
	else {
		if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
			reset_early_page_tables();
			goto again;
		}

		p4d_p = (p4dval_t *)early_dynamic_pgts[next_early_pgt++];
		memset(p4d_p, 0, sizeof(*p4d_p) * PTRS_PER_P4D);
		*pgd_p = (pgdval_t)p4d_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
	}
	p4d_p += p4d_index(address);
	p4d = *p4d_p;

	if (p4d)
		pud_p = (pudval_t *)((p4d & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
	else {
		if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
			reset_early_page_tables();
			goto again;
		}

		pud_p = (pudval_t *)early_dynamic_pgts[next_early_pgt++];
		memset(pud_p, 0, sizeof(*pud_p) * PTRS_PER_PUD);
		*p4d_p = (p4dval_t)pud_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
	}
	pud_p += pud_index(address);
	pud = *pud_p;

	if (pud)
		pmd_p = (pmdval_t *)((pud & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
	else {
		if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
			reset_early_page_tables();
			goto again;
		}

		pmd_p = (pmdval_t *)early_dynamic_pgts[next_early_pgt++];
		memset(pmd_p, 0, sizeof(*pmd_p) * PTRS_PER_PMD);
		*pud_p = (pudval_t)pmd_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
	}
	pmd = (physaddr & PMD_MASK) + early_pmd_flags;
	pmd_p[pmd_index(address)] = pmd;

	return 0;
}

/* Don't add a printk in there. printk relies on the PDA which is not initialized
   yet. */
static void __init clear_bss(void)
{
	memset(__bss_start, 0,
	       (unsigned long) __bss_stop - (unsigned long) __bss_start);
}
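
/*
 * The setup header only has room for a 32-bit command line pointer;
 * boot_params.ext_cmd_line_ptr carries the upper 32 bits for boot loaders
 * that place the command line above 4GB.
 */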
static unsigned long get_cmd_line_ptr(void)
{
	unsigned long cmd_line_ptr = boot_params.hdr.cmd_line_ptr;

	cmd_line_ptr |= (u64)boot_params.ext_cmd_line_ptr << 32;

	return cmd_line_ptr;
}

static void __init copy_bootdata(char *real_mode_data)
{
	char * command_line;
	unsigned long cmd_line_ptr;

	memcpy(&boot_params, real_mode_data, sizeof boot_params);
	sanitize_boot_params(&boot_params);
	cmd_line_ptr = get_cmd_line_ptr();
	if (cmd_line_ptr) {
		command_line = __va(cmd_line_ptr);
		memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
	}
}
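
/*
 * First C code that runs on the boot CPU at the kernel's final virtual
 * address: sanity-check the address-space layout, discard the temporary
 * identity mapping, install the early exception handlers and copy the boot
 * data out of the real-mode area before heading towards start_kernel().
 */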
asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
{
	int i;

	/*
	 * Build-time sanity checks on the kernel image and module
	 * area mappings. (these are purely build-time and produce no code)
	 */
	BUILD_BUG_ON(MODULES_VADDR < __START_KERNEL_map);
	BUILD_BUG_ON(MODULES_VADDR - __START_KERNEL_map < KERNEL_IMAGE_SIZE);
	BUILD_BUG_ON(MODULES_LEN + KERNEL_IMAGE_SIZE > 2*PUD_SIZE);
	BUILD_BUG_ON((__START_KERNEL_map & ~PMD_MASK) != 0);
	BUILD_BUG_ON((MODULES_VADDR & ~PMD_MASK) != 0);
	BUILD_BUG_ON(!(MODULES_VADDR > __START_KERNEL));
	BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) ==
				(__START_KERNEL & PGDIR_MASK)));
	BUILD_BUG_ON(__fix_to_virt(__end_of_fixed_addresses) <= MODULES_END);

	/* Kill off the identity-map trampoline */
	reset_early_page_tables();

	clear_bss();

	clear_page(init_top_pgt);

	for (i = 0; i < NUM_EXCEPTION_VECTORS; i++)
		set_intr_gate(i, early_idt_handler_array[i]);
	load_idt((const struct desc_ptr *)&idt_descr);

	copy_bootdata(__va(real_mode_data));

	/*
	 * Load microcode early on BSP.
	 */
	load_ucode_bsp();

	/* set init_top_pgt kernel high mapping */
	init_top_pgt[511] = early_top_pgt[511];

	x86_64_start_reservations(real_mode_data);
}
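
/*
 * x86_64_start_reservations() can also be reached without going through
 * x86_64_start_kernel() (for example by paravirtualized guests), in which
 * case boot_params has not been copied yet -- hence the hdr.version check
 * below.
 */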
void __init x86_64_start_reservations(char *real_mode_data)
{
	/* version is always not zero if it is copied */
	if (!boot_params.hdr.version)
		copy_bootdata(__va(real_mode_data));

	x86_early_init_platform_quirks();

	switch (boot_params.hdr.hardware_subarch) {
	case X86_SUBARCH_INTEL_MID:
		x86_intel_mid_early_setup();