// SPDX-License-Identifier: GPL-2.0
/*
 * prepare to run common code
 *
 * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 */

#define DISABLE_BRANCH_PROFILING
#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/percpu.h>
#include <linux/start_kernel.h>
#include <linux/io.h>
#include <linux/memblock.h>
#include <linux/mem_encrypt.h>

#include <asm/processor.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/setup.h>
#include <asm/desc.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/kdebug.h>
#include <asm/e820/api.h>
#include <asm/bios_ebda.h>
#include <asm/bootparam_utils.h>
#include <asm/microcode.h>
#include <asm/kasan.h>
#include <asm/fixmap.h>

/*
 * Manage page tables very early on.
 */
extern pmd_t early_dynamic_pgts[EARLY_DYNAMIC_PAGE_TABLES][PTRS_PER_PMD];
static unsigned int __initdata next_early_pgt;
pmdval_t early_pmd_flags = __PAGE_KERNEL_LARGE & ~(_PAGE_GLOBAL | _PAGE_NX);
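
/*
 * A note on the masking above (an inference, not from the original
 * comments): _PAGE_GLOBAL is cleared so these early mappings do not
 * outlive the TLB flushes used to retire them, and _PAGE_NX is cleared
 * because NX may not be usable this early; unsupported bits are only
 * filtered via __supported_pte_mask later on.
 */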

#define __head __section(.head.text)

static void __head *fixup_pointer(void *ptr, unsigned long physaddr)
{
        return ptr - (void *)_text + (void *)physaddr;
}
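
/*
 * A worked example of the fixup arithmetic (illustrative numbers, not
 * from any particular boot): with _text linked at 0xffffffff81000000
 * and the kernel actually loaded at physical 0x0000000004000000, a
 * global linked at 0xffffffff81002f00 is reachable during early boot
 * at fixup_pointer(ptr, physaddr) == 0x0000000004002f00 -- the same
 * offset from the load address as from the link address.
 */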

/*
 * Code in __startup_64() can be relocated during execution, but the compiler
 * doesn't have to generate PC-relative relocations when accessing globals from
 * that function. Clang actually does not generate them, which leads to
 * boot-time crashes. To work around this problem, every global pointer must
 * be adjusted using fixup_pointer().
 */
unsigned long __head __startup_64(unsigned long physaddr,
                                  struct boot_params *bp)
{
        unsigned long load_delta, *p;
        unsigned long pgtable_flags;
        pgdval_t *pgd;
        p4dval_t *p4d;
        pudval_t *pud;
        pmdval_t *pmd, pmd_entry;
        pteval_t *mask_ptr;
        int i;
        unsigned int *next_pgt_ptr;

        /* Is the address too large? */
        if (physaddr >> MAX_PHYSMEM_BITS)
                for (;;);

        /*
         * Compute the delta between the address I am compiled to run at
         * and the address I am actually running at.
         */
        load_delta = physaddr - (unsigned long)(_text - __START_KERNEL_map);

        /* Is the address not 2M aligned? */
        if (load_delta & ~PMD_PAGE_MASK)
                for (;;);
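
        /*
         * A worked example (illustrative numbers): with __START_KERNEL_map
         * at 0xffffffff80000000 and _text linked at 0xffffffff81000000,
         * the link-time physical address of _text is 0x1000000. If the
         * kernel was actually loaded at physaddr == 0x5000000, load_delta
         * is 0x4000000, and that value is added to every prebuilt
         * page-table entry below.
         */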

        /* Activate Secure Memory Encryption (SME) if supported and enabled */
        sme_enable(bp);

        /* Include the SME encryption mask in the fixup value */
        load_delta += sme_get_me_mask();

        /* Fixup the physical addresses in the page table */

        pgd = fixup_pointer(&early_top_pgt, physaddr);
        pgd[pgd_index(__START_KERNEL_map)] += load_delta;

        if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
                p4d = fixup_pointer(&level4_kernel_pgt, physaddr);
                p4d[511] += load_delta;
        }

        pud = fixup_pointer(&level3_kernel_pgt, physaddr);
        pud[510] += load_delta;
        pud[511] += load_delta;

        pmd = fixup_pointer(level2_fixmap_pgt, physaddr);
        for (i = FIXMAP_PMD_TOP; i > FIXMAP_PMD_TOP - FIXMAP_PMD_NUM; i--)
                pmd[i] += load_delta;
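
        /*
         * Descriptive note: the loop above walks down through the
         * FIXMAP_PMD_NUM slots that back the fixmap, ending at index
         * FIXMAP_PMD_TOP; with FIXMAP_PMD_NUM == 2 in this tree, that
         * is pmd[FIXMAP_PMD_TOP] and pmd[FIXMAP_PMD_TOP - 1]. The
         * constants live in the fixmap/pgtable headers.
         */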

        /*
         * Set up the identity mapping for the switchover. These
         * entries should *NOT* have the global bit set! This also
         * creates a bunch of nonsense entries but that is fine --
         * it avoids problems around wraparound.
         */

        next_pgt_ptr = fixup_pointer(&next_early_pgt, physaddr);
        pud = fixup_pointer(early_dynamic_pgts[(*next_pgt_ptr)++], physaddr);
        pmd = fixup_pointer(early_dynamic_pgts[(*next_pgt_ptr)++], physaddr);

        pgtable_flags = _KERNPG_TABLE_NOENC + sme_get_me_mask();

        if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
                p4d = fixup_pointer(early_dynamic_pgts[next_early_pgt++], physaddr);

                i = (physaddr >> PGDIR_SHIFT) % PTRS_PER_PGD;
                pgd[i + 0] = (pgdval_t)p4d + pgtable_flags;
                pgd[i + 1] = (pgdval_t)p4d + pgtable_flags;

                i = (physaddr >> P4D_SHIFT) % PTRS_PER_P4D;
                p4d[i + 0] = (pgdval_t)pud + pgtable_flags;
                p4d[i + 1] = (pgdval_t)pud + pgtable_flags;
        } else {
                i = (physaddr >> PGDIR_SHIFT) % PTRS_PER_PGD;
                pgd[i + 0] = (pgdval_t)pud + pgtable_flags;
                pgd[i + 1] = (pgdval_t)pud + pgtable_flags;
        }
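
        /*
         * A worked index example (illustrative, 4-level paging): with
         * physaddr == 0x5000000, PGDIR_SHIFT == 39 gives i == 0, so
         * pgd[0] and pgd[1] point at the new pud table. Two consecutive
         * slots are filled at each level so the identity mapping still
         * covers the whole image if it straddles an entry boundary.
         */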

        i = (physaddr >> PUD_SHIFT) % PTRS_PER_PUD;
        pud[i + 0] = (pudval_t)pmd + pgtable_flags;
        pud[i + 1] = (pudval_t)pmd + pgtable_flags;

        pmd_entry = __PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL;
        /* Filter out unsupported __PAGE_KERNEL_* bits: */
        mask_ptr = fixup_pointer(&__supported_pte_mask, physaddr);
        pmd_entry &= *mask_ptr;
        pmd_entry += sme_get_me_mask();
        pmd_entry += physaddr;

        for (i = 0; i < DIV_ROUND_UP(_end - _text, PMD_SIZE); i++) {
                int idx = i + (physaddr >> PMD_SHIFT) % PTRS_PER_PMD;
                pmd[idx] = pmd_entry + i * PMD_SIZE;
        }
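
        /*
         * Continuing the example: a kernel image spanning 20 MiB from
         * _text to _end needs DIV_ROUND_UP(20 MiB, 2 MiB) == 10 of these
         * 2M entries, starting at pmd index
         * (physaddr >> PMD_SHIFT) % PTRS_PER_PMD -- index 40 for
         * physaddr == 0x5000000.
         */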

        /*
         * Fixup the kernel text+data virtual addresses. Note that we
         * might write invalid pmds: when the kernel is relocated,
         * cleanup_highmap() fixes this up, along with the mappings
         * beyond _end.
         */

        pmd = fixup_pointer(level2_kernel_pgt, physaddr);
        for (i = 0; i < PTRS_PER_PMD; i++) {
                if (pmd[i] & _PAGE_PRESENT)
                        pmd[i] += load_delta;
        }

        /*
         * Fixup phys_base - remove the memory encryption mask to obtain
         * the true physical address.
         */
        p = fixup_pointer(&phys_base, physaddr);
        *p += load_delta - sme_get_me_mask();
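
        /*
         * In the running example (with SME inactive), phys_base -- which
         * is 0 at link time -- becomes 0x4000000 here, so later
         * __pa()/__va() translations of kernel symbols account for where
         * the image actually landed.
         */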

        /* Encrypt the kernel and related (if SME is active) */
        sme_encrypt_kernel(bp);

        /*
         * Return the SME encryption mask (if SME is active) to be used as a
         * modifier for the initial pgdir entry programmed into CR3.
         */
        return sme_get_me_mask();
}

unsigned long __startup_secondary_64(void)
{
        /*
         * Return the SME encryption mask (if SME is active) to be used as a
         * modifier for the initial pgdir entry programmed into CR3.
         */
        return sme_get_me_mask();
}

/* Wipe all early page tables except for the kernel symbol map */
static void __init reset_early_page_tables(void)
{
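        /*
         * Descriptive note: only entries 0..PTRS_PER_PGD-2 are cleared;
         * the last PGD slot (index 511) holds the kernel's high mapping
         * at __START_KERNEL_map and must survive the wipe.
         */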
        memset(early_top_pgt, 0, sizeof(pgd_t) * (PTRS_PER_PGD - 1));
        next_early_pgt = 0;
        write_cr3(__sme_pa_nodebug(early_top_pgt));
}

/* Create a new PMD entry */
int __init __early_make_pgtable(unsigned long address, pmdval_t pmd)
{
        unsigned long physaddr = address - __PAGE_OFFSET;
        pgdval_t pgd, *pgd_p;
        p4dval_t p4d, *p4d_p;
        pudval_t pud, *pud_p;
        pmdval_t *pmd_p;

        /* Invalid address or early pgt is done? */
        if (physaddr >= MAXMEM || read_cr3_pa() != __pa_nodebug(early_top_pgt))
                return -1;

again:
        pgd_p = &early_top_pgt[pgd_index(address)].pgd;
        pgd = *pgd_p;

        /*
         * The use of __START_KERNEL_map rather than __PAGE_OFFSET here is
         * critical -- __PAGE_OFFSET would point us back into the dynamic
         * range and we might end up looping forever...
         */
        if (!IS_ENABLED(CONFIG_X86_5LEVEL))
                p4d_p = pgd_p;
        else if (pgd)
                p4d_p = (p4dval_t *)((pgd & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
        else {
                if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
                        reset_early_page_tables();
                        goto again;
                }

                p4d_p = (p4dval_t *)early_dynamic_pgts[next_early_pgt++];
                memset(p4d_p, 0, sizeof(*p4d_p) * PTRS_PER_P4D);
                *pgd_p = (pgdval_t)p4d_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
        }
        p4d_p += p4d_index(address);
        p4d = *p4d_p;

        if (p4d)
                pud_p = (pudval_t *)((p4d & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
        else {
                if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
                        reset_early_page_tables();
                        goto again;
                }

                pud_p = (pudval_t *)early_dynamic_pgts[next_early_pgt++];
                memset(pud_p, 0, sizeof(*pud_p) * PTRS_PER_PUD);
                *p4d_p = (p4dval_t)pud_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
        }
        pud_p += pud_index(address);
        pud = *pud_p;

        if (pud)
                pmd_p = (pmdval_t *)((pud & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
        else {
                if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
                        reset_early_page_tables();
                        goto again;
                }

                pmd_p = (pmdval_t *)early_dynamic_pgts[next_early_pgt++];
                memset(pmd_p, 0, sizeof(*pmd_p) * PTRS_PER_PMD);
                *pud_p = (pudval_t)pmd_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
        }
        pmd_p[pmd_index(address)] = pmd;

        return 0;
}

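/*
 * Install a 2M mapping for a faulting direct-map address. A worked
 * example (illustrative): a reference to __PAGE_OFFSET + 0x12345678
 * arrives here with physaddr == 0x12345678, and the PMD written covers
 * the 2M region at 0x12200000 (physaddr & PMD_MASK) combined with
 * early_pmd_flags.
 */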
int __init early_make_pgtable(unsigned long address)
{
        unsigned long physaddr = address - __PAGE_OFFSET;
        pmdval_t pmd;

        pmd = (physaddr & PMD_MASK) + early_pmd_flags;

        return __early_make_pgtable(address, pmd);
}

/*
 * Don't add a printk in here: printk relies on the PDA, which is not
 * initialized yet.
 */
static void __init clear_bss(void)
{
        memset(__bss_start, 0,
               (unsigned long) __bss_stop - (unsigned long) __bss_start);
}

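/*
 * The boot protocol splits the command-line pointer across two fields:
 * hdr.cmd_line_ptr carries the low 32 bits and ext_cmd_line_ptr the
 * high 32 bits. E.g. (illustrative values) a command line at
 * 0x0000000100002000 arrives as cmd_line_ptr == 0x00002000 and
 * ext_cmd_line_ptr == 0x1.
 */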
static unsigned long get_cmd_line_ptr(void)
{
        unsigned long cmd_line_ptr = boot_params.hdr.cmd_line_ptr;

        cmd_line_ptr |= (u64)boot_params.ext_cmd_line_ptr << 32;

        return cmd_line_ptr;
}

static void __init copy_bootdata(char *real_mode_data)
{
        char *command_line;
        unsigned long cmd_line_ptr;

        /*
         * If SME is active, this will create decrypted mappings of the
         * boot data in advance of the copy operations.
         */
        sme_map_bootdata(real_mode_data);

        memcpy(&boot_params, real_mode_data, sizeof(boot_params));
        sanitize_boot_params(&boot_params);
        cmd_line_ptr = get_cmd_line_ptr();
        if (cmd_line_ptr) {
                command_line = __va(cmd_line_ptr);
                memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
        }

        /*
         * The old boot data is no longer needed and won't be reserved,
         * freeing up that memory for use by the system. If SME is active,
         * we need to remove the mappings that were created so that the
         * memory doesn't remain mapped as decrypted.
         */
        sme_unmap_bootdata(real_mode_data);
}

asmlinkage __visible void __init x86_64_start_kernel(char *real_mode_data)
{
        /*
         * Build-time sanity checks on the kernel image and module
         * area mappings. (these are purely build-time and produce no code)
         */
        BUILD_BUG_ON(MODULES_VADDR < __START_KERNEL_map);
        BUILD_BUG_ON(MODULES_VADDR - __START_KERNEL_map < KERNEL_IMAGE_SIZE);
        BUILD_BUG_ON(MODULES_LEN + KERNEL_IMAGE_SIZE > 2*PUD_SIZE);
        BUILD_BUG_ON((__START_KERNEL_map & ~PMD_MASK) != 0);
        BUILD_BUG_ON((MODULES_VADDR & ~PMD_MASK) != 0);
        BUILD_BUG_ON(!(MODULES_VADDR > __START_KERNEL));
        BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) ==
                       (__START_KERNEL & PGDIR_MASK)));
        BUILD_BUG_ON(__fix_to_virt(__end_of_fixed_addresses) <= MODULES_END);

        cr4_init_shadow();

        /* Kill off the identity-map trampoline */
        reset_early_page_tables();

        clear_bss();

        clear_page(init_top_pgt);

        /*
         * SME support may update early_pmd_flags to include the memory
         * encryption mask, so it needs to be called before anything
         * that may generate a page fault.
         */
        sme_early_init();

        kasan_early_init();

        idt_setup_early_handler();

        copy_bootdata(__va(real_mode_data));

        /*
         * Load microcode early on BSP.
         */
        load_ucode_bsp();

        /* Set the init_top_pgt kernel high mapping */
        init_top_pgt[511] = early_top_pgt[511];

        x86_64_start_reservations(real_mode_data);
}

void __init x86_64_start_reservations(char *real_mode_data)
{
        /* The version is always non-zero if the boot data has been copied */
        if (!boot_params.hdr.version)
                copy_bootdata(__va(real_mode_data));

        x86_early_init_platform_quirks();

        switch (boot_params.hdr.hardware_subarch) {
        case X86_SUBARCH_INTEL_MID:
                x86_intel_mid_early_setup();
                break;
        default:
                break;
        }

        start_kernel();
}