arch/x86/boot/compressed/pagetable.c

// SPDX-License-Identifier: GPL-2.0
/*
 * This code is used on x86_64 to create page table identity mappings on
 * demand by building up a new set of page tables (or appending to the
 * existing ones), and then switching over to them when ready.
 *
 * Copyright (C) 2015-2016 Yinghai Lu
 * Copyright (C) 2016 Kees Cook
 */

/*
 * Since we're dealing with identity mappings, physical and virtual
 * addresses are the same, so override these defines which are ultimately
 * used by the headers in misc.h.
 */
#define __pa(x)  ((unsigned long)(x))
#define __va(x)  ((void *)((unsigned long)(x)))
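
/*
 * Under an identity mapping these are plain casts: __va(0x100000), for
 * instance, is simply (void *)0x100000, with no PAGE_OFFSET arithmetic
 * involved.
 */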

/*
 * The pgtable.h and mm/ident_map.c includes make use of the SME-related
 * information which is not used in the compressed image support. Un-define
 * the SME support to avoid any compile and link errors.
 */
#undef CONFIG_AMD_MEM_ENCRYPT

/* No PAGE_TABLE_ISOLATION support needed either: */
#undef CONFIG_PAGE_TABLE_ISOLATION

#include "misc.h"

/* These actually do the work of building the kernel identity maps. */
#include <asm/init.h>
#include <asm/pgtable.h>
/* Use the static base for this part of the boot process */
#undef __PAGE_OFFSET
#define __PAGE_OFFSET __PAGE_OFFSET_BASE
#include "../../mm/ident_map.c"

/* Used by pgtable.h asm code to force instruction serialization. */
unsigned long __force_order;

/* Used to track our page table allocation area. */
struct alloc_pgt_data {
        unsigned char *pgt_buf;         /* base of the allocation buffer */
        unsigned long pgt_buf_size;     /* total size of the buffer */
        unsigned long pgt_buf_offset;   /* offset of the next free page */
};

/*
 * Allocates space for a page table entry, using struct alloc_pgt_data
 * above. Besides the local callers, this is used as the allocation
 * callback in mapping_info below.
 */
static void *alloc_pgt_page(void *context)
{
        struct alloc_pgt_data *pages = (struct alloc_pgt_data *)context;
        unsigned char *entry;

        /* Validate there is space available for a new page. */
        if (pages->pgt_buf_offset >= pages->pgt_buf_size) {
                debug_putstr("out of pgt_buf in " __FILE__ "!?\n");
                debug_putaddr(pages->pgt_buf_offset);
                debug_putaddr(pages->pgt_buf_size);
                return NULL;
        }

        entry = pages->pgt_buf + pages->pgt_buf_offset;
        pages->pgt_buf_offset += PAGE_SIZE;

        return entry;
}
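
/*
 * A worked example of the bump allocator above (assuming the usual x86
 * PAGE_SIZE of 4 KiB): with pgt_buf_size == 3 * PAGE_SIZE, three calls
 * return pgt_buf + 0x0000, pgt_buf + 0x1000 and pgt_buf + 0x2000; a
 * fourth finds pgt_buf_offset == pgt_buf_size and returns NULL. Nothing
 * is ever freed; initialize_identity_maps() simply rewinds the offset.
 */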

/* Used to track our allocated page tables. */
static struct alloc_pgt_data pgt_data;

/* The top level page table entry pointer. */
static unsigned long top_level_pgt;

/*
 * Mapping information structure passed to kernel_ident_mapping_init().
 * Due to relocation, pointers must be assigned at run time, not build time.
 */
static struct x86_mapping_info mapping_info;

/* Locates and clears a region for a new top level page table. */
void initialize_identity_maps(void)
{
        unsigned long sev_me_mask = get_sev_encryption_mask();

        /* Init mapping_info with run-time function/buffer pointers. */
        mapping_info.alloc_pgt_page = alloc_pgt_page;
        mapping_info.context = &pgt_data;
        mapping_info.page_flag = __PAGE_KERNEL_LARGE_EXEC | sev_me_mask;
        mapping_info.kernpg_flag = _KERNPG_TABLE | sev_me_mask;

        /*
         * This should already be zero, but since calling this a second
         * time would rewind the other counters, make sure it is reset too.
         */
        pgt_data.pgt_buf_offset = 0;

        /*
         * If we came here via startup_32(), cr3 will be _pgtable already
         * and we must append to the existing area instead of entirely
         * overwriting it.
         *
         * With 5-level paging, we use '_pgtable' to allocate the p4d page
         * table; the top-level page table is allocated separately.
         *
         * p4d_offset(top_level_pgt, 0) would cover both the 4- and 5-level
         * cases. On 4-level paging it's equal to 'top_level_pgt'.
         */
        top_level_pgt = read_cr3_pa();
        if (p4d_offset((pgd_t *)top_level_pgt, 0) == (p4d_t *)_pgtable) {
                debug_putstr("booted via startup_32()\n");
                pgt_data.pgt_buf = _pgtable + BOOT_INIT_PGT_SIZE;
                pgt_data.pgt_buf_size = BOOT_PGT_SIZE - BOOT_INIT_PGT_SIZE;
                memset(pgt_data.pgt_buf, 0, pgt_data.pgt_buf_size);
        } else {
                debug_putstr("booted via startup_64()\n");
                pgt_data.pgt_buf = _pgtable;
                pgt_data.pgt_buf_size = BOOT_PGT_SIZE;
                memset(pgt_data.pgt_buf, 0, pgt_data.pgt_buf_size);
                top_level_pgt = (unsigned long)alloc_pgt_page(&pgt_data);
        }
}
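
/*
 * To make the two branches above concrete: entered via startup_32(), the
 * allocator is confined to the tail of the static buffer,
 * [_pgtable + BOOT_INIT_PGT_SIZE, _pgtable + BOOT_PGT_SIZE), because the
 * leading bytes already hold the live page tables cr3 points at. Entered
 * via startup_64(), the whole buffer is reclaimed, and the first page
 * allocated from it becomes the new top-level page table.
 */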

/*
 * Adds the specified range to what will become the new identity mappings.
 * Once all ranges have been added, the new mapping is activated by calling
 * finalize_identity_maps() below.
 */
void add_identity_map(unsigned long start, unsigned long size)
{
        unsigned long end = start + size;

        /* Align boundary to 2M. */
        start = round_down(start, PMD_SIZE);
        end = round_up(end, PMD_SIZE);
        if (start >= end)
                return;

        /* Build the mapping. */
        kernel_ident_mapping_init(&mapping_info, (pgd_t *)top_level_pgt,
                                  start, end);
}
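
/*
 * For example, with the 2 MiB PMD_SIZE used here, a request such as
 * add_identity_map(0x1234567, 0x1000) is widened to the PMD-aligned
 * range [0x1200000, 0x1400000) before the page tables are filled in.
 */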

/*
 * This switches the page tables to the new top-level page table that has
 * been built via calls to add_identity_map() above. If booted via
 * startup_32(), this is effectively a no-op.
 */
void finalize_identity_maps(void)
{
        write_cr3(top_level_pgt);
}
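
/*
 * A sketch of the expected calling sequence (in this kernel the KASLR
 * code in this directory drives these functions; exact call sites may
 * vary by version):
 *
 *      initialize_identity_maps();
 *      add_identity_map(region_start, region_size);
 *      ...further add_identity_map() calls as regions are examined...
 *      finalize_identity_maps();
 */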