// SPDX-License-Identifier: GPL-2.0
/*
 * This file implements KASLR memory randomization for x86_64. It randomizes
 * the virtual address space of kernel memory regions (physical memory
 * mapping, vmalloc & vmemmap). This security feature mitigates exploits
 * relying on predictable kernel addresses.
 *
 * Entropy is generated using the KASLR early boot functions now shared in
 * the lib directory (originally written by Kees Cook). Randomization is
 * done on PGD & P4D/PUD page table levels to increase possible addresses.
 * The physical memory mapping code was adapted to support P4D/PUD level
 * virtual addresses. On the best configuration, this implementation
 * provides about 30,000 possible virtual addresses on average for each
 * memory region. An additional low memory page is used to ensure each CPU
 * can start with a PGD aligned virtual address (for realmode).
 *
 * The order of the memory regions is not changed. The feature looks at the
 * available space for the regions based on different configuration options
 * and randomizes the base and space between each. The size of the physical
 * memory mapping is the available physical memory.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/random.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/kaslr.h>

#include "mm_internal.h"

#define TB_SHIFT 40

/*
 * Virtual address start and end range for randomization. The end changes
 * based on configuration to provide the highest amount of space for
 * randomization, which increases the possible random positions of each
 * randomized region.
 *
 * You need to add an #ifdef entry if you introduce a new memory region
 * compatible with KASLR. Your entry must be in logical order with the
 * memory layout. For example, ESPFIX comes before EFI because its virtual
 * address is lower. You also need to add a BUILD_BUG_ON() in
 * kernel_randomize_memory() to ensure that this order is correct and won't
 * be changed.
 */
static const unsigned long vaddr_start = __PAGE_OFFSET_BASE;

#if defined(CONFIG_X86_ESPFIX64)
static const unsigned long vaddr_end = ESPFIX_BASE_ADDR;
#elif defined(CONFIG_EFI)
static const unsigned long vaddr_end = EFI_VA_END;
#else
static const unsigned long vaddr_end = __START_KERNEL_map;
#endif

/* Default values */
unsigned long page_offset_base = __PAGE_OFFSET_BASE;
EXPORT_SYMBOL(page_offset_base);
unsigned long vmalloc_base = __VMALLOC_BASE;
EXPORT_SYMBOL(vmalloc_base);
unsigned long vmemmap_base = __VMEMMAP_BASE;
EXPORT_SYMBOL(vmemmap_base);

/*
 * Memory regions randomized by KASLR (except modules, which use a separate
 * logic earlier during boot). The list is ordered based on virtual
 * addresses. This order is kept after randomization.
 */
static __initdata struct kaslr_memory_region {
	unsigned long *base;
	unsigned long size_tb;
} kaslr_regions[] = {
	{ &page_offset_base, 1 << (__PHYSICAL_MASK_SHIFT - TB_SHIFT) /* Maximum */ },
	{ &vmalloc_base, VMALLOC_SIZE_TB },
	{ &vmemmap_base, 1 },
};
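
/*
 * Note: size_tb is expressed in terabytes (see TB_SHIFT). The physical
 * mapping entry above starts at the maximum addressable physical memory
 * (1 << (__PHYSICAL_MASK_SHIFT - TB_SHIFT) TB) and is trimmed down to the
 * actually available memory, plus hotplug padding, in
 * kernel_randomize_memory().
 */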

/* Get size in bytes used by the memory region */
static inline unsigned long get_padding(struct kaslr_memory_region *region)
{
	return (region->size_tb << TB_SHIFT);
}

/*
 * Apply no randomization if KASLR was disabled at boot or if KASAN
 * is enabled. KASAN shadow mappings rely on regions being PGD aligned.
 */
static inline bool kaslr_memory_enabled(void)
{
	return kaslr_enabled() && !IS_ENABLED(CONFIG_KASAN);
}

/* Initialize base and padding for each memory region randomized with KASLR */
void __init kernel_randomize_memory(void)
{
	size_t i;
	unsigned long vaddr = vaddr_start;
	unsigned long rand, memory_tb;
	struct rnd_state rand_state;
	unsigned long remain_entropy;

	/*
	 * These BUILD_BUG_ON checks ensure that the memory layout is
	 * consistent with the vaddr_start/vaddr_end variables.
	 */
	BUILD_BUG_ON(vaddr_start >= vaddr_end);
	BUILD_BUG_ON(IS_ENABLED(CONFIG_X86_ESPFIX64) &&
		     vaddr_end >= EFI_VA_END);
	BUILD_BUG_ON((IS_ENABLED(CONFIG_X86_ESPFIX64) ||
		      IS_ENABLED(CONFIG_EFI)) &&
		     vaddr_end >= __START_KERNEL_map);
	BUILD_BUG_ON(vaddr_end > __START_KERNEL_map);

	if (!kaslr_memory_enabled())
		return;

	/*
	 * Update the physical memory mapping to the available memory and
	 * add padding if needed (especially for memory hotplug support).
	 */
	BUG_ON(kaslr_regions[0].base != &page_offset_base);
	memory_tb = DIV_ROUND_UP(max_pfn << PAGE_SHIFT, 1UL << TB_SHIFT) +
		CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING;
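
	/*
	 * Example (hypothetical numbers): with 1 TB of RAM and a physical
	 * padding of 10 TB (CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING=0xa,
	 * the Kconfig default), memory_tb works out to 11 TB, leaving
	 * headroom for hotplugged memory inside the randomized physical
	 * mapping.
	 */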

	/* Adapt physical memory region size based on available memory */
	if (memory_tb < kaslr_regions[0].size_tb)
		kaslr_regions[0].size_tb = memory_tb;

	/* Calculate entropy available between regions */
	remain_entropy = vaddr_end - vaddr_start;
	for (i = 0; i < ARRAY_SIZE(kaslr_regions); i++)
		remain_entropy -= get_padding(&kaslr_regions[i]);

	prandom_seed_state(&rand_state, kaslr_get_random_long("Memory"));

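	/*
	 * Walk the regions in address order. At each step, the entropy that
	 * is still unassigned is split evenly across the regions left to
	 * randomize, so an early region cannot consume it all.
	 */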
	for (i = 0; i < ARRAY_SIZE(kaslr_regions); i++) {
		unsigned long entropy;

		/*
		 * Select a random virtual address using the extra entropy
		 * available.
		 */
		entropy = remain_entropy / (ARRAY_SIZE(kaslr_regions) - i);
		prandom_bytes_state(&rand_state, &rand, sizeof(rand));
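		/*
		 * Truncate the offset to the randomization granularity:
		 * P4D aligned (512 GB steps) with 5-level paging, PUD
		 * aligned (1 GB steps) with 4-level paging.
		 */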
		if (IS_ENABLED(CONFIG_X86_5LEVEL))
			entropy = (rand % (entropy + 1)) & P4D_MASK;
		else
			entropy = (rand % (entropy + 1)) & PUD_MASK;
		vaddr += entropy;
		*kaslr_regions[i].base = vaddr;

		/*
		 * Jump the region and add a minimum padding based on
		 * randomization alignment.
		 */
		vaddr += get_padding(&kaslr_regions[i]);
		if (IS_ENABLED(CONFIG_X86_5LEVEL))
			vaddr = round_up(vaddr + 1, P4D_SIZE);
		else
			vaddr = round_up(vaddr + 1, PUD_SIZE);
		remain_entropy -= entropy;
	}
}

static void __meminit init_trampoline_pud(void)
{
	unsigned long paddr, paddr_next;
	pgd_t *pgd;
	pud_t *pud_page, *pud_page_tramp;
	int i;

	pud_page_tramp = alloc_low_page();

	paddr = 0;
	pgd = pgd_offset_k((unsigned long)__va(paddr));
	pud_page = (pud_t *) pgd_page_vaddr(*pgd);

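	/*
	 * Copy the PUD entries covering low physical memory from the
	 * kernel's direct mapping into the trampoline table, so the
	 * real-mode startup code finds the same 1:1 mapping at a PGD
	 * aligned address.
	 */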
	for (i = pud_index(paddr); i < PTRS_PER_PUD; i++, paddr = paddr_next) {
		pud_t *pud, *pud_tramp;
		unsigned long vaddr = (unsigned long)__va(paddr);

		pud_tramp = pud_page_tramp + pud_index(paddr);
		pud = pud_page + pud_index(vaddr);
		paddr_next = (paddr & PUD_MASK) + PUD_SIZE;

		*pud_tramp = *pud;
	}

	set_pgd(&trampoline_pgd_entry,
		__pgd(_KERNPG_TABLE | __pa(pud_page_tramp)));
}
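
/*
 * Same as init_trampoline_pud(), but copying entries one page-table level
 * higher (P4D) for the 5-level paging case.
 */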
static void __meminit init_trampoline_p4d(void)
{
	unsigned long paddr, paddr_next;
	pgd_t *pgd;
	p4d_t *p4d_page, *p4d_page_tramp;
	int i;

	p4d_page_tramp = alloc_low_page();

	paddr = 0;
	pgd = pgd_offset_k((unsigned long)__va(paddr));
	p4d_page = (p4d_t *) pgd_page_vaddr(*pgd);

	for (i = p4d_index(paddr); i < PTRS_PER_P4D; i++, paddr = paddr_next) {
		p4d_t *p4d, *p4d_tramp;
		unsigned long vaddr = (unsigned long)__va(paddr);

		p4d_tramp = p4d_page_tramp + p4d_index(paddr);
		p4d = p4d_page + p4d_index(vaddr);
		paddr_next = (paddr & P4D_MASK) + P4D_SIZE;

		*p4d_tramp = *p4d;
	}

	set_pgd(&trampoline_pgd_entry,
		__pgd(_KERNPG_TABLE | __pa(p4d_page_tramp)));
}

/*
 * Create a PGD aligned trampoline table to allow real mode initialization
 * of additional CPUs. Consumes only one low memory page.
 */
void __meminit init_trampoline(void)
{
	if (!kaslr_memory_enabled()) {
		init_trampoline_default();
		return;
	}

	if (IS_ENABLED(CONFIG_X86_5LEVEL))
		init_trampoline_p4d();
	else
		init_trampoline_pud();
}