// SPDX-License-Identifier: GPL-2.0
/*
 * This file implements KASLR memory randomization for x86_64. It randomizes
 * the virtual address space of kernel memory regions (physical memory
 * mapping, vmalloc & vmemmap) for x86_64. This security feature mitigates
 * exploits relying on predictable kernel addresses.
 *
 * Entropy is generated using the KASLR early boot functions now shared in
 * the lib directory (originally written by Kees Cook). Randomization is
 * done on PGD & P4D/PUD page table levels to increase possible addresses.
 * The physical memory mapping code was adapted to support P4D/PUD level
 * virtual addresses. In the best configuration, this implementation provides
 * about 30,000 possible virtual addresses on average for each memory region.
 * An additional low memory page is used to ensure each CPU can start with
 * a PGD aligned virtual address (for realmode).
 *
 * The order of the memory regions is not changed. The feature looks at
 * the available space for the regions based on different configuration
 * options and randomizes the base and the space between each. The size of
 * the physical memory mapping is the available physical memory.
 */
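
/*
 * For orientation (illustrative, not part of the algorithm): the regions
 * randomized below are, in virtual address order,
 *
 *   page_offset_base  (direct mapping of physical memory)
 *   vmalloc_base      (vmalloc/ioremap space)
 *   vmemmap_base      (struct page array)
 *
 * Each base is shifted up by a random amount that is PUD aligned with
 * 4-level paging or P4D aligned with 5-level paging, and the order above
 * is preserved.
 */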

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/random.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
#include <asm/kaslr.h>

#include "mm_internal.h"

#define TB_SHIFT 40

/*
 * Virtual address start and end range for randomization. The end changes
 * based on the configuration to have the highest amount of space available
 * for randomization. It increases the possible random position of each
 * randomized region.
 *
 * You need to add an ifdef entry if you introduce a new memory region
 * compatible with KASLR. Your entry must be in logical order with the memory
 * layout. For example, ESPFIX is before EFI because its virtual address is
 * before. You also need to add a BUILD_BUG_ON() in kernel_randomize_memory()
 * to ensure that this order is correct and won't be changed.
 */
static const unsigned long vaddr_start = __PAGE_OFFSET_BASE;

#if defined(CONFIG_X86_ESPFIX64)
static const unsigned long vaddr_end = ESPFIX_BASE_ADDR;
#elif defined(CONFIG_EFI)
static const unsigned long vaddr_end = EFI_VA_END;
#else
static const unsigned long vaddr_end = __START_KERNEL_map;
#endif

/* Default values */
unsigned long page_offset_base = __PAGE_OFFSET_BASE;
EXPORT_SYMBOL(page_offset_base);
unsigned long vmalloc_base = __VMALLOC_BASE;
EXPORT_SYMBOL(vmalloc_base);
unsigned long vmemmap_base = __VMEMMAP_BASE;
EXPORT_SYMBOL(vmemmap_base);

/*
 * Memory regions randomized by KASLR (except modules, which use separate
 * logic earlier during boot). The list is ordered based on virtual
 * addresses. This order is kept after randomization.
 */
static __initdata struct kaslr_memory_region {
	unsigned long *base;
	unsigned long size_tb;
} kaslr_regions[] = {
	{ &page_offset_base, 1 << (__PHYSICAL_MASK_SHIFT - TB_SHIFT) /* Maximum */ },
	{ &vmalloc_base, VMALLOC_SIZE_TB },
	{ &vmemmap_base, 1 },
};
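
/*
 * For scale (illustrative): with __PHYSICAL_MASK_SHIFT == 46, the physical
 * mapping entry above allows at most 1 << 6 == 64TB; the vmalloc region
 * gets VMALLOC_SIZE_TB (32TB with 4-level paging) and vmemmap gets 1TB.
 */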

/* Get size in bytes used by the memory region */
static inline unsigned long get_padding(struct kaslr_memory_region *region)
{
	return (region->size_tb << TB_SHIFT);
}

/*
 * Apply no randomization if KASLR was disabled at boot or if KASAN
 * is enabled. KASAN shadow mappings rely on regions being PGD aligned.
 */
static inline bool kaslr_memory_enabled(void)
{
	return kaslr_enabled() && !IS_ENABLED(CONFIG_KASAN);
}

/* Initialize base and padding for each memory region randomized with KASLR */
void __init kernel_randomize_memory(void)
{
	size_t i;
	unsigned long vaddr = vaddr_start;
	unsigned long rand, memory_tb;
	struct rnd_state rand_state;
	unsigned long remain_entropy;

	/*
	 * These BUILD_BUG_ON checks ensure the memory layout is consistent
	 * with the vaddr_start/vaddr_end variables.
	 */
	BUILD_BUG_ON(vaddr_start >= vaddr_end);
	BUILD_BUG_ON(IS_ENABLED(CONFIG_X86_ESPFIX64) &&
		     vaddr_end >= EFI_VA_END);
	BUILD_BUG_ON((IS_ENABLED(CONFIG_X86_ESPFIX64) ||
		      IS_ENABLED(CONFIG_EFI)) &&
		     vaddr_end >= __START_KERNEL_map);
	BUILD_BUG_ON(vaddr_end > __START_KERNEL_map);

	if (!kaslr_memory_enabled())
		return;

	/*
	 * Update the size of the physical memory mapping to the available
	 * memory and add padding if needed (especially for memory hotplug
	 * support).
	 */
	BUG_ON(kaslr_regions[0].base != &page_offset_base);
	memory_tb = DIV_ROUND_UP(max_pfn << PAGE_SHIFT, 1UL << TB_SHIFT) +
		CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING;
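
	/*
	 * Worked example (hypothetical numbers): with 64GB of RAM,
	 * max_pfn << PAGE_SHIFT is 2^36 bytes, so the DIV_ROUND_UP() above
	 * yields 1TB; assuming CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING is
	 * 10, memory_tb is 11TB and the physical mapping region is shrunk
	 * to that size below.
	 */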

	/* Adapt the physical memory region size based on available memory */
	if (memory_tb < kaslr_regions[0].size_tb)
		kaslr_regions[0].size_tb = memory_tb;

	/* Calculate entropy available between regions */
	remain_entropy = vaddr_end - vaddr_start;
	for (i = 0; i < ARRAY_SIZE(kaslr_regions); i++)
		remain_entropy -= get_padding(&kaslr_regions[i]);
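
	/*
	 * Illustrative sketch (hypothetical numbers): if 30TB of space is
	 * left after subtracting each region's own size, the loop below
	 * gives the first of the three regions a random offset of at most
	 * remain_entropy / 3 (10TB), rounded down to the randomization
	 * alignment; whatever is not consumed stays in remain_entropy and
	 * is re-divided among the remaining regions.
	 */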

	prandom_seed_state(&rand_state, kaslr_get_random_long("Memory"));

	for (i = 0; i < ARRAY_SIZE(kaslr_regions); i++) {
		unsigned long entropy;

		/*
		 * Select a random virtual address using the extra entropy
		 * available.
		 */
		entropy = remain_entropy / (ARRAY_SIZE(kaslr_regions) - i);
		prandom_bytes_state(&rand_state, &rand, sizeof(rand));
		if (IS_ENABLED(CONFIG_X86_5LEVEL))
			entropy = (rand % (entropy + 1)) & P4D_MASK;
		else
			entropy = (rand % (entropy + 1)) & PUD_MASK;
		vaddr += entropy;
		*kaslr_regions[i].base = vaddr;

		/*
		 * Jump over the region and add a minimum padding based on
		 * the randomization alignment.
		 */
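		/*
		 * Concretely (illustrative): the round_up() below keeps at
		 * least one extra PUD (1GB) between regions with 4-level
		 * paging, or one extra P4D (512GB) with 5-level paging.
		 */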
		vaddr += get_padding(&kaslr_regions[i]);
		if (IS_ENABLED(CONFIG_X86_5LEVEL))
			vaddr = round_up(vaddr + 1, P4D_SIZE);
		else
			vaddr = round_up(vaddr + 1, PUD_SIZE);
		remain_entropy -= entropy;
	}
}

static void __meminit init_trampoline_pud(void)
{
	unsigned long paddr, paddr_next;
	pgd_t *pgd;
	pud_t *pud_page, *pud_page_tramp;
	int i;

	pud_page_tramp = alloc_low_page();

	paddr = 0;
	pgd = pgd_offset_k((unsigned long)__va(paddr));
	pud_page = (pud_t *) pgd_page_vaddr(*pgd);

	for (i = pud_index(paddr); i < PTRS_PER_PUD; i++, paddr = paddr_next) {
		pud_t *pud, *pud_tramp;
		unsigned long vaddr = (unsigned long)__va(paddr);

		pud_tramp = pud_page_tramp + pud_index(paddr);
		pud = pud_page + pud_index(vaddr);
		paddr_next = (paddr & PUD_MASK) + PUD_SIZE;

		/* Copy the kernel's PUD entry into the trampoline table */
		*pud_tramp = *pud;
	}

	set_pgd(&trampoline_pgd_entry,
		__pgd(_KERNPG_TABLE | __pa(pud_page_tramp)));
}

static void __meminit init_trampoline_p4d(void)
{
	unsigned long paddr, paddr_next;
	pgd_t *pgd;
	p4d_t *p4d_page, *p4d_page_tramp;
	int i;

	p4d_page_tramp = alloc_low_page();

	paddr = 0;
	pgd = pgd_offset_k((unsigned long)__va(paddr));
	p4d_page = (p4d_t *) pgd_page_vaddr(*pgd);

	for (i = p4d_index(paddr); i < PTRS_PER_P4D; i++, paddr = paddr_next) {
		p4d_t *p4d, *p4d_tramp;
		unsigned long vaddr = (unsigned long)__va(paddr);

		p4d_tramp = p4d_page_tramp + p4d_index(paddr);
		p4d = p4d_page + p4d_index(vaddr);
		paddr_next = (paddr & P4D_MASK) + P4D_SIZE;

		/* Copy the kernel's P4D entry into the trampoline table */
		*p4d_tramp = *p4d;
	}

	set_pgd(&trampoline_pgd_entry,
		__pgd(_KERNPG_TABLE | __pa(p4d_page_tramp)));
}
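
/*
 * Background (sketch): with KASLR the direct mapping may start at a
 * non-PGD-aligned virtual address, so the real-mode trampoline cannot simply
 * reuse the kernel's top-level entry for low memory. The helpers above fill
 * one dedicated low memory page with copies of the kernel's PUD/P4D entries
 * covering low physical memory and install it as trampoline_pgd_entry.
 */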

/*
 * Create a PGD aligned trampoline table to allow real mode initialization
 * of additional CPUs. Consumes only one low memory page.
 */
void __meminit init_trampoline(void)
{
	if (!kaslr_memory_enabled()) {
		init_trampoline_default();
		return;
	}

	if (IS_ENABLED(CONFIG_X86_5LEVEL))
		init_trampoline_p4d();
	else
		init_trampoline_pud();
}