// SPDX-License-Identifier: GPL-2.0
#define DISABLE_BRANCH_PROFILING
#define pr_fmt(fmt) "kasan: " fmt
#include <linux/bootmem.h>
#include <linux/kasan.h>
#include <linux/kdebug.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/vmalloc.h>

#include <asm/e820/types.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/cpu_entry_area.h>

extern struct range pfn_mapped[E820_MAX_ENTRIES];

static p4d_t tmp_p4d_table[PTRS_PER_P4D] __initdata __aligned(PAGE_SIZE);

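/*
 * Allocate @size bytes of @size-aligned memory from memblock,
 * preferring node @nid. The _nopanic variant may return NULL.
 */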
static __init void *early_alloc(size_t size, int nid)
{
	return memblock_virt_alloc_try_nid_nopanic(size, size,
			__pa(MAX_DMA_ADDRESS), BOOTMEM_ALLOC_ACCESSIBLE, nid);
}

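/*
 * Populate the shadow range [addr, end) under one pmd. When the range
 * covers exactly one aligned PMD_SIZE block and the CPU supports 2M
 * pages (PSE), try a single huge mapping first; otherwise fall back to
 * a page of 4K ptes. Already-present ptes are left untouched.
 */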
static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr,
				      unsigned long end, int nid)
{
	pte_t *pte;

	if (pmd_none(*pmd)) {
		void *p;

		if (boot_cpu_has(X86_FEATURE_PSE) &&
		    ((end - addr) == PMD_SIZE) &&
		    IS_ALIGNED(addr, PMD_SIZE)) {
			p = early_alloc(PMD_SIZE, nid);
			if (p && pmd_set_huge(pmd, __pa(p), PAGE_KERNEL))
				return;
			else if (p)
				memblock_free(__pa(p), PMD_SIZE);
		}

		p = early_alloc(PAGE_SIZE, nid);
		pmd_populate_kernel(&init_mm, pmd, p);
	}

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t entry;
		void *p;

		if (!pte_none(*pte))
			continue;

		p = early_alloc(PAGE_SIZE, nid);
		entry = pfn_pte(PFN_DOWN(__pa(p)), PAGE_KERNEL);
		set_pte_at(&init_mm, addr, pte, entry);
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

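/*
 * Same idea one level up: map a whole aligned PUD with a 1G page
 * (GBPAGES) when possible, otherwise allocate a pmd page and recurse.
 * pmds that ended up as huge mappings are skipped in the walk.
 */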
static void __init kasan_populate_pud(pud_t *pud, unsigned long addr,
				      unsigned long end, int nid)
{
	pmd_t *pmd;
	unsigned long next;

	if (pud_none(*pud)) {
		void *p;

		if (boot_cpu_has(X86_FEATURE_GBPAGES) &&
		    ((end - addr) == PUD_SIZE) &&
		    IS_ALIGNED(addr, PUD_SIZE)) {
			p = early_alloc(PUD_SIZE, nid);
			if (p && pud_set_huge(pud, __pa(p), PAGE_KERNEL))
				return;
			else if (p)
				memblock_free(__pa(p), PUD_SIZE);
		}

		p = early_alloc(PAGE_SIZE, nid);
		pud_populate(&init_mm, pud, p);
	}

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (!pmd_large(*pmd))
			kasan_populate_pmd(pmd, addr, next, nid);
	} while (pmd++, addr = next, addr != end);
}

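/*
 * Allocate a pud page for an empty p4d entry, then recurse into the
 * puds, skipping entries already mapped with a 1G page.
 */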
static void __init kasan_populate_p4d(p4d_t *p4d, unsigned long addr,
				      unsigned long end, int nid)
{
	pud_t *pud;
	unsigned long next;

	if (p4d_none(*p4d)) {
		void *p = early_alloc(PAGE_SIZE, nid);

		p4d_populate(&init_mm, p4d, p);
	}

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (!pud_large(*pud))
			kasan_populate_pud(pud, addr, next, nid);
	} while (pud++, addr = next, addr != end);
}

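/*
 * Allocate a p4d page for an empty pgd entry and recurse. With 4-level
 * paging the p4d level is folded, so this effectively fills the pud
 * level directly.
 */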
static void __init kasan_populate_pgd(pgd_t *pgd, unsigned long addr,
				      unsigned long end, int nid)
{
	void *p;
	p4d_t *p4d;
	unsigned long next;

	if (pgd_none(*pgd)) {
		p = early_alloc(PAGE_SIZE, nid);
		pgd_populate(&init_mm, pgd, p);
	}

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		kasan_populate_p4d(p4d, addr, next, nid);
	} while (p4d++, addr = next, addr != end);
}

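/*
 * Entry point for mapping real (writable) shadow memory for
 * [addr, end), as opposed to the shared zero shadow installed by
 * kasan_populate_zero_shadow().
 */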
static void __init kasan_populate_shadow(unsigned long addr, unsigned long end,
					 int nid)
{
	pgd_t *pgd;
	unsigned long next;

	addr = addr & PAGE_MASK;
	end = round_up(end, PAGE_SIZE);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		kasan_populate_pgd(pgd, addr, next, nid);
	} while (pgd++, addr = next, addr != end);
}

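/*
 * Populate writable shadow for one range of mapped physical memory,
 * allocating from the node that the range itself lives on.
 */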
static void __init map_range(struct range *range)
{
	unsigned long start;
	unsigned long end;

	start = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->start));
	end = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->end));

	kasan_populate_shadow(start, end, early_pfn_to_nid(range->start));
}

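/*
 * Unmap the early zero shadow from [start, end). Whole pgd entries are
 * cleared first; the tail that shares its pgd entry with other kernel
 * mappings (see comment in kasan_init()) is cleared one p4d at a time.
 */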
static void __init clear_pgds(unsigned long start,
			unsigned long end)
{
	pgd_t *pgd;
	/* See comment in kasan_init() */
	unsigned long pgd_end = end & PGDIR_MASK;

	for (; start < pgd_end; start += PGDIR_SIZE) {
		pgd = pgd_offset_k(start);
		/*
		 * With folded p4d, pgd_clear() is a nop, use p4d_clear()
		 * instead.
		 */
		if (CONFIG_PGTABLE_LEVELS < 5)
			p4d_clear(p4d_offset(pgd, start));
		else
			pgd_clear(pgd);
	}

	pgd = pgd_offset_k(start);
	for (; start < end; start += P4D_SIZE)
		p4d_clear(p4d_offset(pgd, start));
}

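/*
 * A p4d_offset() usable before the direct mapping is set up: the p4d
 * table's virtual address is computed through the kernel text mapping
 * (__START_KERNEL_map - phys_base) instead of __va(). With 4-level
 * paging the p4d is folded into the pgd.
 */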
static inline p4d_t *early_p4d_offset(pgd_t *pgd, unsigned long addr)
{
	unsigned long p4d;

	if (!IS_ENABLED(CONFIG_X86_5LEVEL))
		return (p4d_t *)pgd;

	p4d = __pa_nodebug(pgd_val(*pgd)) & PTE_PFN_MASK;
	p4d += __START_KERNEL_map - phys_base;
	return (p4d_t *)p4d + p4d_index(addr);
}

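/*
 * Point every empty entry covering [addr, end) at the zero shadow:
 * an empty pgd gets kasan_zero_p4d, empty p4ds get kasan_zero_pud.
 */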
static void __init kasan_early_p4d_populate(pgd_t *pgd,
					    unsigned long addr,
					    unsigned long end)
{
	pgd_t pgd_entry;
	p4d_t *p4d, p4d_entry;
	unsigned long next;

	if (pgd_none(*pgd)) {
		pgd_entry = __pgd(_KERNPG_TABLE | __pa_nodebug(kasan_zero_p4d));
		set_pgd(pgd, pgd_entry);
	}

	p4d = early_p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);

		if (!p4d_none(*p4d))
			continue;

		p4d_entry = __p4d(_KERNPG_TABLE | __pa_nodebug(kasan_zero_pud));
		set_p4d(p4d, p4d_entry);
	} while (p4d++, addr = next, addr != end && p4d_none(*p4d));
}

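/*
 * Map the whole shadow region in @pgd to the zero shadow so that
 * instrumented code can run before the real shadow is populated.
 */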
static void __init kasan_map_early_shadow(pgd_t *pgd)
{
	/* See comment in kasan_init() */
	unsigned long addr = KASAN_SHADOW_START & PGDIR_MASK;
	unsigned long end = KASAN_SHADOW_END;
	unsigned long next;

	pgd += pgd_index(addr);
	do {
		next = pgd_addr_end(addr, end);
		kasan_early_p4d_populate(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}

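/*
 * With inline instrumentation a NULL or userspace pointer dereference
 * typically surfaces as a general protection fault on the computed
 * shadow address, so add a hint to the die() output.
 */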
#ifdef CONFIG_KASAN_INLINE
static int kasan_die_handler(struct notifier_block *self,
			     unsigned long val,
			     void *data)
{
	if (val == DIE_GPF) {
		pr_emerg("CONFIG_KASAN_INLINE enabled\n");
		pr_emerg("GPF could be caused by NULL-ptr deref or user memory access\n");
	}
	return NOTIFY_OK;
}

static struct notifier_block kasan_die_notifier = {
	.notifier_call = kasan_die_handler,
};
#endif

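/*
 * Called early in boot, before the final page tables are live: build
 * the static zero-shadow hierarchy (every pte points at
 * kasan_zero_page) and hook it into both boot-time page tables.
 */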
void __init kasan_early_init(void)
{
	int i;
	pteval_t pte_val = __pa_nodebug(kasan_zero_page) | __PAGE_KERNEL | _PAGE_ENC;
	pmdval_t pmd_val = __pa_nodebug(kasan_zero_pte) | _KERNPG_TABLE;
	pudval_t pud_val = __pa_nodebug(kasan_zero_pmd) | _KERNPG_TABLE;
	p4dval_t p4d_val = __pa_nodebug(kasan_zero_pud) | _KERNPG_TABLE;

	for (i = 0; i < PTRS_PER_PTE; i++)
		kasan_zero_pte[i] = __pte(pte_val);

	for (i = 0; i < PTRS_PER_PMD; i++)
		kasan_zero_pmd[i] = __pmd(pmd_val);

	for (i = 0; i < PTRS_PER_PUD; i++)
		kasan_zero_pud[i] = __pud(pud_val);

	for (i = 0; IS_ENABLED(CONFIG_X86_5LEVEL) && i < PTRS_PER_P4D; i++)
		kasan_zero_p4d[i] = __p4d(p4d_val);

	kasan_map_early_shadow(early_top_pgt);
	kasan_map_early_shadow(init_top_pgt);
}

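/*
 * Replace the early zero shadow with the real shadow layout: switch to
 * a scratch copy of the page tables, tear down the early shadow,
 * populate writable shadow for the direct mapping, the CPU entry area
 * and the kernel image, point everything else at the (now cleared and
 * write-protected) zero shadow, switch back, and finally enable
 * reporting by setting init_task.kasan_depth to 0.
 */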
void __init kasan_init(void)
{
	int i;
	void *shadow_cpu_entry_begin, *shadow_cpu_entry_end;

#ifdef CONFIG_KASAN_INLINE
	register_die_notifier(&kasan_die_notifier);
#endif

	memcpy(early_top_pgt, init_top_pgt, sizeof(early_top_pgt));

	/*
	 * We use the same shadow offset for 4- and 5-level paging to
	 * facilitate boot-time switching between paging modes.
	 * As a result, in 5-level paging mode KASAN_SHADOW_START and
	 * KASAN_SHADOW_END are not aligned to the PGD boundary.
	 *
	 * KASAN_SHADOW_START doesn't share a PGD with anything else.
	 * We claim the whole PGD entry to make things easier.
	 *
	 * KASAN_SHADOW_END lands in the last PGD entry and collides with
	 * a bunch of things: kernel code, modules, the EFI mapping, etc.
	 * We need to take extra steps to not overwrite them.
	 */
	if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
		void *ptr;

		ptr = (void *)pgd_page_vaddr(*pgd_offset_k(KASAN_SHADOW_END));
		memcpy(tmp_p4d_table, (void *)ptr, sizeof(tmp_p4d_table));
		set_pgd(&early_top_pgt[pgd_index(KASAN_SHADOW_END)],
			__pgd(__pa(tmp_p4d_table) | _KERNPG_TABLE));
	}

	load_cr3(early_top_pgt);
	__flush_tlb_all();

	clear_pgds(KASAN_SHADOW_START & PGDIR_MASK, KASAN_SHADOW_END);

	kasan_populate_zero_shadow((void *)(KASAN_SHADOW_START & PGDIR_MASK),
			kasan_mem_to_shadow((void *)PAGE_OFFSET));

	for (i = 0; i < E820_MAX_ENTRIES; i++) {
		if (pfn_mapped[i].end == 0)
			break;

		map_range(&pfn_mapped[i]);
	}

	shadow_cpu_entry_begin = (void *)CPU_ENTRY_AREA_BASE;
	shadow_cpu_entry_begin = kasan_mem_to_shadow(shadow_cpu_entry_begin);
	shadow_cpu_entry_begin = (void *)round_down(
			(unsigned long)shadow_cpu_entry_begin, PAGE_SIZE);

	shadow_cpu_entry_end = (void *)(CPU_ENTRY_AREA_BASE +
					CPU_ENTRY_AREA_MAP_SIZE);
	shadow_cpu_entry_end = kasan_mem_to_shadow(shadow_cpu_entry_end);
	shadow_cpu_entry_end = (void *)round_up(
			(unsigned long)shadow_cpu_entry_end, PAGE_SIZE);

	kasan_populate_zero_shadow(
		kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
		shadow_cpu_entry_begin);

	kasan_populate_shadow((unsigned long)shadow_cpu_entry_begin,
			      (unsigned long)shadow_cpu_entry_end, 0);

	kasan_populate_zero_shadow(shadow_cpu_entry_end,
			kasan_mem_to_shadow((void *)__START_KERNEL_map));

	kasan_populate_shadow((unsigned long)kasan_mem_to_shadow(_stext),
			      (unsigned long)kasan_mem_to_shadow(_end),
			      early_pfn_to_nid(__pa(_stext)));

	kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END),
			(void *)KASAN_SHADOW_END);

	load_cr3(init_top_pgt);
	__flush_tlb_all();

	/*
	 * kasan_zero_page has been used as early shadow memory, thus it may
	 * contain some garbage. Now we can clear and write protect it, since
	 * after the TLB flush no one should write to it.
	 */
	memset(kasan_zero_page, 0, PAGE_SIZE);
	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte_t pte = __pte(__pa(kasan_zero_page) | __PAGE_KERNEL_RO | _PAGE_ENC);
		set_pte(&kasan_zero_pte[i], pte);
	}
	/* Flush TLBs again to be sure that write protection is applied. */
	__flush_tlb_all();

	init_task.kasan_depth = 0;
	pr_info("KernelAddressSanitizer initialized\n");
}