/*
 * This file contains some kasan initialization code.
 *
 * Copyright (c) 2015 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/pfn.h>
#include <linux/slab.h>

#include <asm/page.h>
#include <asm/pgalloc.h>
/*
 * This page serves two purposes:
 *   - It is used as early shadow memory. The entire shadow region is
 *     populated with this page before we are able to set up the normal
 *     shadow memory.
 *   - Later it is reused as the zero shadow to cover large ranges of
 *     memory that are allowed to be accessed but are not handled by
 *     kasan (vmalloc/vmemmap ...).
 */
unsigned char kasan_zero_page[PAGE_SIZE] __page_aligned_bss;
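
/*
 * Illustrative note (added, assuming the common KASAN_SHADOW_SCALE_SHIFT
 * of 3): each shadow byte describes 8 bytes of memory, so this single
 * page of zeroes can serve as shadow for PAGE_SIZE * 8 bytes (32K with
 * 4K pages) of "fully accessible" memory. The shared zero page tables
 * declared below extend the same trick to arbitrarily large ranges.
 */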
#if CONFIG_PGTABLE_LEVELS > 4
p4d_t kasan_zero_p4d[MAX_PTRS_PER_P4D] __page_aligned_bss;
static inline bool kasan_p4d_table(pgd_t pgd)
{
        return pgd_page(pgd) == virt_to_page(lm_alias(kasan_zero_p4d));
}
#else
static inline bool kasan_p4d_table(pgd_t pgd)
{
        return false;
}
#endif
#if CONFIG_PGTABLE_LEVELS > 3
pud_t kasan_zero_pud[PTRS_PER_PUD] __page_aligned_bss;
static inline bool kasan_pud_table(p4d_t p4d)
{
        return p4d_page(p4d) == virt_to_page(lm_alias(kasan_zero_pud));
}
#else
static inline bool kasan_pud_table(p4d_t p4d)
{
        return false;
}
#endif
#if CONFIG_PGTABLE_LEVELS > 2
pmd_t kasan_zero_pmd[PTRS_PER_PMD] __page_aligned_bss;
static inline bool kasan_pmd_table(pud_t pud)
{
        return pud_page(pud) == virt_to_page(lm_alias(kasan_zero_pmd));
}
#else
static inline bool kasan_pmd_table(pud_t pud)
{
        return false;
}
#endif
pte_t kasan_zero_pte[PTRS_PER_PTE] __page_aligned_bss;

static inline bool kasan_pte_table(pmd_t pmd)
{
        return pmd_page(pmd) == virt_to_page(lm_alias(kasan_zero_pte));
}

static inline bool kasan_zero_page_entry(pte_t pte)
{
        return pte_page(pte) == virt_to_page(lm_alias(kasan_zero_page));
}
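
/*
 * Note (added): the kasan_*_table() and kasan_zero_page_entry()
 * predicates above all answer the same question: does this page-table
 * entry point at the shared read-only zero table/page, rather than at
 * a normally allocated one? The removal code at the bottom of this
 * file uses them to decide whether an entry can simply be cleared or
 * whether a real table has to be walked and freed.
 */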
static __init void *early_alloc(size_t size, int node)
{
        return memblock_virt_alloc_try_nid(size, size, __pa(MAX_DMA_ADDRESS),
                                        BOOTMEM_ALLOC_ACCESSIBLE, node);
}
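
/*
 * Note (added): early_alloc() exists because the populate functions run
 * both before and after the slab allocator is up. Before
 * slab_is_available(), page tables must come from memblock/bootmem;
 * afterwards the regular p*d_alloc()/pte_alloc_one_kernel() helpers are
 * used instead, which is also why those functions are marked __ref.
 */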
static void __ref zero_pte_populate(pmd_t *pmd, unsigned long addr,
                                unsigned long end)
{
        pte_t *pte = pte_offset_kernel(pmd, addr);
        pte_t zero_pte;

        zero_pte = pfn_pte(PFN_DOWN(__pa_symbol(kasan_zero_page)), PAGE_KERNEL);
        zero_pte = pte_wrprotect(zero_pte);

        while (addr + PAGE_SIZE <= end) {
                set_pte_at(&init_mm, addr, pte, zero_pte);
                addr += PAGE_SIZE;
                pte = pte_offset_kernel(pmd, addr);
        }
}
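
/*
 * Note (added): every PTE installed by zero_pte_populate() is the same
 * write-protected mapping of kasan_zero_page. Shadow reads through any
 * of these addresses return 0 ("whole granule accessible"), while an
 * attempt to poison such a range would fault on the read-only mapping,
 * which is the intent for memory KASAN does not track.
 */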
static int __ref zero_pmd_populate(pud_t *pud, unsigned long addr,
                                unsigned long end)
{
        pmd_t *pmd = pmd_offset(pud, addr);
        unsigned long next;

        do {
                next = pmd_addr_end(addr, end);

                if (IS_ALIGNED(addr, PMD_SIZE) && end - addr >= PMD_SIZE) {
                        pmd_populate_kernel(&init_mm, pmd,
                                        lm_alias(kasan_zero_pte));
                        continue;
                }

                if (pmd_none(*pmd)) {
                        pte_t *p;

                        if (slab_is_available())
                                p = pte_alloc_one_kernel(&init_mm, addr);
                        else
                                p = early_alloc(PAGE_SIZE, NUMA_NO_NODE);
                        if (!p)
                                return -ENOMEM;

                        pmd_populate_kernel(&init_mm, pmd, p);
                }
                zero_pte_populate(pmd, addr, next);
        } while (pmd++, addr = next, addr != end);

        return 0;
}
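
/*
 * Note (added): zero_pmd_populate() and the zero_pud/p4d_populate()
 * functions below follow one pattern: if the current range covers a
 * whole, suitably aligned entry at this level, point the entry at the
 * shared zero table one level down and continue; otherwise allocate a
 * real table (from slab or memblock depending on the boot stage) and
 * recurse into the next level for the partial range.
 */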
static int __ref zero_pud_populate(p4d_t *p4d, unsigned long addr,
                                unsigned long end)
{
        pud_t *pud = pud_offset(p4d, addr);
        unsigned long next;

        do {
                next = pud_addr_end(addr, end);
                if (IS_ALIGNED(addr, PUD_SIZE) && end - addr >= PUD_SIZE) {
                        pmd_t *pmd;

                        pud_populate(&init_mm, pud, lm_alias(kasan_zero_pmd));
                        pmd = pmd_offset(pud, addr);
                        pmd_populate_kernel(&init_mm, pmd,
                                        lm_alias(kasan_zero_pte));
                        continue;
                }

                if (pud_none(*pud)) {
                        pmd_t *p;

                        if (slab_is_available()) {
                                p = pmd_alloc(&init_mm, pud, addr);
                                if (!p)
                                        return -ENOMEM;
                        } else {
                                pud_populate(&init_mm, pud,
                                        early_alloc(PAGE_SIZE, NUMA_NO_NODE));
                        }
                }
                zero_pmd_populate(pud, addr, next);
        } while (pud++, addr = next, addr != end);

        return 0;
}
static int __ref zero_p4d_populate(pgd_t *pgd, unsigned long addr,
                                unsigned long end)
{
        p4d_t *p4d = p4d_offset(pgd, addr);
        unsigned long next;

        do {
                next = p4d_addr_end(addr, end);
                if (IS_ALIGNED(addr, P4D_SIZE) && end - addr >= P4D_SIZE) {
                        pud_t *pud;
                        pmd_t *pmd;

                        p4d_populate(&init_mm, p4d, lm_alias(kasan_zero_pud));
                        pud = pud_offset(p4d, addr);
                        pud_populate(&init_mm, pud, lm_alias(kasan_zero_pmd));
                        pmd = pmd_offset(pud, addr);
                        pmd_populate_kernel(&init_mm, pmd,
                                        lm_alias(kasan_zero_pte));
                        continue;
                }

                if (p4d_none(*p4d)) {
                        pud_t *p;

                        if (slab_is_available()) {
                                p = pud_alloc(&init_mm, p4d, addr);
                                if (!p)
                                        return -ENOMEM;
                        } else {
                                p4d_populate(&init_mm, p4d,
                                        early_alloc(PAGE_SIZE, NUMA_NO_NODE));
                        }
                }
                zero_pud_populate(p4d, addr, next);
        } while (p4d++, addr = next, addr != end);

        return 0;
}
/**
 * kasan_populate_zero_shadow - populate shadow memory region with
 *                              kasan_zero_page
 * @shadow_start: start of the memory range to populate
 * @shadow_end: end of the memory range to populate
 */
int __ref kasan_populate_zero_shadow(const void *shadow_start,
                                const void *shadow_end)
{
        unsigned long addr = (unsigned long)shadow_start;
        unsigned long end = (unsigned long)shadow_end;
        pgd_t *pgd = pgd_offset_k(addr);
        unsigned long next;

        do {
                next = pgd_addr_end(addr, end);

                if (IS_ALIGNED(addr, PGDIR_SIZE) && end - addr >= PGDIR_SIZE) {
                        p4d_t *p4d;
                        pud_t *pud;
                        pmd_t *pmd;

                        /*
                         * kasan_zero_pud should be populated with pmds
                         * at this moment.
                         * The [pud,pmd]_populate*() calls below are needed
                         * only for 3- and 2-level page tables, where we
                         * don't have puds/pmds, so pgd_populate() and
                         * pud_populate() are no-ops.
                         *
                         * The ifndef is required to avoid build breakage:
                         * with 5level-fixup.h, pgd_populate() is not a nop
                         * and we would reference kasan_zero_p4d, which is
                         * not defined unless 5-level paging is enabled.
                         *
                         * The ifndef can be dropped once all KASAN-enabled
                         * architectures switch to pgtable-nop4d.h.
                         */
#ifndef __ARCH_HAS_5LEVEL_HACK
                        pgd_populate(&init_mm, pgd, lm_alias(kasan_zero_p4d));
#endif
                        p4d = p4d_offset(pgd, addr);
                        p4d_populate(&init_mm, p4d, lm_alias(kasan_zero_pud));
                        pud = pud_offset(p4d, addr);
                        pud_populate(&init_mm, pud, lm_alias(kasan_zero_pmd));
                        pmd = pmd_offset(pud, addr);
                        pmd_populate_kernel(&init_mm, pmd,
                                        lm_alias(kasan_zero_pte));
                        continue;
                }

                if (pgd_none(*pgd)) {
                        p4d_t *p;

                        if (slab_is_available()) {
                                p = p4d_alloc(&init_mm, pgd, addr);
                                if (!p)
                                        return -ENOMEM;
                        } else {
                                pgd_populate(&init_mm, pgd,
                                        early_alloc(PAGE_SIZE, NUMA_NO_NODE));
                        }
                }
                zero_p4d_populate(pgd, addr, next);
        } while (pgd++, addr = next, addr != end);

        return 0;
}
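
/*
 * Usage sketch (added; illustrative, not a call made in this file):
 * during early boot an architecture typically maps its whole shadow
 * region to the zero page first, e.g.
 *
 *      kasan_populate_zero_shadow((void *)KASAN_SHADOW_START,
 *                                 (void *)KASAN_SHADOW_END);
 *
 * where KASAN_SHADOW_START/END are the arch-specific bounds of the
 * shadow region, and only then installs writable shadow for the ranges
 * that back real memory.
 */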
static void kasan_free_pte(pte_t *pte_start, pmd_t *pmd)
{
        pte_t *pte;
        int i;

        for (i = 0; i < PTRS_PER_PTE; i++) {
                pte = pte_start + i;
                if (!pte_none(*pte))
                        return;
        }

        pte_free_kernel(&init_mm, (pte_t *)page_to_virt(pmd_page(*pmd)));
        pmd_clear(pmd);
}
static void kasan_free_pmd(pmd_t *pmd_start, pud_t *pud)
{
        pmd_t *pmd;
        int i;

        for (i = 0; i < PTRS_PER_PMD; i++) {
                pmd = pmd_start + i;
                if (!pmd_none(*pmd))
                        return;
        }

        pmd_free(&init_mm, (pmd_t *)page_to_virt(pud_page(*pud)));
        pud_clear(pud);
}
static void kasan_free_pud(pud_t *pud_start, p4d_t *p4d)
{
        pud_t *pud;
        int i;

        for (i = 0; i < PTRS_PER_PUD; i++) {
                pud = pud_start + i;
                if (!pud_none(*pud))
                        return;
        }

        pud_free(&init_mm, (pud_t *)page_to_virt(p4d_page(*p4d)));
        p4d_clear(p4d);
}
static void kasan_free_p4d(p4d_t *p4d_start, pgd_t *pgd)
{
        p4d_t *p4d;
        int i;

        for (i = 0; i < PTRS_PER_P4D; i++) {
                p4d = p4d_start + i;
                if (!p4d_none(*p4d))
                        return;
        }

        p4d_free(&init_mm, (p4d_t *)page_to_virt(pgd_page(*pgd)));
        pgd_clear(pgd);
}
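
/*
 * Note (added): the kasan_free_*() helpers above free a lower-level
 * table only when every entry in it is none, i.e. when the remove path
 * has cleared the whole table; a partially populated table is left in
 * place.
 */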
static void kasan_remove_pte_table(pte_t *pte, unsigned long addr,
                                unsigned long end)
{
        unsigned long next;

        for (; addr < end; addr = next, pte++) {
                next = (addr + PAGE_SIZE) & PAGE_MASK;
                if (next > end)
                        next = end;

                if (!pte_present(*pte))
                        continue;

                if (WARN_ON(!kasan_zero_page_entry(*pte)))
                        continue;
                pte_clear(&init_mm, addr, pte);
        }
}
static void kasan_remove_pmd_table(pmd_t *pmd, unsigned long addr,
                                unsigned long end)
{
        unsigned long next;

        for (; addr < end; addr = next, pmd++) {
                pte_t *pte;

                next = pmd_addr_end(addr, end);

                if (!pmd_present(*pmd))
                        continue;

                if (kasan_pte_table(*pmd)) {
                        if (IS_ALIGNED(addr, PMD_SIZE) &&
                            IS_ALIGNED(next, PMD_SIZE))
                                pmd_clear(pmd);
                        continue;
                }
                pte = pte_offset_kernel(pmd, addr);
                kasan_remove_pte_table(pte, addr, next);
                kasan_free_pte(pte_offset_kernel(pmd, 0), pmd);
        }
}
static void kasan_remove_pud_table(pud_t *pud, unsigned long addr,
                                unsigned long end)
{
        unsigned long next;

        for (; addr < end; addr = next, pud++) {
                pmd_t *pmd, *pmd_base;

                next = pud_addr_end(addr, end);

                if (!pud_present(*pud))
                        continue;

                if (kasan_pmd_table(*pud)) {
                        if (IS_ALIGNED(addr, PUD_SIZE) &&
                            IS_ALIGNED(next, PUD_SIZE))
                                pud_clear(pud);
                        continue;
                }

                pmd = pmd_offset(pud, addr);
                pmd_base = pmd_offset(pud, 0);
                kasan_remove_pmd_table(pmd, addr, next);
                kasan_free_pmd(pmd_base, pud);
        }
}
static void kasan_remove_p4d_table(p4d_t *p4d, unsigned long addr,
                                unsigned long end)
{
        unsigned long next;

        for (; addr < end; addr = next, p4d++) {
                pud_t *pud;

                next = p4d_addr_end(addr, end);

                if (!p4d_present(*p4d))
                        continue;

                if (kasan_pud_table(*p4d)) {
                        if (IS_ALIGNED(addr, P4D_SIZE) &&
                            IS_ALIGNED(next, P4D_SIZE))
                                p4d_clear(p4d);
                        continue;
                }

                pud = pud_offset(p4d, addr);
                kasan_remove_pud_table(pud, addr, next);
                kasan_free_pud(pud_offset(p4d, 0), p4d);
        }
}
void kasan_remove_zero_shadow(void *start, unsigned long size)
{
        unsigned long addr, end, next;
        pgd_t *pgd;

        addr = (unsigned long)kasan_mem_to_shadow(start);
        end = addr + (size >> KASAN_SHADOW_SCALE_SHIFT);

        if (WARN_ON((unsigned long)start %
                        (KASAN_SHADOW_SCALE_SIZE * PAGE_SIZE)) ||
            WARN_ON(size % (KASAN_SHADOW_SCALE_SIZE * PAGE_SIZE)))
                return;

        for (; addr < end; addr = next) {
                p4d_t *p4d;

                next = pgd_addr_end(addr, end);

                pgd = pgd_offset_k(addr);
                if (!pgd_present(*pgd))
                        continue;

                if (kasan_p4d_table(*pgd)) {
                        if (IS_ALIGNED(addr, PGDIR_SIZE) &&
                            IS_ALIGNED(next, PGDIR_SIZE))
                                pgd_clear(pgd);
                        continue;
                }

                p4d = p4d_offset(pgd, addr);
                kasan_remove_p4d_table(p4d, addr, next);
                kasan_free_p4d(p4d_offset(pgd, 0), pgd);
        }
}
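
/*
 * Note (added): kasan_remove_zero_shadow() only undoes zero-shadow
 * mappings. The WARN_ON(!kasan_zero_page_entry(...)) in the PTE walk
 * refuses to clear entries that point anywhere other than
 * kasan_zero_page, so writable shadow installed by other code is never
 * torn down here.
 */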
int kasan_add_zero_shadow(void *start, unsigned long size)
{
        int ret;
        void *shadow_start, *shadow_end;

        shadow_start = kasan_mem_to_shadow(start);
        shadow_end = shadow_start + (size >> KASAN_SHADOW_SCALE_SHIFT);

        if (WARN_ON((unsigned long)start %
                        (KASAN_SHADOW_SCALE_SIZE * PAGE_SIZE)) ||
            WARN_ON(size % (KASAN_SHADOW_SCALE_SIZE * PAGE_SIZE)))
                return -EINVAL;

        ret = kasan_populate_zero_shadow(shadow_start, shadow_end);
        if (ret)
                kasan_remove_zero_shadow(start, size);
        return ret;
}
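
/*
 * Usage sketch (added; illustrative): memory hotplug is the expected
 * caller of the pair above. For a region aligned to
 * KASAN_SHADOW_SCALE_SIZE * PAGE_SIZE, hot-add would do
 *
 *      int err = kasan_add_zero_shadow(start, size);
 *      if (err)
 *              return err;
 *
 * and hot-remove (or a later failure on the hot-add path) would call
 * kasan_remove_zero_shadow(start, size). Note that both take the
 * original memory range; the translation to shadow addresses happens
 * internally via kasan_mem_to_shadow().
 */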