// SPDX-License-Identifier: GPL-2.0
#define DISABLE_BRANCH_PROFILING
#define pr_fmt(fmt) "kasan: " fmt

#include <linux/bootmem.h>
#include <linux/kasan.h>
#include <linux/kdebug.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/vmalloc.h>

#include <asm/e820/types.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/cpu_entry_area.h>
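
/*
 * pfn_mapped[] is filled by the x86 init code with the physical ranges
 * that ended up in the direct mapping; kasan_init() walks it to build
 * real shadow for exactly those ranges. tmp_p4d_table temporarily backs
 * the last PGD entry while the early page tables are live (see the
 * CONFIG_X86_5LEVEL handling in kasan_init()).
 */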
extern struct range pfn_mapped[E820_MAX_ENTRIES];

static p4d_t tmp_p4d_table[PTRS_PER_P4D] __initdata __aligned(PAGE_SIZE);
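
/*
 * Boot-time allocator for shadow pages: grab node-local memory from
 * memblock, above MAX_DMA_ADDRESS. The _nopanic variant lets callers
 * fall back (e.g. from a huge mapping to 4k pages) when a large
 * allocation fails.
 */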
static __init void *early_alloc(size_t size, int nid)
{
	return memblock_virt_alloc_try_nid_nopanic(size, size,
			__pa(MAX_DMA_ADDRESS), BOOTMEM_ALLOC_ACCESSIBLE,
			nid);
}
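
/*
 * Populate one PMD worth of shadow. If the CPU has PSE and the range
 * covers a whole, aligned PMD, try a 2M huge mapping first; otherwise
 * (or if that fails) fall back to allocating individual 4k PTEs.
 */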
static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr,
				      unsigned long end, int nid)
{
	pte_t *pte;

	if (pmd_none(*pmd)) {
		void *p;

		if (boot_cpu_has(X86_FEATURE_PSE) &&
		    ((end - addr) == PMD_SIZE) &&
		    IS_ALIGNED(addr, PMD_SIZE)) {
			p = early_alloc(PMD_SIZE, nid);
			if (p && pmd_set_huge(pmd, __pa(p), PAGE_KERNEL))
				return;
			else if (p)
				memblock_free(__pa(p), PMD_SIZE);
		}

		p = early_alloc(PAGE_SIZE, nid);
		pmd_populate_kernel(&init_mm, pmd, p);
	}

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t entry;
		void *p;

		if (!pte_none(*pte))
			continue;

		p = early_alloc(PAGE_SIZE, nid);
		entry = pfn_pte(PFN_DOWN(__pa(p)), PAGE_KERNEL);
		set_pte_at(&init_mm, addr, pte, entry);
	} while (pte++, addr += PAGE_SIZE, addr != end);
}
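
/*
 * Same game one level up: try a 1G mapping when GBPAGES is available
 * and the range allows it, else recurse into the PMDs. pmd_large()
 * guards against re-populating an already-mapped huge entry.
 */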
static void __init kasan_populate_pud(pud_t *pud, unsigned long addr,
				      unsigned long end, int nid)
{
	pmd_t *pmd;
	unsigned long next;

	if (pud_none(*pud)) {
		void *p;

		if (boot_cpu_has(X86_FEATURE_GBPAGES) &&
		    ((end - addr) == PUD_SIZE) &&
		    IS_ALIGNED(addr, PUD_SIZE)) {
			p = early_alloc(PUD_SIZE, nid);
			if (p && pud_set_huge(pud, __pa(p), PAGE_KERNEL))
				return;
			else if (p)
				memblock_free(__pa(p), PUD_SIZE);
		}

		p = early_alloc(PAGE_SIZE, nid);
		pud_populate(&init_mm, pud, p);
	}

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (!pmd_large(*pmd))
			kasan_populate_pmd(pmd, addr, next, nid);
	} while (pmd++, addr = next, addr != end);
}
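
/*
 * Allocate the PUD page for an empty p4d entry, then walk the PUDs,
 * skipping those already mapped with a 1G page.
 */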
static void __init kasan_populate_p4d(p4d_t *p4d, unsigned long addr,
				      unsigned long end, int nid)
{
	pud_t *pud;
	unsigned long next;

	if (p4d_none(*p4d)) {
		void *p = early_alloc(PAGE_SIZE, nid);

		p4d_populate(&init_mm, p4d, p);
	}

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (!pud_large(*pud))
			kasan_populate_pud(pud, addr, next, nid);
	} while (pud++, addr = next, addr != end);
}
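
/*
 * Top of the recursive walk: make sure the PGD entry points at a p4d
 * page, then descend. With 4-level paging the p4d level is folded and
 * p4d_offset() just returns the pgd.
 */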
static void __init kasan_populate_pgd(pgd_t *pgd, unsigned long addr,
				      unsigned long end, int nid)
{
	void *p;
	p4d_t *p4d;
	unsigned long next;

	if (pgd_none(*pgd)) {
		p = early_alloc(PAGE_SIZE, nid);
		pgd_populate(&init_mm, pgd, p);
	}

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		kasan_populate_p4d(p4d, addr, next, nid);
	} while (p4d++, addr = next, addr != end);
}
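
/*
 * Map real (non-zero) shadow for [addr, end): page-align the range and
 * walk it PGD entry by PGD entry, allocating on node 'nid'.
 */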
static void __init kasan_populate_shadow(unsigned long addr, unsigned long end,
					 int nid)
{
	pgd_t *pgd;
	unsigned long next;

	addr = addr & PAGE_MASK;
	end = round_up(end, PAGE_SIZE);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		kasan_populate_pgd(pgd, addr, next, nid);
	} while (pgd++, addr = next, addr != end);
}
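
/*
 * Build writable shadow for one direct-mapped physical range, allocated
 * on the node that owns the range's first pfn.
 */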
static void __init map_range(struct range *range)
{
	unsigned long start;
	unsigned long end;

	start = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->start));
	end = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->end));

	kasan_populate_shadow(start, end, early_pfn_to_nid(range->start));
}
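
/*
 * Unmap the early (zero page based) shadow so the populate functions
 * above see empty entries. The tail that shares its PGD with other
 * mappings is cleared at the p4d level only.
 */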
static void __init clear_pgds(unsigned long start,
			unsigned long end)
{
	pgd_t *pgd;
	/* See comment in kasan_init() */
	unsigned long pgd_end = end & PGDIR_MASK;

	for (; start < pgd_end; start += PGDIR_SIZE) {
		pgd = pgd_offset_k(start);
		/*
		 * With folded p4d, pgd_clear() is nop, use p4d_clear()
		 * instead.
		 */
		if (CONFIG_PGTABLE_LEVELS < 5)
			p4d_clear(p4d_offset(pgd, start));
		else
			pgd_clear(pgd);
	}

	pgd = pgd_offset_k(start);
	for (; start < end; start += P4D_SIZE)
		p4d_clear(p4d_offset(pgd, start));
}
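
/*
 * Early variant of p4d_offset(): this runs before the final page
 * tables are loaded, so the p4d table is reached through the kernel
 * text mapping (__START_KERNEL_map - phys_base) rather than __va().
 */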
static inline p4d_t *early_p4d_offset(pgd_t *pgd, unsigned long addr)
{
	unsigned long p4d;

	if (!IS_ENABLED(CONFIG_X86_5LEVEL))
		return (p4d_t *)pgd;

	p4d = __pa_nodebug(pgd_val(*pgd)) & PTE_PFN_MASK;
	p4d += __START_KERNEL_map - phys_base;
	return (p4d_t *)p4d + p4d_index(addr);
}
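
/*
 * Point every p4d entry in [addr, end) at the shared zero shadow pud
 * table, hanging kasan_zero_p4d off the pgd first when needed. The
 * p4d_none() checks keep already-populated entries intact.
 */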
static void __init kasan_early_p4d_populate(pgd_t *pgd,
		unsigned long addr,
		unsigned long end)
{
	pgd_t pgd_entry;
	p4d_t *p4d, p4d_entry;
	unsigned long next;

	if (pgd_none(*pgd)) {
		pgd_entry = __pgd(_KERNPG_TABLE |
				  __pa_nodebug(kasan_zero_p4d));
		set_pgd(pgd, pgd_entry);
	}

	p4d = early_p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);

		if (!p4d_none(*p4d))
			continue;

		p4d_entry = __p4d(_KERNPG_TABLE |
				  __pa_nodebug(kasan_zero_pud));
		set_p4d(p4d, p4d_entry);
	} while (p4d++, addr = next, addr != end && p4d_none(*p4d));
}
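
/*
 * Wire the whole shadow region to the zero page based early shadow so
 * instrumented code can run before the real shadow is built.
 */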
static void __init kasan_map_early_shadow(pgd_t *pgd)
{
	/* See comment in kasan_init() */
	unsigned long addr = KASAN_SHADOW_START & PGDIR_MASK;
	unsigned long end = KASAN_SHADOW_END;
	unsigned long next;

	pgd += pgd_index(addr);
	do {
		next = pgd_addr_end(addr, end);
		kasan_early_p4d_populate(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}
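
/*
 * With inline instrumentation a bad shadow dereference shows up as a
 * general protection fault rather than a KASAN report, so hint at the
 * likely causes when a GPF brings the kernel down.
 */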
#ifdef CONFIG_KASAN_INLINE
static int kasan_die_handler(struct notifier_block *self,
			     unsigned long val,
			     void *data)
{
	if (val == DIE_GPF) {
		pr_emerg("CONFIG_KASAN_INLINE enabled\n");
		pr_emerg("GPF could be caused by NULL-ptr deref or user memory access\n");
	}
	return NOTIFY_OK;
}

static struct notifier_block kasan_die_notifier = {
	.notifier_call = kasan_die_handler,
};
#endif
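
/*
 * Called from early x86 boot: point every zero-shadow table entry at
 * the table one level below (all ultimately aliasing a single zero
 * page) and hook the result into both early_top_pgt and init_top_pgt.
 */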
void __init kasan_early_init(void)
{
	int i;
	pteval_t pte_val = __pa_nodebug(kasan_zero_page) |
			   __PAGE_KERNEL | _PAGE_ENC;
	pmdval_t pmd_val = __pa_nodebug(kasan_zero_pte) | _KERNPG_TABLE;
	pudval_t pud_val = __pa_nodebug(kasan_zero_pmd) | _KERNPG_TABLE;
	p4dval_t p4d_val = __pa_nodebug(kasan_zero_pud) | _KERNPG_TABLE;

	for (i = 0; i < PTRS_PER_PTE; i++)
		kasan_zero_pte[i] = __pte(pte_val);

	for (i = 0; i < PTRS_PER_PMD; i++)
		kasan_zero_pmd[i] = __pmd(pmd_val);

	for (i = 0; i < PTRS_PER_PUD; i++)
		kasan_zero_pud[i] = __pud(pud_val);

	for (i = 0; IS_ENABLED(CONFIG_X86_5LEVEL) && i < PTRS_PER_P4D; i++)
		kasan_zero_p4d[i] = __p4d(p4d_val);

	kasan_map_early_shadow(early_top_pgt);
	kasan_map_early_shadow(init_top_pgt);
}
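
/*
 * Build the real shadow mapping: unmap the early shadow, then populate
 * writable shadow for the direct mapping, the cpu_entry_area and the
 * kernel image, and zero shadow for everything in between.
 */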
void __init kasan_init(void)
{
	int i;
	void *shadow_cpu_entry_begin, *shadow_cpu_entry_end;

#ifdef CONFIG_KASAN_INLINE
	register_die_notifier(&kasan_die_notifier);
#endif

	memcpy(early_top_pgt, init_top_pgt, sizeof(early_top_pgt));

	/*
	 * We use the same shadow offset for 4- and 5-level paging to
	 * facilitate boot-time switching between paging modes.
	 * As a result, in 5-level paging mode KASAN_SHADOW_START and
	 * KASAN_SHADOW_END are not aligned to the PGD boundary.
	 *
	 * KASAN_SHADOW_START doesn't share a PGD with anything else.
	 * We claim the whole PGD entry to make things easier.
	 *
	 * KASAN_SHADOW_END lands in the last PGD entry and collides with
	 * a bunch of things: kernel code, modules, the EFI mapping, etc.
	 * We need to take extra steps to not overwrite them.
	 */
	if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
		void *ptr;

		ptr = (void *)pgd_page_vaddr(*pgd_offset_k(KASAN_SHADOW_END));
		memcpy(tmp_p4d_table, (void *)ptr, sizeof(tmp_p4d_table));
		set_pgd(&early_top_pgt[pgd_index(KASAN_SHADOW_END)],
			__pgd(__pa(tmp_p4d_table) | _KERNPG_TABLE));
	}

	load_cr3(early_top_pgt);
	__flush_tlb_all();

	clear_pgds(KASAN_SHADOW_START & PGDIR_MASK, KASAN_SHADOW_END);

	kasan_populate_zero_shadow((void *)(KASAN_SHADOW_START & PGDIR_MASK),
			kasan_mem_to_shadow((void *)PAGE_OFFSET));

	for (i = 0; i < E820_MAX_ENTRIES; i++) {
		if (pfn_mapped[i].end == 0)
			break;

		map_range(&pfn_mapped[i]);
	}
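
	/*
	 * kasan_mem_to_shadow() shifts the address right by
	 * KASAN_SHADOW_SCALE_SHIFT, so the shadow of the cpu_entry_area
	 * is not necessarily page-aligned; round outward so that whole
	 * pages get populated.
	 */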
	shadow_cpu_entry_begin = (void *)CPU_ENTRY_AREA_BASE;
	shadow_cpu_entry_begin = kasan_mem_to_shadow(shadow_cpu_entry_begin);
	shadow_cpu_entry_begin = (void *)round_down(
			(unsigned long)shadow_cpu_entry_begin, PAGE_SIZE);

	shadow_cpu_entry_end = (void *)(CPU_ENTRY_AREA_BASE +
					CPU_ENTRY_AREA_MAP_SIZE);
	shadow_cpu_entry_end = kasan_mem_to_shadow(shadow_cpu_entry_end);
	shadow_cpu_entry_end = (void *)round_up(
			(unsigned long)shadow_cpu_entry_end, PAGE_SIZE);

	kasan_populate_zero_shadow(
		kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
		shadow_cpu_entry_begin);

	kasan_populate_shadow((unsigned long)shadow_cpu_entry_begin,
			      (unsigned long)shadow_cpu_entry_end, 0);

	kasan_populate_zero_shadow(shadow_cpu_entry_end,
			kasan_mem_to_shadow((void *)__START_KERNEL_map));

	kasan_populate_shadow((unsigned long)kasan_mem_to_shadow(_stext),
			      (unsigned long)kasan_mem_to_shadow(_end),
			      early_pfn_to_nid(__pa(_stext)));

	kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END),
			(void *)KASAN_SHADOW_END);

	load_cr3(init_top_pgt);
	__flush_tlb_all();

	/*
	 * kasan_zero_page has been used as early shadow memory, thus it may
	 * contain some garbage. Now we can clear and write protect it, since
	 * after the TLB flush no one should write to it.
	 */
	memset(kasan_zero_page, 0, PAGE_SIZE);
	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte_t pte = __pte(__pa(kasan_zero_page) |
				  __PAGE_KERNEL_RO | _PAGE_ENC);
		set_pte(&kasan_zero_pte[i], pte);
	}
	/* Flush TLBs again to be sure that write protection applied. */
	__flush_tlb_all();
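
	/*
	 * init_task.kasan_depth starts out as 1 to suppress reports
	 * while the early shadow was bogus; zeroing it arms KASAN.
	 */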
	init_task.kasan_depth = 0;
	pr_info("KernelAddressSanitizer initialized\n");
}