#define pr_fmt(fmt) "kasan: " fmt
#include <linux/bootmem.h>
#include <linux/kasan.h>
#include <linux/kdebug.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/vmalloc.h>

#include <asm/tlbflush.h>
#include <asm/sections.h>

extern pgd_t early_level4_pgt[PTRS_PER_PGD];
extern struct range pfn_mapped[E820_X_MAX];
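
/*
 * Populate real (writable) shadow for one range of mapped physical
 * memory. vmemmap_populate() allocates actual pages for the shadow
 * rather than aliasing the shared zero page.
 */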
static int __init map_range(struct range *range)
{
	unsigned long start;
	unsigned long end;

	start = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->start));
	end = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->end));

	/*
	 * end + 1 here is intentional. We check several shadow bytes in
	 * advance to slightly speed up the fastpath. In some rare cases we
	 * could cross the boundary of the mapped shadow, so we just map some
	 * more here.
	 */
	return vmemmap_populate(start, end + 1, NUMA_NO_NODE);
}
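
/*
 * Tear down the early shadow mapping for [start, end), one PGD entry
 * per PGDIR_SIZE step, so kasan_init() can repopulate the region.
 */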
static void __init clear_pgds(unsigned long start,
			unsigned long end)
{
	pgd_t *pgd;

	for (; start < end; start += PGDIR_SIZE) {
		pgd = pgd_offset_k(start);
		/*
		 * With folded p4d, pgd_clear() is a nop; use p4d_clear()
		 * instead.
		 */
		if (CONFIG_PGTABLE_LEVELS < 5)
			p4d_clear(p4d_offset(pgd, start));
		else
			pgd_clear(pgd);
	}
}
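
/*
 * Wire every PGD entry covering [KASAN_SHADOW_START, KASAN_SHADOW_END)
 * to the shared kasan_zero_pud, so all early shadow reads return zero.
 */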
static void __init kasan_map_early_shadow(pgd_t *pgd)
{
	int i;
	unsigned long start = KASAN_SHADOW_START;
	unsigned long end = KASAN_SHADOW_END;

	for (i = pgd_index(start); start < end; i++) {
		pgd[i] = __pgd(__pa_nodebug(kasan_zero_pud)
				| _KERNPG_TABLE);
		start += PGDIR_SIZE;
	}
}
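
/*
 * With inline instrumentation, a wild shadow access from a
 * compiler-generated check appears as a general protection fault, so
 * print a hint that makes the resulting report easier to interpret.
 */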
#ifdef CONFIG_KASAN_INLINE
static int kasan_die_handler(struct notifier_block *self,
			     unsigned long val,
			     void *data)
{
	if (val == DIE_GPF) {
		pr_emerg("CONFIG_KASAN_INLINE enabled\n");
		pr_emerg("GPF could be caused by NULL-ptr deref or user memory access\n");
	}
	return NOTIFY_OK;
}

static struct notifier_block kasan_die_notifier = {
	.notifier_call = kasan_die_handler,
};
#endif
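
/*
 * Fill the zero-shadow page tables (every pte -> kasan_zero_page, every
 * pmd -> kasan_zero_pte, every pud -> kasan_zero_pmd) and hook them into
 * both early page tables, making all shadow accesses valid from the
 * start of boot.
 */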
void __init kasan_early_init(void)
{
	int i;
	pteval_t pte_val = __pa_nodebug(kasan_zero_page) | __PAGE_KERNEL;
	pmdval_t pmd_val = __pa_nodebug(kasan_zero_pte) | _KERNPG_TABLE;
	pudval_t pud_val = __pa_nodebug(kasan_zero_pmd) | _KERNPG_TABLE;

	for (i = 0; i < PTRS_PER_PTE; i++)
		kasan_zero_pte[i] = __pte(pte_val);

	for (i = 0; i < PTRS_PER_PMD; i++)
		kasan_zero_pmd[i] = __pmd(pmd_val);

	for (i = 0; i < PTRS_PER_PUD; i++)
		kasan_zero_pud[i] = __pud(pud_val);

	kasan_map_early_shadow(early_level4_pgt);
	kasan_map_early_shadow(init_level4_pgt);
}
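
/*
 * Replace the early zero shadow with the final layout: zero shadow for
 * address-space holes and real shadow for physical memory, the kernel
 * image and the module area.
 */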
void __init kasan_init(void)
{
	int i;

#ifdef CONFIG_KASAN_INLINE
	register_die_notifier(&kasan_die_notifier);
#endif

	memcpy(early_level4_pgt, init_level4_pgt, sizeof(early_level4_pgt));
	load_cr3(early_level4_pgt);
	__flush_tlb_all();
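
	/*
	 * We now run on a copy of the page tables, so the shadow entries
	 * can be safely torn down and rebuilt.
	 */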
	clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);

	kasan_populate_zero_shadow((void *)KASAN_SHADOW_START,
			kasan_mem_to_shadow((void *)PAGE_OFFSET));
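
	/* Populate real shadow for every range of mapped physical memory. */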
	for (i = 0; i < E820_X_MAX; i++) {
		if (pfn_mapped[i].end == 0)
			break;

		if (map_range(&pfn_mapped[i]))
			panic("kasan: unable to allocate shadow!");
	}
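
	/*
	 * The range between the direct mapping and the kernel image (this
	 * includes the vmalloc area) gets zero shadow: accesses there are
	 * valid but not tracked.
	 */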
	kasan_populate_zero_shadow(
		kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
		kasan_mem_to_shadow((void *)__START_KERNEL_map));

	vmemmap_populate((unsigned long)kasan_mem_to_shadow(_stext),
			(unsigned long)kasan_mem_to_shadow(_end),
			NUMA_NO_NODE);

	kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END),
			(void *)KASAN_SHADOW_END);
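
	/*
	 * The final shadow is in place; switch back to init_level4_pgt and
	 * flush stale TLB entries before write-protecting the zero page.
	 */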
	load_cr3(init_level4_pgt);
	__flush_tlb_all();

	/*
	 * kasan_zero_page has been used as early shadow memory, so it may
	 * contain some garbage. Now we can clear it and write-protect it,
	 * since after the TLB flush no one should write to it.
	 */
	memset(kasan_zero_page, 0, PAGE_SIZE);
	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte_t pte = __pte(__pa(kasan_zero_page) | __PAGE_KERNEL_RO);
		set_pte(&kasan_zero_pte[i], pte);
	}
	/* Flush TLBs again to be sure that write protection applied. */
	__flush_tlb_all();

	init_task.kasan_depth = 0;
	pr_info("KernelAddressSanitizer initialized\n");
}