arch/x86/mm/kasan_init_64.c

#define DISABLE_BRANCH_PROFILING
#define pr_fmt(fmt) "kasan: " fmt
#include <linux/bootmem.h>
#include <linux/kasan.h>
#include <linux/kdebug.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/vmalloc.h>

#include <asm/tlbflush.h>
#include <asm/sections.h>

extern pgd_t early_level4_pgt[PTRS_PER_PGD];
extern struct range pfn_mapped[E820_X_MAX];
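
/*
 * Map real shadow memory for one range of direct-mapped physical memory.
 * vmemmap_populate() builds the shadow mapping, allocating backing pages
 * from the early boot allocator.
 */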
static int __init map_range(struct range *range)
{
	unsigned long start;
	unsigned long end;

	start = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->start));
	end = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->end));

	/*
	 * end + 1 here is intentional. We check several shadow bytes in
	 * advance to slightly speed up the fast path. In some rare cases
	 * we could cross the boundary of the mapped shadow, so we just
	 * map some more here.
	 */
	return vmemmap_populate(start, end + 1, NUMA_NO_NODE);
}
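
/*
 * Remove the early shadow mappings for [start, end) so the region can
 * be repopulated with the real shadow page tables.
 */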
static void __init clear_pgds(unsigned long start,
			unsigned long end)
{
	pgd_t *pgd;

	for (; start < end; start += PGDIR_SIZE) {
		pgd = pgd_offset_k(start);
		/*
		 * With folded p4d, pgd_clear() is a nop, so use
		 * p4d_clear() instead.
		 */
		if (CONFIG_PGTABLE_LEVELS < 5)
			p4d_clear(p4d_offset(pgd, start));
		else
			pgd_clear(pgd);
	}
}
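
/*
 * Point every pgd entry covering the shadow region at the shared
 * kasan_zero_pud, so any shadow access during early boot resolves to
 * the single zero page set up in kasan_early_init().
 */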
static void __init kasan_map_early_shadow(pgd_t *pgd)
{
	int i;
	unsigned long start = KASAN_SHADOW_START;
	unsigned long end = KASAN_SHADOW_END;

	for (i = pgd_index(start); start < end; i++) {
		pgd[i] = __pgd(__pa_nodebug(kasan_zero_pud)
				| _KERNPG_TABLE);
		start += PGDIR_SIZE;
	}
}
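
/*
 * With inline instrumentation, a bad access (e.g. through a NULL or
 * user pointer) computes a shadow address that is not mapped and shows
 * up as a general protection fault rather than a regular KASAN report,
 * so hook the die notifier to hint at the likely cause.
 */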
#ifdef CONFIG_KASAN_INLINE
static int kasan_die_handler(struct notifier_block *self,
			     unsigned long val,
			     void *data)
{
	if (val == DIE_GPF) {
		pr_emerg("CONFIG_KASAN_INLINE enabled\n");
		pr_emerg("GPF could be caused by NULL-ptr deref or user memory access\n");
	}
	return NOTIFY_OK;
}

static struct notifier_block kasan_die_notifier = {
	.notifier_call = kasan_die_handler,
};
#endif
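
/*
 * Build the "zero shadow" page tables: every pte points at
 * kasan_zero_page, every pmd at kasan_zero_pte, every pud at
 * kasan_zero_pmd. Installing these lets the entire shadow range be
 * backed by a single page until kasan_init() populates the real shadow.
 */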
void __init kasan_early_init(void)
{
	int i;
	pteval_t pte_val = __pa_nodebug(kasan_zero_page) | __PAGE_KERNEL;
	pmdval_t pmd_val = __pa_nodebug(kasan_zero_pte) | _KERNPG_TABLE;
	pudval_t pud_val = __pa_nodebug(kasan_zero_pmd) | _KERNPG_TABLE;

	for (i = 0; i < PTRS_PER_PTE; i++)
		kasan_zero_pte[i] = __pte(pte_val);

	for (i = 0; i < PTRS_PER_PMD; i++)
		kasan_zero_pmd[i] = __pmd(pmd_val);

	for (i = 0; i < PTRS_PER_PUD; i++)
		kasan_zero_pud[i] = __pud(pud_val);

	kasan_map_early_shadow(early_level4_pgt);
	kasan_map_early_shadow(init_level4_pgt);
}
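
/*
 * Replace the early zero shadow with the final shadow layout: real
 * shadow pages for the direct mapping and the kernel image, and the
 * shared read-only zero shadow everywhere else.
 */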
void __init kasan_init(void)
{
	int i;

#ifdef CONFIG_KASAN_INLINE
	register_die_notifier(&kasan_die_notifier);
#endif
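
	/*
	 * Switch CR3 to a copy of the current top-level table; the copy
	 * still maps the early zero shadow, so the shadow entries in
	 * init_level4_pgt can be cleared and rebuilt while instrumented
	 * code keeps running.
	 */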
	memcpy(early_level4_pgt, init_level4_pgt, sizeof(early_level4_pgt));
	load_cr3(early_level4_pgt);
	__flush_tlb_all();

	clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
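
	/*
	 * Everything below PAGE_OFFSET (userspace and the non-canonical
	 * hole) is never tracked by KASAN, so its shadow can share the
	 * zero page.
	 */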
	kasan_populate_zero_shadow((void *)KASAN_SHADOW_START,
			kasan_mem_to_shadow((void *)PAGE_OFFSET));
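
	/*
	 * Allocate real shadow for every range of the direct mapping that
	 * is actually backed by RAM; the loop stops at the first empty
	 * pfn_mapped entry.
	 */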
	for (i = 0; i < E820_X_MAX; i++) {
		if (pfn_mapped[i].end == 0)
			break;

		if (map_range(&pfn_mapped[i]))
			panic("kasan: unable to allocate shadow!");
	}
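
	/*
	 * From the end of the direct mapping up to the kernel image
	 * mapping at __START_KERNEL_map, the zero shadow is used again.
	 */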
	kasan_populate_zero_shadow(
		kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
		kasan_mem_to_shadow((void *)__START_KERNEL_map));
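
	/* The kernel image itself gets real shadow, like the direct map. */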
	vmemmap_populate((unsigned long)kasan_mem_to_shadow(_stext),
			(unsigned long)kasan_mem_to_shadow(_end),
			NUMA_NO_NODE);
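
	/*
	 * Module shadow is allocated on demand as modules are loaded;
	 * everything past MODULES_END up to the end of the shadow region
	 * can share the zero page.
	 */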
	kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END),
			(void *)KASAN_SHADOW_END);
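
	/* The real shadow is in place: switch back to the proper page tables. */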
	load_cr3(init_level4_pgt);
	__flush_tlb_all();

	/*
	 * kasan_zero_page has been used as early shadow memory, thus it may
	 * contain some garbage. Now we can clear and write protect it, since
	 * after the TLB flush no one should write to it.
	 */
	memset(kasan_zero_page, 0, PAGE_SIZE);
	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte_t pte = __pte(__pa(kasan_zero_page) | __PAGE_KERNEL_RO);
		set_pte(&kasan_zero_pte[i], pte);
	}
	/* Flush TLBs again to be sure that the write protection is applied. */
	__flush_tlb_all();
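
	/*
	 * init_task starts with kasan_depth == 1 so that reports are
	 * suppressed until the shadow is ready; dropping it to zero turns
	 * error reporting on.
	 */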
	init_task.kasan_depth = 0;
	pr_info("KernelAddressSanitizer initialized\n");
}