/* arch/x86/mm/kasan_init_64.c */
#define DISABLE_BRANCH_PROFILING
#define pr_fmt(fmt) "kasan: " fmt
#include <linux/bootmem.h>
#include <linux/kasan.h>
#include <linux/kdebug.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/vmalloc.h>

#include <asm/e820/types.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>

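/*
 * For reference: with generic KASAN, kasan_mem_to_shadow() in
 * include/linux/kasan.h translates an address to its shadow byte as
 *
 *        shadow = (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET
 *
 * so one shadow byte describes an 8-byte granule of kernel memory.
 */
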
extern pgd_t early_level4_pgt[PTRS_PER_PGD];
extern struct range pfn_mapped[E820_MAX_ENTRIES];

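/*
 * Populate real shadow memory for one range of already-mapped physical
 * memory, using vmemmap_populate() so the shadow is backed by actual
 * pages rather than the shared zero page.
 */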
static int __init map_range(struct range *range)
{
        unsigned long start;
        unsigned long end;

        start = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->start));
        end = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->end));

        /*
         * end + 1 here is intentional. We check several shadow bytes in
         * advance to slightly speed up the fast path. In some rare cases
         * we could cross the boundary of the mapped shadow, so we just map
         * some more here.
         */
        return vmemmap_populate(start, end + 1, NUMA_NO_NODE);
}

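/*
 * Unmap the early shadow: clear every top-level entry covering
 * [start, end), one PGDIR_SIZE step at a time, so kasan_init() can
 * rebuild the shadow mappings from scratch.
 */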
static void __init clear_pgds(unsigned long start,
                        unsigned long end)
{
        for (; start < end; start += PGDIR_SIZE)
                pgd_clear(pgd_offset_k(start));
}

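/*
 * Point every PGD entry covering the shadow region at the single
 * kasan_zero_pud, so every early shadow access resolves to the shared
 * zero page built in kasan_early_init().
 */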
static void __init kasan_map_early_shadow(pgd_t *pgd)
{
        int i;
        unsigned long start = KASAN_SHADOW_START;
        unsigned long end = KASAN_SHADOW_END;

        for (i = pgd_index(start); start < end; i++) {
                pgd[i] = __pgd(__pa_nodebug(kasan_zero_pud)
                                | _KERNPG_TABLE);
                start += PGDIR_SIZE;
        }
}

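/*
 * With CONFIG_KASAN_INLINE the shadow checks are compiled directly into
 * the instrumented code, so an access with a bogus shadow address (e.g.
 * from a NULL-pointer dereference) shows up as a general protection
 * fault; hook the die chain to say so in the report.
 */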
#ifdef CONFIG_KASAN_INLINE
static int kasan_die_handler(struct notifier_block *self,
                             unsigned long val,
                             void *data)
{
        if (val == DIE_GPF) {
                pr_emerg("CONFIG_KASAN_INLINE enabled\n");
                pr_emerg("GPF could be caused by NULL-ptr deref or user memory access\n");
        }
        return NOTIFY_OK;
}

static struct notifier_block kasan_die_notifier = {
        .notifier_call = kasan_die_handler,
};
#endif

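/*
 * Build the early zero shadow: every PTE in kasan_zero_pte points at
 * kasan_zero_page, every PMD in kasan_zero_pmd at kasan_zero_pte, every
 * PUD in kasan_zero_pud at kasan_zero_pmd, and both boot-time page
 * tables map the whole shadow range through kasan_zero_pud. Every
 * shadow read thus returns zero ("fully accessible") until kasan_init()
 * installs the real shadow.
 */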
void __init kasan_early_init(void)
{
        int i;
        pteval_t pte_val = __pa_nodebug(kasan_zero_page) | __PAGE_KERNEL;
        pmdval_t pmd_val = __pa_nodebug(kasan_zero_pte) | _KERNPG_TABLE;
        pudval_t pud_val = __pa_nodebug(kasan_zero_pmd) | _KERNPG_TABLE;

        for (i = 0; i < PTRS_PER_PTE; i++)
                kasan_zero_pte[i] = __pte(pte_val);

        for (i = 0; i < PTRS_PER_PMD; i++)
                kasan_zero_pmd[i] = __pmd(pmd_val);

        for (i = 0; i < PTRS_PER_PUD; i++)
                kasan_zero_pud[i] = __pud(pud_val);

        kasan_map_early_shadow(early_level4_pgt);
        kasan_map_early_shadow(init_level4_pgt);
}

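/*
 * Replace the early zero shadow with the final layout: real shadow
 * pages for the direct mapping and the kernel image, zero shadow for
 * everything else in the shadow region.
 */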
void __init kasan_init(void)
{
        int i;

#ifdef CONFIG_KASAN_INLINE
        register_die_notifier(&kasan_die_notifier);
#endif

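        /*
         * Keep running on early_level4_pgt, a copy that still carries a
         * valid zero shadow, while the shadow entries of init_level4_pgt
         * are torn down and repopulated below.
         */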
        memcpy(early_level4_pgt, init_level4_pgt, sizeof(early_level4_pgt));
        load_cr3(early_level4_pgt);
        __flush_tlb_all();

        clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);

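        /* Zero shadow for everything below the direct mapping (PAGE_OFFSET). */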
        kasan_populate_zero_shadow((void *)KASAN_SHADOW_START,
                        kasan_mem_to_shadow((void *)PAGE_OFFSET));

        for (i = 0; i < E820_MAX_ENTRIES; i++) {
                if (pfn_mapped[i].end == 0)
                        break;

                if (map_range(&pfn_mapped[i]))
                        panic("kasan: unable to allocate shadow!");
        }
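        /*
         * Zero shadow for the hole between the end of the direct mapping
         * (PAGE_OFFSET + MAXMEM) and the start of the kernel mapping.
         */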
        kasan_populate_zero_shadow(
                kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
                kasan_mem_to_shadow((void *)__START_KERNEL_map));

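        /* Real shadow for the kernel image: [_stext, _end). */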
        vmemmap_populate((unsigned long)kasan_mem_to_shadow(_stext),
                        (unsigned long)kasan_mem_to_shadow(_end),
                        NUMA_NO_NODE);

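        /*
         * Zero shadow from the end of the module area up to the end of the
         * shadow region; shadow for the module area itself is expected to
         * be populated on demand at module load time.
         */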
        kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END),
                        (void *)KASAN_SHADOW_END);

        load_cr3(init_level4_pgt);
        __flush_tlb_all();

        /*
         * kasan_zero_page has been used as early shadow memory, so it may
         * contain some garbage. Now we can clear and write-protect it,
         * since after the TLB flush no one should write to it.
         */
        memset(kasan_zero_page, 0, PAGE_SIZE);
        for (i = 0; i < PTRS_PER_PTE; i++) {
                pte_t pte = __pte(__pa(kasan_zero_page) | __PAGE_KERNEL_RO);
                set_pte(&kasan_zero_pte[i], pte);
        }
        /* Flush TLBs again to be sure the write protection is applied. */
        __flush_tlb_all();

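        /* Re-enable reports: init_task boots with kasan_depth set to 1. */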
        init_task.kasan_depth = 0;
        pr_info("KernelAddressSanitizer initialized\n");
}