/* arch/x86/mm/kasan_init_64.c */
#define pr_fmt(fmt) "kasan: " fmt
#include <linux/bootmem.h>
#include <linux/kasan.h>
#include <linux/kdebug.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/vmalloc.h>

#include <asm/tlbflush.h>
#include <asm/sections.h>

extern pgd_t early_level4_pgt[PTRS_PER_PGD];
extern struct range pfn_mapped[E820_X_MAX];

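/*
 * Populate backed shadow memory for one range of RAM that the kernel
 * has mapped. Each shadow byte covers eight bytes of kernel address
 * space, hence the kasan_mem_to_shadow() translation of both ends.
 */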
static int __init map_range(struct range *range)
{
	unsigned long start;
	unsigned long end;

	start = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->start));
	end = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->end));

	/*
	 * end + 1 here is intentional. We check several shadow bytes in advance
	 * to slightly speed up fastpath. In some rare cases we could cross
	 * boundary of mapped shadow, so we just map some more here.
	 */
	return vmemmap_populate(start, end + 1, NUMA_NO_NODE);
}

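/*
 * Remove the early shadow mapping: clear every PGD entry covering
 * [start, end) so the range can be repopulated with real shadow.
 */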
static void __init clear_pgds(unsigned long start,
			unsigned long end)
{
	for (; start < end; start += PGDIR_SIZE)
		pgd_clear(pgd_offset_k(start));
}

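/*
 * Point every PGD entry covering the shadow region at the shared zero
 * PUD, so that early in boot the whole shadow reads as zeroes ("fully
 * accessible") without consuming extra memory.
 */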
static void __init kasan_map_early_shadow(pgd_t *pgd)
{
	int i;
	unsigned long start = KASAN_SHADOW_START;
	unsigned long end = KASAN_SHADOW_END;

	for (i = pgd_index(start); start < end; i++) {
		pgd[i] = __pgd(__pa_nodebug(kasan_zero_pud)
				| _KERNPG_TABLE);
		start += PGDIR_SIZE;
	}
}

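/*
 * With inline instrumentation, a wild access to the shadow region ends
 * in a general protection fault, so hook the die notifier to print a
 * hint about the likely cause.
 */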
#ifdef CONFIG_KASAN_INLINE
static int kasan_die_handler(struct notifier_block *self,
			     unsigned long val,
			     void *data)
{
	if (val == DIE_GPF) {
		pr_emerg("CONFIG_KASAN_INLINE enabled\n");
		pr_emerg("GPF could be caused by NULL-ptr deref or user memory access\n");
	}
	return NOTIFY_OK;
}

static struct notifier_block kasan_die_notifier = {
	.notifier_call = kasan_die_handler,
};
#endif

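/*
 * Build the zero-shadow page tables: every PTE refers to kasan_zero_page,
 * every PMD to kasan_zero_pte, every PUD to kasan_zero_pmd, then hook
 * them into both boot-time PGDs via kasan_map_early_shadow().
 */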
void __init kasan_early_init(void)
{
	int i;
	pteval_t pte_val = __pa_nodebug(kasan_zero_page) | __PAGE_KERNEL;
	pmdval_t pmd_val = __pa_nodebug(kasan_zero_pte) | _KERNPG_TABLE;
	pudval_t pud_val = __pa_nodebug(kasan_zero_pmd) | _KERNPG_TABLE;

	for (i = 0; i < PTRS_PER_PTE; i++)
		kasan_zero_pte[i] = __pte(pte_val);

	for (i = 0; i < PTRS_PER_PMD; i++)
		kasan_zero_pmd[i] = __pmd(pmd_val);

	for (i = 0; i < PTRS_PER_PUD; i++)
		kasan_zero_pud[i] = __pud(pud_val);

	kasan_map_early_shadow(early_level4_pgt);
	kasan_map_early_shadow(init_level4_pgt);
}

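/*
 * Switch from the early zero shadow to the real layout: zero shadow
 * for the unpopulated gaps, backed shadow for RAM and for the kernel
 * image itself.
 */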
void __init kasan_init(void)
{
	int i;

#ifdef CONFIG_KASAN_INLINE
	register_die_notifier(&kasan_die_notifier);
#endif

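	/*
	 * Run on a scratch copy of the boot page tables while the shadow
	 * entries in init_level4_pgt are cleared and repopulated.
	 */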
	memcpy(early_level4_pgt, init_level4_pgt, sizeof(early_level4_pgt));
	load_cr3(early_level4_pgt);
	__flush_tlb_all();

	clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);

	kasan_populate_zero_shadow((void *)KASAN_SHADOW_START,
			kasan_mem_to_shadow((void *)PAGE_OFFSET));

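	/* Populate backed shadow for every range of mapped physical memory. */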
	for (i = 0; i < E820_X_MAX; i++) {
		if (pfn_mapped[i].end == 0)
			break;

		if (map_range(&pfn_mapped[i]))
			panic("kasan: unable to allocate shadow!");
	}
	kasan_populate_zero_shadow(
		kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
		kasan_mem_to_shadow((void *)__START_KERNEL_map));

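	/* The kernel image's shadow must be writable, so back it with real pages. */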
	vmemmap_populate((unsigned long)kasan_mem_to_shadow(_stext),
			(unsigned long)kasan_mem_to_shadow(_end),
			NUMA_NO_NODE);

	kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END),
			(void *)KASAN_SHADOW_END);

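	/* Switch back to init_level4_pgt, which now carries the final shadow. */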
	load_cr3(init_level4_pgt);
	__flush_tlb_all();

	/*
	 * kasan_zero_page has been used as early shadow memory, thus it may
	 * contain some garbage. Now we can clear and write protect it, since
	 * after the TLB flush no one should write to it.
	 */
	memset(kasan_zero_page, 0, PAGE_SIZE);
	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte_t pte = __pte(__pa(kasan_zero_page) | __PAGE_KERNEL_RO);
		set_pte(&kasan_zero_pte[i], pte);
	}
	/* Flush TLBs again to be sure that write protection applied. */
	__flush_tlb_all();

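	/* Shadow is ready: depth 0 re-enables KASAN error reporting. */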
	init_task.kasan_depth = 0;
	pr_info("KernelAddressSanitizer initialized\n");
}