arm64: kasan: avoid pfn_to_nid() before page array is initialized
arch/arm64/mm/kasan_init.c
/*
 * This file contains kasan initialization code for ARM64.
 *
 * Copyright (c) 2015 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#define pr_fmt(fmt) "kasan: " fmt
#include <linux/bootmem.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/sched/task.h>
#include <linux/memblock.h>
#include <linux/start_kernel.h>
#include <linux/mm.h>

#include <asm/mmu_context.h>
#include <asm/kernel-pgtable.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>

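/*
 * Temporary pgdir that keeps the early (zero) shadow mapped while
 * kasan_init() tears down and rebuilds the real shadow mappings in
 * swapper_pg_dir.
 */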
static pgd_t tmp_pg_dir[PTRS_PER_PGD] __initdata __aligned(PGD_SIZE);

/*
 * The p*d_populate functions call virt_to_phys implicitly so they can't be
 * used directly on kernel symbols (the kasan_zero_p*d tables here). All the
 * early functions are called too early to use lm_alias, so the
 * __p*d_populate functions must be used to populate with the physical
 * address from __pa_symbol.
 */

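/*
 * Allocate a zeroed page for shadow memory or a shadow page table,
 * preferably on @node; memblock_virt_alloc_try_nid() returns zeroed
 * memory and panics on failure, so no error handling is needed.
 */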
static phys_addr_t __init kasan_alloc_zeroed_page(int node)
{
        void *p = memblock_virt_alloc_try_nid(PAGE_SIZE, PAGE_SIZE,
                                              __pa(MAX_DMA_ADDRESS),
                                              MEMBLOCK_ALLOC_ACCESSIBLE, node);
        return __pa(p);
}

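/*
 * kasan_p??_offset() returns the shadow table entry for @addr at one
 * level, populating the entry first if it is empty. Early calls point
 * empty entries at the shared kasan_zero_* tables and walk via the
 * p??_offset_kimg() helpers (the linear map is not available yet);
 * later calls allocate real, node-local tables.
 */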
static pte_t *__init kasan_pte_offset(pmd_t *pmd, unsigned long addr, int node,
                                      bool early)
{
        if (pmd_none(*pmd)) {
                phys_addr_t pte_phys = early ? __pa_symbol(kasan_zero_pte)
                                             : kasan_alloc_zeroed_page(node);
                __pmd_populate(pmd, pte_phys, PMD_TYPE_TABLE);
        }

        return early ? pte_offset_kimg(pmd, addr)
                     : pte_offset_kernel(pmd, addr);
}

static pmd_t *__init kasan_pmd_offset(pud_t *pud, unsigned long addr, int node,
                                      bool early)
{
        if (pud_none(*pud)) {
                phys_addr_t pmd_phys = early ? __pa_symbol(kasan_zero_pmd)
                                             : kasan_alloc_zeroed_page(node);
                __pud_populate(pud, pmd_phys, PMD_TYPE_TABLE);
        }

        return early ? pmd_offset_kimg(pud, addr) : pmd_offset(pud, addr);
}

static pud_t *__init kasan_pud_offset(pgd_t *pgd, unsigned long addr, int node,
                                      bool early)
{
        if (pgd_none(*pgd)) {
                phys_addr_t pud_phys = early ? __pa_symbol(kasan_zero_pud)
                                             : kasan_alloc_zeroed_page(node);
                __pgd_populate(pgd, pud_phys, PUD_TYPE_TABLE);
        }

        return early ? pud_offset_kimg(pgd, addr) : pud_offset(pgd, addr);
}

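/*
 * kasan_p??_populate() maps shadow for [addr, end) one level at a
 * time. Each loop also stops as soon as the next entry is already
 * populated, so overlapping ranges never remap existing shadow.
 */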
static void __init kasan_pte_populate(pmd_t *pmd, unsigned long addr,
                                      unsigned long end, int node, bool early)
{
        unsigned long next;
        pte_t *pte = kasan_pte_offset(pmd, addr, node, early);

        do {
                phys_addr_t page_phys = early ? __pa_symbol(kasan_zero_page)
                                              : kasan_alloc_zeroed_page(node);
                next = addr + PAGE_SIZE;
                set_pte(pte, pfn_pte(__phys_to_pfn(page_phys), PAGE_KERNEL));
        } while (pte++, addr = next, addr != end && pte_none(*pte));
}

static void __init kasan_pmd_populate(pud_t *pud, unsigned long addr,
                                      unsigned long end, int node, bool early)
{
        unsigned long next;
        pmd_t *pmd = kasan_pmd_offset(pud, addr, node, early);

        do {
                next = pmd_addr_end(addr, end);
                kasan_pte_populate(pmd, addr, next, node, early);
        } while (pmd++, addr = next, addr != end && pmd_none(*pmd));
}

static void __init kasan_pud_populate(pgd_t *pgd, unsigned long addr,
                                      unsigned long end, int node, bool early)
{
        unsigned long next;
        pud_t *pud = kasan_pud_offset(pgd, addr, node, early);

        do {
                next = pud_addr_end(addr, end);
                kasan_pmd_populate(pud, addr, next, node, early);
        } while (pud++, addr = next, addr != end && pud_none(*pud));
}

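/*
 * Top-level walk over init_mm's page tables. With @early set,
 * everything is backed by the single shared kasan_zero_page;
 * otherwise fresh zeroed pages are allocated per node.
 */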
static void __init kasan_pgd_populate(unsigned long addr, unsigned long end,
                                      int node, bool early)
{
        unsigned long next;
        pgd_t *pgd;

        pgd = pgd_offset_k(addr);
        do {
                next = pgd_addr_end(addr, end);
                kasan_pud_populate(pgd, addr, next, node, early);
        } while (pgd++, addr = next, addr != end);
}

/* The early shadow maps everything to a single page of zeroes */
asmlinkage void __init kasan_early_init(void)
{
        /*
         * The shadow of the whole 2^64-byte VA space is 2^61 bytes
         * (scale 1/8) and must end exactly at KASAN_SHADOW_END.
         */
        BUILD_BUG_ON(KASAN_SHADOW_OFFSET != KASAN_SHADOW_END - (1UL << 61));
        BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, PGDIR_SIZE));
        BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PGDIR_SIZE));
        kasan_pgd_populate(KASAN_SHADOW_START, KASAN_SHADOW_END, NUMA_NO_NODE,
                           true);
}

/* Set up full kasan mappings, ensuring that the mapped pages are zeroed */
static void __init kasan_map_populate(unsigned long start, unsigned long end,
                                      int node)
{
        kasan_pgd_populate(start & PAGE_MASK, PAGE_ALIGN(end), node, false);
}

/*
 * Copy the current shadow region into a new pgdir.
 */
void __init kasan_copy_shadow(pgd_t *pgdir)
{
        pgd_t *pgd, *pgd_new, *pgd_end;

        pgd = pgd_offset_k(KASAN_SHADOW_START);
        pgd_end = pgd_offset_k(KASAN_SHADOW_END);
        pgd_new = pgd_offset_raw(pgdir, KASAN_SHADOW_START);
        do {
                set_pgd(pgd_new, *pgd);
        } while (pgd++, pgd_new++, pgd != pgd_end);
}

static void __init clear_pgds(unsigned long start,
                              unsigned long end)
{
        /*
         * Remove references to the kasan page tables from
         * swapper_pg_dir. pgd_clear() can't be used here because it
         * is a no-op on 2- and 3-level page table setups.
         */
        for (; start < end; start += PGDIR_SIZE)
                set_pgd(pgd_offset_k(start), __pgd(0));
}

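/*
 * Replace the early zero shadow with real shadow memory for the
 * kernel image, the modules area and all of RAM, then switch back
 * to swapper_pg_dir and enable KASAN reports.
 */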
void __init kasan_init(void)
{
        u64 kimg_shadow_start, kimg_shadow_end;
        u64 mod_shadow_start, mod_shadow_end;
        struct memblock_region *reg;
        int i;

        kimg_shadow_start = (u64)kasan_mem_to_shadow(_text) & PAGE_MASK;
        kimg_shadow_end = PAGE_ALIGN((u64)kasan_mem_to_shadow(_end));

        mod_shadow_start = (u64)kasan_mem_to_shadow((void *)MODULES_VADDR);
        mod_shadow_end = (u64)kasan_mem_to_shadow((void *)MODULES_END);

        /*
         * We are going to perform a proper setup of shadow memory. First
         * we unmap the early shadow (the clear_pgds() call below), but
         * instrumented code can't execute without shadow memory, so
         * tmp_pg_dir is used to keep the early shadow mapped until the
         * full shadow setup is finished.
         */
        memcpy(tmp_pg_dir, swapper_pg_dir, sizeof(tmp_pg_dir));
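        /* Make the copied tables visible before switching TTBR1 to them. */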
        dsb(ishst);
        cpu_replace_ttbr1(lm_alias(tmp_pg_dir));

        clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);

        kasan_map_populate(kimg_shadow_start, kimg_shadow_end,
                           early_pfn_to_nid(virt_to_pfn(lm_alias(_text))));

        kasan_populate_zero_shadow((void *)KASAN_SHADOW_START,
                                   (void *)mod_shadow_start);
        kasan_populate_zero_shadow((void *)kimg_shadow_end,
                                   kasan_mem_to_shadow((void *)PAGE_OFFSET));

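        /*
         * With KASLR the kernel image may sit above the modules area,
         * leaving a gap between the two shadow regions; back any such
         * gap with the zero shadow as well.
         */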
        if (kimg_shadow_start > mod_shadow_end)
                kasan_populate_zero_shadow((void *)mod_shadow_end,
                                           (void *)kimg_shadow_start);

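        /* Map a real, node-local shadow for every memblock memory region. */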
        for_each_memblock(memory, reg) {
                void *start = (void *)__phys_to_virt(reg->base);
                void *end = (void *)__phys_to_virt(reg->base + reg->size);

                if (start >= end)
                        break;

                kasan_map_populate((unsigned long)kasan_mem_to_shadow(start),
                                   (unsigned long)kasan_mem_to_shadow(end),
                                   early_pfn_to_nid(virt_to_pfn(start)));
        }

        /*
         * KAsan may reuse the contents of kasan_zero_pte directly, so we
         * should make sure that it maps the zero page read-only.
         */
        for (i = 0; i < PTRS_PER_PTE; i++)
                set_pte(&kasan_zero_pte[i],
                        pfn_pte(sym_to_pfn(kasan_zero_page), PAGE_KERNEL_RO));

        memset(kasan_zero_page, 0, PAGE_SIZE);
        cpu_replace_ttbr1(lm_alias(swapper_pg_dir));

        /* At this point kasan is fully initialized. Enable error messages */
        init_task.kasan_depth = 0;
        pr_info("KernelAddressSanitizer initialized\n");
}