// SPDX-License-Identifier: GPL-2.0
#include <linux/kasan.h>
#include <linux/sched/task.h>
#include <linux/memblock.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/kasan.h>
#include <asm/mem_detect.h>
#include <asm/processor.h>
#include <asm/sclp.h>
#include <asm/facility.h>
#include <asm/sections.h>
#include <asm/setup.h>

static unsigned long segment_pos __initdata;
static unsigned long segment_low __initdata;
static unsigned long pgalloc_pos __initdata;
static unsigned long pgalloc_low __initdata;
static unsigned long pgalloc_freeable __initdata;
static bool has_edat __initdata;
static bool has_nx __initdata;

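/*
 * Shorthand for the shadow address of a kernel address, via the generic
 * kasan_mem_to_shadow() helper (the shadow covers 1/8 of the mapped
 * address range, since KASAN_SHADOW_SCALE_SHIFT is 3).
 */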
#define __sha(x) ((unsigned long)kasan_mem_to_shadow((void *)x))

static pgd_t early_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);

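/*
 * Report a fatal condition on the SCLP early console (the only console
 * available this early in boot) and stop the CPU in disabled wait.
 */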
static void __init kasan_early_panic(const char *reason)
{
	sclp_early_printk("The Linux kernel failed to boot with the KernelAddressSanitizer:\n");
	sclp_early_printk(reason);
	disabled_wait();
}

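/*
 * Simple bump allocator for 1 MB segments: hand out segments downwards
 * from segment_pos, panicking once segment_low would be crossed.
 */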
static void * __init kasan_early_alloc_segment(void)
{
	segment_pos -= _SEGMENT_SIZE;

	if (segment_pos < segment_low)
		kasan_early_panic("out of memory during initialisation\n");

	return (void *)segment_pos;
}

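/*
 * Bump allocator for page-sized allocations, likewise growing downwards
 * from pgalloc_pos towards pgalloc_low.
 */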
static void * __init kasan_early_alloc_pages(unsigned int order)
{
	pgalloc_pos -= (PAGE_SIZE << order);

	if (pgalloc_pos < pgalloc_low)
		kasan_early_panic("out of memory during initialisation\n");

	return (void *)pgalloc_pos;
}

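/*
 * Allocate a region or segment table (1 << CRST_ALLOC_ORDER pages) and
 * initialize all of its entries with val.
 */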
static void * __init kasan_early_crst_alloc(unsigned long val)
{
	unsigned long *table;

	table = kasan_early_alloc_pages(CRST_ALLOC_ORDER);
	if (table)
		crst_table_init(table, val);
	return table;
}

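/*
 * s390 page tables are half a page in size, so one page holds two of
 * them: allocate a page, hand out its upper half first and remember the
 * lower half as leftover for the next call.
 */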
static pte_t * __init kasan_early_pte_alloc(void)
{
	static void *pte_leftover;
	pte_t *pte;

	BUILD_BUG_ON(_PAGE_TABLE_SIZE * 2 != PAGE_SIZE);

	if (!pte_leftover) {
		pte_leftover = kasan_early_alloc_pages(0);
		pte = pte_leftover + _PAGE_TABLE_SIZE;
	} else {
		pte = pte_leftover;
		pte_leftover = NULL;
	}
	memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
	return pte;
}

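/*
 * Shadow population modes:
 *   POPULATE_ONE2ONE     - identity-map the range (page frames back their
 *                          own virtual addresses)
 *   POPULATE_MAP         - back the range with freshly allocated, zeroed
 *                          pages (real shadow memory)
 *   POPULATE_ZERO_SHADOW - map the range read-only to the shared kasan
 *                          zero page (untracked memory)
 *   POPULATE_SHALLOW     - create only the top table level(s), so lower
 *                          levels can be populated lazily later
 */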
enum populate_mode {
	POPULATE_ONE2ONE,
	POPULATE_MAP,
	POPULATE_ZERO_SHADOW,
	POPULATE_SHALLOW
};
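
/*
 * Build the page table hierarchy for the range [address, end) according
 * to the requested populate_mode, short-circuiting whole levels with the
 * shared zero-shadow tables or (with EDAT1) 1 MB segment mappings
 * whenever alignment and remaining length allow.
 */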
static void __init kasan_early_vmemmap_populate(unsigned long address,
						unsigned long end,
						enum populate_mode mode)
{
	unsigned long pgt_prot_zero, pgt_prot, sgt_prot;
	pgd_t *pg_dir;
	p4d_t *p4_dir;
	pud_t *pu_dir;
	pmd_t *pm_dir;
	pte_t *pt_dir;

	pgt_prot_zero = pgprot_val(PAGE_KERNEL_RO);
	if (!has_nx)
		pgt_prot_zero &= ~_PAGE_NOEXEC;
	pgt_prot = pgprot_val(PAGE_KERNEL_EXEC);
	sgt_prot = pgprot_val(SEGMENT_KERNEL_EXEC);

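	/*
	 * Descend from pgd to pte for every page in the range, allocating
	 * intermediate tables on demand and taking large-entry shortcuts
	 * where mode and alignment permit.
	 */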
	while (address < end) {
		pg_dir = pgd_offset_k(address);
		if (pgd_none(*pg_dir)) {
			if (mode == POPULATE_ZERO_SHADOW &&
			    IS_ALIGNED(address, PGDIR_SIZE) &&
			    end - address >= PGDIR_SIZE) {
				pgd_populate(&init_mm, pg_dir,
					     kasan_early_shadow_p4d);
				address = (address + PGDIR_SIZE) & PGDIR_MASK;
				continue;
			}
			p4_dir = kasan_early_crst_alloc(_REGION2_ENTRY_EMPTY);
			pgd_populate(&init_mm, pg_dir, p4_dir);
		}

		if (IS_ENABLED(CONFIG_KASAN_S390_4_LEVEL_PAGING) &&
		    mode == POPULATE_SHALLOW) {
			address = (address + P4D_SIZE) & P4D_MASK;
			continue;
		}

		p4_dir = p4d_offset(pg_dir, address);
		if (p4d_none(*p4_dir)) {
			if (mode == POPULATE_ZERO_SHADOW &&
			    IS_ALIGNED(address, P4D_SIZE) &&
			    end - address >= P4D_SIZE) {
				p4d_populate(&init_mm, p4_dir,
					     kasan_early_shadow_pud);
				address = (address + P4D_SIZE) & P4D_MASK;
				continue;
			}
			pu_dir = kasan_early_crst_alloc(_REGION3_ENTRY_EMPTY);
			p4d_populate(&init_mm, p4_dir, pu_dir);
		}

		if (!IS_ENABLED(CONFIG_KASAN_S390_4_LEVEL_PAGING) &&
		    mode == POPULATE_SHALLOW) {
			address = (address + PUD_SIZE) & PUD_MASK;
			continue;
		}

		pu_dir = pud_offset(p4_dir, address);
		if (pud_none(*pu_dir)) {
			if (mode == POPULATE_ZERO_SHADOW &&
			    IS_ALIGNED(address, PUD_SIZE) &&
			    end - address >= PUD_SIZE) {
				pud_populate(&init_mm, pu_dir,
					     kasan_early_shadow_pmd);
				address = (address + PUD_SIZE) & PUD_MASK;
				continue;
			}
			pm_dir = kasan_early_crst_alloc(_SEGMENT_ENTRY_EMPTY);
			pud_populate(&init_mm, pu_dir, pm_dir);
		}

		pm_dir = pmd_offset(pu_dir, address);
		if (pmd_none(*pm_dir)) {
			if (mode == POPULATE_ZERO_SHADOW &&
			    IS_ALIGNED(address, PMD_SIZE) &&
			    end - address >= PMD_SIZE) {
				pmd_populate(&init_mm, pm_dir,
					     kasan_early_shadow_pte);
				address = (address + PMD_SIZE) & PMD_MASK;
				continue;
			}
			/* the first megabyte of 1:1 is mapped with 4k pages */
			if (has_edat && address && end - address >= PMD_SIZE &&
			    mode != POPULATE_ZERO_SHADOW) {
				void *page;

				if (mode == POPULATE_ONE2ONE) {
					page = (void *)address;
				} else {
					page = kasan_early_alloc_segment();
					memset(page, 0, _SEGMENT_SIZE);
				}
				pmd_val(*pm_dir) = __pa(page) | sgt_prot;
				address = (address + PMD_SIZE) & PMD_MASK;
				continue;
			}

			pt_dir = kasan_early_pte_alloc();
			pmd_populate(&init_mm, pm_dir, pt_dir);
		} else if (pmd_large(*pm_dir)) {
			address = (address + PMD_SIZE) & PMD_MASK;
			continue;
		}

		pt_dir = pte_offset_kernel(pm_dir, address);
		if (pte_none(*pt_dir)) {
			void *page;

			switch (mode) {
			case POPULATE_ONE2ONE:
				page = (void *)address;
				pte_val(*pt_dir) = __pa(page) | pgt_prot;
				break;
			case POPULATE_MAP:
				page = kasan_early_alloc_pages(0);
				memset(page, 0, PAGE_SIZE);
				pte_val(*pt_dir) = __pa(page) | pgt_prot;
				break;
			case POPULATE_ZERO_SHADOW:
				page = kasan_early_shadow_page;
				pte_val(*pt_dir) = __pa(page) | pgt_prot_zero;
				break;
			case POPULATE_SHALLOW:
				/* should never happen */
				break;
			}
		}
		address += PAGE_SIZE;
	}
}

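/*
 * Install pgd as the kernel address space: build the ASCE and load it
 * into control registers 1 (primary), 7 (secondary) and 13 (home).
 */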
static void __init kasan_set_pgd(pgd_t *pgd, unsigned long asce_type)
{
	unsigned long asce_bits;

	asce_bits = asce_type | _ASCE_TABLE_LENGTH;
	S390_lowcore.kernel_asce = (__pa(pgd) & PAGE_MASK) | asce_bits;
	S390_lowcore.user_asce = S390_lowcore.kernel_asce;

	__ctl_load(S390_lowcore.kernel_asce, 1, 1);
	__ctl_load(S390_lowcore.kernel_asce, 7, 7);
	__ctl_load(S390_lowcore.kernel_asce, 13, 13);
}

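/*
 * Turn on dynamic address translation by setting the DAT bit in the PSW
 * mask and switching to home address space mode.
 */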
static void __init kasan_enable_dat(void)
{
	psw_t psw;

	psw.mask = __extract_psw();
	psw_bits(psw).dat = 1;
	psw_bits(psw).as = PSW_BITS_AS_HOME;
	__load_psw_mask(psw.mask);
}

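/*
 * Probe the facilities needed before the regular detection code runs:
 * facility 8 is EDAT1 (1 MB segment mappings), facility 130 is the
 * instruction-execution-protection facility backing _PAGE_NOEXEC.
 */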
static void __init kasan_early_detect_facilities(void)
{
	if (test_facility(8)) {
		has_edat = true;
		__ctl_set_bit(0, 23);
	}
	if (!noexec_disabled && test_facility(130)) {
		has_nx = true;
		__ctl_set_bit(0, 20);
	}
}

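/*
 * Set up the early identity mapping and the kasan shadow for it, while
 * still running with DAT off: detect memory, build early_pg_dir, populate
 * the shadow, then switch translation on and enable kasan checks.
 */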
void __init kasan_early_init(void)
{
	unsigned long untracked_mem_end;
	unsigned long shadow_alloc_size;
	unsigned long initrd_end;
	unsigned long asce_type;
	unsigned long memsize;
	unsigned long vmax;
	unsigned long pgt_prot = pgprot_val(PAGE_KERNEL_RO);
	pte_t pte_z;
	pmd_t pmd_z = __pmd(__pa(kasan_early_shadow_pte) | _SEGMENT_ENTRY);
	pud_t pud_z = __pud(__pa(kasan_early_shadow_pmd) | _REGION3_ENTRY);
	p4d_t p4d_z = __p4d(__pa(kasan_early_shadow_pud) | _REGION2_ENTRY);

	kasan_early_detect_facilities();
	if (!has_nx)
		pgt_prot &= ~_PAGE_NOEXEC;
	pte_z = __pte(__pa(kasan_early_shadow_page) | pgt_prot);

	memsize = get_mem_detect_end();
	if (!memsize)
		kasan_early_panic("cannot detect physical memory size\n");
	/* respect mem= cmdline parameter */
	if (memory_end_set && memsize > memory_end)
		memsize = memory_end;
	if (IS_ENABLED(CONFIG_CRASH_DUMP) && OLDMEM_BASE)
		memsize = min(memsize, OLDMEM_SIZE);
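	/* the identity mapping must not reach into the shadow area */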
	memsize = min(memsize, KASAN_SHADOW_START);

	if (IS_ENABLED(CONFIG_KASAN_S390_4_LEVEL_PAGING)) {
		/* 4 level paging */
		BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, P4D_SIZE));
		BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, P4D_SIZE));
		crst_table_init((unsigned long *)early_pg_dir,
				_REGION2_ENTRY_EMPTY);
		untracked_mem_end = vmax = _REGION1_SIZE;
		asce_type = _ASCE_TYPE_REGION2;
	} else {
		/* 3 level paging */
		BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, PUD_SIZE));
		BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PUD_SIZE));
		crst_table_init((unsigned long *)early_pg_dir,
				_REGION3_ENTRY_EMPTY);
		untracked_mem_end = vmax = _REGION2_SIZE;
		asce_type = _ASCE_TYPE_REGION3;
	}

	/* init kasan zero shadow */
	crst_table_init((unsigned long *)kasan_early_shadow_p4d,
			p4d_val(p4d_z));
	crst_table_init((unsigned long *)kasan_early_shadow_pud,
			pud_val(pud_z));
	crst_table_init((unsigned long *)kasan_early_shadow_pmd,
			pmd_val(pmd_z));
	memset64((u64 *)kasan_early_shadow_pte, pte_val(pte_z), PTRS_PER_PTE);

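	/* shadow memory needs 1/8 of the tracked range (scale shift is 3) */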
	shadow_alloc_size = memsize >> KASAN_SHADOW_SCALE_SHIFT;
	pgalloc_low = round_up((unsigned long)_end, _SEGMENT_SIZE);
	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD)) {
		initrd_end =
			round_up(INITRD_START + INITRD_SIZE, _SEGMENT_SIZE);
		pgalloc_low = max(pgalloc_low, initrd_end);
	}

	if (pgalloc_low + shadow_alloc_size > memsize)
		kasan_early_panic("out of memory during initialisation\n");

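	/*
	 * Carve the early allocators out of the top of memory: with EDAT1
	 * the shadow is backed by whole segments taken from the very top
	 * and page tables come from just below them; without EDAT1 both
	 * come from the same downward-growing page allocator.
	 */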
	if (has_edat) {
		segment_pos = round_down(memsize, _SEGMENT_SIZE);
		segment_low = segment_pos - shadow_alloc_size;
		pgalloc_pos = segment_low;
	} else {
		pgalloc_pos = memsize;
	}
	init_mm.pgd = early_pg_dir;
	/*
	 * Current memory layout:
	 * +- 0 -------------+     +- shadow start -+
	 * | 1:1 ram mapping |    /| 1/8 ram        |
	 * |                 |   / |                |
	 * +- end of ram ----+  /  +----------------+
	 * | ... gap ...     | /   |                |
	 * |                 |/    |     kasan      |
	 * +- shadow start --+     |     zero       |
	 * | 1/8 addr space  |     |     page       |
	 * +- shadow end ----+     |    mapping     |
	 * | ... gap ...     |\    |  (untracked)   |
	 * +- vmalloc area --+ \   |                |
	 * | vmalloc_size    |  \  |                |
	 * +- modules vaddr -+   \ +----------------+
	 * | 2Gb             |    \|    unmapped    | allocated per module
	 * +-----------------+     +- shadow end ---+
	 *
	 * Current memory layout (KASAN_VMALLOC):
	 * +- 0 -------------+     +- shadow start -+
	 * | 1:1 ram mapping |    /| 1/8 ram        |
	 * |                 |   / |                |
	 * +- end of ram ----+  /  +----------------+
	 * | ... gap ...     | /   |     kasan      |
	 * |                 |/    |     zero       |
	 * +- shadow start --+     |     page       |
	 * | 1/8 addr space  |     |    mapping     |
	 * +- shadow end ----+     |  (untracked)   |
	 * | ... gap ...     |\    |                |
	 * +- vmalloc area --+ \   +- vmalloc area -+
	 * | vmalloc_size    |  \  |shallow populate|
	 * +- modules vaddr -+   \ +- modules area -+
	 * | 2Gb             |    \|shallow populate|
	 * +-----------------+     +- shadow end ---+
	 */
	/* populate kasan shadow (for identity mapping and zero page mapping) */
	kasan_early_vmemmap_populate(__sha(0), __sha(memsize), POPULATE_MAP);
	if (IS_ENABLED(CONFIG_MODULES))
		untracked_mem_end = vmax - MODULES_LEN;
	if (IS_ENABLED(CONFIG_KASAN_VMALLOC)) {
		untracked_mem_end = vmax - vmalloc_size - MODULES_LEN;
		/* shallowly populate kasan shadow for vmalloc and modules */
		kasan_early_vmemmap_populate(__sha(untracked_mem_end),
					     __sha(vmax), POPULATE_SHALLOW);
	}
	/* populate kasan shadow for untracked memory */
	kasan_early_vmemmap_populate(__sha(max_physmem_end),
				     __sha(untracked_mem_end),
				     POPULATE_ZERO_SHADOW);
	/* memory allocated for identity mapping structs will be freed later */
	pgalloc_freeable = pgalloc_pos;
	/* populate identity mapping */
	kasan_early_vmemmap_populate(0, memsize, POPULATE_ONE2ONE);
	kasan_set_pgd(early_pg_dir, asce_type);
	kasan_enable_dat();
	/* enable kasan */
	init_task.kasan_depth = 0;
	memblock_reserve(pgalloc_pos, memsize - pgalloc_pos);
	sclp_early_printk("KernelAddressSanitizer initialized\n");
}

void __init kasan_copy_shadow(pgd_t *pg_dir)
{
	/*
	 * At this point we are still running on the early page tables set up
	 * in early_pg_dir, while swapper_pg_dir has just been initialized
	 * with the identity mapping. Carry the shadow memory region over
	 * from early_pg_dir to swapper_pg_dir.
	 */

	pgd_t *pg_dir_src;
	pgd_t *pg_dir_dst;
	p4d_t *p4_dir_src;
	p4d_t *p4_dir_dst;
	pud_t *pu_dir_src;
	pud_t *pu_dir_dst;

	pg_dir_src = pgd_offset_raw(early_pg_dir, KASAN_SHADOW_START);
	pg_dir_dst = pgd_offset_raw(pg_dir, KASAN_SHADOW_START);
	p4_dir_src = p4d_offset(pg_dir_src, KASAN_SHADOW_START);
	p4_dir_dst = p4d_offset(pg_dir_dst, KASAN_SHADOW_START);
	if (!p4d_folded(*p4_dir_src)) {
		/* 4 level paging */
		memcpy(p4_dir_dst, p4_dir_src,
		       (KASAN_SHADOW_SIZE >> P4D_SHIFT) * sizeof(p4d_t));
		return;
	}
	/* 3 level paging */
	pu_dir_src = pud_offset(p4_dir_src, KASAN_SHADOW_START);
	pu_dir_dst = pud_offset(p4_dir_dst, KASAN_SHADOW_START);
	memcpy(pu_dir_dst, pu_dir_src,
	       (KASAN_SHADOW_SIZE >> PUD_SHIFT) * sizeof(pud_t));
}

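/*
 * Release the pages that backed the early identity mapping's page tables;
 * they are only needed until the final kernel page tables take over.
 */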
void __init kasan_free_early_identity(void)
{
	memblock_free(pgalloc_pos, pgalloc_freeable - pgalloc_pos);
}