/*
 * This file contains kasan initialization code for ARM64.
 *
 * Copyright (c) 2015 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#define pr_fmt(fmt) "kasan: " fmt
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/start_kernel.h>
#include <linux/mm.h>

#include <asm/mmu_context.h>
#include <asm/kernel-pgtable.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
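
/*
 * Temporary pgdir: kasan_init() copies swapper_pg_dir here and runs on it
 * while the real shadow mappings are installed, so the early zero-page
 * shadow stays reachable for already-instrumented code.
 */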
static pgd_t tmp_pg_dir[PTRS_PER_PGD] __initdata __aligned(PGD_SIZE);
/*
 * The p*d_populate functions call virt_to_phys implicitly so they can't be used
 * directly on kernel symbols (bm_p*d). All the early functions are called too
 * early to use lm_alias so __p*d_populate functions must be used to populate
 * with the physical address from __pa_symbol.
 */
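/*
 * For example, pmd_populate_kernel() would translate the kasan_zero_pte
 * pointer through virt_to_phys(), which is only valid for linear-map
 * addresses; image symbols have to go through __pa_symbol() instead, as
 * the helpers below do.
 */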
static void __init kasan_early_pte_populate(pmd_t *pmd, unsigned long addr,
					unsigned long end)
{
	pte_t *pte;
	unsigned long next;

	if (pmd_none(*pmd))
		__pmd_populate(pmd, __pa_symbol(kasan_zero_pte), PMD_TYPE_TABLE);

	pte = pte_offset_kimg(pmd, addr);
	do {
		next = addr + PAGE_SIZE;
		set_pte(pte, pfn_pte(sym_to_pfn(kasan_zero_page),
					PAGE_KERNEL));
	} while (pte++, addr = next, addr != end && pte_none(*pte));
}
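
/*
 * The pmd and pud helpers below repeat the same pattern one level up: hook
 * each empty entry to the shared kasan_zero_* table and descend, so the
 * whole early shadow range ends up backed by the single zero page.
 */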
static void __init kasan_early_pmd_populate(pud_t *pud,
					unsigned long addr,
					unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;

	if (pud_none(*pud))
		__pud_populate(pud, __pa_symbol(kasan_zero_pmd), PMD_TYPE_TABLE);

	pmd = pmd_offset_kimg(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		kasan_early_pte_populate(pmd, addr, next);
	} while (pmd++, addr = next, addr != end && pmd_none(*pmd));
}
static void __init kasan_early_pud_populate(pgd_t *pgd,
					unsigned long addr,
					unsigned long end)
{
	pud_t *pud;
	unsigned long next;

	if (pgd_none(*pgd))
		__pgd_populate(pgd, __pa_symbol(kasan_zero_pud), PUD_TYPE_TABLE);

	pud = pud_offset_kimg(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		kasan_early_pmd_populate(pud, addr, next);
	} while (pud++, addr = next, addr != end && pud_none(*pud));
}
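
/*
 * Walk the pgd entries spanning [KASAN_SHADOW_START, KASAN_SHADOW_END) and
 * populate them via the helpers above, so that every shadow access made
 * before kasan_init() resolves to the zero page.
 */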
static void __init kasan_map_early_shadow(void)
{
	unsigned long addr = KASAN_SHADOW_START;
	unsigned long end = KASAN_SHADOW_END;
	unsigned long next;
	pgd_t *pgd;

	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		kasan_early_pud_populate(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}
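
/*
 * asmlinkage because this is reached from the early assembly boot path,
 * before instrumented C code starts running. The first BUILD_BUG_ON
 * reflects the shadow mapping shadow(addr) = (addr >> 3) +
 * KASAN_SHADOW_OFFSET: shadowing a 2^64-byte address space takes 2^61
 * bytes, so the offset must be KASAN_SHADOW_END - (1UL << 61) for the
 * shadow of the top of the address space to end exactly at
 * KASAN_SHADOW_END.
 */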
asmlinkage void __init kasan_early_init(void)
{
	BUILD_BUG_ON(KASAN_SHADOW_OFFSET != KASAN_SHADOW_END - (1UL << 61));
	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, PGDIR_SIZE));
	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PGDIR_SIZE));
	kasan_map_early_shadow();
}
/*
 * Copy the current shadow region into a new pgdir.
 */
void __init kasan_copy_shadow(pgd_t *pgdir)
{
	pgd_t *pgd, *pgd_new, *pgd_end;

	pgd = pgd_offset_k(KASAN_SHADOW_START);
	pgd_end = pgd_offset_k(KASAN_SHADOW_END);
	pgd_new = pgd_offset_raw(pgdir, KASAN_SHADOW_START);
	do {
		set_pgd(pgd_new, *pgd);
	} while (pgd++, pgd_new++, pgd != pgd_end);
}
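
/*
 * KASAN_SHADOW_START/END are PGDIR_SIZE-aligned (enforced by the
 * BUILD_BUG_ONs in kasan_early_init()), so stepping in PGDIR_SIZE units
 * below removes exactly the pgd entries the early shadow installed.
 */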
static void __init clear_pgds(unsigned long start,
			unsigned long end)
{
	/*
	 * Remove references to kasan page tables from
	 * swapper_pg_dir. pgd_clear() can't be used
	 * here because it's a no-op on 2- and 3-level pagetable setups.
	 */
	for (; start < end; start += PGDIR_SIZE)
		set_pgd(pgd_offset_k(start), __pgd(0));
}
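
/*
 * kasan_init() replaces the early zero-page shadow with the real thing:
 * allocated shadow pages for the kernel image and for all memblock memory,
 * zero shadow for the remainder of the shadow region, and the modules area
 * left unmapped here (its shadow is expected to be allocated when modules
 * are loaded).
 */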
void __init kasan_init(void)
{
	u64 kimg_shadow_start, kimg_shadow_end;
	u64 mod_shadow_start, mod_shadow_end;
	struct memblock_region *reg;
	int i;

	kimg_shadow_start = (u64)kasan_mem_to_shadow(_text);
	kimg_shadow_end = (u64)kasan_mem_to_shadow(_end);

	mod_shadow_start = (u64)kasan_mem_to_shadow((void *)MODULES_VADDR);
	mod_shadow_end = (u64)kasan_mem_to_shadow((void *)MODULES_END);
	/*
	 * We are going to perform proper setup of shadow memory.
	 * At first we should unmap early shadow (clear_pgds() call below).
	 * However, instrumented code can't execute without shadow memory, so
	 * tmp_pg_dir is used to keep the early shadow mapped until the full
	 * shadow setup is finished.
	 */
	memcpy(tmp_pg_dir, swapper_pg_dir, sizeof(tmp_pg_dir));
	cpu_replace_ttbr1(lm_alias(tmp_pg_dir));
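
	/*
	 * cpu_replace_ttbr1() points TTBR1_EL1 at the copied tables (with the
	 * necessary TLB maintenance), so instrumented code keeps seeing the
	 * early zero-page shadow while swapper_pg_dir is rewritten below.
	 */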
	clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
	vmemmap_populate(kimg_shadow_start, kimg_shadow_end,
			 pfn_to_nid(virt_to_pfn(_text)));
	/*
	 * vmemmap_populate() has populated the shadow region that covers the
	 * kernel image with SWAPPER_BLOCK_SIZE mappings, so we have to round
	 * the start and end addresses to SWAPPER_BLOCK_SIZE as well, to prevent
	 * kasan_populate_zero_shadow() from replacing the page table entries
	 * (PMD or PTE) at the edges of the shadow region for the kernel
	 * image.
	 */
	kimg_shadow_start = round_down(kimg_shadow_start, SWAPPER_BLOCK_SIZE);
	kimg_shadow_end = round_up(kimg_shadow_end, SWAPPER_BLOCK_SIZE);
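
	/*
	 * With 4K pages, SWAPPER_BLOCK_SIZE is typically the 2MB section
	 * size, so this widens the image shadow window to block boundaries
	 * that vmemmap_populate() has already mapped; the zero-shadow
	 * population below then stops at those boundaries instead of
	 * splitting them.
	 */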
	kasan_populate_zero_shadow((void *)KASAN_SHADOW_START,
				   (void *)mod_shadow_start);
	kasan_populate_zero_shadow((void *)kimg_shadow_end,
				   kasan_mem_to_shadow((void *)PAGE_OFFSET));

	if (kimg_shadow_start > mod_shadow_end)
		kasan_populate_zero_shadow((void *)mod_shadow_end,
					   (void *)kimg_shadow_start);
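
	/*
	 * The gap between the modules area's shadow and the kernel image's
	 * shadow only exists when the module region ends below the image;
	 * when the two abut or overlap there is nothing left to cover here.
	 */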
	for_each_memblock(memory, reg) {
		void *start = (void *)__phys_to_virt(reg->base);
		void *end = (void *)__phys_to_virt(reg->base + reg->size);

		if (start >= end)
			break;

		/*
		 * end + 1 here is intentional. We check several shadow bytes in
		 * advance to slightly speed up the fast path. In some rare cases
		 * we could cross the boundary of mapped shadow, so we just map
		 * some more here.
		 */
		vmemmap_populate((unsigned long)kasan_mem_to_shadow(start),
				 (unsigned long)kasan_mem_to_shadow(end) + 1,
				 pfn_to_nid(virt_to_pfn(start)));
	}
	/*
	 * KAsan may reuse the contents of kasan_zero_pte directly, so we
	 * should make sure that it maps the zero page read-only.
	 */
	for (i = 0; i < PTRS_PER_PTE; i++)
		set_pte(&kasan_zero_pte[i],
			pfn_pte(sym_to_pfn(kasan_zero_page), PAGE_KERNEL_RO));
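
	/*
	 * Wipe the zero page so it reads back as fully unpoisoned everywhere
	 * it is still mapped, then switch TTBR1 back to the now fully
	 * populated swapper_pg_dir.
	 */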
	memset(kasan_zero_page, 0, PAGE_SIZE);
	cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
	/* At this point kasan is fully initialized. Enable error messages */
	init_task.kasan_depth = 0;
	pr_info("KernelAddressSanitizer initialized\n");
}