powerpc/8xx: Implement dedicated kasan_init_region()
author		Christophe Leroy <christophe.leroy@csgroup.eu>
		Tue, 19 May 2020 05:49:27 +0000 (05:49 +0000)
committer	Michael Ellerman <mpe@ellerman.id.au>
		Tue, 26 May 2020 12:22:23 +0000 (22:22 +1000)
Implement a kasan_init_region() dedicated to the 8xx that allocates
the KASAN shadow memory with memblock and maps it using 8M huge pages.

Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/d2d60202a8821dc81cffe6ff59cc13c15b7e4bb6.1589866984.git.christophe.leroy@csgroup.eu
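
For context, the kasan_init_region() added below operates on shadow addresses derived with the usual KASAN scheme of one shadow byte per eight bytes of memory. A minimal sketch of that translation, using illustrative placeholder names (the real constants and kasan_mem_to_shadow() live in the generic KASAN headers and asm/kasan.h):

/* Illustrative sketch only, not part of the patch: the translation that
 * kasan_mem_to_shadow() performs.  The scale shift is 3 (eight bytes of
 * memory per shadow byte); the offset below is a placeholder, the real
 * value is platform-specific.
 */
#define SKETCH_SHADOW_SCALE_SHIFT	3
#define SKETCH_SHADOW_OFFSET		0xf8000000UL	/* placeholder */

static inline void *sketch_mem_to_shadow(const void *addr)
{
	return (void *)(((unsigned long)addr >> SKETCH_SHADOW_SCALE_SHIFT) +
			SKETCH_SHADOW_OFFSET);
}

With this mapping an 8M chunk of shadow covers 64M of memory, and since a PPC32 page directory entry spans 4M, each 8M huge page needs two consecutive entries. That is why kasan_init_shadow_8M() below steps pmd by 2 and calls hugepd_populate_kernel() on two adjacent entries per backing block.
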
arch/powerpc/mm/kasan/8xx.c [new file with mode: 0644]
arch/powerpc/mm/kasan/Makefile

diff --git a/arch/powerpc/mm/kasan/8xx.c b/arch/powerpc/mm/kasan/8xx.c
new file mode 100644
index 0000000..db4ef44
--- /dev/null
+++ b/arch/powerpc/mm/kasan/8xx.c
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#define DISABLE_BRANCH_PROFILING
+
+#include <linux/kasan.h>
+#include <linux/memblock.h>
+#include <linux/hugetlb.h>
+#include <asm/pgalloc.h>
+
+static int __init
+kasan_init_shadow_8M(unsigned long k_start, unsigned long k_end, void *block)
+{
+       pmd_t *pmd = pmd_ptr_k(k_start);
+       unsigned long k_cur, k_next;
+
+       for (k_cur = k_start; k_cur != k_end; k_cur = k_next, pmd += 2, block += SZ_8M) {
+               pte_basic_t *new;
+
+               k_next = pgd_addr_end(k_cur, k_end);
+               k_next = pgd_addr_end(k_next, k_end);
+               if ((void *)pmd_page_vaddr(*pmd) != kasan_early_shadow_pte)
+                       continue;
+
+               new = memblock_alloc(sizeof(pte_basic_t), SZ_4K);
+               if (!new)
+                       return -ENOMEM;
+
+               *new = pte_val(pte_mkhuge(pfn_pte(PHYS_PFN(__pa(block)), PAGE_KERNEL)));
+
+               hugepd_populate_kernel((hugepd_t *)pmd, (pte_t *)new, PAGE_SHIFT_8M);
+               hugepd_populate_kernel((hugepd_t *)pmd + 1, (pte_t *)new, PAGE_SHIFT_8M);
+       }
+       return 0;
+}
+
+int __init kasan_init_region(void *start, size_t size)
+{
+       unsigned long k_start = (unsigned long)kasan_mem_to_shadow(start);
+       unsigned long k_end = (unsigned long)kasan_mem_to_shadow(start + size);
+       unsigned long k_cur;
+       int ret;
+       void *block;
+
+       block = memblock_alloc(k_end - k_start, SZ_8M);
+       if (!block)
+               return -ENOMEM;
+
+       if (IS_ALIGNED(k_start, SZ_8M)) {
+               kasan_init_shadow_8M(k_start, ALIGN_DOWN(k_end, SZ_8M), block);
+               k_cur = ALIGN_DOWN(k_end, SZ_8M);
+               if (k_cur == k_end)
+                       goto finish;
+       } else {
+               k_cur = k_start;
+       }
+
+       ret = kasan_init_shadow_page_tables(k_start, k_end);
+       if (ret)
+               return ret;
+
+       for (; k_cur < k_end; k_cur += PAGE_SIZE) {
+               pmd_t *pmd = pmd_ptr_k(k_cur);
+               void *va = block + k_cur - k_start;
+               pte_t pte = pfn_pte(PHYS_PFN(__pa(va)), PAGE_KERNEL);
+
+               if (k_cur < ALIGN_DOWN(k_end, SZ_512K))
+                       pte = pte_mkhuge(pte);
+
+               __set_pte_at(&init_mm, k_cur, pte_offset_kernel(pmd, k_cur), pte, 0);
+       }
+finish:
+       flush_tlb_kernel_range(k_start, k_end);
+       return 0;
+}
diff --git a/arch/powerpc/mm/kasan/Makefile b/arch/powerpc/mm/kasan/Makefile
index 6577897673dda6a0fd3c4ce7cc8e2bee0839e0b6..440038ea79f18d3bb03b1a21e4e8a017e047af25 100644
--- a/arch/powerpc/mm/kasan/Makefile
+++ b/arch/powerpc/mm/kasan/Makefile
@@ -3,3 +3,4 @@
 KASAN_SANITIZE := n
 
 obj-$(CONFIG_PPC32)           += kasan_init_32.o
+obj-$(CONFIG_PPC_8xx)          += 8xx.o
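
How the new object plugs in, as a rough sketch assuming the common PPC32 KASAN setup of this series is otherwise unchanged: kasan_init_region() is a weak symbol in the generic 32-bit code, so building 8xx.o overrides the default 4K-page shadow mapping with the hugepage version above. The common init path calls it once per memory region, roughly as follows (paraphrased, not the verbatim upstream code; details such as clamping to lowmem are omitted):

/* Paraphrased caller sketch: the common PPC32 KASAN init walks the
 * memblock memory regions and asks the (possibly overridden)
 * kasan_init_region() to map shadow for each of them.
 */
for_each_memblock(memory, reg) {
	int ret = kasan_init_region(__va(reg->base), reg->size);

	if (ret)
		panic("kasan: kasan_init_region() failed");
}
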