git.proxmox.com Git - mirror_ubuntu-eoan-kernel.git/commitdiff
powerpc/mm: Add a helper to select PAGE_KERNEL_RO or PAGE_READONLY
author Christophe Leroy <christophe.leroy@c-s.fr>
Wed, 21 Aug 2019 10:20:00 +0000 (10:20 +0000)
committer Khalid Elmously <khalid.elmously@canonical.com>
Fri, 18 Oct 2019 08:26:32 +0000 (04:26 -0400)
BugLink: https://bugs.launchpad.net/bugs/1848039
commit 4c0f5d1eb4072871c34530358df45f05ab80edd6 upstream.

In a couple of places there is a need to select whether read-only
protection of shadow pages is performed with PAGE_KERNEL_RO or with
PAGE_READONLY.

Add a helper to avoid duplicating the choice.

Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Cc: stable@vger.kernel.org
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/9f33f44b9cd741c4a02b3dce7b8ef9438fe2cd2a.1566382750.git.christophe.leroy@c-s.fr
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Paolo Pisati <paolo.pisati@canonical.com>
Signed-off-by: Kleber Sacilotto de Souza <kleber.souza@canonical.com>
arch/powerpc/mm/kasan/kasan_init_32.c

index 802387b231adcd5036a6d72a26bea5f81f730d4c..e8ab3cc5f6e4fd6b2814f9b90e9e90735388564b 100644 (file)
 #include <asm/code-patching.h>
 #include <mm/mmu_decl.h>
 
+static pgprot_t kasan_prot_ro(void)
+{
+       if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE))
+               return PAGE_READONLY;
+
+       return PAGE_KERNEL_RO;
+}
+
 static void kasan_populate_pte(pte_t *ptep, pgprot_t prot)
 {
        unsigned long va = (unsigned long)kasan_early_shadow_page;
@@ -26,6 +34,7 @@ static int __ref kasan_init_shadow_page_tables(unsigned long k_start, unsigned l
 {
        pmd_t *pmd;
        unsigned long k_cur, k_next;
+       pgprot_t prot = kasan_prot_ro();
 
        pmd = pmd_offset(pud_offset(pgd_offset_k(k_start), k_start), k_start);
 
@@ -43,10 +52,7 @@ static int __ref kasan_init_shadow_page_tables(unsigned long k_start, unsigned l
 
                if (!new)
                        return -ENOMEM;
-               if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE))
-                       kasan_populate_pte(new, PAGE_READONLY);
-               else
-                       kasan_populate_pte(new, PAGE_KERNEL_RO);
+               kasan_populate_pte(new, prot);
 
                smp_wmb(); /* See comment in __pte_alloc */
 
@@ -103,10 +109,9 @@ static int __ref kasan_init_region(void *start, size_t size)
 
 static void __init kasan_remap_early_shadow_ro(void)
 {
-       if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE))
-               kasan_populate_pte(kasan_early_shadow_pte, PAGE_READONLY);
-       else
-               kasan_populate_pte(kasan_early_shadow_pte, PAGE_KERNEL_RO);
+       pgprot_t prot = kasan_prot_ro();
+
+       kasan_populate_pte(kasan_early_shadow_pte, prot);
 
        flush_tlb_kernel_range(KASAN_SHADOW_START, KASAN_SHADOW_END);
 }