x86/mm: Provide general kernel support for memory encryption
Author:     Tom Lendacky <thomas.lendacky@amd.com>
AuthorDate: Mon, 17 Jul 2017 21:10:07 +0000 (16:10 -0500)
Commit:     Ingo Molnar <mingo@kernel.org>
CommitDate: Tue, 18 Jul 2017 09:38:00 +0000 (11:38 +0200)

Changes to the existing page table macros will allow the SME support to
be enabled in a simple fashion with minimal changes to files that use these
macros.  Since the memory encryption mask will now be part of the regular
pagetable macros, we introduce two new macros (_PAGE_TABLE_NOENC and
_KERNPG_TABLE_NOENC) to allow for early pagetable creation/initialization
without the encryption mask before SME becomes active.  Two new pgprot()
macros are defined to allow setting or clearing the page encryption mask.
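
For illustration only (not part of the patch): a minimal user-space model of
how the new pgprot_encrypted()/pgprot_decrypted() helpers set and clear the
mask, assuming a made-up sme_me_mask value with the C-bit at position 47:

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed value purely for illustration; the real mask is derived
     * from CPUID at boot and is zero when SME is not active. */
    static uint64_t sme_me_mask = 1ULL << 47;

    #define __sme_set(x) ((uint64_t)(x) | sme_me_mask)
    #define __sme_clr(x) ((uint64_t)(x) & ~sme_me_mask)

    /* Models pgprot_encrypted()/pgprot_decrypted() on a raw pteval. */
    int main(void)
    {
            uint64_t prot = 0x163;  /* arbitrary example protection bits */

            printf("encrypted: %#llx\n", (unsigned long long)__sme_set(prot));
            printf("decrypted: %#llx\n", (unsigned long long)__sme_clr(prot));
            return 0;
    }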

The FIXMAP_PAGE_NOCACHE define is introduced for use with MMIO.  SME does
not support encryption for MMIO areas so this define removes the encryption
mask from the page attribute.
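
A minimal sketch of the distinction (bit positions are assumptions, not the
kernel's real layout): an MMIO protection such as FIXMAP_PAGE_NOCACHE must not
carry the encryption bit, while a normal kernel RAM mapping does:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative bit positions only. */
    #define _PAGE_PRESENT (1ULL << 0)
    #define _PAGE_RW      (1ULL << 1)
    #define _PAGE_PCD     (1ULL << 4)   /* cache disable */
    #define _PAGE_ENC     (1ULL << 47)  /* assumed SME C-bit */

    int main(void)
    {
            /* RAM mapping: uncached, with the encryption bit. */
            uint64_t page_kernel_nocache = _PAGE_PRESENT | _PAGE_RW | _PAGE_PCD | _PAGE_ENC;
            /* MMIO fixmap: uncached, no encryption bit. */
            uint64_t fixmap_page_nocache = _PAGE_PRESENT | _PAGE_RW | _PAGE_PCD;

            printf("RAM  nocache: %#llx\n", (unsigned long long)page_kernel_nocache);
            printf("MMIO nocache: %#llx\n", (unsigned long long)fixmap_page_nocache);
            return 0;
    }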

Two new macros are introduced (__sme_pa() / __sme_pa_nodebug()) to allow
creating a physical address with the encryption mask.  These are used when
working with the cr3 register so that the PGD can be encrypted. The current
__va() macro is updated so that the virtual address is generated based on
the physical address without the encryption mask, allowing the same virtual
address to be generated regardless of whether encryption is enabled for that
physical location or not.
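
A toy model of that behavior (all constants and helper names are made up for
illustration): because the encryption mask is excluded from __PHYSICAL_MASK,
it is stripped before the physical-to-virtual conversion, so both forms of
the physical address map to the same virtual address:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_OFFSET    0xffff880000000000ULL              /* illustrative direct-map base */
    #define SME_MASK       (1ULL << 47)                       /* assumed C-bit position */
    #define PHYSICAL_MASK  (((1ULL << 52) - 1) & ~SME_MASK)   /* mask with __sme_clr() applied */

    #define sme_pa_model(pa) ((pa) | SME_MASK)                      /* like __sme_pa() */
    #define va_model(pa)     (PAGE_OFFSET + ((pa) & PHYSICAL_MASK)) /* like __va() */

    int main(void)
    {
            uint64_t pa = 0x12345000ULL;

            /* Both conversions land on the same virtual address. */
            printf("__va(pa)           = %#llx\n", (unsigned long long)va_model(pa));
            printf("__va(__sme_pa(pa)) = %#llx\n", (unsigned long long)va_model(sme_pa_model(pa)));
            return 0;
    }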

Also, an early initialization function is added for SME (see the sketch
after this list).  If SME is active, this function:

 - Updates the early_pmd_flags so that early page faults create mappings
   with the encryption mask.

 - Updates the __supported_pte_mask to include the encryption mask.

 - Updates the protection_map entries to include the encryption mask so
   that user-space allocations will automatically have the encryption mask
   applied.
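
A stand-alone sketch of these three steps; the variable names mirror the
kernel's but the values and array size are placeholders:

    #include <stdint.h>
    #include <stdio.h>

    #define SME_MASK  (1ULL << 47)  /* assumed C-bit, illustration only */
    #define NR_PROT   16

    /* Stand-ins for the kernel's early_pmd_flags, __supported_pte_mask
     * and protection_map[]; the initial values are arbitrary. */
    static uint64_t early_pmd_flags    = 0x1e3;
    static uint64_t supported_pte_mask = 0x3fffffffffffffffULL;
    static uint64_t protection_map[NR_PROT];

    static void sme_early_init_model(uint64_t sme_me_mask)
    {
            unsigned int i;

            if (!sme_me_mask)
                    return;                 /* SME not active: nothing to do */

            early_pmd_flags    |= sme_me_mask;  /* early faults map encrypted */
            supported_pte_mask |= sme_me_mask;  /* mask is a supported PTE bit */

            for (i = 0; i < NR_PROT; i++)       /* user mappings get the mask too */
                    protection_map[i] |= sme_me_mask;
    }

    int main(void)
    {
            sme_early_init_model(SME_MASK);
            printf("early_pmd_flags = %#llx\n", (unsigned long long)early_pmd_flags);
            return 0;
    }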

Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov <bp@suse.de>
Cc: Alexander Potapenko <glider@google.com>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brijesh Singh <brijesh.singh@amd.com>
Cc: Dave Young <dyoung@redhat.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Larry Woodman <lwoodman@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Matt Fleming <matt@codeblueprint.co.uk>
Cc: Michael S. Tsirkin <mst@redhat.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Radim Krčmář <rkrcmar@redhat.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Toshimitsu Kani <toshi.kani@hpe.com>
Cc: kasan-dev@googlegroups.com
Cc: kvm@vger.kernel.org
Cc: linux-arch@vger.kernel.org
Cc: linux-doc@vger.kernel.org
Cc: linux-efi@vger.kernel.org
Cc: linux-mm@kvack.org
Link: http://lkml.kernel.org/r/b36e952c4c39767ae7f0a41cf5345adf27438480.1500319216.git.thomas.lendacky@amd.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
16 files changed:
arch/x86/boot/compressed/pagetable.c
arch/x86/include/asm/fixmap.h
arch/x86/include/asm/mem_encrypt.h
arch/x86/include/asm/page_types.h
arch/x86/include/asm/pgtable.h
arch/x86/include/asm/pgtable_types.h
arch/x86/include/asm/processor.h
arch/x86/kernel/espfix_64.c
arch/x86/kernel/head64.c
arch/x86/kernel/head_64.S
arch/x86/mm/kasan_init_64.c
arch/x86/mm/mem_encrypt.c
arch/x86/mm/pageattr.c
arch/x86/mm/tlb.c
include/asm-generic/pgtable.h
include/linux/mem_encrypt.h

diff --git a/arch/x86/boot/compressed/pagetable.c b/arch/x86/boot/compressed/pagetable.c
index 28029be47fbb839f248826b517a9e295f4389395..f1aa43854bed423e7bfccaa84ff66ea91996b731 100644
 #define __pa(x)  ((unsigned long)(x))
 #define __va(x)  ((void *)((unsigned long)(x)))
 
+/*
+ * The pgtable.h and mm/ident_map.c includes make use of the SME related
+ * information which is not used in the compressed image support. Un-define
+ * the SME support to avoid any compile and link errors.
+ */
+#undef CONFIG_AMD_MEM_ENCRYPT
+
 #include "misc.h"
 
 /* These actually do the work of building the kernel identity maps. */
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index b65155cc3760a72b49b680c3f70923ddedf684d2..d9ff226cb4890351297ec34411fc15ec9472cb7f 100644
@@ -157,6 +157,13 @@ static inline void __set_fixmap(enum fixed_addresses idx,
 }
 #endif
 
+/*
+ * FIXMAP_PAGE_NOCACHE is used for MMIO. Memory encryption is not
+ * supported for MMIO addresses, so make sure that the memory encryption
+ * mask is not part of the page attributes.
+ */
+#define FIXMAP_PAGE_NOCACHE PAGE_KERNEL_IO_NOCACHE
+
 #include <asm-generic/fixmap.h>
 
 #define __late_set_fixmap(idx, phys, flags) __set_fixmap(idx, phys, flags)
diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h
index 475e34f537938dd69ae6b5796cb1a18950681356..dbae7a5a347d8d0360c433d4cf1c9943e45287cc 100644
@@ -21,6 +21,8 @@
 
 extern unsigned long sme_me_mask;
 
+void __init sme_early_init(void);
+
 void __init sme_encrypt_kernel(void);
 void __init sme_enable(void);
 
@@ -28,11 +30,22 @@ void __init sme_enable(void);
 
 #define sme_me_mask    0UL
 
+static inline void __init sme_early_init(void) { }
+
 static inline void __init sme_encrypt_kernel(void) { }
 static inline void __init sme_enable(void) { }
 
 #endif /* CONFIG_AMD_MEM_ENCRYPT */
 
+/*
+ * The __sme_pa() and __sme_pa_nodebug() macros are meant for use when
+ * writing to or comparing values from the cr3 register.  Having the
+ * encryption mask set in cr3 enables the PGD entry to be encrypted and
+ * avoid special case handling of PGD allocations.
+ */
+#define __sme_pa(x)            (__pa(x) | sme_me_mask)
+#define __sme_pa_nodebug(x)    (__pa_nodebug(x) | sme_me_mask)
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* __X86_MEM_ENCRYPT_H__ */
diff --git a/arch/x86/include/asm/page_types.h b/arch/x86/include/asm/page_types.h
index 7bd0099384cac4ed0fa89a4e25c42e4a63ae5f9c..b98ed9d1463098936bcebbae46b59dc00495508c 100644
@@ -3,6 +3,7 @@
 
 #include <linux/const.h>
 #include <linux/types.h>
+#include <linux/mem_encrypt.h>
 
 /* PAGE_SHIFT determines the page size */
 #define PAGE_SHIFT             12
@@ -15,7 +16,7 @@
 #define PUD_PAGE_SIZE          (_AC(1, UL) << PUD_SHIFT)
 #define PUD_PAGE_MASK          (~(PUD_PAGE_SIZE-1))
 
-#define __PHYSICAL_MASK                ((phys_addr_t)((1ULL << __PHYSICAL_MASK_SHIFT) - 1))
+#define __PHYSICAL_MASK                ((phys_addr_t)(__sme_clr((1ULL << __PHYSICAL_MASK_SHIFT) - 1)))
 #define __VIRTUAL_MASK         ((1UL << __VIRTUAL_MASK_SHIFT) - 1)
 
 /* Cast *PAGE_MASK to a signed type so that it is sign-extended if
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index b64ea527edfb46e043face785094afd476774894..c6452cb12c0b73dcb0329c56267b57455f3b28db 100644
@@ -1,6 +1,7 @@
 #ifndef _ASM_X86_PGTABLE_H
 #define _ASM_X86_PGTABLE_H
 
+#include <linux/mem_encrypt.h>
 #include <asm/page.h>
 #include <asm/pgtable_types.h>
 
                     cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS)))     \
         : (prot))
 
+/*
+ * Macros to add or remove encryption attribute
+ */
+#define pgprot_encrypted(prot) __pgprot(__sme_set(pgprot_val(prot)))
+#define pgprot_decrypted(prot) __pgprot(__sme_clr(pgprot_val(prot)))
+
 #ifndef __ASSEMBLY__
 #include <asm/x86_init.h>
 
@@ -38,6 +45,8 @@ extern struct list_head pgd_list;
 
 extern struct mm_struct *pgd_page_get_mm(struct page *page);
 
+extern pmdval_t early_pmd_flags;
+
 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
 #else  /* !CONFIG_PARAVIRT */
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index bf9638e1ee4215d4101d2836d5c3963d59e6dbab..de32ca32928a1a157ab766ca94379e0e6bdc8601 100644
@@ -2,6 +2,8 @@
 #define _ASM_X86_PGTABLE_DEFS_H
 
 #include <linux/const.h>
+#include <linux/mem_encrypt.h>
+
 #include <asm/page_types.h>
 
 #define FIRST_USER_ADDRESS     0UL
 
 #define _PAGE_PROTNONE (_AT(pteval_t, 1) << _PAGE_BIT_PROTNONE)
 
-#define _PAGE_TABLE    (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER |        \
-                        _PAGE_ACCESSED | _PAGE_DIRTY)
-#define _KERNPG_TABLE  (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED |    \
-                        _PAGE_DIRTY)
+#define _PAGE_TABLE_NOENC      (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER |\
+                                _PAGE_ACCESSED | _PAGE_DIRTY)
+#define _KERNPG_TABLE_NOENC    (_PAGE_PRESENT | _PAGE_RW |             \
+                                _PAGE_ACCESSED | _PAGE_DIRTY)
 
 /*
  * Set of bits not changed in pte_modify.  The pte's
@@ -191,18 +193,29 @@ enum page_cache_mode {
 #define __PAGE_KERNEL_IO               (__PAGE_KERNEL)
 #define __PAGE_KERNEL_IO_NOCACHE       (__PAGE_KERNEL_NOCACHE)
 
-#define PAGE_KERNEL                    __pgprot(__PAGE_KERNEL)
-#define PAGE_KERNEL_RO                 __pgprot(__PAGE_KERNEL_RO)
-#define PAGE_KERNEL_EXEC               __pgprot(__PAGE_KERNEL_EXEC)
-#define PAGE_KERNEL_RX                 __pgprot(__PAGE_KERNEL_RX)
-#define PAGE_KERNEL_NOCACHE            __pgprot(__PAGE_KERNEL_NOCACHE)
-#define PAGE_KERNEL_LARGE              __pgprot(__PAGE_KERNEL_LARGE)
-#define PAGE_KERNEL_LARGE_EXEC         __pgprot(__PAGE_KERNEL_LARGE_EXEC)
-#define PAGE_KERNEL_VSYSCALL           __pgprot(__PAGE_KERNEL_VSYSCALL)
-#define PAGE_KERNEL_VVAR               __pgprot(__PAGE_KERNEL_VVAR)
-
-#define PAGE_KERNEL_IO                 __pgprot(__PAGE_KERNEL_IO)
-#define PAGE_KERNEL_IO_NOCACHE         __pgprot(__PAGE_KERNEL_IO_NOCACHE)
+#ifndef __ASSEMBLY__
+
+#define _PAGE_ENC      (_AT(pteval_t, sme_me_mask))
+
+#define _PAGE_TABLE    (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER |        \
+                        _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_ENC)
+#define _KERNPG_TABLE  (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED |    \
+                        _PAGE_DIRTY | _PAGE_ENC)
+
+#define PAGE_KERNEL            __pgprot(__PAGE_KERNEL | _PAGE_ENC)
+#define PAGE_KERNEL_RO         __pgprot(__PAGE_KERNEL_RO | _PAGE_ENC)
+#define PAGE_KERNEL_EXEC       __pgprot(__PAGE_KERNEL_EXEC | _PAGE_ENC)
+#define PAGE_KERNEL_RX         __pgprot(__PAGE_KERNEL_RX | _PAGE_ENC)
+#define PAGE_KERNEL_NOCACHE    __pgprot(__PAGE_KERNEL_NOCACHE | _PAGE_ENC)
+#define PAGE_KERNEL_LARGE      __pgprot(__PAGE_KERNEL_LARGE | _PAGE_ENC)
+#define PAGE_KERNEL_LARGE_EXEC __pgprot(__PAGE_KERNEL_LARGE_EXEC | _PAGE_ENC)
+#define PAGE_KERNEL_VSYSCALL   __pgprot(__PAGE_KERNEL_VSYSCALL | _PAGE_ENC)
+#define PAGE_KERNEL_VVAR       __pgprot(__PAGE_KERNEL_VVAR | _PAGE_ENC)
+
+#define PAGE_KERNEL_IO         __pgprot(__PAGE_KERNEL_IO)
+#define PAGE_KERNEL_IO_NOCACHE __pgprot(__PAGE_KERNEL_IO_NOCACHE)
+
+#endif /* __ASSEMBLY__ */
 
 /*         xwr */
 #define __P000 PAGE_NONE
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 6a79547e8ee01e06a84cc948f0d9a61002cdda82..a68f70c3debc46d9ef9b65604389863a5c53bb03 100644
@@ -29,6 +29,7 @@ struct vm86;
 #include <linux/math64.h>
 #include <linux/err.h>
 #include <linux/irqflags.h>
+#include <linux/mem_encrypt.h>
 
 /*
  * We handle most unaligned accesses in hardware.  On the other hand
@@ -241,7 +242,7 @@ static inline unsigned long read_cr3_pa(void)
 
 static inline void load_cr3(pgd_t *pgdir)
 {
-       write_cr3(__pa(pgdir));
+       write_cr3(__sme_pa(pgdir));
 }
 
 #ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c
index 6b91e2eb8d3f8a5b8ad1a57c7a2a47d0a3b4440d..9c4e7ba6870c142921cfbbd07b8bbf45285e5c07 100644
@@ -195,7 +195,7 @@ void init_espfix_ap(int cpu)
 
        pte_p = pte_offset_kernel(&pmd, addr);
        stack_page = page_address(alloc_pages_node(node, GFP_KERNEL, 0));
-       pte = __pte(__pa(stack_page) | (__PAGE_KERNEL_RO & ptemask));
+       pte = __pte(__pa(stack_page) | ((__PAGE_KERNEL_RO | _PAGE_ENC) & ptemask));
        for (n = 0; n < ESPFIX_PTE_CLONES; n++)
                set_pte(&pte_p[n*PTE_STRIDE], pte);
 
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 1f0ddcc9675cb4910ed5c54ad35b9fdb2dc05966..5cd0b72a02834ca1f56efae4c4681220a56a0572 100644
@@ -102,7 +102,7 @@ unsigned long __head __startup_64(unsigned long physaddr)
 
        pud = fixup_pointer(early_dynamic_pgts[next_early_pgt++], physaddr);
        pmd = fixup_pointer(early_dynamic_pgts[next_early_pgt++], physaddr);
-       pgtable_flags = _KERNPG_TABLE + sme_get_me_mask();
+       pgtable_flags = _KERNPG_TABLE_NOENC + sme_get_me_mask();
 
        if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
                p4d = fixup_pointer(early_dynamic_pgts[next_early_pgt++], physaddr);
@@ -177,7 +177,7 @@ static void __init reset_early_page_tables(void)
 {
        memset(early_top_pgt, 0, sizeof(pgd_t)*(PTRS_PER_PGD-1));
        next_early_pgt = 0;
-       write_cr3(__pa_nodebug(early_top_pgt));
+       write_cr3(__sme_pa_nodebug(early_top_pgt));
 }
 
 /* Create a new PMD entry */
@@ -310,6 +310,13 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
 
        clear_page(init_top_pgt);
 
+       /*
+        * SME support may update early_pmd_flags to include the memory
+        * encryption mask, so it needs to be called before anything
+        * that may generate a page fault.
+        */
+       sme_early_init();
+
        kasan_early_init();
 
        for (i = 0; i < NUM_EXCEPTION_VECTORS; i++)
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index ec5d5e90c8f199cb78282e42a012c451e5494abc..513cbb012eccc51f18ac16f9dac74a33abaa4c16 100644
@@ -351,9 +351,9 @@ GLOBAL(name)
 NEXT_PAGE(early_top_pgt)
        .fill   511,8,0
 #ifdef CONFIG_X86_5LEVEL
-       .quad   level4_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
+       .quad   level4_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
 #else
-       .quad   level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
+       .quad   level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
 #endif
 
 NEXT_PAGE(early_dynamic_pgts)
@@ -366,15 +366,15 @@ NEXT_PAGE(init_top_pgt)
        .fill   512,8,0
 #else
 NEXT_PAGE(init_top_pgt)
-       .quad   level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
+       .quad   level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
        .org    init_top_pgt + PGD_PAGE_OFFSET*8, 0
-       .quad   level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
+       .quad   level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
        .org    init_top_pgt + PGD_START_KERNEL*8, 0
        /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
-       .quad   level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
+       .quad   level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
 
 NEXT_PAGE(level3_ident_pgt)
-       .quad   level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
+       .quad   level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
        .fill   511, 8, 0
 NEXT_PAGE(level2_ident_pgt)
        /* Since I easily can, map the first 1G.
@@ -386,14 +386,14 @@ NEXT_PAGE(level2_ident_pgt)
 #ifdef CONFIG_X86_5LEVEL
 NEXT_PAGE(level4_kernel_pgt)
        .fill   511,8,0
-       .quad   level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
+       .quad   level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
 #endif
 
 NEXT_PAGE(level3_kernel_pgt)
        .fill   L3_START_KERNEL,8,0
        /* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
-       .quad   level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
-       .quad   level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
+       .quad   level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
+       .quad   level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
 
 NEXT_PAGE(level2_kernel_pgt)
        /*
@@ -411,7 +411,7 @@ NEXT_PAGE(level2_kernel_pgt)
 
 NEXT_PAGE(level2_fixmap_pgt)
        .fill   506,8,0
-       .quad   level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
+       .quad   level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
        /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
        .fill   5,8,0
 
diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
index 02c9d75534091a0cf06b78716a990c41847cb6e4..39d4daf5e289a3fa274286b39d4450454cee5eec 100644
@@ -87,7 +87,7 @@ static struct notifier_block kasan_die_notifier = {
 void __init kasan_early_init(void)
 {
        int i;
-       pteval_t pte_val = __pa_nodebug(kasan_zero_page) | __PAGE_KERNEL;
+       pteval_t pte_val = __pa_nodebug(kasan_zero_page) | __PAGE_KERNEL | _PAGE_ENC;
        pmdval_t pmd_val = __pa_nodebug(kasan_zero_pte) | _KERNPG_TABLE;
        pudval_t pud_val = __pa_nodebug(kasan_zero_pmd) | _KERNPG_TABLE;
        p4dval_t p4d_val = __pa_nodebug(kasan_zero_pud) | _KERNPG_TABLE;
@@ -153,7 +153,7 @@ void __init kasan_init(void)
         */
        memset(kasan_zero_page, 0, PAGE_SIZE);
        for (i = 0; i < PTRS_PER_PTE; i++) {
-               pte_t pte = __pte(__pa(kasan_zero_page) | __PAGE_KERNEL_RO);
+               pte_t pte = __pte(__pa(kasan_zero_page) | __PAGE_KERNEL_RO | _PAGE_ENC);
                set_pte(&kasan_zero_pte[i], pte);
        }
        /* Flush TLBs again to be sure that write protection applied. */
diff --git a/arch/x86/mm/mem_encrypt.c b/arch/x86/mm/mem_encrypt.c
index 3ac6f99b095c040dfd68d4368a7fd89fdb3ad683..f973d3dc3802327d70798ec2a919467c7aa58daf 100644
@@ -12,6 +12,7 @@
 
 #include <linux/linkage.h>
 #include <linux/init.h>
+#include <linux/mm.h>
 
 /*
  * Since SME related variables are set early in the boot process they must
 unsigned long sme_me_mask __section(.data) = 0;
 EXPORT_SYMBOL_GPL(sme_me_mask);
 
+void __init sme_early_init(void)
+{
+       unsigned int i;
+
+       if (!sme_me_mask)
+               return;
+
+       early_pmd_flags = __sme_set(early_pmd_flags);
+
+       __supported_pte_mask = __sme_set(__supported_pte_mask);
+
+       /* Update the protection map with memory encryption mask */
+       for (i = 0; i < ARRAY_SIZE(protection_map); i++)
+               protection_map[i] = pgprot_encrypted(protection_map[i]);
+}
+
 void __init sme_encrypt_kernel(void)
 {
 }
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 757b0bcdf712dfb1e73527c603ab5d5b05f5bcae..7e2d6c0a64c4367af8b6ddbb62f498929d800198 100644
@@ -2020,6 +2020,9 @@ int kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn, unsigned long address,
        if (!(page_flags & _PAGE_RW))
                cpa.mask_clr = __pgprot(_PAGE_RW);
 
+       if (!(page_flags & _PAGE_ENC))
+               cpa.mask_clr = pgprot_encrypted(cpa.mask_clr);
+
        cpa.mask_set = __pgprot(_PAGE_PRESENT | page_flags);
 
        retval = __change_page_attr_set_clr(&cpa, 0);
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 2c1b8881e9d381a4b61724f08fd3a3553887f65e..593d2f76a54c7caba47c65d181fe614db05e9993 100644
@@ -115,7 +115,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
                         */
                        this_cpu_write(cpu_tlbstate.ctxs[0].tlb_gen,
                                       next_tlb_gen);
-                       write_cr3(__pa(next->pgd));
+                       write_cr3(__sme_pa(next->pgd));
                        trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH,
                                        TLB_FLUSH_ALL);
                }
@@ -157,7 +157,7 @@ void switch_mm_irqs_off(struct mm_struct *prev, struct mm_struct *next,
                this_cpu_write(cpu_tlbstate.ctxs[0].ctx_id, next->context.ctx_id);
                this_cpu_write(cpu_tlbstate.ctxs[0].tlb_gen, next_tlb_gen);
                this_cpu_write(cpu_tlbstate.loaded_mm, next);
-               write_cr3(__pa(next->pgd));
+               write_cr3(__sme_pa(next->pgd));
 
                trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
        }
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 7dfa767dc68012ac52ac81d48e92b3ec79c97311..4d7bb98f41340f52881f78a4d8e4b9dc2f21600f 100644
@@ -582,6 +582,18 @@ static inline void ptep_modify_prot_commit(struct mm_struct *mm,
 #endif /* __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION */
 #endif /* CONFIG_MMU */
 
+/*
+ * No-op macros that just return the current protection value. Defined here
+ * because these macros can be used even if CONFIG_MMU is not defined.
+ */
+#ifndef pgprot_encrypted
+#define pgprot_encrypted(prot) (prot)
+#endif
+
+#ifndef pgprot_decrypted
+#define pgprot_decrypted(prot) (prot)
+#endif
+
 /*
  * A facility to provide lazy MMU batching.  This allows PTE updates and
  * page invalidations to be delayed until a call to leave lazy MMU mode
diff --git a/include/linux/mem_encrypt.h b/include/linux/mem_encrypt.h
index 570f4fcff13f174c9c47590fb4e4d40adaaaed96..1255f09f5e425a293d2da840242fab393a1cbb9e 100644
@@ -35,6 +35,14 @@ static inline unsigned long sme_get_me_mask(void)
        return sme_me_mask;
 }
 
+/*
+ * The __sme_set() and __sme_clr() macros are useful for adding or removing
+ * the encryption mask from a value (e.g. when dealing with pagetable
+ * entries).
+ */
+#define __sme_set(x)           ((unsigned long)(x) | sme_me_mask)
+#define __sme_clr(x)           ((unsigned long)(x) & ~sme_me_mask)
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* __MEM_ENCRYPT_H__ */