x86/cpu_entry_area: Prepare for IST guard pages
author Thomas Gleixner <tglx@linutronix.de>
Sun, 14 Apr 2019 15:59:48 +0000 (17:59 +0200)
committer Borislav Petkov <bp@suse.de>
Wed, 17 Apr 2019 10:58:21 +0000 (12:58 +0200)
To allow guard pages between the IST stacks, each stack needs to be
mapped individually.
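
For context (not part of this change), a minimal stand-alone sketch of the
guard-page idea: map each stack on its own and leave an unmapped hole
between the stacks, so an overflow faults immediately instead of silently
corrupting the neighbouring stack. All names and sizes below are
illustrative assumptions for a userspace demo, not kernel definitions.

  /*
   * Userspace sketch only: two separately mapped "stacks" with an
   * unmapped guard page between them.  Overflowing the first stack
   * faults on the guard page rather than scribbling over the second.
   * Assumes 4K pages.
   */
  #include <stdio.h>
  #include <sys/mman.h>

  #define STACK_SZ (4 * 4096)   /* hypothetical stack size */
  #define GUARD_SZ 4096         /* one guard page          */

  int main(void)
  {
          /* Reserve the whole range with no access rights. */
          char *area = mmap(NULL, 2 * STACK_SZ + GUARD_SZ, PROT_NONE,
                            MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
          if (area == MAP_FAILED)
                  return 1;

          /* Map each stack individually; the hole stays PROT_NONE. */
          mprotect(area, STACK_SZ, PROT_READ | PROT_WRITE);
          mprotect(area + STACK_SZ + GUARD_SZ, STACK_SZ,
                   PROT_READ | PROT_WRITE);

          printf("stack A %p, guard %p, stack B %p\n",
                 (void *)area, (void *)(area + STACK_SZ),
                 (void *)(area + STACK_SZ + GUARD_SZ));

          /* area[STACK_SZ] = 0;  would now SIGSEGV on the guard page. */
          return 0;
  }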

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Borislav Petkov <bp@suse.de>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Sean Christopherson <sean.j.christopherson@intel.com>
Cc: x86-ml <x86@kernel.org>
Link: https://lkml.kernel.org/r/20190414160144.592691557@linutronix.de
arch/x86/mm/cpu_entry_area.c

index 6a09b84c13fea1619916ba205c15080527e680f8..2b1407662a6d5c748332af88c67f802d74e8b45e 100644
@@ -77,6 +77,34 @@ static void __init percpu_setup_debug_store(unsigned int cpu)
 #endif
 }
 
+#ifdef CONFIG_X86_64
+
+#define cea_map_stack(name) do {                                       \
+       npages = sizeof(estacks->name## _stack) / PAGE_SIZE;            \
+       cea_map_percpu_pages(cea->estacks.name## _stack,                \
+                       estacks->name## _stack, npages, PAGE_KERNEL);   \
+       } while (0)
+
+static void __init percpu_setup_exception_stacks(unsigned int cpu)
+{
+       struct exception_stacks *estacks = per_cpu_ptr(&exception_stacks, cpu);
+       struct cpu_entry_area *cea = get_cpu_entry_area(cpu);
+       unsigned int npages;
+
+       BUILD_BUG_ON(sizeof(exception_stacks) % PAGE_SIZE != 0);
+       /*
+        * The exceptions stack mappings in the per cpu area are protected
+        * by guard pages so each stack must be mapped separately.
+        */
+       cea_map_stack(DF);
+       cea_map_stack(NMI);
+       cea_map_stack(DB);
+       cea_map_stack(MCE);
+}
+#else
+static inline void percpu_setup_exception_stacks(unsigned int cpu) {}
+#endif
+
 /* Setup the fixmap mappings only once per-processor */
 static void __init setup_cpu_entry_area(unsigned int cpu)
 {
@@ -134,13 +162,8 @@ static void __init setup_cpu_entry_area(unsigned int cpu)
        per_cpu(cpu_entry_area, cpu) = cea;
 #endif
 
-#ifdef CONFIG_X86_64
-       BUILD_BUG_ON(sizeof(exception_stacks) % PAGE_SIZE != 0);
-       BUILD_BUG_ON(sizeof(exception_stacks) !=
-                    sizeof(((struct cpu_entry_area *)0)->estacks));
-       cea_map_percpu_pages(&cea->estacks, &per_cpu(exception_stacks, cpu),
-                            sizeof(exception_stacks) / PAGE_SIZE, PAGE_KERNEL);
-#endif
+       percpu_setup_exception_stacks(cpu);
+
        percpu_setup_debug_store(cpu);
 }