From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Andy Lutomirski <luto@kernel.org>
Date: Mon, 4 Dec 2017 15:07:26 +0100
Subject: [PATCH] x86/entry/64: Move the IST stacks into struct cpu_entry_area
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The IST stacks are needed when an IST exception occurs and are accessed
before any kernel code at all runs. Move them into struct cpu_entry_area.
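
(Illustration only, not part of this patch: a standalone user-space C sketch
of the layout idea. N_STACKS, STK_SZ, DBG_SZ, DBG_SLOT and struct entry_area
are invented stand-ins for N_EXCEPTION_STACKS, EXCEPTION_STKSZ, DEBUG_STKSZ,
DEBUG_STACK and struct cpu_entry_area. The stacks become one member of a
per-CPU area, and each stack's top is found by the same running-sum walk that
cpu_init() does below.)

#include <stdio.h>

#define N_STACKS   4			/* stand-in for N_EXCEPTION_STACKS */
#define STK_SZ     4096			/* stand-in for EXCEPTION_STKSZ */
#define DBG_SZ     (2 * 4096)		/* stand-in for DEBUG_STKSZ */
#define DBG_SLOT   1			/* which slot gets the bigger stack */

/* The point of the patch: the stacks become one member of a per-CPU area. */
struct entry_area {
	char exception_stacks[(N_STACKS - 1) * STK_SZ + DBG_SZ];
};

/* GNU C range initializer, as the kernel's exception_stack_sizes[] uses. */
static const unsigned int stack_sizes[N_STACKS] = {
	[0 ... N_STACKS - 1]	= STK_SZ,
	[DBG_SLOT]		= DBG_SZ,
};

int main(void)
{
	struct entry_area area;		/* one of these per CPU in the real thing */
	char *estacks = area.exception_stacks;

	/* Each stack's top is the running sum of the sizes before and including it. */
	for (int v = 0; v < N_STACKS; v++) {
		estacks += stack_sizes[v];
		printf("stack %d top at offset %td\n", v, estacks - area.exception_stacks);
	}
	return 0;
}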

The IST stacks are unlike the rest of cpu_entry_area: they're used even for
entries from kernel mode. This means that they need to be set up before we
load the final IDT. Move cpu_entry_area setup to trap_init() for the boot
CPU and set it up for all possible CPUs at once in native_smp_prepare_cpus().
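
(Again illustration only: a user-space sketch of the ordering constraint.
setup_area(), install_final_idt() and NR_CPUS_DEMO are made-up stand-ins, not
kernel APIs. The point is simply that every per-CPU area has to exist before
the IDT whose IST entries reference those stacks is installed.)

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS_DEMO 4

static bool area_ready[NR_CPUS_DEMO];

/* Stand-in for setup_cpu_entry_area(cpu). */
static void setup_area(int cpu)
{
	area_ready[cpu] = true;
}

/*
 * Stand-in for loading the final IDT: the IST entries point at the per-CPU
 * areas, so every area the IDT can reference must already exist.
 */
static void install_final_idt(void)
{
	for (int cpu = 0; cpu < NR_CPUS_DEMO; cpu++)
		assert(area_ready[cpu]);
	puts("final IDT installed");
}

int main(void)
{
	/* Mirrors the patch: set up the areas for all possible CPUs first... */
	for (int cpu = 0; cpu < NR_CPUS_DEMO; cpu++)
		setup_area(cpu);

	/* ...and only then switch to the IDT that relies on the IST stacks. */
	install_final_idt();
	return 0;
}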

Signed-off-by: Andy Lutomirski <luto@kernel.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov <bp@suse.de>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Borislav Petkov <bpetkov@suse.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: David Laight <David.Laight@aculab.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Eduardo Valentin <eduval@amazon.com>
Cc: Greg KH <gregkh@linuxfoundation.org>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: aliguori@amazon.com
Cc: daniel.gruss@iaik.tugraz.at
Cc: keescook@google.com
Link: https://lkml.kernel.org/r/20171204150606.480598743@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
(backported from commit 40e7f949e0d9a33968ebde5d67f7e3a47c97742a)
Signed-off-by: Andy Whitcroft <apw@canonical.com>
Signed-off-by: Kleber Sacilotto de Souza <kleber.souza@canonical.com>
(cherry picked from commit 88e7277709f2e7c023e66ff9ae158aeff4cf7c8f)
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
---
 arch/x86/include/asm/fixmap.h | 12 +++++++
 arch/x86/kernel/cpu/common.c  | 74 ++++++++++++++++++++++++-------------------
 arch/x86/kernel/traps.c       |  3 ++
 3 files changed, 57 insertions(+), 32 deletions(-)

diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index 189d12d8afe0..953aed54cb5e 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -63,10 +63,22 @@ struct cpu_entry_area {
 	struct tss_struct tss;
 
 	char entry_trampoline[PAGE_SIZE];
+
+#ifdef CONFIG_X86_64
+	/*
+	 * Exception stacks used for IST entries.
+	 *
+	 * In the future, this should have a separate slot for each stack
+	 * with guard pages between them.
+	 */
+	char exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ];
+#endif
 };
 
 #define CPU_ENTRY_AREA_PAGES	(sizeof(struct cpu_entry_area) / PAGE_SIZE)
 
+extern void setup_cpu_entry_areas(void);
+
 /*
  * Here we define all the compile-time 'special' virtual
  * addresses. The point is to have a constant address at
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index c2b2ee73b8a1..f487766855d3 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -466,24 +466,36 @@ void load_percpu_segment(int cpu)
 	load_stack_canary_segment();
 }
 
-static void set_percpu_fixmap_pages(int fixmap_index, void *ptr,
-				    int pages, pgprot_t prot)
-{
-	int i;
-
-	for (i = 0; i < pages; i++) {
-		__set_fixmap(fixmap_index - i,
-			     per_cpu_ptr_to_phys(ptr + i * PAGE_SIZE), prot);
-	}
-}
-
 #ifdef CONFIG_X86_32
 /* The 32-bit entry code needs to find cpu_entry_area. */
 DEFINE_PER_CPU(struct cpu_entry_area *, cpu_entry_area);
 #endif
 
+#ifdef CONFIG_X86_64
+/*
+ * Special IST stacks which the CPU switches to when it calls
+ * an IST-marked descriptor entry. Up to 7 stacks (hardware
+ * limit), all of them are 4K, except the debug stack which
+ * is 8K.
+ */
+static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = {
+	  [0 ... N_EXCEPTION_STACKS - 1]	= EXCEPTION_STKSZ,
+	  [DEBUG_STACK - 1]			= DEBUG_STKSZ
+};
+
+static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
+	[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]);
+#endif
+
+static void __init
+set_percpu_fixmap_pages(int idx, void *ptr, int pages, pgprot_t prot)
+{
+	for ( ; pages; pages--, idx--, ptr += PAGE_SIZE)
+		__set_fixmap(idx, per_cpu_ptr_to_phys(ptr), prot);
+}
+
 /* Setup the fixmap mappings only once per-processor */
-static inline void setup_cpu_entry_area(int cpu)
+static void __init setup_cpu_entry_area(int cpu)
 {
 #ifdef CONFIG_X86_64
 	extern char _entry_trampoline[];
@@ -532,15 +544,31 @@ static inline void setup_cpu_entry_area(int cpu)
 #ifdef CONFIG_X86_32
-	this_cpu_write(cpu_entry_area, get_cpu_entry_area(cpu));
+	per_cpu(cpu_entry_area, cpu) = get_cpu_entry_area(cpu);
 #endif
 
 #ifdef CONFIG_X86_64
+	BUILD_BUG_ON(sizeof(exception_stacks) % PAGE_SIZE != 0);
+	BUILD_BUG_ON(sizeof(exception_stacks) !=
+		     sizeof(((struct cpu_entry_area *)0)->exception_stacks));
+	set_percpu_fixmap_pages(get_cpu_entry_area_index(cpu, exception_stacks),
+				&per_cpu(exception_stacks, cpu),
+				sizeof(exception_stacks) / PAGE_SIZE,
+				PAGE_KERNEL);
+
 	__set_fixmap(get_cpu_entry_area_index(cpu, entry_trampoline),
 		     __pa_symbol(_entry_trampoline), PAGE_KERNEL_RX);
 #endif
 }
 
+void __init setup_cpu_entry_areas(void)
+{
+	unsigned int cpu;
+
+	for_each_possible_cpu(cpu)
+		setup_cpu_entry_area(cpu);
+}
+
 /* Load the original GDT from the per-cpu structure */
 void load_direct_gdt(int cpu)
 {
@@ -1386,20 +1414,6 @@ DEFINE_PER_CPU(unsigned int, irq_count) __visible = -1;
 DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT;
 EXPORT_PER_CPU_SYMBOL(__preempt_count);
 
-/*
- * Special IST stacks which the CPU switches to when it calls
- * an IST-marked descriptor entry. Up to 7 stacks (hardware
- * limit), all of them are 4K, except the debug stack which
- * is 8K.
- */
-static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = {
-	  [0 ... N_EXCEPTION_STACKS - 1]	= EXCEPTION_STKSZ,
-	  [DEBUG_STACK - 1]			= DEBUG_STKSZ
-};
-
-static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
-	[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]);
-
 /* May not be marked __init: used by software suspend */
 void syscall_init(void)
 {
@@ -1608,7 +1622,7 @@ void cpu_init(void)
 	 * set up and load the per-CPU TSS
 	 */
 	if (!oist->ist[0]) {
-		char *estacks = per_cpu(exception_stacks, cpu);
+		char *estacks = get_cpu_entry_area(cpu)->exception_stacks;
 
 		for (v = 0; v < N_EXCEPTION_STACKS; v++) {
 			estacks += exception_stack_sizes[v];
@@ -1633,8 +1647,6 @@ void cpu_init(void)
 	BUG_ON(me->mm);
 	enter_lazy_tlb(&init_mm, me);
 
-	setup_cpu_entry_area(cpu);
-
 	/*
 	 * Initialize the TSS. sp0 points to the entry trampoline stack
 	 * regardless of what task is running.
@@ -1693,8 +1705,6 @@ void cpu_init(void)
 	BUG_ON(curr->mm);
 	enter_lazy_tlb(&init_mm, curr);
 
-	setup_cpu_entry_area(cpu);
-
 	/*
 	 * Initialize the TSS. Don't bother initializing sp0, as the initial
 	 * task never enters user mode.
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index d9debdafe7a6..fd4d47e8672e 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -992,6 +992,9 @@ void __init trap_init(void)
 {
 	int i;
 
+	/* Init cpu_entry_area before IST entries are set up */
+	setup_cpu_entry_areas();
+
 #ifdef CONFIG_EISA
 	void __iomem *p = early_ioremap(0x0FFFD9, 4);