From a4da7aed31f0355b881bdeeb3d269a20759f16a8 Mon Sep 17 00:00:00 2001
From: Andy Lutomirski <luto@kernel.org>
Date: Mon, 4 Dec 2017 15:07:29 +0100
Subject: [PATCH 159/241] x86/entry/64: Make cpu_entry_area.tss read-only
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

CVE-2017-5754

The TSS is a fairly juicy target for exploits, and, now that the TSS
is in the cpu_entry_area, it's no longer protected by kASLR.  Make it
read-only on x86_64.

On x86_32, it can't be RO because it's written by the CPU during task
switches, and we use a task gate for double faults.  I'd also be
nervous about errata if we tried to make it RO even on configurations
without double fault handling.

[ tglx: AMD confirmed that there is no problem on 64-bit with TSS RO.  So
  	it's probably safe to assume that it's a non issue, though Intel
  	might have been creative in that area. Still waiting for
  	confirmation. ]

Signed-off-by: Andy Lutomirski <luto@kernel.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov <bpetkov@suse.de>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: David Laight <David.Laight@aculab.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Eduardo Valentin <eduval@amazon.com>
Cc: Greg KH <gregkh@linuxfoundation.org>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Kees Cook <keescook@chromium.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: aliguori@amazon.com
Cc: daniel.gruss@iaik.tugraz.at
Cc: hughd@google.com
Cc: keescook@google.com
Link: https://lkml.kernel.org/r/20171204150606.733700132@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
(backported from commit c482feefe1aeb150156248ba0fd3e029bc886605)
Signed-off-by: Andy Whitcroft <apw@canonical.com>
Signed-off-by: Kleber Sacilotto de Souza <kleber.souza@canonical.com>
(cherry picked from commit 785be108f90cd62eab2da17490714085ef752538)
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
---
 arch/x86/include/asm/fixmap.h      | 13 +++++++++----
 arch/x86/include/asm/processor.h   | 17 ++++++++---------
 arch/x86/include/asm/switch_to.h   |  4 ++--
 arch/x86/include/asm/thread_info.h |  2 +-
 arch/x86/kernel/asm-offsets.c      |  5 ++---
 arch/x86/kernel/asm-offsets_32.c   |  4 ++--
 arch/x86/kernel/cpu/common.c       | 29 +++++++++++++++++++----------
 arch/x86/kernel/ioport.c           |  2 +-
 arch/x86/kernel/process.c          |  6 +++---
 arch/x86/kernel/process_32.c       |  2 +-
 arch/x86/kernel/process_64.c       |  2 +-
 arch/x86/kernel/traps.c            |  4 ++--
 arch/x86/lib/delay.c               |  4 ++--
 arch/x86/xen/enlighten_pv.c        |  2 +-
 arch/x86/entry/entry_32.S          |  4 ++--
 arch/x86/entry/entry_64.S          |  8 ++++----
 16 files changed, 60 insertions(+), 48 deletions(-)

diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index 56aaffbbffd6..5dc269ff4085 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -56,9 +56,14 @@ struct cpu_entry_area {
 	char gdt[PAGE_SIZE];
 
 	/*
-	 * The GDT is just below cpu_tss and thus serves (on x86_64) as a
-	 * a read-only guard page for the SYSENTER stack at the bottom
-	 * of the TSS region.
+	 * The GDT is just below SYSENTER_stack and thus serves (on x86_64) as
+	 * a a read-only guard page.
+	 */
+	struct SYSENTER_stack_page SYSENTER_stack_page;
+
+	/*
+	 * On x86_64, the TSS is mapped RO.  On x86_32, it's mapped RW because
+	 * we need task switches to work, and task switches write to the TSS.
 	 */
 	struct tss_struct tss;
 
@@ -227,7 +232,7 @@ static inline struct cpu_entry_area *get_cpu_entry_area(int cpu)
 
 static inline struct SYSENTER_stack *cpu_SYSENTER_stack(int cpu)
 {
-	return &get_cpu_entry_area(cpu)->tss.SYSENTER_stack;
+	return &get_cpu_entry_area(cpu)->SYSENTER_stack_page.stack;
 }
 
 #endif /* !__ASSEMBLY__ */
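
The fixmap hunk above is where the double mapping is established: the kernel
keeps writing the TSS through its ordinary percpu mapping (renamed cpu_tss_rw
later in this patch), while the cpu_entry_area fixmap aliases the same
physical pages a second time, read-only on x86_64, and the hardware descriptor
points at the alias. The following standalone C program is only an
illustrative sketch of that aliasing idea, not part of the patch; every name
in it is invented, with two mmap() views of one memfd standing in for the
percpu and fixmap mappings.

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	int fd = memfd_create("tss", 0);	/* one physical backing page */

	if (fd < 0 || ftruncate(fd, page) < 0)
		return 1;

	/* plays the role of cpu_tss_rw: the view the kernel writes through */
	char *rw = mmap(NULL, page, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	/* plays the role of the cpu_entry_area TSS: a read-only alias */
	char *ro = mmap(NULL, page, PROT_READ, MAP_SHARED, fd, 0);

	strcpy(rw, "sp0 updated");		/* write via the RW alias... */
	printf("ro alias sees: %s\n", ro);	/* ...visible through the RO one */

	/*
	 * A write through ro would SIGSEGV here -- the same property the
	 * patch gives the hardware-visible TSS once its fixmap alias is
	 * mapped PAGE_KERNEL_RO.
	 */
	return 0;
}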
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 2d489a414a86..bccec7ed1676 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -334,13 +334,11 @@ struct SYSENTER_stack {
 	unsigned long		words[64];
 };
 
-struct tss_struct {
-	/*
-	 * Space for the temporary SYSENTER stack, used for SYSENTER
-	 * and the entry trampoline as well.
-	 */
-	struct SYSENTER_stack	SYSENTER_stack;
+struct SYSENTER_stack_page {
+	struct SYSENTER_stack stack;
+} __aligned(PAGE_SIZE);
 
+struct tss_struct {
 	/*
 	 * The fixed hardware portion.  This must not cross a page boundary
 	 * at risk of violating the SDM's advice and potentially triggering
@@ -357,7 +355,7 @@ struct tss_struct {
 	unsigned long		io_bitmap[IO_BITMAP_LONGS + 1];
 } __aligned(PAGE_SIZE);
 
-DECLARE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss);
+DECLARE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss_rw);
 
 /*
  * sizeof(unsigned long) coming from an extra "long" at the end
@@ -372,7 +370,8 @@ DECLARE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss);
 #ifdef CONFIG_X86_32
 DECLARE_PER_CPU(unsigned long, cpu_current_top_of_stack);
 #else
-#define cpu_current_top_of_stack cpu_tss.x86_tss.sp1
+/* The RO copy can't be accessed with this_cpu_xyz(), so use the RW copy. */
+#define cpu_current_top_of_stack cpu_tss_rw.x86_tss.sp1
 #endif
 
 /*
@@ -532,7 +531,7 @@ static inline void native_set_iopl_mask(unsigned mask)
 static inline void
 native_load_sp0(unsigned long sp0)
 {
-	this_cpu_write(cpu_tss.x86_tss.sp0, sp0);
+	this_cpu_write(cpu_tss_rw.x86_tss.sp0, sp0);
 }
 
 static inline void native_swapgs(void)
diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
index ca2fc84ad278..cfb6dfe4c457 100644
--- a/arch/x86/include/asm/switch_to.h
+++ b/arch/x86/include/asm/switch_to.h
@@ -78,10 +78,10 @@ do { \
 static inline void refresh_sysenter_cs(struct thread_struct *thread)
 {
 	/* Only happens when SEP is enabled, no need to test "SEP"arately: */
-	if (unlikely(this_cpu_read(cpu_tss.x86_tss.ss1) == thread->sysenter_cs))
+	if (unlikely(this_cpu_read(cpu_tss_rw.x86_tss.ss1) == thread->sysenter_cs))
 		return;
 
-	this_cpu_write(cpu_tss.x86_tss.ss1, thread->sysenter_cs);
+	this_cpu_write(cpu_tss_rw.x86_tss.ss1, thread->sysenter_cs);
 	wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
 }
 #endif
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index 760dd8a73927..6275b391ac61 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -214,7 +214,7 @@ static inline int arch_within_stack_frames(const void * const stack,
 #else /* !__ASSEMBLY__ */
 
 #ifdef CONFIG_X86_64
-# define cpu_current_top_of_stack (cpu_tss + TSS_sp1)
+# define cpu_current_top_of_stack (cpu_tss_rw + TSS_sp1)
 #endif
 
 #endif
diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
index 00ea20bfa857..40c3fab107ac 100644
--- a/arch/x86/kernel/asm-offsets.c
+++ b/arch/x86/kernel/asm-offsets.c
@@ -93,10 +93,9 @@ void common(void) {
 	BLANK();
 	DEFINE(PTREGS_SIZE, sizeof(struct pt_regs));
 
-	OFFSET(TSS_STRUCT_SYSENTER_stack, tss_struct, SYSENTER_stack);
-	DEFINE(SIZEOF_SYSENTER_stack, sizeof(struct SYSENTER_stack));
-
 	/* Layout info for cpu_entry_area */
 	OFFSET(CPU_ENTRY_AREA_tss, cpu_entry_area, tss);
 	OFFSET(CPU_ENTRY_AREA_entry_trampoline, cpu_entry_area, entry_trampoline);
+	OFFSET(CPU_ENTRY_AREA_SYSENTER_stack, cpu_entry_area, SYSENTER_stack_page);
+	DEFINE(SIZEOF_SYSENTER_stack, sizeof(struct SYSENTER_stack));
 }
diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c
index d09b161a3bd0..c4f23da7a0f0 100644
--- a/arch/x86/kernel/asm-offsets_32.c
+++ b/arch/x86/kernel/asm-offsets_32.c
@@ -49,8 +49,8 @@ void foo(void)
 	BLANK();
 
 	/* Offset from the sysenter stack to tss.sp0 */
-	DEFINE(TSS_sysenter_sp0, offsetof(struct tss_struct, x86_tss.sp0) -
-	       offsetofend(struct tss_struct, SYSENTER_stack));
+	DEFINE(TSS_sysenter_sp0, offsetof(struct cpu_entry_area, tss.x86_tss.sp0) -
+	       offsetofend(struct cpu_entry_area, SYSENTER_stack_page.stack));
 
 #ifdef CONFIG_CC_STACKPROTECTOR
 	BLANK();
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index f9541c48c290..7992e5a8076c 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -487,6 +487,9 @@ static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
 	[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]);
 #endif
 
+static DEFINE_PER_CPU_PAGE_ALIGNED(struct SYSENTER_stack_page,
+				   SYSENTER_stack_storage);
+
 static void __init
 set_percpu_fixmap_pages(int idx, void *ptr, int pages, pgprot_t prot)
 {
@@ -500,23 +503,29 @@ static void __init setup_cpu_entry_area(int cpu)
 #ifdef CONFIG_X86_64
 	extern char _entry_trampoline[];
 
-	/* On 64-bit systems, we use a read-only fixmap GDT. */
+	/* On 64-bit systems, we use a read-only fixmap GDT and TSS. */
 	pgprot_t gdt_prot = PAGE_KERNEL_RO;
+	pgprot_t tss_prot = PAGE_KERNEL_RO;
 #else
 	/*
 	 * On native 32-bit systems, the GDT cannot be read-only because
 	 * our double fault handler uses a task gate, and entering through
-	 * a task gate needs to change an available TSS to busy.  If the GDT
-	 * is read-only, that will triple fault.
+	 * a task gate needs to change an available TSS to busy.  If the
+	 * GDT is read-only, that will triple fault.  The TSS cannot be
+	 * read-only because the CPU writes to it on task switches.
 	 *
-	 * On Xen PV, the GDT must be read-only because the hypervisor requires
-	 * it.
+	 * On Xen PV, the GDT must be read-only because the hypervisor
+	 * requires it.
 	 */
 	pgprot_t gdt_prot = boot_cpu_has(X86_FEATURE_XENPV) ?
 		PAGE_KERNEL_RO : PAGE_KERNEL;
+	pgprot_t tss_prot = PAGE_KERNEL;
 #endif
 
 	__set_fixmap(get_cpu_entry_area_index(cpu, gdt), get_cpu_gdt_paddr(cpu), gdt_prot);
+	set_percpu_fixmap_pages(get_cpu_entry_area_index(cpu, SYSENTER_stack_page),
+				per_cpu_ptr(&SYSENTER_stack_storage, cpu), 1,
+				PAGE_KERNEL);
 
 	/*
 	 * The Intel SDM says (Volume 3, 7.2.1):
@@ -539,9 +548,9 @@ static void __init setup_cpu_entry_area(int cpu)
 		   offsetofend(struct tss_struct, x86_tss)) & PAGE_MASK);
 	BUILD_BUG_ON(sizeof(struct tss_struct) % PAGE_SIZE != 0);
 	set_percpu_fixmap_pages(get_cpu_entry_area_index(cpu, tss),
-				&per_cpu(cpu_tss, cpu),
+				&per_cpu(cpu_tss_rw, cpu),
 				sizeof(struct tss_struct) / PAGE_SIZE,
-				PAGE_KERNEL);
+				tss_prot);
 
 #ifdef CONFIG_X86_32
 	per_cpu(cpu_entry_area, cpu) = get_cpu_entry_area(cpu);
@@ -1297,7 +1306,7 @@ void enable_sep_cpu(void)
 		return;
 
 	cpu = get_cpu();
-	tss = &per_cpu(cpu_tss, cpu);
+	tss = &per_cpu(cpu_tss_rw, cpu);
 
 	/*
 	 * We cache MSR_IA32_SYSENTER_CS's value in the TSS's ss1 field --
@@ -1576,7 +1585,7 @@ void cpu_init(void)
 	if (cpu)
 		load_ucode_ap();
 
-	t = &per_cpu(cpu_tss, cpu);
+	t = &per_cpu(cpu_tss_rw, cpu);
 	oist = &per_cpu(orig_ist, cpu);
 
 #ifdef CONFIG_NUMA
@@ -1667,7 +1676,7 @@ void cpu_init(void)
 {
 	int cpu = smp_processor_id();
 	struct task_struct *curr = current;
-	struct tss_struct *t = &per_cpu(cpu_tss, cpu);
+	struct tss_struct *t = &per_cpu(cpu_tss_rw, cpu);
 
 	wait_for_master_cpu(cpu);
 
diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
index 4a613fed94b6..d13777d49d8b 100644
--- a/arch/x86/kernel/ioport.c
+++ b/arch/x86/kernel/ioport.c
@@ -66,7 +66,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
 	 * because the ->io_bitmap_max value must match the bitmap
 	 * contents:
 	 */
-	tss = &per_cpu(cpu_tss, get_cpu());
+	tss = &per_cpu(cpu_tss_rw, get_cpu());
 
 	if (turn_on)
 		bitmap_clear(t->io_bitmap_ptr, from, num);
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index ec758390d24e..3688a7b9d055 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -46,7 +46,7 @@
  * section. Since TSS's are completely CPU-local, we want them
  * on exact cacheline boundaries, to eliminate cacheline ping-pong.
  */
-__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = {
+__visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss_rw) = {
 	.x86_tss = {
 		/*
 		 * .sp0 is only used when entering ring 0 from a lower
@@ -81,7 +81,7 @@ __visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = {
 	.io_bitmap = { [0 ... IO_BITMAP_LONGS] = ~0 },
 #endif
 };
-EXPORT_PER_CPU_SYMBOL(cpu_tss);
+EXPORT_PER_CPU_SYMBOL(cpu_tss_rw);
 
 DEFINE_PER_CPU(bool, __tss_limit_invalid);
 EXPORT_PER_CPU_SYMBOL_GPL(__tss_limit_invalid);
@@ -110,7 +110,7 @@ void exit_thread(struct task_struct *tsk)
 	struct fpu *fpu = &t->fpu;
 
 	if (bp) {
-		struct tss_struct *tss = &per_cpu(cpu_tss, get_cpu());
+		struct tss_struct *tss = &per_cpu(cpu_tss_rw, get_cpu());
 
 		t->io_bitmap_ptr = NULL;
 		clear_thread_flag(TIF_IO_BITMAP);
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index c0d60420466c..784ff9147172 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -234,7 +234,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	struct fpu *prev_fpu = &prev->fpu;
 	struct fpu *next_fpu = &next->fpu;
 	int cpu = smp_processor_id();
-	struct tss_struct *tss = &per_cpu(cpu_tss, cpu);
+	struct tss_struct *tss = &per_cpu(cpu_tss_rw, cpu);
 
 	/* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
 
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 157f81816915..c75466232016 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -399,7 +399,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	struct fpu *prev_fpu = &prev->fpu;
 	struct fpu *next_fpu = &next->fpu;
 	int cpu = smp_processor_id();
-	struct tss_struct *tss = &per_cpu(cpu_tss, cpu);
+	struct tss_struct *tss = &per_cpu(cpu_tss_rw, cpu);
 
 	WARN_ON_ONCE(IS_ENABLED(CONFIG_DEBUG_ENTRY) &&
 		     this_cpu_read(irq_count) != -1);
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 2818c83892b3..14b462eefa17 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -376,7 +376,7 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
 		regs->cs == __KERNEL_CS &&
 		regs->ip == (unsigned long)native_irq_return_iret)
 	{
-		struct pt_regs *gpregs = (struct pt_regs *)this_cpu_read(cpu_tss.x86_tss.sp0) - 1;
+		struct pt_regs *gpregs = (struct pt_regs *)this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1;
 
 		/*
 		 * regs->sp points to the failing IRET frame on the
@@ -661,7 +661,7 @@ struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
 	 * exception came from the IRET target.
 	 */
 	struct bad_iret_stack *new_stack =
-		(struct bad_iret_stack *)this_cpu_read(cpu_tss.x86_tss.sp0) - 1;
+		(struct bad_iret_stack *)this_cpu_read(cpu_tss_rw.x86_tss.sp0) - 1;
 
 	/* Copy the IRET target to the new stack. */
 	memmove(&new_stack->regs.ip, (void *)s->regs.sp, 5*8);
diff --git a/arch/x86/lib/delay.c b/arch/x86/lib/delay.c
index 29df077cb089..cf2ac227c2ac 100644
--- a/arch/x86/lib/delay.c
+++ b/arch/x86/lib/delay.c
@@ -106,10 +106,10 @@ static void delay_mwaitx(unsigned long __loops)
 		delay = min_t(u64, MWAITX_MAX_LOOPS, loops);
 
 		/*
-		 * Use cpu_tss as a cacheline-aligned, seldomly
+		 * Use cpu_tss_rw as a cacheline-aligned, seldomly
 		 * accessed per-cpu variable as the monitor target.
 		 */
-		__monitorx(raw_cpu_ptr(&cpu_tss), 0, 0);
+		__monitorx(raw_cpu_ptr(&cpu_tss_rw), 0, 0);
 
 		/*
 		 * AMD, like Intel, supports the EAX hint and EAX=0xf
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
index 63c81154083b..3b76cf85e306 100644
--- a/arch/x86/xen/enlighten_pv.c
+++ b/arch/x86/xen/enlighten_pv.c
@@ -817,7 +817,7 @@ static void xen_load_sp0(unsigned long sp0)
 	mcs = xen_mc_entry(0);
 	MULTI_stack_switch(mcs.mc, __KERNEL_DS, sp0);
 	xen_mc_issue(PARAVIRT_LAZY_CPU);
-	this_cpu_write(cpu_tss.x86_tss.sp0, sp0);
+	this_cpu_write(cpu_tss_rw.x86_tss.sp0, sp0);
 }
 
 void xen_set_iopl_mask(unsigned mask)
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index 04abcd3f8e2d..3ef7800007f8 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -949,7 +949,7 @@ ENTRY(debug)
 
 	/* Are we currently on the SYSENTER stack? */
 	movl	PER_CPU_VAR(cpu_entry_area), %ecx
-	addl	$CPU_ENTRY_AREA_tss + TSS_STRUCT_SYSENTER_stack + SIZEOF_SYSENTER_stack, %ecx
+	addl	$CPU_ENTRY_AREA_SYSENTER_stack + SIZEOF_SYSENTER_stack, %ecx
 	subl	%eax, %ecx	/* ecx = (end of SYSENTER_stack) - esp */
 	cmpl	$SIZEOF_SYSENTER_stack, %ecx
 	jb	.Ldebug_from_sysenter_stack
@@ -993,7 +993,7 @@ ENTRY(nmi)
 
 	/* Are we currently on the SYSENTER stack? */
 	movl	PER_CPU_VAR(cpu_entry_area), %ecx
-	addl	$CPU_ENTRY_AREA_tss + TSS_STRUCT_SYSENTER_stack + SIZEOF_SYSENTER_stack, %ecx
+	addl	$CPU_ENTRY_AREA_SYSENTER_stack + SIZEOF_SYSENTER_stack, %ecx
 	subl	%eax, %ecx	/* ecx = (end of SYSENTER_stack) - esp */
 	cmpl	$SIZEOF_SYSENTER_stack, %ecx
 	jb	.Lnmi_from_sysenter_stack
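
Both entry_32.S hunks above perform the same computed range test: add the end
of the SYSENTER stack to the per-cpu cpu_entry_area pointer, subtract %esp,
and take one unsigned compare. The following C function is an illustrative
rendering of that test, not kernel code; the parameter names are invented,
and in the real thing the constants come from asm-offsets.

#include <stdbool.h>
#include <stdint.h>

static bool on_sysenter_stack(uintptr_t stack_end, uintptr_t stack_size,
			      uintptr_t sp)
{
	/*
	 * Mirrors:
	 *   subl %eax, %ecx                       ecx = stack_end - sp
	 *   cmpl $SIZEOF_SYSENTER_stack, %ecx
	 *   jb   .L..._from_sysenter_stack
	 *
	 * The subtraction wraps for sp above stack_end, so a single
	 * unsigned compare covers both bounds of the stack range.
	 */
	return stack_end - sp < stack_size;
}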
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 7a5e9edcdaf4..157860b3569f 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -153,7 +153,7 @@ END(native_usergs_sysret64)
 	_entry_trampoline - CPU_ENTRY_AREA_entry_trampoline(%rip)
 
 /* The top word of the SYSENTER stack is hot and is usable as scratch space. */
-#define RSP_SCRATCH	CPU_ENTRY_AREA_tss + TSS_STRUCT_SYSENTER_stack + \
+#define RSP_SCRATCH	CPU_ENTRY_AREA_SYSENTER_stack + \
 			SIZEOF_SYSENTER_stack - 8 + CPU_ENTRY_AREA
 
 ENTRY(entry_SYSCALL_64_trampoline)
@@ -389,7 +389,7 @@ syscall_return_via_sysret:
 	 * Save old stack pointer and switch to trampoline stack.
 	 */
 	movq	%rsp, %rdi
-	movq	PER_CPU_VAR(cpu_tss + TSS_sp0), %rsp
+	movq	PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp
 
 	pushq	RSP-RDI(%rdi)	/* RSP */
 	pushq	(%rdi)		/* RDI */
@@ -718,7 +718,7 @@ GLOBAL(swapgs_restore_regs_and_return_to_usermode)
 	 * Save old stack pointer and switch to trampoline stack.
 	 */
 	movq	%rsp, %rdi
-	movq	PER_CPU_VAR(cpu_tss + TSS_sp0), %rsp
+	movq	PER_CPU_VAR(cpu_tss_rw + TSS_sp0), %rsp
 
 	/* Copy the IRET frame to the trampoline stack. */
 	pushq	6*8(%rdi)	/* SS */
@@ -946,7 +946,7 @@ apicinterrupt IRQ_WORK_VECTOR irq_work_interrupt smp_irq_work_interrupt
 /*
  * Exception entry points.
  */
-#define CPU_TSS_IST(x) PER_CPU_VAR(cpu_tss) + (TSS_ist + ((x) - 1) * 8)
+#define CPU_TSS_IST(x) PER_CPU_VAR(cpu_tss_rw) + (TSS_ist + ((x) - 1) * 8)
 
 /*
  * Switch to the thread stack.  This is called with the IRET frame and
-- 
2.14.2
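
For reference, the RSP_SCRATCH macro in the first entry_64.S hunk composes
three generated constants: the offset of the SYSENTER stack within
cpu_entry_area, its size minus one word, and the fixmap base address of the
cpu_entry_area itself. The sketch below redoes that arithmetic standalone; the
base address is invented and the structs are trimmed to the fields the
computation touches -- the real values come from asm-offsets.c and the fixmap
layout, not from this program.

#include <stddef.h>
#include <stdio.h>

#define PAGE_SIZE 4096
#define CPU_ENTRY_AREA_BASE 0xfffffe0000001000UL	/* illustrative VA only */

struct SYSENTER_stack { unsigned long words[64]; };

struct SYSENTER_stack_page {
	struct SYSENTER_stack stack;
} __attribute__((aligned(PAGE_SIZE)));

struct cpu_entry_area {			/* trimmed to the fields used here */
	char gdt[PAGE_SIZE];
	struct SYSENTER_stack_page SYSENTER_stack_page;
};

int main(void)
{
	/* CPU_ENTRY_AREA_SYSENTER_stack + SIZEOF_SYSENTER_stack - 8 + CPU_ENTRY_AREA */
	unsigned long rsp_scratch =
		offsetof(struct cpu_entry_area, SYSENTER_stack_page)
		+ sizeof(struct SYSENTER_stack) - 8
		+ CPU_ENTRY_AREA_BASE;

	/* the top 8-byte word of the SYSENTER stack, used as SYSCALL scratch */
	printf("RSP_SCRATCH = %#lx\n", rsp_scratch);
	return 0;
}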