From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Andy Lutomirski <luto@kernel.org>
Date: Mon, 4 Dec 2017 15:07:21 +0100
Subject: [PATCH] x86/entry/64: Separate cpu_current_top_of_stack from TSS.sp0
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

CVE-2017-5754

On 64-bit kernels, we used to assume that TSS.sp0 was the current
top of stack. With the addition of an entry trampoline, this will
no longer be the case. Store the current top of stack in TSS.sp1,
which is otherwise unused but shares the same cacheline.

Signed-off-by: Andy Lutomirski <luto@kernel.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov <bp@suse.de>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Borislav Petkov <bpetkov@suse.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: David Laight <David.Laight@aculab.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Eduardo Valentin <eduval@amazon.com>
Cc: Greg KH <gregkh@linuxfoundation.org>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: aliguori@amazon.com
Cc: daniel.gruss@iaik.tugraz.at
Cc: hughd@google.com
Cc: keescook@google.com
Link: https://lkml.kernel.org/r/20171204150606.050864668@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
(cherry picked from commit 9aaefe7b59ae00605256a7d6bd1c1456432495fc)
Signed-off-by: Andy Whitcroft <apw@canonical.com>
Signed-off-by: Kleber Sacilotto de Souza <kleber.souza@canonical.com>
(cherry picked from commit 281be4ff07f7c67dc2a9c75ab24a7b9ff25544ae)
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
---
 arch/x86/include/asm/processor.h   | 18 +++++++++++++-----
 arch/x86/include/asm/thread_info.h |  2 +-
 arch/x86/kernel/asm-offsets_64.c   |  1 +
 arch/x86/kernel/process.c          | 10 ++++++++++
 arch/x86/kernel/process_64.c       |  1 +
 5 files changed, 26 insertions(+), 6 deletions(-)

diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 55885465c3a7..1bfe4bad797a 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -303,7 +303,13 @@ struct x86_hw_tss {
 struct x86_hw_tss {
 	u32			reserved1;
 	u64			sp0;
+
+	/*
+	 * We store cpu_current_top_of_stack in sp1 so it's always accessible.
+	 * Linux does not use ring 1, so sp1 is not otherwise needed.
+	 */
 	u64			sp1;
+
 	u64			sp2;
 	u64			reserved2;
 	u64			ist[7];
@@ -362,6 +368,8 @@ DECLARE_PER_CPU_PAGE_ALIGNED(struct tss_struct, cpu_tss);
 
 #ifdef CONFIG_X86_32
 DECLARE_PER_CPU(unsigned long, cpu_current_top_of_stack);
+#else
+#define cpu_current_top_of_stack cpu_tss.x86_tss.sp1
 #endif
 
 /*
@@ -533,12 +541,12 @@ static inline void native_swapgs(void)
 
 static inline unsigned long current_top_of_stack(void)
 {
-#ifdef CONFIG_X86_64
-	return this_cpu_read_stable(cpu_tss.x86_tss.sp0);
-#else
-	/* sp0 on x86_32 is special in and around vm86 mode. */
+	/*
+	 * We can't read directly from tss.sp0: sp0 on x86_32 is special in
+	 * and around vm86 mode and sp0 on x86_64 is special because of the
+	 * entry trampoline.
+	 */
 	return this_cpu_read_stable(cpu_current_top_of_stack);
-#endif
 }
 
 static inline bool on_thread_stack(void)
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index ec8ef3bbb7dc..760dd8a73927 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -214,7 +214,7 @@ static inline int arch_within_stack_frames(const void * const stack,
 #else /* !__ASSEMBLY__ */
 
 #ifdef CONFIG_X86_64
-# define cpu_current_top_of_stack (cpu_tss + TSS_sp0)
+# define cpu_current_top_of_stack (cpu_tss + TSS_sp1)
 #endif
 
 #endif
diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
index c21a5315b38e..048f68ff3396 100644
--- a/arch/x86/kernel/asm-offsets_64.c
+++ b/arch/x86/kernel/asm-offsets_64.c
@@ -65,6 +65,7 @@ int main(void)
 
 	OFFSET(TSS_ist, tss_struct, x86_tss.ist);
 	OFFSET(TSS_sp0, tss_struct, x86_tss.sp0);
+	OFFSET(TSS_sp1, tss_struct, x86_tss.sp1);
 	BLANK();
 
 #ifdef CONFIG_CC_STACKPROTECTOR
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index aa86e810fb54..407fc37a8718 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -55,6 +55,16 @@ __visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = {
 	 * Poison it.
 	 */
 	.sp0 = (1UL << (BITS_PER_LONG-1)) + 1,
+
+#ifdef CONFIG_X86_64
+	/*
+	 * .sp1 is cpu_current_top_of_stack. The init task never
+	 * runs user code, but cpu_current_top_of_stack should still
+	 * be well defined before the first context switch.
+	 */
+	.sp1 = TOP_OF_INIT_STACK,
+#endif
+
 #ifdef CONFIG_X86_32
 	.ss0 = __KERNEL_DS,
 	.ss1 = __KERNEL_CS,
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 01b119bebb68..157f81816915 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -461,6 +461,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 	 * Switch the PDA and FPU contexts.
 	 */
 	this_cpu_write(current_task, next_p);
+	this_cpu_write(cpu_current_top_of_stack, task_top_of_stack(next_p));
 
 	/* Reload sp0. */
 	update_sp0(next_p);
-- 
2.14.2
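
Editor's note (not part of the patch above): the small standalone C sketch below only models the indirection the patch introduces, so the idea can be compiled and poked at in userspace. Every name in it (model_hw_tss, model_tss, model_top_of_stack, model_switch_to) is invented for illustration and does not exist in the kernel; the real counterparts are struct x86_hw_tss, cpu_tss, current_top_of_stack() and the __switch_to() hunk shown in the diff. The point it demonstrates: the top-of-stack accessor reads sp1, the context-switch path keeps sp1 current, and sp0 is free to mean something else once the entry trampoline arrives.

/* Toy userspace model of the sp0/sp1 split; illustrative only. */
#include <stdint.h>
#include <stdio.h>

struct model_hw_tss {
	uint64_t sp0;	/* no longer assumed to be the task stack top */
	uint64_t sp1;	/* holds the current top of the task stack */
};

static struct model_hw_tss model_tss;	/* stands in for the per-CPU TSS */

/* Analogue of current_top_of_stack(): always read sp1, never sp0. */
static uint64_t model_top_of_stack(void)
{
	return model_tss.sp1;
}

/* Analogue of the __switch_to() change: publish the incoming task's stack top. */
static void model_switch_to(uint64_t next_stack_top)
{
	model_tss.sp1 = next_stack_top;
}

int main(void)
{
	model_tss.sp0 = (1ULL << 63) + 1;	/* poison, echoing the cpu_tss initializer */
	model_switch_to(0xffffc90000004000ULL);	/* made-up stack-top value for the demo */
	printf("top of stack = %#llx\n",
	       (unsigned long long)model_top_of_stack());
	return 0;
}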