From a97c6afa806d4fe6475a2d9215ff57367ee34b72 Mon Sep 17 00:00:00 2001
From: Andy Lutomirski <luto@kernel.org>
Date: Thu, 2 Nov 2017 00:59:16 -0700
Subject: [PATCH 104/241] x86/entry/64: Remove thread_struct::sp0
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

CVE-2017-5754

On x86_64, we can easily calculate sp0 when needed instead of
storing it in thread_struct.

On x86_32, a similar cleanup would be possible, but it would require
cleaning up the vm86 code first, and that can wait for a later
cleanup series.

Signed-off-by: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bpetkov@suse.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/719cd9c66c548c4350d98a90f050aee8b17f8919.1509609304.git.luto@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
(cherry picked from commit d375cf1530595e33961a8844192cddab913650e3)
Signed-off-by: Andy Whitcroft <apw@canonical.com>
Signed-off-by: Kleber Sacilotto de Souza <kleber.souza@canonical.com>
(cherry picked from commit 4910af19c69a87e9432467f4d7cb78da5fbcc30a)
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
---
 arch/x86/include/asm/compat.h    |  1 +
 arch/x86/include/asm/processor.h | 28 +++++++++-------------------
 arch/x86/include/asm/switch_to.h |  6 ++++++
 arch/x86/kernel/process_64.c     |  1 -
 4 files changed, 16 insertions(+), 20 deletions(-)

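Note (not part of the commit message; this region between the diffstat and the
first hunk is ignored by git-am): the new x86_64 task_pt_regs() below derives
the saved-register frame, and therefore sp0, purely from the task's stack
page. A minimal standalone sketch of that arithmetic follows; THREAD_SIZE,
TOP_OF_KERNEL_STACK_PADDING and struct pt_regs are stand-ins rather than the
kernel's real definitions, and the sketch_* helpers are hypothetical names.

    /* Standalone illustration, not kernel code: with a fixed-size,
     * aligned stack, the pt_regs slot and sp0 are pure arithmetic on
     * the stack base, so nothing needs to be cached in thread_struct. */
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define THREAD_SIZE (16 * 1024)        /* stand-in: 16 KiB kernel stack */
    #define TOP_OF_KERNEL_STACK_PADDING 0  /* 0 on x86_64; nonzero on x86_32 */

    struct pt_regs { uint64_t reg[21]; };  /* stand-in for the real layout */

    /* Mirrors the new task_pt_regs(): top of the stack page, minus the
     * padding, minus one struct pt_regs. */
    static struct pt_regs *sketch_task_pt_regs(void *stack_page)
    {
        uintptr_t top = (uintptr_t)stack_page + THREAD_SIZE
                        - TOP_OF_KERNEL_STACK_PADDING;
        return (struct pt_regs *)top - 1;
    }

    /* Mirrors task_top_of_stack(), which update_sp0() now loads on
     * x86_64 instead of reading task->thread.sp0. */
    static uintptr_t sketch_task_top_of_stack(void *stack_page)
    {
        return (uintptr_t)(sketch_task_pt_regs(stack_page) + 1);
    }

    int main(void)
    {
        void *stack = aligned_alloc(THREAD_SIZE, THREAD_SIZE);

        printf("stack base : %p\n", stack);
        printf("pt_regs at : %p\n", (void *)sketch_task_pt_regs(stack));
        printf("sp0        : %#jx\n", (uintmax_t)sketch_task_top_of_stack(stack));
        free(stack);
        return 0;
    }

Since the padding is zero on x86_64, sp0 is exactly task_top_of_stack(task);
only x86_32, whose vm86 code still depends on the cached value, keeps
thread_struct::sp0.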
diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
index 5343c19814b3..948b6d8ec46f 100644
--- a/arch/x86/include/asm/compat.h
+++ b/arch/x86/include/asm/compat.h
@@ -6,6 +6,7 @@
  */
 #include <linux/types.h>
 #include <linux/sched.h>
+#include <linux/sched/task_stack.h>
 #include <asm/processor.h>
 #include <asm/user32.h>
 #include <asm/unistd.h>
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index f83fbf1b6dd9..cec9a329c0f1 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -423,7 +423,9 @@ typedef struct {
 struct thread_struct {
 	/* Cached TLS descriptors: */
 	struct desc_struct	tls_array[GDT_ENTRY_TLS_ENTRIES];
+#ifdef CONFIG_X86_32
 	unsigned long		sp0;
+#endif
 	unsigned long		sp;
 #ifdef CONFIG_X86_32
 	unsigned long		sysenter_cs;
@@ -790,6 +792,13 @@ static inline void spin_lock_prefetch(const void *x)
 
 #define task_top_of_stack(task) ((unsigned long)(task_pt_regs(task) + 1))
 
+#define task_pt_regs(task) \
+({ \
+	unsigned long __ptr = (unsigned long)task_stack_page(task); \
+	__ptr += THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING; \
+	((struct pt_regs *)__ptr) - 1; \
+})
+
 #ifdef CONFIG_X86_32
 /*
  * User space process size: 3GB (default).
@@ -807,23 +816,6 @@ static inline void spin_lock_prefetch(const void *x)
 	.addr_limit		= KERNEL_DS,				\
 }
 
-/*
- * TOP_OF_KERNEL_STACK_PADDING reserves 8 bytes on top of the ring0 stack.
- * This is necessary to guarantee that the entire "struct pt_regs"
- * is accessible even if the CPU haven't stored the SS/ESP registers
- * on the stack (interrupt gate does not save these registers
- * when switching to the same priv ring).
- * Therefore beware: accessing the ss/esp fields of the
- * "struct pt_regs" is possible, but they may contain the
- * completely wrong values.
- */
-#define task_pt_regs(task) \
-({ \
-	unsigned long __ptr = (unsigned long)task_stack_page(task); \
-	__ptr += THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING; \
-	((struct pt_regs *)__ptr) - 1; \
-})
-
 #define KSTK_ESP(task)		(task_pt_regs(task)->sp)
 
 #else
@@ -853,11 +845,9 @@ static inline void spin_lock_prefetch(const void *x)
 #define STACK_TOP_MAX		TASK_SIZE_MAX
 
 #define INIT_THREAD  {						\
-	.sp0			= TOP_OF_INIT_STACK,		\
 	.addr_limit		= KERNEL_DS,			\
 }
 
-#define task_pt_regs(tsk)	((struct pt_regs *)(tsk)->thread.sp0 - 1)
 extern unsigned long KSTK_ESP(struct task_struct *task);
 
 #endif /* CONFIG_X86_64 */
diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
index 54e64d909725..010cd6e4eafc 100644
--- a/arch/x86/include/asm/switch_to.h
+++ b/arch/x86/include/asm/switch_to.h
@@ -1,6 +1,8 @@
 #ifndef _ASM_X86_SWITCH_TO_H
 #define _ASM_X86_SWITCH_TO_H
 
+#include <linux/sched/task_stack.h>
+
 struct task_struct; /* one of the stranger aspects of C forward declarations */
 
 struct task_struct *__switch_to_asm(struct task_struct *prev,
@@ -87,7 +89,11 @@ static inline void refresh_sysenter_cs(struct thread_struct *thread)
 /* This is used when switching tasks or entering/exiting vm86 mode. */
 static inline void update_sp0(struct task_struct *task)
 {
+#ifdef CONFIG_X86_32
 	load_sp0(task->thread.sp0);
+#else
+	load_sp0(task_top_of_stack(task));
+#endif
 }
 
 #endif /* _ASM_X86_SWITCH_TO_H */
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 8a748e17bf6e..b08b9b6c40eb 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -275,7 +275,6 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long sp,
 	struct inactive_task_frame *frame;
 	struct task_struct *me = current;
 
-	p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE;
 	childregs = task_pt_regs(p);
 	fork_frame = container_of(childregs, struct fork_frame, regs);
 	frame = &fork_frame->frame;
-- 
2.14.2
