From b37d3e3a9b29caf78e2da6efba8959fc912e47a0 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra <peterz@infradead.org>
Date: Thu, 14 Dec 2017 12:27:30 +0100
Subject: [PATCH 169/233] x86/ldt: Rework locking
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

CVE-2017-5754

The LDT is duplicated on fork() and on exec(), which is wrong as exec()
should start from a clean state, i.e. without LDT. To fix this, the LDT
duplication code will be moved into arch_dup_mmap(), which is only called
for fork().

This introduces a locking problem. arch_dup_mmap() holds the mmap_sem of
the parent process, but the LDT duplication code needs to acquire
mm->context.lock to access the LDT data safely, which is the reverse lock
order of write_ldt(), where mmap_sem nests into context.lock.
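
[ Editor's illustration, not part of the upstream changelog: the ABBA
  inversion can be sketched in plain C with pthread mutexes standing in
  for mmap_sem and context.lock; trylock keeps the demo from actually
  deadlocking. ]

/* abba.c -- hypothetical sketch of the lock-order inversion above */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t mmap_sem     = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t context_lock = PTHREAD_MUTEX_INITIALIZER;

/* fork() path: mmap_sem already held, then context.lock is needed */
static void *dup_mmap_path(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&mmap_sem);
	usleep(10000);			/* widen the race window */
	if (pthread_mutex_trylock(&context_lock))
		puts("dup_mmap path: blocked on context.lock");
	else
		pthread_mutex_unlock(&context_lock);
	pthread_mutex_unlock(&mmap_sem);
	return NULL;
}

/* old write_ldt() path: context.lock first, mmap_sem taken inside */
static void *write_ldt_path(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&context_lock);
	usleep(10000);
	if (pthread_mutex_trylock(&mmap_sem))
		puts("write_ldt path: blocked on mmap_sem");
	else
		pthread_mutex_unlock(&mmap_sem);
	pthread_mutex_unlock(&context_lock);
	return NULL;
}

int main(void)
{
	pthread_t a, b;
	pthread_create(&a, NULL, dup_mmap_path, NULL);
	pthread_create(&b, NULL, write_ldt_path, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}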

Solve this by introducing a new rw semaphore which serializes the
read/write_ldt() syscall operations, and use context.lock to protect the
actual installation of the LDT descriptor.

So context.lock stabilizes mm->context.ldt and can nest inside the new
semaphore or mmap_sem.

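[ Editor's sketch of the resulting two-level scheme, as a user-space
  analogue rather than kernel code: pthread_rwlock_t stands in for
  ldt_usr_sem, a pthread mutex for context.lock. ]

/* sketch.c -- hypothetical analogue of the new locking scheme */
#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

struct ldt_table { int nr_entries; };

static pthread_rwlock_t ldt_usr_sem  = PTHREAD_RWLOCK_INITIALIZER;
static pthread_mutex_t  context_lock = PTHREAD_MUTEX_INITIALIZER;
static _Atomic(struct ldt_table *) cur_ldt;

/* context_lock only guards publishing the pointer, so other paths
 * (e.g. a fork-time copy done under mmap_sem) can take it as well */
static void install_ldt(struct ldt_table *new_ldt)
{
	pthread_mutex_lock(&context_lock);
	atomic_store_explicit(&cur_ldt, new_ldt, memory_order_release);
	pthread_mutex_unlock(&context_lock);
}

/* read_ldt() analogue: shared acquisition of the outer semaphore */
static struct ldt_table *read_ldt(void)
{
	pthread_rwlock_rdlock(&ldt_usr_sem);
	struct ldt_table *t = atomic_load_explicit(&cur_ldt,
						   memory_order_acquire);
	pthread_rwlock_unlock(&ldt_usr_sem);
	return t;
}

/* write_ldt() analogue: exclusive outer lock, short inner lock */
static void write_ldt(int nr)
{
	pthread_rwlock_wrlock(&ldt_usr_sem);
	struct ldt_table *t = malloc(sizeof(*t));
	t->nr_entries = nr;
	install_ldt(t);		/* context_lock nests inside ldt_usr_sem */
	pthread_rwlock_unlock(&ldt_usr_sem);
}

int main(void)
{
	write_ldt(1);
	return read_ldt()->nr_entries == 1 ? 0 : 1;
}
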
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Andy Lutomirsky <luto@kernel.org>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Borislav Petkov <bpetkov@suse.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: David Laight <David.Laight@aculab.com>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Eduardo Valentin <eduval@amazon.com>
Cc: Greg KH <gregkh@linuxfoundation.org>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Will Deacon <will.deacon@arm.com>
Cc: aliguori@amazon.com
Cc: dan.j.williams@intel.com
Cc: hughd@google.com
Cc: keescook@google.com
Cc: kirill.shutemov@linux.intel.com
Cc: linux-mm@kvack.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
(cherry picked from commit c2b3496bb30bd159e9de42e5c952e1f1f33c9a77)
Signed-off-by: Andy Whitcroft <apw@canonical.com>
Signed-off-by: Kleber Sacilotto de Souza <kleber.souza@canonical.com>
(cherry picked from commit bf7ee649ccc71ef9acb713a00472886c19e78684)
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
---
 arch/x86/include/asm/mmu.h         |  4 +++-
 arch/x86/include/asm/mmu_context.h |  2 ++
 arch/x86/kernel/ldt.c              | 33 +++++++++++++++++++++------------
 3 files changed, 26 insertions(+), 13 deletions(-)

diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
index bb8c597c2248..2d7e852b2dad 100644
--- a/arch/x86/include/asm/mmu.h
+++ b/arch/x86/include/asm/mmu.h
@@ -2,6 +2,7 @@
 #define _ASM_X86_MMU_H
 
 #include <linux/spinlock.h>
+#include <linux/rwsem.h>
 #include <linux/mutex.h>
 #include <linux/atomic.h>
 
@@ -26,7 +27,8 @@ typedef struct {
 	atomic64_t tlb_gen;
 
 #ifdef CONFIG_MODIFY_LDT_SYSCALL
-	struct ldt_struct *ldt;
+	struct rw_semaphore	ldt_usr_sem;
+	struct ldt_struct	*ldt;
 #endif
 
 #ifdef CONFIG_X86_64
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 9be54d9c04c4..dd865c2acb9d 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -131,6 +131,8 @@ void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk);
 static inline int init_new_context(struct task_struct *tsk,
 				   struct mm_struct *mm)
 {
+	mutex_init(&mm->context.lock);
+
 	mm->context.ctx_id = atomic64_inc_return(&last_mm_ctx_id);
 	atomic64_set(&mm->context.tlb_gen, 0);
 
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index b8be2413cb74..3e7208f0c350 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -4,6 +4,11 @@
  * Copyright (C) 2002 Andi Kleen
  *
  * This handles calls from both 32bit and 64bit mode.
+ *
+ * Lock order:
+ *	context.ldt_usr_sem
+ *	  mmap_sem
+ *	    context.lock
 */
 
 #include <linux/errno.h>
@@ -41,7 +46,7 @@ static void refresh_ldt_segments(void)
 #endif
 }
 
-/* context.lock is held for us, so we don't need any locking. */
+/* context.lock is held by the task which issued the smp function call */
 static void flush_ldt(void *__mm)
 {
 	struct mm_struct *mm = __mm;
@@ -98,15 +103,17 @@ static void finalize_ldt_struct(struct ldt_struct *ldt)
 	paravirt_alloc_ldt(ldt->entries, ldt->nr_entries);
 }
 
-/* context.lock is held */
-static void install_ldt(struct mm_struct *current_mm,
-			struct ldt_struct *ldt)
+static void install_ldt(struct mm_struct *mm, struct ldt_struct *ldt)
 {
+	mutex_lock(&mm->context.lock);
+
 	/* Synchronizes with READ_ONCE in load_mm_ldt. */
-	smp_store_release(&current_mm->context.ldt, ldt);
+	smp_store_release(&mm->context.ldt, ldt);
 
-	/* Activate the LDT for all CPUs using current_mm. */
-	on_each_cpu_mask(mm_cpumask(current_mm), flush_ldt, current_mm, true);
+	/* Activate the LDT for all CPUs using current's mm. */
+	on_each_cpu_mask(mm_cpumask(mm), flush_ldt, mm, true);
+
+	mutex_unlock(&mm->context.lock);
 }
 
 static void free_ldt_struct(struct ldt_struct *ldt)
@@ -132,7 +139,8 @@ int init_new_context_ldt(struct task_struct *tsk, struct mm_struct *mm)
 	struct mm_struct *old_mm;
 	int retval = 0;
 
-	mutex_init(&mm->context.lock);
+	init_rwsem(&mm->context.ldt_usr_sem);
+
 	old_mm = current->mm;
 	if (!old_mm) {
 		mm->context.ldt = NULL;
@@ -179,7 +187,7 @@ static int read_ldt(void __user *ptr, unsigned long bytecount)
 	unsigned long entries_size;
 	int retval;
 
-	mutex_lock(&mm->context.lock);
+	down_read(&mm->context.ldt_usr_sem);
 
 	if (!mm->context.ldt) {
 		retval = 0;
@@ -208,7 +216,7 @@ static int read_ldt(void __user *ptr, unsigned long bytecount)
 	retval = bytecount;
 
 out_unlock:
-	mutex_unlock(&mm->context.lock);
+	up_read(&mm->context.ldt_usr_sem);
 	return retval;
 }
 
@@ -268,7 +276,8 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
 		ldt.avl = 0;
 	}
 
-	mutex_lock(&mm->context.lock);
+	if (down_write_killable(&mm->context.ldt_usr_sem))
+		return -EINTR;
 
 	old_ldt = mm->context.ldt;
 	old_nr_entries = old_ldt ? old_ldt->nr_entries : 0;
@@ -290,7 +299,7 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
 	error = 0;
 
 out_unlock:
-	mutex_unlock(&mm->context.lock);
+	up_write(&mm->context.ldt_usr_sem);
 out:
 	return error;
 }
-- 
2.14.2

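[ Usage note, editor's sketch, not part of the patch: the read_ldt() and
  write_ldt() paths serialized by ldt_usr_sem above are reached from user
  space through modify_ldt(2). A minimal caller, assuming x86 Linux: ]

/* ldt_demo.c -- exercise the syscall paths guarded by ldt_usr_sem */
#include <asm/ldt.h>		/* struct user_desc, LDT_ENTRIES, LDT_ENTRY_SIZE */
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct user_desc desc;
	unsigned char buf[LDT_ENTRIES * LDT_ENTRY_SIZE];
	long ret;

	/* a flat 32-bit data segment in LDT slot 0 */
	memset(&desc, 0, sizeof(desc));
	desc.entry_number   = 0;
	desc.limit          = 0xfffff;
	desc.seg_32bit      = 1;
	desc.limit_in_pages = 1;

	/* func 0x11: write an entry -> write_ldt(), down_write(ldt_usr_sem) */
	ret = syscall(SYS_modify_ldt, 0x11, &desc, sizeof(desc));
	printf("write_ldt: %ld\n", ret);

	/* func 0: read the LDT back -> read_ldt(), down_read(ldt_usr_sem) */
	ret = syscall(SYS_modify_ldt, 0, buf, sizeof(buf));
	printf("read_ldt: %ld bytes\n", ret);
	return 0;
}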