From 9e6bc95ae1c4b92d9838ee8d2ee8b0e65f4e4469 Mon Sep 17 00:00:00 2001
From: Ricardo Neri <ricardo.neri-calderon@linux.intel.com>
Date: Fri, 27 Oct 2017 13:25:28 -0700
Subject: [PATCH 083/241] x86/mm: Relocate page fault error codes to traps.h
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

CVE-2017-5754

Up to this point, only fault.c used the definitions of the page fault error
codes. Thus, it made sense to keep them within such file. Other portions of
code might be interested in those definitions too. For instance, the User-
Mode Instruction Prevention emulation code will use such definitions to
emulate a page fault when it is unable to successfully copy the results
of the emulated instructions to user space.

While relocating the error code enumeration, the prefix X86_ is used to
make it consistent with the rest of the definitions in traps.h. Of course,
code using the enumeration had to be updated as well. No functional changes
were performed.

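For illustration only (not part of the cherry-picked commit), a minimal
user-space sketch that mirrors the relocated enum and decodes an example
error code could look like the following; the enum values are taken verbatim
from the traps.h hunk below, everything else is hypothetical:

    /* standalone sketch mirroring the enum added to asm/traps.h */
    #include <stdio.h>

    enum x86_pf_error_code {
        X86_PF_PROT  = 1 << 0,
        X86_PF_WRITE = 1 << 1,
        X86_PF_USER  = 1 << 2,
        X86_PF_RSVD  = 1 << 3,
        X86_PF_INSTR = 1 << 4,
        X86_PF_PK    = 1 << 5,
    };

    int main(void)
    {
        /* e.g. a user-mode write to a not-present page */
        unsigned long error_code = X86_PF_USER | X86_PF_WRITE;

        printf("%s, %s access, %s\n",
               (error_code & X86_PF_PROT) ? "protection fault" : "no page found",
               (error_code & X86_PF_WRITE) ? "write" : "read",
               (error_code & X86_PF_USER) ? "user mode" : "kernel mode");
        return 0;
    }
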
Signed-off-by: Ricardo Neri <ricardo.neri-calderon@linux.intel.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Andy Lutomirski <luto@kernel.org>
Cc: "Michael S. Tsirkin" <mst@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: ricardo.neri@intel.com
Cc: Paul Gortmaker <paul.gortmaker@windriver.com>
Cc: Huang Rui <ray.huang@amd.com>
Cc: Shuah Khan <shuah@kernel.org>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Jiri Slaby <jslaby@suse.cz>
Cc: "Ravi V. Shankar" <ravi.v.shankar@intel.com>
Cc: Chris Metcalf <cmetcalf@mellanox.com>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Josh Poimboeuf <jpoimboe@redhat.com>
Cc: Chen Yucong <slaoub@gmail.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Masami Hiramatsu <mhiramat@kernel.org>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Link: https://lkml.kernel.org/r/1509135945-13762-2-git-send-email-ricardo.neri-calderon@linux.intel.com

(cherry picked from commit 1067f030994c69ca1fba8c607437c8895dcf8509)
Signed-off-by: Andy Whitcroft <apw@canonical.com>
Signed-off-by: Kleber Sacilotto de Souza <kleber.souza@canonical.com>
(cherry picked from commit a85a07ab9111e3c78797c20b60a664dbd5db4981)
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
---
 arch/x86/include/asm/traps.h | 18 +++++++++
 arch/x86/mm/fault.c | 88 +++++++++++++++++---------------------------
 2 files changed, 52 insertions(+), 54 deletions(-)

diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h
index feb89dbe359d..8e5bf86f87e5 100644
--- a/arch/x86/include/asm/traps.h
+++ b/arch/x86/include/asm/traps.h
@@ -162,4 +162,22 @@ enum {
 X86_TRAP_IRET = 32, /* 32, IRET Exception */
 };
 
+/*
+ * Page fault error code bits:
+ *
+ * bit 0 == 0: no page found 1: protection fault
+ * bit 1 == 0: read access 1: write access
+ * bit 2 == 0: kernel-mode access 1: user-mode access
+ * bit 3 == 1: use of reserved bit detected
+ * bit 4 == 1: fault was an instruction fetch
+ * bit 5 == 1: protection keys block access
+ */
+enum x86_pf_error_code {
+ X86_PF_PROT = 1 << 0,
+ X86_PF_WRITE = 1 << 1,
+ X86_PF_USER = 1 << 2,
+ X86_PF_RSVD = 1 << 3,
+ X86_PF_INSTR = 1 << 4,
+ X86_PF_PK = 1 << 5,
+};
 #endif /* _ASM_X86_TRAPS_H */
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 4ee9eb916826..d3a57e7ad311 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -28,26 +28,6 @@
 #define CREATE_TRACE_POINTS
 #include <asm/trace/exceptions.h>
 
-/*
- * Page fault error code bits:
- *
- * bit 0 == 0: no page found 1: protection fault
- * bit 1 == 0: read access 1: write access
- * bit 2 == 0: kernel-mode access 1: user-mode access
- * bit 3 == 1: use of reserved bit detected
- * bit 4 == 1: fault was an instruction fetch
- * bit 5 == 1: protection keys block access
- */
-enum x86_pf_error_code {
-
- PF_PROT = 1 << 0,
- PF_WRITE = 1 << 1,
- PF_USER = 1 << 2,
- PF_RSVD = 1 << 3,
- PF_INSTR = 1 << 4,
- PF_PK = 1 << 5,
-};
-
 /*
 * Returns 0 if mmiotrace is disabled, or if the fault is not
 * handled by mmiotrace:
@@ -149,7 +129,7 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
 * If it was a exec (instruction fetch) fault on NX page, then
 * do not ignore the fault:
 */
- if (error_code & PF_INSTR)
+ if (error_code & X86_PF_INSTR)
 return 0;
 
 instr = (void *)convert_ip_to_linear(current, regs);
@@ -179,7 +159,7 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
 * siginfo so userspace can discover which protection key was set
 * on the PTE.
 *
- * If we get here, we know that the hardware signaled a PF_PK
+ * If we get here, we know that the hardware signaled a X86_PF_PK
 * fault and that there was a VMA once we got in the fault
 * handler. It does *not* guarantee that the VMA we find here
 * was the one that we faulted on.
@@ -204,7 +184,7 @@ static void fill_sig_info_pkey(int si_code, siginfo_t *info, u32 *pkey)
 /*
 * force_sig_info_fault() is called from a number of
 * contexts, some of which have a VMA and some of which
- * do not. The PF_PK handing happens after we have a
+ * do not. The X86_PF_PK handing happens after we have a
 * valid VMA, so we should never reach this without a
 * valid VMA.
 */
@@ -693,7 +673,7 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
 if (!oops_may_print())
 return;
 
- if (error_code & PF_INSTR) {
+ if (error_code & X86_PF_INSTR) {
 unsigned int level;
 pgd_t *pgd;
 pte_t *pte;
@@ -775,7 +755,7 @@ no_context(struct pt_regs *regs, unsigned long error_code,
 */
 if (current->thread.sig_on_uaccess_err && signal) {
 tsk->thread.trap_nr = X86_TRAP_PF;
- tsk->thread.error_code = error_code | PF_USER;
+ tsk->thread.error_code = error_code | X86_PF_USER;
 tsk->thread.cr2 = address;
 
 /* XXX: hwpoison faults will set the wrong code. */
@@ -894,7 +874,7 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
 struct task_struct *tsk = current;
 
 /* User mode accesses just cause a SIGSEGV */
- if (error_code & PF_USER) {
+ if (error_code & X86_PF_USER) {
 /*
 * It's possible to have interrupts off here:
 */
@@ -915,7 +895,7 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
 * Instruction fetch faults in the vsyscall page might need
 * emulation.
 */
- if (unlikely((error_code & PF_INSTR) &&
+ if (unlikely((error_code & X86_PF_INSTR) &&
 ((address & ~0xfff) == VSYSCALL_ADDR))) {
 if (emulate_vsyscall(regs, address))
 return;
@@ -928,7 +908,7 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
 * are always protection faults.
 */
 if (address >= TASK_SIZE_MAX)
- error_code |= PF_PROT;
+ error_code |= X86_PF_PROT;
 
 if (likely(show_unhandled_signals))
 show_signal_msg(regs, error_code, address, tsk);
@@ -989,11 +969,11 @@ static inline bool bad_area_access_from_pkeys(unsigned long error_code,
 
 if (!boot_cpu_has(X86_FEATURE_OSPKE))
 return false;
- if (error_code & PF_PK)
+ if (error_code & X86_PF_PK)
 return true;
 /* this checks permission keys on the VMA: */
- if (!arch_vma_access_permitted(vma, (error_code & PF_WRITE),
- (error_code & PF_INSTR), foreign))
+ if (!arch_vma_access_permitted(vma, (error_code & X86_PF_WRITE),
+ (error_code & X86_PF_INSTR), foreign))
 return true;
 return false;
 }
@@ -1021,7 +1001,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
 int code = BUS_ADRERR;
 
 /* Kernel mode? Handle exceptions or die: */
- if (!(error_code & PF_USER)) {
+ if (!(error_code & X86_PF_USER)) {
 no_context(regs, error_code, address, SIGBUS, BUS_ADRERR);
 return;
 }
@@ -1049,14 +1029,14 @@ static noinline void
 mm_fault_error(struct pt_regs *regs, unsigned long error_code,
 unsigned long address, u32 *pkey, unsigned int fault)
 {
- if (fatal_signal_pending(current) && !(error_code & PF_USER)) {
+ if (fatal_signal_pending(current) && !(error_code & X86_PF_USER)) {
 no_context(regs, error_code, address, 0, 0);
 return;
 }
 
 if (fault & VM_FAULT_OOM) {
 /* Kernel mode? Handle exceptions or die: */
- if (!(error_code & PF_USER)) {
+ if (!(error_code & X86_PF_USER)) {
 no_context(regs, error_code, address,
 SIGSEGV, SEGV_MAPERR);
 return;
@@ -1081,16 +1061,16 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
 
 static int spurious_fault_check(unsigned long error_code, pte_t *pte)
 {
- if ((error_code & PF_WRITE) && !pte_write(*pte))
+ if ((error_code & X86_PF_WRITE) && !pte_write(*pte))
 return 0;
 
- if ((error_code & PF_INSTR) && !pte_exec(*pte))
+ if ((error_code & X86_PF_INSTR) && !pte_exec(*pte))
 return 0;
 /*
 * Note: We do not do lazy flushing on protection key
- * changes, so no spurious fault will ever set PF_PK.
+ * changes, so no spurious fault will ever set X86_PF_PK.
 */
- if ((error_code & PF_PK))
+ if ((error_code & X86_PF_PK))
 return 1;
 
 return 1;
@@ -1136,8 +1116,8 @@ spurious_fault(unsigned long error_code, unsigned long address)
 * change, so user accesses are not expected to cause spurious
 * faults.
 */
- if (error_code != (PF_WRITE | PF_PROT)
- && error_code != (PF_INSTR | PF_PROT))
+ if (error_code != (X86_PF_WRITE | X86_PF_PROT) &&
+ error_code != (X86_PF_INSTR | X86_PF_PROT))
 return 0;
 
 pgd = init_mm.pgd + pgd_index(address);
@@ -1197,19 +1177,19 @@ access_error(unsigned long error_code, struct vm_area_struct *vma)
 * always an unconditional error and can never result in
 * a follow-up action to resolve the fault, like a COW.
 */
- if (error_code & PF_PK)
+ if (error_code & X86_PF_PK)
 return 1;
 
 /*
 * Make sure to check the VMA so that we do not perform
- * faults just to hit a PF_PK as soon as we fill in a
+ * faults just to hit a X86_PF_PK as soon as we fill in a
 * page.
 */
- if (!arch_vma_access_permitted(vma, (error_code & PF_WRITE),
- (error_code & PF_INSTR), foreign))
+ if (!arch_vma_access_permitted(vma, (error_code & X86_PF_WRITE),
+ (error_code & X86_PF_INSTR), foreign))
 return 1;
 
- if (error_code & PF_WRITE) {
+ if (error_code & X86_PF_WRITE) {
 /* write, present and write, not present: */
 if (unlikely(!(vma->vm_flags & VM_WRITE)))
 return 1;
@@ -1217,7 +1197,7 @@ access_error(unsigned long error_code, struct vm_area_struct *vma)
 }
 
 /* read, present: */
- if (unlikely(error_code & PF_PROT))
+ if (unlikely(error_code & X86_PF_PROT))
 return 1;
 
 /* read, not present: */
@@ -1240,7 +1220,7 @@ static inline bool smap_violation(int error_code, struct pt_regs *regs)
 if (!static_cpu_has(X86_FEATURE_SMAP))
 return false;
 
- if (error_code & PF_USER)
+ if (error_code & X86_PF_USER)
 return false;
 
 if (!user_mode(regs) && (regs->flags & X86_EFLAGS_AC))
@@ -1293,7 +1273,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
 * protection error (error_code & 9) == 0.
 */
 if (unlikely(fault_in_kernel_space(address))) {
- if (!(error_code & (PF_RSVD | PF_USER | PF_PROT))) {
+ if (!(error_code & (X86_PF_RSVD | X86_PF_USER | X86_PF_PROT))) {
 if (vmalloc_fault(address) >= 0)
 return;
 
@@ -1321,7 +1301,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
 if (unlikely(kprobes_fault(regs)))
 return;
 
- if (unlikely(error_code & PF_RSVD))
+ if (unlikely(error_code & X86_PF_RSVD))
 pgtable_bad(regs, error_code, address);
 
 if (unlikely(smap_violation(error_code, regs))) {
@@ -1347,7 +1327,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
 */
 if (user_mode(regs)) {
 local_irq_enable();
- error_code |= PF_USER;
+ error_code |= X86_PF_USER;
 flags |= FAULT_FLAG_USER;
 } else {
 if (regs->flags & X86_EFLAGS_IF)
@@ -1356,9 +1336,9 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
 
 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 
- if (error_code & PF_WRITE)
+ if (error_code & X86_PF_WRITE)
 flags |= FAULT_FLAG_WRITE;
- if (error_code & PF_INSTR)
+ if (error_code & X86_PF_INSTR)
 flags |= FAULT_FLAG_INSTRUCTION;
 
 /*
@@ -1378,7 +1358,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
 * space check, thus avoiding the deadlock:
 */
 if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
- if ((error_code & PF_USER) == 0 &&
+ if (!(error_code & X86_PF_USER) &&
 !search_exception_tables(regs->ip)) {
 bad_area_nosemaphore(regs, error_code, address, NULL);
 return;
@@ -1405,7 +1385,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
 bad_area(regs, error_code, address);
 return;
 }
- if (error_code & PF_USER) {
+ if (error_code & X86_PF_USER) {
 /*
 * Accessing the stack below %sp is always a bug.
 * The large cushion allows instructions like enter
--
2.14.2
