From 4a112915e611296f0d196bb6cb2baa99af0e9148 Mon Sep 17 00:00:00 2001
From: Andy Lutomirski <luto@kernel.org>
Date: Thu, 2 Nov 2017 00:59:08 -0700
Subject: [PATCH 096/241] x86/entry/64: De-Xen-ify our NMI code
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

CVE-2017-5754

Xen PV is fundamentally incompatible with our fancy NMI code: it
doesn't use IST at all, and Xen entries clobber two stack slots
below the hardware frame.

Drop Xen PV support from our NMI code entirely.

Signed-off-by: Andy Lutomirski <luto@kernel.org>
Reviewed-by: Borislav Petkov <bp@suse.de>
Acked-by: Juergen Gross <jgross@suse.com>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Borislav Petkov <bpetkov@suse.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/bfbe711b5ae03f672f8848999a8eb2711efc7f98.1509609304.git.luto@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
(cherry picked from commit 929bacec21478a72c78e4f29f98fb799bd00105a)
Signed-off-by: Andy Whitcroft <apw@canonical.com>
Signed-off-by: Kleber Sacilotto de Souza <kleber.souza@canonical.com>
(cherry picked from commit ffc372909c1701c4fdd2bde7861692573ef381a7)
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
---
 arch/x86/entry/entry_64.S | 30 ++++++++++++++++++------------
 1 file changed, 18 insertions(+), 12 deletions(-)

diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 5a6aba7cf3bd..05501c781c20 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -1253,9 +1253,13 @@ ENTRY(error_exit)
 	jmp	retint_user
 END(error_exit)
 
-/* Runs on exception stack */
+/*
+ * Runs on exception stack. Xen PV does not go through this path at all,
+ * so we can use real assembly here.
+ */
 ENTRY(nmi)
 	UNWIND_HINT_IRET_REGS
+
 	/*
 	 * We allow breakpoints in NMIs. If a breakpoint occurs, then
 	 * the iretq it performs will take us out of NMI context.
@@ -1313,7 +1317,7 @@ ENTRY(nmi)
 	 * stacks lest we corrupt the "NMI executing" variable.
 	 */
 
-	SWAPGS_UNSAFE_STACK
+	swapgs
 	cld
 	movq	%rsp, %rdx
 	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp
@@ -1478,7 +1482,7 @@ nested_nmi_out:
 	popq	%rdx
 
 	/* We are returning to kernel mode, so this cannot result in a fault. */
-	INTERRUPT_RETURN
+	iretq
 
 first_nmi:
 	/* Restore rdx. */
@@ -1509,7 +1513,7 @@ first_nmi:
 	pushfq			/* RFLAGS */
 	pushq	$__KERNEL_CS	/* CS */
 	pushq	$1f		/* RIP */
-	INTERRUPT_RETURN	/* continues at repeat_nmi below */
+	iretq			/* continues at repeat_nmi below */
 	UNWIND_HINT_IRET_REGS
 1:
 #endif
@@ -1584,20 +1588,22 @@ nmi_restore:
 	/*
 	 * Clear "NMI executing". Set DF first so that we can easily
 	 * distinguish the remaining code between here and IRET from
-	 * the SYSCALL entry and exit paths. On a native kernel, we
-	 * could just inspect RIP, but, on paravirt kernels,
-	 * INTERRUPT_RETURN can translate into a jump into a
-	 * hypercall page.
+	 * the SYSCALL entry and exit paths.
+	 *
+	 * We arguably should just inspect RIP instead, but I (Andy) wrote
+	 * this code when I had the misapprehension that Xen PV supported
+	 * NMIs, and Xen PV would break that approach.
 	 */
 	std
 	movq	$0, 5*8(%rsp)		/* clear "NMI executing" */
 
 	/*
-	 * INTERRUPT_RETURN reads the "iret" frame and exits the NMI
-	 * stack in a single instruction. We are returning to kernel
-	 * mode, so this cannot result in a fault.
+	 * iretq reads the "iret" frame and exits the NMI stack in a
+	 * single instruction. We are returning to kernel mode, so this
+	 * cannot result in a fault. Similarly, we don't need to worry
+	 * about espfix64 on the way back to kernel mode.
 	 */
-	INTERRUPT_RETURN
+	iretq
 END(nmi)
 
 ENTRY(ignore_sysret)
-- 
2.14.2

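Context for the substitutions above: SWAPGS_UNSAFE_STACK and INTERRUPT_RETURN
are not instructions but macros whose expansion depends on CONFIG_PARAVIRT,
which is why excluding Xen PV from the NMI path lets the code use the raw
swapgs/iretq instructions. A minimal sketch of the two expansions, paraphrased
from the 4.x-era arch/x86 headers (the exact macro bodies vary by kernel
version, so treat this as illustrative rather than verbatim):

    /* Native build (!CONFIG_PARAVIRT), arch/x86/include/asm/irqflags.h:
     * the macros are simply the raw instructions. */
    #define SWAPGS_UNSAFE_STACK	swapgs
    #define INTERRUPT_RETURN	jmp native_iret

    /* CONFIG_PARAVIRT build, arch/x86/include/asm/paravirt.h: each use
     * becomes a patchable site that jumps through pv_cpu_ops. Under Xen
     * PV, pv_cpu_ops.iret points at xen_iret, i.e. the "jump into a
     * hypercall page" that the removed comment warned about. */
    #define INTERRUPT_RETURN						\
    	PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_iret), CLBR_NONE,	\
    		  jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_iret))

With the indirection gone, everything between clearing "NMI executing" and the
final iretq is fixed native code, which is what the DF trick described in the
updated comment relies on.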