From c63a9850ba744d9871b4ca2dad11588db5d670a2 Mon Sep 17 00:00:00 2001
From: Andy Lutomirski <luto@kernel.org>
Date: Mon, 7 Aug 2017 20:59:21 -0700
Subject: [PATCH 022/241] x86/xen/64: Rearrange the SYSCALL entries
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

CVE-2017-5754

Xen's raw SYSCALL entries are much less weird than native. Rather
than fudging them to look like native entries, use the Xen-provided
stack frame directly.

This lets us eliminate entry_SYSCALL_64_after_swapgs and two uses of
the SWAPGS_UNSAFE_STACK paravirt hook. The SYSENTER code would
benefit from similar treatment.

This makes one change to the native code path: the compat
instruction that clears the high 32 bits of %rax is moved slightly
later. I'd be surprised if this affects performance at all.

Tested-by: Juergen Gross <jgross@suse.com>
Signed-off-by: Andy Lutomirski <luto@kernel.org>
Reviewed-by: Juergen Gross <jgross@suse.com>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Borislav Petkov <bpetkov@suse.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: xen-devel@lists.xenproject.org
Link: http://lkml.kernel.org/r/7c88ed36805d36841ab03ec3b48b4122c4418d71.1502164668.git.luto@kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
(cherry picked from commit 8a9949bc71a71b3dd633255ebe8f8869b1f73474)
Signed-off-by: Andy Whitcroft <apw@canonical.com>
Signed-off-by: Kleber Sacilotto de Souza <kleber.souza@canonical.com>
(cherry picked from commit b8cec41ee5f30df5032cfe8c86103f7d92a89590)
Signed-off-by: Fabian Grünbichler <f.gruenbichler@proxmox.com>
---
 arch/x86/entry/entry_64.S        |  9 ++-------
 arch/x86/entry/entry_64_compat.S |  7 +++----
 arch/x86/xen/xen-asm_64.S        | 23 +++++++++--------------
 3 files changed, 14 insertions(+), 25 deletions(-)
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 64b233ab7cad..4dbb336a1fdd 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -142,14 +142,8 @@ ENTRY(entry_SYSCALL_64)
 	 * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
 	 * it is too small to ever cause noticeable irq latency.
 	 */
-	SWAPGS_UNSAFE_STACK
-	/*
-	 * A hypervisor implementation might want to use a label
-	 * after the swapgs, so that it can do the swapgs
-	 * for the guest and jump here on syscall.
-	 */
-GLOBAL(entry_SYSCALL_64_after_swapgs)
 
+	swapgs
 	movq	%rsp, PER_CPU_VAR(rsp_scratch)
 	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp
 
@@ -161,6 +155,7 @@ GLOBAL(entry_SYSCALL_64_after_swapgs)
 	pushq	%r11				/* pt_regs->flags */
 	pushq	$__USER_CS			/* pt_regs->cs */
 	pushq	%rcx				/* pt_regs->ip */
+GLOBAL(entry_SYSCALL_64_after_hwframe)
 	pushq	%rax				/* pt_regs->orig_ax */
 	pushq	%rdi				/* pt_regs->di */
 	pushq	%rsi				/* pt_regs->si */
diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
index e1721dafbcb1..5314d7b8e5ad 100644
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -183,21 +183,20 @@ ENDPROC(entry_SYSENTER_compat)
  */
 ENTRY(entry_SYSCALL_compat)
 	/* Interrupts are off on entry. */
-	SWAPGS_UNSAFE_STACK
+	swapgs
 
 	/* Stash user ESP and switch to the kernel stack. */
 	movl	%esp, %r8d
 	movq	PER_CPU_VAR(cpu_current_top_of_stack), %rsp
 
-	/* Zero-extending 32-bit regs, do not remove */
-	movl	%eax, %eax
-
 	/* Construct struct pt_regs on stack */
 	pushq	$__USER32_DS		/* pt_regs->ss */
 	pushq	%r8			/* pt_regs->sp */
 	pushq	%r11			/* pt_regs->flags */
 	pushq	$__USER32_CS		/* pt_regs->cs */
 	pushq	%rcx			/* pt_regs->ip */
+GLOBAL(entry_SYSCALL_compat_after_hwframe)
+	movl	%eax, %eax		/* discard orig_ax high bits */
 	pushq	%rax			/* pt_regs->orig_ax */
 	pushq	%rdi			/* pt_regs->di */
 	pushq	%rsi			/* pt_regs->si */
diff --git a/arch/x86/xen/xen-asm_64.S b/arch/x86/xen/xen-asm_64.S
index c3df43141e70..a8a4f4c460a6 100644
--- a/arch/x86/xen/xen-asm_64.S
+++ b/arch/x86/xen/xen-asm_64.S
@@ -82,34 +82,29 @@ RELOC(xen_sysret64, 1b+1)
  *	rip
  *	r11
  * rsp->rcx
- *
- * In all the entrypoints, we undo all that to make it look like a
- * CPU-generated syscall/sysenter and jump to the normal entrypoint.
  */
 
-.macro undo_xen_syscall
-	mov 0*8(%rsp), %rcx
-	mov 1*8(%rsp), %r11
-	mov 5*8(%rsp), %rsp
-.endm
-
 /* Normal 64-bit system call target */
 ENTRY(xen_syscall_target)
-	undo_xen_syscall
-	jmp entry_SYSCALL_64_after_swapgs
+	popq %rcx
+	popq %r11
+	jmp entry_SYSCALL_64_after_hwframe
 ENDPROC(xen_syscall_target)
 
 #ifdef CONFIG_IA32_EMULATION
 
 /* 32-bit compat syscall target */
 ENTRY(xen_syscall32_target)
-	undo_xen_syscall
-	jmp entry_SYSCALL_compat
+	popq %rcx
+	popq %r11
+	jmp entry_SYSCALL_compat_after_hwframe
 ENDPROC(xen_syscall32_target)
 
 /* 32-bit compat sysenter target */
 ENTRY(xen_sysenter_target)
-	undo_xen_syscall
+	mov 0*8(%rsp), %rcx
+	mov 1*8(%rsp), %r11
+	mov 5*8(%rsp), %rsp
 	jmp entry_SYSENTER_compat
 ENDPROC(xen_sysenter_target)
 
-- 
2.14.2