/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <asm/asm.h>
#include <asm/bitsperlong.h>
#include <asm/kvm_vcpu_regs.h>
#include <asm/nospec-branch.h>
#include <asm/segment.h>
#include "run_flags.h"

#define WORD_SIZE (BITS_PER_LONG / 8)

#define VCPU_RAX        __VCPU_REGS_RAX * WORD_SIZE
#define VCPU_RCX        __VCPU_REGS_RCX * WORD_SIZE
#define VCPU_RDX        __VCPU_REGS_RDX * WORD_SIZE
#define VCPU_RBX        __VCPU_REGS_RBX * WORD_SIZE
/* Intentionally omit RSP as it's context switched by hardware */
#define VCPU_RBP        __VCPU_REGS_RBP * WORD_SIZE
#define VCPU_RSI        __VCPU_REGS_RSI * WORD_SIZE
#define VCPU_RDI        __VCPU_REGS_RDI * WORD_SIZE

#ifdef CONFIG_X86_64
#define VCPU_R8         __VCPU_REGS_R8  * WORD_SIZE
#define VCPU_R9         __VCPU_REGS_R9  * WORD_SIZE
#define VCPU_R10        __VCPU_REGS_R10 * WORD_SIZE
#define VCPU_R11        __VCPU_REGS_R11 * WORD_SIZE
#define VCPU_R12        __VCPU_REGS_R12 * WORD_SIZE
#define VCPU_R13        __VCPU_REGS_R13 * WORD_SIZE
#define VCPU_R14        __VCPU_REGS_R14 * WORD_SIZE
#define VCPU_R15        __VCPU_REGS_R15 * WORD_SIZE
#endif
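
/*
 * Example offset computation (assuming the __VCPU_REGS_* indices in
 * <asm/kvm_vcpu_regs.h> follow the hardware register numbering, RAX = 0,
 * RCX = 1, ..., R15 = 15): on a 64-bit build WORD_SIZE is 8, so VCPU_RCX
 * evaluates to 1 * 8 = 8 and VCPU_R15 to 15 * 8 = 120, i.e. the byte
 * offsets of those registers within the @regs array passed to
 * __vmx_vcpu_run().
 */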

.section .noinstr.text, "ax"

/**
 * __vmx_vcpu_run - Run a vCPU via a transition to VMX guest mode
 * @vmx:    struct vcpu_vmx *
 * @regs:   unsigned long * (to guest registers)
 * @flags:  VMX_RUN_VMRESUME: use VMRESUME instead of VMLAUNCH
 *          VMX_RUN_SAVE_SPEC_CTRL: save guest SPEC_CTRL into vmx->spec_ctrl
 *
 * Returns:
 *	0 on VM-Exit, 1 on VM-Fail
 */
SYM_FUNC_START(__vmx_vcpu_run)
        push %_ASM_BP
        mov  %_ASM_SP, %_ASM_BP
#ifdef CONFIG_X86_64
        push %r15
        push %r14
        push %r13
        push %r12
#else
        push %edi
        push %esi
#endif
        push %_ASM_BX

        /* Save @vmx for SPEC_CTRL handling */
        push %_ASM_ARG1

        /* Save @flags for SPEC_CTRL handling */
        push %_ASM_ARG3

        /*
         * Save @regs, _ASM_ARG2 may be modified by vmx_update_host_rsp() and
         * @regs is needed after VM-Exit to save the guest's register values.
         */
        push %_ASM_ARG2

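        /*
         * The stack now holds, from the top: @regs at 0(%_ASM_SP), @flags at
         * WORD_SIZE(%_ASM_SP) and @vmx at 2*WORD_SIZE(%_ASM_SP).  The reloads
         * and pops after VM-Exit rely on this layout.
         */
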
        /* Copy @flags to BL, _ASM_ARG3 is volatile. */
        mov %_ASM_ARG3B, %bl

        lea (%_ASM_SP), %_ASM_ARG2
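        /*
         * Pass the current stack pointer as the second argument so that
         * vmx_update_host_rsp() can record it as the VMCS HOST_RSP (when it
         * has changed), making the CPU come back to this exact stack on
         * VM-Exit.
         */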
        call vmx_update_host_rsp

        /* Load @regs to RAX. */
        mov (%_ASM_SP), %_ASM_AX

        /* Check if vmlaunch or vmresume is needed */
        testb $VMX_RUN_VMRESUME, %bl

        /* Load guest registers.  Don't clobber flags. */
        mov VCPU_RCX(%_ASM_AX), %_ASM_CX
        mov VCPU_RDX(%_ASM_AX), %_ASM_DX
        mov VCPU_RBX(%_ASM_AX), %_ASM_BX
        mov VCPU_RBP(%_ASM_AX), %_ASM_BP
        mov VCPU_RSI(%_ASM_AX), %_ASM_SI
        mov VCPU_RDI(%_ASM_AX), %_ASM_DI
#ifdef CONFIG_X86_64
        mov VCPU_R8 (%_ASM_AX),  %r8
        mov VCPU_R9 (%_ASM_AX),  %r9
        mov VCPU_R10(%_ASM_AX), %r10
        mov VCPU_R11(%_ASM_AX), %r11
        mov VCPU_R12(%_ASM_AX), %r12
        mov VCPU_R13(%_ASM_AX), %r13
        mov VCPU_R14(%_ASM_AX), %r14
        mov VCPU_R15(%_ASM_AX), %r15
#endif
        /* Load guest RAX.  This kills the @regs pointer! */
        mov VCPU_RAX(%_ASM_AX), %_ASM_AX

        /* Check EFLAGS.ZF from 'testb' above */
        jz .Lvmlaunch

        /*
         * After a successful VMRESUME/VMLAUNCH, control flow "magically"
         * resumes below at 'vmx_vmexit' due to the VMCS HOST_RIP setting.
         * So this isn't a typical function and objtool needs to be told to
         * save the unwind state here and restore it below.
         */
        UNWIND_HINT_SAVE

        /*
         * If VMRESUME/VMLAUNCH and corresponding vmexit succeed, execution
         * resumes at the 'vmx_vmexit' label below.
         */
.Lvmresume:
        vmresume
        jmp .Lvmfail

.Lvmlaunch:
        vmlaunch
        jmp .Lvmfail

        _ASM_EXTABLE(.Lvmresume, .Lfixup)
        _ASM_EXTABLE(.Lvmlaunch, .Lfixup)
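
        /*
         * If VMRESUME/VMLAUNCH itself faults, e.g. because VMX was forcibly
         * disabled for an emergency reboot, the exception table entries above
         * redirect to .Lfixup, which treats the fault as VM-Fail when
         * kvm_rebooting is set and as fatal (ud2) otherwise.
         */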

SYM_INNER_LABEL(vmx_vmexit, SYM_L_GLOBAL)

        /* Restore unwind state from before the VMRESUME/VMLAUNCH. */
        UNWIND_HINT_RESTORE

        /* Temporarily save guest's RAX. */
        push %_ASM_AX

        /* Reload @regs to RAX. */
        mov WORD_SIZE(%_ASM_SP), %_ASM_AX
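        /*
         * @regs now sits one slot up because the guest's RAX was just pushed
         * on top of it; the pop below both retrieves guest RAX and removes
         * that temporary slot.
         */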

        /* Save all guest registers, including RAX from the stack */
        pop           VCPU_RAX(%_ASM_AX)
        mov %_ASM_CX, VCPU_RCX(%_ASM_AX)
        mov %_ASM_DX, VCPU_RDX(%_ASM_AX)
        mov %_ASM_BX, VCPU_RBX(%_ASM_AX)
        mov %_ASM_BP, VCPU_RBP(%_ASM_AX)
        mov %_ASM_SI, VCPU_RSI(%_ASM_AX)
        mov %_ASM_DI, VCPU_RDI(%_ASM_AX)
#ifdef CONFIG_X86_64
        mov %r8,  VCPU_R8 (%_ASM_AX)
        mov %r9,  VCPU_R9 (%_ASM_AX)
        mov %r10, VCPU_R10(%_ASM_AX)
        mov %r11, VCPU_R11(%_ASM_AX)
        mov %r12, VCPU_R12(%_ASM_AX)
        mov %r13, VCPU_R13(%_ASM_AX)
        mov %r14, VCPU_R14(%_ASM_AX)
        mov %r15, VCPU_R15(%_ASM_AX)
#endif

        /* Clear return value to indicate VM-Exit (as opposed to VM-Fail). */
        xor %ebx, %ebx

.Lclear_regs:
        /*
         * Clear all general purpose registers except RSP and RBX to prevent
         * speculative use of the guest's values, even those that are reloaded
         * via the stack.  In theory, an L1 cache miss when restoring registers
         * could lead to speculative execution with the guest's values.
         * Zeroing XORs are dirt cheap, i.e. the extra paranoia is essentially
         * free.  RSP and RBX are exempt as RSP is restored by hardware during
         * VM-Exit and RBX is explicitly loaded with 0 or 1 to hold the return
         * value.
         */
        xor %eax, %eax
        xor %ecx, %ecx
        xor %edx, %edx
        xor %ebp, %ebp
        xor %esi, %esi
        xor %edi, %edi
#ifdef CONFIG_X86_64
        xor %r8d,  %r8d
        xor %r9d,  %r9d
        xor %r10d, %r10d
        xor %r11d, %r11d
        xor %r12d, %r12d
        xor %r13d, %r13d
        xor %r14d, %r14d
        xor %r15d, %r15d
#endif

        /* "POP" @regs. */
        add $WORD_SIZE, %_ASM_SP

        /*
         * IMPORTANT: RSB filling and SPEC_CTRL handling must be done before
         * the first unbalanced RET after vmexit!
         *
         * For retpoline, RSB filling is needed to prevent poisoned RSB entries
         * and (in some cases) RSB underflow.
         *
         * eIBRS has its own protection against poisoned RSB, so it doesn't
         * need the RSB filling sequence.  But it does need to be enabled
         * before the first unbalanced RET.
         */

        FILL_RETURN_BUFFER %_ASM_CX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE

        pop %_ASM_ARG2  /* @flags */
        pop %_ASM_ARG1  /* @vmx */

        call vmx_spec_ctrl_restore_host

        /* Put return value in AX */
        mov %_ASM_BX, %_ASM_AX

        pop %_ASM_BX
#ifdef CONFIG_X86_64
        pop %r12
        pop %r13
        pop %r14
        pop %r15
#else
        pop %esi
        pop %edi
#endif
        pop %_ASM_BP
        RET

.Lfixup:
        cmpb $0, kvm_rebooting
        jne .Lvmfail
        ud2
.Lvmfail:
        /* VM-Fail: set return value to 1 */
        mov $1, %_ASM_BX
        jmp .Lclear_regs

SYM_FUNC_END(__vmx_vcpu_run)

.section .text, "ax"

241 | /** |
242 | * vmread_error_trampoline - Trampoline from inline asm to vmread_error() | |
243 | * @field: VMCS field encoding that failed | |
244 | * @fault: %true if the VMREAD faulted, %false if it failed | |
245 | ||
246 | * Save and restore volatile registers across a call to vmread_error(). Note, | |
247 | * all parameters are passed on the stack. | |
248 | */ | |
249 | SYM_FUNC_START(vmread_error_trampoline) | |
250 | push %_ASM_BP | |
251 | mov %_ASM_SP, %_ASM_BP | |
252 | ||
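        /*
         * With the frame set up, the stack relative to %_ASM_BP holds the old
         * frame pointer at 0, the return address at WORD_SIZE, @field at
         * 2*WORD_SIZE and @fault at 3*WORD_SIZE; the parameter loads and
         * pushes below rely on those offsets.
         */
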
        push %_ASM_AX
        push %_ASM_CX
        push %_ASM_DX
#ifdef CONFIG_X86_64
        push %rdi
        push %rsi
        push %r8
        push %r9
        push %r10
        push %r11
#endif
#ifdef CONFIG_X86_64
        /* Load @field and @fault to arg1 and arg2 respectively. */
        mov 3*WORD_SIZE(%rbp), %_ASM_ARG2
        mov 2*WORD_SIZE(%rbp), %_ASM_ARG1
#else
        /* Parameters are passed on the stack for 32-bit (see asmlinkage). */
        push 3*WORD_SIZE(%ebp)
        push 2*WORD_SIZE(%ebp)
#endif

        call vmread_error

#ifndef CONFIG_X86_64
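        /* Drop the two 4-byte parameters pushed for the 32-bit call above. */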
        add $8, %esp
#endif

        /* Zero out @fault, which will be popped into the result register. */
        _ASM_MOV $0, 3*WORD_SIZE(%_ASM_BP)
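        /*
         * The caller is expected to pop that slot into its result register,
         * so a VMREAD that failed or faulted reads back as 0.
         */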

#ifdef CONFIG_X86_64
        pop %r11
        pop %r10
        pop %r9
        pop %r8
        pop %rsi
        pop %rdi
#endif
        pop %_ASM_DX
        pop %_ASM_CX
        pop %_ASM_AX
        pop %_ASM_BP

        RET
SYM_FUNC_END(vmread_error_trampoline)

SYM_FUNC_START(vmx_do_interrupt_nmi_irqoff)
        /*
         * Unconditionally create a stack frame, getting the correct RSP on the
         * stack (for x86-64) would take two instructions anyways, and RBP can
         * be used to restore RSP to make objtool happy (see below).
         */
        push %_ASM_BP
        mov  %_ASM_SP, %_ASM_BP

#ifdef CONFIG_X86_64
        /*
         * Align RSP to a 16-byte boundary (to emulate CPU behavior) before
         * creating the synthetic interrupt stack frame for the IRQ/NMI.
         */
        and  $-16, %rsp
        push $__KERNEL_DS
        push %rbp
#endif
        pushf
        push $__KERNEL_CS
        CALL_NOSPEC _ASM_ARG1
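        /*
         * The call pushes the return address, completing a synthetic
         * interrupt frame (SS, RSP, RFLAGS, CS, RIP on 64-bit; RFLAGS, CS,
         * RIP on 32-bit), so the handler's IRET returns to the instruction
         * below.
         */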

        /*
         * "Restore" RSP from RBP, even though IRET has already unwound RSP to
         * the correct value.  objtool doesn't know the callee will IRET and,
         * without the explicit restore, thinks the stack is getting walloped.
         * Using an unwind hint is problematic due to x86-64's dynamic alignment.
         */
        mov %_ASM_BP, %_ASM_SP
        pop %_ASM_BP
        RET
SYM_FUNC_END(vmx_do_interrupt_nmi_irqoff)