/*
 * NOTE(review): the original paste carried git-blame residue (commit-hash and
 * line-number columns); that residue is extraction artifact, not source text.
 * File content below is arch/x86/kvm/vmx/vmenter.S.
 */
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <asm/asm.h>
#include <asm/bitsperlong.h>
#include <asm/kvm_vcpu_regs.h>
#include <asm/nospec-branch.h>

/* Size in bytes of one general-purpose register slot (4 or 8). */
#define WORD_SIZE (BITS_PER_LONG / 8)

/*
 * Byte offsets of each guest GPR within the @regs array passed to
 * __vmx_vcpu_run(), derived from the __VCPU_REGS_* enum indices.
 */
#define VCPU_RAX	__VCPU_REGS_RAX * WORD_SIZE
#define VCPU_RCX	__VCPU_REGS_RCX * WORD_SIZE
#define VCPU_RDX	__VCPU_REGS_RDX * WORD_SIZE
#define VCPU_RBX	__VCPU_REGS_RBX * WORD_SIZE
/* Intentionally omit RSP as it's context switched by hardware */
#define VCPU_RBP	__VCPU_REGS_RBP * WORD_SIZE
#define VCPU_RSI	__VCPU_REGS_RSI * WORD_SIZE
#define VCPU_RDI	__VCPU_REGS_RDI * WORD_SIZE

#ifdef CONFIG_X86_64
#define VCPU_R8		__VCPU_REGS_R8  * WORD_SIZE
#define VCPU_R9		__VCPU_REGS_R9  * WORD_SIZE
#define VCPU_R10	__VCPU_REGS_R10 * WORD_SIZE
#define VCPU_R11	__VCPU_REGS_R11 * WORD_SIZE
#define VCPU_R12	__VCPU_REGS_R12 * WORD_SIZE
#define VCPU_R13	__VCPU_REGS_R13 * WORD_SIZE
#define VCPU_R14	__VCPU_REGS_R14 * WORD_SIZE
#define VCPU_R15	__VCPU_REGS_R15 * WORD_SIZE
#endif
453eafbe SC |
	.text

/**
 * vmx_vmenter - VM-Enter the current loaded VMCS
 *
 * %RFLAGS.ZF: !VMCS.LAUNCHED, i.e. controls VMLAUNCH vs. VMRESUME
 *
 * Returns:
 *	%RFLAGS.CF is set on VM-Fail Invalid
 *	%RFLAGS.ZF is set on VM-Fail Valid
 *	%RFLAGS.{CF,ZF} are cleared on VM-Success, i.e. VM-Exit
 *
 * Note that VMRESUME/VMLAUNCH fall-through and return directly if
 * they VM-Fail, whereas a successful VM-Enter + VM-Exit will jump
 * to vmx_vmexit.
 */
ENTRY(vmx_vmenter)
	/* EFLAGS.ZF is set if VMCS.LAUNCHED == 0 */
	je 2f

	/* VMCS already launched: resume it.  Falls through on VM-Fail. */
1:	vmresume
	ret

	/* First launch of this VMCS.  Falls through on VM-Fail. */
2:	vmlaunch
	ret

	/*
	 * Fault fixup target (reached via the .fixup trampoline below).
	 * A faulting VMLAUNCH/VMRESUME is expected only when KVM is being
	 * torn down for reboot (kvm_rebooting != 0): swallow the fault and
	 * return.  Otherwise the fault is a bug, so die via ud2.
	 */
3:	cmpb $0, kvm_rebooting
	je 4f
	ret
4:	ud2

	.pushsection .fixup, "ax"
	/* Exception-table landing pad; jump back into .text for handling. */
5:	jmp 3b
	.popsection

	/* Route faults on VMRESUME (1:) and VMLAUNCH (2:) to the fixup. */
	_ASM_EXTABLE(1b, 5b)
	_ASM_EXTABLE(2b, 5b)

ENDPROC(vmx_vmenter)
69 | ||
/**
 * vmx_vmexit - Handle a VMX VM-Exit
 *
 * Returns:
 *	%RFLAGS.{CF,ZF} are cleared on VM-Success, i.e. VM-Exit
 *
 * This is vmx_vmenter's partner in crime.  On a VM-Exit, control will jump
 * here after hardware loads the host's state, i.e. this is the destination
 * referred to by VMCS.HOST_RIP.
 */
ENTRY(vmx_vmexit)
#ifdef CONFIG_RETPOLINE
	/* Skip the RSB stuffing entirely if retpolines are not in use. */
	ALTERNATIVE "jmp .Lvmexit_skip_rsb", "", X86_FEATURE_RETPOLINE
	/* Preserve guest's RAX, it's used to stuff the RSB. */
	push %_ASM_AX

	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
	FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE

	pop %_ASM_AX
.Lvmexit_skip_rsb:
#endif
	/* Return to the CALL site in __vmx_vcpu_run(). */
	ret
ENDPROC(vmx_vmexit)
5e0781df SC |

/**
 * __vmx_vcpu_run - Run a vCPU via a transition to VMX guest mode
 * @vmx:	struct vcpu_vmx * (first arg, consumed by vmx_update_host_rsp())
 * @regs:	unsigned long * (to guest registers)
 * @launched:	%true if the VMCS has been launched
 *
 * Returns:
 *	0 on VM-Exit, 1 on VM-Fail
 */
ENTRY(__vmx_vcpu_run)
	/* Standard frame plus all callee-saved GPRs of the host ABI. */
	push %_ASM_BP
	mov  %_ASM_SP, %_ASM_BP
#ifdef CONFIG_X86_64
	push %r15
	push %r14
	push %r13
	push %r12
#else
	push %edi
	push %esi
#endif
	push %_ASM_BX

	/*
	 * Save @regs, _ASM_ARG2 may be modified by vmx_update_host_rsp() and
	 * @regs is needed after VM-Exit to save the guest's register values.
	 */
	push %_ASM_ARG2

	/* Copy @launched to BL, _ASM_ARG3 is volatile. */
	mov %_ASM_ARG3B, %bl

	/* Adjust RSP to account for the CALL to vmx_vmenter(). */
	lea -WORD_SIZE(%_ASM_SP), %_ASM_ARG2
	call vmx_update_host_rsp

	/* Load @regs to RAX. */
	mov (%_ASM_SP), %_ASM_AX

	/*
	 * Check if vmlaunch or vmresume is needed.  The resulting ZF is
	 * consumed by vmx_vmenter(); nothing below may clobber flags.
	 */
	cmpb $0, %bl

	/* Load guest registers.  Don't clobber flags. */
	mov VCPU_RBX(%_ASM_AX), %_ASM_BX
	mov VCPU_RCX(%_ASM_AX), %_ASM_CX
	mov VCPU_RDX(%_ASM_AX), %_ASM_DX
	mov VCPU_RSI(%_ASM_AX), %_ASM_SI
	mov VCPU_RDI(%_ASM_AX), %_ASM_DI
	mov VCPU_RBP(%_ASM_AX), %_ASM_BP
#ifdef CONFIG_X86_64
	mov VCPU_R8 (%_ASM_AX), %r8
	mov VCPU_R9 (%_ASM_AX), %r9
	mov VCPU_R10(%_ASM_AX), %r10
	mov VCPU_R11(%_ASM_AX), %r11
	mov VCPU_R12(%_ASM_AX), %r12
	mov VCPU_R13(%_ASM_AX), %r13
	mov VCPU_R14(%_ASM_AX), %r14
	mov VCPU_R15(%_ASM_AX), %r15
#endif
	/* Load guest RAX.  This kills the vmx_vcpu pointer! */
	mov VCPU_RAX(%_ASM_AX), %_ASM_AX

	/* Enter guest mode */
	call vmx_vmenter

	/* Jump on VM-Fail (CF or ZF set by vmx_vmenter, hence JBE). */
	jbe 2f

	/* Temporarily save guest's RAX. */
	push %_ASM_AX

	/* Reload @regs to RAX (saved just above the temp RAX slot). */
	mov WORD_SIZE(%_ASM_SP), %_ASM_AX

	/* Save all guest registers, including RAX from the stack */
	__ASM_SIZE(pop) VCPU_RAX(%_ASM_AX)
	mov %_ASM_BX,   VCPU_RBX(%_ASM_AX)
	mov %_ASM_CX,   VCPU_RCX(%_ASM_AX)
	mov %_ASM_DX,   VCPU_RDX(%_ASM_AX)
	mov %_ASM_SI,   VCPU_RSI(%_ASM_AX)
	mov %_ASM_DI,   VCPU_RDI(%_ASM_AX)
	mov %_ASM_BP,   VCPU_RBP(%_ASM_AX)
#ifdef CONFIG_X86_64
	mov %r8,  VCPU_R8 (%_ASM_AX)
	mov %r9,  VCPU_R9 (%_ASM_AX)
	mov %r10, VCPU_R10(%_ASM_AX)
	mov %r11, VCPU_R11(%_ASM_AX)
	mov %r12, VCPU_R12(%_ASM_AX)
	mov %r13, VCPU_R13(%_ASM_AX)
	mov %r14, VCPU_R14(%_ASM_AX)
	mov %r15, VCPU_R15(%_ASM_AX)
#endif

	/* Clear RAX to indicate VM-Exit (as opposed to VM-Fail). */
	xor %eax, %eax

	/*
	 * Clear all general purpose registers except RSP and RAX to prevent
	 * speculative use of the guest's values, even those that are reloaded
	 * via the stack.  In theory, an L1 cache miss when restoring registers
	 * could lead to speculative execution with the guest's values.
	 * Zeroing XORs are dirt cheap, i.e. the extra paranoia is essentially
	 * free.  RSP and RAX are exempt as RSP is restored by hardware during
	 * VM-Exit and RAX is explicitly loaded with 0 or 1 to return VM-Fail.
	 */
1:	xor %ebx, %ebx
	xor %ecx, %ecx
	xor %edx, %edx
	xor %esi, %esi
	xor %edi, %edi
	xor %ebp, %ebp
#ifdef CONFIG_X86_64
	xor %r8d,  %r8d
	xor %r9d,  %r9d
	xor %r10d, %r10d
	xor %r11d, %r11d
	xor %r12d, %r12d
	xor %r13d, %r13d
	xor %r14d, %r14d
	xor %r15d, %r15d
#endif

	/* "POP" @regs. */
	add $WORD_SIZE, %_ASM_SP
	/* Restore host callee-saved registers in reverse push order. */
	pop %_ASM_BX

#ifdef CONFIG_X86_64
	pop %r12
	pop %r13
	pop %r14
	pop %r15
#else
	pop %esi
	pop %edi
#endif
	pop %_ASM_BP
	ret

	/* VM-Fail.  Out-of-line to avoid a taken Jcc after VM-Exit. */
2:	mov $1, %eax
	jmp 1b
ENDPROC(__vmx_vcpu_run)