arch/x86/kvm/vmx/vmx_ops.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_VMX_INSN_H
#define __KVM_X86_VMX_INSN_H

#include <linux/nospec.h>

#include <asm/vmx.h>

#include "evmcs.h"
#include "vmcs.h"
#include "../x86.h"

asmlinkage void vmread_error(unsigned long field, bool fault);
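/*
 * The trampoline (in vmenter.S) preserves volatile registers around the
 * call to vmread_error(); regparm(0) keeps the arguments on the stack,
 * matching the pushes done by the asm in __vmcs_readl() below.
 */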
__attribute__((regparm(0))) void vmread_error_trampoline(unsigned long field,
							 bool fault);
void vmwrite_error(unsigned long field, unsigned long value);
void vmclear_error(struct vmcs *vmcs, u64 phys_addr);
void vmptrld_error(struct vmcs *vmcs, u64 phys_addr);
void invvpid_error(unsigned long ext, u16 vpid, gva_t gva);
void invept_error(unsigned long ext, u64 eptp, gpa_t gpa);

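/*
 * VMCS field encoding (Intel SDM Vol. 3, Appendix B): bits 14:13 give the
 * field width (0 = 16-bit, 1 = 64-bit, 2 = 32-bit, 3 = natural width) and
 * bit 0 is the access type (1 = high 32 bits of a 64-bit field).  The
 * checks below mask those bits (0x6000/0x6001) so that a constant @field
 * used with a mismatched accessor fails at build time.
 */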
static __always_inline void vmcs_check16(unsigned long field)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000,
			 "16-bit accessor invalid for 64-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
			 "16-bit accessor invalid for 64-bit high field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
			 "16-bit accessor invalid for 32-bit high field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
			 "16-bit accessor invalid for natural width field");
}

static __always_inline void vmcs_check32(unsigned long field)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
			 "32-bit accessor invalid for 16-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000,
			 "32-bit accessor invalid for 64-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
			 "32-bit accessor invalid for 64-bit high field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
			 "32-bit accessor invalid for natural width field");
}

static __always_inline void vmcs_check64(unsigned long field)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
			 "64-bit accessor invalid for 16-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
			 "64-bit accessor invalid for 64-bit high field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
			 "64-bit accessor invalid for 32-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
			 "64-bit accessor invalid for natural width field");
}

static __always_inline void vmcs_checkl(unsigned long field)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
			 "Natural width accessor invalid for 16-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000,
			 "Natural width accessor invalid for 64-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
			 "Natural width accessor invalid for 64-bit high field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
			 "Natural width accessor invalid for 32-bit field");
}

static __always_inline unsigned long __vmcs_readl(unsigned long field)
{
	unsigned long value;

	asm volatile("1: vmread %2, %1\n\t"
		     ".byte 0x3e\n\t" /* branch taken hint */
		     "ja 3f\n\t"

		     /*
		      * VMREAD failed. Push '0' for @fault, push the failing
		      * @field, and bounce through the trampoline to preserve
		      * volatile registers.
		      */
		     "push $0\n\t"
		     "push %2\n\t"
		     "2:call vmread_error_trampoline\n\t"

		     /*
		      * Unwind the stack. Note, the trampoline zeros out the
		      * memory for @fault so that the result is '0' on error.
		      */
		     "pop %2\n\t"
		     "pop %1\n\t"
		     "3:\n\t"

		     /* VMREAD faulted. As above, except push '1' for @fault. */
		     ".pushsection .fixup, \"ax\"\n\t"
		     "4: push $1\n\t"
		     "push %2\n\t"
		     "jmp 2b\n\t"
		     ".popsection\n\t"
		     _ASM_EXTABLE(1b, 4b)
		     : ASM_CALL_CONSTRAINT, "=r"(value) : "r"(field) : "cc");
	return value;
}

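/*
 * The sized accessors below pair each VMREAD with a build-time width check
 * and divert to the enlightened VMCS copy when KVM runs as a Hyper-V guest
 * with eVMCS enabled (the enable_evmcs static key).
 */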
static __always_inline u16 vmcs_read16(unsigned long field)
{
	vmcs_check16(field);
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_read16(field);
	return __vmcs_readl(field);
}

static __always_inline u32 vmcs_read32(unsigned long field)
{
	vmcs_check32(field);
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_read32(field);
	return __vmcs_readl(field);
}

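/*
 * On 32-bit kernels a 64-bit field is accessed as two 32-bit halves;
 * field + 1 sets the access-type bit (bit 0) and selects the high half.
 */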
static __always_inline u64 vmcs_read64(unsigned long field)
{
	vmcs_check64(field);
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_read64(field);
#ifdef CONFIG_X86_64
	return __vmcs_readl(field);
#else
	return __vmcs_readl(field) | ((u64)__vmcs_readl(field+1) << 32);
#endif
}

static __always_inline unsigned long vmcs_readl(unsigned long field)
{
	vmcs_checkl(field);
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_read64(field);
	return __vmcs_readl(field);
}

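/*
 * vmx_asm1()/vmx_asm2() wrap a one- or two-operand VMX instruction with
 * asm goto.  VM-fail leaves CF or ZF set, so "jna" (below or equal)
 * branches to the error label to report the failure, while a fault on the
 * instruction itself is routed through the exception table to the fault
 * label and treated as a spurious fault.
 */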
#define vmx_asm1(insn, op1, error_args...)				\
do {									\
	asm_volatile_goto("1: " __stringify(insn) " %0\n\t"		\
			  ".byte 0x2e\n\t" /* branch not taken hint */	\
			  "jna %l[error]\n\t"				\
			  _ASM_EXTABLE(1b, %l[fault])			\
			  : : op1 : "cc" : error, fault);		\
	return;								\
error:									\
	instrumentation_begin();					\
	insn##_error(error_args);					\
	instrumentation_end();						\
	return;								\
fault:									\
	kvm_spurious_fault();						\
} while (0)

#define vmx_asm2(insn, op1, op2, error_args...)				\
do {									\
	asm_volatile_goto("1: " __stringify(insn) " %1, %0\n\t"		\
			  ".byte 0x2e\n\t" /* branch not taken hint */	\
			  "jna %l[error]\n\t"				\
			  _ASM_EXTABLE(1b, %l[fault])			\
			  : : op1, op2 : "cc" : error, fault);		\
	return;								\
error:									\
	instrumentation_begin();					\
	insn##_error(error_args);					\
	instrumentation_end();						\
	return;								\
fault:									\
	kvm_spurious_fault();						\
} while (0)

static __always_inline void __vmcs_writel(unsigned long field, unsigned long value)
{
	vmx_asm2(vmwrite, "r"(field), "rm"(value), field, value);
}

static __always_inline void vmcs_write16(unsigned long field, u16 value)
{
	vmcs_check16(field);
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_write16(field, value);

	__vmcs_writel(field, value);
}

static __always_inline void vmcs_write32(unsigned long field, u32 value)
{
	vmcs_check32(field);
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_write32(field, value);

	__vmcs_writel(field, value);
}

static __always_inline void vmcs_write64(unsigned long field, u64 value)
{
	vmcs_check64(field);
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_write64(field, value);

	__vmcs_writel(field, value);
#ifndef CONFIG_X86_64
	__vmcs_writel(field+1, value >> 32);
#endif
}

static __always_inline void vmcs_writel(unsigned long field, unsigned long value)
{
	vmcs_checkl(field);
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_write64(field, value);

	__vmcs_writel(field, value);
}

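/*
 * Read-modify-write helpers for adjusting control bits in place, e.g.
 *	vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL, CPU_BASED_MOV_DR_EXITING);
 * 64-bit fields are rejected at build time since the u32 mask and the
 * unsigned long intermediate would truncate them on 32-bit kernels.
 */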
static __always_inline void vmcs_clear_bits(unsigned long field, u32 mask)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000,
			 "vmcs_clear_bits does not support 64-bit fields");
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_write32(field, evmcs_read32(field) & ~mask);

	__vmcs_writel(field, __vmcs_readl(field) & ~mask);
}

static __always_inline void vmcs_set_bits(unsigned long field, u32 mask)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000,
			 "vmcs_set_bits does not support 64-bit fields");
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_write32(field, evmcs_read32(field) | mask);

	__vmcs_writel(field, __vmcs_readl(field) | mask);
}

static inline void vmcs_clear(struct vmcs *vmcs)
{
	u64 phys_addr = __pa(vmcs);

	vmx_asm1(vmclear, "m"(phys_addr), vmcs, phys_addr);
}

static inline void vmcs_load(struct vmcs *vmcs)
{
	u64 phys_addr = __pa(vmcs);

	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_load(phys_addr);

	vmx_asm1(vmptrld, "m"(phys_addr), vmcs, phys_addr);
}

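/*
 * INVVPID takes a 128-bit in-memory descriptor: the VPID in bits 15:0,
 * reserved bits that must be zero, and the target linear address in bits
 * 127:64 (consumed only by the individual-address extent).
 */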
static inline void __invvpid(unsigned long ext, u16 vpid, gva_t gva)
{
	struct {
		u64 vpid : 16;
		u64 rsvd : 48;
		u64 gva;
	} operand = { vpid, 0, gva };

	vmx_asm2(invvpid, "r"(ext), "m"(operand), ext, vpid, gva);
}

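/*
 * INVEPT's 128-bit descriptor holds the EPT pointer in the low quadword;
 * the high quadword is currently reserved, and every caller in this file
 * passes 0 for @gpa.
 */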
static inline void __invept(unsigned long ext, u64 eptp, gpa_t gpa)
{
	struct {
		u64 eptp, gpa;
	} operand = {eptp, gpa};

	vmx_asm2(invept, "r"(ext), "m"(operand), ext, eptp, gpa);
}

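/*
 * VPID 0 is reserved for the host; a vCPU running with VPID disabled has
 * vpid == 0, so the targeted flushes below simply bail in that case.
 */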
static inline void vpid_sync_vcpu_single(int vpid)
{
	if (vpid == 0)
		return;

	__invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vpid, 0);
}

static inline void vpid_sync_vcpu_global(void)
{
	__invvpid(VMX_VPID_EXTENT_ALL_CONTEXT, 0, 0);
}

static inline void vpid_sync_context(int vpid)
{
	if (cpu_has_vmx_invvpid_single())
		vpid_sync_vcpu_single(vpid);
	else if (vpid != 0)
		vpid_sync_vcpu_global();
}

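/*
 * Flush a single guest linear address when the CPU supports per-address
 * INVVPID; otherwise fall back to flushing the whole VPID context.
 */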
static inline void vpid_sync_vcpu_addr(int vpid, gva_t addr)
{
	if (vpid == 0)
		return;

	if (cpu_has_vmx_invvpid_individual_addr())
		__invvpid(VMX_VPID_EXTENT_INDIVIDUAL_ADDR, vpid, addr);
	else
		vpid_sync_context(vpid);
}

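/*
 * EPT flushes prefer a single-context INVEPT of the given EPTP and fall
 * back to flushing all EPT contexts when that extent isn't supported.
 */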
static inline void ept_sync_global(void)
{
	__invept(VMX_EPT_EXTENT_GLOBAL, 0, 0);
}

static inline void ept_sync_context(u64 eptp)
{
	if (cpu_has_vmx_invept_context())
		__invept(VMX_EPT_EXTENT_CONTEXT, eptp, 0);
	else
		ept_sync_global();
}

#endif /* __KVM_X86_VMX_INSN_H */