/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_VMX_H
#define __KVM_X86_VMX_H

#include <linux/kvm_host.h>

#include <asm/kvm.h>
#include <asm/intel_pt.h>

#include "capabilities.h"
#include "kvm_cache_regs.h"
#include "ops.h"
#include "vmcs.h"
#include "cpuid.h"

extern const u32 vmx_msr_index[];

#define MSR_TYPE_R	1
#define MSR_TYPE_W	2
#define MSR_TYPE_RW	3

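/*
 * Maps an xAPIC MMIO register offset to its x2APIC MSR number, e.g.
 * X2APIC_MSR(APIC_TASKPRI) is 0x800 + (0x80 >> 4) = 0x808, the x2APIC TPR MSR.
 */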
#define X2APIC_MSR(r) (APIC_BASE_MSR + ((r) >> 4))

#ifdef CONFIG_X86_64
#define NR_SHARED_MSRS	7
#else
#define NR_SHARED_MSRS	4
#endif

#define NR_LOADSTORE_MSRS	8

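/*
 * Fixed-size backing for the VMCS VM-entry/VM-exit MSR-load and MSR-store
 * areas; "nr" is the number of entries currently in use.
 */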
struct vmx_msrs {
	unsigned int		nr;
	struct vmx_msr_entry	val[NR_LOADSTORE_MSRS];
};

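/*
 * MSRs switched lazily around host/guest transitions rather than via the
 * atomic VM-entry/VM-exit lists (see vcpu_vmx::guest_msrs).  "mask" limits
 * which bits of the guest value are actually loaded into the hardware MSR.
 */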
struct shared_msr_entry {
	unsigned	index;
	u64		data;
	u64		mask;
};

enum segment_cache_field {
	SEG_FIELD_SEL = 0,
	SEG_FIELD_BASE = 1,
	SEG_FIELD_LIMIT = 2,
	SEG_FIELD_AR = 3,

	SEG_FIELD_NR = 4
};

/* Posted-Interrupt Descriptor */
struct pi_desc {
	u32 pir[8];     /* Posted interrupt requested */
	union {
		struct {
				/* bit 256 - Outstanding Notification */
			u16	on	: 1,
				/* bit 257 - Suppress Notification */
				sn	: 1,
				/* bit 271:258 - Reserved */
				rsvd_1	: 14;
				/* bit 279:272 - Notification Vector */
			u8	nv;
				/* bit 287:280 - Reserved */
			u8	rsvd_2;
				/* bit 319:288 - Notification Destination */
			u32	ndst;
		};
		u64 control;
	};
	u32 rsvd[6];
} __aligned(64);
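
/*
 * Typical posting protocol: the sender sets the vector's bit in pir[] and
 * then sets ON; if ON was previously clear, it sends the notification
 * vector (nv) as a physical IPI to the destination (ndst).  SN is set to
 * suppress notifications while the vCPU is not running.
 */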

#define RTIT_ADDR_RANGE		4

struct pt_ctx {
	u64 ctl;
	u64 status;
	u64 output_base;
	u64 output_mask;
	u64 cr3_match;
	u64 addr_a[RTIT_ADDR_RANGE];
	u64 addr_b[RTIT_ADDR_RANGE];
};

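/*
 * Intel PT state: "host" and "guest" hold the RTIT MSR values that are
 * swapped around VM-entry/VM-exit when PT is virtualized in host/guest mode.
 */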
struct pt_desc {
	u64 ctl_bitmask;
	u32 addr_range;
	u32 caps[PT_CPUID_REGS_NUM * PT_CPUID_LEAVES];
	struct pt_ctx host;
	struct pt_ctx guest;
};

/*
 * The nested_vmx structure is part of vcpu_vmx, and holds information we need
 * for correct emulation of VMX (i.e., nested VMX) on this vcpu.
 */
struct nested_vmx {
	/* Has the level-1 (L1) guest done vmxon? */
	bool vmxon;
	gpa_t vmxon_ptr;
	bool pml_full;

	/* The guest-physical address of the current VMCS L1 keeps for L2 */
	gpa_t current_vmptr;
	/*
	 * Cache of the guest's VMCS, existing outside of guest memory.
	 * Loaded from guest memory during VMPTRLD. Flushed to guest
	 * memory during VMCLEAR and VMPTRLD.
	 */
	struct vmcs12 *cached_vmcs12;
	/*
	 * Cache of the guest's shadow VMCS, existing outside of guest
	 * memory. Loaded from guest memory during VM entry. Flushed
	 * to guest memory during VM exit.
	 */
	struct vmcs12 *cached_shadow_vmcs12;

	/*
	 * Indicates if the shadow vmcs or enlightened vmcs must be updated
	 * with the data held by struct vmcs12.
	 */
	bool need_vmcs12_to_shadow_sync;
	bool dirty_vmcs12;

	/*
	 * Indicates lazily loaded guest state has not yet been decached from
	 * vmcs02.
	 */
	bool need_sync_vmcs02_to_vmcs12_rare;

	/*
	 * vmcs02 has been initialized, i.e. state that is constant for
	 * vmcs02 has been written to the backing VMCS. Initialization
	 * is delayed until L1 actually attempts to run a nested VM.
	 */
	bool vmcs02_initialized;

	bool change_vmcs01_virtual_apic_mode;
	bool reload_vmcs01_apic_access_page;

	/*
	 * Enlightened VMCS has been enabled. It does not mean that L1 has to
	 * use it. However, VMX features available to L1 will be limited based
	 * on what the enlightened VMCS supports.
	 */
	bool enlightened_vmcs_enabled;

	/* L2 must run next, and mustn't decide to exit to L1. */
	bool nested_run_pending;

	/* Pending MTF VM-exit into L1. */
	bool mtf_pending;

	struct loaded_vmcs vmcs02;

	/*
	 * Guest pages referred to in the vmcs02 with host-physical
	 * pointers, so we must keep them pinned while L2 runs.
	 */
	struct page *apic_access_page;
	struct kvm_host_map virtual_apic_map;
	struct kvm_host_map pi_desc_map;

	struct kvm_host_map msr_bitmap_map;

	struct pi_desc *pi_desc;
	bool pi_pending;
	u16 posted_intr_nv;

	struct hrtimer preemption_timer;
	u64 preemption_timer_deadline;
	bool has_preemption_timer_deadline;
	bool preemption_timer_expired;

	/* to migrate it to L2 if VM_ENTRY_LOAD_DEBUG_CONTROLS is off */
	u64 vmcs01_debugctl;
	u64 vmcs01_guest_bndcfgs;

	/* to migrate it to L1 if L2 writes to L1's CR8 directly */
	int l1_tpr_threshold;

	u16 vpid02;
	u16 last_vpid;

	struct nested_vmx_msrs msrs;

	/* SMM related state */
	struct {
		/* in VMX operation on SMM entry? */
		bool vmxon;
		/* in guest mode on SMM entry? */
		bool guest_mode;
	} smm;

	gpa_t hv_evmcs_vmptr;
	struct kvm_host_map hv_evmcs_map;
	struct hv_enlightened_vmcs *hv_evmcs;
};

struct vcpu_vmx {
	struct kvm_vcpu	vcpu;
	u8		fail;
	u8		msr_bitmap_mode;

	/*
	 * If true, host state has been stored in vmx->loaded_vmcs for
	 * the CPU registers that only need to be switched when transitioning
	 * to/from the kernel, and the registers have been loaded with guest
	 * values. If false, host state is loaded in the CPU registers
	 * and vmx->loaded_vmcs->host_state is invalid.
	 */
	bool		guest_state_loaded;

	unsigned long	exit_qualification;
	u32		exit_intr_info;
	u32		idt_vectoring_info;
	ulong		rflags;

	struct shared_msr_entry guest_msrs[NR_SHARED_MSRS];
	int		nmsrs;
	int		save_nmsrs;
	bool		guest_msrs_ready;
#ifdef CONFIG_X86_64
	u64		msr_host_kernel_gs_base;
	u64		msr_guest_kernel_gs_base;
#endif

	u64		spec_ctrl;
	u32		msr_ia32_umwait_control;

	u32 secondary_exec_control;

	/*
	 * loaded_vmcs points to the VMCS currently used in this vcpu. For a
	 * non-nested (L1) guest, it always points to vmcs01. For a nested
	 * guest (L2), it points to a different VMCS.
	 */
	struct loaded_vmcs	vmcs01;
	struct loaded_vmcs	*loaded_vmcs;

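	/*
	 * Backing storage for the VMCS VM-entry/VM-exit MSR-load and VM-exit
	 * MSR-store areas (VM_ENTRY_MSR_LOAD_ADDR/COUNT and friends).
	 */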
	struct msr_autoload {
		struct vmx_msrs guest;
		struct vmx_msrs host;
	} msr_autoload;

	struct msr_autostore {
		struct vmx_msrs guest;
	} msr_autostore;

	struct {
		int vm86_active;
		ulong save_rflags;
		struct kvm_segment segs[8];
	} rmode;
	struct {
		u32 bitmask; /* 4 bits per segment (1 bit per field) */
		struct kvm_save_segment {
			u16 selector;
			unsigned long base;
			u32 limit;
			u32 ar;
		} seg[8];
	} segment_cache;
	int vpid;
	bool emulation_required;

	u32 exit_reason;

	/* Posted interrupt descriptor */
	struct pi_desc pi_desc;

	/* Support for a guest hypervisor (nested VMX) */
	struct nested_vmx nested;

	/* Dynamic PLE window. */
	unsigned int ple_window;
	bool ple_window_dirty;

	bool req_immediate_exit;

	/* Support for PML */
#define PML_ENTITY_NUM		512
	struct page *pml_pg;

	/* apic deadline value in host tsc */
	u64 hv_deadline_tsc;

	u64 current_tsc_ratio;

	unsigned long host_debugctlmsr;

	/*
	 * Only bits masked by msr_ia32_feature_control_valid_bits can be set in
	 * msr_ia32_feature_control. FEAT_CTL_LOCKED is always included
	 * in msr_ia32_feature_control_valid_bits.
	 */
	u64 msr_ia32_feature_control;
	u64 msr_ia32_feature_control_valid_bits;
	u64 ept_pointer;

	struct pt_desc pt_desc;
};

enum ept_pointers_status {
	EPT_POINTERS_CHECK = 0,
	EPT_POINTERS_MATCH = 1,
	EPT_POINTERS_MISMATCH = 2
};

struct kvm_vmx {
	struct kvm kvm;

	unsigned int tss_addr;
	bool ept_identity_pagetable_done;
	gpa_t ept_identity_map_addr;

	enum ept_pointers_status ept_pointers_match;
	spinlock_t ept_pointer_lock;
};

bool nested_vmx_allowed(struct kvm_vcpu *vcpu);
void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu,
			struct loaded_vmcs *buddy);
int allocate_vpid(void);
void free_vpid(int vpid);
void vmx_set_constant_host_state(struct vcpu_vmx *vmx);
void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu);
void vmx_set_host_fs_gs(struct vmcs_host_state *host, u16 fs_sel, u16 gs_sel,
			unsigned long fs_base, unsigned long gs_base);
int vmx_get_cpl(struct kvm_vcpu *vcpu);
unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu);
void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu);
void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask);
void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer);
void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void set_cr4_guest_host_mask(struct vcpu_vmx *vmx);
void ept_save_pdptrs(struct kvm_vcpu *vcpu);
void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa,
		   int root_level);
void update_exception_bitmap(struct kvm_vcpu *vcpu);
void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu);
bool vmx_nmi_blocked(struct kvm_vcpu *vcpu);
bool vmx_interrupt_blocked(struct kvm_vcpu *vcpu);
bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu);
void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked);
void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr);
void pt_update_intercept_for_msr(struct vcpu_vmx *vmx);
void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp);
int vmx_find_msr_index(struct vmx_msrs *m, u32 msr);
int vmx_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
			      struct x86_exception *e);

#define POSTED_INTR_ON  0
#define POSTED_INTR_SN  1

static inline bool pi_test_and_set_on(struct pi_desc *pi_desc)
{
	return test_and_set_bit(POSTED_INTR_ON,
			(unsigned long *)&pi_desc->control);
}

static inline bool pi_test_and_clear_on(struct pi_desc *pi_desc)
{
	return test_and_clear_bit(POSTED_INTR_ON,
			(unsigned long *)&pi_desc->control);
}

static inline int pi_test_and_set_pir(int vector, struct pi_desc *pi_desc)
{
	return test_and_set_bit(vector, (unsigned long *)pi_desc->pir);
}

static inline bool pi_is_pir_empty(struct pi_desc *pi_desc)
{
	return bitmap_empty((unsigned long *)pi_desc->pir, NR_VECTORS);
}

static inline void pi_set_sn(struct pi_desc *pi_desc)
{
	set_bit(POSTED_INTR_SN,
		(unsigned long *)&pi_desc->control);
}

static inline void pi_set_on(struct pi_desc *pi_desc)
{
	set_bit(POSTED_INTR_ON,
		(unsigned long *)&pi_desc->control);
}

static inline void pi_clear_on(struct pi_desc *pi_desc)
{
	clear_bit(POSTED_INTR_ON,
		(unsigned long *)&pi_desc->control);
}

static inline void pi_clear_sn(struct pi_desc *pi_desc)
{
	clear_bit(POSTED_INTR_SN,
		(unsigned long *)&pi_desc->control);
}

static inline int pi_test_on(struct pi_desc *pi_desc)
{
	return test_bit(POSTED_INTR_ON,
		(unsigned long *)&pi_desc->control);
}

static inline int pi_test_sn(struct pi_desc *pi_desc)
{
	return test_bit(POSTED_INTR_SN,
		(unsigned long *)&pi_desc->control);
}
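
/*
 * Example of how the helpers above compose when posting an interrupt (the
 * actual delivery path lives in vmx.c):
 *
 *	if (!pi_test_and_set_pir(vector, pi_desc) &&
 *	    !pi_test_and_set_on(pi_desc))
 *		send the notification vector (nv) to the target CPU (ndst);
 */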

static inline u8 vmx_get_rvi(void)
{
	return vmcs_read16(GUEST_INTR_STATUS) & 0xff;
}

#define BUILD_CONTROLS_SHADOW(lname, uname)				     \
static inline void lname##_controls_set(struct vcpu_vmx *vmx, u32 val)	     \
{									     \
	if (vmx->loaded_vmcs->controls_shadow.lname != val) {		     \
		vmcs_write32(uname, val);				     \
		vmx->loaded_vmcs->controls_shadow.lname = val;		     \
	}								     \
}									     \
static inline u32 lname##_controls_get(struct vcpu_vmx *vmx)		     \
{									     \
	return vmx->loaded_vmcs->controls_shadow.lname;			     \
}									     \
static inline void lname##_controls_setbit(struct vcpu_vmx *vmx, u32 val)   \
{									     \
	lname##_controls_set(vmx, lname##_controls_get(vmx) | val);	     \
}									     \
static inline void lname##_controls_clearbit(struct vcpu_vmx *vmx, u32 val) \
{									     \
	lname##_controls_set(vmx, lname##_controls_get(vmx) & ~val);	     \
}
BUILD_CONTROLS_SHADOW(vm_entry, VM_ENTRY_CONTROLS)
BUILD_CONTROLS_SHADOW(vm_exit, VM_EXIT_CONTROLS)
BUILD_CONTROLS_SHADOW(pin, PIN_BASED_VM_EXEC_CONTROL)
BUILD_CONTROLS_SHADOW(exec, CPU_BASED_VM_EXEC_CONTROL)
BUILD_CONTROLS_SHADOW(secondary_exec, SECONDARY_VM_EXEC_CONTROL)
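
/*
 * The instantiations above generate, e.g., exec_controls_get()/set() and
 * exec_controls_setbit()/clearbit(), which only write the VMCS field when
 * the shadowed value actually changes, e.g.:
 *
 *	exec_controls_setbit(vmx, CPU_BASED_USE_TPR_SHADOW);
 */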

static inline void vmx_register_cache_reset(struct kvm_vcpu *vcpu)
{
	vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
				  | (1 << VCPU_EXREG_RFLAGS)
				  | (1 << VCPU_EXREG_PDPTR)
				  | (1 << VCPU_EXREG_SEGMENTS)
				  | (1 << VCPU_EXREG_CR0)
				  | (1 << VCPU_EXREG_CR3)
				  | (1 << VCPU_EXREG_CR4)
				  | (1 << VCPU_EXREG_EXIT_INFO_1)
				  | (1 << VCPU_EXREG_EXIT_INFO_2));
	vcpu->arch.regs_dirty = 0;
}

static inline u32 vmx_vmentry_ctrl(void)
{
	u32 vmentry_ctrl = vmcs_config.vmentry_ctrl;
	if (vmx_pt_mode_is_system())
		vmentry_ctrl &= ~(VM_ENTRY_PT_CONCEAL_PIP |
				  VM_ENTRY_LOAD_IA32_RTIT_CTL);
	/* Loading of EFER and PERF_GLOBAL_CTRL is toggled dynamically */
	return vmentry_ctrl &
		~(VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VM_ENTRY_LOAD_IA32_EFER);
}

static inline u32 vmx_vmexit_ctrl(void)
{
	u32 vmexit_ctrl = vmcs_config.vmexit_ctrl;
	if (vmx_pt_mode_is_system())
		vmexit_ctrl &= ~(VM_EXIT_PT_CONCEAL_PIP |
				 VM_EXIT_CLEAR_IA32_RTIT_CTL);
	/* Loading of EFER and PERF_GLOBAL_CTRL is toggled dynamically */
	return vmexit_ctrl &
		~(VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | VM_EXIT_LOAD_IA32_EFER);
}

u32 vmx_exec_control(struct vcpu_vmx *vmx);
u32 vmx_pin_based_exec_ctrl(struct vcpu_vmx *vmx);

static inline struct kvm_vmx *to_kvm_vmx(struct kvm *kvm)
{
	return container_of(kvm, struct kvm_vmx, kvm);
}

static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_vmx, vcpu);
}

static inline struct pi_desc *vcpu_to_pi_desc(struct kvm_vcpu *vcpu)
{
	return &(to_vmx(vcpu)->pi_desc);
}

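/*
 * EXIT_QUALIFICATION and VM_EXIT_INTR_INFO are read from the VMCS lazily,
 * on first use after a VM-exit, and the cached values stay valid until
 * vmx_register_cache_reset() marks them unavailable again.
 */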
static inline unsigned long vmx_get_exit_qual(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (!kvm_register_is_available(vcpu, VCPU_EXREG_EXIT_INFO_1)) {
		kvm_register_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_1);
		vmx->exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
	}
	return vmx->exit_qualification;
}

static inline u32 vmx_get_intr_info(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (!kvm_register_is_available(vcpu, VCPU_EXREG_EXIT_INFO_2)) {
		kvm_register_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_2);
		vmx->exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
	}
	return vmx->exit_intr_info;
}

struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu, gfp_t flags);
void free_vmcs(struct vmcs *vmcs);
int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs);
void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs);
void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs);

static inline struct vmcs *alloc_vmcs(bool shadow)
{
	return alloc_vmcs_cpu(shadow, raw_smp_processor_id(),
			      GFP_KERNEL_ACCOUNT);
}

static inline void decache_tsc_multiplier(struct vcpu_vmx *vmx)
{
	vmx->current_tsc_ratio = vmx->vcpu.arch.tsc_scaling_ratio;
	vmcs_write64(TSC_MULTIPLIER, vmx->current_tsc_ratio);
}

static inline bool vmx_has_waitpkg(struct vcpu_vmx *vmx)
{
	return vmx->secondary_exec_control &
		SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE;
}

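/*
 * #PF must be intercepted when EPT is disabled (shadow paging), and also
 * when the guest's MAXPHYADDR is smaller than the host's, so that the
 * reserved-bit page faults the guest expects for physical-address bits
 * above its limit can be emulated.
 */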
static inline bool vmx_need_pf_intercept(struct kvm_vcpu *vcpu)
{
	return !enable_ept || cpuid_maxphyaddr(vcpu) < boot_cpu_data.x86_phys_bits;
}

void dump_vmcs(void);

#endif /* __KVM_X86_VMX_H */