/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_VMX_H
#define __KVM_X86_VMX_H

#include <linux/kvm_host.h>

#include <asm/intel_pt.h>

#include "capabilities.h"
#include "kvm_cache_regs.h"
#include "posted_intr.h"

#define X2APIC_MSR(r) (APIC_BASE_MSR + ((r) >> 4))
#ifdef CONFIG_X86_64
#define MAX_NR_USER_RETURN_MSRS	7
#else
#define MAX_NR_USER_RETURN_MSRS	4
#endif
#define MAX_NR_LOADSTORE_MSRS	8

struct vmx_msrs {
	unsigned int nr;
	struct vmx_msr_entry val[MAX_NR_LOADSTORE_MSRS];
};

struct vmx_uret_msr {
	bool load_into_hardware;
	u64 data;
	u64 mask;
};

enum segment_cache_field {
	SEG_FIELD_SEL = 0,
	SEG_FIELD_BASE = 1,
	SEG_FIELD_LIMIT = 2,
	SEG_FIELD_AR = 3,

	SEG_FIELD_NR = 4
};
#define RTIT_ADDR_RANGE	4

struct pt_ctx {
	u64 ctl;
	u64 status;
	u64 output_base;
	u64 output_mask;
	u64 cr3_match;
	u64 addr_a[RTIT_ADDR_RANGE];
	u64 addr_b[RTIT_ADDR_RANGE];
};

struct pt_desc {
	u64 ctl_bitmask;
	u32 addr_range;
	u32 caps[PT_CPUID_REGS_NUM * PT_CPUID_LEAVES];
	struct pt_ctx host;
	struct pt_ctx guest;
};
union vmx_exit_reason {
	struct {
		u32 basic		: 16;
		u32 reserved16		: 1;
		u32 reserved17		: 1;
		u32 reserved18		: 1;
		u32 reserved19		: 1;
		u32 reserved20		: 1;
		u32 reserved21		: 1;
		u32 reserved22		: 1;
		u32 reserved23		: 1;
		u32 reserved24		: 1;
		u32 reserved25		: 1;
		u32 bus_lock_detected	: 1;
		u32 enclave_mode	: 1;
		u32 smi_pending_mtf	: 1;
		u32 smi_from_vmx_root	: 1;
		u32 reserved30		: 1;
		u32 failed_vmentry	: 1;
	};
	u32 full;
};
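/*
 * Illustrative sketch (not code from this file): the union lets exit
 * handlers test the modifier bits directly instead of masking the raw
 * 32-bit exit reason, e.g.
 *
 *	union vmx_exit_reason reason = to_vmx(vcpu)->exit_reason;
 *
 *	if (reason.failed_vmentry)
 *		return 0;
 *	handle_exit(vcpu, reason.basic);
 *
 * handle_exit() is a hypothetical helper named only for illustration;
 * reason.full still provides the raw value when needed.
 */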
#define vcpu_to_lbr_desc(vcpu) (&to_vmx(vcpu)->lbr_desc)
#define vcpu_to_lbr_records(vcpu) (&to_vmx(vcpu)->lbr_desc.records)

bool intel_pmu_lbr_is_compatible(struct kvm_vcpu *vcpu);
bool intel_pmu_lbr_is_enabled(struct kvm_vcpu *vcpu);

int intel_pmu_create_guest_lbr_event(struct kvm_vcpu *vcpu);
void vmx_passthrough_lbr_msrs(struct kvm_vcpu *vcpu);
struct lbr_desc {
	/* Basic info about guest LBR records. */
	struct x86_pmu_lbr records;

	/*
	 * Emulate LBR feature via passthrough LBR registers when the
	 * per-vcpu guest LBR event is scheduled on the current pcpu.
	 *
	 * The records may be inaccurate if the host reclaims the LBR.
	 */
	struct perf_event *event;

	/* True if LBRs are marked as not intercepted in the MSR bitmap */
	bool msr_passthrough;
};
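/*
 * A minimal usage sketch for the accessors above (illustrative only; the
 * real call sites live in vmx.c and pmu_intel.c):
 *
 *	struct lbr_desc *lbr = vcpu_to_lbr_desc(vcpu);
 *	struct x86_pmu_lbr *records = vcpu_to_lbr_records(vcpu);
 *
 *	if (lbr->event && !lbr->msr_passthrough)
 *		vmx_passthrough_lbr_msrs(vcpu);
 *
 * The condition is a sketch of how the fields are meant to compose, not a
 * transcription of the actual logic.
 */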
/*
 * The nested_vmx structure is part of vcpu_vmx, and holds information we need
 * for correct emulation of VMX (i.e., nested VMX) on this vcpu.
 */
struct nested_vmx {
	/* Has the level1 guest done vmxon? */
	bool vmxon;
	gpa_t vmxon_ptr;
	bool pml_full;

	/* The guest-physical address of the current VMCS L1 keeps for L2 */
	gpa_t current_vmptr;
	/*
	 * Cache of the guest's VMCS, existing outside of guest memory.
	 * Loaded from guest memory during VMPTRLD. Flushed to guest
	 * memory during VMCLEAR and VMPTRLD.
	 */
	struct vmcs12 *cached_vmcs12;
	/*
	 * Cache of the guest's shadow VMCS, existing outside of guest
	 * memory. Loaded from guest memory during VM entry. Flushed
	 * to guest memory during VM exit.
	 */
	struct vmcs12 *cached_shadow_vmcs12;

	/*
	 * Indicates if the shadow vmcs or enlightened vmcs must be updated
	 * with the data held by struct vmcs12.
	 */
	bool need_vmcs12_to_shadow_sync;
	/*
	 * Indicates lazily loaded guest state has not yet been decached
	 * from vmcs02.
	 */
	bool need_sync_vmcs02_to_vmcs12_rare;
	/*
	 * vmcs02 has been initialized, i.e. state that is constant for
	 * vmcs02 has been written to the backing VMCS.  Initialization
	 * is delayed until L1 actually attempts to run a nested VM.
	 */
	bool vmcs02_initialized;

	bool change_vmcs01_virtual_apic_mode;
	bool reload_vmcs01_apic_access_page;
	bool update_vmcs01_cpu_dirty_logging;

	/*
	 * Enlightened VMCS has been enabled. It does not mean that L1 has to
	 * use it. However, VMX features available to L1 will be limited based
	 * on what the enlightened VMCS supports.
	 */
	bool enlightened_vmcs_enabled;

	/* L2 must run next, and mustn't decide to exit to L1. */
	bool nested_run_pending;
	/* Pending MTF VM-exit into L1. */
	bool mtf_pending;

	struct loaded_vmcs vmcs02;
	/*
	 * Guest pages referred to in the vmcs02 with host-physical
	 * pointers, so we must keep them pinned while L2 runs.
	 */
	struct page *apic_access_page;
	struct kvm_host_map virtual_apic_map;
	struct kvm_host_map pi_desc_map;

	struct kvm_host_map msr_bitmap_map;

	struct pi_desc *pi_desc;

	struct hrtimer preemption_timer;
	u64 preemption_timer_deadline;
	bool has_preemption_timer_deadline;
	bool preemption_timer_expired;
	/* to migrate it to L2 if VM_ENTRY_LOAD_DEBUG_CONTROLS is off */
	u64 vmcs01_debugctl;
	u64 vmcs01_guest_bndcfgs;
	/* to migrate it to L1 if L2 writes to L1's CR8 directly */
	int l1_tpr_threshold;

	struct nested_vmx_msrs msrs;
	/* SMM related state */
	struct {
		/* in VMX operation on SMM entry? */
		bool vmxon;
		/* in guest mode on SMM entry? */
		bool guest_mode;
	} smm;

	gpa_t hv_evmcs_vmptr;
	struct kvm_host_map hv_evmcs_map;
	struct hv_enlightened_vmcs *hv_evmcs;
};

struct vcpu_vmx {
	struct kvm_vcpu vcpu;
	u8 fail;
	u8 x2apic_msr_bitmap_mode;
	/*
	 * If true, host state has been stored in vmx->loaded_vmcs for
	 * the CPU registers that only need to be switched when transitioning
	 * to/from the kernel, and the registers have been loaded with guest
	 * values.  If false, host state is loaded in the CPU registers
	 * and vmx->loaded_vmcs->host_state is invalid.
	 */
	bool guest_state_loaded;

	unsigned long exit_qualification;
	u32 exit_intr_info;
	u32 idt_vectoring_info;
	/*
	 * User return MSRs are always emulated when enabled in the guest, but
	 * only loaded into hardware when necessary, e.g. SYSCALL #UDs outside
	 * of 64-bit mode or if EFER.SCE=0, thus the SYSCALL MSRs don't need to
	 * be loaded into hardware if those conditions aren't met.
	 * nr_active_uret_msrs tracks the number of MSRs that need to be loaded
	 * into hardware when running the guest.  guest_uret_msrs[] is resorted
	 * whenever the number of "active" uret MSRs is modified.
	 */
	struct vmx_uret_msr guest_uret_msrs[MAX_NR_USER_RETURN_MSRS];
	int nr_active_uret_msrs;
	bool guest_uret_msrs_loaded;
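	/*
	 * Illustrative sketch (not code from this file): an emulated uret
	 * MSR is located via vmx_find_uret_msr(), declared below, and is
	 * only written to hardware if it is marked active, e.g.
	 *
	 *	struct vmx_uret_msr *msr = vmx_find_uret_msr(vmx, MSR_LSTAR);
	 *
	 *	if (msr && msr->load_into_hardware)
	 *		...;	// restored when returning to userspace
	 *
	 * MSR_LSTAR is chosen purely for illustration.
	 */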
#ifdef CONFIG_X86_64
	u64 msr_host_kernel_gs_base;
	u64 msr_guest_kernel_gs_base;
#endif

	u32 msr_ia32_umwait_control;
	/*
	 * loaded_vmcs points to the VMCS currently used in this vcpu. For a
	 * non-nested (L1) guest, it always points to vmcs01. For a nested
	 * guest (L2), it points to a different VMCS.
	 */
	struct loaded_vmcs vmcs01;
	struct loaded_vmcs *loaded_vmcs;

	struct msr_autoload {
		struct vmx_msrs guest;
		struct vmx_msrs host;
	} msr_autoload;

	struct msr_autostore {
		struct vmx_msrs guest;
	} msr_autostore;
	struct {
		int vm86_active;
		ulong save_rflags;
		struct kvm_segment segs[8];
	} rmode;
	struct {
		u32 bitmask; /* 4 bits per segment (1 bit per field) */
		struct kvm_save_segment {
			u16 selector;
			unsigned long base;
			u32 limit;
			u32 ar;
		} seg[8];
	} segment_cache;
	int vpid;
	bool emulation_required;
	union vmx_exit_reason exit_reason;

	/* Posted interrupt descriptor */
	struct pi_desc pi_desc;

	/* Support for a guest hypervisor (nested VMX) */
	struct nested_vmx nested;

	/* Dynamic PLE window. */
	unsigned int ple_window;
	bool ple_window_dirty;

	bool req_immediate_exit;
	/* Support for PML */
#define PML_ENTITY_NUM	512
	struct page *pml_pg;

	/* apic deadline value in host tsc */
	u64 hv_deadline_tsc;

	unsigned long host_debugctlmsr;
	/*
	 * Only bits masked by msr_ia32_feature_control_valid_bits can be set in
	 * msr_ia32_feature_control.  FEAT_CTL_LOCKED is always included
	 * in msr_ia32_feature_control_valid_bits.
	 */
	u64 msr_ia32_feature_control;
	u64 msr_ia32_feature_control_valid_bits;
	/* SGX Launch Control public key hash */
	u64 msr_ia32_sgxlepubkeyhash[4];

	struct pt_desc pt_desc;
	struct lbr_desc lbr_desc;
	/* Save desired MSR intercept (read: pass-through) state */
#define MAX_POSSIBLE_PASSTHROUGH_MSRS	13
	struct {
		DECLARE_BITMAP(read, MAX_POSSIBLE_PASSTHROUGH_MSRS);
		DECLARE_BITMAP(write, MAX_POSSIBLE_PASSTHROUGH_MSRS);
	} shadow_msr_intercept;
};

struct kvm_vmx {
	struct kvm kvm;

	unsigned int tss_addr;
	bool ept_identity_pagetable_done;
	gpa_t ept_identity_map_addr;
};
bool nested_vmx_allowed(struct kvm_vcpu *vcpu);
void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu,
			struct loaded_vmcs *buddy);
int allocate_vpid(void);
void free_vpid(int vpid);
void vmx_set_constant_host_state(struct vcpu_vmx *vmx);
void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu);
void vmx_set_host_fs_gs(struct vmcs_host_state *host, u16 fs_sel, u16 gs_sel,
			unsigned long fs_base, unsigned long gs_base);
int vmx_get_cpl(struct kvm_vcpu *vcpu);
unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu);
void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu);
void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask);
int vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer);
void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void set_cr4_guest_host_mask(struct vcpu_vmx *vmx);
void ept_save_pdptrs(struct kvm_vcpu *vcpu);
void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
void __vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
u64 construct_eptp(struct kvm_vcpu *vcpu, hpa_t root_hpa, int root_level);

bool vmx_guest_inject_ac(struct kvm_vcpu *vcpu);
void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu);
bool vmx_nmi_blocked(struct kvm_vcpu *vcpu);
bool vmx_interrupt_blocked(struct kvm_vcpu *vcpu);
bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu);
void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked);
void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
struct vmx_uret_msr *vmx_find_uret_msr(struct vcpu_vmx *vmx, u32 msr);
void pt_update_intercept_for_msr(struct kvm_vcpu *vcpu);
void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp);
bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs, bool launched);
int vmx_find_loadstore_msr_slot(struct vmx_msrs *m, u32 msr);
void vmx_ept_load_pdptrs(struct kvm_vcpu *vcpu);

void vmx_disable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type);
void vmx_enable_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr, int type);

u64 vmx_get_l2_tsc_offset(struct kvm_vcpu *vcpu);
u64 vmx_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu);
static inline void vmx_set_intercept_for_msr(struct kvm_vcpu *vcpu, u32 msr,
					     int type, bool value)
{
	if (value)
		vmx_enable_intercept_for_msr(vcpu, msr, type);
	else
		vmx_disable_intercept_for_msr(vcpu, msr, type);
}
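/*
 * Usage sketch (illustrative): intercept state for an MSR typically tracks
 * whether the guest is allowed direct access, e.g.
 *
 *	vmx_set_intercept_for_msr(vcpu, MSR_IA32_UMWAIT_CONTROL, MSR_TYPE_RW,
 *				  !guest_has_waitpkg);
 *
 * guest_has_waitpkg is a hypothetical flag named only for illustration;
 * MSR_TYPE_RW is the combined read+write intercept type consumed by the
 * enable/disable helpers above.
 */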
void vmx_update_cpu_dirty_logging(struct kvm_vcpu *vcpu);

static inline u8 vmx_get_rvi(void)
{
	return vmcs_read16(GUEST_INTR_STATUS) & 0xff;
}
#define BUILD_CONTROLS_SHADOW(lname, uname)				     \
static inline void lname##_controls_set(struct vcpu_vmx *vmx, u32 val)	     \
{									     \
	if (vmx->loaded_vmcs->controls_shadow.lname != val) {		     \
		vmcs_write32(uname, val);				     \
		vmx->loaded_vmcs->controls_shadow.lname = val;		     \
	}								     \
}									     \
static inline u32 __##lname##_controls_get(struct loaded_vmcs *vmcs)	     \
{									     \
	return vmcs->controls_shadow.lname;				     \
}									     \
static inline u32 lname##_controls_get(struct vcpu_vmx *vmx)		     \
{									     \
	return __##lname##_controls_get(vmx->loaded_vmcs);		     \
}									     \
static inline void lname##_controls_setbit(struct vcpu_vmx *vmx, u32 val)   \
{									     \
	lname##_controls_set(vmx, lname##_controls_get(vmx) | val);	     \
}									     \
static inline void lname##_controls_clearbit(struct vcpu_vmx *vmx, u32 val) \
{									     \
	lname##_controls_set(vmx, lname##_controls_get(vmx) & ~val);	     \
}
BUILD_CONTROLS_SHADOW(vm_entry, VM_ENTRY_CONTROLS)
BUILD_CONTROLS_SHADOW(vm_exit, VM_EXIT_CONTROLS)
BUILD_CONTROLS_SHADOW(pin, PIN_BASED_VM_EXEC_CONTROL)
BUILD_CONTROLS_SHADOW(exec, CPU_BASED_VM_EXEC_CONTROL)
BUILD_CONTROLS_SHADOW(secondary_exec, SECONDARY_VM_EXEC_CONTROL)
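/*
 * For reference, BUILD_CONTROLS_SHADOW(vm_entry, VM_ENTRY_CONTROLS) above
 * expands to vm_entry_controls_set/get/setbit/clearbit.  A typical caller
 * (sketch only):
 *
 *	vm_entry_controls_setbit(vmx, VM_ENTRY_LOAD_IA32_EFER);
 *	vm_entry_controls_clearbit(vmx, VM_ENTRY_LOAD_IA32_EFER);
 *
 * The shadow held in loaded_vmcs avoids a VMWRITE when the requested value
 * matches what is already cached.
 */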
static inline void vmx_register_cache_reset(struct kvm_vcpu *vcpu)
{
	vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
				  | (1 << VCPU_EXREG_RFLAGS)
				  | (1 << VCPU_EXREG_PDPTR)
				  | (1 << VCPU_EXREG_SEGMENTS)
				  | (1 << VCPU_EXREG_CR0)
				  | (1 << VCPU_EXREG_CR3)
				  | (1 << VCPU_EXREG_CR4)
				  | (1 << VCPU_EXREG_EXIT_INFO_1)
				  | (1 << VCPU_EXREG_EXIT_INFO_2));
	vcpu->arch.regs_dirty = 0;
}
static inline struct kvm_vmx *to_kvm_vmx(struct kvm *kvm)
{
	return container_of(kvm, struct kvm_vmx, kvm);
}

static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_vmx, vcpu);
}
static inline unsigned long vmx_get_exit_qual(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (!kvm_register_is_available(vcpu, VCPU_EXREG_EXIT_INFO_1)) {
		kvm_register_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_1);
		vmx->exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
	}
	return vmx->exit_qualification;
}

static inline u32 vmx_get_intr_info(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (!kvm_register_is_available(vcpu, VCPU_EXREG_EXIT_INFO_2)) {
		kvm_register_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_2);
		vmx->exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
	}
	return vmx->exit_intr_info;
}
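/*
 * Both helpers above follow the same lazy-read pattern: the first call
 * after a VM-exit does the VMREAD and marks the value available in the
 * register cache; vmx_register_cache_reset() is expected to invalidate
 * the cached exit info again before the next exit is processed.
 */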
struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu, gfp_t flags);
void free_vmcs(struct vmcs *vmcs);
int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs);
void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs);
void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs);
static inline struct vmcs *alloc_vmcs(bool shadow)
{
	return alloc_vmcs_cpu(shadow, raw_smp_processor_id(),
			      GFP_KERNEL_ACCOUNT);
}
static inline bool vmx_has_waitpkg(struct vcpu_vmx *vmx)
{
	return secondary_exec_controls_get(vmx) &
		SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE;
}
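/*
 * #PF must be intercepted when EPT is disabled (shadow paging needs every
 * guest page fault), and also when the guest is told MAXPHYADDR is smaller
 * than what the host supports, so reserved-bit page faults can be emulated
 * as the guest expects.
 */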
static inline bool vmx_need_pf_intercept(struct kvm_vcpu *vcpu)
{
	if (!enable_ept)
		return true;

	return allow_smaller_maxphyaddr &&
	       cpuid_maxphyaddr(vcpu) < boot_cpu_data.x86_phys_bits;
}
static inline bool is_unrestricted_guest(struct kvm_vcpu *vcpu)
{
	return enable_unrestricted_guest && (!is_guest_mode(vcpu) ||
	    (secondary_exec_controls_get(to_vmx(vcpu)) &
	     SECONDARY_EXEC_UNRESTRICTED_GUEST));
}

bool __vmx_guest_state_valid(struct kvm_vcpu *vcpu);
static inline bool vmx_guest_state_valid(struct kvm_vcpu *vcpu)
{
	return is_unrestricted_guest(vcpu) || __vmx_guest_state_valid(vcpu);
}

void dump_vmcs(struct kvm_vcpu *vcpu);

#endif /* __KVM_X86_VMX_H */