/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_VMX_H
#define __KVM_X86_VMX_H

#include <linux/kvm_host.h>

#include <asm/kvm.h>
#include <asm/intel_pt.h>

#include "capabilities.h"
#include "kvm_cache_regs.h"
#include "ops.h"
#include "vmcs.h"

extern const u32 vmx_msr_index[];

#define X2APIC_MSR(r) (APIC_BASE_MSR + ((r) >> 4))
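/*
 * For illustration (not part of the original header): X2APIC_MSR() maps an
 * xAPIC MMIO register offset to its x2APIC MSR number, e.g. the TPR at
 * offset 0x80 maps to MSR 0x808:
 *
 *	X2APIC_MSR(APIC_TASKPRI)	== 0x800 + (0x80 >> 4) == 0x808
 */
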
#ifdef CONFIG_X86_64
#define NR_SHARED_MSRS	7
#else
#define NR_SHARED_MSRS	4
#endif

#define NR_LOADSTORE_MSRS	8

struct vmx_msrs {
	unsigned int		nr;
	struct vmx_msr_entry	val[NR_LOADSTORE_MSRS];
};

struct shared_msr_entry {
	unsigned index;
	u64 data;
	u64 mask;
};

enum segment_cache_field {
	SEG_FIELD_SEL = 0,
	SEG_FIELD_BASE = 1,
	SEG_FIELD_LIMIT = 2,
	SEG_FIELD_AR = 3,

	SEG_FIELD_NR = 4
};

/* Posted-Interrupt Descriptor */
struct pi_desc {
	u32 pir[8];     /* Posted interrupt requested */
	union {
		struct {
				/* bit 256 - Outstanding Notification */
			u16	on	: 1,
				/* bit 257 - Suppress Notification */
				sn	: 1,
				/* bit 271:258 - Reserved */
				rsvd_1	: 14;
				/* bit 279:272 - Notification Vector */
			u8	nv;
				/* bit 287:280 - Reserved */
			u8	rsvd_2;
				/* bit 319:288 - Notification Destination */
			u32	ndst;
		};
		u64 control;
	};
	u32 rsvd[6];
} __aligned(64);
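
/*
 * Illustrative posting flow (a sketch mirroring the pi_* helpers declared
 * below; 'vector' is a hypothetical pending interrupt vector):
 *
 *	pi_test_and_set_pir(vector, pi_desc);
 *	if (!pi_test_and_set_on(pi_desc))
 *		;	// ON was previously clear: send the notification event
 */
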
#define RTIT_ADDR_RANGE		4

struct pt_ctx {
	u64 ctl;
	u64 status;
	u64 output_base;
	u64 output_mask;
	u64 cr3_match;
	u64 addr_a[RTIT_ADDR_RANGE];
	u64 addr_b[RTIT_ADDR_RANGE];
};

struct pt_desc {
	u64 ctl_bitmask;
	u32 addr_range;
	u32 caps[PT_CPUID_REGS_NUM * PT_CPUID_LEAVES];
	struct pt_ctx host;
	struct pt_ctx guest;
};

/*
 * The nested_vmx structure is part of vcpu_vmx, and holds information we need
 * for correct emulation of VMX (i.e., nested VMX) on this vcpu.
 */
struct nested_vmx {
	/* Has the level1 guest done vmxon? */
	bool vmxon;
	gpa_t vmxon_ptr;
	bool pml_full;

	/* The guest-physical address of the current VMCS L1 keeps for L2 */
	gpa_t current_vmptr;
	/*
	 * Cache of the guest's VMCS, existing outside of guest memory.
	 * Loaded from guest memory during VMPTRLD. Flushed to guest
	 * memory during VMCLEAR and VMPTRLD.
	 */
	struct vmcs12 *cached_vmcs12;
	/*
	 * Cache of the guest's shadow VMCS, existing outside of guest
	 * memory. Loaded from guest memory during VM entry. Flushed
	 * to guest memory during VM exit.
	 */
	struct vmcs12 *cached_shadow_vmcs12;
	/*
	 * Indicates if the shadow vmcs or enlightened vmcs must be updated
	 * with the data held by struct vmcs12.
	 */
	bool need_vmcs12_to_shadow_sync;
	/*
	 * Indicates lazily loaded guest state has not yet been decached from
	 * vmcs02.
	 */
	bool need_sync_vmcs02_to_vmcs12_rare;
	/*
	 * vmcs02 has been initialized, i.e. state that is constant for
	 * vmcs02 has been written to the backing VMCS. Initialization
	 * is delayed until L1 actually attempts to run a nested VM.
	 */
	bool vmcs02_initialized;
	bool change_vmcs01_virtual_apic_mode;
	bool reload_vmcs01_apic_access_page;
	/*
	 * Enlightened VMCS has been enabled. It does not mean that L1 has to
	 * use it. However, VMX features available to L1 will be limited based
	 * on what the enlightened VMCS supports.
	 */
	bool enlightened_vmcs_enabled;
	/* L2 must run next, and mustn't decide to exit to L1. */
	bool nested_run_pending;
	/* Pending MTF VM-exit into L1. */
	bool mtf_pending;

	struct loaded_vmcs vmcs02;
	/*
	 * Guest pages referred to in the vmcs02 with host-physical
	 * pointers, so we must keep them pinned while L2 runs.
	 */
	struct page *apic_access_page;
	struct kvm_host_map virtual_apic_map;
	struct kvm_host_map pi_desc_map;

	struct kvm_host_map msr_bitmap_map;

	struct pi_desc *pi_desc;
	bool pi_pending;
	u16 posted_intr_nv;
	struct hrtimer preemption_timer;
	u64 preemption_timer_deadline;
	bool has_preemption_timer_deadline;
	bool preemption_timer_expired;
	/* to migrate it to L2 if VM_ENTRY_LOAD_DEBUG_CONTROLS is off */
	u64 vmcs01_debugctl;
	u64 vmcs01_guest_bndcfgs;
	/* to migrate it to L1 if L2 writes to L1's CR8 directly */
	int l1_tpr_threshold;

	u16 vpid02;
	u16 last_vpid;

	struct nested_vmx_msrs msrs;
	/* SMM related state */
	struct {
		/* in VMX operation on SMM entry? */
		bool vmxon;
		/* in guest mode on SMM entry? */
		bool guest_mode;
	} smm;
	gpa_t hv_evmcs_vmptr;
	struct kvm_host_map hv_evmcs_map;
	struct hv_enlightened_vmcs *hv_evmcs;
};

struct vcpu_vmx {
	struct kvm_vcpu       vcpu;
	u8                    fail;
	u8		      msr_bitmap_mode;
	/*
	 * If true, host state has been stored in vmx->loaded_vmcs for
	 * the CPU registers that only need to be switched when transitioning
	 * to/from the kernel, and the registers have been loaded with guest
	 * values. If false, host state is loaded in the CPU registers
	 * and vmx->loaded_vmcs->host_state is invalid.
	 */
	bool guest_state_loaded;
	unsigned long exit_qualification;
	u32 exit_intr_info;
	u32 idt_vectoring_info;
	ulong rflags;
	struct shared_msr_entry guest_msrs[NR_SHARED_MSRS];
	int nmsrs;
	int save_nmsrs;
	bool guest_msrs_ready;
#ifdef CONFIG_X86_64
	u64 msr_host_kernel_gs_base;
	u64 msr_guest_kernel_gs_base;
#endif
	u64 spec_ctrl;
	u32 msr_ia32_umwait_control;

	u32 secondary_exec_control;
	/*
	 * loaded_vmcs points to the VMCS currently used in this vcpu. For a
	 * non-nested (L1) guest, it always points to vmcs01. For a nested
	 * guest (L2), it points to a different VMCS.
	 */
	struct loaded_vmcs vmcs01;
	struct loaded_vmcs *loaded_vmcs;
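	/*
	 * E.g. on a nested VM-entry the nested code switches loaded_vmcs
	 * from &vmx->vmcs01 to &vmx->nested.vmcs02, and back on VM-exit.
	 */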
	struct msr_autoload {
		struct vmx_msrs guest;
		struct vmx_msrs host;
	} msr_autoload;

	struct msr_autostore {
		struct vmx_msrs guest;
	} msr_autostore;
	struct {
		int vm86_active;
		ulong save_rflags;
		struct kvm_segment segs[8];
	} rmode;

	struct {
		u32 bitmask; /* 4 bits per segment (1 bit per field) */
		struct kvm_save_segment {
			u16 selector;
			unsigned long base;
			u32 limit;
			u32 ar;
		} seg[8];
	} segment_cache;
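	/*
	 * Validity sketch for the cache above (mirrors its users in vmx.c):
	 * a field of segment 'seg' is valid iff bit
	 * (seg * SEG_FIELD_NR + field) is set in segment_cache.bitmask.
	 */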
	int vpid;
	bool emulation_required;
	u32 exit_reason;

	/* Posted interrupt descriptor */
	struct pi_desc pi_desc;

	/* Support for a guest hypervisor (nested VMX) */
	struct nested_vmx nested;
	/* Dynamic PLE window. */
	unsigned int ple_window;
	bool ple_window_dirty;

	bool req_immediate_exit;
	/* Support for PML */
#define PML_ENTITY_NUM		512
	struct page *pml_pg;

	/* apic deadline value in host tsc */
	u64 hv_deadline_tsc;

	u64 current_tsc_ratio;
	unsigned long host_debugctlmsr;
	/*
	 * Only bits masked by msr_ia32_feature_control_valid_bits can be set in
	 * msr_ia32_feature_control. FEAT_CTL_LOCKED is always included
	 * in msr_ia32_feature_control_valid_bits.
	 */
	u64 msr_ia32_feature_control;
	u64 msr_ia32_feature_control_valid_bits;
	u64 ept_pointer;

	struct pt_desc pt_desc;
};

enum ept_pointers_status {
	EPT_POINTERS_CHECK = 0,
	EPT_POINTERS_MATCH = 1,
	EPT_POINTERS_MISMATCH = 2
};

struct kvm_vmx {
	struct kvm kvm;

	unsigned int tss_addr;
	bool ept_identity_pagetable_done;
	gpa_t ept_identity_map_addr;

	enum ept_pointers_status ept_pointers_match;
	spinlock_t ept_pointer_lock;
};

bool nested_vmx_allowed(struct kvm_vcpu *vcpu);
void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu,
			struct loaded_vmcs *buddy);
int allocate_vpid(void);
void free_vpid(int vpid);
void vmx_set_constant_host_state(struct vcpu_vmx *vmx);
void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu);
void vmx_set_host_fs_gs(struct vmcs_host_state *host, u16 fs_sel, u16 gs_sel,
			unsigned long fs_base, unsigned long gs_base);
int vmx_get_cpl(struct kvm_vcpu *vcpu);
unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu);
void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu);
void vmx_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask);
void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer);
void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
void set_cr4_guest_host_mask(struct vcpu_vmx *vmx);
void ept_save_pdptrs(struct kvm_vcpu *vcpu);
void vmx_get_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg);
u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa,
		   int root_level);
void update_exception_bitmap(struct kvm_vcpu *vcpu);
void vmx_update_msr_bitmap(struct kvm_vcpu *vcpu);
bool vmx_nmi_blocked(struct kvm_vcpu *vcpu);
bool vmx_interrupt_blocked(struct kvm_vcpu *vcpu);
bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu);
void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked);
void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu);
struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr);
void pt_update_intercept_for_msr(struct vcpu_vmx *vmx);
void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp);
int vmx_find_msr_index(struct vmx_msrs *m, u32 msr);
int vmx_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
			      struct x86_exception *e);

#define POSTED_INTR_ON  0
#define POSTED_INTR_SN  1

static inline bool pi_test_and_set_on(struct pi_desc *pi_desc)
{
	return test_and_set_bit(POSTED_INTR_ON,
			(unsigned long *)&pi_desc->control);
}

static inline bool pi_test_and_clear_on(struct pi_desc *pi_desc)
{
	return test_and_clear_bit(POSTED_INTR_ON,
			(unsigned long *)&pi_desc->control);
}

static inline int pi_test_and_set_pir(int vector, struct pi_desc *pi_desc)
{
	return test_and_set_bit(vector, (unsigned long *)pi_desc->pir);
}

static inline bool pi_is_pir_empty(struct pi_desc *pi_desc)
{
	return bitmap_empty((unsigned long *)pi_desc->pir, NR_VECTORS);
}

static inline void pi_set_sn(struct pi_desc *pi_desc)
{
	set_bit(POSTED_INTR_SN,
		(unsigned long *)&pi_desc->control);
}

static inline void pi_set_on(struct pi_desc *pi_desc)
{
	set_bit(POSTED_INTR_ON,
		(unsigned long *)&pi_desc->control);
}

static inline void pi_clear_on(struct pi_desc *pi_desc)
{
	clear_bit(POSTED_INTR_ON,
		  (unsigned long *)&pi_desc->control);
}

static inline void pi_clear_sn(struct pi_desc *pi_desc)
{
	clear_bit(POSTED_INTR_SN,
		  (unsigned long *)&pi_desc->control);
}

static inline int pi_test_on(struct pi_desc *pi_desc)
{
	return test_bit(POSTED_INTR_ON,
			(unsigned long *)&pi_desc->control);
}

static inline int pi_test_sn(struct pi_desc *pi_desc)
{
	return test_bit(POSTED_INTR_SN,
			(unsigned long *)&pi_desc->control);
}

static inline u8 vmx_get_rvi(void)
{
	return vmcs_read16(GUEST_INTR_STATUS) & 0xff;
}

#define BUILD_CONTROLS_SHADOW(lname, uname)				    \
static inline void lname##_controls_set(struct vcpu_vmx *vmx, u32 val)	    \
{									    \
	if (vmx->loaded_vmcs->controls_shadow.lname != val) {		    \
		vmcs_write32(uname, val);				    \
		vmx->loaded_vmcs->controls_shadow.lname = val;		    \
	}								    \
}									    \
static inline u32 lname##_controls_get(struct vcpu_vmx *vmx)		    \
{									    \
	return vmx->loaded_vmcs->controls_shadow.lname;			    \
}									    \
static inline void lname##_controls_setbit(struct vcpu_vmx *vmx, u32 val)  \
{									    \
	lname##_controls_set(vmx, lname##_controls_get(vmx) | val);	    \
}									    \
static inline void lname##_controls_clearbit(struct vcpu_vmx *vmx, u32 val) \
{									    \
	lname##_controls_set(vmx, lname##_controls_get(vmx) & ~val);	    \
}
BUILD_CONTROLS_SHADOW(vm_entry, VM_ENTRY_CONTROLS)
BUILD_CONTROLS_SHADOW(vm_exit, VM_EXIT_CONTROLS)
BUILD_CONTROLS_SHADOW(pin, PIN_BASED_VM_EXEC_CONTROL)
BUILD_CONTROLS_SHADOW(exec, CPU_BASED_VM_EXEC_CONTROL)
BUILD_CONTROLS_SHADOW(secondary_exec, SECONDARY_VM_EXEC_CONTROL)
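
/*
 * Each instantiation above expands to a set/get/setbit/clearbit quartet,
 * e.g. BUILD_CONTROLS_SHADOW(vm_entry, VM_ENTRY_CONTROLS) yields (sketch):
 *
 *	vm_entry_controls_set(vmx, val);
 *	val = vm_entry_controls_get(vmx);
 *	vm_entry_controls_setbit(vmx, VM_ENTRY_LOAD_IA32_EFER);
 *	vm_entry_controls_clearbit(vmx, VM_ENTRY_LOAD_IA32_EFER);
 *
 * The shadow skips the VMWRITE when the cached value already matches.
 */
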
static inline void vmx_register_cache_reset(struct kvm_vcpu *vcpu)
{
	vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
				  | (1 << VCPU_EXREG_RFLAGS)
				  | (1 << VCPU_EXREG_PDPTR)
				  | (1 << VCPU_EXREG_SEGMENTS)
				  | (1 << VCPU_EXREG_CR0)
				  | (1 << VCPU_EXREG_CR3)
				  | (1 << VCPU_EXREG_CR4)
				  | (1 << VCPU_EXREG_EXIT_INFO_1)
				  | (1 << VCPU_EXREG_EXIT_INFO_2));
	vcpu->arch.regs_dirty = 0;
}
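
/*
 * Note: clearing regs_avail above forces the lazy accessors below, e.g.
 * vmx_get_exit_qual() and vmx_get_intr_info(), to re-read their VMCS
 * fields on next use.
 */
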
static inline u32 vmx_vmentry_ctrl(void)
{
	u32 vmentry_ctrl = vmcs_config.vmentry_ctrl;
	if (vmx_pt_mode_is_system())
		vmentry_ctrl &= ~(VM_ENTRY_PT_CONCEAL_PIP |
				  VM_ENTRY_LOAD_IA32_RTIT_CTL);
	/* Loading of EFER and PERF_GLOBAL_CTRL are toggled dynamically */
	return vmentry_ctrl &
		~(VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL | VM_ENTRY_LOAD_IA32_EFER);
}

static inline u32 vmx_vmexit_ctrl(void)
{
	u32 vmexit_ctrl = vmcs_config.vmexit_ctrl;
	if (vmx_pt_mode_is_system())
		vmexit_ctrl &= ~(VM_EXIT_PT_CONCEAL_PIP |
				 VM_EXIT_CLEAR_IA32_RTIT_CTL);
	/* Loading of EFER and PERF_GLOBAL_CTRL are toggled dynamically */
	return vmexit_ctrl &
		~(VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL | VM_EXIT_LOAD_IA32_EFER);
}

u32 vmx_exec_control(struct vcpu_vmx *vmx);
u32 vmx_pin_based_exec_ctrl(struct vcpu_vmx *vmx);

static inline struct kvm_vmx *to_kvm_vmx(struct kvm *kvm)
{
	return container_of(kvm, struct kvm_vmx, kvm);
}

static inline struct vcpu_vmx *to_vmx(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_vmx, vcpu);
}
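
/* Typical use (illustrative): struct vcpu_vmx *vmx = to_vmx(vcpu); */
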
static inline struct pi_desc *vcpu_to_pi_desc(struct kvm_vcpu *vcpu)
{
	return &(to_vmx(vcpu)->pi_desc);
}

static inline unsigned long vmx_get_exit_qual(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (!kvm_register_is_available(vcpu, VCPU_EXREG_EXIT_INFO_1)) {
		kvm_register_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_1);
		vmx->exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
	}
	return vmx->exit_qualification;
}

static inline u32 vmx_get_intr_info(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (!kvm_register_is_available(vcpu, VCPU_EXREG_EXIT_INFO_2)) {
		kvm_register_mark_available(vcpu, VCPU_EXREG_EXIT_INFO_2);
		vmx->exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
	}
	return vmx->exit_intr_info;
}
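
/*
 * Both accessors above use the same read-once pattern: the VMCS field is
 * read at most once per VM-exit and cached until vmx_register_cache_reset()
 * clears VCPU_EXREG_EXIT_INFO_1/2.
 */
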
struct vmcs *alloc_vmcs_cpu(bool shadow, int cpu, gfp_t flags);
void free_vmcs(struct vmcs *vmcs);
int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs);
void free_loaded_vmcs(struct loaded_vmcs *loaded_vmcs);
void loaded_vmcs_clear(struct loaded_vmcs *loaded_vmcs);

static inline struct vmcs *alloc_vmcs(bool shadow)
{
	return alloc_vmcs_cpu(shadow, raw_smp_processor_id(),
			      GFP_KERNEL_ACCOUNT);
}
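
/*
 * Illustrative call (not from the original header): allocate an ordinary,
 * non-shadow VMCS on the current CPU:
 *
 *	struct vmcs *vmcs = alloc_vmcs(false);
 */
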
static inline void decache_tsc_multiplier(struct vcpu_vmx *vmx)
{
	vmx->current_tsc_ratio = vmx->vcpu.arch.tsc_scaling_ratio;
	vmcs_write64(TSC_MULTIPLIER, vmx->current_tsc_ratio);
}

static inline bool vmx_has_waitpkg(struct vcpu_vmx *vmx)
{
	return vmx->secondary_exec_control &
		SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE;
}

static inline bool vmx_need_pf_intercept(struct kvm_vcpu *vcpu)
{
	return !enable_ept || cpuid_maxphyaddr(vcpu) < boot_cpu_data.x86_phys_bits;
}
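
/*
 * Explanatory note: without EPT, shadow paging requires intercepting every
 * #PF; and when the guest's MAXPHYADDR is smaller than the host's, #PF must
 * be intercepted so KVM can emulate the reserved-bit faults the hardware
 * would not generate on its own.
 */
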
void dump_vmcs(void);

#endif /* __KVM_X86_VMX_H */