*/
struct loaded_vmcs {
struct vmcs *vmcs;
+ struct vmcs *shadow_vmcs;
int cpu;
int launched;
struct list_head loaded_vmcss_on_cpu_link;
* memory during VMXOFF, VMCLEAR, VMPTRLD.
*/
struct vmcs12 *cached_vmcs12;
- struct vmcs *current_shadow_vmcs;
/*
* Indicates if the shadow vmcs must be updated with the
* data held by vmcs12
*/
/* vmcs02_list cache of VMCSs recently used to run L2 guests */
struct list_head vmcs02_pool;
int vmcs02_num;
- u64 vmcs01_tsc_offset;
bool change_vmcs01_virtual_x2apic_mode;
/* L2 must run next, and mustn't decide to exit to L1. */
bool nested_run_pending;
static unsigned long *vmx_io_bitmap_b;
static unsigned long *vmx_msr_bitmap_legacy;
static unsigned long *vmx_msr_bitmap_longmode;
+static unsigned long *vmx_msr_bitmap_legacy_x2apic_apicv;
+static unsigned long *vmx_msr_bitmap_longmode_x2apic_apicv;
static unsigned long *vmx_msr_bitmap_legacy_x2apic;
static unsigned long *vmx_msr_bitmap_longmode_x2apic;
-static unsigned long *vmx_msr_bitmap_legacy_x2apic_apicv_inactive;
-static unsigned long *vmx_msr_bitmap_longmode_x2apic_apicv_inactive;
static unsigned long *vmx_vmread_bitmap;
static unsigned long *vmx_vmwrite_bitmap;
static inline void loaded_vmcs_init(struct loaded_vmcs *loaded_vmcs)
{
vmcs_clear(loaded_vmcs->vmcs);
+ if (loaded_vmcs->shadow_vmcs && loaded_vmcs->launched)
+ vmcs_clear(loaded_vmcs->shadow_vmcs);
loaded_vmcs->cpu = -1;
loaded_vmcs->launched = 0;
}
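/*
 * Illustrative sketch (editorial, not part of this patch): vmcs_clear()
 * wraps the VMCLEAR instruction, which takes the physical address of a
 * VMCS region and flushes any processor-side cached state for it.  A
 * shadow VMCS that has been launched on this CPU caches state just like
 * an ordinary VMCS, which is why it must pass through the same VMCLEAR
 * before the loaded_vmcs is torn down or migrated to another CPU.
 */
static void vmcs_clear_sketch(struct vmcs *vmcs)
{
	u64 phys_addr = __pa(vmcs);	/* VMCLEAR takes a physical address */
	u8 error;

	asm volatile ("vmclear %1; setna %0"
		      : "=qm"(error) : "m"(phys_addr)
		      : "cc", "memory");
	if (error)
		printk(KERN_ERR "kvm: vmclear fail: %p/%llx\n",
		       vmcs, phys_addr);
}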
SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE)) {
if (enable_apicv && kvm_vcpu_apicv_active(vcpu)) {
if (is_long_mode(vcpu))
- msr_bitmap = vmx_msr_bitmap_longmode_x2apic;
+ msr_bitmap = vmx_msr_bitmap_longmode_x2apic_apicv;
else
- msr_bitmap = vmx_msr_bitmap_legacy_x2apic;
+ msr_bitmap = vmx_msr_bitmap_legacy_x2apic_apicv;
} else {
if (is_long_mode(vcpu))
- msr_bitmap = vmx_msr_bitmap_longmode_x2apic_apicv_inactive;
+ msr_bitmap = vmx_msr_bitmap_longmode_x2apic;
else
- msr_bitmap = vmx_msr_bitmap_legacy_x2apic_apicv_inactive;
+ msr_bitmap = vmx_msr_bitmap_legacy_x2apic;
}
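/*
 * Editorial summary of the selection above: four bitmaps cover the
 * {legacy, long mode} x {APICv active, APICv inactive} combinations.
 *
 *                    APICv active                          APICv inactive
 *  legacy (32-bit):  vmx_msr_bitmap_legacy_x2apic_apicv    vmx_msr_bitmap_legacy_x2apic
 *  long mode:        vmx_msr_bitmap_longmode_x2apic_apicv  vmx_msr_bitmap_longmode_x2apic
 */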
} else {
if (is_long_mode(vcpu))
return kvm_scale_tsc(vcpu, host_tsc) + tsc_offset;
}
-/*
- * Like guest_read_tsc, but always returns L1's notion of the timestamp
- * counter, even if a nested guest (L2) is currently running.
- */
-static u64 vmx_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
-{
- u64 tsc_offset;
-
- tsc_offset = is_guest_mode(vcpu) ?
- to_vmx(vcpu)->nested.vmcs01_tsc_offset :
- vmcs_read64(TSC_OFFSET);
- return host_tsc + tsc_offset;
-}
-
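/*
 * Sketch of the generic replacement (assumed to live in common x86
 * code): once vcpu->arch.tsc_offset always tracks L1's offset, no
 * VMX-specific callback is needed to recover L1's view of the TSC.
 */
u64 kvm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
{
	return kvm_scale_tsc(vcpu, host_tsc) + vcpu->arch.tsc_offset;
}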
/*
* writes 'offset' into the guest's timestamp counter offset register
*/
* to the newly set TSC to get L2's TSC.
*/
struct vmcs12 *vmcs12;
- to_vmx(vcpu)->nested.vmcs01_tsc_offset = offset;
/* recalculate vmcs02.TSC_OFFSET: */
vmcs12 = get_vmcs12(vcpu);
vmcs_write64(TSC_OFFSET, offset +
}
}
-static void vmx_adjust_tsc_offset_guest(struct kvm_vcpu *vcpu, s64 adjustment)
-{
- u64 offset = vmcs_read64(TSC_OFFSET);
-
- vmcs_write64(TSC_OFFSET, offset + adjustment);
- if (is_guest_mode(vcpu)) {
- /* Even when running L2, the adjustment needs to apply to L1 */
- to_vmx(vcpu)->nested.vmcs01_tsc_offset += adjustment;
- } else
- trace_kvm_write_tsc_offset(vcpu->vcpu_id, offset,
- offset + adjustment);
-}
-
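/*
 * Editorial note: the adjustment path likewise moves to common code.  A
 * minimal sketch, assuming a kvm_vcpu_write_tsc_offset() helper that
 * stores into vcpu->arch.tsc_offset and calls ->write_tsc_offset():
 *
 *	kvm_vcpu_write_tsc_offset(vcpu, vcpu->arch.tsc_offset + adjustment);
 *
 * so the vendor module no longer has to remember vmcs01's offset itself.
 */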
static bool guest_cpuid_has_vmx(struct kvm_vcpu *vcpu)
{
struct kvm_cpuid_entry2 *best = kvm_find_cpuid_entry(vcpu, 1, 0);
loaded_vmcs_clear(loaded_vmcs);
free_vmcs(loaded_vmcs->vmcs);
loaded_vmcs->vmcs = NULL;
+ WARN_ON(loaded_vmcs->shadow_vmcs != NULL);
}
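/*
 * Editorial note: only vmcs01 ever owns a shadow VMCS; vmcs02s are
 * allocated with shadow_vmcs = NULL.  free_nested() is expected to have
 * cleared and freed vmcs01.shadow_vmcs already, so a non-NULL pointer
 * here would mean a leak, hence the WARN_ON.
 */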
static void free_kvm_area(void)
static void vmx_enable_intercept_msr_read_x2apic(u32 msr, bool apicv_active)
{
if (apicv_active) {
- __vmx_enable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic,
+ __vmx_enable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic_apicv,
msr, MSR_TYPE_R);
- __vmx_enable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic,
+ __vmx_enable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic_apicv,
msr, MSR_TYPE_R);
} else {
- __vmx_enable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic_apicv_inactive,
+ __vmx_enable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic,
msr, MSR_TYPE_R);
- __vmx_enable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic_apicv_inactive,
+ __vmx_enable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic,
msr, MSR_TYPE_R);
}
}
static void vmx_disable_intercept_msr_read_x2apic(u32 msr, bool apicv_active)
{
if (apicv_active) {
- __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic,
+ __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic_apicv,
msr, MSR_TYPE_R);
- __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic,
+ __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic_apicv,
msr, MSR_TYPE_R);
} else {
- __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic_apicv_inactive,
+ __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic,
msr, MSR_TYPE_R);
- __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic_apicv_inactive,
+ __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic,
msr, MSR_TYPE_R);
}
}
static void vmx_disable_intercept_msr_write_x2apic(u32 msr, bool apicv_active)
{
if (apicv_active) {
- __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic,
+ __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic_apicv,
msr, MSR_TYPE_W);
- __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic,
+ __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic_apicv,
msr, MSR_TYPE_W);
} else {
- __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic_apicv_inactive,
+ __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic,
msr, MSR_TYPE_W);
- __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic_apicv_inactive,
+ __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic,
msr, MSR_TYPE_W);
}
}
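/*
 * Illustrative sketch (editorial): the __vmx_{en,dis}able_intercept_for_msr()
 * helpers used above toggle one bit per MSR in the 4K bitmap page.  The
 * hardware layout splits the page by MSR range and access type: reads of
 * MSRs 0x00000000-0x00001fff at byte offset 0x000, reads of
 * 0xc0000000-0xc0001fff at 0x400, and the corresponding writes at 0x800
 * and 0xc00.  A minimal "stop intercepting reads" helper:
 */
static void msr_intercept_disable_read_sketch(unsigned long *msr_bitmap, u32 msr)
{
	if (msr <= 0x1fff)
		__clear_bit(msr, msr_bitmap + 0x000 / sizeof(unsigned long));
	else if (msr >= 0xc0000000 && msr <= 0xc0001fff)
		__clear_bit(msr & 0x1fff,
			    msr_bitmap + 0x400 / sizeof(unsigned long));
}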
if (!vmx_msr_bitmap_legacy)
goto out1;
- vmx_msr_bitmap_legacy_x2apic =
+ vmx_msr_bitmap_legacy_x2apic_apicv =
(unsigned long *)__get_free_page(GFP_KERNEL);
- if (!vmx_msr_bitmap_legacy_x2apic)
+ if (!vmx_msr_bitmap_legacy_x2apic_apicv)
goto out2;
- vmx_msr_bitmap_legacy_x2apic_apicv_inactive =
+ vmx_msr_bitmap_legacy_x2apic =
(unsigned long *)__get_free_page(GFP_KERNEL);
- if (!vmx_msr_bitmap_legacy_x2apic_apicv_inactive)
+ if (!vmx_msr_bitmap_legacy_x2apic)
goto out3;
vmx_msr_bitmap_longmode = (unsigned long *)__get_free_page(GFP_KERNEL);
if (!vmx_msr_bitmap_longmode)
goto out4;
- vmx_msr_bitmap_longmode_x2apic =
+ vmx_msr_bitmap_longmode_x2apic_apicv =
(unsigned long *)__get_free_page(GFP_KERNEL);
- if (!vmx_msr_bitmap_longmode_x2apic)
+ if (!vmx_msr_bitmap_longmode_x2apic_apicv)
goto out5;
- vmx_msr_bitmap_longmode_x2apic_apicv_inactive =
+ vmx_msr_bitmap_longmode_x2apic =
(unsigned long *)__get_free_page(GFP_KERNEL);
- if (!vmx_msr_bitmap_longmode_x2apic_apicv_inactive)
+ if (!vmx_msr_bitmap_longmode_x2apic)
goto out6;
vmx_vmread_bitmap = (unsigned long *)__get_free_page(GFP_KERNEL);
vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false);
vmx_disable_intercept_for_msr(MSR_IA32_BNDCFGS, true);
- memcpy(vmx_msr_bitmap_legacy_x2apic,
+ memcpy(vmx_msr_bitmap_legacy_x2apic_apicv,
vmx_msr_bitmap_legacy, PAGE_SIZE);
- memcpy(vmx_msr_bitmap_longmode_x2apic,
+ memcpy(vmx_msr_bitmap_longmode_x2apic_apicv,
vmx_msr_bitmap_longmode, PAGE_SIZE);
- memcpy(vmx_msr_bitmap_legacy_x2apic_apicv_inactive,
+ memcpy(vmx_msr_bitmap_legacy_x2apic,
vmx_msr_bitmap_legacy, PAGE_SIZE);
- memcpy(vmx_msr_bitmap_longmode_x2apic_apicv_inactive,
+ memcpy(vmx_msr_bitmap_longmode_x2apic,
vmx_msr_bitmap_longmode, PAGE_SIZE);
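/*
 * Editorial note: each x2APIC bitmap starts as a byte-for-byte copy of
 * the corresponding default bitmap, so it inherits the stock intercepts;
 * the vmx_*_intercept_msr_*_x2apic() helpers above then only flip bits
 * in the x2APIC MSR range on top of that baseline.
 */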
set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */
out8:
free_page((unsigned long)vmx_vmread_bitmap);
out7:
- free_page((unsigned long)vmx_msr_bitmap_longmode_x2apic_apicv_inactive);
-out6:
free_page((unsigned long)vmx_msr_bitmap_longmode_x2apic);
+out6:
+ free_page((unsigned long)vmx_msr_bitmap_longmode_x2apic_apicv);
out5:
free_page((unsigned long)vmx_msr_bitmap_longmode);
out4:
- free_page((unsigned long)vmx_msr_bitmap_legacy_x2apic_apicv_inactive);
-out3:
free_page((unsigned long)vmx_msr_bitmap_legacy_x2apic);
+out3:
+ free_page((unsigned long)vmx_msr_bitmap_legacy_x2apic_apicv);
out2:
free_page((unsigned long)vmx_msr_bitmap_legacy);
out1:
static __exit void hardware_unsetup(void)
{
+ free_page((unsigned long)vmx_msr_bitmap_legacy_x2apic_apicv);
free_page((unsigned long)vmx_msr_bitmap_legacy_x2apic);
- free_page((unsigned long)vmx_msr_bitmap_legacy_x2apic_apicv_inactive);
+ free_page((unsigned long)vmx_msr_bitmap_longmode_x2apic_apicv);
free_page((unsigned long)vmx_msr_bitmap_longmode_x2apic);
- free_page((unsigned long)vmx_msr_bitmap_longmode_x2apic_apicv_inactive);
free_page((unsigned long)vmx_msr_bitmap_legacy);
free_page((unsigned long)vmx_msr_bitmap_longmode);
free_page((unsigned long)vmx_io_bitmap_b);
if (!item)
return NULL;
item->vmcs02.vmcs = alloc_vmcs();
+ item->vmcs02.shadow_vmcs = NULL;
if (!item->vmcs02.vmcs) {
kfree(item);
return NULL;
shadow_vmcs->revision_id |= (1u << 31);
/* init shadow vmcs */
vmcs_clear(shadow_vmcs);
- vmx->nested.current_shadow_vmcs = shadow_vmcs;
+ vmx->vmcs01.shadow_vmcs = shadow_vmcs;
}
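/*
 * Editorial note: bit 31 of the revision id is what marks a VMCS region
 * as a *shadow* VMCS.  With SECONDARY_EXEC_SHADOW_VMCS enabled and the
 * VMCS link pointer aimed at this page, guest VMREAD/VMWRITE access it
 * directly instead of causing VM exits.  Hanging the pointer off
 * vmcs01.shadow_vmcs ties its lifetime to the loaded_vmcs that uses it.
 */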
INIT_LIST_HEAD(&(vmx->nested.vmcs02_pool));
free_page((unsigned long)vmx->nested.msr_bitmap);
vmx->nested.msr_bitmap = NULL;
}
- if (enable_shadow_vmcs)
- free_vmcs(vmx->nested.current_shadow_vmcs);
+ if (enable_shadow_vmcs) {
+ vmcs_clear(vmx->vmcs01.shadow_vmcs);
+ free_vmcs(vmx->vmcs01.shadow_vmcs);
+ vmx->vmcs01.shadow_vmcs = NULL;
+ }
kfree(vmx->nested.cached_vmcs12);
/* Unpin physical memory we referred to in current vmcs02 */
if (vmx->nested.apic_access_page) {
int i;
unsigned long field;
u64 field_value;
- struct vmcs *shadow_vmcs = vmx->nested.current_shadow_vmcs;
+ struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;
const unsigned long *fields = shadow_read_write_fields;
const int num_fields = max_shadow_read_write_fields;
int i, q;
unsigned long field;
u64 field_value = 0;
- struct vmcs *shadow_vmcs = vmx->nested.current_shadow_vmcs;
+ struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;
vmcs_load(shadow_vmcs);
vmcs_set_bits(SECONDARY_VM_EXEC_CONTROL,
SECONDARY_EXEC_SHADOW_VMCS);
vmcs_write64(VMCS_LINK_POINTER,
- __pa(vmx->nested.current_shadow_vmcs));
+ __pa(vmx->vmcs01.shadow_vmcs));
vmx->nested.sync_shadow_vmcs = true;
}
}
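/*
 * Illustrative sketch (editorial) of the copy driven by the field tables
 * above; the real loops switch on the field width, and vmcs12_write_any()
 * is the accessor assumed here.
 */
static void copy_shadow_to_vmcs12_sketch(struct vcpu_vmx *vmx)
{
	const unsigned long *fields = shadow_read_write_fields;
	const int num_fields = max_shadow_read_write_fields;
	unsigned long field;
	u64 field_value;
	int i;

	vmcs_load(vmx->vmcs01.shadow_vmcs);	/* make the shadow current */
	for (i = 0; i < num_fields; i++) {
		field = fields[i];
		field_value = vmcs_read64(field);	/* simplified: real code is width-aware */
		vmcs12_write_any(&vmx->vcpu, field, field_value);
	}
	vmcs_load(vmx->loaded_vmcs->vmcs);	/* back to the ordinary VMCS */
}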
types = (vmx->nested.nested_vmx_ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6;
- if (!(types & (1UL << type))) {
+ if (type >= 32 || !(types & (1 << type))) {
nested_vmx_failValid(vcpu,
VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
skip_emulated_instruction(vcpu);
types = (vmx->nested.nested_vmx_vpid_caps >> 8) & 0x7;
- if (!(types & (1UL << type))) {
+ if (type >= 32 || !(types & (1 << type))) {
nested_vmx_failValid(vcpu,
VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
skip_emulated_instruction(vcpu);
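/*
 * Editorial note: 'type' comes straight from a guest-controlled register
 * operand, and '1 << type' is undefined in C once type >= 32, so the
 * explicit range check is what keeps a malicious value from turning the
 * capability test into an out-of-range shift.
 */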
vmx->loaded_vmcs = &vmx->vmcs01;
vmx->loaded_vmcs->vmcs = alloc_vmcs();
+ vmx->loaded_vmcs->shadow_vmcs = NULL;
if (!vmx->loaded_vmcs->vmcs)
goto free_msrs;
if (!vmm_exclusive)
if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING)
vmcs_write64(TSC_OFFSET,
- vmx->nested.vmcs01_tsc_offset + vmcs12->tsc_offset);
+ vcpu->arch.tsc_offset + vmcs12->tsc_offset);
else
- vmcs_write64(TSC_OFFSET, vmx->nested.vmcs01_tsc_offset);
+ vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
if (kvm_has_tsc_control)
decache_tsc_multiplier(vmx);
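/*
 * Editorial note: the resulting guest-visible TSC values are
 *
 *	L1: tsc = host_tsc + vcpu->arch.tsc_offset
 *	L2: tsc = host_tsc + vcpu->arch.tsc_offset + vmcs12->tsc_offset
 *
 * with the second line applying only while vmcs12 sets
 * CPU_BASED_USE_TSC_OFFSETING; TSC scaling is handled separately via
 * decache_tsc_multiplier().
 */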
enter_guest_mode(vcpu);
- vmx->nested.vmcs01_tsc_offset = vmcs_read64(TSC_OFFSET);
-
if (!(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS))
vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+ u32 vm_inst_error = 0;
/* trying to cancel vmlaunch/vmresume is a bug */
WARN_ON_ONCE(vmx->nested.nested_run_pending);
vmcs12->vm_exit_msr_store_count))
nested_vmx_abort(vcpu, VMX_ABORT_SAVE_GUEST_MSR_FAIL);
+ if (unlikely(vmx->fail))
+ vm_inst_error = vmcs_read32(VM_INSTRUCTION_ERROR);
+
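/*
 * Editorial note: VM_INSTRUCTION_ERROR is a field of the *current* VMCS,
 * so it must be captured here while vmcs02 is still loaded; after
 * vmx_load_vmcs01() the same vmcs_read32() would return whatever error
 * code vmcs01 last recorded.
 */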
vmx_load_vmcs01(vcpu);
if ((exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT)
load_vmcs12_host_state(vcpu, vmcs12);
/* Update any VMCS fields that might have changed while L2 ran */
- vmcs_write64(TSC_OFFSET, vmx->nested.vmcs01_tsc_offset);
+ vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
if (vmx->hv_deadline_tsc == -1)
vmcs_clear_bits(PIN_BASED_VM_EXEC_CONTROL,
PIN_BASED_VMX_PREEMPTION_TIMER);
*/
if (unlikely(vmx->fail)) {
vmx->fail = 0;
- nested_vmx_failValid(vcpu, vmcs_read32(VM_INSTRUCTION_ERROR));
+ nested_vmx_failValid(vcpu, vm_inst_error);
} else
nested_vmx_succeed(vcpu);
if (enable_shadow_vmcs)
.has_wbinvd_exit = cpu_has_vmx_wbinvd_exit,
.write_tsc_offset = vmx_write_tsc_offset,
- .adjust_tsc_offset_guest = vmx_adjust_tsc_offset_guest,
- .read_l1_tsc = vmx_read_l1_tsc,
.set_tdp_cr3 = vmx_set_cr3,