KVM: VMX: Update necessary state when guest enters long mode
author Amit Shah <amit.shah@redhat.com>
Fri, 20 Feb 2009 17:23:37 +0000 (22:53 +0530)
committer Avi Kivity <avi@redhat.com>
Tue, 24 Mar 2009 09:03:13 +0000 (11:03 +0200)
setup_msrs() should be called when entering long mode so that the
shadow MSR state for the 64-bit guest is set up.
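For context, a rough sketch of what setup_msrs() does in the vmx.c of this
era; the helper names (__find_msr_index(), move_msr_up()) and the exact MSR
list are recalled from the surrounding code, not taken from this diff, and
the body is simplified:

static void setup_msrs(struct vcpu_vmx *vmx)
{
	int save_nmsrs = 0;
#ifdef CONFIG_X86_64
	/*
	 * A long-mode guest needs the syscall MSRs and the kernel GS base
	 * saved/restored around guest entry/exit; move them to the front
	 * of the guest_msrs array so the entry/exit path handles them.
	 */
	if (is_long_mode(&vmx->vcpu)) {
		int index;

		index = __find_msr_index(vmx, MSR_LSTAR);
		if (index >= 0)
			move_msr_up(vmx, index, save_nmsrs++);
		index = __find_msr_index(vmx, MSR_KERNEL_GS_BASE);
		if (index >= 0)
			move_msr_up(vmx, index, save_nmsrs++);
		/* MSR_CSTAR and MSR_SYSCALL_MASK are handled the same way. */
	}
#endif
	vmx->save_nmsrs = save_nmsrs;
}

This is why skipping setup_msrs() on the long-mode transition leaves the
64-bit guest's MSR save list stale.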

Using vmx_set_efer() in enter_lmode() removes some duplicated code
and also ensures we call setup_msrs(). We can safely pass the current
value of shadow_efer to vmx_set_efer(), as no other EFER bits change
while long mode is being enabled (the guest first sets EFER.LME, then
sets CR0.PG, which causes a vmexit in which we activate long mode).
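A sketch of the host-side path referred to above, adapted from the
vmx_set_cr0() of this era (not part of this diff; surrounding lines are
elided): the guest's CR0 write exits to KVM, and if EFER.LME is already set
and paging is being turned on, enter_lmode() runs, which with this patch
ends in vmx_set_efer().

static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
	/* ... */
#ifdef CONFIG_X86_64
	if (vcpu->arch.shadow_efer & EFER_LME) {
		/* CR0.PG being turned on with EFER.LME set: enter long mode. */
		if (!is_paging(vcpu) && (cr0 & X86_CR0_PG))
			enter_lmode(vcpu);
		/* CR0.PG being turned off: leave long mode. */
		if (is_paging(vcpu) && !(cr0 & X86_CR0_PG))
			exit_lmode(vcpu);
	}
#endif
	/* ... */
}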

With this fix, is_long_mode() can check for EFER.LMA being set instead
of EFER.LME, and commit 5e23049e86dd298b72e206b420513dbc3a240cd9 can be
reverted.
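With EFER.LMA now kept up to date in shadow_efer by vmx_set_efer(),
is_long_mode() can be the straightforward check below (shown as it reads
in the KVM x86 headers of this era, from memory rather than from this diff):

static inline int is_long_mode(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	return vcpu->arch.shadow_efer & EFER_LMA;
#else
	return 0;
#endif
}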

Signed-off-by: Amit Shah <amit.shah@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index cb27ffccf466a8fb446d9366c5f81cc86444a4f1..48063a0aa24369cd24306c7754e752a36ab15d4e 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1430,6 +1430,29 @@ continue_rmode:
        init_rmode(vcpu->kvm);
 }
 
+static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
+{
+       struct vcpu_vmx *vmx = to_vmx(vcpu);
+       struct kvm_msr_entry *msr = find_msr_entry(vmx, MSR_EFER);
+
+       vcpu->arch.shadow_efer = efer;
+       if (!msr)
+               return;
+       if (efer & EFER_LMA) {
+               vmcs_write32(VM_ENTRY_CONTROLS,
+                            vmcs_read32(VM_ENTRY_CONTROLS) |
+                            VM_ENTRY_IA32E_MODE);
+               msr->data = efer;
+       } else {
+               vmcs_write32(VM_ENTRY_CONTROLS,
+                            vmcs_read32(VM_ENTRY_CONTROLS) &
+                            ~VM_ENTRY_IA32E_MODE);
+
+               msr->data = efer & ~EFER_LME;
+       }
+       setup_msrs(vmx);
+}
+
 #ifdef CONFIG_X86_64
 
 static void enter_lmode(struct kvm_vcpu *vcpu)
@@ -1444,13 +1467,8 @@ static void enter_lmode(struct kvm_vcpu *vcpu)
                             (guest_tr_ar & ~AR_TYPE_MASK)
                             | AR_TYPE_BUSY_64_TSS);
        }
-
        vcpu->arch.shadow_efer |= EFER_LMA;
-
-       find_msr_entry(to_vmx(vcpu), MSR_EFER)->data |= EFER_LMA | EFER_LME;
-       vmcs_write32(VM_ENTRY_CONTROLS,
-                    vmcs_read32(VM_ENTRY_CONTROLS)
-                    | VM_ENTRY_IA32E_MODE);
+       vmx_set_efer(vcpu, vcpu->arch.shadow_efer);
 }
 
 static void exit_lmode(struct kvm_vcpu *vcpu)
@@ -1609,30 +1627,6 @@ static void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
        vmcs_writel(GUEST_CR4, hw_cr4);
 }
 
-static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
-{
-       struct vcpu_vmx *vmx = to_vmx(vcpu);
-       struct kvm_msr_entry *msr = find_msr_entry(vmx, MSR_EFER);
-
-       vcpu->arch.shadow_efer = efer;
-       if (!msr)
-               return;
-       if (efer & EFER_LMA) {
-               vmcs_write32(VM_ENTRY_CONTROLS,
-                                    vmcs_read32(VM_ENTRY_CONTROLS) |
-                                    VM_ENTRY_IA32E_MODE);
-               msr->data = efer;
-
-       } else {
-               vmcs_write32(VM_ENTRY_CONTROLS,
-                                    vmcs_read32(VM_ENTRY_CONTROLS) &
-                                    ~VM_ENTRY_IA32E_MODE);
-
-               msr->data = efer & ~EFER_LME;
-       }
-       setup_msrs(vmx);
-}
-
 static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg)
 {
        struct kvm_vmx_segment_field *sf = &kvm_vmx_segment_fields[seg];