KVM: VMX: Define new functions to wrap direct calls to asm code
author     Dongxiao Xu <dongxiao.xu@intel.com>
           Tue, 11 May 2010 10:29:38 +0000 (18:29 +0800)
committer  Avi Kivity <avi@redhat.com>
           Sun, 1 Aug 2010 07:35:41 +0000 (10:35 +0300)
Define vmcs_load() and kvm_cpu_vmxon() to avoid direct calls into asm
code. Also move the VMXE bit operation out of kvm_cpu_vmxoff().

Signed-off-by: Dongxiao Xu <dongxiao.xu@intel.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
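
Taken together, the hunks below leave the hardware enable/disable paths
looking roughly like this; a condensed sketch assembled from the diff
rather than copied verbatim, with unrelated details elided:

static int hardware_enable(void *garbage)
{
	/* ... FEATURE_CONTROL MSR checks elided ... */
	write_cr4(read_cr4() | X86_CR4_VMXE);	/* caller sets VMXE */
	kvm_cpu_vmxon(phys_addr);		/* new wrapper: VMXON only */
	ept_sync_global();
	/* ... */
}

static void hardware_disable(void *garbage)
{
	vmclear_local_vcpus();
	kvm_cpu_vmxoff();			/* wrapper: VMXOFF only */
	write_cr4(read_cr4() & ~X86_CR4_VMXE);	/* VMXE now cleared here,
						   not in kvm_cpu_vmxoff() */
}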
arch/x86/kvm/vmx.c

index a82cfa1e2a4054ae9b09d679e13e71ce9617f802..8232888214449ea1cede5884cff246844eda59ef 100644
@@ -453,6 +453,19 @@ static void vmcs_clear(struct vmcs *vmcs)
                       vmcs, phys_addr);
 }
 
+static void vmcs_load(struct vmcs *vmcs)
+{
+       u64 phys_addr = __pa(vmcs);
+       u8 error;
+
+       asm volatile (__ex(ASM_VMX_VMPTRLD_RAX) "; setna %0"
+                       : "=g"(error) : "a"(&phys_addr), "m"(phys_addr)
+                       : "cc", "memory");
+       if (error)
+               printk(KERN_ERR "kvm: vmptrld %p/%llx fail\n",
+                      vmcs, phys_addr);
+}
+
 static void __vcpu_clear(void *arg)
 {
        struct vcpu_vmx *vmx = arg;
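
The new vmcs_load() deliberately mirrors the existing vmcs_clear(),
whose tail is visible in the context lines above. For comparison, a
sketch of that helper, reconstructed from the surrounding context: it
follows the same pattern of running a VMX instruction on the VMCS
physical address and folding the CF/ZF failure cases into one byte
with setna:

static void vmcs_clear(struct vmcs *vmcs)
{
	u64 phys_addr = __pa(vmcs);
	u8 error;

	/* VMCLEAR instead of VMPTRLD; setna sets error if CF or ZF is set */
	asm volatile (__ex(ASM_VMX_VMCLEAR_RAX) "; setna %0"
			: "=g"(error) : "a"(&phys_addr), "m"(phys_addr)
			: "cc", "memory");
	if (error)
		printk(KERN_ERR "kvm: vmclear fail: %p/%llx\n",
		       vmcs, phys_addr);
}

Note the "memory" clobber in vmcs_load(): the open-coded VMPTRLD it
replaces (removed in the next hunk) clobbered only "cc", so the
wrapper is slightly more conservative.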
@@ -830,7 +843,6 @@ static void vmx_load_host_state(struct vcpu_vmx *vmx)
 static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
-       u64 phys_addr = __pa(vmx->vmcs);
        u64 tsc_this, delta, new_offset;
 
        if (vcpu->cpu != cpu) {
@@ -844,15 +856,8 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
        }
 
        if (per_cpu(current_vmcs, cpu) != vmx->vmcs) {
-               u8 error;
-
                per_cpu(current_vmcs, cpu) = vmx->vmcs;
-               asm volatile (__ex(ASM_VMX_VMPTRLD_RAX) "; setna %0"
-                             : "=g"(error) : "a"(&phys_addr), "m"(phys_addr)
-                             : "cc");
-               if (error)
-                       printk(KERN_ERR "kvm: vmptrld %p/%llx fail\n",
-                              vmx->vmcs, phys_addr);
+               vmcs_load(vmx->vmcs);
        }
 
        if (vcpu->cpu != cpu) {
@@ -1288,6 +1293,13 @@ static __init int vmx_disabled_by_bios(void)
        /* locked but not enabled */
 }
 
+static void kvm_cpu_vmxon(u64 addr)
+{
+       asm volatile (ASM_VMX_VMXON_RAX
+                       : : "a"(&addr), "m"(addr)
+                       : "memory", "cc");
+}
+
 static int hardware_enable(void *garbage)
 {
        int cpu = raw_smp_processor_id();
@@ -1310,9 +1322,7 @@ static int hardware_enable(void *garbage)
                wrmsrl(MSR_IA32_FEATURE_CONTROL, old | test_bits);
        }
        write_cr4(read_cr4() | X86_CR4_VMXE); /* FIXME: not cpu hotplug safe */
-       asm volatile (ASM_VMX_VMXON_RAX
-                     : : "a"(&phys_addr), "m"(phys_addr)
-                     : "memory", "cc");
+       kvm_cpu_vmxon(phys_addr);
 
        ept_sync_global();
 
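One ordering constraint is worth spelling out: VMXON raises #UD unless
CR4.VMXE is already set, which is why the write_cr4() stays in
hardware_enable() instead of moving into the new wrapper. A minimal
sketch of the required sequence, where phys_addr is the physical
address of this CPU's VMXON region as in the code above:

	/* CR4.VMXE must be set before VMXON, or the CPU raises #UD */
	write_cr4(read_cr4() | X86_CR4_VMXE);
	kvm_cpu_vmxon(phys_addr);	/* enter VMX root operation */
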
@@ -1336,13 +1346,13 @@ static void vmclear_local_vcpus(void)
 static void kvm_cpu_vmxoff(void)
 {
        asm volatile (__ex(ASM_VMX_VMXOFF) : : : "cc");
-       write_cr4(read_cr4() & ~X86_CR4_VMXE);
 }
 
 static void hardware_disable(void *garbage)
 {
        vmclear_local_vcpus();
        kvm_cpu_vmxoff();
+       write_cr4(read_cr4() & ~X86_CR4_VMXE);
 }
 
 static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt,
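
The ordering in hardware_disable() matters for the same reason in
reverse: CR4.VMXE cannot be cleared while the CPU is still in VMX
operation (the CR4 write would fault), so VMXOFF has to execute first.
Splitting the CR4 update out of kvm_cpu_vmxoff() also lets a caller
leave VMX operation without touching CR4 at all; a hypothetical
example, with a function name that is illustrative and not part of
this patch:

static void vmx_hw_off_keep_vmxe(void)
{
	kvm_cpu_vmxoff();	/* VMXOFF only */
	/* CR4.VMXE intentionally left set; the old kvm_cpu_vmxoff()
	 * (VMXOFF plus CR4 clear) could not express this */
}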