KVM: Allow adjust_tsc_offset to be in host or guest cycles
author    Marcelo Tosatti <mtosatti@redhat.com>
          Fri, 3 Feb 2012 17:43:55 +0000 (15:43 -0200)
committer Avi Kivity <avi@redhat.com>
          Thu, 8 Mar 2012 12:10:07 +0000 (14:10 +0200)
Redefine the API to take a parameter indicating whether an
adjustment is in host or guest cycles.

Signed-off-by: Zachary Amsden <zamsden@gmail.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/svm.c
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c

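To make the new contract concrete, here is a standalone sketch of the resulting call pattern. It is not kernel code: struct vcpu_model, the 8.8 fixed-point tsc_ratio, and the main() harness are invented for illustration, while the two wrapper names mirror the ones the patch adds to kvm_host.h below. A caller whose delta is already in guest cycles uses the guest variant; a caller whose delta is in host cycles uses the host variant and lets the backend scale it.

	/* Standalone model of the redefined callback; not kernel code. */
	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	struct vcpu_model {
		int64_t  tsc_offset;  /* guest_tsc = scale(host_tsc) + tsc_offset */
		uint32_t tsc_ratio;   /* guest cycles per host cycle, 8.8 fixed point */
	};

	/* The redefined hook: host-cycle deltas are scaled into guest cycles first. */
	static void adjust_tsc_offset(struct vcpu_model *v, int64_t adjustment, bool host)
	{
		if (host)
			adjustment = (adjustment * (int64_t)v->tsc_ratio) >> 8;
		v->tsc_offset += adjustment;
	}

	/* These two mirror the inline wrappers added to kvm_host.h below. */
	static void adjust_tsc_offset_guest(struct vcpu_model *v, int64_t adj)
	{
		adjust_tsc_offset(v, adj, false);
	}

	static void adjust_tsc_offset_host(struct vcpu_model *v, int64_t adj)
	{
		adjust_tsc_offset(v, adj, true);
	}

	int main(void)
	{
		struct vcpu_model v = { .tsc_offset = 0, .tsc_ratio = 2 << 8 }; /* guest at 2x host */

		adjust_tsc_offset_guest(&v, 1000); /* delta already in guest cycles */
		adjust_tsc_offset_host(&v, 1000);  /* 1000 host cycles -> 2000 guest cycles */
		printf("tsc_offset = %lld\n", (long long)v.tsc_offset); /* prints 3000 */
		return 0;
	}
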
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index b23682900f41febf7066d1f5fe80b4beae075a76..dd439f13df84870d716116b317f43998f857e697 100644
@@ -646,7 +646,7 @@ struct kvm_x86_ops {
        u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
        int (*get_lpage_level)(void);
        bool (*rdtscp_supported)(void);
-       void (*adjust_tsc_offset)(struct kvm_vcpu *vcpu, s64 adjustment);
+       void (*adjust_tsc_offset)(struct kvm_vcpu *vcpu, s64 adjustment, bool host);
 
        void (*set_tdp_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
 
@@ -676,6 +676,17 @@ struct kvm_arch_async_pf {
 
 extern struct kvm_x86_ops *kvm_x86_ops;
 
+static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu,
+                                          s64 adjustment)
+{
+       kvm_x86_ops->adjust_tsc_offset(vcpu, adjustment, false);
+}
+
+static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment)
+{
+       kvm_x86_ops->adjust_tsc_offset(vcpu, adjustment, true);
+}
+
 int kvm_mmu_module_init(void);
 void kvm_mmu_module_exit(void);
 
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index e12026e5244ee217dd603778eeb38ef229ba4275..0b7690ee20bdafa722fb160b25e214290d11915c 100644
@@ -1016,10 +1016,14 @@ static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
        mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
 }
 
-static void svm_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment)
+static void svm_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment, bool host)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
 
+       WARN_ON(adjustment < 0);
+       if (host)
+               adjustment = svm_scale_tsc(vcpu, adjustment);
+
        svm->vmcb->control.tsc_offset += adjustment;
        if (is_guest_mode(vcpu))
                svm->nested.hsave->control.tsc_offset += adjustment;
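
The SVM side is where the host/guest distinction has teeth: with TSC ratio support the guest TSC need not tick at host frequency, and the offset SVM applies is in guest-cycle units, so a host-cycle delta must first be converted by svm_scale_tsc() (in the sketch above, a guest clocked at twice host rate turns 1000 host cycles into 2000 guest cycles). The WARN_ON is presumably there because the scaling path is not meant for negative deltas.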
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index e6bf61fa1c035735677f7f2d3a6d765a6459f531..575fb742a6fc3e757e3e8ef534e8ac2f8e31dd03 100644
@@ -1856,7 +1856,7 @@ static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
        }
 }
 
-static void vmx_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment)
+static void vmx_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment, bool host)
 {
        u64 offset = vmcs_read64(TSC_OFFSET);
        vmcs_write64(TSC_OFFSET, offset + adjustment);
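
On VMX the new flag can simply be ignored: Intel hardware of this era has no TSC scaling, the guest TSC is just host TSC plus TSC_OFFSET, so host and guest cycles advance at the same rate and no conversion is needed.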
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 39a57dac884acb15c141a626a85e1122daee5f7c..3b931302fa55396316a02591d059a399fa3f0308 100644
@@ -1116,7 +1116,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
        if (vcpu->tsc_catchup) {
                u64 tsc = compute_guest_tsc(v, kernel_ns);
                if (tsc > tsc_timestamp) {
-                       kvm_x86_ops->adjust_tsc_offset(v, tsc - tsc_timestamp);
+                       adjust_tsc_offset_guest(v, tsc - tsc_timestamp);
                        tsc_timestamp = tsc;
                }
        }
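
compute_guest_tsc() returns the value the guest's TSC should have reached by kernel_ns, so tsc - tsc_timestamp is a delta already expressed in guest cycles; the catch-up path therefore switches to the new adjust_tsc_offset_guest() wrapper instead of passing a flag at the call site.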