KVM: nVMX/nSVM: Fix bug which sets vcpu->arch.tsc_offset to L1 tsc_offset
author     Leonid Shatz <leonid.shatz@oracle.com>
           Tue, 6 Nov 2018 10:14:25 +0000 (12:14 +0200)
committer  Sultan Alsawaf <sultan.alsawaf@canonical.com>
           Wed, 24 Jul 2019 15:44:59 +0000 (09:44 -0600)
BugLink: https://bugs.launchpad.net/bugs/1836968
commit 326e742533bf0a23f0127d8ea62fb558ba665f08 upstream.

Since commit e79f245ddec1 ("X86/KVM: Properly update 'tsc_offset' to
represent the running guest"), the meaning of vcpu->arch.tsc_offset
changed: it now always reflects the tsc_offset value set on the active
VMCS, regardless of whether the vCPU is currently running L1 or L2.

However, the above-mentioned commit failed to also change
kvm_vcpu_write_tsc_offset() to set vcpu->arch.tsc_offset correctly:
vmx_write_tsc_offset() may set the tsc_offset value in the active VMCS
to the given offset parameter *plus vmcs12->tsc_offset*, whereas
kvm_vcpu_write_tsc_offset() stores just the given offset parameter in
vcpu->arch.tsc_offset, without accounting for the possible addition of
vmcs12->tsc_offset. (The same is true for the SVM case.)

Fix this issue by changing kvm_x86_ops->write_tsc_offset() to return
the tsc_offset actually set in the active VMCS, and by modifying
kvm_vcpu_write_tsc_offset() to store the returned value in
vcpu->arch.tsc_offset. In addition, rename the write_tsc_offset()
callback to write_l1_tsc_offset() to make clear that it is meant to set
the L1 TSC offset.
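
To illustrate the intended semantics, here is a minimal, userspace-only
sketch. The toy_* names are illustrative stand-ins, not kernel symbols:
vmcs12_tsc_offset models vmcs12->tsc_offset, active_vmcs_tsc_offset
models the offset written to the active VMCS (vmcs02 while L2 runs),
and arch_tsc_offset models vcpu->arch.tsc_offset.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

/* Toy stand-ins for the KVM state involved (illustration only). */
struct toy_vcpu {
	bool guest_mode;                 /* is the vCPU running L2? */
	uint64_t vmcs12_tsc_offset;      /* TSC offset L1 configured for L2 */
	uint64_t active_vmcs_tsc_offset; /* offset on the active VMCS */
	uint64_t arch_tsc_offset;        /* mirror kept by common x86 code */
};

/* Models the fixed callback: write the active-VMCS offset and return the
 * value actually written, so the caller can update its mirror. */
static uint64_t toy_write_l1_tsc_offset(struct toy_vcpu *vcpu, uint64_t offset)
{
	uint64_t active = offset;

	if (vcpu->guest_mode)
		active += vcpu->vmcs12_tsc_offset; /* vmcs02 = L1 offset + L2 offset */

	vcpu->active_vmcs_tsc_offset = active;
	return active;
}

int main(void)
{
	struct toy_vcpu vcpu = { .guest_mode = true, .vmcs12_tsc_offset = 1000 };

	/* After the fix, common code stores the returned (actual) value
	 * instead of the raw L1 offset, so the two stay in sync. */
	vcpu.arch_tsc_offset = toy_write_l1_tsc_offset(&vcpu, 500);

	printf("arch.tsc_offset=%" PRIu64 " active VMCS tsc_offset=%" PRIu64 "\n",
	       vcpu.arch_tsc_offset, vcpu.active_vmcs_tsc_offset);
	return 0;
}

Both values print 1500 here; before the fix, vcpu->arch.tsc_offset
would have stayed at 500 while the active VMCS held 1500.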

Fixes: e79f245ddec1 ("X86/KVM: Properly update 'tsc_offset' to represent the running guest")
Reviewed-by: Liran Alon <liran.alon@oracle.com>
Reviewed-by: Mihai Carabas <mihai.carabas@oracle.com>
Reviewed-by: Krish Sadhukhan <krish.sadhukhan@oracle.com>
Signed-off-by: Leonid Shatz <leonid.shatz@oracle.com>
Cc: stable@vger.kernel.org
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Kamal Mostafa <kamal@canonical.com>
Signed-off-by: Khalid Elmously <khalid.elmously@canonical.com>
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/svm.c
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index e2f0e1b1af1452b079ff1285b73e5d200db1da36..c2bb7da8e5b42697c2e747680509babcf81f1735 100644
@@ -1015,7 +1015,8 @@ struct kvm_x86_ops {
        bool (*has_wbinvd_exit)(void);
 
        u64 (*read_l1_tsc_offset)(struct kvm_vcpu *vcpu);
-       void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
+       /* Returns actual tsc_offset set in active VMCS */
+       u64 (*write_l1_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
 
        void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2);
 
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 4f9bd710bf5c2013de5e64c88551980527a2924d..acf1f7cd9e70cc7bd0356111954ee159651ea911 100644
@@ -1209,7 +1209,7 @@ static u64 svm_read_l1_tsc_offset(struct kvm_vcpu *vcpu)
        return vcpu->arch.tsc_offset;
 }
 
-static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
+static u64 svm_write_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
        u64 g_tsc_offset = 0;
@@ -1227,6 +1227,7 @@ static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
        svm->vmcb->control.tsc_offset = offset + g_tsc_offset;
 
        mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
+       return svm->vmcb->control.tsc_offset;
 }
 
 static void avic_init_vmcb(struct vcpu_svm *svm)
@@ -5814,7 +5815,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
        .has_wbinvd_exit = svm_has_wbinvd_exit,
 
        .read_l1_tsc_offset = svm_read_l1_tsc_offset,
-       .write_tsc_offset = svm_write_tsc_offset,
+       .write_l1_tsc_offset = svm_write_l1_tsc_offset,
 
        .set_tdp_cr3 = set_tdp_cr3,
 
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index ece371d13cd03f4ca7da0ec5be5471539b761c13..953e17525004296d1b2774f478972a9b51c3b09f 100644
@@ -2862,11 +2862,9 @@ static u64 vmx_read_l1_tsc_offset(struct kvm_vcpu *vcpu)
        return vcpu->arch.tsc_offset;
 }
 
-/*
- * writes 'offset' into guest's timestamp counter offset register
- */
-static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
+static u64 vmx_write_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
 {
+       u64 active_offset = offset;
        if (is_guest_mode(vcpu)) {
                /*
                 * We're here if L1 chose not to trap WRMSR to TSC. According
@@ -2874,17 +2872,16 @@ static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
                 * set for L2 remains unchanged, and still needs to be added
                 * to the newly set TSC to get L2's TSC.
                 */
-               struct vmcs12 *vmcs12;
-               /* recalculate vmcs02.TSC_OFFSET: */
-               vmcs12 = get_vmcs12(vcpu);
-               vmcs_write64(TSC_OFFSET, offset +
-                       (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETING) ?
-                        vmcs12->tsc_offset : 0));
+               struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+               if (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETING))
+                       active_offset += vmcs12->tsc_offset;
        } else {
                trace_kvm_write_tsc_offset(vcpu->vcpu_id,
                                           vmcs_read64(TSC_OFFSET), offset);
-               vmcs_write64(TSC_OFFSET, offset);
        }
+
+       vmcs_write64(TSC_OFFSET, active_offset);
+       return active_offset;
 }
 
 /*
@@ -12664,7 +12661,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
        .has_wbinvd_exit = cpu_has_vmx_wbinvd_exit,
 
        .read_l1_tsc_offset = vmx_read_l1_tsc_offset,
-       .write_tsc_offset = vmx_write_tsc_offset,
+       .write_l1_tsc_offset = vmx_write_l1_tsc_offset,
 
        .set_tdp_cr3 = vmx_set_cr3,
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index ac7e1a5fa2a8c7b4213047d815b5881dc3444e48..0bc041658a1b2a12ccd5db867cb6b7963602b26b 100644
@@ -1528,8 +1528,7 @@ EXPORT_SYMBOL_GPL(kvm_read_l1_tsc);
 
 static void kvm_vcpu_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
 {
-       kvm_x86_ops->write_tsc_offset(vcpu, offset);
-       vcpu->arch.tsc_offset = offset;
+       vcpu->arch.tsc_offset = kvm_x86_ops->write_l1_tsc_offset(vcpu, offset);
 }
 
 void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
@@ -1644,7 +1643,8 @@ EXPORT_SYMBOL_GPL(kvm_write_tsc);
 static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu,
                                           s64 adjustment)
 {
-       kvm_vcpu_write_tsc_offset(vcpu, vcpu->arch.tsc_offset + adjustment);
+       u64 tsc_offset = kvm_x86_ops->read_l1_tsc_offset(vcpu);
+       kvm_vcpu_write_tsc_offset(vcpu, tsc_offset + adjustment);
 }
 
 static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment)