From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Paolo Bonzini <pbonzini@redhat.com>
Date: Thu, 4 Aug 2022 15:28:32 +0200
Subject: [PATCH] KVM: x86: revalidate steal time cache if MSR value changes

commit 901d3765fa804ce42812f1d5b1f3de2dfbb26723 upstream.

Commit 7e2175ebd695 ("KVM: x86: Fix recording of guest steal time
/ preempted status", 2021-11-11) open-coded the previous call to
kvm_map_gfn, but in doing so it dropped the comparison between the cached
guest physical address and the one in the MSR. This causes an incorrect
cache hit if the guest modifies the steal time address while the memslots
remain the same. This can happen with kexec, in which case the steal
time data is written at the address used by the old kernel instead of
the new one.

While at it, rename the variable from gfn to gpa since it is a plain
physical address and not a right-shifted one.

Reported-by: Dave Young <ruyang@redhat.com>
Reported-by: Xiaoying Yan <yiyan@redhat.com>
Analyzed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Cc: David Woodhouse <dwmw@amazon.co.uk>
Cc: stable@vger.kernel.org
Fixes: 7e2175ebd695 ("KVM: x86: Fix recording of guest steal time / preempted status")
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Thomas Lamprecht <t.lamprecht@proxmox.com>
---
 arch/x86/kvm/x86.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index aec63cebe0b7..a99eec435652 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3356,6 +3356,7 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
 	struct gfn_to_hva_cache *ghc = &vcpu->arch.st.cache;
 	struct kvm_steal_time __user *st;
 	struct kvm_memslots *slots;
+	gpa_t gpa = vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS;
 	u64 steal;
 	u32 version;
 
@@ -3373,13 +3374,12 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
 	slots = kvm_memslots(vcpu->kvm);
 
 	if (unlikely(slots->generation != ghc->generation ||
+		     gpa != ghc->gpa ||
 		     kvm_is_error_hva(ghc->hva) || !ghc->memslot)) {
-		gfn_t gfn = vcpu->arch.st.msr_val & KVM_STEAL_VALID_BITS;
-
 		/* We rely on the fact that it fits in a single page. */
 		BUILD_BUG_ON((sizeof(*st) - 1) & KVM_STEAL_VALID_BITS);
 
-		if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, gfn, sizeof(*st)) ||
+		if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, gpa, sizeof(*st)) ||
 		    kvm_is_error_hva(ghc->hva) || !ghc->memslot)
 			return;
 	}
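
For context, the following stand-alone C program is a minimal userspace model of the cache check this patch changes. It is illustrative only: the struct, field, and function names are made up and are not KVM's, but it shows why dropping the gpa comparison turns a relocated steal-time area (for example after kexec) into a stale cache hit while the memslot generation stays the same.

/*
 * Minimal userspace model of the steal-time cache check -- NOT kernel
 * code.  Without the gpa comparison, a guest that moves its steal-time
 * address while the memslot generation is unchanged keeps "hitting"
 * the stale cached mapping.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t gpa_t;

/* Hypothetical stand-in for the cached gfn_to_hva mapping state. */
struct hva_cache_model {
	uint64_t generation;	/* models slots->generation        */
	gpa_t    gpa;		/* models ghc->gpa                 */
	bool     valid;		/* models !kvm_is_error_hva(hva)   */
};

/* Return true when the cache must be re-initialised for @gpa. */
static bool needs_refresh(const struct hva_cache_model *c,
			  uint64_t cur_generation, gpa_t gpa,
			  bool compare_gpa)
{
	if (cur_generation != c->generation || !c->valid)
		return true;
	/* The fix: also revalidate when the MSR now points elsewhere. */
	return compare_gpa && gpa != c->gpa;
}

int main(void)
{
	struct hva_cache_model cache = {
		.generation = 1, .gpa = 0x1000, .valid = true
	};

	/* Guest kexecs and programs a new steal-time address;
	 * the memslots (and thus the generation) are unchanged. */
	gpa_t new_gpa = 0x2000;

	printf("without gpa check: refresh=%d (stale hit, the bug)\n",
	       needs_refresh(&cache, 1, new_gpa, false));
	printf("with gpa check:    refresh=%d (cache revalidated)\n",
	       needs_refresh(&cache, 1, new_gpa, true));
	return 0;
}

Built with any C99 compiler, the first line reports no refresh (the pre-patch behaviour) and the second reports a refresh, mirroring the extra "gpa != ghc->gpa" condition added above.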