KVM: Use vcpu-specific gva->hva translation when querying host page size
author     Sean Christopherson <sean.j.christopherson@intel.com>
           Wed, 8 Jan 2020 20:24:37 +0000 (12:24 -0800)
committer  Khalid Elmously <khalid.elmously@canonical.com>
           Fri, 13 Mar 2020 04:31:00 +0000 (00:31 -0400)
BugLink: https://bugs.launchpad.net/bugs/1866678
[ Upstream commit f9b84e19221efc5f493156ee0329df3142085f28 ]

Use kvm_vcpu_gfn_to_hva() when retrieving the host page size so that the
correct set of memslots is used when handling x86 page faults in SMM.
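
For context: x86 KVM keeps separate memslot sets per address space (regular
and SMM), and the vcpu-aware helper resolves the gfn through the address
space the vCPU is currently in, whereas gfn_to_hva() always consults address
space 0. The stand-alone C sketch below only models that selection logic;
the struct layout and field names (e.g. in_smm) are simplified stand-ins,
not the kernel's actual definitions.

    #include <stdbool.h>
    #include <stdio.h>

    /* Simplified model: two memslot address spaces, as on x86 (normal + SMM). */
    enum { AS_NORMAL = 0, AS_SMM = 1, NR_ADDR_SPACES = 2 };

    struct memslots { const char *label; };

    struct kvm {
            struct memslots slots[NR_ADDR_SPACES];
    };

    struct kvm_vcpu {
            struct kvm *kvm;
            bool in_smm;    /* stand-in for the vCPU's SMM state */
    };

    /* Models gfn_to_hva(): always resolves through address space 0. */
    static struct memslots *kvm_memslots_model(struct kvm *kvm)
    {
            return &kvm->slots[AS_NORMAL];
    }

    /*
     * Models kvm_vcpu_gfn_to_hva(): resolves through the vCPU's current
     * address space, so an SMM vCPU sees the SMM memslots.
     */
    static struct memslots *kvm_vcpu_memslots_model(struct kvm_vcpu *vcpu)
    {
            return &vcpu->kvm->slots[vcpu->in_smm ? AS_SMM : AS_NORMAL];
    }

    int main(void)
    {
            struct kvm kvm = {
                    .slots = { { "normal memslots" }, { "SMM memslots" } },
            };
            struct kvm_vcpu vcpu = { .kvm = &kvm, .in_smm = true };

            /* Pre-patch behaviour: page-size lookup goes through the VM-wide helper. */
            printf("kvm-wide lookup:   %s\n", kvm_memslots_model(&kvm)->label);

            /* Post-patch behaviour: lookup follows the vCPU's address space. */
            printf("vcpu-aware lookup: %s\n", kvm_vcpu_memslots_model(&vcpu)->label);
            return 0;
    }

With an SMM vCPU, the two lookups return different memslot sets, which is
why the host page size must be derived from the vcpu-specific translation.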

Fixes: 54bf36aac520 ("KVM: x86: use vcpu-specific functions to read/write/translate GFNs")
Cc: stable@vger.kernel.org
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
Signed-off-by: Kamal Mostafa <kamal@canonical.com>
Signed-off-by: Khalid Elmously <khalid.elmously@canonical.com>
arch/x86/kvm/mmu.c
include/linux/kvm_host.h
virt/kvm/kvm_main.c

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index bd4f2da927e7ee1113e210c024c172cbcf77db1d..c000e35feccef79a786380e825a5fb5d61f28a94 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1166,12 +1166,12 @@ static bool mmu_gfn_lpage_is_disallowed(struct kvm_vcpu *vcpu, gfn_t gfn,
        return __mmu_gfn_lpage_is_disallowed(gfn, level, slot);
 }
 
-static int host_mapping_level(struct kvm *kvm, gfn_t gfn)
+static int host_mapping_level(struct kvm_vcpu *vcpu, gfn_t gfn)
 {
        unsigned long page_size;
        int i, ret = 0;
 
-       page_size = kvm_host_page_size(kvm, gfn);
+       page_size = kvm_host_page_size(vcpu, gfn);
 
        for (i = PT_PAGE_TABLE_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
                if (page_size >= KVM_HPAGE_SIZE(i))
@@ -1221,7 +1221,7 @@ static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn,
        if (unlikely(*force_pt_level))
                return PT_PAGE_TABLE_LEVEL;
 
-       host_level = host_mapping_level(vcpu->kvm, large_gfn);
+       host_level = host_mapping_level(vcpu, large_gfn);
 
        if (host_level == PT_PAGE_TABLE_LEVEL)
                return host_level;
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 69d53c748ab80ebdb9241530c87d4338a903234d..7d948827cc8e7eea15bfc5e94ef479a6517b38ab 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -696,7 +696,7 @@ int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
 int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
 struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
 bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
-unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn);
+unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn);
 void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
 
 struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 50a9eb1b62f449a34b1298d5ac1a0d26f7be6220..be92173e229b859096f474c319f23c5a501ba215 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1276,14 +1276,14 @@ bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
 }
 EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);
 
-unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn)
+unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn)
 {
        struct vm_area_struct *vma;
        unsigned long addr, size;
 
        size = PAGE_SIZE;
 
-       addr = gfn_to_hva(kvm, gfn);
+       addr = kvm_vcpu_gfn_to_hva(vcpu, gfn);
        if (kvm_is_error_hva(addr))
                return PAGE_SIZE;