KVM: Replace old tlb flush function with new one to flush a specified range.
author		Lan Tianyu <Tianyu.Lan@microsoft.com>	Thu, 6 Dec 2018 13:21:09 +0000 (21:21 +0800)
committer	Paolo Bonzini <pbonzini@redhat.com>	Fri, 21 Dec 2018 10:28:41 +0000 (11:28 +0100)
This patch replaces kvm_flush_remote_tlbs() with
kvm_flush_remote_tlbs_with_address() in several functions, with no change in logic.

Signed-off-by: Lan Tianyu <Tianyu.Lan@microsoft.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/mmu.c
arch/x86/kvm/paging_tmpl.h
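
For context, kvm_flush_remote_tlbs_with_address() is the range-based helper
added earlier in this series. A minimal sketch of its shape (an illustration,
not a verbatim copy of the upstream code): it flushes the gfn range
[start_gfn, start_gfn + pages) via the backend's ranged-flush hook when one
is implemented, and falls back to a full kvm_flush_remote_tlbs() otherwise.

	struct kvm_tlb_range {
		u64 start_gfn;
		u64 pages;
	};

	static void kvm_flush_remote_tlbs_with_range(struct kvm *kvm,
			struct kvm_tlb_range *range)
	{
		int ret = -ENOTSUPP;

		/* Use the backend's ranged flush when one is implemented... */
		if (range && kvm_x86_ops->tlb_remote_flush_with_range)
			ret = kvm_x86_ops->tlb_remote_flush_with_range(kvm, range);

		/* ...otherwise degrade to the old full remote TLB flush. */
		if (ret)
			kvm_flush_remote_tlbs(kvm);
	}

	void kvm_flush_remote_tlbs_with_address(struct kvm *kvm,
			u64 start_gfn, u64 pages)
	{
		struct kvm_tlb_range range;

		range.start_gfn = start_gfn;
		range.pages = pages;

		kvm_flush_remote_tlbs_with_range(kvm, &range);
	}
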

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index f105f17fb4c71b160394c0fa5b0e731a5e3cae63..711a41a85dfb2e17208d7fe36a61ed7e4bbb24de 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1485,8 +1485,12 @@ static bool __drop_large_spte(struct kvm *kvm, u64 *sptep)
 
 static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep)
 {
-       if (__drop_large_spte(vcpu->kvm, sptep))
-               kvm_flush_remote_tlbs(vcpu->kvm);
+       if (__drop_large_spte(vcpu->kvm, sptep)) {
+               struct kvm_mmu_page *sp = page_header(__pa(sptep));
+
+               kvm_flush_remote_tlbs_with_address(vcpu->kvm, sp->gfn,
+                       KVM_PAGES_PER_HPAGE(sp->role.level));
+       }
 }
 
 /*
@@ -1954,7 +1958,8 @@ static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
        rmap_head = gfn_to_rmap(vcpu->kvm, gfn, sp);
 
        kvm_unmap_rmapp(vcpu->kvm, rmap_head, NULL, gfn, sp->role.level, 0);
-       kvm_flush_remote_tlbs(vcpu->kvm);
+       kvm_flush_remote_tlbs_with_address(vcpu->kvm, sp->gfn,
+                       KVM_PAGES_PER_HPAGE(sp->role.level));
 }
 
 int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
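
The flush sizes in the two hunks above come from KVM_PAGES_PER_HPAGE(), which
turns a mapping level into the number of base pages that level spans (1 for
PT_PAGE_TABLE_LEVEL, 512 for a 2 MiB mapping, and so on), so each flush covers
exactly the region the dropped SPTE mapped. Roughly, from
arch/x86/include/asm/kvm_host.h as of this era of the tree (quoted here for
context only):

	#define KVM_HPAGE_GFN_SHIFT(x)	(((x) - 1) * 9)
	#define KVM_HPAGE_SHIFT(x)	(PAGE_SHIFT + KVM_HPAGE_GFN_SHIFT(x))
	#define KVM_HPAGE_SIZE(x)	(1UL << KVM_HPAGE_SHIFT(x))
	#define KVM_PAGES_PER_HPAGE(x)	(KVM_HPAGE_SIZE(x) / PAGE_SIZE)
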
@@ -2470,7 +2475,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
                account_shadowed(vcpu->kvm, sp);
                if (level == PT_PAGE_TABLE_LEVEL &&
                      rmap_write_protect(vcpu, gfn))
-                       kvm_flush_remote_tlbs(vcpu->kvm);
+                       kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn, 1);
 
                if (level > PT_PAGE_TABLE_LEVEL && need_sync)
                        flush |= kvm_sync_pages(vcpu, gfn, &invalid_list);
@@ -2590,7 +2595,7 @@ static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep,
                        return;
 
                drop_parent_pte(child, sptep);
-               kvm_flush_remote_tlbs(vcpu->kvm);
+               kvm_flush_remote_tlbs_with_address(vcpu->kvm, child->gfn, 1);
        }
 }
 
@@ -3014,8 +3019,10 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep, unsigned pte_access,
                        ret = RET_PF_EMULATE;
                kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
        }
+
        if (set_spte_ret & SET_SPTE_NEED_REMOTE_TLB_FLUSH || flush)
-               kvm_flush_remote_tlbs(vcpu->kvm);
+               kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn,
+                               KVM_PAGES_PER_HPAGE(level));
 
        if (unlikely(is_mmio_spte(*sptep)))
                ret = RET_PF_EMULATE;
@@ -5672,7 +5679,8 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
         * on PT_WRITABLE_MASK anymore.
         */
        if (flush)
-               kvm_flush_remote_tlbs(kvm);
+               kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn,
+                       memslot->npages);
 }
 
 static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm,
@@ -5742,7 +5750,8 @@ void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
         * dirty_bitmap.
         */
        if (flush)
-               kvm_flush_remote_tlbs(kvm);
+               kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn,
+                               memslot->npages);
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_slot_leaf_clear_dirty);
 
@@ -5760,7 +5769,8 @@ void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm,
        lockdep_assert_held(&kvm->slots_lock);
 
        if (flush)
-               kvm_flush_remote_tlbs(kvm);
+               kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn,
+                               memslot->npages);
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_slot_largepage_remove_write_access);
 
@@ -5777,7 +5787,8 @@ void kvm_mmu_slot_set_dirty(struct kvm *kvm,
 
        /* see kvm_mmu_slot_leaf_clear_dirty */
        if (flush)
-               kvm_flush_remote_tlbs(kvm);
+               kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn,
+                               memslot->npages);
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_slot_set_dirty);
 
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 7cf2185b7eb515ec36894af95d2121cceaec5214..6bdca39829bc8ed611800458e6dcf29d0d759830 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -894,7 +894,8 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva, hpa_t root_hpa)
                        pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);
 
                        if (mmu_page_zap_pte(vcpu->kvm, sp, sptep))
-                               kvm_flush_remote_tlbs(vcpu->kvm);
+                               kvm_flush_remote_tlbs_with_address(vcpu->kvm,
+                                       sp->gfn, KVM_PAGES_PER_HPAGE(sp->role.level));
 
                        if (!rmap_can_add(vcpu))
                                break;