git.proxmox.com Git - mirror_ubuntu-hirsute-kernel.git/commitdiff
KVM: x86/mmu: Consolidate open coded variants of memslot TLB flushes
author: Sean Christopherson <sean.j.christopherson@intel.com>
Tue, 18 Feb 2020 21:07:36 +0000 (13:07 -0800)
committer: Paolo Bonzini <pbonzini@redhat.com>
Mon, 16 Mar 2020 16:57:29 +0000 (17:57 +0100)
Replace open coded instances of kvm_arch_flush_remote_tlbs_memslot()'s
functionality with calls to the aforementioned function.  Update the
comment in kvm_arch_flush_remote_tlbs_memslot() to elaborate on how it
is used and why it asserts that slots_lock is held.

No functional change intended.

Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/mmu/mmu.c

index 7268c6ffb643a6e5b67477d6c8f27341ab724073..c4e0b97f82accb34751304c5294a76daed154e97 100644 (file)
@@ -5862,13 +5862,6 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
                                      false);
        spin_unlock(&kvm->mmu_lock);
 
-       /*
-        * kvm_mmu_slot_remove_write_access() and kvm_vm_ioctl_get_dirty_log()
-        * which do tlb flush out of mmu-lock should be serialized by
-        * kvm->slots_lock otherwise tlb flush would be missed.
-        */
-       lockdep_assert_held(&kvm->slots_lock);
-
        /*
         * We can flush all the TLBs out of the mmu lock without TLB
         * corruption since we just change the spte from writable to
@@ -5881,8 +5874,7 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm,
         * on PT_WRITABLE_MASK anymore.
         */
        if (flush)
-               kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn,
-                       memslot->npages);
+               kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
 }
 
 static bool kvm_mmu_zap_collapsible_spte(struct kvm *kvm,
@@ -5938,8 +5930,11 @@ void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
                                        struct kvm_memory_slot *memslot)
 {
        /*
-        * All the TLBs can be flushed out of mmu lock, see the comments in
-        * kvm_mmu_slot_remove_write_access().
+        * All current use cases for flushing the TLBs for a specific memslot
+        * are related to dirty logging, and do the TLB flush out of mmu_lock.
+        * The interaction between the various operations on memslot must be
+        * serialized by slots_lock to ensure the TLB flush from one operation
+        * is observed by any other operation on the same memslot.
         */
        lockdep_assert_held(&kvm->slots_lock);
        kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn,
@@ -5955,8 +5950,6 @@ void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
        flush = slot_handle_leaf(kvm, memslot, __rmap_clear_dirty, false);
        spin_unlock(&kvm->mmu_lock);
 
-       lockdep_assert_held(&kvm->slots_lock);
-
        /*
         * It's also safe to flush TLBs out of mmu lock here as currently this
         * function is only used for dirty logging, in which case flushing TLB
@@ -5964,8 +5957,7 @@ void kvm_mmu_slot_leaf_clear_dirty(struct kvm *kvm,
         * dirty_bitmap.
         */
        if (flush)
-               kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn,
-                               memslot->npages);
+               kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_slot_leaf_clear_dirty);
 
@@ -5979,12 +5971,8 @@ void kvm_mmu_slot_largepage_remove_write_access(struct kvm *kvm,
                                        false);
        spin_unlock(&kvm->mmu_lock);
 
-       /* see kvm_mmu_slot_remove_write_access */
-       lockdep_assert_held(&kvm->slots_lock);
-
        if (flush)
-               kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn,
-                               memslot->npages);
+               kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_slot_largepage_remove_write_access);
 
@@ -5997,12 +5985,8 @@ void kvm_mmu_slot_set_dirty(struct kvm *kvm,
        flush = slot_handle_all_level(kvm, memslot, __rmap_set_dirty, false);
        spin_unlock(&kvm->mmu_lock);
 
-       lockdep_assert_held(&kvm->slots_lock);
-
-       /* see kvm_mmu_slot_leaf_clear_dirty */
        if (flush)
-               kvm_flush_remote_tlbs_with_address(kvm, memslot->base_gfn,
-                               memslot->npages);
+               kvm_arch_flush_remote_tlbs_memslot(kvm, memslot);
 }
 EXPORT_SYMBOL_GPL(kvm_mmu_slot_set_dirty);