KVM: X86: MMU: no mmu_notifier_seq++ in kvm_age_hva
author    Peter Feiner <pfeiner@google.com>
          Mon, 26 Sep 2016 17:45:34 +0000 (10:45 -0700)
committer Paolo Bonzini <pbonzini@redhat.com>
          Wed, 2 Nov 2016 20:32:17 +0000 (21:32 +0100)
The MMU notifier sequence number keeps GPA->HPA mappings in sync when
GPA->HPA lookups are done outside of the MMU lock (e.g., in
tdp_page_fault). Since kvm_age_hva doesn't change GPA->HPA, it's
unnecessary to increment the sequence number.
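
For context, a minimal user-space sketch of the retry protocol the sequence
number implements follows. All names here (mmu_lock, notifier_seq, gpa_to_hpa,
invalidate_mapping, fault_in) are simplified stand-ins, not the real KVM
symbols: in KVM the players are kvm->mmu_lock, kvm->mmu_notifier_seq, fault
paths such as tdp_page_fault, and the mmu_notifier_retry check. The sketch
also omits the memory barriers (and READ_ONCE-style annotations) the real
code needs.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t mmu_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long notifier_seq;        /* bumped when GPA->HPA changes */
static unsigned long gpa_to_hpa = 0x1000; /* toy "mapping"                */

/* Writer side: only paths that CHANGE the mapping bump the sequence. */
static void invalidate_mapping(unsigned long new_hpa)
{
	pthread_mutex_lock(&mmu_lock);
	gpa_to_hpa = new_hpa;
	notifier_seq++;            /* lookups in flight must retry */
	pthread_mutex_unlock(&mmu_lock);
}

/* Reader side: do the lookup outside the lock, validate under it. */
static unsigned long fault_in(void)
{
	unsigned long seq, hpa;
retry:
	seq = notifier_seq;        /* snapshot before the unlocked lookup */
	hpa = gpa_to_hpa;          /* expensive lookup, done unlocked     */

	pthread_mutex_lock(&mmu_lock);
	if (seq != notifier_seq) { /* mapping changed underneath us */
		pthread_mutex_unlock(&mmu_lock);
		goto retry;
	}
	/* safe to install hpa into the page tables here */
	pthread_mutex_unlock(&mmu_lock);
	return hpa;
}

int main(void)
{
	invalidate_mapping(0x2000);
	printf("hpa = %#lx\n", fault_in());
	return 0;
}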

Signed-off-by: Peter Feiner <pfeiner@google.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
arch/x86/kvm/mmu.c

index d9c7e986b4e4e7e0e7bd9bc28223485e42d7c3bb..d3a94ea9f6acd60e119fcd89768eafc83424a7df 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1660,17 +1660,9 @@ int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
         * This has some overhead, but not as much as the cost of swapping
         * out actively used pages or breaking up actively used hugepages.
         */
-       if (!shadow_accessed_mask) {
-               /*
-                * We are holding the kvm->mmu_lock, and we are blowing up
-                * shadow PTEs. MMU notifier consumers need to be kept at bay.
-                * This is correct as long as we don't decouple the mmu_lock
-                * protected regions (like invalidate_range_start|end does).
-                */
-               kvm->mmu_notifier_seq++;
+       if (!shadow_accessed_mask)
                return kvm_handle_hva_range(kvm, start, end, 0,
                                            kvm_unmap_rmapp);
-       }
 
        return kvm_handle_hva_range(kvm, start, end, 0, kvm_age_rmapp);
 }
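
Continuing the hypothetical sketch above, the aging side takes the lock and
may clear accessed state (or, when !shadow_accessed_mask, even zap the toy
PTE), but because the GPA->HPA translation itself is unchanged it leaves
notifier_seq alone, which is exactly the behavior this patch restores: a
racing fault_in() still installs a correct translation.

static unsigned long accessed;     /* toy accessed bit */

static int age_mapping(void)
{
	int young;

	pthread_mutex_lock(&mmu_lock);
	young = accessed != 0;
	accessed = 0;              /* clear A bit; translation unchanged */
	/* note: no notifier_seq++ here, per the patch above */
	pthread_mutex_unlock(&mmu_lock);
	return young;
}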