KVM: x86/mmu: allow kvm_faultin_pfn to return page fault handling code
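This blobdiff bundles a few related MMU changes: try_async_pf() is renamed to kvm_faultin_pfn() and gains an "int *r" out-parameter so the helper can hand its caller the page fault handling code to return (rather than the caller hard-coding RET_PF_RETRY); a pte_list_count() helper is added that walks an rmap head's pte_list_desc chain and sums the SPTE counts; and kvm_zap_gfn_range() now takes mmu_lock for write around both the legacy-rmap and TDP MMU paths while bumping the MMU notifier count for the zapped range.

A minimal caller-side sketch of the new kvm_faultin_pfn() convention, condensed from the direct_page_fault() hunk below (example_fault_path() and the elided mapping logic are illustrative only, not kernel code):

/*
 * Sketch: when kvm_faultin_pfn() returns true it has already stored the
 * handling code (here always RET_PF_RETRY) in 'r', and the caller simply
 * propagates it instead of assuming RET_PF_RETRY.
 */
static int example_fault_path(struct kvm_vcpu *vcpu, gpa_t gpa, gfn_t gfn,
                              bool prefault, bool write)
{
        kvm_pfn_t pfn;
        hva_t hva;
        bool map_writable;
        int r = RET_PF_RETRY;

        if (kvm_faultin_pfn(vcpu, prefault, gfn, gpa, &pfn, &hva,
                            write, &map_writable, &r))
                return r;

        /* ... map the page using pfn/hva/map_writable ... */
        return RET_PF_FIXED;
}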
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index d282ccf5f0c59defd15275e0cc63ebe3ff1eec60..38e36cff82af61b2be6663758711c290fbed7bf0 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1035,6 +1035,26 @@ out:
        return true;
 }
 
+unsigned int pte_list_count(struct kvm_rmap_head *rmap_head)
+{
+       struct pte_list_desc *desc;
+       unsigned int count = 0;
+
+       if (!rmap_head->val)
+               return 0;
+       else if (!(rmap_head->val & 1))
+               return 1;
+
+       desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
+
+       while (desc) {
+               count += desc->spte_count;
+               desc = desc->more;
+       }
+
+       return count;
+}
+
 static struct kvm_rmap_head *gfn_to_rmap(gfn_t gfn, int level,
                                         const struct kvm_memory_slot *slot)
 {
@@ -3864,9 +3884,9 @@ static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
                                  kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch);
 }
 
-static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
+static bool kvm_faultin_pfn(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
                         gpa_t cr2_or_gpa, kvm_pfn_t *pfn, hva_t *hva,
-                        bool write, bool *writable)
+                        bool write, bool *writable, int *r)
 {
        struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
        bool async;
@@ -3877,7 +3897,7 @@ static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
         * be zapped before KVM inserts a new MMIO SPTE for the gfn.
         */
        if (slot && (slot->flags & KVM_MEMSLOT_INVALID))
-               return true;
+               goto out_retry;
 
        /* Don't expose private memslots to L2. */
        if (is_guest_mode(vcpu) && !kvm_is_visible_memslot(slot)) {
@@ -3897,14 +3917,17 @@ static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
                if (kvm_find_async_pf_gfn(vcpu, gfn)) {
                        trace_kvm_async_pf_doublefault(cr2_or_gpa, gfn);
                        kvm_make_request(KVM_REQ_APF_HALT, vcpu);
-                       return true;
+                       goto out_retry;
                } else if (kvm_arch_setup_async_pf(vcpu, cr2_or_gpa, gfn))
-                       return true;
+                       goto out_retry;
        }
 
        *pfn = __gfn_to_pfn_memslot(slot, gfn, false, NULL,
                                    write, writable, hva);
        return false;
+
+out_retry:
+       *r = RET_PF_RETRY;
+       return true;
 }
 
 static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
@@ -3934,9 +3957,9 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
        mmu_seq = vcpu->kvm->mmu_notifier_seq;
        smp_rmb();
 
-       if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, &hva,
-                        write, &map_writable))
-               return RET_PF_RETRY;
+       if (kvm_faultin_pfn(vcpu, prefault, gfn, gpa, &pfn, &hva,
+                        write, &map_writable, &r))
+               return r;
 
        if (handle_abnormal_pfn(vcpu, is_tdp ? 0 : gpa, gfn, pfn, ACC_ALL, &r))
                return r;
@@ -5655,6 +5678,10 @@ void kvm_mmu_uninit_vm(struct kvm *kvm)
        kvm_mmu_uninit_tdp_mmu(kvm);
 }
 
+/*
+ * Invalidate (zap) SPTEs that cover GFNs from gfn_start and up to gfn_end
+ * (not including it)
+ */
 void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
 {
        struct kvm_memslots *slots;
@@ -5662,8 +5689,11 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
        int i;
        bool flush = false;
 
+       write_lock(&kvm->mmu_lock);
+
+       kvm_inc_notifier_count(kvm, gfn_start, gfn_end);
+
        if (kvm_memslots_have_rmaps(kvm)) {
-               write_lock(&kvm->mmu_lock);
                for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
                        slots = __kvm_memslots(kvm, i);
                        kvm_for_each_memslot(memslot, slots) {
@@ -5682,23 +5712,25 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
                        }
                }
                if (flush)
-                       kvm_flush_remote_tlbs_with_address(kvm, gfn_start, gfn_end);
-               write_unlock(&kvm->mmu_lock);
+                       kvm_flush_remote_tlbs_with_address(kvm, gfn_start,
+                                                          gfn_end - gfn_start);
        }
 
        if (is_tdp_mmu_enabled(kvm)) {
-               flush = false;
-
-               read_lock(&kvm->mmu_lock);
                for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
                        flush = kvm_tdp_mmu_zap_gfn_range(kvm, i, gfn_start,
-                                                         gfn_end, flush, true);
+                                                         gfn_end, flush);
                if (flush)
                        kvm_flush_remote_tlbs_with_address(kvm, gfn_start,
-                                                          gfn_end);
-
-               read_unlock(&kvm->mmu_lock);
+                                                          gfn_end - gfn_start);
        }
+
+       if (flush)
+               kvm_flush_remote_tlbs_with_address(kvm, gfn_start, gfn_end);
+
+       kvm_dec_notifier_count(kvm, gfn_start, gfn_end);
+
+       write_unlock(&kvm->mmu_lock);
 }
 
 static bool slot_rmap_write_protect(struct kvm *kvm,
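The comment added above kvm_zap_gfn_range() documents a half-open range: SPTEs covering gfn_start are zapped, those covering gfn_end are not. A hypothetical caller-side sketch of that convention (example_zap_one_page() is illustrative, not an existing kernel function; note that kvm_zap_gfn_range() now takes mmu_lock itself, so the caller must not hold it):

/*
 * Illustrative only: zap the SPTEs that map the single guest page
 * containing 'gpa'.  Because the range is half-open [gfn_start, gfn_end),
 * a one-page zap passes gfn and gfn + 1.
 */
static void example_zap_one_page(struct kvm *kvm, gpa_t gpa)
{
        gfn_t gfn = gpa_to_gfn(gpa);

        kvm_zap_gfn_range(kvm, gfn, gfn + 1);
}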