git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blobdiff - arch/x86/kvm/mmu/mmu.c
KVM: x86/mmu: allow kvm_faultin_pfn to return page fault handling code
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 3a0ae48a26e96bd3cdf6b407a1495a57d965a17b..38e36cff82af61b2be6663758711c290fbed7bf0 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1035,8 +1035,28 @@ out:
        return true;
 }
 
-static struct kvm_rmap_head *__gfn_to_rmap(gfn_t gfn, int level,
-                                          const struct kvm_memory_slot *slot)
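+/* Return the number of sptes tracked in the pte list headed by @rmap_head. */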
+unsigned int pte_list_count(struct kvm_rmap_head *rmap_head)
+{
+       struct pte_list_desc *desc;
+       unsigned int count = 0;
+
+       if (!rmap_head->val)
+               return 0;
+       else if (!(rmap_head->val & 1))
+               return 1;
+
+       desc = (struct pte_list_desc *)(rmap_head->val & ~1ul);
+
+       while (desc) {
+               count += desc->spte_count;
+               desc = desc->more;
+       }
+
+       return count;
+}
+
+static struct kvm_rmap_head *gfn_to_rmap(gfn_t gfn, int level,
+                                        const struct kvm_memory_slot *slot)
 {
        unsigned long idx;
 
@@ -1061,7 +1081,7 @@ static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
        sp = sptep_to_sp(spte);
        kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn);
        slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
-       rmap_head = __gfn_to_rmap(gfn, sp->role.level, slot);
+       rmap_head = gfn_to_rmap(gfn, sp->role.level, slot);
        return pte_list_add(vcpu, spte, rmap_head);
 }
 
@@ -1085,7 +1105,7 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
        slots = kvm_memslots_for_spte_role(kvm, sp->role);
 
        slot = __gfn_to_memslot(slots, gfn);
-       rmap_head = __gfn_to_rmap(gfn, sp->role.level, slot);
+       rmap_head = gfn_to_rmap(gfn, sp->role.level, slot);
 
        __pte_list_remove(spte, rmap_head);
 }
@@ -1307,8 +1327,8 @@ static void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
                return;
 
        while (mask) {
-               rmap_head = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
-                                         PG_LEVEL_4K, slot);
+               rmap_head = gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
+                                       PG_LEVEL_4K, slot);
                __rmap_write_protect(kvm, rmap_head, false);
 
                /* clear the first set bit */
@@ -1340,8 +1360,8 @@ static void kvm_mmu_clear_dirty_pt_masked(struct kvm *kvm,
                return;
 
        while (mask) {
-               rmap_head = __gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
-                                         PG_LEVEL_4K, slot);
+               rmap_head = gfn_to_rmap(slot->base_gfn + gfn_offset + __ffs(mask),
+                                       PG_LEVEL_4K, slot);
                __rmap_clear_dirty(kvm, rmap_head, slot);
 
                /* clear the first set bit */
@@ -1407,7 +1427,7 @@ bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
 
        if (kvm_memslots_have_rmaps(kvm)) {
                for (i = min_level; i <= KVM_MAX_HUGEPAGE_LEVEL; ++i) {
-                       rmap_head = __gfn_to_rmap(gfn, i, slot);
+                       rmap_head = gfn_to_rmap(gfn, i, slot);
                        write_protected |= __rmap_write_protect(kvm, rmap_head, true);
                }
        }
@@ -1502,9 +1522,8 @@ rmap_walk_init_level(struct slot_rmap_walk_iterator *iterator, int level)
 {
        iterator->level = level;
        iterator->gfn = iterator->start_gfn;
-       iterator->rmap = __gfn_to_rmap(iterator->gfn, level, iterator->slot);
-       iterator->end_rmap = __gfn_to_rmap(iterator->end_gfn, level,
-                                          iterator->slot);
+       iterator->rmap = gfn_to_rmap(iterator->gfn, level, iterator->slot);
+       iterator->end_rmap = gfn_to_rmap(iterator->end_gfn, level, iterator->slot);
 }
 
 static void
@@ -1630,7 +1649,7 @@ static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
 
        sp = sptep_to_sp(spte);
        slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
-       rmap_head = __gfn_to_rmap(gfn, sp->role.level, slot);
+       rmap_head = gfn_to_rmap(gfn, sp->role.level, slot);
 
        kvm_unmap_rmapp(vcpu->kvm, rmap_head, NULL, gfn, sp->role.level, __pte(0));
        kvm_flush_remote_tlbs_with_address(vcpu->kvm, sp->gfn,
@@ -1685,7 +1704,7 @@ static int is_empty_shadow_page(u64 *spt)
  * aggregate version in order to make the slab shrinker
  * faster
  */
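+/* @nr is negative when shadow pages are freed. */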
-static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, unsigned long nr)
+static inline void kvm_mod_used_mmu_pages(struct kvm *kvm, long nr)
 {
        kvm->arch.n_used_mmu_pages += nr;
        percpu_counter_add(&kvm_total_used_mmu_pages, nr);
@@ -2576,6 +2595,7 @@ static void kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn, bool can_unsync)
 {
        struct kvm_mmu_page *sp;
+       bool locked = false;
 
        /*
         * Force write-protection if the page is being tracked.  Note, the page
@@ -2598,9 +2618,34 @@ int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn, bool can_unsync)
                if (sp->unsync)
                        continue;
 
+               /*
+                * TDP MMU page faults require an additional spinlock as they
+                * run with mmu_lock held for read, not write, and the unsync
+                * logic is not thread safe.  Take the spinlock regardless of
+                * the MMU type to avoid extra conditionals/parameters; there's
+                * no meaningful penalty if mmu_lock is held for write.
+                */
+               if (!locked) {
+                       locked = true;
+                       spin_lock(&vcpu->kvm->arch.mmu_unsync_pages_lock);
+
+                       /*
+                        * Recheck after taking the spinlock, a different vCPU
+                        * may have since marked the page unsync.  A false
+                        * positive on the unprotected check above is not
+                        * possible as clearing sp->unsync _must_ hold mmu_lock
+                        * for write, i.e. unsync cannot transition from 1->0
+                        * while this CPU holds mmu_lock for read (or write).
+                        */
+                       if (READ_ONCE(sp->unsync))
+                               continue;
+               }
+
                WARN_ON(sp->role.level != PG_LEVEL_4K);
                kvm_unsync_page(vcpu, sp);
        }
+       if (locked)
+               spin_unlock(&vcpu->kvm->arch.mmu_unsync_pages_lock);
 
        /*
         * We need to ensure that the marking of unsync pages is visible
@@ -3839,9 +3884,9 @@ static bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
                                  kvm_vcpu_gfn_to_hva(vcpu, gfn), &arch);
 }
 
-static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
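+/*
+ * Returns true if the page fault cannot be resolved right now; *r is then set
+ * to the RET_PF_* code (currently always RET_PF_RETRY) that the caller should
+ * return.  Returns false if handling should continue with the returned *pfn.
+ */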
+static bool kvm_faultin_pfn(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
                         gpa_t cr2_or_gpa, kvm_pfn_t *pfn, hva_t *hva,
-                        bool write, bool *writable)
+                        bool write, bool *writable, int *r)
 {
        struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
        bool async;
@@ -3852,7 +3897,7 @@ static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
         * be zapped before KVM inserts a new MMIO SPTE for the gfn.
         */
        if (slot && (slot->flags & KVM_MEMSLOT_INVALID))
-               return true;
+               goto out_retry;
 
        /* Don't expose private memslots to L2. */
        if (is_guest_mode(vcpu) && !kvm_is_visible_memslot(slot)) {
@@ -3872,14 +3917,17 @@ static bool try_async_pf(struct kvm_vcpu *vcpu, bool prefault, gfn_t gfn,
                if (kvm_find_async_pf_gfn(vcpu, gfn)) {
                        trace_kvm_async_pf_doublefault(cr2_or_gpa, gfn);
                        kvm_make_request(KVM_REQ_APF_HALT, vcpu);
-                       return true;
+                       goto out_retry;
                } else if (kvm_arch_setup_async_pf(vcpu, cr2_or_gpa, gfn))
-                       return true;
+                       goto out_retry;
        }
 
        *pfn = __gfn_to_pfn_memslot(slot, gfn, false, NULL,
                                    write, writable, hva);
        return false;
+
+out_retry:
+       *r = RET_PF_RETRY;
+       return true;
 }
 
 static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
@@ -3909,9 +3957,9 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
        mmu_seq = vcpu->kvm->mmu_notifier_seq;
        smp_rmb();
 
-       if (try_async_pf(vcpu, prefault, gfn, gpa, &pfn, &hva,
-                        write, &map_writable))
-               return RET_PF_RETRY;
+       if (kvm_faultin_pfn(vcpu, prefault, gfn, gpa, &pfn, &hva,
+                        write, &map_writable, &r))
+               return r;
 
        if (handle_abnormal_pfn(vcpu, is_tdp ? 0 : gpa, gfn, pfn, ACC_ALL, &r))
                return r;
@@ -5606,6 +5654,8 @@ void kvm_mmu_init_vm(struct kvm *kvm)
 {
        struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;
 
+       spin_lock_init(&kvm->arch.mmu_unsync_pages_lock);
+
        if (!kvm_mmu_init_tdp_mmu(kvm))
                /*
                 * No smp_load/store wrappers needed here as we are in
@@ -5628,6 +5678,10 @@ void kvm_mmu_uninit_vm(struct kvm *kvm)
        kvm_mmu_uninit_tdp_mmu(kvm);
 }
 
+/*
+ * Invalidate (zap) SPTEs that cover GFNs in the range [gfn_start, gfn_end),
+ * i.e. gfn_end is exclusive.
+ */
 void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
 {
        struct kvm_memslots *slots;
@@ -5635,8 +5689,11 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
        int i;
        bool flush = false;
 
+       write_lock(&kvm->mmu_lock);
+
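+       /*
+        * Elevate the mmu notifier count so that concurrent page faults on
+        * this range back off and retry until the zap completes.
+        */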
+       kvm_inc_notifier_count(kvm, gfn_start, gfn_end);
+
        if (kvm_memslots_have_rmaps(kvm)) {
-               write_lock(&kvm->mmu_lock);
                for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) {
                        slots = __kvm_memslots(kvm, i);
                        kvm_for_each_memslot(memslot, slots) {
@@ -5655,23 +5712,25 @@ void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end)
                        }
                }
                if (flush)
-                       kvm_flush_remote_tlbs_with_address(kvm, gfn_start, gfn_end);
-               write_unlock(&kvm->mmu_lock);
+                       kvm_flush_remote_tlbs_with_address(kvm, gfn_start,
+                                                          gfn_end - gfn_start);
        }
 
        if (is_tdp_mmu_enabled(kvm)) {
-               flush = false;
-
-               read_lock(&kvm->mmu_lock);
                for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
                        flush = kvm_tdp_mmu_zap_gfn_range(kvm, i, gfn_start,
-                                                         gfn_end, flush, true);
+                                                         gfn_end, flush);
                if (flush)
                        kvm_flush_remote_tlbs_with_address(kvm, gfn_start,
-                                                          gfn_end);
-
-               read_unlock(&kvm->mmu_lock);
+                                                          gfn_end - gfn_start);
        }
+
+       if (flush)
+               kvm_flush_remote_tlbs_with_address(kvm, gfn_start, gfn_end);
+
+       kvm_dec_notifier_count(kvm, gfn_start, gfn_end);
+
+       write_unlock(&kvm->mmu_lock);
 }
 
 static bool slot_rmap_write_protect(struct kvm *kvm,