KVM: arm64: Convert write-protect operation to generic page-table API
Author:     Quentin Perret <qperret@google.com>
AuthorDate: Fri, 11 Sep 2020 13:25:21 +0000 (14:25 +0100)
Commit:     Marc Zyngier <maz@kernel.org>
CommitDate: Fri, 11 Sep 2020 14:51:14 +0000 (15:51 +0100)
Convert stage2_wp_range() to call the kvm_pgtable_stage2_wrprotect()
function of the generic page-table code instead of walking the page-table
directly.

Signed-off-by: Quentin Perret <qperret@google.com>
Signed-off-by: Will Deacon <will@kernel.org>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Reviewed-by: Gavin Shan <gshan@redhat.com>
Cc: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20200911132529.19844-14-will@kernel.org
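
For reference, the stage2_apply_range() helper that the converted code funnels the range through was added earlier in this series; the sketch below is an approximation of its shape (the authoritative version is in arch/arm64/kvm/mmu.c and details may differ), showing how the generic page-table callback is invoked chunk by chunk with optional rescheduling:

    /*
     * Rough sketch, not part of this diff: walk [addr, end) in
     * stage2_pgd_addr_end()-sized chunks, hand each chunk to the generic
     * page-table callback, and optionally drop the mmu_lock between chunks
     * so that large ranges do not hold the lock for too long.
     */
    static int stage2_apply_range(struct kvm *kvm, phys_addr_t addr,
                                  phys_addr_t end,
                                  int (*fn)(struct kvm_pgtable *, u64, u64),
                                  bool resched)
    {
            int ret;
            u64 next;

            do {
                    struct kvm_pgtable *pgt = kvm->arch.mmu.pgt;

                    if (!pgt)
                            return -EINVAL;

                    next = stage2_pgd_addr_end(kvm, addr, end);
                    ret = fn(pgt, addr, next - addr);
                    if (ret)
                            break;

                    if (resched && next != end)
                            cond_resched_lock(&kvm->mmu_lock);
            } while (addr = next, addr != end);

            return ret;
    }

With this in place, stage2_wp_range() no longer needs to open-code the walk and the periodic cond_resched_lock() dance; it simply passes kvm_pgtable_stage2_wrprotect() as the callback.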
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 36407dcdf01e2075008223f456c7b592ac8d2baf..a37da87eb7ebef58937885d794dbe59d78cffb83 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -71,6 +71,9 @@ static int stage2_apply_range(struct kvm *kvm, phys_addr_t addr,
        return ret;
 }
 
+#define stage2_apply_range_resched(kvm, addr, end, fn)                 \
+       stage2_apply_range(kvm, addr, end, fn, true)
+
 static bool memslot_is_logging(struct kvm_memory_slot *memslot)
 {
        return memslot->dirty_bitmap && !(memslot->flags & KVM_MEM_READONLY);
@@ -1302,27 +1305,7 @@ static void  stage2_wp_p4ds(struct kvm_s2_mmu *mmu, pgd_t *pgd,
 static void stage2_wp_range(struct kvm_s2_mmu *mmu, phys_addr_t addr, phys_addr_t end)
 {
        struct kvm *kvm = mmu->kvm;
-       pgd_t *pgd;
-       phys_addr_t next;
-
-       pgd = mmu->pgd + stage2_pgd_index(kvm, addr);
-       do {
-               /*
-                * Release kvm_mmu_lock periodically if the memory region is
-                * large. Otherwise, we may see kernel panics with
-                * CONFIG_DETECT_HUNG_TASK, CONFIG_LOCKUP_DETECTOR,
-                * CONFIG_LOCKDEP. Additionally, holding the lock too long
-                * will also starve other vCPUs. We have to also make sure
-                * that the page tables are not freed while we released
-                * the lock.
-                */
-               cond_resched_lock(&kvm->mmu_lock);
-               if (!READ_ONCE(mmu->pgd))
-                       break;
-               next = stage2_pgd_addr_end(kvm, addr, end);
-               if (stage2_pgd_present(kvm, *pgd))
-                       stage2_wp_p4ds(mmu, pgd, addr, next);
-       } while (pgd++, addr = next, addr != end);
+       stage2_apply_range_resched(kvm, addr, end, kvm_pgtable_stage2_wrprotect);
 }
 
 /**
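
As a usage illustration (not part of this patch), stage2_wp_range() is typically driven when dirty logging is enabled for a memslot, along the lines of kvm_mmu_wp_memory_region() in the same file; the sketch below mirrors that pattern, and field names may differ across kernel versions:

    /*
     * Illustration only: write-protect an entire memslot under the
     * mmu_lock, then flush stale TLB entries so that subsequent writes
     * fault and get logged in the dirty bitmap.
     */
    static void wp_whole_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
    {
            phys_addr_t start = memslot->base_gfn << PAGE_SHIFT;
            phys_addr_t end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT;

            spin_lock(&kvm->mmu_lock);
            stage2_wp_range(&kvm->arch.mmu, start, end);
            spin_unlock(&kvm->mmu_lock);
            kvm_flush_remote_tlbs(kvm);
    }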