// SPDX-License-Identifier: GPL-2.0

#include "mmu.h"
#include "mmu_internal.h"
#include "mmutrace.h"
#include "tdp_iter.h"
#include "tdp_mmu.h"
#include "spte.h"

#ifdef CONFIG_X86_64
static bool __read_mostly tdp_mmu_enabled = false;
module_param_named(tdp_mmu, tdp_mmu_enabled, bool, 0644);
#endif

static bool is_tdp_mmu_enabled(void)
{
#ifdef CONFIG_X86_64
        return tdp_enabled && READ_ONCE(tdp_mmu_enabled);
#else
        return false;
#endif /* CONFIG_X86_64 */
}

/* Initializes the TDP MMU for the VM, if enabled. */
void kvm_mmu_init_tdp_mmu(struct kvm *kvm)
{
        if (!is_tdp_mmu_enabled())
                return;

        /* This should not be changed for the lifetime of the VM. */
        kvm->arch.tdp_mmu_enabled = true;

        INIT_LIST_HEAD(&kvm->arch.tdp_mmu_roots);
        INIT_LIST_HEAD(&kvm->arch.tdp_mmu_pages);
}

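/*
 * Tears down TDP MMU state when the VM is destroyed. All TDP MMU roots
 * should have been freed by this point, which the WARN_ON below asserts.
 */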
void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
{
        if (!kvm->arch.tdp_mmu_enabled)
                return;

        WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots));
}

#define for_each_tdp_mmu_root(_kvm, _root)                                \
        list_for_each_entry(_root, &_kvm->arch.tdp_mmu_roots, link)

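/*
 * Returns true if the given root HPA refers to a live TDP MMU root, i.e. the
 * backing shadow page was allocated by the TDP MMU and still has a nonzero
 * root count.
 */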
bool is_tdp_mmu_root(struct kvm *kvm, hpa_t hpa)
{
        struct kvm_mmu_page *sp;

        if (!kvm->arch.tdp_mmu_enabled)
                return false;
        if (WARN_ON(!VALID_PAGE(hpa)))
                return false;

        sp = to_shadow_page(hpa);

        return sp->tdp_mmu_page && sp->root_count;
}

static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
                          gfn_t start, gfn_t end, bool can_yield);

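/*
 * Frees a TDP MMU root whose reference count has dropped to zero: unlinks it
 * from tdp_mmu_roots, zaps the paging structure beneath it, and releases the
 * backing pages.
 */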
void kvm_tdp_mmu_free_root(struct kvm *kvm, struct kvm_mmu_page *root)
{
        gfn_t max_gfn = 1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT);

        lockdep_assert_held(&kvm->mmu_lock);

        WARN_ON(root->root_count);
        WARN_ON(!root->tdp_mmu_page);

        list_del(&root->link);

        zap_gfn_range(kvm, root, 0, max_gfn, false);

        free_page((unsigned long)root->spt);
        kmem_cache_free(mmu_page_header_cache, root);
}

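/*
 * Builds the page role used for TDP MMU pages at the given level: the vCPU's
 * base MMU role with 8-byte GPTEs and full access permissions.
 */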
static union kvm_mmu_page_role page_role_for_level(struct kvm_vcpu *vcpu,
                                                   int level)
{
        union kvm_mmu_page_role role;

        role = vcpu->arch.mmu->mmu_role.base;
        role.level = level;
        role.direct = true;
        role.gpte_is_8_bytes = true;
        role.access = ACC_ALL;

        return role;
}

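/*
 * Allocates a TDP MMU page table page and its struct kvm_mmu_page header
 * from the vCPU's memory caches, linking the two through the page's private
 * field so that sptep_to_sp() can recover the header from an SPTE pointer.
 */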
static struct kvm_mmu_page *alloc_tdp_mmu_page(struct kvm_vcpu *vcpu, gfn_t gfn,
                                               int level)
{
        struct kvm_mmu_page *sp;

        sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
        sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
        set_page_private(virt_to_page(sp->spt), (unsigned long)sp);

        sp->role.word = page_role_for_level(vcpu, level).word;
        sp->gfn = gfn;
        sp->tdp_mmu_page = true;

        return sp;
}

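/*
 * Returns the root for the vCPU's current MMU role, reusing an existing
 * root with a matching role (roots are shared between compatible vCPUs)
 * and allocating a new one only if no match is found.
 */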
static struct kvm_mmu_page *get_tdp_mmu_vcpu_root(struct kvm_vcpu *vcpu)
{
        union kvm_mmu_page_role role;
        struct kvm *kvm = vcpu->kvm;
        struct kvm_mmu_page *root;

        role = page_role_for_level(vcpu, vcpu->arch.mmu->shadow_root_level);

        spin_lock(&kvm->mmu_lock);

        /* Check for an existing root before allocating a new one. */
        for_each_tdp_mmu_root(kvm, root) {
                if (root->role.word == role.word) {
                        kvm_mmu_get_root(kvm, root);
                        spin_unlock(&kvm->mmu_lock);
                        return root;
                }
        }

        root = alloc_tdp_mmu_page(vcpu, 0, vcpu->arch.mmu->shadow_root_level);
        root->root_count = 1;

        list_add(&root->link, &kvm->arch.tdp_mmu_roots);

        spin_unlock(&kvm->mmu_lock);

        return root;
}

hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu)
{
        struct kvm_mmu_page *root;

        root = get_tdp_mmu_vcpu_root(vcpu);
        if (!root)
                return INVALID_PAGE;

        return __pa(root->spt);
}

static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
                                u64 old_spte, u64 new_spte, int level);

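/*
 * Returns the address space ID of the paging structure: on x86, address
 * space 1 is used for SMM and address space 0 for everything else.
 */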
static int kvm_mmu_page_as_id(struct kvm_mmu_page *sp)
{
        return sp->role.smm ? 1 : 0;
}

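/*
 * Propagates accessed-bit state to the primary MM when a present leaf SPTE's
 * accessed flag is cleared, or when the SPTE stops mapping the PFN.
 */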
static void handle_changed_spte_acc_track(u64 old_spte, u64 new_spte, int level)
{
        bool pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);

        if (!is_shadow_present_pte(old_spte) || !is_last_spte(old_spte, level))
                return;

        if (is_accessed_spte(old_spte) &&
            (!is_accessed_spte(new_spte) || pfn_changed))
                kvm_set_pfn_accessed(spte_to_pfn(old_spte));
}

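/*
 * Updates the dirty log when a 4k SPTE becomes writable: the guest can then
 * dirty the page without faulting, so it must be marked dirty in its memslot.
 */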
static void handle_changed_spte_dirty_log(struct kvm *kvm, int as_id, gfn_t gfn,
                                          u64 old_spte, u64 new_spte, int level)
{
        bool pfn_changed;
        struct kvm_memory_slot *slot;

        if (level > PG_LEVEL_4K)
                return;

        pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);

        if ((!is_writable_pte(old_spte) || pfn_changed) &&
            is_writable_pte(new_spte)) {
                slot = __gfn_to_memslot(__kvm_memslots(kvm, as_id), gfn);
                mark_page_dirty_in_slot(slot, gfn);
        }
}

/**
 * handle_changed_spte - handle bookkeeping associated with an SPTE change
 * @kvm: kvm instance
 * @as_id: the address space of the paging structure the SPTE was a part of
 * @gfn: the base GFN that was mapped by the SPTE
 * @old_spte: The value of the SPTE before the change
 * @new_spte: The value of the SPTE after the change
 * @level: the level of the PT the SPTE is part of in the paging structure
 *
 * Handle bookkeeping that might result from the modification of a SPTE.
 * This function must be called for all TDP SPTE modifications.
 */
static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
                                  u64 old_spte, u64 new_spte, int level)
{
        bool was_present = is_shadow_present_pte(old_spte);
        bool is_present = is_shadow_present_pte(new_spte);
        bool was_leaf = was_present && is_last_spte(old_spte, level);
        bool is_leaf = is_present && is_last_spte(new_spte, level);
        bool pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);
        u64 *pt;
        struct kvm_mmu_page *sp;
        u64 old_child_spte;
        int i;

        WARN_ON(level > PT64_ROOT_MAX_LEVEL);
        WARN_ON(level < PG_LEVEL_4K);
        WARN_ON(gfn & (KVM_PAGES_PER_HPAGE(level) - 1));

        /*
         * If this warning were to trigger it would indicate that there was a
         * missing MMU notifier or a race with some notifier handler.
         * A present, leaf SPTE should never be directly replaced with another
         * present leaf SPTE pointing to a different PFN. A notifier handler
         * should be zapping the SPTE before the main MM's page table is
         * changed, or the SPTE should be zeroed, and the TLBs flushed by the
         * thread before replacement.
         */
        if (was_leaf && is_leaf && pfn_changed) {
                pr_err("Invalid SPTE change: cannot replace a present leaf\n"
                       "SPTE with another present leaf SPTE mapping a\n"
                       "different PFN!\n"
                       "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
                       as_id, gfn, old_spte, new_spte, level);

                /*
                 * Crash the host to prevent error propagation and guest data
                 * corruption.
                 */
                BUG();
        }

        if (old_spte == new_spte)
                return;

        /*
         * The only times a SPTE should be changed from a non-present to
         * non-present state is when an MMIO entry is installed/modified/
         * removed. In that case, there is nothing to do here.
         */
        if (!was_present && !is_present) {
                /*
                 * If this change does not involve a MMIO SPTE, it is
                 * unexpected. Log the change, though it should not impact the
                 * guest since both the former and current SPTEs are nonpresent.
                 */
                if (WARN_ON(!is_mmio_spte(old_spte) && !is_mmio_spte(new_spte)))
                        pr_err("Unexpected SPTE change! Nonpresent SPTEs\n"
                               "should not be replaced with another,\n"
                               "different nonpresent SPTE, unless one or both\n"
                               "are MMIO SPTEs.\n"
                               "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
                               as_id, gfn, old_spte, new_spte, level);
                return;
        }

        if (was_leaf && is_dirty_spte(old_spte) &&
            (!is_dirty_spte(new_spte) || pfn_changed))
                kvm_set_pfn_dirty(spte_to_pfn(old_spte));

        /*
         * Recursively handle child PTs if the change removed a subtree from
         * the paging structure.
         */
        if (was_present && !was_leaf && (pfn_changed || !is_present)) {
                pt = spte_to_child_pt(old_spte, level);
                sp = sptep_to_sp(pt);

                list_del(&sp->link);

                if (sp->lpage_disallowed)
                        unaccount_huge_nx_page(kvm, sp);

                for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
                        old_child_spte = READ_ONCE(*(pt + i));
                        WRITE_ONCE(*(pt + i), 0);
                        handle_changed_spte(kvm, as_id,
                                gfn + (i * KVM_PAGES_PER_HPAGE(level - 1)),
                                old_child_spte, 0, level - 1);
                }

                kvm_flush_remote_tlbs_with_address(kvm, gfn,
                                                   KVM_PAGES_PER_HPAGE(level));

                free_page((unsigned long)pt);
                kmem_cache_free(mmu_page_header_cache, sp);
        }
}

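/*
 * Wrapper around __handle_changed_spte() that also performs the accessed and
 * dirty bookkeeping. Callers that handle that bookkeeping themselves use the
 * __tdp_mmu_set_spte() variants below to skip it.
 */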
static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
                                u64 old_spte, u64 new_spte, int level)
{
        __handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level);
        handle_changed_spte_acc_track(old_spte, new_spte, level);
        handle_changed_spte_dirty_log(kvm, as_id, gfn, old_spte,
                                      new_spte, level);
}

static inline void __tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
                                      u64 new_spte, bool record_acc_track,
                                      bool record_dirty_log)
{
        u64 *root_pt = tdp_iter_root_pt(iter);
        struct kvm_mmu_page *root = sptep_to_sp(root_pt);
        int as_id = kvm_mmu_page_as_id(root);

        WRITE_ONCE(*iter->sptep, new_spte);

        __handle_changed_spte(kvm, as_id, iter->gfn, iter->old_spte, new_spte,
                              iter->level);
        if (record_acc_track)
                handle_changed_spte_acc_track(iter->old_spte, new_spte,
                                              iter->level);
        if (record_dirty_log)
                handle_changed_spte_dirty_log(kvm, as_id, iter->gfn,
                                              iter->old_spte, new_spte,
                                              iter->level);
}

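/*
 * Convenience wrappers around __tdp_mmu_set_spte(). The _no_acc_track and
 * _no_dirty_log variants are for callers, such as the aging and dirty
 * logging paths below, that do the corresponding bookkeeping themselves.
 */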
static inline void tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
                                    u64 new_spte)
{
        __tdp_mmu_set_spte(kvm, iter, new_spte, true, true);
}

static inline void tdp_mmu_set_spte_no_acc_track(struct kvm *kvm,
                                                 struct tdp_iter *iter,
                                                 u64 new_spte)
{
        __tdp_mmu_set_spte(kvm, iter, new_spte, false, true);
}

static inline void tdp_mmu_set_spte_no_dirty_log(struct kvm *kvm,
                                                 struct tdp_iter *iter,
                                                 u64 new_spte)
{
        __tdp_mmu_set_spte(kvm, iter, new_spte, true, false);
}

#define tdp_root_for_each_pte(_iter, _root, _start, _end) \
        for_each_tdp_pte(_iter, _root->spt, _root->role.level, _start, _end)

#define tdp_root_for_each_leaf_pte(_iter, _root, _start, _end)         \
        tdp_root_for_each_pte(_iter, _root, _start, _end)               \
                if (!is_shadow_present_pte(_iter.old_spte) ||           \
                    !is_last_spte(_iter.old_spte, _iter.level))         \
                        continue;                                       \
                else

#define tdp_mmu_for_each_pte(_iter, _mmu, _start, _end)         \
        for_each_tdp_pte(_iter, __va(_mmu->root_hpa),           \
                         _mmu->shadow_root_level, _start, _end)

/*
 * Flush the TLB if the process should drop kvm->mmu_lock.
 * Return whether the caller still needs to flush the tlb.
 */
static bool tdp_mmu_iter_flush_cond_resched(struct kvm *kvm, struct tdp_iter *iter)
{
        if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
                kvm_flush_remote_tlbs(kvm);
                cond_resched_lock(&kvm->mmu_lock);
                tdp_iter_refresh_walk(iter);
                return false;
        } else {
                return true;
        }
}

static void tdp_mmu_iter_cond_resched(struct kvm *kvm, struct tdp_iter *iter)
{
        if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
                cond_resched_lock(&kvm->mmu_lock);
                tdp_iter_refresh_walk(iter);
        }
}

/*
 * Tears down the mappings for the range of gfns, [start, end), and frees the
 * non-root pages mapping GFNs strictly within that range. Returns true if
 * SPTEs have been cleared and a TLB flush is needed before releasing the
 * MMU lock.
 * If can_yield is true, will release the MMU lock and reschedule if the
 * scheduler needs the CPU or there is contention on the MMU lock. If this
 * function cannot yield, it will not release the MMU lock or reschedule and
 * the caller must ensure it does not supply too large a GFN range, or the
 * operation can cause a soft lockup.
 */
static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
                          gfn_t start, gfn_t end, bool can_yield)
{
        struct tdp_iter iter;
        bool flush_needed = false;

        tdp_root_for_each_pte(iter, root, start, end) {
                if (!is_shadow_present_pte(iter.old_spte))
                        continue;

                /*
                 * If this is a non-last-level SPTE that covers a larger range
                 * than should be zapped, continue, and zap the mappings at a
                 * lower level.
                 */
                if ((iter.gfn < start ||
                     iter.gfn + KVM_PAGES_PER_HPAGE(iter.level) > end) &&
                    !is_last_spte(iter.old_spte, iter.level))
                        continue;

                tdp_mmu_set_spte(kvm, &iter, 0);

                if (can_yield)
                        flush_needed = tdp_mmu_iter_flush_cond_resched(kvm, &iter);
                else
                        flush_needed = true;
        }
        return flush_needed;
}

/*
 * Tears down the mappings for the range of gfns, [start, end), and frees the
 * non-root pages mapping GFNs strictly within that range. Returns true if
 * SPTEs have been cleared and a TLB flush is needed before releasing the
 * MMU lock.
 */
bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, gfn_t start, gfn_t end)
{
        struct kvm_mmu_page *root;
        bool flush = false;

        for_each_tdp_mmu_root(kvm, root) {
                /*
                 * Take a reference on the root so that it cannot be freed if
                 * this thread releases the MMU lock and yields in this loop.
                 */
                kvm_mmu_get_root(kvm, root);

                flush |= zap_gfn_range(kvm, root, start, end, true);

                kvm_mmu_put_root(kvm, root);
        }

        return flush;
}

void kvm_tdp_mmu_zap_all(struct kvm *kvm)
{
        gfn_t max_gfn = 1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT);
        bool flush;

        flush = kvm_tdp_mmu_zap_gfn_range(kvm, 0, max_gfn);
        if (flush)
                kvm_flush_remote_tlbs(kvm);
}

/*
 * Installs a last-level SPTE to handle a TDP page fault.
 * (NPT/EPT violation/misconfiguration)
 */
static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu, int write,
                                           int map_writable,
                                           struct tdp_iter *iter,
                                           kvm_pfn_t pfn, bool prefault)
{
        u64 new_spte;
        int ret = 0;
        int make_spte_ret = 0;

        if (unlikely(is_noslot_pfn(pfn))) {
                new_spte = make_mmio_spte(vcpu, iter->gfn, ACC_ALL);
                trace_mark_mmio_spte(iter->sptep, iter->gfn, new_spte);
        } else
                make_spte_ret = make_spte(vcpu, ACC_ALL, iter->level, iter->gfn,
                                          pfn, iter->old_spte, prefault, true,
                                          map_writable, !shadow_accessed_mask,
                                          &new_spte);

        if (new_spte == iter->old_spte)
                ret = RET_PF_SPURIOUS;
        else
                tdp_mmu_set_spte(vcpu->kvm, iter, new_spte);

        /*
         * If the page fault was caused by a write but the page is write
         * protected, emulation is needed. If the emulation was skipped,
         * the vCPU would have the same fault again.
         */
        if (make_spte_ret & SET_SPTE_WRITE_PROTECTED_PT) {
                if (write)
                        ret = RET_PF_EMULATE;
                kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
        }

        /* If a MMIO SPTE is installed, the MMIO will need to be emulated. */
        if (unlikely(is_mmio_spte(new_spte)))
                ret = RET_PF_EMULATE;

        trace_kvm_mmu_set_spte(iter->level, iter->gfn, iter->sptep);
        if (!prefault)
                vcpu->stat.pf_fixed++;

        return ret;
}

/*
 * Handle a TDP page fault (NPT/EPT violation/misconfiguration) by installing
 * page tables and SPTEs to translate the faulting guest physical address.
 */
int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
                    int map_writable, int max_level, kvm_pfn_t pfn,
                    bool prefault)
{
        bool nx_huge_page_workaround_enabled = is_nx_huge_page_enabled();
        bool write = error_code & PFERR_WRITE_MASK;
        bool exec = error_code & PFERR_FETCH_MASK;
        bool huge_page_disallowed = exec && nx_huge_page_workaround_enabled;
        struct kvm_mmu *mmu = vcpu->arch.mmu;
        struct tdp_iter iter;
        struct kvm_mmu_page *sp;
        u64 *child_pt;
        u64 new_spte;
        int ret;
        gfn_t gfn = gpa >> PAGE_SHIFT;
        int level;
        int req_level;

        if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa)))
                return RET_PF_RETRY;
        if (WARN_ON(!is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa)))
                return RET_PF_RETRY;

        level = kvm_mmu_hugepage_adjust(vcpu, gfn, max_level, &pfn,
                                        huge_page_disallowed, &req_level);

        trace_kvm_mmu_spte_requested(gpa, level, pfn);
        tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
                if (nx_huge_page_workaround_enabled)
                        disallowed_hugepage_adjust(iter.old_spte, gfn,
                                                   iter.level, &pfn, &level);

                if (iter.level == level)
                        break;

                /*
                 * If there is an SPTE mapping a large page at a higher level
                 * than the target, that SPTE must be cleared and replaced
                 * with a non-leaf SPTE.
                 */
                if (is_shadow_present_pte(iter.old_spte) &&
                    is_large_pte(iter.old_spte)) {
                        tdp_mmu_set_spte(vcpu->kvm, &iter, 0);

                        kvm_flush_remote_tlbs_with_address(vcpu->kvm, iter.gfn,
                                        KVM_PAGES_PER_HPAGE(iter.level));

                        /*
                         * The iter must explicitly re-read the spte here
                         * because the new value informs the !present
                         * path below.
                         */
                        iter.old_spte = READ_ONCE(*iter.sptep);
                }

                if (!is_shadow_present_pte(iter.old_spte)) {
                        sp = alloc_tdp_mmu_page(vcpu, iter.gfn, iter.level);
                        list_add(&sp->link, &vcpu->kvm->arch.tdp_mmu_pages);
                        child_pt = sp->spt;
                        clear_page(child_pt);
                        new_spte = make_nonleaf_spte(child_pt,
                                                     !shadow_accessed_mask);

                        trace_kvm_mmu_get_page(sp, true);
                        if (huge_page_disallowed && req_level >= iter.level)
                                account_huge_nx_page(vcpu->kvm, sp);

                        tdp_mmu_set_spte(vcpu->kvm, &iter, new_spte);
                }
        }

        if (WARN_ON(iter.level != level))
                return RET_PF_RETRY;

        ret = tdp_mmu_map_handle_target_level(vcpu, write, map_writable, &iter,
                                              pfn, prefault);

        return ret;
}

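/*
 * Common iterator backing the MMU notifier hooks below: invokes handler on
 * each root, for each memslot GFN range that intersects the HVA range
 * [start, end). The handlers' return values are OR'd together, so a nonzero
 * result indicates that at least one handler requested a TLB flush.
 */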
static int kvm_tdp_mmu_handle_hva_range(struct kvm *kvm, unsigned long start,
                unsigned long end, unsigned long data,
                int (*handler)(struct kvm *kvm, struct kvm_memory_slot *slot,
                               struct kvm_mmu_page *root, gfn_t start,
                               gfn_t end, unsigned long data))
{
        struct kvm_memslots *slots;
        struct kvm_memory_slot *memslot;
        struct kvm_mmu_page *root;
        int ret = 0;
        int as_id;

        for_each_tdp_mmu_root(kvm, root) {
                /*
                 * Take a reference on the root so that it cannot be freed if
                 * this thread releases the MMU lock and yields in this loop.
                 */
                kvm_mmu_get_root(kvm, root);

                as_id = kvm_mmu_page_as_id(root);
                slots = __kvm_memslots(kvm, as_id);
                kvm_for_each_memslot(memslot, slots) {
                        unsigned long hva_start, hva_end;
                        gfn_t gfn_start, gfn_end;

                        hva_start = max(start, memslot->userspace_addr);
                        hva_end = min(end, memslot->userspace_addr +
                                      (memslot->npages << PAGE_SHIFT));
                        if (hva_start >= hva_end)
                                continue;
                        /*
                         * {gfn(page) | page intersects with [hva_start, hva_end)} =
                         * {gfn_start, gfn_start+1, ..., gfn_end-1}.
                         */
                        gfn_start = hva_to_gfn_memslot(hva_start, memslot);
                        gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);

                        ret |= handler(kvm, memslot, root, gfn_start,
                                       gfn_end, data);
                }

                kvm_mmu_put_root(kvm, root);
        }

        return ret;
}

static int zap_gfn_range_hva_wrapper(struct kvm *kvm,
                                     struct kvm_memory_slot *slot,
                                     struct kvm_mmu_page *root, gfn_t start,
                                     gfn_t end, unsigned long unused)
{
        return zap_gfn_range(kvm, root, start, end, false);
}

int kvm_tdp_mmu_zap_hva_range(struct kvm *kvm, unsigned long start,
                              unsigned long end)
{
        return kvm_tdp_mmu_handle_hva_range(kvm, start, end, 0,
                                            zap_gfn_range_hva_wrapper);
}

/*
 * Mark the SPTEs range of GFNs [start, end) unaccessed and return non-zero
 * if any of the GFNs in the range have been accessed.
 */
static int age_gfn_range(struct kvm *kvm, struct kvm_memory_slot *slot,
                         struct kvm_mmu_page *root, gfn_t start, gfn_t end,
                         unsigned long unused)
{
        struct tdp_iter iter;
        int young = 0;
        u64 new_spte = 0;

        tdp_root_for_each_leaf_pte(iter, root, start, end) {
                /*
                 * If we have a non-accessed entry we don't need to change the
                 * pte.
                 */
                if (!is_accessed_spte(iter.old_spte))
                        continue;

                new_spte = iter.old_spte;

                if (spte_ad_enabled(new_spte)) {
                        clear_bit((ffs(shadow_accessed_mask) - 1),
                                  (unsigned long *)&new_spte);
                } else {
                        /*
                         * Capture the dirty status of the page, so that it doesn't get
                         * lost when the SPTE is marked for access tracking.
                         */
                        if (is_writable_pte(new_spte))
                                kvm_set_pfn_dirty(spte_to_pfn(new_spte));

                        new_spte = mark_spte_for_access_track(new_spte);
                }
                new_spte &= ~shadow_dirty_mask;

                tdp_mmu_set_spte_no_acc_track(kvm, &iter, new_spte);
                young = 1;
        }

        return young;
}

int kvm_tdp_mmu_age_hva_range(struct kvm *kvm, unsigned long start,
                              unsigned long end)
{
        return kvm_tdp_mmu_handle_hva_range(kvm, start, end, 0,
                                            age_gfn_range);
}

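/*
 * Returns 1 if any leaf SPTE mapping the GFN has its accessed state set,
 * without clearing that state. Backs kvm_tdp_mmu_test_age_hva() below.
 */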
static int test_age_gfn(struct kvm *kvm, struct kvm_memory_slot *slot,
                        struct kvm_mmu_page *root, gfn_t gfn, gfn_t unused,
                        unsigned long unused2)
{
        struct tdp_iter iter;

        tdp_root_for_each_leaf_pte(iter, root, gfn, gfn + 1)
                if (is_accessed_spte(iter.old_spte))
                        return 1;

        return 0;
}

int kvm_tdp_mmu_test_age_hva(struct kvm *kvm, unsigned long hva)
{
        return kvm_tdp_mmu_handle_hva_range(kvm, hva, hva + 1, 0,
                                            test_age_gfn);
}

/*
 * Handle the changed_pte MMU notifier for the TDP MMU.
 * data is a pointer to the new pte_t mapping the HVA specified by the MMU
 * notifier.
 * Returns non-zero if a flush is needed before releasing the MMU lock.
 */
static int set_tdp_spte(struct kvm *kvm, struct kvm_memory_slot *slot,
                        struct kvm_mmu_page *root, gfn_t gfn, gfn_t unused,
                        unsigned long data)
{
        struct tdp_iter iter;
        pte_t *ptep = (pte_t *)data;
        kvm_pfn_t new_pfn;
        u64 new_spte;
        int need_flush = 0;

        WARN_ON(pte_huge(*ptep));

        new_pfn = pte_pfn(*ptep);

        tdp_root_for_each_pte(iter, root, gfn, gfn + 1) {
                if (iter.level != PG_LEVEL_4K)
                        continue;

                if (!is_shadow_present_pte(iter.old_spte))
                        break;

                tdp_mmu_set_spte(kvm, &iter, 0);

                kvm_flush_remote_tlbs_with_address(kvm, iter.gfn, 1);

                if (!pte_write(*ptep)) {
                        new_spte = kvm_mmu_changed_pte_notifier_make_spte(
                                        iter.old_spte, new_pfn);

                        tdp_mmu_set_spte(kvm, &iter, new_spte);
                }

                need_flush = 1;
        }

        if (need_flush)
                kvm_flush_remote_tlbs_with_address(kvm, gfn, 1);

        return 0;
}

int kvm_tdp_mmu_set_spte_hva(struct kvm *kvm, unsigned long address,
                             pte_t *host_ptep)
{
        return kvm_tdp_mmu_handle_hva_range(kvm, address, address + 1,
                                            (unsigned long)host_ptep,
                                            set_tdp_spte);
}

/*
 * Remove write access from all the SPTEs mapping GFNs [start, end). Will
 * only affect leaf SPTEs at or above min_level.
 * Returns true if an SPTE has been changed and the TLBs need to be flushed.
 */
static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
                             gfn_t start, gfn_t end, int min_level)
{
        struct tdp_iter iter;
        u64 new_spte;
        bool spte_set = false;

        BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL);

        for_each_tdp_pte_min_level(iter, root->spt, root->role.level,
                                   min_level, start, end) {
                if (!is_shadow_present_pte(iter.old_spte) ||
                    !is_last_spte(iter.old_spte, iter.level))
                        continue;

                new_spte = iter.old_spte & ~PT_WRITABLE_MASK;

                tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte);
                spte_set = true;

                tdp_mmu_iter_cond_resched(kvm, &iter);
        }
        return spte_set;
}

/*
 * Remove write access from all the SPTEs mapping GFNs in the memslot. Will
 * only affect leaf SPTEs down to min_level.
 * Returns true if an SPTE has been changed and the TLBs need to be flushed.
 */
bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm, struct kvm_memory_slot *slot,
                             int min_level)
{
        struct kvm_mmu_page *root;
        int root_as_id;
        bool spte_set = false;

        for_each_tdp_mmu_root(kvm, root) {
                root_as_id = kvm_mmu_page_as_id(root);
                if (root_as_id != slot->as_id)
                        continue;

                /*
                 * Take a reference on the root so that it cannot be freed if
                 * this thread releases the MMU lock and yields in this loop.
                 */
                kvm_mmu_get_root(kvm, root);

                spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn,
                             slot->base_gfn + slot->npages, min_level);

                kvm_mmu_put_root(kvm, root);
        }

        return spte_set;
}

/*
 * Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If
 * AD bits are enabled, this will involve clearing the dirty bit on each SPTE.
 * If AD bits are not enabled, this will require clearing the writable bit on
 * each SPTE. Returns true if an SPTE has been changed and the TLBs need to
 * be flushed.
 */
static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
                                  gfn_t start, gfn_t end)
{
        struct tdp_iter iter;
        u64 new_spte;
        bool spte_set = false;

        tdp_root_for_each_leaf_pte(iter, root, start, end) {
                if (spte_ad_need_write_protect(iter.old_spte)) {
                        if (is_writable_pte(iter.old_spte))
                                new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
                        else
                                continue;
                } else {
                        if (iter.old_spte & shadow_dirty_mask)
                                new_spte = iter.old_spte & ~shadow_dirty_mask;
                        else
                                continue;
                }

                tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte);
                spte_set = true;

                tdp_mmu_iter_cond_resched(kvm, &iter);
        }
        return spte_set;
}

/*
 * Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If
 * AD bits are enabled, this will involve clearing the dirty bit on each SPTE.
 * If AD bits are not enabled, this will require clearing the writable bit on
 * each SPTE. Returns true if an SPTE has been changed and the TLBs need to
 * be flushed.
 */
bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm, struct kvm_memory_slot *slot)
{
        struct kvm_mmu_page *root;
        int root_as_id;
        bool spte_set = false;

        for_each_tdp_mmu_root(kvm, root) {
                root_as_id = kvm_mmu_page_as_id(root);
                if (root_as_id != slot->as_id)
                        continue;

                /*
                 * Take a reference on the root so that it cannot be freed if
                 * this thread releases the MMU lock and yields in this loop.
                 */
                kvm_mmu_get_root(kvm, root);

                spte_set |= clear_dirty_gfn_range(kvm, root, slot->base_gfn,
                                slot->base_gfn + slot->npages);

                kvm_mmu_put_root(kvm, root);
        }

        return spte_set;
}

/*
 * Clears the dirty status of all the 4k SPTEs mapping GFNs for which a bit is
 * set in mask, starting at gfn. The given memslot is expected to contain all
 * the GFNs represented by set bits in the mask. If AD bits are enabled,
 * clearing the dirty status will involve clearing the dirty bit on each SPTE
 * or, if AD bits are not enabled, clearing the writable bit on each SPTE.
 */
static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root,
                                  gfn_t gfn, unsigned long mask, bool wrprot)
{
        struct tdp_iter iter;
        u64 new_spte;

        tdp_root_for_each_leaf_pte(iter, root, gfn + __ffs(mask),
                                   gfn + BITS_PER_LONG) {
                if (!mask)
                        break;

                if (iter.level > PG_LEVEL_4K ||
                    !(mask & (1UL << (iter.gfn - gfn))))
                        continue;

                if (wrprot || spte_ad_need_write_protect(iter.old_spte)) {
                        if (is_writable_pte(iter.old_spte))
                                new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
                        else
                                continue;
                } else {
                        if (iter.old_spte & shadow_dirty_mask)
                                new_spte = iter.old_spte & ~shadow_dirty_mask;
                        else
                                continue;
                }

                tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte);

                mask &= ~(1UL << (iter.gfn - gfn));
        }
}

/*
 * Clears the dirty status of all the 4k SPTEs mapping GFNs for which a bit is
 * set in mask, starting at gfn. The given memslot is expected to contain all
 * the GFNs represented by set bits in the mask. If AD bits are enabled,
 * clearing the dirty status will involve clearing the dirty bit on each SPTE
 * or, if AD bits are not enabled, clearing the writable bit on each SPTE.
 */
void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
                                       struct kvm_memory_slot *slot,
                                       gfn_t gfn, unsigned long mask,
                                       bool wrprot)
{
        struct kvm_mmu_page *root;
        int root_as_id;

        lockdep_assert_held(&kvm->mmu_lock);
        for_each_tdp_mmu_root(kvm, root) {
                root_as_id = kvm_mmu_page_as_id(root);
                if (root_as_id != slot->as_id)
                        continue;

                clear_dirty_pt_masked(kvm, root, gfn, mask, wrprot);
        }
}

/*
 * Set the dirty status of all the SPTEs mapping GFNs in the memslot. This is
 * only used for PML, and so will involve setting the dirty bit on each SPTE.
 * Returns true if an SPTE has been changed and the TLBs need to be flushed.
 */
static bool set_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
                                gfn_t start, gfn_t end)
{
        struct tdp_iter iter;
        u64 new_spte;
        bool spte_set = false;

        tdp_root_for_each_pte(iter, root, start, end) {
                if (!is_shadow_present_pte(iter.old_spte))
                        continue;

                new_spte = iter.old_spte | shadow_dirty_mask;

                tdp_mmu_set_spte(kvm, &iter, new_spte);
                spte_set = true;

                tdp_mmu_iter_cond_resched(kvm, &iter);
        }

        return spte_set;
}

/*
 * Set the dirty status of all the SPTEs mapping GFNs in the memslot. This is
 * only used for PML, and so will involve setting the dirty bit on each SPTE.
 * Returns true if an SPTE has been changed and the TLBs need to be flushed.
 */
bool kvm_tdp_mmu_slot_set_dirty(struct kvm *kvm, struct kvm_memory_slot *slot)
{
        struct kvm_mmu_page *root;
        int root_as_id;
        bool spte_set = false;

        for_each_tdp_mmu_root(kvm, root) {
                root_as_id = kvm_mmu_page_as_id(root);
                if (root_as_id != slot->as_id)
                        continue;

                /*
                 * Take a reference on the root so that it cannot be freed if
                 * this thread releases the MMU lock and yields in this loop.
                 */
                kvm_mmu_get_root(kvm, root);

                spte_set |= set_dirty_gfn_range(kvm, root, slot->base_gfn,
                                slot->base_gfn + slot->npages);

                kvm_mmu_put_root(kvm, root);
        }

        return spte_set;
}

/*
 * Clear non-leaf entries (and free associated page tables) which could
 * be replaced by large mappings, for GFNs within the slot.
 */
static void zap_collapsible_spte_range(struct kvm *kvm,
                                       struct kvm_mmu_page *root,
                                       gfn_t start, gfn_t end)
{
        struct tdp_iter iter;
        kvm_pfn_t pfn;
        bool spte_set = false;

        tdp_root_for_each_pte(iter, root, start, end) {
                if (!is_shadow_present_pte(iter.old_spte) ||
                    is_last_spte(iter.old_spte, iter.level))
                        continue;

                pfn = spte_to_pfn(iter.old_spte);
                if (kvm_is_reserved_pfn(pfn) ||
                    !PageTransCompoundMap(pfn_to_page(pfn)))
                        continue;

                tdp_mmu_set_spte(kvm, &iter, 0);

                spte_set = tdp_mmu_iter_flush_cond_resched(kvm, &iter);
        }

        if (spte_set)
                kvm_flush_remote_tlbs(kvm);
}

/*
 * Clear non-leaf entries (and free associated page tables) which could
 * be replaced by large mappings, for GFNs within the slot.
 */
void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
                                       const struct kvm_memory_slot *slot)
{
        struct kvm_mmu_page *root;
        int root_as_id;

        for_each_tdp_mmu_root(kvm, root) {
                root_as_id = kvm_mmu_page_as_id(root);
                if (root_as_id != slot->as_id)
                        continue;

                /*
                 * Take a reference on the root so that it cannot be freed if
                 * this thread releases the MMU lock and yields in this loop.
                 */
                kvm_mmu_get_root(kvm, root);

                zap_collapsible_spte_range(kvm, root, slot->base_gfn,
                                           slot->base_gfn + slot->npages);

                kvm_mmu_put_root(kvm, root);
        }
}

/*
 * Removes write access on the last level SPTE mapping this GFN and unsets the
 * SPTE_MMU_WRITABLE bit to ensure future writes continue to be intercepted.
 * Returns true if an SPTE was set and a TLB flush is needed.
 */
static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root,
                              gfn_t gfn)
{
        struct tdp_iter iter;
        u64 new_spte;
        bool spte_set = false;

        tdp_root_for_each_leaf_pte(iter, root, gfn, gfn + 1) {
                if (!is_writable_pte(iter.old_spte))
                        break;

                new_spte = iter.old_spte &
                        ~(PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE);

                tdp_mmu_set_spte(kvm, &iter, new_spte);
                spte_set = true;
        }

        return spte_set;
}

/*
 * Removes write access on the last level SPTE mapping this GFN and unsets the
 * SPTE_MMU_WRITABLE bit to ensure future writes continue to be intercepted.
 * Returns true if an SPTE was set and a TLB flush is needed.
 */
bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
                                   struct kvm_memory_slot *slot, gfn_t gfn)
{
        struct kvm_mmu_page *root;
        int root_as_id;
        bool spte_set = false;

        lockdep_assert_held(&kvm->mmu_lock);
        for_each_tdp_mmu_root(kvm, root) {
                root_as_id = kvm_mmu_page_as_id(root);
                if (root_as_id != slot->as_id)
                        continue;

                spte_set |= write_protect_gfn(kvm, root, gfn);
        }
        return spte_set;
}

/*
 * Return the level of the lowest level SPTE added to sptes.
 * That SPTE may be non-present.
 */
int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes)
{
        struct tdp_iter iter;
        struct kvm_mmu *mmu = vcpu->arch.mmu;
        int leaf = vcpu->arch.mmu->shadow_root_level;
        gfn_t gfn = addr >> PAGE_SHIFT;

        tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
                leaf = iter.level;
                sptes[leaf - 1] = iter.old_spte;
        }

        return leaf;
}