arch/x86/kvm/mmu/tdp_mmu.c
1 // SPDX-License-Identifier: GPL-2.0
2
3 #include "mmu.h"
4 #include "mmu_internal.h"
5 #include "mmutrace.h"
6 #include "tdp_iter.h"
7 #include "tdp_mmu.h"
8 #include "spte.h"
9
10 #include <asm/cmpxchg.h>
11 #include <trace/events/kvm.h>
12
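/*
 * Module parameter controlling whether new VMs use the TDP MMU (default on).
 * The value is sampled once per VM in kvm_mmu_init_tdp_mmu() via READ_ONCE(),
 * so changing it at runtime only affects VMs created afterwards.
 */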
13 static bool __read_mostly tdp_mmu_enabled = true;
14 module_param_named(tdp_mmu, tdp_mmu_enabled, bool, 0644);
15
16 /* Initializes the TDP MMU for the VM, if enabled. */
17 bool kvm_mmu_init_tdp_mmu(struct kvm *kvm)
18 {
19 if (!tdp_enabled || !READ_ONCE(tdp_mmu_enabled))
20 return false;
21
22 /* This should not be changed for the lifetime of the VM. */
23 kvm->arch.tdp_mmu_enabled = true;
24
25 INIT_LIST_HEAD(&kvm->arch.tdp_mmu_roots);
26 spin_lock_init(&kvm->arch.tdp_mmu_pages_lock);
27 INIT_LIST_HEAD(&kvm->arch.tdp_mmu_pages);
28
29 return true;
30 }
31
32 static __always_inline void kvm_lockdep_assert_mmu_lock_held(struct kvm *kvm,
33 bool shared)
34 {
35 if (shared)
36 lockdep_assert_held_read(&kvm->mmu_lock);
37 else
38 lockdep_assert_held_write(&kvm->mmu_lock);
39 }
40
41 void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm)
42 {
43 if (!kvm->arch.tdp_mmu_enabled)
44 return;
45
46 WARN_ON(!list_empty(&kvm->arch.tdp_mmu_pages));
47 WARN_ON(!list_empty(&kvm->arch.tdp_mmu_roots));
48
49 /*
50 * Ensure that all the outstanding RCU callbacks to free shadow pages
51 * can run before the VM is torn down.
52 */
53 rcu_barrier();
54 }
55
56 static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
57 gfn_t start, gfn_t end, bool can_yield, bool flush,
58 bool shared);
59
60 static void tdp_mmu_free_sp(struct kvm_mmu_page *sp)
61 {
62 free_page((unsigned long)sp->spt);
63 kmem_cache_free(mmu_page_header_cache, sp);
64 }
65
66 /*
67 * This is called through call_rcu in order to free TDP page table memory
68 * safely with respect to other kernel threads that may be operating on
69 * the memory.
70 * Because TDP MMU page table memory is only accessed inside RCU read-side
71 * critical sections, and is freed only after a grace period, lockless
72 * walkers are guaranteed not to touch the memory after it has been freed.
73 */
74 static void tdp_mmu_free_sp_rcu_callback(struct rcu_head *head)
75 {
76 struct kvm_mmu_page *sp = container_of(head, struct kvm_mmu_page,
77 rcu_head);
78
79 tdp_mmu_free_sp(sp);
80 }
81
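/*
 * Drop a reference on @root. When the last reference is put, the root is
 * unlinked from tdp_mmu_roots, its paging structure is torn down via
 * zap_gfn_range(), and the root page is freed after an RCU grace period
 * via call_rcu().
 */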
82 void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
83 bool shared)
84 {
85 kvm_lockdep_assert_mmu_lock_held(kvm, shared);
86
87 if (!refcount_dec_and_test(&root->tdp_mmu_root_count))
88 return;
89
90 WARN_ON(!root->tdp_mmu_page);
91
92 spin_lock(&kvm->arch.tdp_mmu_pages_lock);
93 list_del_rcu(&root->link);
94 spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
95
96 zap_gfn_range(kvm, root, 0, -1ull, false, false, shared);
97
98 call_rcu(&root->rcu_head, tdp_mmu_free_sp_rcu_callback);
99 }
100
101 /*
102 * Finds the next valid root after prev_root (or the first valid root if
103 * prev_root is NULL), takes a reference on it, and returns that next root.
104 * If prev_root is not NULL, the caller must already hold a reference on it,
105 * and that reference will be dropped. If no valid root is found, this
106 * function will return NULL.
107 */
108 static struct kvm_mmu_page *tdp_mmu_next_root(struct kvm *kvm,
109 struct kvm_mmu_page *prev_root,
110 bool shared)
111 {
112 struct kvm_mmu_page *next_root;
113
114 rcu_read_lock();
115
116 if (prev_root)
117 next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
118 &prev_root->link,
119 typeof(*prev_root), link);
120 else
121 next_root = list_first_or_null_rcu(&kvm->arch.tdp_mmu_roots,
122 typeof(*next_root), link);
123
124 while (next_root && !kvm_tdp_mmu_get_root(kvm, next_root))
125 next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
126 &next_root->link, typeof(*next_root), link);
127
128 rcu_read_unlock();
129
130 if (prev_root)
131 kvm_tdp_mmu_put_root(kvm, prev_root, shared);
132
133 return next_root;
134 }
135
136 /*
137 * Note: this iterator gets and puts references to the roots it iterates over.
138 * This makes it safe to release the MMU lock and yield within the loop, but
139 * if exiting the loop early, the caller must drop the reference to the most
140 * recent root. (Unless keeping a live reference is desirable.)
141 *
142 * If shared is set, this function is operating under the MMU lock in read
143 * mode. In the unlikely event that this thread must free a root, the lock
144 * will be temporarily dropped and reacquired in write mode.
145 */
146 #define for_each_tdp_mmu_root_yield_safe(_kvm, _root, _as_id, _shared) \
147 for (_root = tdp_mmu_next_root(_kvm, NULL, _shared); \
148 _root; \
149 _root = tdp_mmu_next_root(_kvm, _root, _shared)) \
150 if (kvm_mmu_page_as_id(_root) != _as_id) { \
151 } else
152
153 #define for_each_tdp_mmu_root(_kvm, _root, _as_id) \
154 list_for_each_entry_rcu(_root, &_kvm->arch.tdp_mmu_roots, link, \
155 lockdep_is_held_type(&_kvm->mmu_lock, 0) || \
156 lockdep_is_held(&_kvm->arch.tdp_mmu_pages_lock)) \
157 if (kvm_mmu_page_as_id(_root) != _as_id) { \
158 } else
159
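/*
 * Derive the page role for a TDP MMU page table at @level from the vCPU's
 * base mmu_role: direct mapped, 8-byte GPTEs, full access.
 */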
160 static union kvm_mmu_page_role page_role_for_level(struct kvm_vcpu *vcpu,
161 int level)
162 {
163 union kvm_mmu_page_role role;
164
165 role = vcpu->arch.mmu->mmu_role.base;
166 role.level = level;
167 role.direct = true;
168 role.gpte_is_8_bytes = true;
169 role.access = ACC_ALL;
170
171 return role;
172 }
173
174 static struct kvm_mmu_page *alloc_tdp_mmu_page(struct kvm_vcpu *vcpu, gfn_t gfn,
175 int level)
176 {
177 struct kvm_mmu_page *sp;
178
179 sp = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache);
180 sp->spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_shadow_page_cache);
181 set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
182
183 sp->role.word = page_role_for_level(vcpu, level).word;
184 sp->gfn = gfn;
185 sp->tdp_mmu_page = true;
186
187 trace_kvm_mmu_get_page(sp, true);
188
189 return sp;
190 }
191
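/*
 * Get the root HPA to load for the vCPU: reuse an existing TDP MMU root with
 * a matching role if one can still be referenced, otherwise allocate a new
 * root and publish it on tdp_mmu_roots. Called with mmu_lock held for write.
 */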
192 hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu)
193 {
194 union kvm_mmu_page_role role;
195 struct kvm *kvm = vcpu->kvm;
196 struct kvm_mmu_page *root;
197
198 lockdep_assert_held_write(&kvm->mmu_lock);
199
200 role = page_role_for_level(vcpu, vcpu->arch.mmu->shadow_root_level);
201
202 /* Check for an existing root before allocating a new one. */
203 for_each_tdp_mmu_root(kvm, root, kvm_mmu_role_as_id(role)) {
204 if (root->role.word == role.word &&
205 kvm_tdp_mmu_get_root(kvm, root))
206 goto out;
207 }
208
209 root = alloc_tdp_mmu_page(vcpu, 0, vcpu->arch.mmu->shadow_root_level);
210 refcount_set(&root->tdp_mmu_root_count, 1);
211
212 spin_lock(&kvm->arch.tdp_mmu_pages_lock);
213 list_add_rcu(&root->link, &kvm->arch.tdp_mmu_roots);
214 spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
215
216 out:
217 return __pa(root->spt);
218 }
219
220 static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
221 u64 old_spte, u64 new_spte, int level,
222 bool shared);
223
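/*
 * Propagate accessed state to the primary MM: if an accessed leaf SPTE is
 * being zapped, loses its accessed bit, or changes PFN, mark the old PFN
 * as accessed so the information isn't lost.
 */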
224 static void handle_changed_spte_acc_track(u64 old_spte, u64 new_spte, int level)
225 {
226 if (!is_shadow_present_pte(old_spte) || !is_last_spte(old_spte, level))
227 return;
228
229 if (is_accessed_spte(old_spte) &&
230 (!is_shadow_present_pte(new_spte) || !is_accessed_spte(new_spte) ||
231 spte_to_pfn(old_spte) != spte_to_pfn(new_spte)))
232 kvm_set_pfn_accessed(spte_to_pfn(old_spte));
233 }
234
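/*
 * Propagate dirty state to the dirty bitmap: if a 4K SPTE becomes writable
 * (or becomes writable for a new PFN), mark the corresponding GFN dirty in
 * its memslot.
 */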
235 static void handle_changed_spte_dirty_log(struct kvm *kvm, int as_id, gfn_t gfn,
236 u64 old_spte, u64 new_spte, int level)
237 {
238 bool pfn_changed;
239 struct kvm_memory_slot *slot;
240
241 if (level > PG_LEVEL_4K)
242 return;
243
244 pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);
245
246 if ((!is_writable_pte(old_spte) || pfn_changed) &&
247 is_writable_pte(new_spte)) {
248 slot = __gfn_to_memslot(__kvm_memslots(kvm, as_id), gfn);
249 mark_page_dirty_in_slot(kvm, slot, gfn);
250 }
251 }
252
253 /**
254 * tdp_mmu_link_page - Add a new page to the list of pages used by the TDP MMU
255 *
256 * @kvm: kvm instance
257 * @sp: the new page
258 * @account_nx: This page replaces a NX large page and should be marked for
259 * eventual reclaim.
260 */
261 static void tdp_mmu_link_page(struct kvm *kvm, struct kvm_mmu_page *sp,
262 bool account_nx)
263 {
264 spin_lock(&kvm->arch.tdp_mmu_pages_lock);
265 list_add(&sp->link, &kvm->arch.tdp_mmu_pages);
266 if (account_nx)
267 account_huge_nx_page(kvm, sp);
268 spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
269 }
270
271 /**
272 * tdp_mmu_unlink_page - Remove page from the list of pages used by the TDP MMU
273 *
274 * @kvm: kvm instance
275 * @sp: the page to be removed
276 * @shared: This operation may not be running under the exclusive use of
277 * the MMU lock and the operation must synchronize with other
278 * threads that might be adding or removing pages.
279 */
280 static void tdp_mmu_unlink_page(struct kvm *kvm, struct kvm_mmu_page *sp,
281 bool shared)
282 {
283 if (shared)
284 spin_lock(&kvm->arch.tdp_mmu_pages_lock);
285 else
286 lockdep_assert_held_write(&kvm->mmu_lock);
287
288 list_del(&sp->link);
289 if (sp->lpage_disallowed)
290 unaccount_huge_nx_page(kvm, sp);
291
292 if (shared)
293 spin_unlock(&kvm->arch.tdp_mmu_pages_lock);
294 }
295
296 /**
297 * handle_removed_tdp_mmu_page - handle a pt removed from the TDP structure
298 *
299 * @kvm: kvm instance
300 * @pt: the page removed from the paging structure
301 * @shared: This operation may not be running under the exclusive use
302 * of the MMU lock and the operation must synchronize with other
303 * threads that might be modifying SPTEs.
304 *
305 * Given a page table that has been removed from the TDP paging structure,
306 * iterates through the page table to clear SPTEs and free child page tables.
307 *
308 * Note that pt is passed in as a tdp_ptep_t, but it does not need RCU
309 * protection. Since this thread removed it from the paging structure,
310 * this thread will be responsible for ensuring the page is freed. Hence the
311 * early rcu_dereferences in the function.
312 */
313 static void handle_removed_tdp_mmu_page(struct kvm *kvm, tdp_ptep_t pt,
314 bool shared)
315 {
316 struct kvm_mmu_page *sp = sptep_to_sp(rcu_dereference(pt));
317 int level = sp->role.level;
318 gfn_t base_gfn = sp->gfn;
319 int i;
320
321 trace_kvm_mmu_prepare_zap_page(sp);
322
323 tdp_mmu_unlink_page(kvm, sp, shared);
324
325 for (i = 0; i < PT64_ENT_PER_PAGE; i++) {
326 u64 *sptep = rcu_dereference(pt) + i;
327 gfn_t gfn = base_gfn + i * KVM_PAGES_PER_HPAGE(level);
328 u64 old_child_spte;
329
330 if (shared) {
331 /*
332 * Set the SPTE to a nonpresent value that other
333 * threads will not overwrite. If the SPTE was
334 * already marked as removed then another thread
335 * handling a page fault could overwrite it, so
336 * retry the exchange until this thread is the one that
337 * transitions the SPTE from some other value to the removed SPTE value.
338 */
339 for (;;) {
340 old_child_spte = xchg(sptep, REMOVED_SPTE);
341 if (!is_removed_spte(old_child_spte))
342 break;
343 cpu_relax();
344 }
345 } else {
346 /*
347 * If the SPTE is not MMU-present, there is no backing
348 * page associated with the SPTE and so no side effects
349 * that need to be recorded, and exclusive ownership of
350 * mmu_lock ensures the SPTE can't be made present.
351 * Note, zapping MMIO SPTEs is also unnecessary as they
352 * are guarded by the memslots generation, not by being
353 * unreachable.
354 */
355 old_child_spte = READ_ONCE(*sptep);
356 if (!is_shadow_present_pte(old_child_spte))
357 continue;
358
359 /*
360 * Marking the SPTE as a removed SPTE is not
361 * strictly necessary here as the MMU lock will
362 * stop other threads from concurrently modifying
363 * this SPTE. Using the removed SPTE value keeps
364 * the two branches consistent and simplifies
365 * the function.
366 */
367 WRITE_ONCE(*sptep, REMOVED_SPTE);
368 }
369 handle_changed_spte(kvm, kvm_mmu_page_as_id(sp), gfn,
370 old_child_spte, REMOVED_SPTE, level,
371 shared);
372 }
373
374 kvm_flush_remote_tlbs_with_address(kvm, base_gfn,
375 KVM_PAGES_PER_HPAGE(level + 1));
376
377 call_rcu(&sp->rcu_head, tdp_mmu_free_sp_rcu_callback);
378 }
379
380 /**
381 * __handle_changed_spte - handle bookkeeping associated with an SPTE change
382 * @kvm: kvm instance
383 * @as_id: the address space of the paging structure the SPTE was a part of
384 * @gfn: the base GFN that was mapped by the SPTE
385 * @old_spte: The value of the SPTE before the change
386 * @new_spte: The value of the SPTE after the change
387 * @level: the level of the PT the SPTE is part of in the paging structure
388 * @shared: This operation may not be running under the exclusive use of
389 * the MMU lock and the operation must synchronize with other
390 * threads that might be modifying SPTEs.
391 *
392 * Handle bookkeeping that might result from the modification of a SPTE.
393 * This function must be called for all TDP SPTE modifications.
394 */
395 static void __handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
396 u64 old_spte, u64 new_spte, int level,
397 bool shared)
398 {
399 bool was_present = is_shadow_present_pte(old_spte);
400 bool is_present = is_shadow_present_pte(new_spte);
401 bool was_leaf = was_present && is_last_spte(old_spte, level);
402 bool is_leaf = is_present && is_last_spte(new_spte, level);
403 bool pfn_changed = spte_to_pfn(old_spte) != spte_to_pfn(new_spte);
404
405 WARN_ON(level > PT64_ROOT_MAX_LEVEL);
406 WARN_ON(level < PG_LEVEL_4K);
407 WARN_ON(gfn & (KVM_PAGES_PER_HPAGE(level) - 1));
408
409 /*
410 * If this warning were to trigger it would indicate that there was a
411 * missing MMU notifier or a race with some notifier handler.
412 * A present, leaf SPTE should never be directly replaced with another
413 * present leaf SPTE pointing to a different PFN. A notifier handler
414 * should be zapping the SPTE before the main MM's page table is
415 * changed, or the SPTE should be zeroed, and the TLBs flushed by the
416 * thread before replacement.
417 */
418 if (was_leaf && is_leaf && pfn_changed) {
419 pr_err("Invalid SPTE change: cannot replace a present leaf\n"
420 "SPTE with another present leaf SPTE mapping a\n"
421 "different PFN!\n"
422 "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
423 as_id, gfn, old_spte, new_spte, level);
424
425 /*
426 * Crash the host to prevent error propagation and guest data
427 * corruption.
428 */
429 BUG();
430 }
431
432 if (old_spte == new_spte)
433 return;
434
435 trace_kvm_tdp_mmu_spte_changed(as_id, gfn, level, old_spte, new_spte);
436
437 /*
438 * The only times a SPTE should be changed from a non-present to
439 * non-present state is when an MMIO entry is installed/modified/
440 * removed. In that case, there is nothing to do here.
441 */
442 if (!was_present && !is_present) {
443 /*
444 * If this change does not involve a MMIO SPTE or removed SPTE,
445 * it is unexpected. Log the change, though it should not
446 * impact the guest since both the former and current SPTEs
447 * are nonpresent.
448 */
449 if (WARN_ON(!is_mmio_spte(old_spte) &&
450 !is_mmio_spte(new_spte) &&
451 !is_removed_spte(new_spte)))
452 pr_err("Unexpected SPTE change! Nonpresent SPTEs\n"
453 "should not be replaced with another,\n"
454 "different nonpresent SPTE, unless one or both\n"
455 "are MMIO SPTEs, or the new SPTE is\n"
456 "a temporary removed SPTE.\n"
457 "as_id: %d gfn: %llx old_spte: %llx new_spte: %llx level: %d",
458 as_id, gfn, old_spte, new_spte, level);
459 return;
460 }
461
462 if (is_leaf != was_leaf)
463 kvm_update_page_stats(kvm, level, is_leaf ? 1 : -1);
464
465 if (was_leaf && is_dirty_spte(old_spte) &&
466 (!is_present || !is_dirty_spte(new_spte) || pfn_changed))
467 kvm_set_pfn_dirty(spte_to_pfn(old_spte));
468
469 /*
470 * Recursively handle child PTs if the change removed a subtree from
471 * the paging structure.
472 */
473 if (was_present && !was_leaf && (pfn_changed || !is_present))
474 handle_removed_tdp_mmu_page(kvm,
475 spte_to_child_pt(old_spte, level), shared);
476 }
477
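/*
 * Wrapper around __handle_changed_spte() that also records accessed and
 * dirty state. Use the __ variant directly only when the caller handles
 * access tracking and/or dirty logging itself.
 */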
478 static void handle_changed_spte(struct kvm *kvm, int as_id, gfn_t gfn,
479 u64 old_spte, u64 new_spte, int level,
480 bool shared)
481 {
482 __handle_changed_spte(kvm, as_id, gfn, old_spte, new_spte, level,
483 shared);
484 handle_changed_spte_acc_track(old_spte, new_spte, level);
485 handle_changed_spte_dirty_log(kvm, as_id, gfn, old_spte,
486 new_spte, level);
487 }
488
489 /*
490 * tdp_mmu_set_spte_atomic_no_dirty_log - Set a TDP MMU SPTE atomically
491 * and handle the associated bookkeeping, but do not mark the page dirty
492 * in KVM's dirty bitmaps.
493 *
494 * @kvm: kvm instance
495 * @iter: a tdp_iter instance currently on the SPTE that should be set
496 * @new_spte: The value the SPTE should be set to
497 * Returns: true if the SPTE was set, false if it was not. If false is returned,
498 * this function will have no side-effects.
499 */
500 static inline bool tdp_mmu_set_spte_atomic_no_dirty_log(struct kvm *kvm,
501 struct tdp_iter *iter,
502 u64 new_spte)
503 {
504 lockdep_assert_held_read(&kvm->mmu_lock);
505
506 /*
507 * Do not change removed SPTEs. Only the thread that froze the SPTE
508 * may modify it.
509 */
510 if (is_removed_spte(iter->old_spte))
511 return false;
512
513 /*
514 * Note, fast_pf_fix_direct_spte() can also modify TDP MMU SPTEs and
515 * does not hold the mmu_lock.
516 */
517 if (cmpxchg64(rcu_dereference(iter->sptep), iter->old_spte,
518 new_spte) != iter->old_spte)
519 return false;
520
521 __handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
522 new_spte, iter->level, true);
523 handle_changed_spte_acc_track(iter->old_spte, new_spte, iter->level);
524
525 return true;
526 }
527
528 /*
529 * tdp_mmu_map_set_spte_atomic - Set a leaf TDP MMU SPTE atomically to resolve a
530 * TDP page fault.
531 *
532 * @vcpu: The vcpu instance that took the TDP page fault.
533 * @iter: a tdp_iter instance currently on the SPTE that should be set
534 * @new_spte: The value the SPTE should be set to
535 *
536 * Returns: true if the SPTE was set, false if it was not. If false is returned,
537 * this function will have no side-effects.
538 */
539 static inline bool tdp_mmu_map_set_spte_atomic(struct kvm_vcpu *vcpu,
540 struct tdp_iter *iter,
541 u64 new_spte)
542 {
543 struct kvm *kvm = vcpu->kvm;
544
545 if (!tdp_mmu_set_spte_atomic_no_dirty_log(kvm, iter, new_spte))
546 return false;
547
548 /*
549 * Use kvm_vcpu_gfn_to_memslot() instead of going through
550 * handle_changed_spte_dirty_log() to leverage vcpu->last_used_slot.
551 */
552 if (is_writable_pte(new_spte)) {
553 struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, iter->gfn);
554
555 if (slot && kvm_slot_dirty_track_enabled(slot)) {
556 /* Enforced by kvm_mmu_hugepage_adjust. */
557 WARN_ON_ONCE(iter->level > PG_LEVEL_4K);
558 mark_page_dirty_in_slot(kvm, slot, iter->gfn);
559 }
560 }
561
562 return true;
563 }
564
565 static inline bool tdp_mmu_zap_spte_atomic(struct kvm *kvm,
566 struct tdp_iter *iter)
567 {
568 /*
569 * Freeze the SPTE by setting it to a special,
570 * non-present value. This will stop other threads from
571 * immediately installing a present entry in its place
572 * before the TLBs are flushed.
573 */
574 if (!tdp_mmu_set_spte_atomic_no_dirty_log(kvm, iter, REMOVED_SPTE))
575 return false;
576
577 kvm_flush_remote_tlbs_with_address(kvm, iter->gfn,
578 KVM_PAGES_PER_HPAGE(iter->level));
579
580 /*
581 * No other thread can overwrite the removed SPTE as they
582 * must either wait on the MMU lock or use
583 * tdp_mmu_set_spte_atomic which will not overwrite the
584 * special removed SPTE value. No bookkeeping is needed
585 * here since the SPTE is going from non-present
586 * to non-present.
587 */
588 WRITE_ONCE(*rcu_dereference(iter->sptep), 0);
589
590 return true;
591 }
592
593
594 /*
595 * __tdp_mmu_set_spte - Set a TDP MMU SPTE and handle the associated bookkeeping
596 * @kvm: kvm instance
597 * @iter: a tdp_iter instance currently on the SPTE that should be set
598 * @new_spte: The value the SPTE should be set to
599 * @record_acc_track: Notify the MM subsystem of changes to the accessed state
600 * of the page. Should be set unless handling an MMU
601 * notifier for access tracking. Leaving record_acc_track
602 * unset in that case prevents page accesses from being
603 * double counted.
604 * @record_dirty_log: Record the page as dirty in the dirty bitmap if
605 * appropriate for the change being made. Should be set
606 * unless performing certain dirty logging operations.
607 * Leaving record_dirty_log unset in that case prevents page
608 * writes from being double counted.
609 */
610 static inline void __tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
611 u64 new_spte, bool record_acc_track,
612 bool record_dirty_log)
613 {
614 lockdep_assert_held_write(&kvm->mmu_lock);
615
616 /*
617 * No thread should be using this function to set SPTEs to the
618 * temporary removed SPTE value.
619 * If operating under the MMU lock in read mode, tdp_mmu_set_spte_atomic
620 * should be used. If operating under the MMU lock in write mode, the
621 * use of the removed SPTE should not be necessary.
622 */
623 WARN_ON(is_removed_spte(iter->old_spte));
624
625 WRITE_ONCE(*rcu_dereference(iter->sptep), new_spte);
626
627 __handle_changed_spte(kvm, iter->as_id, iter->gfn, iter->old_spte,
628 new_spte, iter->level, false);
629 if (record_acc_track)
630 handle_changed_spte_acc_track(iter->old_spte, new_spte,
631 iter->level);
632 if (record_dirty_log)
633 handle_changed_spte_dirty_log(kvm, iter->as_id, iter->gfn,
634 iter->old_spte, new_spte,
635 iter->level);
636 }
637
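/*
 * Convenience wrappers around __tdp_mmu_set_spte() for callers holding
 * mmu_lock for write: the plain variant records both accessed and dirty
 * state, while the _no_acc_track and _no_dirty_log variants skip one of
 * the two to avoid double counting (see __tdp_mmu_set_spte() above).
 */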
638 static inline void tdp_mmu_set_spte(struct kvm *kvm, struct tdp_iter *iter,
639 u64 new_spte)
640 {
641 __tdp_mmu_set_spte(kvm, iter, new_spte, true, true);
642 }
643
644 static inline void tdp_mmu_set_spte_no_acc_track(struct kvm *kvm,
645 struct tdp_iter *iter,
646 u64 new_spte)
647 {
648 __tdp_mmu_set_spte(kvm, iter, new_spte, false, true);
649 }
650
651 static inline void tdp_mmu_set_spte_no_dirty_log(struct kvm *kvm,
652 struct tdp_iter *iter,
653 u64 new_spte)
654 {
655 __tdp_mmu_set_spte(kvm, iter, new_spte, true, false);
656 }
657
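/*
 * Iteration helpers built on for_each_tdp_pte(): tdp_root_for_each_pte()
 * walks all SPTEs of @_root in the GFN range [_start, _end);
 * tdp_root_for_each_leaf_pte() visits only SPTEs that are present and
 * last-level; tdp_mmu_for_each_pte() walks from the MMU's current root
 * (_mmu->root_hpa). Walks are done under rcu_read_lock(), as in the
 * functions below, e.g.:
 *
 *	rcu_read_lock();
 *	tdp_root_for_each_leaf_pte(iter, root, start, end) {
 *		... examine iter.old_spte, update it via the
 *		    tdp_mmu_set_spte*() or tdp_mmu_set_spte_atomic*() helpers ...
 *	}
 *	rcu_read_unlock();
 */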
658 #define tdp_root_for_each_pte(_iter, _root, _start, _end) \
659 for_each_tdp_pte(_iter, _root->spt, _root->role.level, _start, _end)
660
661 #define tdp_root_for_each_leaf_pte(_iter, _root, _start, _end) \
662 tdp_root_for_each_pte(_iter, _root, _start, _end) \
663 if (!is_shadow_present_pte(_iter.old_spte) || \
664 !is_last_spte(_iter.old_spte, _iter.level)) \
665 continue; \
666 else
667
668 #define tdp_mmu_for_each_pte(_iter, _mmu, _start, _end) \
669 for_each_tdp_pte(_iter, __va(_mmu->root_hpa), \
670 _mmu->shadow_root_level, _start, _end)
671
672 /*
673 * Yield if the MMU lock is contended or this thread needs to return control
674 * to the scheduler.
675 *
676 * If this function should yield and flush is set, it will perform a remote
677 * TLB flush before yielding.
678 *
679 * If this function yields, it will also reset the tdp_iter's walk over the
680 * paging structure and the calling function should skip to the next
681 * iteration to allow the iterator to continue its traversal from the
682 * paging structure root.
683 *
684 * Return true if this function yielded and the iterator's traversal was reset.
685 * Return false if a yield was not needed.
686 */
687 static inline bool tdp_mmu_iter_cond_resched(struct kvm *kvm,
688 struct tdp_iter *iter, bool flush,
689 bool shared)
690 {
691 /* Ensure forward progress has been made before yielding. */
692 if (iter->next_last_level_gfn == iter->yielded_gfn)
693 return false;
694
695 if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) {
696 rcu_read_unlock();
697
698 if (flush)
699 kvm_flush_remote_tlbs(kvm);
700
701 if (shared)
702 cond_resched_rwlock_read(&kvm->mmu_lock);
703 else
704 cond_resched_rwlock_write(&kvm->mmu_lock);
705
706 rcu_read_lock();
707
708 WARN_ON(iter->gfn > iter->next_last_level_gfn);
709
710 tdp_iter_restart(iter);
711
712 return true;
713 }
714
715 return false;
716 }
717
718 /*
719 * Tears down the mappings for the range of gfns, [start, end), and frees the
720 * non-root pages mapping GFNs strictly within that range. Returns true if
721 * SPTEs have been cleared and a TLB flush is needed before releasing the
722 * MMU lock.
723 *
724 * If can_yield is true, will release the MMU lock and reschedule if the
725 * scheduler needs the CPU or there is contention on the MMU lock. If this
726 * function cannot yield, it will not release the MMU lock or reschedule and
727 * the caller must ensure it does not supply too large a GFN range, or the
728 * operation can cause a soft lockup.
729 *
730 * If shared is true, this thread holds the MMU lock in read mode and must
731 * account for the possibility that other threads are modifying the paging
732 * structures concurrently. If shared is false, this thread should hold the
733 * MMU lock in write mode.
734 */
735 static bool zap_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
736 gfn_t start, gfn_t end, bool can_yield, bool flush,
737 bool shared)
738 {
739 gfn_t max_gfn_host = 1ULL << (shadow_phys_bits - PAGE_SHIFT);
740 bool zap_all = (start == 0 && end >= max_gfn_host);
741 struct tdp_iter iter;
742
743 /*
744 * No need to try to step down in the iterator when zapping all SPTEs,
745 * zapping the top-level non-leaf SPTEs will recurse on their children.
746 */
747 int min_level = zap_all ? root->role.level : PG_LEVEL_4K;
748
749 /*
750 * Bound the walk at host.MAXPHYADDR, guest accesses beyond that will
751 * hit a #PF(RSVD) and never get to an EPT Violation/Misconfig / #NPF,
752 * and so KVM will never install a SPTE for such addresses.
753 */
754 end = min(end, max_gfn_host);
755
756 kvm_lockdep_assert_mmu_lock_held(kvm, shared);
757
758 rcu_read_lock();
759
760 for_each_tdp_pte_min_level(iter, root->spt, root->role.level,
761 min_level, start, end) {
762 retry:
763 if (can_yield &&
764 tdp_mmu_iter_cond_resched(kvm, &iter, flush, shared)) {
765 flush = false;
766 continue;
767 }
768
769 if (!is_shadow_present_pte(iter.old_spte))
770 continue;
771
772 /*
773 * If this is a non-last-level SPTE that covers a larger range
774 * than should be zapped, continue, and zap the mappings at a
775 * lower level, except when zapping all SPTEs.
776 */
777 if (!zap_all &&
778 (iter.gfn < start ||
779 iter.gfn + KVM_PAGES_PER_HPAGE(iter.level) > end) &&
780 !is_last_spte(iter.old_spte, iter.level))
781 continue;
782
783 if (!shared) {
784 tdp_mmu_set_spte(kvm, &iter, 0);
785 flush = true;
786 } else if (!tdp_mmu_zap_spte_atomic(kvm, &iter)) {
787 /*
788 * The iter must explicitly re-read the SPTE because
789 * the atomic cmpxchg failed.
790 */
791 iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep));
792 goto retry;
793 }
794 }
795
796 rcu_read_unlock();
797 return flush;
798 }
799
800 /*
801 * Tears down the mappings for the range of gfns, [start, end), and frees the
802 * non-root pages mapping GFNs strictly within that range. Returns true if
803 * SPTEs have been cleared and a TLB flush is needed before releasing the
804 * MMU lock.
805 */
806 bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id, gfn_t start,
807 gfn_t end, bool can_yield, bool flush)
808 {
809 struct kvm_mmu_page *root;
810
811 for_each_tdp_mmu_root_yield_safe(kvm, root, as_id, false)
812 flush = zap_gfn_range(kvm, root, start, end, can_yield, flush,
813 false);
814
815 return flush;
816 }
817
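/*
 * Zap all SPTEs in all roots across every address space, flushing remote
 * TLBs once at the end if anything was zapped.
 */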
818 void kvm_tdp_mmu_zap_all(struct kvm *kvm)
819 {
820 bool flush = false;
821 int i;
822
823 for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++)
824 flush = kvm_tdp_mmu_zap_gfn_range(kvm, i, 0, -1ull, flush);
825
826 if (flush)
827 kvm_flush_remote_tlbs(kvm);
828 }
829
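/*
 * Find the next root after @prev_root (or the first root if @prev_root is
 * NULL) that is marked invalid and still has a non-zero refcount, i.e. still
 * holds the reference taken in kvm_tdp_mmu_invalidate_all_roots().
 */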
830 static struct kvm_mmu_page *next_invalidated_root(struct kvm *kvm,
831 struct kvm_mmu_page *prev_root)
832 {
833 struct kvm_mmu_page *next_root;
834
835 if (prev_root)
836 next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
837 &prev_root->link,
838 typeof(*prev_root), link);
839 else
840 next_root = list_first_or_null_rcu(&kvm->arch.tdp_mmu_roots,
841 typeof(*next_root), link);
842
843 while (next_root && !(next_root->role.invalid &&
844 refcount_read(&next_root->tdp_mmu_root_count)))
845 next_root = list_next_or_null_rcu(&kvm->arch.tdp_mmu_roots,
846 &next_root->link,
847 typeof(*next_root), link);
848
849 return next_root;
850 }
851
852 /*
853 * Since kvm_tdp_mmu_invalidate_all_roots() has acquired a reference to each
854 * invalidated root, they will not be freed until this function drops the
855 * reference. Before dropping that reference, tear down the paging
856 * structure so that whichever thread does drop the last reference
857 * only has to do a trivial amount of work. Since the roots are invalid,
858 * no new SPTEs should be created under them.
859 */
860 void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm)
861 {
862 struct kvm_mmu_page *next_root;
863 struct kvm_mmu_page *root;
864 bool flush = false;
865
866 lockdep_assert_held_read(&kvm->mmu_lock);
867
868 rcu_read_lock();
869
870 root = next_invalidated_root(kvm, NULL);
871
872 while (root) {
873 next_root = next_invalidated_root(kvm, root);
874
875 rcu_read_unlock();
876
877 flush = zap_gfn_range(kvm, root, 0, -1ull, true, flush, true);
878
879 /*
880 * Put the reference acquired in
881 * kvm_tdp_mmu_invalidate_all_roots().
882 */
883 kvm_tdp_mmu_put_root(kvm, root, true);
884
885 root = next_root;
886
887 rcu_read_lock();
888 }
889
890 rcu_read_unlock();
891
892 if (flush)
893 kvm_flush_remote_tlbs(kvm);
894 }
895
896 /*
897 * Mark each TDP MMU root as invalid so that other threads
898 * will drop their references and allow the root count to
899 * go to 0.
900 *
901 * Also take a reference on all roots so that this thread
902 * can do the bulk of the work required to free the roots
903 * once they are invalidated. Without this reference, a
904 * vCPU thread might drop the last reference to a root and
905 * get stuck with tearing down the entire paging structure.
906 *
907 * Roots which have a zero refcount should be skipped as
908 * they're already being torn down.
909 * Already invalid roots should be referenced again so that
910 * they aren't freed before kvm_tdp_mmu_zap_invalidated_roots() is
911 * done with them.
912 *
913 * This has essentially the same effect for the TDP MMU
914 * as updating mmu_valid_gen does for the shadow MMU.
915 */
916 void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm)
917 {
918 struct kvm_mmu_page *root;
919
920 lockdep_assert_held_write(&kvm->mmu_lock);
921 list_for_each_entry(root, &kvm->arch.tdp_mmu_roots, link)
922 if (refcount_inc_not_zero(&root->tdp_mmu_root_count))
923 root->role.invalid = true;
924 }
925
926 /*
927 * Installs a last-level SPTE to handle a TDP page fault.
928 * (NPT/EPT violation/misconfiguration)
929 */
930 static int tdp_mmu_map_handle_target_level(struct kvm_vcpu *vcpu, int write,
931 int map_writable,
932 struct tdp_iter *iter,
933 kvm_pfn_t pfn, bool prefault)
934 {
935 u64 new_spte;
936 int ret = RET_PF_FIXED;
937 int make_spte_ret = 0;
938
939 if (unlikely(is_noslot_pfn(pfn)))
940 new_spte = make_mmio_spte(vcpu, iter->gfn, ACC_ALL);
941 else
942 make_spte_ret = make_spte(vcpu, ACC_ALL, iter->level, iter->gfn,
943 pfn, iter->old_spte, prefault, true,
944 map_writable, !shadow_accessed_mask,
945 &new_spte);
946
947 if (new_spte == iter->old_spte)
948 ret = RET_PF_SPURIOUS;
949 else if (!tdp_mmu_map_set_spte_atomic(vcpu, iter, new_spte))
950 return RET_PF_RETRY;
951
952 /*
953 * If the page fault was caused by a write but the page is write
954 * protected, emulation is needed. If the emulation was skipped,
955 * the vCPU would have the same fault again.
956 */
957 if (make_spte_ret & SET_SPTE_WRITE_PROTECTED_PT) {
958 if (write)
959 ret = RET_PF_EMULATE;
960 kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
961 }
962
963 /* If a MMIO SPTE is installed, the MMIO will need to be emulated. */
964 if (unlikely(is_mmio_spte(new_spte))) {
965 trace_mark_mmio_spte(rcu_dereference(iter->sptep), iter->gfn,
966 new_spte);
967 ret = RET_PF_EMULATE;
968 } else {
969 trace_kvm_mmu_set_spte(iter->level, iter->gfn,
970 rcu_dereference(iter->sptep));
971 }
972
973 /*
974 * Increase pf_fixed in both RET_PF_EMULATE and RET_PF_FIXED to be
975 * consistent with legacy MMU behavior.
976 */
977 if (ret != RET_PF_SPURIOUS)
978 vcpu->stat.pf_fixed++;
979
980 return ret;
981 }
982
983 /*
984 * Handle a TDP page fault (NPT/EPT violation/misconfiguration) by installing
985 * page tables and SPTEs to translate the faulting guest physical address.
986 */
987 int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
988 int map_writable, int max_level, kvm_pfn_t pfn,
989 bool prefault)
990 {
991 bool nx_huge_page_workaround_enabled = is_nx_huge_page_enabled();
992 bool write = error_code & PFERR_WRITE_MASK;
993 bool exec = error_code & PFERR_FETCH_MASK;
994 bool huge_page_disallowed = exec && nx_huge_page_workaround_enabled;
995 struct kvm_mmu *mmu = vcpu->arch.mmu;
996 struct tdp_iter iter;
997 struct kvm_mmu_page *sp;
998 u64 *child_pt;
999 u64 new_spte;
1000 int ret;
1001 gfn_t gfn = gpa >> PAGE_SHIFT;
1002 int level;
1003 int req_level;
1004
1005 level = kvm_mmu_hugepage_adjust(vcpu, gfn, max_level, &pfn,
1006 huge_page_disallowed, &req_level);
1007
1008 trace_kvm_mmu_spte_requested(gpa, level, pfn);
1009
1010 rcu_read_lock();
1011
1012 tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
1013 if (nx_huge_page_workaround_enabled)
1014 disallowed_hugepage_adjust(iter.old_spte, gfn,
1015 iter.level, &pfn, &level);
1016
1017 if (iter.level == level)
1018 break;
1019
1020 /*
1021 * If there is an SPTE mapping a large page at a higher level
1022 * than the target, that SPTE must be cleared and replaced
1023 * with a non-leaf SPTE.
1024 */
1025 if (is_shadow_present_pte(iter.old_spte) &&
1026 is_large_pte(iter.old_spte)) {
1027 if (!tdp_mmu_zap_spte_atomic(vcpu->kvm, &iter))
1028 break;
1029
1030 /*
1031 * The iter must explicitly re-read the spte here
1032 * because the new value informs the !present
1033 * path below.
1034 */
1035 iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep));
1036 }
1037
1038 if (!is_shadow_present_pte(iter.old_spte)) {
1039 /*
1040 * If SPTE has been frozen by another thread, just
1041 * give up and retry, avoiding unnecessary page table
1042 * allocation and free.
1043 */
1044 if (is_removed_spte(iter.old_spte))
1045 break;
1046
1047 sp = alloc_tdp_mmu_page(vcpu, iter.gfn, iter.level - 1);
1048 child_pt = sp->spt;
1049
1050 new_spte = make_nonleaf_spte(child_pt,
1051 !shadow_accessed_mask);
1052
1053 if (tdp_mmu_set_spte_atomic_no_dirty_log(vcpu->kvm, &iter, new_spte)) {
1054 tdp_mmu_link_page(vcpu->kvm, sp,
1055 huge_page_disallowed &&
1056 req_level >= iter.level);
1057
1058 trace_kvm_mmu_get_page(sp, true);
1059 } else {
1060 tdp_mmu_free_sp(sp);
1061 break;
1062 }
1063 }
1064 }
1065
1066 if (iter.level != level) {
1067 rcu_read_unlock();
1068 return RET_PF_RETRY;
1069 }
1070
1071 ret = tdp_mmu_map_handle_target_level(vcpu, write, map_writable, &iter,
1072 pfn, prefault);
1073 rcu_read_unlock();
1074
1075 return ret;
1076 }
1077
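/*
 * Zap the GFN range described by @range in every TDP MMU root of the
 * affected address space. range->may_block controls whether the walk may
 * yield the MMU lock; returns true if a TLB flush is still required.
 */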
1078 bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
1079 bool flush)
1080 {
1081 struct kvm_mmu_page *root;
1082
1083 for_each_tdp_mmu_root(kvm, root, range->slot->as_id)
1084 flush = zap_gfn_range(kvm, root, range->start, range->end,
1085 range->may_block, flush, false);
1086
1087 return flush;
1088 }
1089
1090 typedef bool (*tdp_handler_t)(struct kvm *kvm, struct tdp_iter *iter,
1091 struct kvm_gfn_range *range);
1092
1093 static __always_inline bool kvm_tdp_mmu_handle_gfn(struct kvm *kvm,
1094 struct kvm_gfn_range *range,
1095 tdp_handler_t handler)
1096 {
1097 struct kvm_mmu_page *root;
1098 struct tdp_iter iter;
1099 bool ret = false;
1100
1101 rcu_read_lock();
1102
1103 /*
1104 * Don't support rescheduling; none of the MMU notifiers that funnel
1105 * into this helper allow blocking; it'd be dead, wasteful code.
1106 */
1107 for_each_tdp_mmu_root(kvm, root, range->slot->as_id) {
1108 tdp_root_for_each_leaf_pte(iter, root, range->start, range->end)
1109 ret |= handler(kvm, &iter, range);
1110 }
1111
1112 rcu_read_unlock();
1113
1114 return ret;
1115 }
1116
1117 /*
1118 * Mark the SPTEs mapping the range of GFNs [start, end) as unaccessed and
1119 * return non-zero if any of the GFNs in the range have been accessed.
1120 */
1121 static bool age_gfn_range(struct kvm *kvm, struct tdp_iter *iter,
1122 struct kvm_gfn_range *range)
1123 {
1124 u64 new_spte = 0;
1125
1126 /* If we have a non-accessed entry we don't need to change the pte. */
1127 if (!is_accessed_spte(iter->old_spte))
1128 return false;
1129
1130 new_spte = iter->old_spte;
1131
1132 if (spte_ad_enabled(new_spte)) {
1133 new_spte &= ~shadow_accessed_mask;
1134 } else {
1135 /*
1136 * Capture the dirty status of the page, so that it doesn't get
1137 * lost when the SPTE is marked for access tracking.
1138 */
1139 if (is_writable_pte(new_spte))
1140 kvm_set_pfn_dirty(spte_to_pfn(new_spte));
1141
1142 new_spte = mark_spte_for_access_track(new_spte);
1143 }
1144
1145 tdp_mmu_set_spte_no_acc_track(kvm, iter, new_spte);
1146
1147 return true;
1148 }
1149
1150 bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
1151 {
1152 return kvm_tdp_mmu_handle_gfn(kvm, range, age_gfn_range);
1153 }
1154
1155 static bool test_age_gfn(struct kvm *kvm, struct tdp_iter *iter,
1156 struct kvm_gfn_range *range)
1157 {
1158 return is_accessed_spte(iter->old_spte);
1159 }
1160
1161 bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
1162 {
1163 return kvm_tdp_mmu_handle_gfn(kvm, range, test_age_gfn);
1164 }
1165
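/*
 * Handler for the change_pte notifier on a single 4K GFN: zap the existing
 * SPTE and, if the new host PTE is read-only, install a new SPTE pointing
 * at the new PFN (writable PTEs are simply left to be faulted back in).
 */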
1166 static bool set_spte_gfn(struct kvm *kvm, struct tdp_iter *iter,
1167 struct kvm_gfn_range *range)
1168 {
1169 u64 new_spte;
1170
1171 /* Huge pages aren't expected to be modified without first being zapped. */
1172 WARN_ON(pte_huge(range->pte) || range->start + 1 != range->end);
1173
1174 if (iter->level != PG_LEVEL_4K ||
1175 !is_shadow_present_pte(iter->old_spte))
1176 return false;
1177
1178 /*
1179 * Note, when changing a read-only SPTE, it's not strictly necessary to
1180 * zero the SPTE before setting the new PFN, but doing so preserves the
1181 * invariant that the PFN of a present leaf SPTE can never change.
1182 * See __handle_changed_spte().
1183 */
1184 tdp_mmu_set_spte(kvm, iter, 0);
1185
1186 if (!pte_write(range->pte)) {
1187 new_spte = kvm_mmu_changed_pte_notifier_make_spte(iter->old_spte,
1188 pte_pfn(range->pte));
1189
1190 tdp_mmu_set_spte(kvm, iter, new_spte);
1191 }
1192
1193 return true;
1194 }
1195
1196 /*
1197 * Handle the changed_pte MMU notifier for the TDP MMU.
1198 * range->pte holds the new pte_t mapping the HVA specified by the MMU
1199 * notifier.
1200 * Returns non-zero if a flush is needed before releasing the MMU lock.
1201 */
1202 bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
1203 {
1204 bool flush = kvm_tdp_mmu_handle_gfn(kvm, range, set_spte_gfn);
1205
1206 /* FIXME: return 'flush' instead of flushing here. */
1207 if (flush)
1208 kvm_flush_remote_tlbs_with_address(kvm, range->start, 1);
1209
1210 return false;
1211 }
1212
1213 /*
1214 * Remove write access from all SPTEs at or above min_level that map GFNs
1215 * [start, end). Returns true if an SPTE has been changed and the TLBs need to
1216 * be flushed.
1217 */
1218 static bool wrprot_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
1219 gfn_t start, gfn_t end, int min_level)
1220 {
1221 struct tdp_iter iter;
1222 u64 new_spte;
1223 bool spte_set = false;
1224
1225 rcu_read_lock();
1226
1227 BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL);
1228
1229 for_each_tdp_pte_min_level(iter, root->spt, root->role.level,
1230 min_level, start, end) {
1231 retry:
1232 if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
1233 continue;
1234
1235 if (!is_shadow_present_pte(iter.old_spte) ||
1236 !is_last_spte(iter.old_spte, iter.level) ||
1237 !(iter.old_spte & PT_WRITABLE_MASK))
1238 continue;
1239
1240 new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
1241
1242 if (!tdp_mmu_set_spte_atomic_no_dirty_log(kvm, &iter,
1243 new_spte)) {
1244 /*
1245 * The iter must explicitly re-read the SPTE because
1246 * the atomic cmpxchg failed.
1247 */
1248 iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep));
1249 goto retry;
1250 }
1251 spte_set = true;
1252 }
1253
1254 rcu_read_unlock();
1255 return spte_set;
1256 }
1257
1258 /*
1259 * Remove write access from all the SPTEs mapping GFNs in the memslot. Will
1260 * only affect leaf SPTEs down to min_level.
1261 * Returns true if an SPTE has been changed and the TLBs need to be flushed.
1262 */
1263 bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm,
1264 const struct kvm_memory_slot *slot, int min_level)
1265 {
1266 struct kvm_mmu_page *root;
1267 bool spte_set = false;
1268
1269 lockdep_assert_held_read(&kvm->mmu_lock);
1270
1271 for_each_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
1272 spte_set |= wrprot_gfn_range(kvm, root, slot->base_gfn,
1273 slot->base_gfn + slot->npages, min_level);
1274
1275 return spte_set;
1276 }
1277
1278 /*
1279 * Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If
1280 * AD bits are enabled, this will involve clearing the dirty bit on each SPTE.
1281 * If AD bits are not enabled, this will require clearing the writable bit on
1282 * each SPTE. Returns true if an SPTE has been changed and the TLBs need to
1283 * be flushed.
1284 */
1285 static bool clear_dirty_gfn_range(struct kvm *kvm, struct kvm_mmu_page *root,
1286 gfn_t start, gfn_t end)
1287 {
1288 struct tdp_iter iter;
1289 u64 new_spte;
1290 bool spte_set = false;
1291
1292 rcu_read_lock();
1293
1294 tdp_root_for_each_leaf_pte(iter, root, start, end) {
1295 retry:
1296 if (tdp_mmu_iter_cond_resched(kvm, &iter, false, true))
1297 continue;
1298
1299 if (spte_ad_need_write_protect(iter.old_spte)) {
1300 if (is_writable_pte(iter.old_spte))
1301 new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
1302 else
1303 continue;
1304 } else {
1305 if (iter.old_spte & shadow_dirty_mask)
1306 new_spte = iter.old_spte & ~shadow_dirty_mask;
1307 else
1308 continue;
1309 }
1310
1311 if (!tdp_mmu_set_spte_atomic_no_dirty_log(kvm, &iter,
1312 new_spte)) {
1313 /*
1314 * The iter must explicitly re-read the SPTE because
1315 * the atomic cmpxchg failed.
1316 */
1317 iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep));
1318 goto retry;
1319 }
1320 spte_set = true;
1321 }
1322
1323 rcu_read_unlock();
1324 return spte_set;
1325 }
1326
1327 /*
1328 * Clear the dirty status of all the SPTEs mapping GFNs in the memslot. If
1329 * AD bits are enabled, this will involve clearing the dirty bit on each SPTE.
1330 * If AD bits are not enabled, this will require clearing the writable bit on
1331 * each SPTE. Returns true if an SPTE has been changed and the TLBs need to
1332 * be flushed.
1333 */
1334 bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm,
1335 const struct kvm_memory_slot *slot)
1336 {
1337 struct kvm_mmu_page *root;
1338 bool spte_set = false;
1339
1340 lockdep_assert_held_read(&kvm->mmu_lock);
1341
1342 for_each_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
1343 spte_set |= clear_dirty_gfn_range(kvm, root, slot->base_gfn,
1344 slot->base_gfn + slot->npages);
1345
1346 return spte_set;
1347 }
1348
1349 /*
1350 * Clears the dirty status of all the 4k SPTEs mapping GFNs for which a bit is
1351 * set in mask, starting at gfn. The given memslot is expected to contain all
1352 * the GFNs represented by set bits in the mask. If AD bits are enabled,
1353 * clearing the dirty status will involve clearing the dirty bit on each SPTE
1354 * or, if AD bits are not enabled, clearing the writable bit on each SPTE.
1355 */
1356 static void clear_dirty_pt_masked(struct kvm *kvm, struct kvm_mmu_page *root,
1357 gfn_t gfn, unsigned long mask, bool wrprot)
1358 {
1359 struct tdp_iter iter;
1360 u64 new_spte;
1361
1362 rcu_read_lock();
1363
1364 tdp_root_for_each_leaf_pte(iter, root, gfn + __ffs(mask),
1365 gfn + BITS_PER_LONG) {
1366 if (!mask)
1367 break;
1368
1369 if (iter.level > PG_LEVEL_4K ||
1370 !(mask & (1UL << (iter.gfn - gfn))))
1371 continue;
1372
1373 mask &= ~(1UL << (iter.gfn - gfn));
1374
1375 if (wrprot || spte_ad_need_write_protect(iter.old_spte)) {
1376 if (is_writable_pte(iter.old_spte))
1377 new_spte = iter.old_spte & ~PT_WRITABLE_MASK;
1378 else
1379 continue;
1380 } else {
1381 if (iter.old_spte & shadow_dirty_mask)
1382 new_spte = iter.old_spte & ~shadow_dirty_mask;
1383 else
1384 continue;
1385 }
1386
1387 tdp_mmu_set_spte_no_dirty_log(kvm, &iter, new_spte);
1388 }
1389
1390 rcu_read_unlock();
1391 }
1392
1393 /*
1394 * Clears the dirty status of all the 4k SPTEs mapping GFNs for which a bit is
1395 * set in mask, starting at gfn. The given memslot is expected to contain all
1396 * the GFNs represented by set bits in the mask. If AD bits are enabled,
1397 * clearing the dirty status will involve clearing the dirty bit on each SPTE
1398 * or, if AD bits are not enabled, clearing the writable bit on each SPTE.
1399 */
1400 void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
1401 struct kvm_memory_slot *slot,
1402 gfn_t gfn, unsigned long mask,
1403 bool wrprot)
1404 {
1405 struct kvm_mmu_page *root;
1406
1407 lockdep_assert_held_write(&kvm->mmu_lock);
1408 for_each_tdp_mmu_root(kvm, root, slot->as_id)
1409 clear_dirty_pt_masked(kvm, root, gfn, mask, wrprot);
1410 }
1411
1412 /*
1413 * Clear leaf entries which could be replaced by large mappings, for
1414 * GFNs within the slot.
1415 */
1416 static bool zap_collapsible_spte_range(struct kvm *kvm,
1417 struct kvm_mmu_page *root,
1418 const struct kvm_memory_slot *slot,
1419 bool flush)
1420 {
1421 gfn_t start = slot->base_gfn;
1422 gfn_t end = start + slot->npages;
1423 struct tdp_iter iter;
1424 kvm_pfn_t pfn;
1425
1426 rcu_read_lock();
1427
1428 tdp_root_for_each_pte(iter, root, start, end) {
1429 retry:
1430 if (tdp_mmu_iter_cond_resched(kvm, &iter, flush, true)) {
1431 flush = false;
1432 continue;
1433 }
1434
1435 if (!is_shadow_present_pte(iter.old_spte) ||
1436 !is_last_spte(iter.old_spte, iter.level))
1437 continue;
1438
1439 pfn = spte_to_pfn(iter.old_spte);
1440 if (kvm_is_reserved_pfn(pfn) ||
1441 iter.level >= kvm_mmu_max_mapping_level(kvm, slot, iter.gfn,
1442 pfn, PG_LEVEL_NUM))
1443 continue;
1444
1445 if (!tdp_mmu_zap_spte_atomic(kvm, &iter)) {
1446 /*
1447 * The iter must explicitly re-read the SPTE because
1448 * the atomic cmpxchg failed.
1449 */
1450 iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep));
1451 goto retry;
1452 }
1453 flush = true;
1454 }
1455
1456 rcu_read_unlock();
1457
1458 return flush;
1459 }
1460
1461 /*
1462 * Zap the leaf entries which could be replaced by large mappings, for
1463 * GFNs within the slot, so that huge pages can be recreated.
1464 */
1465 bool kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
1466 const struct kvm_memory_slot *slot,
1467 bool flush)
1468 {
1469 struct kvm_mmu_page *root;
1470
1471 lockdep_assert_held_read(&kvm->mmu_lock);
1472
1473 for_each_tdp_mmu_root_yield_safe(kvm, root, slot->as_id, true)
1474 flush = zap_collapsible_spte_range(kvm, root, slot, flush);
1475
1476 return flush;
1477 }
1478
1479 /*
1480 * Removes write access on the last level SPTE mapping this GFN and unsets the
1481 * MMU-writable bit to ensure future writes continue to be intercepted.
1482 * Returns true if an SPTE was set and a TLB flush is needed.
1483 */
1484 static bool write_protect_gfn(struct kvm *kvm, struct kvm_mmu_page *root,
1485 gfn_t gfn, int min_level)
1486 {
1487 struct tdp_iter iter;
1488 u64 new_spte;
1489 bool spte_set = false;
1490
1491 BUG_ON(min_level > KVM_MAX_HUGEPAGE_LEVEL);
1492
1493 rcu_read_lock();
1494
1495 for_each_tdp_pte_min_level(iter, root->spt, root->role.level,
1496 min_level, gfn, gfn + 1) {
1497 if (!is_shadow_present_pte(iter.old_spte) ||
1498 !is_last_spte(iter.old_spte, iter.level))
1499 continue;
1500
1501 if (!is_writable_pte(iter.old_spte))
1502 break;
1503
1504 new_spte = iter.old_spte &
1505 ~(PT_WRITABLE_MASK | shadow_mmu_writable_mask);
1506
1507 tdp_mmu_set_spte(kvm, &iter, new_spte);
1508 spte_set = true;
1509 }
1510
1511 rcu_read_unlock();
1512
1513 return spte_set;
1514 }
1515
1516 /*
1517 * Removes write access on the last level SPTE mapping this GFN and unsets the
1518 * MMU-writable bit to ensure future writes continue to be intercepted.
1519 * Returns true if an SPTE was set and a TLB flush is needed.
1520 */
1521 bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
1522 struct kvm_memory_slot *slot, gfn_t gfn,
1523 int min_level)
1524 {
1525 struct kvm_mmu_page *root;
1526 bool spte_set = false;
1527
1528 lockdep_assert_held_write(&kvm->mmu_lock);
1529 for_each_tdp_mmu_root(kvm, root, slot->as_id)
1530 spte_set |= write_protect_gfn(kvm, root, gfn, min_level);
1531
1532 return spte_set;
1533 }
1534
1535 /*
1536 * Return the level of the lowest level SPTE added to sptes.
1537 * That SPTE may be non-present.
1538 *
1539 * Must be called between kvm_tdp_mmu_walk_lockless_{begin,end}.
1540 */
1541 int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
1542 int *root_level)
1543 {
1544 struct tdp_iter iter;
1545 struct kvm_mmu *mmu = vcpu->arch.mmu;
1546 gfn_t gfn = addr >> PAGE_SHIFT;
1547 int leaf = -1;
1548
1549 *root_level = vcpu->arch.mmu->shadow_root_level;
1550
1551 tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
1552 leaf = iter.level;
1553 sptes[leaf] = iter.old_spte;
1554 }
1555
1556 return leaf;
1557 }
1558
1559 /*
1560 * Returns the last level spte pointer of the shadow page walk for the given
1561 * gpa, and sets *spte to the spte value. This spte may be non-present. If no
1562 * walk could be performed, returns NULL and *spte does not contain valid data.
1563 *
1564 * Contract:
1565 * - Must be called between kvm_tdp_mmu_walk_lockless_{begin,end}.
1566 * - The returned sptep must not be used after kvm_tdp_mmu_walk_lockless_end.
1567 *
1568 * WARNING: This function is only intended to be called during fast_page_fault.
1569 */
1570 u64 *kvm_tdp_mmu_fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, u64 addr,
1571 u64 *spte)
1572 {
1573 struct tdp_iter iter;
1574 struct kvm_mmu *mmu = vcpu->arch.mmu;
1575 gfn_t gfn = addr >> PAGE_SHIFT;
1576 tdp_ptep_t sptep = NULL;
1577
1578 tdp_mmu_for_each_pte(iter, mmu, gfn, gfn + 1) {
1579 *spte = iter.old_spte;
1580 sptep = iter.sptep;
1581 }
1582
1583 /*
1584 * Perform the rcu_dereference to get the raw spte pointer value since
1585 * we are passing it up to fast_page_fault, which is shared with the
1586 * legacy MMU and thus does not retain the TDP MMU-specific __rcu
1587 * annotation.
1588 *
1589 * This is safe since fast_page_fault obeys the contracts of this
1590 * function as well as all TDP MMU contracts around modifying SPTEs
1591 * outside of mmu_lock.
1592 */
1593 return rcu_dereference(sptep);
1594 }