// SPDX-License-Identifier: GPL-2.0

#ifndef __KVM_X86_MMU_TDP_MMU_H
#define __KVM_X86_MMU_TDP_MMU_H

#include <linux/kvm_host.h>

hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu);

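/*
 * Take a reference on @root for the TDP MMU.  Fails (returns false) if the
 * root has been invalidated or its reference count has already hit zero.
 */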
__must_check static inline bool kvm_tdp_mmu_get_root(struct kvm *kvm,
						     struct kvm_mmu_page *root)
{
	if (root->role.invalid)
		return false;

	return refcount_inc_not_zero(&root->tdp_mmu_root_count);
}

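/*
 * Drop a reference on @root.  @shared is true when mmu_lock is held for
 * read rather than write.
 */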
void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
			  bool shared);

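/*
 * Zap the SPTEs covering GFNs in [start, end) for the given address space.
 * The inline wrapper always allows the zap to yield; the returned bool
 * tells the caller whether a TLB flush is still required.
 */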
bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id, gfn_t start,
				 gfn_t end, bool can_yield, bool flush,
				 bool shared);
static inline bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id,
					     gfn_t start, gfn_t end, bool flush,
					     bool shared)
{
	return __kvm_tdp_mmu_zap_gfn_range(kvm, as_id, start, end, true, flush,
					   shared);
}
static inline bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	gfn_t end = sp->gfn + KVM_PAGES_PER_HPAGE(sp->role.level + 1);

	/*
	 * Don't allow yielding, as the caller may have a flush pending.  Note,
	 * if mmu_lock is held for write, zapping will never yield in this
	 * case, but explicitly disallow it for safety.  The TDP MMU does not
	 * yield until it has made forward progress (steps sideways), and when
	 * zapping a single shadow page that it's guaranteed to see (thus the
	 * mmu_lock requirement), its "step sideways" will always step beyond
	 * the bounds of the shadow page's gfn range and stop iterating before
	 * yielding.
	 */
	lockdep_assert_held_write(&kvm->mmu_lock);
	return __kvm_tdp_mmu_zap_gfn_range(kvm, kvm_mmu_page_as_id(sp),
					   sp->gfn, end, false, false, false);
}

void kvm_tdp_mmu_zap_all(struct kvm *kvm);
void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm);
void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm);

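/*
 * TDP MMU page-fault handler: install a mapping for @pfn at @gpa, returning
 * a RET_PF_* value.
 */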
int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
		    int map_writable, int max_level, kvm_pfn_t pfn,
		    bool prefault);

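/*
 * TDP MMU legs of KVM's MMU-notifier handlers; each operates on the GFN
 * range described by @range.
 */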
bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
				 bool flush);
bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range);

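/*
 * Memslot helpers for dirty logging: write-protect a slot, clear dirty bits
 * (wholesale or by mask), and zap SPTEs that could be collapsed into huge
 * pages once dirty logging is disabled.
 */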
bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm, struct kvm_memory_slot *slot,
			     int min_level);
bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm,
				  struct kvm_memory_slot *slot);
void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
				       struct kvm_memory_slot *slot,
				       gfn_t gfn, unsigned long mask,
				       bool wrprot);
bool kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
				       const struct kvm_memory_slot *slot,
				       bool flush);

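/* Remove write access from the SPTE(s) mapping @gfn in @slot. */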
bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
				   struct kvm_memory_slot *slot, gfn_t gfn,
				   int min_level);

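/*
 * Walk the TDP page tables for @addr, recording the SPTEs visited in
 * @sptes (indexed by level) and the root level in @root_level.
 */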
int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
			 int *root_level);

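/*
 * The TDP MMU is only built on 64-bit hosts; 32-bit builds get no-op stubs
 * so that callers compile away cleanly.
 */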
#ifdef CONFIG_X86_64
bool kvm_mmu_init_tdp_mmu(struct kvm *kvm);
void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm);
static inline bool is_tdp_mmu_enabled(struct kvm *kvm) { return kvm->arch.tdp_mmu_enabled; }
static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return sp->tdp_mmu_page; }

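/* Returns true if the root page table at @hpa is managed by the TDP MMU. */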
static inline bool is_tdp_mmu_root(struct kvm *kvm, hpa_t hpa)
{
	struct kvm_mmu_page *sp;

	if (!is_tdp_mmu_enabled(kvm))
		return false;
	if (WARN_ON(!VALID_PAGE(hpa)))
		return false;

	sp = to_shadow_page(hpa);
	if (WARN_ON(!sp))
		return false;

	return is_tdp_mmu_page(sp) && sp->root_count;
}
#else
static inline bool kvm_mmu_init_tdp_mmu(struct kvm *kvm) { return false; }
static inline void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm) {}
static inline bool is_tdp_mmu_enabled(struct kvm *kvm) { return false; }
static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return false; }
static inline bool is_tdp_mmu_root(struct kvm *kvm, hpa_t hpa) { return false; }
#endif

#endif /* __KVM_X86_MMU_TDP_MMU_H */