// SPDX-License-Identifier: GPL-2.0

#ifndef __KVM_X86_MMU_TDP_MMU_H
#define __KVM_X86_MMU_TDP_MMU_H

#include <linux/kvm_host.h>

hpa_t kvm_tdp_mmu_get_vcpu_root_hpa(struct kvm_vcpu *vcpu);
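
/*
 * Take a reference on @root; fails if the root has been invalidated or its
 * last reference has already been dropped.  A successful get must be
 * balanced by a call to kvm_tdp_mmu_put_root().
 */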
__must_check static inline bool kvm_tdp_mmu_get_root(struct kvm *kvm,
                                                     struct kvm_mmu_page *root)
{
        if (root->role.invalid)
                return false;

        return refcount_inc_not_zero(&root->tdp_mmu_root_count);
}

void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root,
                          bool shared);

bool __kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id, gfn_t start,
                                 gfn_t end, bool can_yield, bool flush,
                                 bool shared);
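
/*
 * Zap the GFN range [start, end) in address space @as_id, allowing the zap
 * to yield (can_yield == true); returns whether a TLB flush is required.
 */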
static inline bool kvm_tdp_mmu_zap_gfn_range(struct kvm *kvm, int as_id,
                                             gfn_t start, gfn_t end, bool flush,
                                             bool shared)
{
        return __kvm_tdp_mmu_zap_gfn_range(kvm, as_id, start, end, true, flush,
                                           shared);
}

static inline bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
{
        gfn_t end = sp->gfn + KVM_PAGES_PER_HPAGE(sp->role.level + 1);

        /*
         * Don't allow yielding, as the caller may have a flush pending.  Note,
         * if mmu_lock is held for write, zapping will never yield in this case,
         * but explicitly disallow it for safety.  The TDP MMU does not yield
         * until it has made forward progress (steps sideways), and when zapping
         * a single shadow page that it's guaranteed to see (thus the mmu_lock
         * requirement), its "step sideways" will always step beyond the bounds
         * of the shadow page's gfn range and stop iterating before yielding.
         */
        lockdep_assert_held_write(&kvm->mmu_lock);
        return __kvm_tdp_mmu_zap_gfn_range(kvm, kvm_mmu_page_as_id(sp),
                                           sp->gfn, end, false, false, false);
}

void kvm_tdp_mmu_zap_all(struct kvm *kvm);
void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm);
void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm);
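
/*
 * Install a mapping for the faulting @gpa, creating any missing intermediate
 * page table pages, with @pfn mapped in at up to @max_level.
 */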
int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
                    int map_writable, int max_level, kvm_pfn_t pfn,
                    bool prefault);

bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
                                 bool flush);
bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_tdp_mmu_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
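
/*
 * Memslot helpers used by the common MMU code for write protection and
 * dirty logging.
 */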
bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm, struct kvm_memory_slot *slot,
                             int min_level);
bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm,
                                  struct kvm_memory_slot *slot);
void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
                                       struct kvm_memory_slot *slot,
                                       gfn_t gfn, unsigned long mask,
                                       bool wrprot);
bool kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
                                       const struct kvm_memory_slot *slot,
                                       bool flush);

bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
                                   struct kvm_memory_slot *slot, gfn_t gfn,
                                   int min_level);

int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
                         int *root_level);
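
/* The TDP MMU is only built for 64-bit hosts; stubs are provided otherwise. */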
#ifdef CONFIG_X86_64
bool kvm_mmu_init_tdp_mmu(struct kvm *kvm);
void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm);
static inline bool is_tdp_mmu_enabled(struct kvm *kvm) { return kvm->arch.tdp_mmu_enabled; }
static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return sp->tdp_mmu_page; }

static inline bool is_tdp_mmu(struct kvm_mmu *mmu)
{
        struct kvm_mmu_page *sp;
        hpa_t hpa = mmu->root_hpa;

        if (WARN_ON(!VALID_PAGE(hpa)))
                return false;

        /*
         * A NULL shadow page is legal when shadowing a non-paging guest with
         * PAE paging, as the MMU will be direct with root_hpa pointing at the
         * pae_root page, not a shadow page.
         */
        sp = to_shadow_page(hpa);
        return sp && is_tdp_mmu_page(sp) && sp->root_count;
}
#else
static inline bool kvm_mmu_init_tdp_mmu(struct kvm *kvm) { return false; }
static inline void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm) {}
static inline bool is_tdp_mmu_enabled(struct kvm *kvm) { return false; }
static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return false; }
static inline bool is_tdp_mmu(struct kvm_mmu *mmu) { return false; }
#endif

#endif /* __KVM_X86_MMU_TDP_MMU_H */