/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_MMU_INTERNAL_H
#define __KVM_X86_MMU_INTERNAL_H

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <asm/kvm_host.h>

#undef MMU_DEBUG

#ifdef MMU_DEBUG
extern bool dbg;

#define pgprintk(x...) do { if (dbg) printk(x); } while (0)
#define rmap_printk(fmt, args...) do { if (dbg) printk("%s: " fmt, __func__, ## args); } while (0)
#define MMU_WARN_ON(x) WARN_ON(x)
#else
#define pgprintk(x...) do { } while (0)
#define rmap_printk(x...) do { } while (0)
#define MMU_WARN_ON(x) do { } while (0)
#endif

/*
 * Unlike regular MMU roots, PAE "roots", a.k.a. PDPTEs/PDPTRs, have a PRESENT
 * bit, and thus are guaranteed to be non-zero when valid.  And, when a guest
 * PDPTR is !PRESENT, its corresponding PAE root cannot be set to INVALID_PAGE,
 * as the CPU would treat that as a PRESENT PDPTR with reserved bits set.  Use
 * '0' instead of INVALID_PAGE to indicate an invalid PAE root.
 */
#define INVALID_PAE_ROOT	0
#define IS_VALID_PAE_ROOT(x)	(!!(x))
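
/*
 * Illustrative sketch only (not part of this header): a walker over the
 * four PAE roots would use the helper above to skip unmapped entries:
 *
 *	for (i = 0; i < 4; ++i) {
 *		if (!IS_VALID_PAE_ROOT(mmu->pae_root[i]))
 *			continue;
 *		...free or otherwise process mmu->pae_root[i]...
 *	}
 */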

struct kvm_mmu_page {
	struct list_head link;
	struct hlist_node hash_link;
	struct list_head lpage_disallowed_link;

	bool unsync;
	u8 mmu_valid_gen;
	bool mmio_cached;
	bool lpage_disallowed; /* Can't be replaced by an equiv large page */

	/*
	 * The following two entries are used to key the shadow page in the
	 * hash table.
	 */
	union kvm_mmu_page_role role;
	gfn_t gfn;

	u64 *spt;
	/* hold the gfn of each spte inside spt */
	gfn_t *gfns;
	/* Currently serving as active root */
	union {
		int root_count;
		refcount_t tdp_mmu_root_count;
	};
	unsigned int unsync_children;
	struct kvm_rmap_head parent_ptes; /* rmap pointers to parent sptes */
	DECLARE_BITMAP(unsync_child_bitmap, 512);

#ifdef CONFIG_X86_32
	/*
	 * Used out of the mmu-lock to avoid reading spte values while an
	 * update is in progress; see the comments in __get_spte_lockless().
	 */
	int clear_spte_count;
#endif

	/* Number of writes since the last time traversal visited this page. */
	atomic_t write_flooding_count;

#ifdef CONFIG_X86_64
	bool tdp_mmu_page;

	/* Used for freeing the page asynchronously if it is a TDP MMU page. */
	struct rcu_head rcu_head;
#endif
};
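
/*
 * Illustrative sketch only: the (gfn, role) pair above is the hash key; a
 * lookup in mmu.c conceptually scans the bucket for a match along the
 * lines of:
 *
 *	if (sp->gfn == gfn && sp->role.word == role.word)
 *		return sp;	(reuse the existing shadow page)
 */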

extern struct kmem_cache *mmu_page_header_cache;

static inline struct kvm_mmu_page *to_shadow_page(hpa_t shadow_page)
{
	struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);

	return (struct kvm_mmu_page *)page_private(page);
}

static inline struct kvm_mmu_page *sptep_to_sp(u64 *sptep)
{
	return to_shadow_page(__pa(sptep));
}
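
/*
 * Illustrative sketch only: given a pointer into a shadow page table, the
 * helpers above recover the owning struct kvm_mmu_page, e.g.:
 *
 *	struct kvm_mmu_page *sp = sptep_to_sp(sptep);
 *	gfn_t gfn = sp->gfn;
 *
 * This works because the allocation path stores each table's kvm_mmu_page
 * in the page_private() field of the backing struct page.
 */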

static inline int kvm_mmu_role_as_id(union kvm_mmu_page_role role)
{
	return role.smm ? 1 : 0;
}

static inline int kvm_mmu_page_as_id(struct kvm_mmu_page *sp)
{
	return kvm_mmu_role_as_id(sp->role);
}
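
/*
 * Illustrative sketch only: the address space id selects which memslot set
 * a shadow page belongs to (SMM has its own), e.g.:
 *
 *	struct kvm_memslots *slots = __kvm_memslots(kvm, kvm_mmu_page_as_id(sp));
 *	struct kvm_memory_slot *slot = __gfn_to_memslot(slots, sp->gfn);
 */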

static inline bool kvm_vcpu_ad_need_write_protect(struct kvm_vcpu *vcpu)
{
	/*
	 * When using the EPT page-modification log, the GPAs in the CPU dirty
	 * log would come from L2 rather than L1.  Therefore, we need to rely
	 * on write protection to record dirty pages.  This also bypasses PML,
	 * since writes now result in a vmexit.  Note, the check on CPU dirty
	 * logging being enabled is mandatory as the bits used to denote
	 * WP-only SPTEs are reserved for NPT w/ PAE (32-bit KVM).
	 */
	return vcpu->arch.mmu == &vcpu->arch.guest_mmu &&
	       kvm_x86_ops.cpu_dirty_log_size;
}
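
/*
 * Illustrative sketch only: SPTE construction consults the helper above to
 * choose between A/D-bit dirty tracking and write protection, conceptually:
 *
 *	if (kvm_vcpu_ad_need_write_protect(vcpu))
 *		make the SPTE write-protected-only, so dirty pages are
 *		recorded via write faults;
 *	else
 *		rely on hardware A/D bits (and PML, when enabled);
 */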

extern int nx_huge_pages;
static inline bool is_nx_huge_page_enabled(void)
{
	return READ_ONCE(nx_huge_pages);
}
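
/*
 * Illustrative sketch only: fault handlers combine this knob with the
 * fault's execute permission to decide whether a huge page must be
 * disallowed for the NX huge page mitigation, e.g.:
 *
 *	bool huge_page_disallowed = exec && is_nx_huge_page_enabled();
 */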

int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn, bool can_unsync);

void kvm_mmu_gfn_disallow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn);
void kvm_mmu_gfn_allow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn);
bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
				    struct kvm_memory_slot *slot, u64 gfn,
				    int min_level);
void kvm_flush_remote_tlbs_with_address(struct kvm *kvm,
					u64 start_gfn, u64 pages);

/*
 * Return values of handle_mmio_page_fault(), mmu.page_fault(), and
 * fast_page_fault().
 *
 * RET_PF_RETRY: let CPU fault again on the address.
 * RET_PF_EMULATE: mmio page fault, emulate the instruction directly.
 * RET_PF_INVALID: the spte is invalid, let the real page fault path update it.
 * RET_PF_FIXED: The faulting entry has been fixed.
 * RET_PF_SPURIOUS: The faulting entry was already fixed, e.g. by another vCPU.
 *
 * Any names added to this enum should be exported to userspace for use in
 * tracepoints via TRACE_DEFINE_ENUM() in mmutrace.h
 */
enum {
	RET_PF_RETRY = 0,
	RET_PF_EMULATE,
	RET_PF_INVALID,
	RET_PF_FIXED,
	RET_PF_SPURIOUS,
};
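
/*
 * Illustrative sketch only (conceptual, not the exact mmu.c flow): a fault
 * handler's caller dispatches on these codes along the lines of:
 *
 *	if (r == RET_PF_INVALID)
 *		r = take the full page fault path;
 *	if (r == RET_PF_RETRY)
 *		return, letting the vCPU re-execute the access;
 *	if (r == RET_PF_EMULATE)
 *		emulate the faulting instruction;
 */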

/* Bits which may be returned by set_spte() */
#define SET_SPTE_WRITE_PROTECTED_PT	BIT(0)
#define SET_SPTE_NEED_REMOTE_TLB_FLUSH	BIT(1)
#define SET_SPTE_SPURIOUS		BIT(2)
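
/*
 * Illustrative sketch only: callers of set_spte() test these bits to decide
 * on follow-up work, e.g.:
 *
 *	if (set_spte_ret & SET_SPTE_NEED_REMOTE_TLB_FLUSH)
 *		kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn,
 *						   KVM_PAGES_PER_HPAGE(level));
 */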

int kvm_mmu_max_mapping_level(struct kvm *kvm,
			      const struct kvm_memory_slot *slot, gfn_t gfn,
			      kvm_pfn_t pfn, int max_level);
int kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, gfn_t gfn,
			    int max_level, kvm_pfn_t *pfnp,
			    bool huge_page_disallowed, int *req_level);
void disallowed_hugepage_adjust(u64 spte, gfn_t gfn, int cur_level,
				kvm_pfn_t *pfnp, int *goal_levelp);

void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc);

void account_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp);
void unaccount_huge_nx_page(struct kvm *kvm, struct kvm_mmu_page *sp);

#endif /* __KVM_X86_MMU_INTERNAL_H */