/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_MMU_H
#define __KVM_X86_MMU_H

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include "cpuid.h"

#define PT64_PT_BITS 9
#define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)
#define PT32_PT_BITS 10
#define PT32_ENT_PER_PAGE (1 << PT32_PT_BITS)

#define PT_WRITABLE_SHIFT 1
#define PT_USER_SHIFT 2

#define PT_PRESENT_MASK (1ULL << 0)
#define PT_WRITABLE_MASK (1ULL << PT_WRITABLE_SHIFT)
#define PT_USER_MASK (1ULL << PT_USER_SHIFT)
#define PT_PWT_MASK (1ULL << 3)
#define PT_PCD_MASK (1ULL << 4)
#define PT_ACCESSED_SHIFT 5
#define PT_ACCESSED_MASK (1ULL << PT_ACCESSED_SHIFT)
#define PT_DIRTY_SHIFT 6
#define PT_DIRTY_MASK (1ULL << PT_DIRTY_SHIFT)
#define PT_PAGE_SIZE_SHIFT 7
#define PT_PAGE_SIZE_MASK (1ULL << PT_PAGE_SIZE_SHIFT)
#define PT_PAT_MASK (1ULL << 7)
#define PT_GLOBAL_MASK (1ULL << 8)
#define PT64_NX_SHIFT 63
#define PT64_NX_MASK (1ULL << PT64_NX_SHIFT)

#define PT_PAT_SHIFT 7
#define PT_DIR_PAT_SHIFT 12
#define PT_DIR_PAT_MASK (1ULL << PT_DIR_PAT_SHIFT)

#define PT32_DIR_PSE36_SIZE 4
#define PT32_DIR_PSE36_SHIFT 13
#define PT32_DIR_PSE36_MASK \
	(((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)

#define PT64_ROOT_5LEVEL 5
#define PT64_ROOT_4LEVEL 4
#define PT32_ROOT_LEVEL 2
#define PT32E_ROOT_LEVEL 3

static __always_inline u64 rsvd_bits(int s, int e)
{
	BUILD_BUG_ON(__builtin_constant_p(e) && __builtin_constant_p(s) && e < s);

	if (__builtin_constant_p(e))
		BUILD_BUG_ON(e > 63);
	else
		e &= 63;

	if (e < s)
		return 0;

	return ((2ULL << (e - s)) - 1) << s;
}
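
/*
 * For example, rsvd_bits(3, 5) == ((2ULL << 2) - 1) << 3 == 0x38, i.e. a
 * mask with bits 3..5 inclusive set.  Callers typically use this to build
 * masks of reserved physical address bits, e.g. rsvd_bits(maxphyaddr, 51).
 */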

void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 access_mask);

void
reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context);

void kvm_init_mmu(struct kvm_vcpu *vcpu, bool reset_roots);
void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, u32 cr0, u32 cr4, u32 efer,
			     gpa_t nested_cr3);
void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
			     bool accessed_dirty, gpa_t new_eptp);
bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu);
int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
			  u64 fault_address, char *insn, int insn_len);

static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
{
	if (likely(vcpu->arch.mmu->root_hpa != INVALID_PAGE))
		return 0;

	return kvm_mmu_load(vcpu);
}

static inline unsigned long kvm_get_pcid(struct kvm_vcpu *vcpu, gpa_t cr3)
{
	BUILD_BUG_ON((X86_CR3_PCID_MASK & PAGE_MASK) != 0);

	return kvm_read_cr4_bits(vcpu, X86_CR4_PCIDE)
	       ? cr3 & X86_CR3_PCID_MASK
	       : 0;
}
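
/*
 * For example, with CR4.PCIDE = 1 and CR3 = 0x12345abc the active PCID is
 * 0xabc: X86_CR3_PCID_MASK covers CR3 bits 11:0, which the BUILD_BUG_ON
 * above verifies do not overlap the page-aligned base address.  With
 * CR4.PCIDE clear the PCID is always 0.
 */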

static inline unsigned long kvm_get_active_pcid(struct kvm_vcpu *vcpu)
{
	return kvm_get_pcid(vcpu, kvm_read_cr3(vcpu));
}

static inline void kvm_mmu_load_pgd(struct kvm_vcpu *vcpu)
{
	u64 root_hpa = vcpu->arch.mmu->root_hpa;

	if (!VALID_PAGE(root_hpa))
		return;

	kvm_x86_ops.load_mmu_pgd(vcpu, root_hpa | kvm_get_active_pcid(vcpu),
				 vcpu->arch.mmu->shadow_root_level);
}

int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
		       bool prefault);

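/*
 * With CONFIG_RETPOLINE, the indirect call through mmu->page_fault goes
 * through a retpoline thunk, so the common TDP case is special-cased below
 * as a direct call to kvm_tdp_page_fault().
 */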
static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
					u32 err, bool prefault)
{
#ifdef CONFIG_RETPOLINE
	if (likely(vcpu->arch.mmu->page_fault == kvm_tdp_page_fault))
		return kvm_tdp_page_fault(vcpu, cr2_or_gpa, err, prefault);
#endif
	return vcpu->arch.mmu->page_fault(vcpu, cr2_or_gpa, err, prefault);
}

/*
 * Currently, we have two sorts of write-protection: a) the first one
 * write-protects guest pages to sync guest modifications, b) the second one
 * is used to sync the dirty bitmap when we do KVM_GET_DIRTY_LOG.  The
 * differences between the two are:
 * 1) the first case clears the SPTE_MMU_WRITEABLE bit.
 * 2) the first case requires flushing the TLB immediately to avoid
 *    corrupting the shadow page tables between vcpus, so it must run under
 *    the protection of mmu-lock.  The second case does not need to flush
 *    the TLB until the dirty bitmap is returned to userspace, since it only
 *    write-protects pages already logged in the bitmap; such a page is
 *    never missed from the bitmap, so the TLB can be flushed outside
 *    mmu-lock.
 *
 * The problem is that the first case can observe a stale, still-writable
 * TLB entry left behind by the second case, which write-protects pages
 * without flushing the TLB immediately.  To make the first case aware of
 * this, it flushes the TLB whenever it write-protects a spte whose
 * SPTE_MMU_WRITEABLE bit is set; this works because the second case never
 * touches SPTE_MMU_WRITEABLE.
 *
 * In any case, whenever a spte is updated (only permission and status bits
 * are changed) we need to check whether a spte with SPTE_MMU_WRITEABLE has
 * become read-only; if so, the TLB must be flushed.  Fortunately,
 * mmu_spte_update() already handles this.
 *
 * The rules for using SPTE_MMU_WRITEABLE and PT_WRITABLE_MASK:
 * - to check whether a writable TLB entry may exist, or whether the spte
 *   can be made writable on the mmu mapping, check SPTE_MMU_WRITEABLE;
 *   this is the common case (a rough sketch follows is_writable_pte()
 *   below), otherwise
 * - when fixing a page fault on the spte, or when write-protecting for
 *   dirty logging, check PT_WRITABLE_MASK.
 *
 * TODO: introduce APIs to split these two cases.
 */
static inline int is_writable_pte(unsigned long pte)
{
	return pte & PT_WRITABLE_MASK;
}
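
/*
 * Rough sketch of the flush rule described above, modelled on what
 * mmu_spte_update() does (SPTE_MMU_WRITEABLE and the real helpers live in
 * mmu.c, so this is illustrative only): if the old spte was MMU-writable
 * and the new spte is no longer writable, a stale writable TLB entry may
 * still exist and the TLB must be flushed, e.g.
 *
 *	if ((old_spte & SPTE_MMU_WRITEABLE) && !is_writable_pte(new_spte))
 *		flush = true;
 */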

static inline bool is_write_protection(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, X86_CR0_WP);
}

/*
 * Check if a given access (described through the I/D, W/R and U/S bits of a
 * page fault error code pfec) causes a permission fault with the given PTE
 * access rights (in ACC_* format).
 *
 * Return zero if the access does not fault; return the page fault error code
 * if the access faults.
 */
static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
				  unsigned pte_access, unsigned pte_pkey,
				  unsigned pfec)
{
	int cpl = kvm_x86_ops.get_cpl(vcpu);
	unsigned long rflags = kvm_x86_ops.get_rflags(vcpu);

	/*
	 * If CPL < 3, SMAP prevention is disabled if EFLAGS.AC = 1.
	 *
	 * If CPL = 3, SMAP applies to all supervisor-mode data accesses
	 * (these are implicit supervisor accesses) regardless of the value
	 * of EFLAGS.AC.
	 *
	 * This computes (cpl < 3) && (rflags & X86_EFLAGS_AC), leaving
	 * the result in the position of the X86_EFLAGS_AC bit.  We then
	 * insert it in place of the PFERR_RSVD_MASK bit; this bit will
	 * always be zero in pfec, but it will be one in index if SMAP
	 * checks are being overridden.  It is important to keep this
	 * branchless.
	 */
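	/*
	 * Worked example: with CPL == 0 and EFLAGS.AC == 1, (cpl - 3) is
	 * negative, so bit 18 of it is set and smap == X86_EFLAGS_AC.
	 * X86_EFLAGS_AC_BIT is 18 and PFERR_RSVD_BIT is 3, so the shift by
	 * 18 - 3 + 1 == 16 below moves that bit to bit 2 of index, exactly
	 * where pfec's (always-zero) RSVD bit would land after the >> 1.
	 * With CPL == 3, (cpl - 3) == 0 and the SMAP override is never set.
	 */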
	unsigned long smap = (cpl - 3) & (rflags & X86_EFLAGS_AC);
	int index = (pfec >> 1) +
		    (smap >> (X86_EFLAGS_AC_BIT - PFERR_RSVD_BIT + 1));
	bool fault = (mmu->permissions[index] >> pte_access) & 1;
	u32 errcode = PFERR_PRESENT_MASK;

	WARN_ON(pfec & (PFERR_PK_MASK | PFERR_RSVD_MASK));
	if (unlikely(mmu->pkru_mask)) {
		u32 pkru_bits, offset;

		/*
		 * PKRU defines 32 bits: there are 16 domains and 2
		 * attribute bits per domain in pkru.  pte_pkey is the
		 * index of the protection domain, so pte_pkey * 2 is
		 * the index of the first bit for that domain.
		 */
		pkru_bits = (vcpu->arch.pkru >> (pte_pkey * 2)) & 3;

		/* clear present bit, replace PFEC.RSVD with ACC_USER_MASK. */
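		/*
		 * PT_USER_MASK is bit 2 and PFERR_RSVD_BIT is 3, so the
		 * shift by one moves the PTE's U/S bit into the PFERR_RSVD
		 * position, which is always clear in pfec (see the WARN_ON
		 * above).
		 */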
		offset = (pfec & ~1) +
			((pte_access & PT_USER_MASK) << (PFERR_RSVD_BIT - PT_USER_SHIFT));

		pkru_bits &= mmu->pkru_mask >> offset;
		errcode |= -pkru_bits & PFERR_PK_MASK;
		fault |= (pkru_bits != 0);
	}

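	/*
	 * -(u32)fault is all-ones when fault is true and zero otherwise, so
	 * the accumulated error code is returned iff the access faults.
	 */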
	return -(u32)fault & errcode;
}

void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end);

int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu);

int kvm_mmu_post_init_vm(struct kvm *kvm);
void kvm_mmu_pre_destroy_vm(struct kvm *kvm);

#endif