#ifndef __KVM_X86_MMU_H
#define __KVM_X86_MMU_H

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"

#define PT64_PT_BITS 9
#define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)
#define PT32_PT_BITS 10
#define PT32_ENT_PER_PAGE (1 << PT32_PT_BITS)

#define PT_WRITABLE_SHIFT 1

#define PT_PRESENT_MASK (1ULL << 0)
#define PT_WRITABLE_MASK (1ULL << PT_WRITABLE_SHIFT)
#define PT_USER_MASK (1ULL << 2)
#define PT_PWT_MASK (1ULL << 3)
#define PT_PCD_MASK (1ULL << 4)
#define PT_ACCESSED_SHIFT 5
#define PT_ACCESSED_MASK (1ULL << PT_ACCESSED_SHIFT)
#define PT_DIRTY_SHIFT 6
#define PT_DIRTY_MASK (1ULL << PT_DIRTY_SHIFT)
#define PT_PAGE_SIZE_SHIFT 7
#define PT_PAGE_SIZE_MASK (1ULL << PT_PAGE_SIZE_SHIFT)
#define PT_PAT_MASK (1ULL << 7)
#define PT_GLOBAL_MASK (1ULL << 8)
#define PT64_NX_SHIFT 63
#define PT64_NX_MASK (1ULL << PT64_NX_SHIFT)

#define PT_PAT_SHIFT 7
#define PT_DIR_PAT_SHIFT 12
#define PT_DIR_PAT_MASK (1ULL << PT_DIR_PAT_SHIFT)

#define PT32_DIR_PSE36_SIZE 4
#define PT32_DIR_PSE36_SHIFT 13
#define PT32_DIR_PSE36_MASK \
	(((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)

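/*
 * Illustrative sketch, not part of the original header: with 36-bit PSE,
 * PDE bits 16:13 carry physical-address bits 35:32 of a 4MB page.
 * Assuming PAGE_SHIFT == 12, the extra gfn bits can be recovered along
 * the lines of the pse36_gfn_delta() helper in mmu.c:
 *
 *	gfn_t delta = (gpte & PT32_DIR_PSE36_MASK) <<
 *		      (32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT);
 */
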
#define PT64_ROOT_LEVEL 4	/* 4-level paging (long mode) */
#define PT32_ROOT_LEVEL 2	/* 2-level paging (legacy 32-bit) */
#define PT32E_ROOT_LEVEL 3	/* 3-level paging (PAE) */

#define PT_PDPE_LEVEL 3
#define PT_DIRECTORY_LEVEL 2
#define PT_PAGE_TABLE_LEVEL 1

#define PFERR_PRESENT_BIT 0
#define PFERR_WRITE_BIT 1
#define PFERR_USER_BIT 2
#define PFERR_RSVD_BIT 3
#define PFERR_FETCH_BIT 4

#define PFERR_PRESENT_MASK (1U << PFERR_PRESENT_BIT)
#define PFERR_WRITE_MASK (1U << PFERR_WRITE_BIT)
#define PFERR_USER_MASK (1U << PFERR_USER_BIT)
#define PFERR_RSVD_MASK (1U << PFERR_RSVD_BIT)
#define PFERR_FETCH_MASK (1U << PFERR_FETCH_BIT)

static inline u64 rsvd_bits(int s, int e)
{
	return ((1ULL << (e - s + 1)) - 1) << s;
}
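
/*
 * Worked example, illustrative: rsvd_bits(52, 62) evaluates to
 * ((1ULL << 11) - 1) << 52 == 0x7ff0000000000000ULL, i.e. bits 62:52 set.
 * This is the shape of mask used to catch guest ptes that set
 * physical-address bits at or above the guest's MAXPHYADDR.
 */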

int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4]);
void kvm_mmu_set_mmio_spte_mask(u64 mmio_mask);

/*
 * Return values of handle_mmio_page_fault_common:
 * RET_MMIO_PF_EMULATE: it is a real mmio page fault, emulate the
 *			instruction directly.
 * RET_MMIO_PF_INVALID: an invalid spte was detected; let the real page
 *			fault path update the mmio spte.
 * RET_MMIO_PF_RETRY: let the CPU fault again on the address.
 * RET_MMIO_PF_BUG: a bug was detected.
 */
enum {
	RET_MMIO_PF_EMULATE = 1,
	RET_MMIO_PF_INVALID = 2,
	RET_MMIO_PF_RETRY = 0,
	RET_MMIO_PF_BUG = -1
};
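
/*
 * Illustrative sketch, not part of the original header: a caller (e.g. an
 * EPT-misconfig handler) would typically dispatch on the return value
 * along these lines.  emulate() and fix_page_fault() are placeholder
 * names, not real KVM APIs:
 *
 *	switch (handle_mmio_page_fault_common(vcpu, addr, true)) {
 *	case RET_MMIO_PF_EMULATE:
 *		return emulate(vcpu, addr);        // real mmio access
 *	case RET_MMIO_PF_INVALID:
 *		return fix_page_fault(vcpu, addr); // refresh the mmio spte
 *	case RET_MMIO_PF_RETRY:
 *		return 1;                          // re-run the access
 *	default:                                   // RET_MMIO_PF_BUG
 *		WARN_ON(1);
 *		return -1;
 *	}
 */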

int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct);
void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context);
void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, struct kvm_mmu *context,
			     bool execonly);
void update_permission_bitmask(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
			       bool ept);

static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
{
	if (kvm->arch.n_max_mmu_pages > kvm->arch.n_used_mmu_pages)
		return kvm->arch.n_max_mmu_pages -
			kvm->arch.n_used_mmu_pages;

	return 0;
}

static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
{
	if (likely(vcpu->arch.mmu.root_hpa != INVALID_PAGE))
		return 0;

	return kvm_mmu_load(vcpu);
}

static inline int is_present_gpte(unsigned long pte)
{
	return pte & PT_PRESENT_MASK;
}

/*
 * Currently, we have two sorts of write-protection: a) the first
 * write-protects guest pages to synchronize guest modifications, b) the
 * second is used to synchronize the dirty bitmap when we do
 * KVM_GET_DIRTY_LOG.  The differences between these two sorts are:
 * 1) the first case clears the SPTE_MMU_WRITEABLE bit.
 * 2) the first case requires flushing the tlb immediately to avoid
 *    corrupting the shadow page table between vcpus, so it must run under
 *    the protection of mmu-lock.  The second case does not need to flush
 *    the tlb until the dirty bitmap is returned to userspace, since it
 *    only write-protects pages logged in the bitmap; no page in the dirty
 *    bitmap is missed, so it can flush the tlb outside of mmu-lock.
 *
 * Hence the problem: the first case can observe a corrupted tlb caused by
 * the second case, which write-protects pages without flushing the tlb
 * immediately.  To make the first case aware of this, we have it flush
 * the tlb whenever it write-protects a spte whose SPTE_MMU_WRITEABLE bit
 * is set; this works because the second case never touches
 * SPTE_MMU_WRITEABLE.
 *
 * In any case, whenever a spte is updated (only permission and status
 * bits are changed) we need to check whether a spte with
 * SPTE_MMU_WRITEABLE set becomes read-only; if that happens, we need to
 * flush the tlb.  Fortunately, mmu_spte_update() already handles this.
 *
 * The rules for using SPTE_MMU_WRITEABLE and PT_WRITABLE_MASK (see the
 * sketch after is_writable_pte() below):
 * - to see whether a writable tlb entry may exist, or whether the spte
 *   can be made writable on the mmu mapping, check SPTE_MMU_WRITEABLE;
 *   this is the common case. Otherwise,
 * - when fixing a page fault on the spte, or when write-protecting for
 *   dirty logging, check PT_WRITABLE_MASK.
 *
 * TODO: introduce APIs to split these two cases.
 */
static inline int is_writable_pte(unsigned long pte)
{
	return pte & PT_WRITABLE_MASK;
}
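
/*
 * Illustrative sketch of the rules above, not part of the original
 * header: SPTE_MMU_WRITEABLE is defined in mmu.c, not here, so from this
 * file's point of view it is pseudo-code.
 *
 *	// may a writable tlb entry exist / can the spte be made writable?
 *	bool lockless_writable = spte & SPTE_MMU_WRITEABLE;
 *
 *	// is the spte writable right now (page-fault fix, dirty logging)?
 *	bool writable = spte & PT_WRITABLE_MASK;
 */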

static inline bool is_write_protection(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, X86_CR0_WP);
}

/*
 * Will a fault with a given page-fault error code (pfec) cause a permission
 * fault with the given access (in ACC_* format)?
 */
static inline bool permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
				    unsigned pte_access, unsigned pfec)
{
	int cpl = kvm_x86_ops->get_cpl(vcpu);
	unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);

	/*
	 * If CPL < 3, SMAP protection is disabled if EFLAGS.AC = 1.
	 *
	 * If CPL = 3, SMAP applies to all supervisor-mode data accesses
	 * (these are implicit supervisor accesses) regardless of the value
	 * of EFLAGS.AC.
	 *
	 * This computes (cpl < 3) && (rflags & X86_EFLAGS_AC), leaving
	 * the result in X86_EFLAGS_AC. We then insert it in place of
	 * the PFERR_RSVD_MASK bit; this bit will always be zero in pfec,
	 * but it will be one in index if SMAP checks are being overridden.
	 * It is important to keep this branchless.
	 */
	unsigned long smap = (cpl - 3) & (rflags & X86_EFLAGS_AC);
	int index = (pfec >> 1) +
		    (smap >> (X86_EFLAGS_AC_BIT - PFERR_RSVD_BIT + 1));

	return (mmu->permissions[index] >> pte_access) & 1;
}
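
/*
 * Worked example, illustrative: X86_EFLAGS_AC_BIT is 18 and PFERR_RSVD_BIT
 * is 3, so the shift above is 18 - 3 + 1 = 16 and the AC bit lands in bit
 * 2 of 'index' -- exactly where PFERR_RSVD_MASK sits after 'pfec >> 1'.
 * For a user-mode write, pfec = PFERR_PRESENT_MASK | PFERR_WRITE_MASK |
 * PFERR_USER_MASK = 0x7, giving index = 0x3 normally and 0x7 when the
 * SMAP check is being overridden.
 */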

void kvm_mmu_invalidate_zap_all_pages(struct kvm *kvm);
#endif