/*
 * mmu_audit.c:
 *
 * Audit code for KVM MMU
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay     <yaniv@qumranet.com>
 *   Avi Kivity      <avi@qumranet.com>
 *   Marcelo Tosatti <mtosatti@redhat.com>
 *   Xiao Guangrong  <xiaoguangrong@cn.fujitsu.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

static const char *audit_msg;

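/*
 * An inspect_spte_fn is called on every SPTE visited during a walk.
 * __mmu_spte_walk() below does a depth-first traversal from @sp: it
 * applies @fn to each entry, then recurses into present, non-leaf
 * entries one level down.
 */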
typedef void (*inspect_spte_fn) (struct kvm_vcpu *vcpu, u64 *sptep, int level);

static void __mmu_spte_walk(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
			    inspect_spte_fn fn, int level)
{
	int i;

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
		u64 *ent = sp->spt;

		fn(vcpu, ent + i, level);

		if (is_shadow_present_pte(ent[i]) &&
		    !is_last_spte(ent[i], level)) {
			struct kvm_mmu_page *child;

			child = page_header(ent[i] & PT64_BASE_ADDR_MASK);
			__mmu_spte_walk(vcpu, child, fn, level - 1);
		}
	}
}

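/*
 * Walk every SPTE reachable from the vcpu's page-table root(s).  A
 * 4-level root is a single shadow page; otherwise each of the four
 * PAE roots is walked as a level-2 table.
 */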
static void mmu_spte_walk(struct kvm_vcpu *vcpu, inspect_spte_fn fn)
{
	int i;
	struct kvm_mmu_page *sp;

	if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
		return;

	if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
		hpa_t root = vcpu->arch.mmu.root_hpa;

		sp = page_header(root);
		__mmu_spte_walk(vcpu, sp, fn, PT64_ROOT_LEVEL);
		return;
	}

	for (i = 0; i < 4; ++i) {
		hpa_t root = vcpu->arch.mmu.pae_root[i];

		if (root && VALID_PAGE(root)) {
			root &= PT64_BASE_ADDR_MASK;
			sp = page_header(root);
			__mmu_spte_walk(vcpu, sp, fn, 2);
		}
	}
}

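/*
 * An sp_handler operates on a whole shadow page; walk_all_active_sps()
 * applies one to every page on the VM's active_mmu_pages list.
 */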
typedef void (*sp_handler) (struct kvm *kvm, struct kvm_mmu_page *sp);

static void walk_all_active_sps(struct kvm *kvm, sp_handler fn)
{
	struct kvm_mmu_page *sp;

	list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link)
		fn(kvm, sp);
}

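/*
 * Audit a single SPTE: report unsync pages seen above the last level,
 * notrap SPTEs inside unsync or direct pages, and leaf entries whose
 * host physical address no longer matches the pfn backing the gfn.
 */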
static void audit_mappings(struct kvm_vcpu *vcpu, u64 *sptep, int level)
{
	struct kvm_mmu_page *sp;
	gfn_t gfn;
	pfn_t pfn;
	hpa_t hpa;

	sp = page_header(__pa(sptep));

	if (sp->unsync) {
		if (level != PT_PAGE_TABLE_LEVEL) {
			printk(KERN_ERR "audit: (%s) error: unsync sp: %p level = %d\n",
				audit_msg, sp, level);
			return;
		}

		if (*sptep == shadow_notrap_nonpresent_pte) {
			printk(KERN_ERR "audit: (%s) error: notrap spte in unsync sp: %p\n",
				audit_msg, sp);
			return;
		}
	}

	if (sp->role.direct && *sptep == shadow_notrap_nonpresent_pte) {
		printk(KERN_ERR "audit: (%s) error: notrap spte in direct sp: %p\n",
			audit_msg, sp);
		return;
	}

	if (!is_shadow_present_pte(*sptep) || !is_last_spte(*sptep, level))
		return;

	gfn = kvm_mmu_page_get_gfn(sp, sptep - sp->spt);
	pfn = gfn_to_pfn_atomic(vcpu->kvm, gfn);

	if (is_error_pfn(pfn)) {
		kvm_release_pfn_clean(pfn);
		return;
	}

	hpa = pfn << PAGE_SHIFT;
	if ((*sptep & PT64_BASE_ADDR_MASK) != hpa)
		printk(KERN_ERR "xx audit error: (%s) levels %d "
			"pfn %llx hpa %llx ent %llx\n",
			audit_msg, vcpu->arch.mmu.root_level,
			pfn, hpa, *sptep);
}

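/*
 * Check that a present SPTE is reachable via the rmap of the gfn it
 * maps; warn (rate-limited) if the gfn has no memslot or its rmap is
 * empty.
 */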
static void inspect_spte_has_rmap(struct kvm *kvm, u64 *sptep)
{
	unsigned long *rmapp;
	struct kvm_mmu_page *rev_sp;
	gfn_t gfn;

	rev_sp = page_header(__pa(sptep));
	gfn = kvm_mmu_page_get_gfn(rev_sp, sptep - rev_sp->spt);

	if (!gfn_to_memslot(kvm, gfn)) {
		if (!printk_ratelimit())
			return;
		printk(KERN_ERR "%s: no memslot for gfn %llx\n",
		       audit_msg, gfn);
		printk(KERN_ERR "%s: index %ld of sp (gfn=%llx)\n",
		       audit_msg, (long int)(sptep - rev_sp->spt),
		       rev_sp->gfn);
		dump_stack();
		return;
	}

	rmapp = gfn_to_rmap(kvm, gfn, rev_sp->role.level);
	if (!*rmapp) {
		if (!printk_ratelimit())
			return;
		printk(KERN_ERR "%s: no rmap for writable spte %llx\n",
		       audit_msg, *sptep);
		dump_stack();
	}
}

static void audit_sptes_have_rmaps(struct kvm_vcpu *vcpu, u64 *sptep, int level)
{
	if (is_shadow_present_pte(*sptep) && is_last_spte(*sptep, level))
		inspect_spte_has_rmap(vcpu->kvm, sptep);
}

static void check_mappings_rmap(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	int i;

	if (sp->role.level != PT_PAGE_TABLE_LEVEL)
		return;

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
		if (!is_rmap_spte(sp->spt[i]))
			continue;

		inspect_spte_has_rmap(kvm, sp->spt + i);
	}
}

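/*
 * A shadowed guest page table must be write-protected: walk the rmap
 * of sp->gfn and report any mapping that is still writable.
 */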
static void audit_write_protection(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	struct kvm_memory_slot *slot;
	unsigned long *rmapp;
	u64 *spte;

	if (sp->role.direct || sp->unsync || sp->role.invalid)
		return;

	slot = gfn_to_memslot(kvm, sp->gfn);
	rmapp = &slot->rmap[sp->gfn - slot->base_gfn];

	spte = rmap_next(kvm, rmapp, NULL);
	while (spte) {
		if (is_writable_pte(*spte))
			printk(KERN_ERR "%s: (%s) shadow page has "
				"writable mappings: gfn %llx role %x\n",
				__func__, audit_msg, sp->gfn,
				sp->role.word);
		spte = rmap_next(kvm, rmapp, spte);
	}
}

static void audit_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
{
	check_mappings_rmap(kvm, sp);
	audit_write_protection(kvm, sp);
}

static void audit_all_active_sps(struct kvm *kvm)
{
	walk_all_active_sps(kvm, audit_sp);
}

static void audit_spte(struct kvm_vcpu *vcpu, u64 *sptep, int level)
{
	audit_sptes_have_rmaps(vcpu, sptep, level);
	audit_mappings(vcpu, sptep, level);
}

static void audit_vcpu_spte(struct kvm_vcpu *vcpu)
{
	mmu_spte_walk(vcpu, audit_spte);
}

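/*
 * Tracepoint probe: on each audit point, check every active shadow
 * page and every SPTE reachable from this vcpu.
 */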
static void kvm_mmu_audit(void *ignore, struct kvm_vcpu *vcpu, int audit_point)
{
	audit_msg = audit_point_name[audit_point];
	audit_all_active_sps(vcpu->kvm);
	audit_vcpu_spte(vcpu);
}

static bool mmu_audit;

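/*
 * Auditing is driven by the kvm_mmu_audit tracepoint: enabling attaches
 * kvm_mmu_audit() as a probe; disabling detaches it and waits for any
 * in-flight probe to finish before clearing the flag.
 */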
static void mmu_audit_enable(void)
{
	int ret;

	if (mmu_audit)
		return;

	ret = register_trace_kvm_mmu_audit(kvm_mmu_audit, NULL);
	WARN_ON(ret);

	mmu_audit = true;
}

static void mmu_audit_disable(void)
{
	if (!mmu_audit)
		return;

	unregister_trace_kvm_mmu_audit(kvm_mmu_audit, NULL);
	tracepoint_synchronize_unregister();
	mmu_audit = false;
}

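/* Parameter setter: "0" disables auditing, "1" enables it; anything else is rejected. */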
static int mmu_audit_set(const char *val, const struct kernel_param *kp)
{
	int ret;
	unsigned long enable;

	ret = strict_strtoul(val, 10, &enable);
	if (ret < 0)
		return -EINVAL;

	switch (enable) {
	case 0:
		mmu_audit_disable();
		break;
	case 1:
		mmu_audit_enable();
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static struct kernel_param_ops audit_param_ops = {
	.set = mmu_audit_set,
	.get = param_get_bool,
};

module_param_cb(mmu_audit, &audit_param_ops, &mmu_audit, 0644);
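
/*
 * A usage note (assuming this file is built into the "kvm" module, so
 * the 0644 parameter shows up under that name in sysfs):
 *
 *	echo 1 > /sys/module/kvm/parameters/mmu_audit	# attach the audit probe
 *	echo 0 > /sys/module/kvm/parameters/mmu_audit	# detach it again
 */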