git.proxmox.com: mirror_ubuntu-jammy-kernel.git, blob drivers/kvm/mmu.c
Commit: KVM: Add statistic for remote tlb flushes
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "vmx.h"
#include "kvm.h"
#include "x86.h"

#include <linux/types.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/module.h>

#include <asm/page.h>
#include <asm/cmpxchg.h>

#undef MMU_DEBUG

#undef AUDIT

#ifdef AUDIT
static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg);
#else
static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg) {}
#endif

#ifdef MMU_DEBUG

#define pgprintk(x...) do { if (dbg) printk(x); } while (0)
#define rmap_printk(x...) do { if (dbg) printk(x); } while (0)

#else

#define pgprintk(x...) do { } while (0)
#define rmap_printk(x...) do { } while (0)

#endif

#if defined(MMU_DEBUG) || defined(AUDIT)
static int dbg = 1;
#endif

#ifndef MMU_DEBUG
#define ASSERT(x) do { } while (0)
#else
#define ASSERT(x)							\
	if (!(x)) {							\
		printk(KERN_WARNING "assertion failed %s:%d: %s\n",	\
		       __FILE__, __LINE__, #x);				\
	}
#endif

#define PT64_PT_BITS 9
#define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)
#define PT32_PT_BITS 10
#define PT32_ENT_PER_PAGE (1 << PT32_PT_BITS)

#define PT_WRITABLE_SHIFT 1

#define PT_PRESENT_MASK (1ULL << 0)
#define PT_WRITABLE_MASK (1ULL << PT_WRITABLE_SHIFT)
#define PT_USER_MASK (1ULL << 2)
#define PT_PWT_MASK (1ULL << 3)
#define PT_PCD_MASK (1ULL << 4)
#define PT_ACCESSED_MASK (1ULL << 5)
#define PT_DIRTY_MASK (1ULL << 6)
#define PT_PAGE_SIZE_MASK (1ULL << 7)
#define PT_PAT_MASK (1ULL << 7)
#define PT_GLOBAL_MASK (1ULL << 8)
#define PT64_NX_MASK (1ULL << 63)

#define PT_PAT_SHIFT 7
#define PT_DIR_PAT_SHIFT 12
#define PT_DIR_PAT_MASK (1ULL << PT_DIR_PAT_SHIFT)

#define PT32_DIR_PSE36_SIZE 4
#define PT32_DIR_PSE36_SHIFT 13
#define PT32_DIR_PSE36_MASK \
	(((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)


#define PT_FIRST_AVAIL_BITS_SHIFT 9
#define PT64_SECOND_AVAIL_BITS_SHIFT 52

#define PT_SHADOW_IO_MARK (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)

#define VALID_PAGE(x) ((x) != INVALID_PAGE)

#define PT64_LEVEL_BITS 9

#define PT64_LEVEL_SHIFT(level) \
		(PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS)

#define PT64_LEVEL_MASK(level) \
		(((1ULL << PT64_LEVEL_BITS) - 1) << PT64_LEVEL_SHIFT(level))

#define PT64_INDEX(address, level)\
	(((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))


#define PT32_LEVEL_BITS 10

#define PT32_LEVEL_SHIFT(level) \
		(PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)

#define PT32_LEVEL_MASK(level) \
		(((1ULL << PT32_LEVEL_BITS) - 1) << PT32_LEVEL_SHIFT(level))

#define PT32_INDEX(address, level)\
	(((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))


#define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
#define PT64_DIR_BASE_ADDR_MASK \
	(PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + PT64_LEVEL_BITS)) - 1))

#define PT32_BASE_ADDR_MASK PAGE_MASK
#define PT32_DIR_BASE_ADDR_MASK \
	(PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))


#define PFERR_PRESENT_MASK (1U << 0)
#define PFERR_WRITE_MASK (1U << 1)
#define PFERR_USER_MASK (1U << 2)
#define PFERR_FETCH_MASK (1U << 4)

#define PT64_ROOT_LEVEL 4
#define PT32_ROOT_LEVEL 2
#define PT32E_ROOT_LEVEL 3

#define PT_DIRECTORY_LEVEL 2
#define PT_PAGE_TABLE_LEVEL 1

#define RMAP_EXT 4

struct kvm_rmap_desc {
	u64 *shadow_ptes[RMAP_EXT];
	struct kvm_rmap_desc *more;
};

static struct kmem_cache *pte_chain_cache;
static struct kmem_cache *rmap_desc_cache;
static struct kmem_cache *mmu_page_header_cache;

static u64 __read_mostly shadow_trap_nonpresent_pte;
static u64 __read_mostly shadow_notrap_nonpresent_pte;

void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte)
{
	shadow_trap_nonpresent_pte = trap_pte;
	shadow_notrap_nonpresent_pte = notrap_pte;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_nonpresent_ptes);

static int is_write_protection(struct kvm_vcpu *vcpu)
{
	return vcpu->cr0 & X86_CR0_WP;
}

static int is_cpuid_PSE36(void)
{
	return 1;
}

static int is_nx(struct kvm_vcpu *vcpu)
{
	return vcpu->shadow_efer & EFER_NX;
}

static int is_present_pte(unsigned long pte)
{
	return pte & PT_PRESENT_MASK;
}

static int is_shadow_present_pte(u64 pte)
{
	pte &= ~PT_SHADOW_IO_MARK;
	return pte != shadow_trap_nonpresent_pte
		&& pte != shadow_notrap_nonpresent_pte;
}

static int is_writeble_pte(unsigned long pte)
{
	return pte & PT_WRITABLE_MASK;
}

static int is_dirty_pte(unsigned long pte)
{
	return pte & PT_DIRTY_MASK;
}

static int is_io_pte(unsigned long pte)
{
	return pte & PT_SHADOW_IO_MARK;
}

static int is_rmap_pte(u64 pte)
{
	return pte != shadow_trap_nonpresent_pte
		&& pte != shadow_notrap_nonpresent_pte;
}

static void set_shadow_pte(u64 *sptep, u64 spte)
{
#ifdef CONFIG_X86_64
	set_64bit((unsigned long *)sptep, spte);
#else
	set_64bit((unsigned long long *)sptep, spte);
#endif
}

static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
				  struct kmem_cache *base_cache, int min)
{
	void *obj;

	if (cache->nobjs >= min)
		return 0;
	while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
		obj = kmem_cache_zalloc(base_cache, GFP_KERNEL);
		if (!obj)
			return -ENOMEM;
		cache->objects[cache->nobjs++] = obj;
	}
	return 0;
}

static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
{
	while (mc->nobjs)
		kfree(mc->objects[--mc->nobjs]);
}

static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
				       int min)
{
	struct page *page;

	if (cache->nobjs >= min)
		return 0;
	while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
		page = alloc_page(GFP_KERNEL);
		if (!page)
			return -ENOMEM;
		set_page_private(page, 0);
		cache->objects[cache->nobjs++] = page_address(page);
	}
	return 0;
}

static void mmu_free_memory_cache_page(struct kvm_mmu_memory_cache *mc)
{
	while (mc->nobjs)
		free_page((unsigned long)mc->objects[--mc->nobjs]);
}

static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
{
	int r;

	kvm_mmu_free_some_pages(vcpu);
	r = mmu_topup_memory_cache(&vcpu->mmu_pte_chain_cache,
				   pte_chain_cache, 4);
	if (r)
		goto out;
	r = mmu_topup_memory_cache(&vcpu->mmu_rmap_desc_cache,
				   rmap_desc_cache, 1);
	if (r)
		goto out;
	r = mmu_topup_memory_cache_page(&vcpu->mmu_page_cache, 8);
	if (r)
		goto out;
	r = mmu_topup_memory_cache(&vcpu->mmu_page_header_cache,
				   mmu_page_header_cache, 4);
out:
	return r;
}

static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
	mmu_free_memory_cache(&vcpu->mmu_pte_chain_cache);
	mmu_free_memory_cache(&vcpu->mmu_rmap_desc_cache);
	mmu_free_memory_cache_page(&vcpu->mmu_page_cache);
	mmu_free_memory_cache(&vcpu->mmu_page_header_cache);
}

static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
				    size_t size)
{
	void *p;

	BUG_ON(!mc->nobjs);
	p = mc->objects[--mc->nobjs];
	memset(p, 0, size);
	return p;
}
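
/*
 * Illustrative sketch (not part of the driver): the two-phase pattern used
 * above.  mmu_topup_memory_caches() preallocates objects with GFP_KERNEL in
 * a context where sleeping is allowed, so that later fault-handling code can
 * call mmu_memory_cache_alloc(), which never sleeps and never fails while
 * the cache holds objects.  The MMU_EXAMPLES guard is hypothetical and
 * defined nowhere; the block is for reading, not building.
 */
#ifdef MMU_EXAMPLES
static int example_cache_usage(struct kvm_vcpu *vcpu)
{
	struct kvm_rmap_desc *desc;
	int r;

	/* Phase 1: may sleep, may fail; done before entering fault paths. */
	r = mmu_topup_memory_cache(&vcpu->mmu_rmap_desc_cache,
				   rmap_desc_cache, 1);
	if (r)
		return r;	/* -ENOMEM propagated to the caller */

	/* Phase 2: guaranteed to succeed while the cache is topped up. */
	desc = mmu_memory_cache_alloc(&vcpu->mmu_rmap_desc_cache,
				      sizeof(*desc));
	(void)desc;
	return 0;
}
#endif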

static struct kvm_pte_chain *mmu_alloc_pte_chain(struct kvm_vcpu *vcpu)
{
	return mmu_memory_cache_alloc(&vcpu->mmu_pte_chain_cache,
				      sizeof(struct kvm_pte_chain));
}

static void mmu_free_pte_chain(struct kvm_pte_chain *pc)
{
	kfree(pc);
}

static struct kvm_rmap_desc *mmu_alloc_rmap_desc(struct kvm_vcpu *vcpu)
{
	return mmu_memory_cache_alloc(&vcpu->mmu_rmap_desc_cache,
				      sizeof(struct kvm_rmap_desc));
}

static void mmu_free_rmap_desc(struct kvm_rmap_desc *rd)
{
	kfree(rd);
}

/*
 * Take gfn and return the reverse mapping to it.
 * Note: gfn must be unaliased before this function gets called.
 */

static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_memory_slot *slot;

	slot = gfn_to_memslot(kvm, gfn);
	return &slot->rmap[gfn - slot->base_gfn];
}

/*
 * Reverse mapping data structures:
 *
 * If rmapp bit zero is zero, then rmapp points to the shadow page table
 * entry that points to page_address(page).
 *
 * If rmapp bit zero is one, (rmapp & ~1) points to a struct kvm_rmap_desc
 * containing more mappings.  (A short decoding sketch follows rmap_add
 * below.)
 */
static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
{
	struct kvm_mmu_page *page;
	struct kvm_rmap_desc *desc;
	unsigned long *rmapp;
	int i;

	if (!is_rmap_pte(*spte))
		return;
	gfn = unalias_gfn(vcpu->kvm, gfn);
	page = page_header(__pa(spte));
	page->gfns[spte - page->spt] = gfn;
	rmapp = gfn_to_rmap(vcpu->kvm, gfn);
	if (!*rmapp) {
		rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte);
		*rmapp = (unsigned long)spte;
	} else if (!(*rmapp & 1)) {
		rmap_printk("rmap_add: %p %llx 1->many\n", spte, *spte);
		desc = mmu_alloc_rmap_desc(vcpu);
		desc->shadow_ptes[0] = (u64 *)*rmapp;
		desc->shadow_ptes[1] = spte;
		*rmapp = (unsigned long)desc | 1;
	} else {
		rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte);
		desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
		while (desc->shadow_ptes[RMAP_EXT-1] && desc->more)
			desc = desc->more;
		if (desc->shadow_ptes[RMAP_EXT-1]) {
			desc->more = mmu_alloc_rmap_desc(vcpu);
			desc = desc->more;
		}
		for (i = 0; desc->shadow_ptes[i]; ++i)
			;
		desc->shadow_ptes[i] = spte;
	}
}
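
/*
 * Illustrative sketch (not part of the driver): decoding the tagged pointer
 * described above.  Bit zero of *rmapp selects between "single spte" and
 * "descriptor list".  The MMU_EXAMPLES guard is hypothetical.
 */
#ifdef MMU_EXAMPLES
static int example_rmap_count(unsigned long *rmapp)
{
	struct kvm_rmap_desc *desc;
	int i, n = 0;

	if (!*rmapp)		/* no mappings at all */
		return 0;
	if (!(*rmapp & 1))	/* bit 0 clear: one spte, stored directly */
		return 1;
	/* bit 0 set: mask the tag off to reach the descriptor chain */
	desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
	while (desc) {
		for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i)
			++n;
		desc = desc->more;
	}
	return n;
}
#endif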

static void rmap_desc_remove_entry(unsigned long *rmapp,
				   struct kvm_rmap_desc *desc,
				   int i,
				   struct kvm_rmap_desc *prev_desc)
{
	int j;

	for (j = RMAP_EXT - 1; !desc->shadow_ptes[j] && j > i; --j)
		;
	desc->shadow_ptes[i] = desc->shadow_ptes[j];
	desc->shadow_ptes[j] = NULL;
	if (j != 0)
		return;
	if (!prev_desc && !desc->more)
		*rmapp = (unsigned long)desc->shadow_ptes[0];
	else
		if (prev_desc)
			prev_desc->more = desc->more;
		else
			*rmapp = (unsigned long)desc->more | 1;
	mmu_free_rmap_desc(desc);
}

static void rmap_remove(struct kvm *kvm, u64 *spte)
{
	struct kvm_rmap_desc *desc;
	struct kvm_rmap_desc *prev_desc;
	struct kvm_mmu_page *page;
	struct page *release_page;
	unsigned long *rmapp;
	int i;

	if (!is_rmap_pte(*spte))
		return;
	page = page_header(__pa(spte));
	release_page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT);
	if (is_writeble_pte(*spte))
		kvm_release_page_dirty(release_page);
	else
		kvm_release_page_clean(release_page);
	rmapp = gfn_to_rmap(kvm, page->gfns[spte - page->spt]);
	if (!*rmapp) {
		printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
		BUG();
	} else if (!(*rmapp & 1)) {
		rmap_printk("rmap_remove: %p %llx 1->0\n", spte, *spte);
		if ((u64 *)*rmapp != spte) {
			printk(KERN_ERR "rmap_remove: %p %llx 1->BUG\n",
			       spte, *spte);
			BUG();
		}
		*rmapp = 0;
	} else {
		rmap_printk("rmap_remove: %p %llx many->many\n", spte, *spte);
		desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
		prev_desc = NULL;
		while (desc) {
			for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i)
				if (desc->shadow_ptes[i] == spte) {
					rmap_desc_remove_entry(rmapp,
							       desc, i,
							       prev_desc);
					return;
				}
			prev_desc = desc;
			desc = desc->more;
		}
		BUG();
	}
}

static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
{
	struct kvm_rmap_desc *desc;
	struct kvm_rmap_desc *prev_desc;
	u64 *prev_spte;
	int i;

	if (!*rmapp)
		return NULL;
	else if (!(*rmapp & 1)) {
		if (!spte)
			return (u64 *)*rmapp;
		return NULL;
	}
	desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
	prev_desc = NULL;
	prev_spte = NULL;
	while (desc) {
		for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i) {
			if (prev_spte == spte)
				return desc->shadow_ptes[i];
			prev_spte = desc->shadow_ptes[i];
		}
		desc = desc->more;
	}
	return NULL;
}

static void rmap_write_protect(struct kvm *kvm, u64 gfn)
{
	unsigned long *rmapp;
	u64 *spte;

	gfn = unalias_gfn(kvm, gfn);
	rmapp = gfn_to_rmap(kvm, gfn);

	spte = rmap_next(kvm, rmapp, NULL);
	while (spte) {
		BUG_ON(!spte);
		BUG_ON(!(*spte & PT_PRESENT_MASK));
		rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
		if (is_writeble_pte(*spte))
			set_shadow_pte(spte, *spte & ~PT_WRITABLE_MASK);
		kvm_flush_remote_tlbs(kvm);
		spte = rmap_next(kvm, rmapp, spte);
	}
}

#ifdef MMU_DEBUG
static int is_empty_shadow_page(u64 *spt)
{
	u64 *pos;
	u64 *end;

	for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
		if ((*pos & ~PT_SHADOW_IO_MARK) != shadow_trap_nonpresent_pte) {
			printk(KERN_ERR "%s: %p %llx\n", __FUNCTION__,
			       pos, *pos);
			return 0;
		}
	return 1;
}
#endif

static void kvm_mmu_free_page(struct kvm *kvm,
			      struct kvm_mmu_page *page_head)
{
	ASSERT(is_empty_shadow_page(page_head->spt));
	list_del(&page_head->link);
	__free_page(virt_to_page(page_head->spt));
	__free_page(virt_to_page(page_head->gfns));
	kfree(page_head);
	++kvm->n_free_mmu_pages;
}

static unsigned kvm_page_table_hashfn(gfn_t gfn)
{
	return gfn;
}

static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
					       u64 *parent_pte)
{
	struct kvm_mmu_page *page;

	if (!vcpu->kvm->n_free_mmu_pages)
		return NULL;

	page = mmu_memory_cache_alloc(&vcpu->mmu_page_header_cache,
				      sizeof *page);
	page->spt = mmu_memory_cache_alloc(&vcpu->mmu_page_cache, PAGE_SIZE);
	page->gfns = mmu_memory_cache_alloc(&vcpu->mmu_page_cache, PAGE_SIZE);
	set_page_private(virt_to_page(page->spt), (unsigned long)page);
	list_add(&page->link, &vcpu->kvm->active_mmu_pages);
	ASSERT(is_empty_shadow_page(page->spt));
	page->slot_bitmap = 0;
	page->multimapped = 0;
	page->parent_pte = parent_pte;
	--vcpu->kvm->n_free_mmu_pages;
	return page;
}

static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
				    struct kvm_mmu_page *page, u64 *parent_pte)
{
	struct kvm_pte_chain *pte_chain;
	struct hlist_node *node;
	int i;

	if (!parent_pte)
		return;
	if (!page->multimapped) {
		u64 *old = page->parent_pte;

		if (!old) {
			page->parent_pte = parent_pte;
			return;
		}
		page->multimapped = 1;
		pte_chain = mmu_alloc_pte_chain(vcpu);
		INIT_HLIST_HEAD(&page->parent_ptes);
		hlist_add_head(&pte_chain->link, &page->parent_ptes);
		pte_chain->parent_ptes[0] = old;
	}
	hlist_for_each_entry(pte_chain, node, &page->parent_ptes, link) {
		if (pte_chain->parent_ptes[NR_PTE_CHAIN_ENTRIES-1])
			continue;
		for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i)
			if (!pte_chain->parent_ptes[i]) {
				pte_chain->parent_ptes[i] = parent_pte;
				return;
			}
	}
	pte_chain = mmu_alloc_pte_chain(vcpu);
	BUG_ON(!pte_chain);
	hlist_add_head(&pte_chain->link, &page->parent_ptes);
	pte_chain->parent_ptes[0] = parent_pte;
}

static void mmu_page_remove_parent_pte(struct kvm_mmu_page *page,
				       u64 *parent_pte)
{
	struct kvm_pte_chain *pte_chain;
	struct hlist_node *node;
	int i;

	if (!page->multimapped) {
		BUG_ON(page->parent_pte != parent_pte);
		page->parent_pte = NULL;
		return;
	}
	hlist_for_each_entry(pte_chain, node, &page->parent_ptes, link)
		for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
			if (!pte_chain->parent_ptes[i])
				break;
			if (pte_chain->parent_ptes[i] != parent_pte)
				continue;
			while (i + 1 < NR_PTE_CHAIN_ENTRIES
			       && pte_chain->parent_ptes[i + 1]) {
				pte_chain->parent_ptes[i]
					= pte_chain->parent_ptes[i + 1];
				++i;
			}
			pte_chain->parent_ptes[i] = NULL;
			if (i == 0) {
				hlist_del(&pte_chain->link);
				mmu_free_pte_chain(pte_chain);
				if (hlist_empty(&page->parent_ptes)) {
					page->multimapped = 0;
					page->parent_pte = NULL;
				}
			}
			return;
		}
	BUG();
}

static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm,
						gfn_t gfn)
{
	unsigned index;
	struct hlist_head *bucket;
	struct kvm_mmu_page *page;
	struct hlist_node *node;

	pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
	bucket = &kvm->mmu_page_hash[index];
	hlist_for_each_entry(page, node, bucket, hash_link)
		if (page->gfn == gfn && !page->role.metaphysical) {
			pgprintk("%s: found role %x\n",
				 __FUNCTION__, page->role.word);
			return page;
		}
	return NULL;
}

static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
					     gfn_t gfn,
					     gva_t gaddr,
					     unsigned level,
					     int metaphysical,
					     unsigned hugepage_access,
					     u64 *parent_pte)
{
	union kvm_mmu_page_role role;
	unsigned index;
	unsigned quadrant;
	struct hlist_head *bucket;
	struct kvm_mmu_page *page;
	struct hlist_node *node;

	role.word = 0;
	role.glevels = vcpu->mmu.root_level;
	role.level = level;
	role.metaphysical = metaphysical;
	role.hugepage_access = hugepage_access;
	if (vcpu->mmu.root_level <= PT32_ROOT_LEVEL) {
		quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
		quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
		role.quadrant = quadrant;
	}
	pgprintk("%s: looking gfn %lx role %x\n", __FUNCTION__,
		 gfn, role.word);
	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
	bucket = &vcpu->kvm->mmu_page_hash[index];
	hlist_for_each_entry(page, node, bucket, hash_link)
		if (page->gfn == gfn && page->role.word == role.word) {
			mmu_page_add_parent_pte(vcpu, page, parent_pte);
			pgprintk("%s: found\n", __FUNCTION__);
			return page;
		}
	page = kvm_mmu_alloc_page(vcpu, parent_pte);
	if (!page)
		return page;
	pgprintk("%s: adding gfn %lx role %x\n", __FUNCTION__, gfn, role.word);
	page->gfn = gfn;
	page->role = role;
	hlist_add_head(&page->hash_link, bucket);
	vcpu->mmu.prefetch_page(vcpu, page);
	if (!metaphysical)
		rmap_write_protect(vcpu->kvm, gfn);
	return page;
}
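
/*
 * Illustrative sketch (not part of the driver): the quadrant computation
 * above.  With a 32-bit guest (glevels == 2, 1024 entries per guest table)
 * shadowed by 64-bit tables (512 entries), one guest page table is split
 * across two shadow pages at level 1 and the guest's single root across
 * four at level 2; role.quadrant records which fragment a shadow page is.
 * The numbers below replay the shifts with sample values; MMU_EXAMPLES is
 * a hypothetical guard.
 */
#ifdef MMU_EXAMPLES
static unsigned example_quadrant(gva_t gaddr, unsigned level)
{
	unsigned quadrant;

	quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
	quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
	/*
	 * level 1: gaddr 0x0 -> quadrant 0, gaddr 0x200000   -> quadrant 1
	 * level 2: gaddr 0x0 -> quadrant 0, gaddr 0xc0000000 -> quadrant 3
	 */
	return quadrant;
}
#endif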

static void kvm_mmu_page_unlink_children(struct kvm *kvm,
					 struct kvm_mmu_page *page)
{
	unsigned i;
	u64 *pt;
	u64 ent;

	pt = page->spt;

	if (page->role.level == PT_PAGE_TABLE_LEVEL) {
		for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
			if (is_shadow_present_pte(pt[i]))
				rmap_remove(kvm, &pt[i]);
			pt[i] = shadow_trap_nonpresent_pte;
		}
		kvm_flush_remote_tlbs(kvm);
		return;
	}

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
		ent = pt[i];

		pt[i] = shadow_trap_nonpresent_pte;
		if (!is_shadow_present_pte(ent))
			continue;
		ent &= PT64_BASE_ADDR_MASK;
		mmu_page_remove_parent_pte(page_header(ent), &pt[i]);
	}
	kvm_flush_remote_tlbs(kvm);
}

static void kvm_mmu_put_page(struct kvm_mmu_page *page,
			     u64 *parent_pte)
{
	mmu_page_remove_parent_pte(page, parent_pte);
}

static void kvm_mmu_reset_last_pte_updated(struct kvm *kvm)
{
	int i;

	for (i = 0; i < KVM_MAX_VCPUS; ++i)
		if (kvm->vcpus[i])
			kvm->vcpus[i]->last_pte_updated = NULL;
}

static void kvm_mmu_zap_page(struct kvm *kvm,
			     struct kvm_mmu_page *page)
{
	u64 *parent_pte;

	++kvm->stat.mmu_shadow_zapped;
	while (page->multimapped || page->parent_pte) {
		if (!page->multimapped)
			parent_pte = page->parent_pte;
		else {
			struct kvm_pte_chain *chain;

			chain = container_of(page->parent_ptes.first,
					     struct kvm_pte_chain, link);
			parent_pte = chain->parent_ptes[0];
		}
		BUG_ON(!parent_pte);
		kvm_mmu_put_page(page, parent_pte);
		set_shadow_pte(parent_pte, shadow_trap_nonpresent_pte);
	}
	kvm_mmu_page_unlink_children(kvm, page);
	if (!page->root_count) {
		hlist_del(&page->hash_link);
		kvm_mmu_free_page(kvm, page);
	} else
		list_move(&page->link, &kvm->active_mmu_pages);
	kvm_mmu_reset_last_pte_updated(kvm);
}

/*
 * Changing the number of mmu pages allocated to the vm.
 * Note: if kvm_nr_mmu_pages is too small, you will get a deadlock.
 */
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
{
	/*
	 * If we set the number of mmu pages to be smaller than the number
	 * of active pages, we must free some mmu pages before we change
	 * the value.  (A sketch of the accounting follows this function.)
	 */

	if ((kvm->n_alloc_mmu_pages - kvm->n_free_mmu_pages) >
	    kvm_nr_mmu_pages) {
		int n_used_mmu_pages = kvm->n_alloc_mmu_pages
				       - kvm->n_free_mmu_pages;

		while (n_used_mmu_pages > kvm_nr_mmu_pages) {
			struct kvm_mmu_page *page;

			page = container_of(kvm->active_mmu_pages.prev,
					    struct kvm_mmu_page, link);
			kvm_mmu_zap_page(kvm, page);
			n_used_mmu_pages--;
		}
		kvm->n_free_mmu_pages = 0;
	} else
		kvm->n_free_mmu_pages += kvm_nr_mmu_pages
					 - kvm->n_alloc_mmu_pages;

	kvm->n_alloc_mmu_pages = kvm_nr_mmu_pages;
}
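
/*
 * Illustrative sketch (not part of the driver): the accounting invariant
 * behind kvm_mmu_change_mmu_pages().  Used pages are n_alloc - n_free; when
 * the new limit is below that, victims are zapped from the tail of
 * active_mmu_pages (the least recently added) until the limit holds.
 * MMU_EXAMPLES is a hypothetical guard.
 */
#ifdef MMU_EXAMPLES
static void example_mmu_page_accounting(struct kvm *kvm)
{
	/* Suppose 1024 pages allocated, 24 free: 1000 are in use. */
	unsigned int used = kvm->n_alloc_mmu_pages - kvm->n_free_mmu_pages;

	/*
	 * Shrinking to 900 zaps 100 pages from the tail of active_mmu_pages
	 * and leaves n_free_mmu_pages at 0; growing to 1100 would instead
	 * just add 1100 - 1024 = 76 pages to n_free_mmu_pages.
	 */
	kvm_mmu_change_mmu_pages(kvm, 900);
	(void)used;
}
#endif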

static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
{
	unsigned index;
	struct hlist_head *bucket;
	struct kvm_mmu_page *page;
	struct hlist_node *node, *n;
	int r;

	pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
	r = 0;
	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
	bucket = &kvm->mmu_page_hash[index];
	hlist_for_each_entry_safe(page, node, n, bucket, hash_link)
		if (page->gfn == gfn && !page->role.metaphysical) {
			pgprintk("%s: gfn %lx role %x\n", __FUNCTION__, gfn,
				 page->role.word);
			kvm_mmu_zap_page(kvm, page);
			r = 1;
		}
	return r;
}

static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
{
	struct kvm_mmu_page *page;

	while ((page = kvm_mmu_lookup_page(kvm, gfn)) != NULL) {
		pgprintk("%s: zap %lx %x\n",
			 __FUNCTION__, gfn, page->role.word);
		kvm_mmu_zap_page(kvm, page);
	}
}

static void page_header_update_slot(struct kvm *kvm, void *pte, gpa_t gpa)
{
	int slot = memslot_id(kvm, gfn_to_memslot(kvm, gpa >> PAGE_SHIFT));
	struct kvm_mmu_page *page_head = page_header(__pa(pte));

	__set_bit(slot, &page_head->slot_bitmap);
}

hpa_t gpa_to_hpa(struct kvm *kvm, gpa_t gpa)
{
	struct page *page;
	hpa_t hpa;

	ASSERT((gpa & HPA_ERR_MASK) == 0);
	page = gfn_to_page(kvm, gpa >> PAGE_SHIFT);
	hpa = ((hpa_t)page_to_pfn(page) << PAGE_SHIFT) | (gpa & (PAGE_SIZE-1));
	if (is_error_page(page))
		return hpa | HPA_ERR_MASK;
	return hpa;
}

hpa_t gva_to_hpa(struct kvm_vcpu *vcpu, gva_t gva)
{
	gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);

	if (gpa == UNMAPPED_GVA)
		return UNMAPPED_GVA;
	return gpa_to_hpa(vcpu->kvm, gpa);
}

struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
{
	gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);

	if (gpa == UNMAPPED_GVA)
		return NULL;
	return pfn_to_page(gpa_to_hpa(vcpu->kvm, gpa) >> PAGE_SHIFT);
}

static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
{
}

static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, hpa_t p)
{
	int level = PT32E_ROOT_LEVEL;
	hpa_t table_addr = vcpu->mmu.root_hpa;
	struct page *page;

	page = pfn_to_page(p >> PAGE_SHIFT);
	for (; ; level--) {
		u32 index = PT64_INDEX(v, level);
		u64 *table;
		u64 pte;

		ASSERT(VALID_PAGE(table_addr));
		table = __va(table_addr);

		if (level == 1) {
			int was_rmapped;

			pte = table[index];
			was_rmapped = is_rmap_pte(pte);
			if (is_shadow_present_pte(pte) && is_writeble_pte(pte)) {
				kvm_release_page_clean(page);
				return 0;
			}
			mark_page_dirty(vcpu->kvm, v >> PAGE_SHIFT);
			page_header_update_slot(vcpu->kvm, table, v);
			table[index] = p | PT_PRESENT_MASK | PT_WRITABLE_MASK |
								PT_USER_MASK;
			if (!was_rmapped)
				rmap_add(vcpu, &table[index], v >> PAGE_SHIFT);
			else
				kvm_release_page_clean(page);

			return 0;
		}

		if (table[index] == shadow_trap_nonpresent_pte) {
			struct kvm_mmu_page *new_table;
			gfn_t pseudo_gfn;

			pseudo_gfn = (v & PT64_DIR_BASE_ADDR_MASK)
				>> PAGE_SHIFT;
			new_table = kvm_mmu_get_page(vcpu, pseudo_gfn,
						     v, level - 1,
						     1, 3, &table[index]);
			if (!new_table) {
				pgprintk("nonpaging_map: ENOMEM\n");
				kvm_release_page_clean(page);
				return -ENOMEM;
			}

			table[index] = __pa(new_table->spt) | PT_PRESENT_MASK
				| PT_WRITABLE_MASK | PT_USER_MASK;
		}
		table_addr = table[index] & PT64_BASE_ADDR_MASK;
	}
}

static void nonpaging_prefetch_page(struct kvm_vcpu *vcpu,
				    struct kvm_mmu_page *sp)
{
	int i;

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
		sp->spt[i] = shadow_trap_nonpresent_pte;
}

static void mmu_free_roots(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_mmu_page *page;

	if (!VALID_PAGE(vcpu->mmu.root_hpa))
		return;
#ifdef CONFIG_X86_64
	if (vcpu->mmu.shadow_root_level == PT64_ROOT_LEVEL) {
		hpa_t root = vcpu->mmu.root_hpa;

		page = page_header(root);
		--page->root_count;
		vcpu->mmu.root_hpa = INVALID_PAGE;
		return;
	}
#endif
	for (i = 0; i < 4; ++i) {
		hpa_t root = vcpu->mmu.pae_root[i];

		if (root) {
			root &= PT64_BASE_ADDR_MASK;
			page = page_header(root);
			--page->root_count;
		}
		vcpu->mmu.pae_root[i] = INVALID_PAGE;
	}
	vcpu->mmu.root_hpa = INVALID_PAGE;
}

static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
{
	int i;
	gfn_t root_gfn;
	struct kvm_mmu_page *page;

	root_gfn = vcpu->cr3 >> PAGE_SHIFT;

#ifdef CONFIG_X86_64
	if (vcpu->mmu.shadow_root_level == PT64_ROOT_LEVEL) {
		hpa_t root = vcpu->mmu.root_hpa;

		ASSERT(!VALID_PAGE(root));
		page = kvm_mmu_get_page(vcpu, root_gfn, 0,
					PT64_ROOT_LEVEL, 0, 0, NULL);
		root = __pa(page->spt);
		++page->root_count;
		vcpu->mmu.root_hpa = root;
		return;
	}
#endif
	for (i = 0; i < 4; ++i) {
		hpa_t root = vcpu->mmu.pae_root[i];

		ASSERT(!VALID_PAGE(root));
		if (vcpu->mmu.root_level == PT32E_ROOT_LEVEL) {
			if (!is_present_pte(vcpu->pdptrs[i])) {
				vcpu->mmu.pae_root[i] = 0;
				continue;
			}
			root_gfn = vcpu->pdptrs[i] >> PAGE_SHIFT;
		} else if (vcpu->mmu.root_level == 0)
			root_gfn = 0;
		page = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
					PT32_ROOT_LEVEL, !is_paging(vcpu),
					0, NULL);
		root = __pa(page->spt);
		++page->root_count;
		vcpu->mmu.pae_root[i] = root | PT_PRESENT_MASK;
	}
	vcpu->mmu.root_hpa = __pa(vcpu->mmu.pae_root);
}
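
/*
 * Illustrative note (not part of the driver): with PAE or 32-bit paging the
 * shadow root is the 4-entry pae_root table, and entry i maps the 1GB slice
 * starting at guest address i << 30, which is why kvm_mmu_get_page() is
 * passed i << 30 as the address above.  MMU_EXAMPLES is a hypothetical
 * guard.
 */
#ifdef MMU_EXAMPLES
static int example_pae_root_index(gva_t gva)
{
	/* 0x00000000 -> 0, 0x3fffffff -> 0, 0x40000000 -> 1, 0xc0000000 -> 3 */
	return gva >> 30;
}
#endif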

static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
{
	return vaddr;
}

static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
				u32 error_code)
{
	gpa_t addr = gva;
	hpa_t paddr;
	int r;

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		return r;

	ASSERT(vcpu);
	ASSERT(VALID_PAGE(vcpu->mmu.root_hpa));

	paddr = gpa_to_hpa(vcpu->kvm, addr & PT64_BASE_ADDR_MASK);

	if (is_error_hpa(paddr)) {
		kvm_release_page_clean(pfn_to_page((paddr & PT64_BASE_ADDR_MASK)
						   >> PAGE_SHIFT));
		return 1;
	}

	return nonpaging_map(vcpu, addr & PAGE_MASK, paddr);
}

static void nonpaging_free(struct kvm_vcpu *vcpu)
{
	mmu_free_roots(vcpu);
}

static int nonpaging_init_context(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *context = &vcpu->mmu;

	context->new_cr3 = nonpaging_new_cr3;
	context->page_fault = nonpaging_page_fault;
	context->gva_to_gpa = nonpaging_gva_to_gpa;
	context->free = nonpaging_free;
	context->prefetch_page = nonpaging_prefetch_page;
	context->root_level = 0;
	context->shadow_root_level = PT32E_ROOT_LEVEL;
	context->root_hpa = INVALID_PAGE;
	return 0;
}

static void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
{
	++vcpu->stat.tlb_flush;
	kvm_x86_ops->tlb_flush(vcpu);
}

static void paging_new_cr3(struct kvm_vcpu *vcpu)
{
	pgprintk("%s: cr3 %lx\n", __FUNCTION__, vcpu->cr3);
	mmu_free_roots(vcpu);
}

static void inject_page_fault(struct kvm_vcpu *vcpu,
			      u64 addr,
			      u32 err_code)
{
	kvm_x86_ops->inject_page_fault(vcpu, addr, err_code);
}

static void paging_free(struct kvm_vcpu *vcpu)
{
	nonpaging_free(vcpu);
}

#define PTTYPE 64
#include "paging_tmpl.h"
#undef PTTYPE

#define PTTYPE 32
#include "paging_tmpl.h"
#undef PTTYPE

static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
{
	struct kvm_mmu *context = &vcpu->mmu;

	ASSERT(is_pae(vcpu));
	context->new_cr3 = paging_new_cr3;
	context->page_fault = paging64_page_fault;
	context->gva_to_gpa = paging64_gva_to_gpa;
	context->prefetch_page = paging64_prefetch_page;
	context->free = paging_free;
	context->root_level = level;
	context->shadow_root_level = level;
	context->root_hpa = INVALID_PAGE;
	return 0;
}

static int paging64_init_context(struct kvm_vcpu *vcpu)
{
	return paging64_init_context_common(vcpu, PT64_ROOT_LEVEL);
}

static int paging32_init_context(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu *context = &vcpu->mmu;

	context->new_cr3 = paging_new_cr3;
	context->page_fault = paging32_page_fault;
	context->gva_to_gpa = paging32_gva_to_gpa;
	context->free = paging_free;
	context->prefetch_page = paging32_prefetch_page;
	context->root_level = PT32_ROOT_LEVEL;
	context->shadow_root_level = PT32E_ROOT_LEVEL;
	context->root_hpa = INVALID_PAGE;
	return 0;
}

static int paging32E_init_context(struct kvm_vcpu *vcpu)
{
	return paging64_init_context_common(vcpu, PT32E_ROOT_LEVEL);
}

static int init_kvm_mmu(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);
	ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));

	if (!is_paging(vcpu))
		return nonpaging_init_context(vcpu);
	else if (is_long_mode(vcpu))
		return paging64_init_context(vcpu);
	else if (is_pae(vcpu))
		return paging32E_init_context(vcpu);
	else
		return paging32_init_context(vcpu);
}

static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);
	if (VALID_PAGE(vcpu->mmu.root_hpa)) {
		vcpu->mmu.free(vcpu);
		vcpu->mmu.root_hpa = INVALID_PAGE;
	}
}

int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
{
	destroy_kvm_mmu(vcpu);
	return init_kvm_mmu(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);

int kvm_mmu_load(struct kvm_vcpu *vcpu)
{
	int r;

	mutex_lock(&vcpu->kvm->lock);
	r = mmu_topup_memory_caches(vcpu);
	if (r)
		goto out;
	mmu_alloc_roots(vcpu);
	kvm_x86_ops->set_cr3(vcpu, vcpu->mmu.root_hpa);
	kvm_mmu_flush_tlb(vcpu);
out:
	mutex_unlock(&vcpu->kvm->lock);
	return r;
}
EXPORT_SYMBOL_GPL(kvm_mmu_load);

void kvm_mmu_unload(struct kvm_vcpu *vcpu)
{
	mmu_free_roots(vcpu);
}

static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
				  struct kvm_mmu_page *page,
				  u64 *spte)
{
	u64 pte;
	struct kvm_mmu_page *child;

	pte = *spte;
	if (is_shadow_present_pte(pte)) {
		if (page->role.level == PT_PAGE_TABLE_LEVEL)
			rmap_remove(vcpu->kvm, spte);
		else {
			child = page_header(pte & PT64_BASE_ADDR_MASK);
			mmu_page_remove_parent_pte(child, spte);
		}
	}
	set_shadow_pte(spte, shadow_trap_nonpresent_pte);
	kvm_flush_remote_tlbs(vcpu->kvm);
}

static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
				  struct kvm_mmu_page *page,
				  u64 *spte,
				  const void *new, int bytes,
				  int offset_in_pte)
{
	if (page->role.level != PT_PAGE_TABLE_LEVEL) {
		++vcpu->kvm->stat.mmu_pde_zapped;
		return;
	}

	++vcpu->kvm->stat.mmu_pte_updated;
	if (page->role.glevels == PT32_ROOT_LEVEL)
		paging32_update_pte(vcpu, page, spte, new, bytes,
				    offset_in_pte);
	else
		paging64_update_pte(vcpu, page, spte, new, bytes,
				    offset_in_pte);
}

static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
{
	u64 *spte = vcpu->last_pte_updated;

	return !!(spte && (*spte & PT_ACCESSED_MASK));
}

void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
		       const u8 *new, int bytes)
{
	gfn_t gfn = gpa >> PAGE_SHIFT;
	struct kvm_mmu_page *page;
	struct hlist_node *node, *n;
	struct hlist_head *bucket;
	unsigned index;
	u64 *spte;
	unsigned offset = offset_in_page(gpa);
	unsigned pte_size;
	unsigned page_offset;
	unsigned misaligned;
	unsigned quadrant;
	int level;
	int flooded = 0;
	int npte;

	pgprintk("%s: gpa %llx bytes %d\n", __FUNCTION__, gpa, bytes);
	++vcpu->kvm->stat.mmu_pte_write;
	kvm_mmu_audit(vcpu, "pre pte write");
	if (gfn == vcpu->last_pt_write_gfn
	    && !last_updated_pte_accessed(vcpu)) {
		++vcpu->last_pt_write_count;
		if (vcpu->last_pt_write_count >= 3)
			flooded = 1;
	} else {
		vcpu->last_pt_write_gfn = gfn;
		vcpu->last_pt_write_count = 1;
		vcpu->last_pte_updated = NULL;
	}
	index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
	bucket = &vcpu->kvm->mmu_page_hash[index];
	hlist_for_each_entry_safe(page, node, n, bucket, hash_link) {
		if (page->gfn != gfn || page->role.metaphysical)
			continue;
		pte_size = page->role.glevels == PT32_ROOT_LEVEL ? 4 : 8;
		misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
		misaligned |= bytes < 4;
		if (misaligned || flooded) {
			/*
			 * Misaligned accesses are too much trouble to fix
			 * up; also, they usually indicate a page is not used
			 * as a page table.
			 *
			 * If we're seeing too many writes to a page,
			 * it may no longer be a page table, or we may be
			 * forking, in which case it is better to unmap the
			 * page.
			 */
			pgprintk("misaligned: gpa %llx bytes %d role %x\n",
				 gpa, bytes, page->role.word);
			kvm_mmu_zap_page(vcpu->kvm, page);
			++vcpu->kvm->stat.mmu_flooded;
			continue;
		}
		page_offset = offset;
		level = page->role.level;
		npte = 1;
		if (page->role.glevels == PT32_ROOT_LEVEL) {
			page_offset <<= 1;	/* 32->64 */
			/*
			 * A 32-bit pde maps 4MB while the shadow pdes map
			 * only 2MB.  So we need to double the offset again
			 * and zap two pdes instead of one.
			 */
			if (level == PT32_ROOT_LEVEL) {
				page_offset &= ~7; /* kill rounding error */
				page_offset <<= 1;
				npte = 2;
			}
			quadrant = page_offset >> PAGE_SHIFT;
			page_offset &= ~PAGE_MASK;
			if (quadrant != page->role.quadrant)
				continue;
		}
		spte = &page->spt[page_offset / sizeof(*spte)];
		while (npte--) {
			mmu_pte_write_zap_pte(vcpu, page, spte);
			mmu_pte_write_new_pte(vcpu, page, spte, new, bytes,
					      page_offset & (pte_size - 1));
			++spte;
		}
	}
	kvm_mmu_audit(vcpu, "post pte write");
}
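
/*
 * Illustrative sketch (not part of the driver): the misalignment test used
 * in kvm_mmu_pte_write().  XORing the first and last byte offsets of the
 * write and masking off the low bits of one pte detects a write that
 * straddles a pte boundary.  MMU_EXAMPLES is a hypothetical guard.
 */
#ifdef MMU_EXAMPLES
static unsigned example_misaligned(unsigned offset, int bytes,
				   unsigned pte_size)
{
	/*
	 * pte_size 8: offset 16, bytes 8 -> 0 (aligned, one pte)
	 *             offset 20, bytes 8 -> nonzero (spans two ptes)
	 *             offset 16, bytes 2 -> 0 here, but bytes < 4 is also
	 *             treated as misaligned by the caller
	 */
	return (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
}
#endif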

int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
{
	gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);

	return kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
}

void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
{
	while (vcpu->kvm->n_free_mmu_pages < KVM_REFILL_PAGES) {
		struct kvm_mmu_page *page;

		page = container_of(vcpu->kvm->active_mmu_pages.prev,
				    struct kvm_mmu_page, link);
		kvm_mmu_zap_page(vcpu->kvm, page);
		++vcpu->kvm->stat.mmu_recycled;
	}
}

int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
{
	int r;
	enum emulation_result er;

	mutex_lock(&vcpu->kvm->lock);
	r = vcpu->mmu.page_fault(vcpu, cr2, error_code);
	if (r < 0)
		goto out;

	if (!r) {
		r = 1;
		goto out;
	}

	r = mmu_topup_memory_caches(vcpu);
	if (r)
		goto out;

	er = emulate_instruction(vcpu, vcpu->run, cr2, error_code, 0);
	mutex_unlock(&vcpu->kvm->lock);

	switch (er) {
	case EMULATE_DONE:
		return 1;
	case EMULATE_DO_MMIO:
		++vcpu->stat.mmio_exits;
		return 0;
	case EMULATE_FAIL:
		kvm_report_emulation_failure(vcpu, "pagetable");
		return 1;
	default:
		BUG();
	}
out:
	mutex_unlock(&vcpu->kvm->lock);
	return r;
}
EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);

static void free_mmu_pages(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu_page *page;

	while (!list_empty(&vcpu->kvm->active_mmu_pages)) {
		page = container_of(vcpu->kvm->active_mmu_pages.next,
				    struct kvm_mmu_page, link);
		kvm_mmu_zap_page(vcpu->kvm, page);
	}
	free_page((unsigned long)vcpu->mmu.pae_root);
}

static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
{
	struct page *page;
	int i;

	ASSERT(vcpu);

	if (vcpu->kvm->n_requested_mmu_pages)
		vcpu->kvm->n_free_mmu_pages = vcpu->kvm->n_requested_mmu_pages;
	else
		vcpu->kvm->n_free_mmu_pages = vcpu->kvm->n_alloc_mmu_pages;
	/*
	 * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
	 * Therefore we need to allocate shadow page tables in the first
	 * 4GB of memory, which happens to fit the DMA32 zone.
	 */
	page = alloc_page(GFP_KERNEL | __GFP_DMA32);
	if (!page)
		goto error_1;
	vcpu->mmu.pae_root = page_address(page);
	for (i = 0; i < 4; ++i)
		vcpu->mmu.pae_root[i] = INVALID_PAGE;

	return 0;

error_1:
	free_mmu_pages(vcpu);
	return -ENOMEM;
}

int kvm_mmu_create(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);
	ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));

	return alloc_mmu_pages(vcpu);
}

int kvm_mmu_setup(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);
	ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));

	return init_kvm_mmu(vcpu);
}

void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
{
	ASSERT(vcpu);

	destroy_kvm_mmu(vcpu);
	free_mmu_pages(vcpu);
	mmu_free_memory_caches(vcpu);
}

void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
{
	struct kvm_mmu_page *page;

	list_for_each_entry(page, &kvm->active_mmu_pages, link) {
		int i;
		u64 *pt;

		if (!test_bit(slot, &page->slot_bitmap))
			continue;

		pt = page->spt;
		for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
			/* avoid RMW */
			if (pt[i] & PT_WRITABLE_MASK)
				pt[i] &= ~PT_WRITABLE_MASK;
	}
}

void kvm_mmu_zap_all(struct kvm *kvm)
{
	struct kvm_mmu_page *page, *node;

	list_for_each_entry_safe(page, node, &kvm->active_mmu_pages, link)
		kvm_mmu_zap_page(kvm, page);

	kvm_flush_remote_tlbs(kvm);
}

void kvm_mmu_module_exit(void)
{
	if (pte_chain_cache)
		kmem_cache_destroy(pte_chain_cache);
	if (rmap_desc_cache)
		kmem_cache_destroy(rmap_desc_cache);
	if (mmu_page_header_cache)
		kmem_cache_destroy(mmu_page_header_cache);
}

int kvm_mmu_module_init(void)
{
	pte_chain_cache = kmem_cache_create("kvm_pte_chain",
					    sizeof(struct kvm_pte_chain),
					    0, 0, NULL);
	if (!pte_chain_cache)
		goto nomem;
	rmap_desc_cache = kmem_cache_create("kvm_rmap_desc",
					    sizeof(struct kvm_rmap_desc),
					    0, 0, NULL);
	if (!rmap_desc_cache)
		goto nomem;

	mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
						  sizeof(struct kvm_mmu_page),
						  0, 0, NULL);
	if (!mmu_page_header_cache)
		goto nomem;

	return 0;

nomem:
	kvm_mmu_module_exit();
	return -ENOMEM;
}

/*
 * Calculate the number of mmu pages needed for the kvm guest.
 */
unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
{
	int i;
	unsigned int nr_mmu_pages;
	unsigned int nr_pages = 0;

	for (i = 0; i < kvm->nmemslots; i++)
		nr_pages += kvm->memslots[i].npages;

	nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
	nr_mmu_pages = max(nr_mmu_pages,
			   (unsigned int) KVM_MIN_ALLOC_MMU_PAGES);

	return nr_mmu_pages;
}
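
/*
 * Illustrative arithmetic (not part of the driver): for a guest with 4GB of
 * memory (1048576 4KB pages) and a KVM_PERMILLE_MMU_PAGES of, say, 20, the
 * function above yields 1048576 * 20 / 1000 = 20971 shadow pages, clamped
 * from below by KVM_MIN_ALLOC_MMU_PAGES.  MMU_EXAMPLES is a hypothetical
 * guard; the sample constant is an assumption for the worked numbers only.
 */
#ifdef MMU_EXAMPLES
static unsigned int example_nr_mmu_pages(unsigned int nr_guest_pages)
{
	unsigned int n = nr_guest_pages * KVM_PERMILLE_MMU_PAGES / 1000;

	return max(n, (unsigned int) KVM_MIN_ALLOC_MMU_PAGES);
}
#endif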

#ifdef AUDIT

static const char *audit_msg;

static gva_t canonicalize(gva_t gva)
{
#ifdef CONFIG_X86_64
	gva = (long long)(gva << 16) >> 16;
#endif
	return gva;
}

static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
				gva_t va, int level)
{
	u64 *pt = __va(page_pte & PT64_BASE_ADDR_MASK);
	int i;
	gva_t va_delta = 1ul << (PAGE_SHIFT + 9 * (level - 1));

	for (i = 0; i < PT64_ENT_PER_PAGE; ++i, va += va_delta) {
		u64 ent = pt[i];

		if (ent == shadow_trap_nonpresent_pte)
			continue;

		va = canonicalize(va);
		if (level > 1) {
			if (ent == shadow_notrap_nonpresent_pte)
				printk(KERN_ERR "audit: (%s) nontrapping pte"
				       " in nonleaf level: levels %d gva %lx"
				       " level %d pte %llx\n", audit_msg,
				       vcpu->mmu.root_level, va, level, ent);

			audit_mappings_page(vcpu, ent, va, level - 1);
		} else {
			gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, va);
			hpa_t hpa = gpa_to_hpa(vcpu->kvm, gpa);
			struct page *page;

			if (is_shadow_present_pte(ent)
			    && (ent & PT64_BASE_ADDR_MASK) != hpa)
				printk(KERN_ERR "xx audit error: (%s) levels %d"
				       " gva %lx gpa %llx hpa %llx ent %llx %d\n",
				       audit_msg, vcpu->mmu.root_level,
				       va, gpa, hpa, ent,
				       is_shadow_present_pte(ent));
			else if (ent == shadow_notrap_nonpresent_pte
				 && !is_error_hpa(hpa))
				printk(KERN_ERR "audit: (%s) notrap shadow,"
				       " valid guest gva %lx\n", audit_msg, va);
			page = pfn_to_page((gpa & PT64_BASE_ADDR_MASK)
					   >> PAGE_SHIFT);
			kvm_release_page_clean(page);
		}
	}
}

static void audit_mappings(struct kvm_vcpu *vcpu)
{
	unsigned i;

	if (vcpu->mmu.root_level == 4)
		audit_mappings_page(vcpu, vcpu->mmu.root_hpa, 0, 4);
	else
		for (i = 0; i < 4; ++i)
			if (vcpu->mmu.pae_root[i] & PT_PRESENT_MASK)
				audit_mappings_page(vcpu,
						    vcpu->mmu.pae_root[i],
						    i << 30,
						    2);
}

static int count_rmaps(struct kvm_vcpu *vcpu)
{
	int nmaps = 0;
	int i, j, k;

	for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
		struct kvm_memory_slot *m = &vcpu->kvm->memslots[i];
		struct kvm_rmap_desc *d;

		for (j = 0; j < m->npages; ++j) {
			unsigned long *rmapp = &m->rmap[j];

			if (!*rmapp)
				continue;
			if (!(*rmapp & 1)) {
				++nmaps;
				continue;
			}
			d = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
			while (d) {
				for (k = 0; k < RMAP_EXT; ++k)
					if (d->shadow_ptes[k])
						++nmaps;
					else
						break;
				d = d->more;
			}
		}
	}
	return nmaps;
}

static int count_writable_mappings(struct kvm_vcpu *vcpu)
{
	int nmaps = 0;
	struct kvm_mmu_page *page;
	int i;

	list_for_each_entry(page, &vcpu->kvm->active_mmu_pages, link) {
		u64 *pt = page->spt;

		if (page->role.level != PT_PAGE_TABLE_LEVEL)
			continue;

		for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
			u64 ent = pt[i];

			if (!(ent & PT_PRESENT_MASK))
				continue;
			if (!(ent & PT_WRITABLE_MASK))
				continue;
			++nmaps;
		}
	}
	return nmaps;
}

static void audit_rmap(struct kvm_vcpu *vcpu)
{
	int n_rmap = count_rmaps(vcpu);
	int n_actual = count_writable_mappings(vcpu);

	if (n_rmap != n_actual)
		printk(KERN_ERR "%s: (%s) rmap %d actual %d\n",
		       __FUNCTION__, audit_msg, n_rmap, n_actual);
}

static void audit_write_protection(struct kvm_vcpu *vcpu)
{
	struct kvm_mmu_page *page;
	struct kvm_memory_slot *slot;
	unsigned long *rmapp;
	gfn_t gfn;

	list_for_each_entry(page, &vcpu->kvm->active_mmu_pages, link) {
		if (page->role.metaphysical)
			continue;

		slot = gfn_to_memslot(vcpu->kvm, page->gfn);
		gfn = unalias_gfn(vcpu->kvm, page->gfn);
		rmapp = &slot->rmap[gfn - slot->base_gfn];
		if (*rmapp)
			printk(KERN_ERR "%s: (%s) shadow page has writable"
			       " mappings: gfn %lx role %x\n",
			       __FUNCTION__, audit_msg, page->gfn,
			       page->role.word);
	}
}

static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg)
{
	int olddbg = dbg;

	dbg = 0;
	audit_msg = msg;
	audit_rmap(vcpu);
	audit_write_protection(vcpu);
	audit_mappings(vcpu);
	dbg = olddbg;
}

#endif