/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "vmx.h"
#include "kvm.h"

#include <linux/types.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/module.h>

#include <asm/page.h>
#include <asm/cmpxchg.h>

#undef MMU_DEBUG

#undef AUDIT

#ifdef AUDIT
static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg);
#else
static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg) {}
#endif

#ifdef MMU_DEBUG

#define pgprintk(x...) do { if (dbg) printk(x); } while (0)
#define rmap_printk(x...) do { if (dbg) printk(x); } while (0)

#else

#define pgprintk(x...) do { } while (0)
#define rmap_printk(x...) do { } while (0)

#endif

#if defined(MMU_DEBUG) || defined(AUDIT)
static int dbg = 1;
#endif

#ifndef MMU_DEBUG
#define ASSERT(x) do { } while (0)
#else
#define ASSERT(x)                                                       \
        if (!(x)) {                                                     \
                printk(KERN_WARNING "assertion failed %s:%d: %s\n",     \
                       __FILE__, __LINE__, #x);                         \
        }
#endif

#define PT64_PT_BITS 9
#define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)
#define PT32_PT_BITS 10
#define PT32_ENT_PER_PAGE (1 << PT32_PT_BITS)

#define PT_WRITABLE_SHIFT 1

#define PT_PRESENT_MASK (1ULL << 0)
#define PT_WRITABLE_MASK (1ULL << PT_WRITABLE_SHIFT)
#define PT_USER_MASK (1ULL << 2)
#define PT_PWT_MASK (1ULL << 3)
#define PT_PCD_MASK (1ULL << 4)
#define PT_ACCESSED_MASK (1ULL << 5)
#define PT_DIRTY_MASK (1ULL << 6)
#define PT_PAGE_SIZE_MASK (1ULL << 7)
#define PT_PAT_MASK (1ULL << 7)
#define PT_GLOBAL_MASK (1ULL << 8)
#define PT64_NX_MASK (1ULL << 63)

#define PT_PAT_SHIFT 7
#define PT_DIR_PAT_SHIFT 12
#define PT_DIR_PAT_MASK (1ULL << PT_DIR_PAT_SHIFT)

#define PT32_DIR_PSE36_SIZE 4
#define PT32_DIR_PSE36_SHIFT 13
#define PT32_DIR_PSE36_MASK (((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)


#define PT_FIRST_AVAIL_BITS_SHIFT 9
#define PT64_SECOND_AVAIL_BITS_SHIFT 52

#define PT_SHADOW_IO_MARK (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)

#define VALID_PAGE(x) ((x) != INVALID_PAGE)

#define PT64_LEVEL_BITS 9

#define PT64_LEVEL_SHIFT(level) \
                (PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS)

#define PT64_LEVEL_MASK(level) \
                (((1ULL << PT64_LEVEL_BITS) - 1) << PT64_LEVEL_SHIFT(level))

#define PT64_INDEX(address, level)\
        (((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))


#define PT32_LEVEL_BITS 10

#define PT32_LEVEL_SHIFT(level) \
                (PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)

#define PT32_LEVEL_MASK(level) \
                (((1ULL << PT32_LEVEL_BITS) - 1) << PT32_LEVEL_SHIFT(level))

#define PT32_INDEX(address, level)\
        (((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))


#define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
#define PT64_DIR_BASE_ADDR_MASK \
        (PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + PT64_LEVEL_BITS)) - 1))

#define PT32_BASE_ADDR_MASK PAGE_MASK
#define PT32_DIR_BASE_ADDR_MASK \
        (PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))


#define PFERR_PRESENT_MASK (1U << 0)
#define PFERR_WRITE_MASK (1U << 1)
#define PFERR_USER_MASK (1U << 2)
#define PFERR_FETCH_MASK (1U << 4)

#define PT64_ROOT_LEVEL 4
#define PT32_ROOT_LEVEL 2
#define PT32E_ROOT_LEVEL 3

#define PT_DIRECTORY_LEVEL 2
#define PT_PAGE_TABLE_LEVEL 1

#define RMAP_EXT 4

struct kvm_rmap_desc {
        u64 *shadow_ptes[RMAP_EXT];
        struct kvm_rmap_desc *more;
};

static struct kmem_cache *pte_chain_cache;
static struct kmem_cache *rmap_desc_cache;
static struct kmem_cache *mmu_page_header_cache;

static int is_write_protection(struct kvm_vcpu *vcpu)
{
        return vcpu->cr0 & X86_CR0_WP;
}

static int is_cpuid_PSE36(void)
{
        return 1;
}

static int is_nx(struct kvm_vcpu *vcpu)
{
        return vcpu->shadow_efer & EFER_NX;
}

static int is_present_pte(unsigned long pte)
{
        return pte & PT_PRESENT_MASK;
}

static int is_writeble_pte(unsigned long pte)
{
        return pte & PT_WRITABLE_MASK;
}

static int is_io_pte(unsigned long pte)
{
        return pte & PT_SHADOW_IO_MARK;
}

static int is_rmap_pte(u64 pte)
{
        return (pte & (PT_WRITABLE_MASK | PT_PRESENT_MASK))
                == (PT_WRITABLE_MASK | PT_PRESENT_MASK);
}

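/*
 * Write a shadow pte.  set_64bit() is used so the update is atomic even
 * on i386, where a plain 64-bit store would be split into two 32-bit
 * writes that a concurrent hardware page walk could observe torn.
 */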
static void set_shadow_pte(u64 *sptep, u64 spte)
{
#ifdef CONFIG_X86_64
        set_64bit((unsigned long *)sptep, spte);
#else
        set_64bit((unsigned long long *)sptep, spte);
#endif
}

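/*
 * Pre-allocate objects the MMU may need while handling a fault, since
 * the fault path runs under kvm->lock where sleeping allocations are
 * not allowed.  mmu_topup_memory_caches() first tries GFP_NOWAIT with
 * the lock held, and only drops the lock to retry with GFP_KERNEL if
 * that fails.
 */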
static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
                                  struct kmem_cache *base_cache, int min,
                                  gfp_t gfp_flags)
{
        void *obj;

        if (cache->nobjs >= min)
                return 0;
        while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
                obj = kmem_cache_zalloc(base_cache, gfp_flags);
                if (!obj)
                        return -ENOMEM;
                cache->objects[cache->nobjs++] = obj;
        }
        return 0;
}

static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
{
        while (mc->nobjs)
                kfree(mc->objects[--mc->nobjs]);
}

static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
                                       int min, gfp_t gfp_flags)
{
        struct page *page;

        if (cache->nobjs >= min)
                return 0;
        while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
                page = alloc_page(gfp_flags);
                if (!page)
                        return -ENOMEM;
                set_page_private(page, 0);
                cache->objects[cache->nobjs++] = page_address(page);
        }
        return 0;
}

static void mmu_free_memory_cache_page(struct kvm_mmu_memory_cache *mc)
{
        while (mc->nobjs)
                free_page((unsigned long)mc->objects[--mc->nobjs]);
}

static int __mmu_topup_memory_caches(struct kvm_vcpu *vcpu, gfp_t gfp_flags)
{
        int r;

        r = mmu_topup_memory_cache(&vcpu->mmu_pte_chain_cache,
                                   pte_chain_cache, 4, gfp_flags);
        if (r)
                goto out;
        r = mmu_topup_memory_cache(&vcpu->mmu_rmap_desc_cache,
                                   rmap_desc_cache, 1, gfp_flags);
        if (r)
                goto out;
        r = mmu_topup_memory_cache_page(&vcpu->mmu_page_cache, 4, gfp_flags);
        if (r)
                goto out;
        r = mmu_topup_memory_cache(&vcpu->mmu_page_header_cache,
                                   mmu_page_header_cache, 4, gfp_flags);
out:
        return r;
}

static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
{
        int r;

        r = __mmu_topup_memory_caches(vcpu, GFP_NOWAIT);
        kvm_mmu_free_some_pages(vcpu);
        if (r < 0) {
                spin_unlock(&vcpu->kvm->lock);
                r = __mmu_topup_memory_caches(vcpu, GFP_KERNEL);
                spin_lock(&vcpu->kvm->lock);
                kvm_mmu_free_some_pages(vcpu);
        }
        return r;
}

static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
        mmu_free_memory_cache(&vcpu->mmu_pte_chain_cache);
        mmu_free_memory_cache(&vcpu->mmu_rmap_desc_cache);
        mmu_free_memory_cache_page(&vcpu->mmu_page_cache);
        mmu_free_memory_cache(&vcpu->mmu_page_header_cache);
}

static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
                                    size_t size)
{
        void *p;

        BUG_ON(!mc->nobjs);
        p = mc->objects[--mc->nobjs];
        memset(p, 0, size);
        return p;
}

static struct kvm_pte_chain *mmu_alloc_pte_chain(struct kvm_vcpu *vcpu)
{
        return mmu_memory_cache_alloc(&vcpu->mmu_pte_chain_cache,
                                      sizeof(struct kvm_pte_chain));
}

static void mmu_free_pte_chain(struct kvm_pte_chain *pc)
{
        kfree(pc);
}

static struct kvm_rmap_desc *mmu_alloc_rmap_desc(struct kvm_vcpu *vcpu)
{
        return mmu_memory_cache_alloc(&vcpu->mmu_rmap_desc_cache,
                                      sizeof(struct kvm_rmap_desc));
}

static void mmu_free_rmap_desc(struct kvm_rmap_desc *rd)
{
        kfree(rd);
}

/*
 * Reverse mapping data structures:
 *
 * If page->private bit zero is zero, then page->private points to the
 * shadow page table entry that points to page_address(page).
 *
 * If page->private bit zero is one, (then page->private & ~1) points
 * to a struct kvm_rmap_desc containing more mappings.
 */
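/*
 * For example (illustrative values): after the first writable mapping
 * of a page, page->private == (unsigned long)spte1; once a second
 * mapping is added it becomes (unsigned long)desc | 1, with
 * desc->shadow_ptes[] = { spte1, spte2, ... } and further descriptors
 * chained through desc->more.
 */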
static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte)
{
        struct page *page;
        struct kvm_rmap_desc *desc;
        int i;

        if (!is_rmap_pte(*spte))
                return;
        page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT);
        if (!page_private(page)) {
                rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte);
                set_page_private(page, (unsigned long)spte);
        } else if (!(page_private(page) & 1)) {
                rmap_printk("rmap_add: %p %llx 1->many\n", spte, *spte);
                desc = mmu_alloc_rmap_desc(vcpu);
                desc->shadow_ptes[0] = (u64 *)page_private(page);
                desc->shadow_ptes[1] = spte;
                set_page_private(page, (unsigned long)desc | 1);
        } else {
                rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte);
                desc = (struct kvm_rmap_desc *)(page_private(page) & ~1ul);
                while (desc->shadow_ptes[RMAP_EXT-1] && desc->more)
                        desc = desc->more;
                if (desc->shadow_ptes[RMAP_EXT-1]) {
                        desc->more = mmu_alloc_rmap_desc(vcpu);
                        desc = desc->more;
                }
                for (i = 0; desc->shadow_ptes[i]; ++i)
                        ;
                desc->shadow_ptes[i] = spte;
        }
}

static void rmap_desc_remove_entry(struct page *page,
                                   struct kvm_rmap_desc *desc,
                                   int i,
                                   struct kvm_rmap_desc *prev_desc)
{
        int j;

        for (j = RMAP_EXT - 1; !desc->shadow_ptes[j] && j > i; --j)
                ;
        desc->shadow_ptes[i] = desc->shadow_ptes[j];
        desc->shadow_ptes[j] = NULL;
        if (j != 0)
                return;
        if (!prev_desc && !desc->more)
                set_page_private(page, (unsigned long)desc->shadow_ptes[0]);
        else
                if (prev_desc)
                        prev_desc->more = desc->more;
                else
                        set_page_private(page, (unsigned long)desc->more | 1);
        mmu_free_rmap_desc(desc);
}

static void rmap_remove(u64 *spte)
{
        struct page *page;
        struct kvm_rmap_desc *desc;
        struct kvm_rmap_desc *prev_desc;
        int i;

        if (!is_rmap_pte(*spte))
                return;
        page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT);
        if (!page_private(page)) {
                printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
                BUG();
        } else if (!(page_private(page) & 1)) {
                rmap_printk("rmap_remove: %p %llx 1->0\n", spte, *spte);
                if ((u64 *)page_private(page) != spte) {
                        printk(KERN_ERR "rmap_remove: %p %llx 1->BUG\n",
                               spte, *spte);
                        BUG();
                }
                set_page_private(page, 0);
        } else {
                rmap_printk("rmap_remove: %p %llx many->many\n", spte, *spte);
                desc = (struct kvm_rmap_desc *)(page_private(page) & ~1ul);
                prev_desc = NULL;
                while (desc) {
                        for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i)
                                if (desc->shadow_ptes[i] == spte) {
                                        rmap_desc_remove_entry(page,
                                                               desc, i,
                                                               prev_desc);
                                        return;
                                }
                        prev_desc = desc;
                        desc = desc->more;
                }
                BUG();
        }
}

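/*
 * Remove write access to a guest frame by walking its reverse mapping
 * and clearing the writable bit in every shadow pte that maps it.  Used
 * when a guest page starts being shadowed as a page table, so that
 * guest writes to it trap and can be emulated.
 */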
static void rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
{
        struct kvm *kvm = vcpu->kvm;
        struct page *page;
        struct kvm_rmap_desc *desc;
        u64 *spte;

        page = gfn_to_page(kvm, gfn);
        BUG_ON(!page);

        while (page_private(page)) {
                if (!(page_private(page) & 1))
                        spte = (u64 *)page_private(page);
                else {
                        desc = (struct kvm_rmap_desc *)(page_private(page) & ~1ul);
                        spte = desc->shadow_ptes[0];
                }
                BUG_ON(!spte);
                BUG_ON((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT
                       != page_to_pfn(page));
                BUG_ON(!(*spte & PT_PRESENT_MASK));
                BUG_ON(!(*spte & PT_WRITABLE_MASK));
                rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
                rmap_remove(spte);
                set_shadow_pte(spte, *spte & ~PT_WRITABLE_MASK);
                kvm_flush_remote_tlbs(vcpu->kvm);
        }
}

#ifdef MMU_DEBUG
static int is_empty_shadow_page(u64 *spt)
{
        u64 *pos;
        u64 *end;

        for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
                if (*pos != 0) {
                        printk(KERN_ERR "%s: %p %llx\n", __FUNCTION__,
                               pos, *pos);
                        return 0;
                }
        return 1;
}
#endif

static void kvm_mmu_free_page(struct kvm *kvm,
                              struct kvm_mmu_page *page_head)
{
        ASSERT(is_empty_shadow_page(page_head->spt));
        list_del(&page_head->link);
        __free_page(virt_to_page(page_head->spt));
        kfree(page_head);
        ++kvm->n_free_mmu_pages;
}

static unsigned kvm_page_table_hashfn(gfn_t gfn)
{
        return gfn;
}

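/*
 * Allocate a shadow page table page and its struct kvm_mmu_page header
 * from the per-vcpu caches topped up earlier.  Returns NULL when the
 * shadow page budget (n_free_mmu_pages) is exhausted;
 * kvm_mmu_free_some_pages(), run during cache topup, recycles old
 * shadow pages to keep that from happening.
 */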
static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
                                               u64 *parent_pte)
{
        struct kvm_mmu_page *page;

        if (!vcpu->kvm->n_free_mmu_pages)
                return NULL;

        page = mmu_memory_cache_alloc(&vcpu->mmu_page_header_cache,
                                      sizeof *page);
        page->spt = mmu_memory_cache_alloc(&vcpu->mmu_page_cache, PAGE_SIZE);
        set_page_private(virt_to_page(page->spt), (unsigned long)page);
        list_add(&page->link, &vcpu->kvm->active_mmu_pages);
        ASSERT(is_empty_shadow_page(page->spt));
        page->slot_bitmap = 0;
        page->multimapped = 0;
        page->parent_pte = parent_pte;
        --vcpu->kvm->n_free_mmu_pages;
        return page;
}

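/*
 * A shadow page may be pointed to by more than one parent shadow pte
 * (for instance when two guest page tables map the same frame).  The
 * common single-parent case is kept inline in page->parent_pte; once a
 * second parent appears, the page is marked multimapped and the parents
 * are tracked in a list of kvm_pte_chain blocks instead.
 */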
static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
                                    struct kvm_mmu_page *page, u64 *parent_pte)
{
        struct kvm_pte_chain *pte_chain;
        struct hlist_node *node;
        int i;

        if (!parent_pte)
                return;
        if (!page->multimapped) {
                u64 *old = page->parent_pte;

                if (!old) {
                        page->parent_pte = parent_pte;
                        return;
                }
                page->multimapped = 1;
                pte_chain = mmu_alloc_pte_chain(vcpu);
                INIT_HLIST_HEAD(&page->parent_ptes);
                hlist_add_head(&pte_chain->link, &page->parent_ptes);
                pte_chain->parent_ptes[0] = old;
        }
        hlist_for_each_entry(pte_chain, node, &page->parent_ptes, link) {
                if (pte_chain->parent_ptes[NR_PTE_CHAIN_ENTRIES-1])
                        continue;
                for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i)
                        if (!pte_chain->parent_ptes[i]) {
                                pte_chain->parent_ptes[i] = parent_pte;
                                return;
                        }
        }
        pte_chain = mmu_alloc_pte_chain(vcpu);
        BUG_ON(!pte_chain);
        hlist_add_head(&pte_chain->link, &page->parent_ptes);
        pte_chain->parent_ptes[0] = parent_pte;
}

static void mmu_page_remove_parent_pte(struct kvm_mmu_page *page,
                                       u64 *parent_pte)
{
        struct kvm_pte_chain *pte_chain;
        struct hlist_node *node;
        int i;

        if (!page->multimapped) {
                BUG_ON(page->parent_pte != parent_pte);
                page->parent_pte = NULL;
                return;
        }
        hlist_for_each_entry(pte_chain, node, &page->parent_ptes, link)
                for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
                        if (!pte_chain->parent_ptes[i])
                                break;
                        if (pte_chain->parent_ptes[i] != parent_pte)
                                continue;
                        while (i + 1 < NR_PTE_CHAIN_ENTRIES
                               && pte_chain->parent_ptes[i + 1]) {
                                pte_chain->parent_ptes[i]
                                        = pte_chain->parent_ptes[i + 1];
                                ++i;
                        }
                        pte_chain->parent_ptes[i] = NULL;
                        if (i == 0) {
                                hlist_del(&pte_chain->link);
                                mmu_free_pte_chain(pte_chain);
                                if (hlist_empty(&page->parent_ptes)) {
                                        page->multimapped = 0;
                                        page->parent_pte = NULL;
                                }
                        }
                        return;
                }
        BUG();
}

static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm_vcpu *vcpu,
                                                gfn_t gfn)
{
        unsigned index;
        struct hlist_head *bucket;
        struct kvm_mmu_page *page;
        struct hlist_node *node;

        pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
        index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
        bucket = &vcpu->kvm->mmu_page_hash[index];
        hlist_for_each_entry(page, node, bucket, hash_link)
                if (page->gfn == gfn && !page->role.metaphysical) {
                        pgprintk("%s: found role %x\n",
                                 __FUNCTION__, page->role.word);
                        return page;
                }
        return NULL;
}

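/*
 * Find or create the shadow page for (gfn, role).  Shadow pages are
 * hashed by gfn and distinguished by their role (guest paging level,
 * shadow level, access, quadrant), so the same guest page table can be
 * shadowed several times under different roles.  The quadrant handles
 * 32-bit guest tables, which cover more address space than one shadow
 * page and are therefore split across several of them.
 */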
static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
                                             gfn_t gfn,
                                             gva_t gaddr,
                                             unsigned level,
                                             int metaphysical,
                                             unsigned hugepage_access,
                                             u64 *parent_pte)
{
        union kvm_mmu_page_role role;
        unsigned index;
        unsigned quadrant;
        struct hlist_head *bucket;
        struct kvm_mmu_page *page;
        struct hlist_node *node;

        role.word = 0;
        role.glevels = vcpu->mmu.root_level;
        role.level = level;
        role.metaphysical = metaphysical;
        role.hugepage_access = hugepage_access;
        if (vcpu->mmu.root_level <= PT32_ROOT_LEVEL) {
                quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
                quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
                role.quadrant = quadrant;
        }
        pgprintk("%s: looking gfn %lx role %x\n", __FUNCTION__,
                 gfn, role.word);
        index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
        bucket = &vcpu->kvm->mmu_page_hash[index];
        hlist_for_each_entry(page, node, bucket, hash_link)
                if (page->gfn == gfn && page->role.word == role.word) {
                        mmu_page_add_parent_pte(vcpu, page, parent_pte);
                        pgprintk("%s: found\n", __FUNCTION__);
                        return page;
                }
        page = kvm_mmu_alloc_page(vcpu, parent_pte);
        if (!page)
                return page;
        pgprintk("%s: adding gfn %lx role %x\n", __FUNCTION__, gfn, role.word);
        page->gfn = gfn;
        page->role = role;
        hlist_add_head(&page->hash_link, bucket);
        if (!metaphysical)
                rmap_write_protect(vcpu, gfn);
        return page;
}

static void kvm_mmu_page_unlink_children(struct kvm *kvm,
                                         struct kvm_mmu_page *page)
{
        unsigned i;
        u64 *pt;
        u64 ent;

        pt = page->spt;

        if (page->role.level == PT_PAGE_TABLE_LEVEL) {
                for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
                        if (pt[i] & PT_PRESENT_MASK)
                                rmap_remove(&pt[i]);
                        pt[i] = 0;
                }
                kvm_flush_remote_tlbs(kvm);
                return;
        }

        for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
                ent = pt[i];

                pt[i] = 0;
                if (!(ent & PT_PRESENT_MASK))
                        continue;
                ent &= PT64_BASE_ADDR_MASK;
                mmu_page_remove_parent_pte(page_header(ent), &pt[i]);
        }
        kvm_flush_remote_tlbs(kvm);
}

static void kvm_mmu_put_page(struct kvm_mmu_page *page,
                             u64 *parent_pte)
{
        mmu_page_remove_parent_pte(page, parent_pte);
}

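/*
 * Tear down a shadow page: detach it from every parent pte and unlink
 * its children, then free it.  Pages still referenced as paging roots
 * (root_count != 0) are not freed, only moved to the head of the
 * active list, out of immediate reach of page reclaim.
 */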
static void kvm_mmu_zap_page(struct kvm *kvm,
                             struct kvm_mmu_page *page)
{
        u64 *parent_pte;

        while (page->multimapped || page->parent_pte) {
                if (!page->multimapped)
                        parent_pte = page->parent_pte;
                else {
                        struct kvm_pte_chain *chain;

                        chain = container_of(page->parent_ptes.first,
                                             struct kvm_pte_chain, link);
                        parent_pte = chain->parent_ptes[0];
                }
                BUG_ON(!parent_pte);
                kvm_mmu_put_page(page, parent_pte);
                set_shadow_pte(parent_pte, 0);
        }
        kvm_mmu_page_unlink_children(kvm, page);
        if (!page->root_count) {
                hlist_del(&page->hash_link);
                kvm_mmu_free_page(kvm, page);
        } else
                list_move(&page->link, &kvm->active_mmu_pages);
}

static int kvm_mmu_unprotect_page(struct kvm_vcpu *vcpu, gfn_t gfn)
{
        unsigned index;
        struct hlist_head *bucket;
        struct kvm_mmu_page *page;
        struct hlist_node *node, *n;
        int r;

        pgprintk("%s: looking for gfn %lx\n", __FUNCTION__, gfn);
        r = 0;
        index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
        bucket = &vcpu->kvm->mmu_page_hash[index];
        hlist_for_each_entry_safe(page, node, n, bucket, hash_link)
                if (page->gfn == gfn && !page->role.metaphysical) {
                        pgprintk("%s: gfn %lx role %x\n", __FUNCTION__, gfn,
                                 page->role.word);
                        kvm_mmu_zap_page(vcpu->kvm, page);
                        r = 1;
                }
        return r;
}

static void mmu_unshadow(struct kvm_vcpu *vcpu, gfn_t gfn)
{
        struct kvm_mmu_page *page;

        while ((page = kvm_mmu_lookup_page(vcpu, gfn)) != NULL) {
                pgprintk("%s: zap %lx %x\n",
                         __FUNCTION__, gfn, page->role.word);
                kvm_mmu_zap_page(vcpu->kvm, page);
        }
}

static void page_header_update_slot(struct kvm *kvm, void *pte, gpa_t gpa)
{
        int slot = memslot_id(kvm, gfn_to_memslot(kvm, gpa >> PAGE_SHIFT));
        struct kvm_mmu_page *page_head = page_header(__pa(pte));

        __set_bit(slot, &page_head->slot_bitmap);
}

hpa_t safe_gpa_to_hpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
        hpa_t hpa = gpa_to_hpa(vcpu, gpa);

        return is_error_hpa(hpa) ? bad_page_address | (gpa & ~PAGE_MASK) : hpa;
}

hpa_t gpa_to_hpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
        struct page *page;

        ASSERT((gpa & HPA_ERR_MASK) == 0);
        page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
        if (!page)
                return gpa | HPA_ERR_MASK;
        return ((hpa_t)page_to_pfn(page) << PAGE_SHIFT)
                | (gpa & (PAGE_SIZE-1));
}

hpa_t gva_to_hpa(struct kvm_vcpu *vcpu, gva_t gva)
{
        gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);

        if (gpa == UNMAPPED_GVA)
                return UNMAPPED_GVA;
        return gpa_to_hpa(vcpu, gpa);
}

struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
{
        gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);

        if (gpa == UNMAPPED_GVA)
                return NULL;
        return pfn_to_page(gpa_to_hpa(vcpu, gpa) >> PAGE_SHIFT);
}

static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
{
}

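/*
 * Handle a fault while the guest has paging disabled: walk the shadow
 * table from the root, allocating intermediate shadow pages as needed,
 * and install a writable identity mapping for the faulting address at
 * the last level.
 */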
static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, hpa_t p)
{
        int level = PT32E_ROOT_LEVEL;
        hpa_t table_addr = vcpu->mmu.root_hpa;

        for (; ; level--) {
                u32 index = PT64_INDEX(v, level);
                u64 *table;
                u64 pte;

                ASSERT(VALID_PAGE(table_addr));
                table = __va(table_addr);

                if (level == 1) {
                        pte = table[index];
                        if (is_present_pte(pte) && is_writeble_pte(pte))
                                return 0;
                        mark_page_dirty(vcpu->kvm, v >> PAGE_SHIFT);
                        page_header_update_slot(vcpu->kvm, table, v);
                        table[index] = p | PT_PRESENT_MASK | PT_WRITABLE_MASK |
                                       PT_USER_MASK;
                        rmap_add(vcpu, &table[index]);
                        return 0;
                }

                if (table[index] == 0) {
                        struct kvm_mmu_page *new_table;
                        gfn_t pseudo_gfn;

                        pseudo_gfn = (v & PT64_DIR_BASE_ADDR_MASK)
                                >> PAGE_SHIFT;
                        new_table = kvm_mmu_get_page(vcpu, pseudo_gfn,
                                                     v, level - 1,
                                                     1, 0, &table[index]);
                        if (!new_table) {
                                pgprintk("nonpaging_map: ENOMEM\n");
                                return -ENOMEM;
                        }

                        table[index] = __pa(new_table->spt) | PT_PRESENT_MASK
                                | PT_WRITABLE_MASK | PT_USER_MASK;
                }
                table_addr = table[index] & PT64_BASE_ADDR_MASK;
        }
}

static void mmu_free_roots(struct kvm_vcpu *vcpu)
{
        int i;
        struct kvm_mmu_page *page;

        if (!VALID_PAGE(vcpu->mmu.root_hpa))
                return;
#ifdef CONFIG_X86_64
        if (vcpu->mmu.shadow_root_level == PT64_ROOT_LEVEL) {
                hpa_t root = vcpu->mmu.root_hpa;

                page = page_header(root);
                --page->root_count;
                vcpu->mmu.root_hpa = INVALID_PAGE;
                return;
        }
#endif
        for (i = 0; i < 4; ++i) {
                hpa_t root = vcpu->mmu.pae_root[i];

                if (root) {
                        root &= PT64_BASE_ADDR_MASK;
                        page = page_header(root);
                        --page->root_count;
                }
                vcpu->mmu.pae_root[i] = INVALID_PAGE;
        }
        vcpu->mmu.root_hpa = INVALID_PAGE;
}

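/*
 * Allocate the shadow paging roots for the current mode: a single
 * 4-level root for long mode, or four PAE page directories (one per
 * 1GB quadrant of the address space) otherwise.  Root pages have their
 * root_count raised so kvm_mmu_zap_page() will not free them.
 */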
static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
{
        int i;
        gfn_t root_gfn;
        struct kvm_mmu_page *page;

        root_gfn = vcpu->cr3 >> PAGE_SHIFT;

#ifdef CONFIG_X86_64
        if (vcpu->mmu.shadow_root_level == PT64_ROOT_LEVEL) {
                hpa_t root = vcpu->mmu.root_hpa;

                ASSERT(!VALID_PAGE(root));
                page = kvm_mmu_get_page(vcpu, root_gfn, 0,
                                        PT64_ROOT_LEVEL, 0, 0, NULL);
                root = __pa(page->spt);
                ++page->root_count;
                vcpu->mmu.root_hpa = root;
                return;
        }
#endif
        for (i = 0; i < 4; ++i) {
                hpa_t root = vcpu->mmu.pae_root[i];

                ASSERT(!VALID_PAGE(root));
                if (vcpu->mmu.root_level == PT32E_ROOT_LEVEL) {
                        if (!is_present_pte(vcpu->pdptrs[i])) {
                                vcpu->mmu.pae_root[i] = 0;
                                continue;
                        }
                        root_gfn = vcpu->pdptrs[i] >> PAGE_SHIFT;
                } else if (vcpu->mmu.root_level == 0)
                        root_gfn = 0;
                page = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
                                        PT32_ROOT_LEVEL, !is_paging(vcpu),
                                        0, NULL);
                root = __pa(page->spt);
                ++page->root_count;
                vcpu->mmu.pae_root[i] = root | PT_PRESENT_MASK;
        }
        vcpu->mmu.root_hpa = __pa(vcpu->mmu.pae_root);
}

static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
{
        return vaddr;
}

static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
                                u32 error_code)
{
        gpa_t addr = gva;
        hpa_t paddr;
        int r;

        r = mmu_topup_memory_caches(vcpu);
        if (r)
                return r;

        ASSERT(vcpu);
        ASSERT(VALID_PAGE(vcpu->mmu.root_hpa));

        paddr = gpa_to_hpa(vcpu, addr & PT64_BASE_ADDR_MASK);

        if (is_error_hpa(paddr))
                return 1;

        return nonpaging_map(vcpu, addr & PAGE_MASK, paddr);
}

static void nonpaging_free(struct kvm_vcpu *vcpu)
{
        mmu_free_roots(vcpu);
}

static int nonpaging_init_context(struct kvm_vcpu *vcpu)
{
        struct kvm_mmu *context = &vcpu->mmu;

        context->new_cr3 = nonpaging_new_cr3;
        context->page_fault = nonpaging_page_fault;
        context->gva_to_gpa = nonpaging_gva_to_gpa;
        context->free = nonpaging_free;
        context->root_level = 0;
        context->shadow_root_level = PT32E_ROOT_LEVEL;
        context->root_hpa = INVALID_PAGE;
        return 0;
}

static void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
{
        ++vcpu->stat.tlb_flush;
        kvm_arch_ops->tlb_flush(vcpu);
}

static void paging_new_cr3(struct kvm_vcpu *vcpu)
{
        pgprintk("%s: cr3 %lx\n", __FUNCTION__, vcpu->cr3);
        mmu_free_roots(vcpu);
}

static void inject_page_fault(struct kvm_vcpu *vcpu,
                              u64 addr,
                              u32 err_code)
{
        kvm_arch_ops->inject_page_fault(vcpu, addr, err_code);
}

static void paging_free(struct kvm_vcpu *vcpu)
{
        nonpaging_free(vcpu);
}

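/*
 * The guest pte walkers are generated twice from the same template:
 * once with PTTYPE 64 for long mode/PAE guests and once with PTTYPE 32
 * for legacy 2-level guests, producing the paging64_* and paging32_*
 * function families used below.
 */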
#define PTTYPE 64
#include "paging_tmpl.h"
#undef PTTYPE

#define PTTYPE 32
#include "paging_tmpl.h"
#undef PTTYPE

static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
{
        struct kvm_mmu *context = &vcpu->mmu;

        ASSERT(is_pae(vcpu));
        context->new_cr3 = paging_new_cr3;
        context->page_fault = paging64_page_fault;
        context->gva_to_gpa = paging64_gva_to_gpa;
        context->free = paging_free;
        context->root_level = level;
        context->shadow_root_level = level;
        context->root_hpa = INVALID_PAGE;
        return 0;
}

static int paging64_init_context(struct kvm_vcpu *vcpu)
{
        return paging64_init_context_common(vcpu, PT64_ROOT_LEVEL);
}

static int paging32_init_context(struct kvm_vcpu *vcpu)
{
        struct kvm_mmu *context = &vcpu->mmu;

        context->new_cr3 = paging_new_cr3;
        context->page_fault = paging32_page_fault;
        context->gva_to_gpa = paging32_gva_to_gpa;
        context->free = paging_free;
        context->root_level = PT32_ROOT_LEVEL;
        context->shadow_root_level = PT32E_ROOT_LEVEL;
        context->root_hpa = INVALID_PAGE;
        return 0;
}

static int paging32E_init_context(struct kvm_vcpu *vcpu)
{
        return paging64_init_context_common(vcpu, PT32E_ROOT_LEVEL);
}

static int init_kvm_mmu(struct kvm_vcpu *vcpu)
{
        ASSERT(vcpu);
        ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));

        if (!is_paging(vcpu))
                return nonpaging_init_context(vcpu);
        else if (is_long_mode(vcpu))
                return paging64_init_context(vcpu);
        else if (is_pae(vcpu))
                return paging32E_init_context(vcpu);
        else
                return paging32_init_context(vcpu);
}

static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
{
        ASSERT(vcpu);
        if (VALID_PAGE(vcpu->mmu.root_hpa)) {
                vcpu->mmu.free(vcpu);
                vcpu->mmu.root_hpa = INVALID_PAGE;
        }
}

int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
{
        destroy_kvm_mmu(vcpu);
        return init_kvm_mmu(vcpu);
}

int kvm_mmu_load(struct kvm_vcpu *vcpu)
{
        int r;

        spin_lock(&vcpu->kvm->lock);
        r = mmu_topup_memory_caches(vcpu);
        if (r)
                goto out;
        mmu_alloc_roots(vcpu);
        kvm_arch_ops->set_cr3(vcpu, vcpu->mmu.root_hpa);
        kvm_mmu_flush_tlb(vcpu);
out:
        spin_unlock(&vcpu->kvm->lock);
        return r;
}
EXPORT_SYMBOL_GPL(kvm_mmu_load);

void kvm_mmu_unload(struct kvm_vcpu *vcpu)
{
        mmu_free_roots(vcpu);
}

static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
                                  struct kvm_mmu_page *page,
                                  u64 *spte)
{
        u64 pte;
        struct kvm_mmu_page *child;

        pte = *spte;
        if (is_present_pte(pte)) {
                if (page->role.level == PT_PAGE_TABLE_LEVEL)
                        rmap_remove(spte);
                else {
                        child = page_header(pte & PT64_BASE_ADDR_MASK);
                        mmu_page_remove_parent_pte(child, spte);
                }
        }
        *spte = 0;
        kvm_flush_remote_tlbs(vcpu->kvm);
}

static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
                                  struct kvm_mmu_page *page,
                                  u64 *spte,
                                  const void *new, int bytes)
{
        if (page->role.level != PT_PAGE_TABLE_LEVEL)
                return;

        if (page->role.glevels == PT32_ROOT_LEVEL)
                paging32_update_pte(vcpu, page, spte, new, bytes);
        else
                paging64_update_pte(vcpu, page, spte, new, bytes);
}

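/*
 * Called when the guest writes to a page that is shadowed as a page
 * table (such pages are write-protected, so the write trapped).  The
 * affected shadow ptes are zapped and, where possible, updated in
 * place.  Misaligned writes, or repeated writes to the same gfn
 * (flooding), suggest the page is no longer used as a page table, so
 * its shadow is zapped instead.
 */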
void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
                       const u8 *new, int bytes)
{
        gfn_t gfn = gpa >> PAGE_SHIFT;
        struct kvm_mmu_page *page;
        struct hlist_node *node, *n;
        struct hlist_head *bucket;
        unsigned index;
        u64 *spte;
        unsigned offset = offset_in_page(gpa);
        unsigned pte_size;
        unsigned page_offset;
        unsigned misaligned;
        unsigned quadrant;
        int level;
        int flooded = 0;
        int npte;

        pgprintk("%s: gpa %llx bytes %d\n", __FUNCTION__, gpa, bytes);
        if (gfn == vcpu->last_pt_write_gfn) {
                ++vcpu->last_pt_write_count;
                if (vcpu->last_pt_write_count >= 3)
                        flooded = 1;
        } else {
                vcpu->last_pt_write_gfn = gfn;
                vcpu->last_pt_write_count = 1;
        }
        index = kvm_page_table_hashfn(gfn) % KVM_NUM_MMU_PAGES;
        bucket = &vcpu->kvm->mmu_page_hash[index];
        hlist_for_each_entry_safe(page, node, n, bucket, hash_link) {
                if (page->gfn != gfn || page->role.metaphysical)
                        continue;
                pte_size = page->role.glevels == PT32_ROOT_LEVEL ? 4 : 8;
                misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
                misaligned |= bytes < 4;
                if (misaligned || flooded) {
                        /*
                         * Misaligned accesses are too much trouble to fix
                         * up; also, they usually indicate a page is not used
                         * as a page table.
                         *
                         * If we're seeing too many writes to a page,
                         * it may no longer be a page table, or we may be
                         * forking, in which case it is better to unmap the
                         * page.
                         */
                        pgprintk("misaligned: gpa %llx bytes %d role %x\n",
                                 gpa, bytes, page->role.word);
                        kvm_mmu_zap_page(vcpu->kvm, page);
                        continue;
                }
                page_offset = offset;
                level = page->role.level;
                npte = 1;
                if (page->role.glevels == PT32_ROOT_LEVEL) {
                        page_offset <<= 1;      /* 32->64 */
                        /*
                         * A 32-bit pde maps 4MB while the shadow pdes map
                         * only 2MB.  So we need to double the offset again
                         * and zap two pdes instead of one.
                         */
                        if (level == PT32_ROOT_LEVEL) {
                                page_offset &= ~7; /* kill rounding error */
                                page_offset <<= 1;
                                npte = 2;
                        }
                        quadrant = page_offset >> PAGE_SHIFT;
                        page_offset &= ~PAGE_MASK;
                        if (quadrant != page->role.quadrant)
                                continue;
                }
                spte = &page->spt[page_offset / sizeof(*spte)];
                while (npte--) {
                        mmu_pte_write_zap_pte(vcpu, page, spte);
                        mmu_pte_write_new_pte(vcpu, page, spte, new, bytes);
                        ++spte;
                }
        }
}

int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
{
        gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, gva);

        return kvm_mmu_unprotect_page(vcpu, gpa >> PAGE_SHIFT);
}

void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
{
        while (vcpu->kvm->n_free_mmu_pages < KVM_REFILL_PAGES) {
                struct kvm_mmu_page *page;

                page = container_of(vcpu->kvm->active_mmu_pages.prev,
                                    struct kvm_mmu_page, link);
                kvm_mmu_zap_page(vcpu->kvm, page);
        }
}

static void free_mmu_pages(struct kvm_vcpu *vcpu)
{
        struct kvm_mmu_page *page;

        while (!list_empty(&vcpu->kvm->active_mmu_pages)) {
                page = container_of(vcpu->kvm->active_mmu_pages.next,
                                    struct kvm_mmu_page, link);
                kvm_mmu_zap_page(vcpu->kvm, page);
        }
        free_page((unsigned long)vcpu->mmu.pae_root);
}

static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
{
        struct page *page;
        int i;

        ASSERT(vcpu);

        vcpu->kvm->n_free_mmu_pages = KVM_NUM_MMU_PAGES;

        /*
         * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
         * Therefore we need to allocate shadow page tables in the first
         * 4GB of memory, which happens to fit the DMA32 zone.
         */
        page = alloc_page(GFP_KERNEL | __GFP_DMA32);
        if (!page)
                goto error_1;
        vcpu->mmu.pae_root = page_address(page);
        for (i = 0; i < 4; ++i)
                vcpu->mmu.pae_root[i] = INVALID_PAGE;

        return 0;

error_1:
        free_mmu_pages(vcpu);
        return -ENOMEM;
}

int kvm_mmu_create(struct kvm_vcpu *vcpu)
{
        ASSERT(vcpu);
        ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));

        return alloc_mmu_pages(vcpu);
}

int kvm_mmu_setup(struct kvm_vcpu *vcpu)
{
        ASSERT(vcpu);
        ASSERT(!VALID_PAGE(vcpu->mmu.root_hpa));

        return init_kvm_mmu(vcpu);
}

void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
{
        ASSERT(vcpu);

        destroy_kvm_mmu(vcpu);
        free_mmu_pages(vcpu);
        mmu_free_memory_caches(vcpu);
}

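/*
 * Write-protect every shadow pte belonging to a memory slot, so that
 * subsequent guest writes fault and can be tracked (used by the
 * dirty-logging ioctl).
 */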
void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
{
        struct kvm_mmu_page *page;

        list_for_each_entry(page, &kvm->active_mmu_pages, link) {
                int i;
                u64 *pt;

                if (!test_bit(slot, &page->slot_bitmap))
                        continue;

                pt = page->spt;
                for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
                        /* avoid RMW */
                        if (pt[i] & PT_WRITABLE_MASK) {
                                rmap_remove(&pt[i]);
                                pt[i] &= ~PT_WRITABLE_MASK;
                        }
        }
}

void kvm_mmu_zap_all(struct kvm *kvm)
{
        struct kvm_mmu_page *page, *node;

        list_for_each_entry_safe(page, node, &kvm->active_mmu_pages, link)
                kvm_mmu_zap_page(kvm, page);

        kvm_flush_remote_tlbs(kvm);
}

void kvm_mmu_module_exit(void)
{
        if (pte_chain_cache)
                kmem_cache_destroy(pte_chain_cache);
        if (rmap_desc_cache)
                kmem_cache_destroy(rmap_desc_cache);
        if (mmu_page_header_cache)
                kmem_cache_destroy(mmu_page_header_cache);
}

int kvm_mmu_module_init(void)
{
        pte_chain_cache = kmem_cache_create("kvm_pte_chain",
                                            sizeof(struct kvm_pte_chain),
                                            0, 0, NULL);
        if (!pte_chain_cache)
                goto nomem;
        rmap_desc_cache = kmem_cache_create("kvm_rmap_desc",
                                            sizeof(struct kvm_rmap_desc),
                                            0, 0, NULL);
        if (!rmap_desc_cache)
                goto nomem;

        mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
                                                  sizeof(struct kvm_mmu_page),
                                                  0, 0, NULL);
        if (!mmu_page_header_cache)
                goto nomem;

        return 0;

nomem:
        kvm_mmu_module_exit();
        return -ENOMEM;
}

#ifdef AUDIT

static const char *audit_msg;

static gva_t canonicalize(gva_t gva)
{
#ifdef CONFIG_X86_64
        gva = (long long)(gva << 16) >> 16;
#endif
        return gva;
}

static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
                                gva_t va, int level)
{
        u64 *pt = __va(page_pte & PT64_BASE_ADDR_MASK);
        int i;
        gva_t va_delta = 1ul << (PAGE_SHIFT + 9 * (level - 1));

        for (i = 0; i < PT64_ENT_PER_PAGE; ++i, va += va_delta) {
                u64 ent = pt[i];

                if (!(ent & PT_PRESENT_MASK))
                        continue;

                va = canonicalize(va);
                if (level > 1)
                        audit_mappings_page(vcpu, ent, va, level - 1);
                else {
                        gpa_t gpa = vcpu->mmu.gva_to_gpa(vcpu, va);
                        hpa_t hpa = gpa_to_hpa(vcpu, gpa);

                        if ((ent & PT_PRESENT_MASK)
                            && (ent & PT64_BASE_ADDR_MASK) != hpa)
                                printk(KERN_ERR "audit error: (%s) levels %d"
                                       " gva %lx gpa %llx hpa %llx ent %llx\n",
                                       audit_msg, vcpu->mmu.root_level,
                                       va, gpa, hpa, ent);
                }
        }
}

static void audit_mappings(struct kvm_vcpu *vcpu)
{
        unsigned i;

        if (vcpu->mmu.root_level == 4)
                audit_mappings_page(vcpu, vcpu->mmu.root_hpa, 0, 4);
        else
                for (i = 0; i < 4; ++i)
                        if (vcpu->mmu.pae_root[i] & PT_PRESENT_MASK)
                                audit_mappings_page(vcpu,
                                                    vcpu->mmu.pae_root[i],
                                                    i << 30,
                                                    2);
}

static int count_rmaps(struct kvm_vcpu *vcpu)
{
        int nmaps = 0;
        int i, j, k;

        for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
                struct kvm_memory_slot *m = &vcpu->kvm->memslots[i];
                struct kvm_rmap_desc *d;

                for (j = 0; j < m->npages; ++j) {
                        struct page *page = m->phys_mem[j];

                        if (!page->private)
                                continue;
                        if (!(page->private & 1)) {
                                ++nmaps;
                                continue;
                        }
                        d = (struct kvm_rmap_desc *)(page->private & ~1ul);
                        while (d) {
                                for (k = 0; k < RMAP_EXT; ++k)
                                        if (d->shadow_ptes[k])
                                                ++nmaps;
                                        else
                                                break;
                                d = d->more;
                        }
                }
        }
        return nmaps;
}

static int count_writable_mappings(struct kvm_vcpu *vcpu)
{
        int nmaps = 0;
        struct kvm_mmu_page *page;
        int i;

        list_for_each_entry(page, &vcpu->kvm->active_mmu_pages, link) {
                u64 *pt = page->spt;

                if (page->role.level != PT_PAGE_TABLE_LEVEL)
                        continue;

                for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
                        u64 ent = pt[i];

                        if (!(ent & PT_PRESENT_MASK))
                                continue;
                        if (!(ent & PT_WRITABLE_MASK))
                                continue;
                        ++nmaps;
                }
        }
        return nmaps;
}

static void audit_rmap(struct kvm_vcpu *vcpu)
{
        int n_rmap = count_rmaps(vcpu);
        int n_actual = count_writable_mappings(vcpu);

        if (n_rmap != n_actual)
                printk(KERN_ERR "%s: (%s) rmap %d actual %d\n",
                       __FUNCTION__, audit_msg, n_rmap, n_actual);
}

static void audit_write_protection(struct kvm_vcpu *vcpu)
{
        struct kvm_mmu_page *page;

        list_for_each_entry(page, &vcpu->kvm->active_mmu_pages, link) {
                hfn_t hfn;
                struct page *pg;

                if (page->role.metaphysical)
                        continue;

                hfn = gpa_to_hpa(vcpu, (gpa_t)page->gfn << PAGE_SHIFT)
                        >> PAGE_SHIFT;
                pg = pfn_to_page(hfn);
                if (pg->private)
                        printk(KERN_ERR "%s: (%s) shadow page has writable"
                               " mappings: gfn %lx role %x\n",
                               __FUNCTION__, audit_msg, page->gfn,
                               page->role.word);
        }
}

static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg)
{
        int olddbg = dbg;

        dbg = 0;
        audit_msg = msg;
        audit_rmap(vcpu);
        audit_write_protection(vcpu);
        audit_mappings(vcpu);
        dbg = olddbg;
}

#endif