/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * This module enables machines with Intel VT-x extensions to run virtual
 * machines without emulation or binary translation.
 *
 * MMU support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include "vmx.h"
#include "mmu.h"

#include <linux/kvm_host.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/swap.h>
#include <linux/hugetlb.h>

#include <asm/page.h>
#include <asm/cmpxchg.h>
#include <asm/io.h>

/*
 * When set to true, this variable enables Two-Dimensional Paging, where
 * the hardware walks two page tables:
 * 1. the guest-virtual to guest-physical table
 * 2. while doing 1., the guest-physical to host-physical table
 * If the hardware supports that, we don't need to do shadow paging.
 */
static bool tdp_enabled = false;

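/*
 * Note: tdp_enabled stays false unless the vendor module calls
 * kvm_enable_tdp() (exported near the end of this file); only then does
 * init_kvm_mmu() pick the TDP context over the shadow-paging one.
 */
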
#undef MMU_DEBUG

#undef AUDIT

#ifdef AUDIT
static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg);
#else
static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg) {}
#endif

#ifdef MMU_DEBUG

#define pgprintk(x...) do { if (dbg) printk(x); } while (0)
#define rmap_printk(x...) do { if (dbg) printk(x); } while (0)

#else

#define pgprintk(x...) do { } while (0)
#define rmap_printk(x...) do { } while (0)

#endif

#if defined(MMU_DEBUG) || defined(AUDIT)
static int dbg = 1;
#endif

#ifndef MMU_DEBUG
#define ASSERT(x) do { } while (0)
#else
#define ASSERT(x)							\
	if (!(x)) {							\
		printk(KERN_WARNING "assertion failed %s:%d: %s\n",	\
		       __FILE__, __LINE__, #x);				\
	}
#endif

#define PT64_PT_BITS 9
#define PT64_ENT_PER_PAGE (1 << PT64_PT_BITS)
#define PT32_PT_BITS 10
#define PT32_ENT_PER_PAGE (1 << PT32_PT_BITS)

#define PT_WRITABLE_SHIFT 1

#define PT_PRESENT_MASK (1ULL << 0)
#define PT_WRITABLE_MASK (1ULL << PT_WRITABLE_SHIFT)
#define PT_USER_MASK (1ULL << 2)
#define PT_PWT_MASK (1ULL << 3)
#define PT_PCD_MASK (1ULL << 4)
#define PT_ACCESSED_MASK (1ULL << 5)
#define PT_DIRTY_MASK (1ULL << 6)
#define PT_PAGE_SIZE_MASK (1ULL << 7)
#define PT_PAT_MASK (1ULL << 7)
#define PT_GLOBAL_MASK (1ULL << 8)
#define PT64_NX_SHIFT 63
#define PT64_NX_MASK (1ULL << PT64_NX_SHIFT)

#define PT_PAT_SHIFT 7
#define PT_DIR_PAT_SHIFT 12
#define PT_DIR_PAT_MASK (1ULL << PT_DIR_PAT_SHIFT)

#define PT32_DIR_PSE36_SIZE 4
#define PT32_DIR_PSE36_SHIFT 13
#define PT32_DIR_PSE36_MASK \
	(((1ULL << PT32_DIR_PSE36_SIZE) - 1) << PT32_DIR_PSE36_SHIFT)


#define PT_FIRST_AVAIL_BITS_SHIFT 9
#define PT64_SECOND_AVAIL_BITS_SHIFT 52

#define VALID_PAGE(x) ((x) != INVALID_PAGE)

#define PT64_LEVEL_BITS 9

#define PT64_LEVEL_SHIFT(level) \
	(PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS)

#define PT64_LEVEL_MASK(level) \
	(((1ULL << PT64_LEVEL_BITS) - 1) << PT64_LEVEL_SHIFT(level))

#define PT64_INDEX(address, level)\
	(((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))


#define PT32_LEVEL_BITS 10

#define PT32_LEVEL_SHIFT(level) \
	(PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)

#define PT32_LEVEL_MASK(level) \
	(((1ULL << PT32_LEVEL_BITS) - 1) << PT32_LEVEL_SHIFT(level))

#define PT32_INDEX(address, level)\
	(((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))


#define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
#define PT64_DIR_BASE_ADDR_MASK \
	(PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + PT64_LEVEL_BITS)) - 1))

#define PT32_BASE_ADDR_MASK PAGE_MASK
#define PT32_DIR_BASE_ADDR_MASK \
	(PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))

#define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK \
			| PT64_NX_MASK)

#define PFERR_PRESENT_MASK (1U << 0)
#define PFERR_WRITE_MASK (1U << 1)
#define PFERR_USER_MASK (1U << 2)
#define PFERR_FETCH_MASK (1U << 4)

#define PT64_ROOT_LEVEL 4
#define PT32_ROOT_LEVEL 2
#define PT32E_ROOT_LEVEL 3

#define PT_DIRECTORY_LEVEL 2
#define PT_PAGE_TABLE_LEVEL 1

#define RMAP_EXT 4

#define ACC_EXEC_MASK    1
#define ACC_WRITE_MASK   PT_WRITABLE_MASK
#define ACC_USER_MASK    PT_USER_MASK
#define ACC_ALL          (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)

struct kvm_rmap_desc {
	u64 *shadow_ptes[RMAP_EXT];
	struct kvm_rmap_desc *more;
};
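
/*
 * Each kvm_rmap_desc holds up to RMAP_EXT (4) shadow-pte pointers and
 * chains to further descriptors through ->more; rmap_add() below walks
 * the chain and appends a new descriptor once all four slots are used.
 */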

static struct kmem_cache *pte_chain_cache;
static struct kmem_cache *rmap_desc_cache;
static struct kmem_cache *mmu_page_header_cache;

static u64 __read_mostly shadow_trap_nonpresent_pte;
static u64 __read_mostly shadow_notrap_nonpresent_pte;

void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte)
{
	shadow_trap_nonpresent_pte = trap_pte;
	shadow_notrap_nonpresent_pte = notrap_pte;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_nonpresent_ptes);

static int is_write_protection(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.cr0 & X86_CR0_WP;
}

static int is_cpuid_PSE36(void)
{
	return 1;
}

static int is_nx(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.shadow_efer & EFER_NX;
}

static int is_present_pte(unsigned long pte)
{
	return pte & PT_PRESENT_MASK;
}

static int is_shadow_present_pte(u64 pte)
{
	return pte != shadow_trap_nonpresent_pte
		&& pte != shadow_notrap_nonpresent_pte;
}

static int is_large_pte(u64 pte)
{
	return pte & PT_PAGE_SIZE_MASK;
}

static int is_writeble_pte(unsigned long pte)
{
	return pte & PT_WRITABLE_MASK;
}

static int is_dirty_pte(unsigned long pte)
{
	return pte & PT_DIRTY_MASK;
}

static int is_rmap_pte(u64 pte)
{
	return is_shadow_present_pte(pte);
}

static gfn_t pse36_gfn_delta(u32 gpte)
{
	int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;

	return (gpte & PT32_DIR_PSE36_MASK) << shift;
}
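
/*
 * Illustration: with PSE-36, bits 13-16 of a 4MB guest pde carry
 * physical-address bits 32-35.  The shift above (32 - 13 - 12 = 7)
 * turns that field into a delta expressed in page frames, i.e. bits
 * 20-23 of the resulting gfn.
 */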

static void set_shadow_pte(u64 *sptep, u64 spte)
{
#ifdef CONFIG_X86_64
	set_64bit((unsigned long *)sptep, spte);
#else
	set_64bit((unsigned long long *)sptep, spte);
#endif
}

e2dec939 251static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
2e3e5882 252 struct kmem_cache *base_cache, int min)
714b93da
AK
253{
254 void *obj;
255
256 if (cache->nobjs >= min)
e2dec939 257 return 0;
714b93da 258 while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
2e3e5882 259 obj = kmem_cache_zalloc(base_cache, GFP_KERNEL);
714b93da 260 if (!obj)
e2dec939 261 return -ENOMEM;
714b93da
AK
262 cache->objects[cache->nobjs++] = obj;
263 }
e2dec939 264 return 0;
714b93da
AK
265}
266
267static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
268{
269 while (mc->nobjs)
270 kfree(mc->objects[--mc->nobjs]);
271}
272
c1158e63 273static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
2e3e5882 274 int min)
c1158e63
AK
275{
276 struct page *page;
277
278 if (cache->nobjs >= min)
279 return 0;
280 while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
2e3e5882 281 page = alloc_page(GFP_KERNEL);
c1158e63
AK
282 if (!page)
283 return -ENOMEM;
284 set_page_private(page, 0);
285 cache->objects[cache->nobjs++] = page_address(page);
286 }
287 return 0;
288}
289
290static void mmu_free_memory_cache_page(struct kvm_mmu_memory_cache *mc)
291{
292 while (mc->nobjs)
c4d198d5 293 free_page((unsigned long)mc->objects[--mc->nobjs]);
c1158e63
AK
294}
295
2e3e5882 296static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
714b93da 297{
e2dec939
AK
298 int r;
299
ad312c7c 300 r = mmu_topup_memory_cache(&vcpu->arch.mmu_pte_chain_cache,
2e3e5882 301 pte_chain_cache, 4);
e2dec939
AK
302 if (r)
303 goto out;
ad312c7c 304 r = mmu_topup_memory_cache(&vcpu->arch.mmu_rmap_desc_cache,
2e3e5882 305 rmap_desc_cache, 1);
d3d25b04
AK
306 if (r)
307 goto out;
ad312c7c 308 r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8);
d3d25b04
AK
309 if (r)
310 goto out;
ad312c7c 311 r = mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
2e3e5882 312 mmu_page_header_cache, 4);
e2dec939
AK
313out:
314 return r;
714b93da
AK
315}
316
317static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
318{
ad312c7c
ZX
319 mmu_free_memory_cache(&vcpu->arch.mmu_pte_chain_cache);
320 mmu_free_memory_cache(&vcpu->arch.mmu_rmap_desc_cache);
321 mmu_free_memory_cache_page(&vcpu->arch.mmu_page_cache);
322 mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache);
714b93da
AK
323}
324
325static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
326 size_t size)
327{
328 void *p;
329
330 BUG_ON(!mc->nobjs);
331 p = mc->objects[--mc->nobjs];
332 memset(p, 0, size);
333 return p;
334}
335
714b93da
AK
336static struct kvm_pte_chain *mmu_alloc_pte_chain(struct kvm_vcpu *vcpu)
337{
ad312c7c 338 return mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_chain_cache,
714b93da
AK
339 sizeof(struct kvm_pte_chain));
340}
341
90cb0529 342static void mmu_free_pte_chain(struct kvm_pte_chain *pc)
714b93da 343{
90cb0529 344 kfree(pc);
714b93da
AK
345}
346
347static struct kvm_rmap_desc *mmu_alloc_rmap_desc(struct kvm_vcpu *vcpu)
348{
ad312c7c 349 return mmu_memory_cache_alloc(&vcpu->arch.mmu_rmap_desc_cache,
714b93da
AK
350 sizeof(struct kvm_rmap_desc));
351}
352
90cb0529 353static void mmu_free_rmap_desc(struct kvm_rmap_desc *rd)
714b93da 354{
90cb0529 355 kfree(rd);
714b93da
AK
356}
357
/*
 * Return the pointer to the largepage write count for a given
 * gfn, handling slots that are not large page aligned.
 */
static int *slot_largepage_idx(gfn_t gfn, struct kvm_memory_slot *slot)
{
	unsigned long idx;

	idx = (gfn / KVM_PAGES_PER_HPAGE) -
	      (slot->base_gfn / KVM_PAGES_PER_HPAGE);
	return &slot->lpage_info[idx].write_count;
}
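
/*
 * Worked example (illustrative): with 4KB pages and 2MB huge pages,
 * KVM_PAGES_PER_HPAGE is 512.  For a slot with base_gfn 0x1100 and
 * gfn 0x1300, idx = (0x1300 / 512) - (0x1100 / 512) = 9 - 8 = 1, even
 * though the slot itself does not start on a huge-page boundary.
 */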
370
371static void account_shadowed(struct kvm *kvm, gfn_t gfn)
372{
373 int *write_count;
374
375 write_count = slot_largepage_idx(gfn, gfn_to_memslot(kvm, gfn));
376 *write_count += 1;
377 WARN_ON(*write_count > KVM_PAGES_PER_HPAGE);
378}
379
380static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn)
381{
382 int *write_count;
383
384 write_count = slot_largepage_idx(gfn, gfn_to_memslot(kvm, gfn));
385 *write_count -= 1;
386 WARN_ON(*write_count < 0);
387}
388
389static int has_wrprotected_page(struct kvm *kvm, gfn_t gfn)
390{
391 struct kvm_memory_slot *slot = gfn_to_memslot(kvm, gfn);
392 int *largepage_idx;
393
394 if (slot) {
395 largepage_idx = slot_largepage_idx(gfn, slot);
396 return *largepage_idx;
397 }
398
399 return 1;
400}
401
402static int host_largepage_backed(struct kvm *kvm, gfn_t gfn)
403{
404 struct vm_area_struct *vma;
405 unsigned long addr;
406
407 addr = gfn_to_hva(kvm, gfn);
408 if (kvm_is_error_hva(addr))
409 return 0;
410
411 vma = find_vma(current->mm, addr);
412 if (vma && is_vm_hugetlb_page(vma))
413 return 1;
414
415 return 0;
416}
417
418static int is_largepage_backed(struct kvm_vcpu *vcpu, gfn_t large_gfn)
419{
420 struct kvm_memory_slot *slot;
421
422 if (has_wrprotected_page(vcpu->kvm, large_gfn))
423 return 0;
424
425 if (!host_largepage_backed(vcpu->kvm, large_gfn))
426 return 0;
427
428 slot = gfn_to_memslot(vcpu->kvm, large_gfn);
429 if (slot && slot->dirty_bitmap)
430 return 0;
431
432 return 1;
433}
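
/*
 * In short, a gfn may be mapped with a large spte only if no page in its
 * huge-page frame is already shadowed (write-protected), the host VMA
 * backing it is a hugetlb mapping, and the slot is not being dirty-logged.
 */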
434
/*
 * Take gfn and return the reverse mapping to it.
 * Note: gfn must be unaliased before this function gets called.
 */

05da4558 440static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int lpage)
290fc38d
IE
441{
442 struct kvm_memory_slot *slot;
05da4558 443 unsigned long idx;
290fc38d
IE
444
445 slot = gfn_to_memslot(kvm, gfn);
05da4558
MT
446 if (!lpage)
447 return &slot->rmap[gfn - slot->base_gfn];
448
449 idx = (gfn / KVM_PAGES_PER_HPAGE) -
450 (slot->base_gfn / KVM_PAGES_PER_HPAGE);
451
452 return &slot->lpage_info[idx].rmap_pde;
290fc38d
IE
453}
454
/*
 * Reverse mapping data structures:
 *
 * If rmapp bit zero is zero, then *rmapp points to the shadow page table
 * entry that points to page_address(page).
 *
 * If rmapp bit zero is one, (*rmapp & ~1) points to a struct kvm_rmap_desc
 * containing more mappings.
 */
05da4558 464static void rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn, int lpage)
cd4a4e53 465{
4db35314 466 struct kvm_mmu_page *sp;
cd4a4e53 467 struct kvm_rmap_desc *desc;
290fc38d 468 unsigned long *rmapp;
cd4a4e53
AK
469 int i;
470
471 if (!is_rmap_pte(*spte))
472 return;
290fc38d 473 gfn = unalias_gfn(vcpu->kvm, gfn);
4db35314
AK
474 sp = page_header(__pa(spte));
475 sp->gfns[spte - sp->spt] = gfn;
05da4558 476 rmapp = gfn_to_rmap(vcpu->kvm, gfn, lpage);
290fc38d 477 if (!*rmapp) {
cd4a4e53 478 rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte);
290fc38d
IE
479 *rmapp = (unsigned long)spte;
480 } else if (!(*rmapp & 1)) {
cd4a4e53 481 rmap_printk("rmap_add: %p %llx 1->many\n", spte, *spte);
714b93da 482 desc = mmu_alloc_rmap_desc(vcpu);
290fc38d 483 desc->shadow_ptes[0] = (u64 *)*rmapp;
cd4a4e53 484 desc->shadow_ptes[1] = spte;
290fc38d 485 *rmapp = (unsigned long)desc | 1;
cd4a4e53
AK
486 } else {
487 rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte);
290fc38d 488 desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
cd4a4e53
AK
489 while (desc->shadow_ptes[RMAP_EXT-1] && desc->more)
490 desc = desc->more;
491 if (desc->shadow_ptes[RMAP_EXT-1]) {
714b93da 492 desc->more = mmu_alloc_rmap_desc(vcpu);
cd4a4e53
AK
493 desc = desc->more;
494 }
495 for (i = 0; desc->shadow_ptes[i]; ++i)
496 ;
497 desc->shadow_ptes[i] = spte;
498 }
499}
500
290fc38d 501static void rmap_desc_remove_entry(unsigned long *rmapp,
cd4a4e53
AK
502 struct kvm_rmap_desc *desc,
503 int i,
504 struct kvm_rmap_desc *prev_desc)
505{
506 int j;
507
508 for (j = RMAP_EXT - 1; !desc->shadow_ptes[j] && j > i; --j)
509 ;
510 desc->shadow_ptes[i] = desc->shadow_ptes[j];
11718b4d 511 desc->shadow_ptes[j] = NULL;
cd4a4e53
AK
512 if (j != 0)
513 return;
514 if (!prev_desc && !desc->more)
290fc38d 515 *rmapp = (unsigned long)desc->shadow_ptes[0];
cd4a4e53
AK
516 else
517 if (prev_desc)
518 prev_desc->more = desc->more;
519 else
290fc38d 520 *rmapp = (unsigned long)desc->more | 1;
90cb0529 521 mmu_free_rmap_desc(desc);
cd4a4e53
AK
522}
523
290fc38d 524static void rmap_remove(struct kvm *kvm, u64 *spte)
cd4a4e53 525{
cd4a4e53
AK
526 struct kvm_rmap_desc *desc;
527 struct kvm_rmap_desc *prev_desc;
4db35314 528 struct kvm_mmu_page *sp;
76c35c6e 529 struct page *page;
290fc38d 530 unsigned long *rmapp;
cd4a4e53
AK
531 int i;
532
533 if (!is_rmap_pte(*spte))
534 return;
4db35314 535 sp = page_header(__pa(spte));
76c35c6e 536 page = pfn_to_page((*spte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT);
448353ca 537 mark_page_accessed(page);
b4231d61 538 if (is_writeble_pte(*spte))
76c35c6e 539 kvm_release_page_dirty(page);
b4231d61 540 else
76c35c6e 541 kvm_release_page_clean(page);
05da4558 542 rmapp = gfn_to_rmap(kvm, sp->gfns[spte - sp->spt], is_large_pte(*spte));
290fc38d 543 if (!*rmapp) {
cd4a4e53
AK
544 printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
545 BUG();
290fc38d 546 } else if (!(*rmapp & 1)) {
cd4a4e53 547 rmap_printk("rmap_remove: %p %llx 1->0\n", spte, *spte);
290fc38d 548 if ((u64 *)*rmapp != spte) {
cd4a4e53
AK
549 printk(KERN_ERR "rmap_remove: %p %llx 1->BUG\n",
550 spte, *spte);
551 BUG();
552 }
290fc38d 553 *rmapp = 0;
cd4a4e53
AK
554 } else {
555 rmap_printk("rmap_remove: %p %llx many->many\n", spte, *spte);
290fc38d 556 desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
cd4a4e53
AK
557 prev_desc = NULL;
558 while (desc) {
559 for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i)
560 if (desc->shadow_ptes[i] == spte) {
290fc38d 561 rmap_desc_remove_entry(rmapp,
714b93da 562 desc, i,
cd4a4e53
AK
563 prev_desc);
564 return;
565 }
566 prev_desc = desc;
567 desc = desc->more;
568 }
569 BUG();
570 }
571}
572
98348e95 573static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
374cbac0 574{
374cbac0 575 struct kvm_rmap_desc *desc;
98348e95
IE
576 struct kvm_rmap_desc *prev_desc;
577 u64 *prev_spte;
578 int i;
579
580 if (!*rmapp)
581 return NULL;
582 else if (!(*rmapp & 1)) {
583 if (!spte)
584 return (u64 *)*rmapp;
585 return NULL;
586 }
587 desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
588 prev_desc = NULL;
589 prev_spte = NULL;
590 while (desc) {
591 for (i = 0; i < RMAP_EXT && desc->shadow_ptes[i]; ++i) {
592 if (prev_spte == spte)
593 return desc->shadow_ptes[i];
594 prev_spte = desc->shadow_ptes[i];
595 }
596 desc = desc->more;
597 }
598 return NULL;
599}
600
601static void rmap_write_protect(struct kvm *kvm, u64 gfn)
602{
290fc38d 603 unsigned long *rmapp;
374cbac0 604 u64 *spte;
caa5b8a5 605 int write_protected = 0;
374cbac0 606
4a4c9924 607 gfn = unalias_gfn(kvm, gfn);
05da4558 608 rmapp = gfn_to_rmap(kvm, gfn, 0);
374cbac0 609
98348e95
IE
610 spte = rmap_next(kvm, rmapp, NULL);
611 while (spte) {
374cbac0 612 BUG_ON(!spte);
374cbac0 613 BUG_ON(!(*spte & PT_PRESENT_MASK));
374cbac0 614 rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
caa5b8a5 615 if (is_writeble_pte(*spte)) {
9647c14c 616 set_shadow_pte(spte, *spte & ~PT_WRITABLE_MASK);
caa5b8a5
ED
617 write_protected = 1;
618 }
9647c14c 619 spte = rmap_next(kvm, rmapp, spte);
374cbac0 620 }
05da4558
MT
621 /* check for huge page mappings */
622 rmapp = gfn_to_rmap(kvm, gfn, 1);
623 spte = rmap_next(kvm, rmapp, NULL);
624 while (spte) {
625 BUG_ON(!spte);
626 BUG_ON(!(*spte & PT_PRESENT_MASK));
627 BUG_ON((*spte & (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK)) != (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK));
628 pgprintk("rmap_write_protect(large): spte %p %llx %lld\n", spte, *spte, gfn);
629 if (is_writeble_pte(*spte)) {
630 rmap_remove(kvm, spte);
631 --kvm->stat.lpages;
632 set_shadow_pte(spte, shadow_trap_nonpresent_pte);
633 write_protected = 1;
634 }
635 spte = rmap_next(kvm, rmapp, spte);
636 }
637
caa5b8a5
ED
638 if (write_protected)
639 kvm_flush_remote_tlbs(kvm);
05da4558
MT
640
641 account_shadowed(kvm, gfn);
374cbac0
AK
642}
643
d6c69ee9 644#ifdef MMU_DEBUG
47ad8e68 645static int is_empty_shadow_page(u64 *spt)
6aa8b732 646{
139bdb2d
AK
647 u64 *pos;
648 u64 *end;
649
47ad8e68 650 for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
d196e343 651 if (*pos != shadow_trap_nonpresent_pte) {
b8688d51 652 printk(KERN_ERR "%s: %p %llx\n", __func__,
139bdb2d 653 pos, *pos);
6aa8b732 654 return 0;
139bdb2d 655 }
6aa8b732
AK
656 return 1;
657}
d6c69ee9 658#endif
6aa8b732 659
4db35314 660static void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp)
260746c0 661{
4db35314
AK
662 ASSERT(is_empty_shadow_page(sp->spt));
663 list_del(&sp->link);
664 __free_page(virt_to_page(sp->spt));
665 __free_page(virt_to_page(sp->gfns));
666 kfree(sp);
f05e70ac 667 ++kvm->arch.n_free_mmu_pages;
260746c0
AK
668}
669
static unsigned kvm_page_table_hashfn(gfn_t gfn)
{
	return gfn & ((1 << KVM_MMU_HASH_SHIFT) - 1);
}
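
/*
 * The hash is simply the low KVM_MMU_HASH_SHIFT bits of the gfn; it
 * indexes the kvm->arch.mmu_page_hash buckets used by the shadow-page
 * lookup and write-tracking paths below.
 */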
674
25c0de2c
AK
675static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
676 u64 *parent_pte)
6aa8b732 677{
4db35314 678 struct kvm_mmu_page *sp;
6aa8b732 679
ad312c7c
ZX
680 sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache, sizeof *sp);
681 sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
682 sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
4db35314 683 set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
f05e70ac 684 list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
4db35314
AK
685 ASSERT(is_empty_shadow_page(sp->spt));
686 sp->slot_bitmap = 0;
687 sp->multimapped = 0;
688 sp->parent_pte = parent_pte;
f05e70ac 689 --vcpu->kvm->arch.n_free_mmu_pages;
4db35314 690 return sp;
6aa8b732
AK
691}
692
714b93da 693static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
4db35314 694 struct kvm_mmu_page *sp, u64 *parent_pte)
cea0f0e7
AK
695{
696 struct kvm_pte_chain *pte_chain;
697 struct hlist_node *node;
698 int i;
699
700 if (!parent_pte)
701 return;
4db35314
AK
702 if (!sp->multimapped) {
703 u64 *old = sp->parent_pte;
cea0f0e7
AK
704
705 if (!old) {
4db35314 706 sp->parent_pte = parent_pte;
cea0f0e7
AK
707 return;
708 }
4db35314 709 sp->multimapped = 1;
714b93da 710 pte_chain = mmu_alloc_pte_chain(vcpu);
4db35314
AK
711 INIT_HLIST_HEAD(&sp->parent_ptes);
712 hlist_add_head(&pte_chain->link, &sp->parent_ptes);
cea0f0e7
AK
713 pte_chain->parent_ptes[0] = old;
714 }
4db35314 715 hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link) {
cea0f0e7
AK
716 if (pte_chain->parent_ptes[NR_PTE_CHAIN_ENTRIES-1])
717 continue;
718 for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i)
719 if (!pte_chain->parent_ptes[i]) {
720 pte_chain->parent_ptes[i] = parent_pte;
721 return;
722 }
723 }
714b93da 724 pte_chain = mmu_alloc_pte_chain(vcpu);
cea0f0e7 725 BUG_ON(!pte_chain);
4db35314 726 hlist_add_head(&pte_chain->link, &sp->parent_ptes);
cea0f0e7
AK
727 pte_chain->parent_ptes[0] = parent_pte;
728}
729
4db35314 730static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
cea0f0e7
AK
731 u64 *parent_pte)
732{
733 struct kvm_pte_chain *pte_chain;
734 struct hlist_node *node;
735 int i;
736
4db35314
AK
737 if (!sp->multimapped) {
738 BUG_ON(sp->parent_pte != parent_pte);
739 sp->parent_pte = NULL;
cea0f0e7
AK
740 return;
741 }
4db35314 742 hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
cea0f0e7
AK
743 for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
744 if (!pte_chain->parent_ptes[i])
745 break;
746 if (pte_chain->parent_ptes[i] != parent_pte)
747 continue;
697fe2e2
AK
748 while (i + 1 < NR_PTE_CHAIN_ENTRIES
749 && pte_chain->parent_ptes[i + 1]) {
cea0f0e7
AK
750 pte_chain->parent_ptes[i]
751 = pte_chain->parent_ptes[i + 1];
752 ++i;
753 }
754 pte_chain->parent_ptes[i] = NULL;
697fe2e2
AK
755 if (i == 0) {
756 hlist_del(&pte_chain->link);
90cb0529 757 mmu_free_pte_chain(pte_chain);
4db35314
AK
758 if (hlist_empty(&sp->parent_ptes)) {
759 sp->multimapped = 0;
760 sp->parent_pte = NULL;
697fe2e2
AK
761 }
762 }
cea0f0e7
AK
763 return;
764 }
765 BUG();
766}
767
4db35314 768static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm, gfn_t gfn)
cea0f0e7
AK
769{
770 unsigned index;
771 struct hlist_head *bucket;
4db35314 772 struct kvm_mmu_page *sp;
cea0f0e7
AK
773 struct hlist_node *node;
774
b8688d51 775 pgprintk("%s: looking for gfn %lx\n", __func__, gfn);
1ae0a13d 776 index = kvm_page_table_hashfn(gfn);
f05e70ac 777 bucket = &kvm->arch.mmu_page_hash[index];
4db35314 778 hlist_for_each_entry(sp, node, bucket, hash_link)
2e53d63a
MT
779 if (sp->gfn == gfn && !sp->role.metaphysical
780 && !sp->role.invalid) {
cea0f0e7 781 pgprintk("%s: found role %x\n",
b8688d51 782 __func__, sp->role.word);
4db35314 783 return sp;
cea0f0e7
AK
784 }
785 return NULL;
786}
787
788static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
789 gfn_t gfn,
790 gva_t gaddr,
791 unsigned level,
792 int metaphysical,
41074d07 793 unsigned access,
f7d9c7b7 794 u64 *parent_pte)
cea0f0e7
AK
795{
796 union kvm_mmu_page_role role;
797 unsigned index;
798 unsigned quadrant;
799 struct hlist_head *bucket;
4db35314 800 struct kvm_mmu_page *sp;
cea0f0e7
AK
801 struct hlist_node *node;
802
803 role.word = 0;
ad312c7c 804 role.glevels = vcpu->arch.mmu.root_level;
cea0f0e7
AK
805 role.level = level;
806 role.metaphysical = metaphysical;
41074d07 807 role.access = access;
ad312c7c 808 if (vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) {
cea0f0e7
AK
809 quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
810 quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
811 role.quadrant = quadrant;
812 }
b8688d51 813 pgprintk("%s: looking gfn %lx role %x\n", __func__,
cea0f0e7 814 gfn, role.word);
1ae0a13d 815 index = kvm_page_table_hashfn(gfn);
f05e70ac 816 bucket = &vcpu->kvm->arch.mmu_page_hash[index];
4db35314
AK
817 hlist_for_each_entry(sp, node, bucket, hash_link)
818 if (sp->gfn == gfn && sp->role.word == role.word) {
819 mmu_page_add_parent_pte(vcpu, sp, parent_pte);
b8688d51 820 pgprintk("%s: found\n", __func__);
4db35314 821 return sp;
cea0f0e7 822 }
dfc5aa00 823 ++vcpu->kvm->stat.mmu_cache_miss;
4db35314
AK
824 sp = kvm_mmu_alloc_page(vcpu, parent_pte);
825 if (!sp)
826 return sp;
b8688d51 827 pgprintk("%s: adding gfn %lx role %x\n", __func__, gfn, role.word);
4db35314
AK
828 sp->gfn = gfn;
829 sp->role = role;
830 hlist_add_head(&sp->hash_link, bucket);
ad312c7c 831 vcpu->arch.mmu.prefetch_page(vcpu, sp);
374cbac0 832 if (!metaphysical)
4a4c9924 833 rmap_write_protect(vcpu->kvm, gfn);
4db35314 834 return sp;
cea0f0e7
AK
835}
836
90cb0529 837static void kvm_mmu_page_unlink_children(struct kvm *kvm,
4db35314 838 struct kvm_mmu_page *sp)
a436036b 839{
697fe2e2
AK
840 unsigned i;
841 u64 *pt;
842 u64 ent;
843
4db35314 844 pt = sp->spt;
697fe2e2 845
4db35314 846 if (sp->role.level == PT_PAGE_TABLE_LEVEL) {
697fe2e2 847 for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
c7addb90 848 if (is_shadow_present_pte(pt[i]))
290fc38d 849 rmap_remove(kvm, &pt[i]);
c7addb90 850 pt[i] = shadow_trap_nonpresent_pte;
697fe2e2 851 }
90cb0529 852 kvm_flush_remote_tlbs(kvm);
697fe2e2
AK
853 return;
854 }
855
856 for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
857 ent = pt[i];
858
05da4558
MT
859 if (is_shadow_present_pte(ent)) {
860 if (!is_large_pte(ent)) {
861 ent &= PT64_BASE_ADDR_MASK;
862 mmu_page_remove_parent_pte(page_header(ent),
863 &pt[i]);
864 } else {
865 --kvm->stat.lpages;
866 rmap_remove(kvm, &pt[i]);
867 }
868 }
c7addb90 869 pt[i] = shadow_trap_nonpresent_pte;
697fe2e2 870 }
90cb0529 871 kvm_flush_remote_tlbs(kvm);
a436036b
AK
872}
873
4db35314 874static void kvm_mmu_put_page(struct kvm_mmu_page *sp, u64 *parent_pte)
cea0f0e7 875{
4db35314 876 mmu_page_remove_parent_pte(sp, parent_pte);
a436036b
AK
877}
878
12b7d28f
AK
879static void kvm_mmu_reset_last_pte_updated(struct kvm *kvm)
880{
881 int i;
882
883 for (i = 0; i < KVM_MAX_VCPUS; ++i)
884 if (kvm->vcpus[i])
ad312c7c 885 kvm->vcpus[i]->arch.last_pte_updated = NULL;
12b7d28f
AK
886}
887
4db35314 888static void kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
a436036b
AK
889{
890 u64 *parent_pte;
891
4cee5764 892 ++kvm->stat.mmu_shadow_zapped;
4db35314
AK
893 while (sp->multimapped || sp->parent_pte) {
894 if (!sp->multimapped)
895 parent_pte = sp->parent_pte;
a436036b
AK
896 else {
897 struct kvm_pte_chain *chain;
898
4db35314 899 chain = container_of(sp->parent_ptes.first,
a436036b
AK
900 struct kvm_pte_chain, link);
901 parent_pte = chain->parent_ptes[0];
902 }
697fe2e2 903 BUG_ON(!parent_pte);
4db35314 904 kvm_mmu_put_page(sp, parent_pte);
c7addb90 905 set_shadow_pte(parent_pte, shadow_trap_nonpresent_pte);
a436036b 906 }
4db35314
AK
907 kvm_mmu_page_unlink_children(kvm, sp);
908 if (!sp->root_count) {
05da4558
MT
909 if (!sp->role.metaphysical)
910 unaccount_shadowed(kvm, sp->gfn);
4db35314
AK
911 hlist_del(&sp->hash_link);
912 kvm_mmu_free_page(kvm, sp);
2e53d63a 913 } else {
f05e70ac 914 list_move(&sp->link, &kvm->arch.active_mmu_pages);
2e53d63a
MT
915 sp->role.invalid = 1;
916 kvm_reload_remote_mmus(kvm);
917 }
12b7d28f 918 kvm_mmu_reset_last_pte_updated(kvm);
a436036b
AK
919}
920
/*
 * Changing the number of mmu pages allocated to the vm.
 * Note: if kvm_nr_mmu_pages is too small, you will get a deadlock.
 */
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
{
	/*
	 * If we set the number of mmu pages to be smaller than the
	 * number of active pages, we must free some mmu pages before we
	 * change the value.
	 */

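	/*
	 * Shrinking: used pages = n_alloc_mmu_pages - n_free_mmu_pages.
	 * Pages are zapped from the tail of active_mmu_pages (its oldest
	 * entries) until the used count fits the new limit.
	 */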
f05e70ac 933 if ((kvm->arch.n_alloc_mmu_pages - kvm->arch.n_free_mmu_pages) >
82ce2c96 934 kvm_nr_mmu_pages) {
f05e70ac
ZX
935 int n_used_mmu_pages = kvm->arch.n_alloc_mmu_pages
936 - kvm->arch.n_free_mmu_pages;
82ce2c96
IE
937
938 while (n_used_mmu_pages > kvm_nr_mmu_pages) {
939 struct kvm_mmu_page *page;
940
f05e70ac 941 page = container_of(kvm->arch.active_mmu_pages.prev,
82ce2c96
IE
942 struct kvm_mmu_page, link);
943 kvm_mmu_zap_page(kvm, page);
944 n_used_mmu_pages--;
945 }
f05e70ac 946 kvm->arch.n_free_mmu_pages = 0;
82ce2c96
IE
947 }
948 else
f05e70ac
ZX
949 kvm->arch.n_free_mmu_pages += kvm_nr_mmu_pages
950 - kvm->arch.n_alloc_mmu_pages;
82ce2c96 951
f05e70ac 952 kvm->arch.n_alloc_mmu_pages = kvm_nr_mmu_pages;
82ce2c96
IE
953}
954
f67a46f4 955static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
a436036b
AK
956{
957 unsigned index;
958 struct hlist_head *bucket;
4db35314 959 struct kvm_mmu_page *sp;
a436036b
AK
960 struct hlist_node *node, *n;
961 int r;
962
b8688d51 963 pgprintk("%s: looking for gfn %lx\n", __func__, gfn);
a436036b 964 r = 0;
1ae0a13d 965 index = kvm_page_table_hashfn(gfn);
f05e70ac 966 bucket = &kvm->arch.mmu_page_hash[index];
4db35314
AK
967 hlist_for_each_entry_safe(sp, node, n, bucket, hash_link)
968 if (sp->gfn == gfn && !sp->role.metaphysical) {
b8688d51 969 pgprintk("%s: gfn %lx role %x\n", __func__, gfn,
4db35314
AK
970 sp->role.word);
971 kvm_mmu_zap_page(kvm, sp);
a436036b
AK
972 r = 1;
973 }
974 return r;
cea0f0e7
AK
975}
976
f67a46f4 977static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
97a0a01e 978{
4db35314 979 struct kvm_mmu_page *sp;
97a0a01e 980
4db35314 981 while ((sp = kvm_mmu_lookup_page(kvm, gfn)) != NULL) {
b8688d51 982 pgprintk("%s: zap %lx %x\n", __func__, gfn, sp->role.word);
4db35314 983 kvm_mmu_zap_page(kvm, sp);
97a0a01e
AK
984 }
985}
986
38c335f1 987static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn)
6aa8b732 988{
38c335f1 989 int slot = memslot_id(kvm, gfn_to_memslot(kvm, gfn));
4db35314 990 struct kvm_mmu_page *sp = page_header(__pa(pte));
6aa8b732 991
4db35314 992 __set_bit(slot, &sp->slot_bitmap);
6aa8b732
AK
993}
994
039576c0
AK
995struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
996{
72dc67a6
IE
997 struct page *page;
998
ad312c7c 999 gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
039576c0
AK
1000
1001 if (gpa == UNMAPPED_GVA)
1002 return NULL;
72dc67a6
IE
1003
1004 down_read(&current->mm->mmap_sem);
1005 page = gfn_to_page(vcpu->kvm, gpa >> PAGE_SHIFT);
1006 up_read(&current->mm->mmap_sem);
1007
1008 return page;
039576c0
AK
1009}
1010
1c4f1fd6
AK
1011static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
1012 unsigned pt_access, unsigned pte_access,
1013 int user_fault, int write_fault, int dirty,
05da4558
MT
1014 int *ptwrite, int largepage, gfn_t gfn,
1015 struct page *page)
1c4f1fd6
AK
1016{
1017 u64 spte;
15aaa819 1018 int was_rmapped = 0;
75e68e60 1019 int was_writeble = is_writeble_pte(*shadow_pte);
15aaa819 1020 hfn_t host_pfn = (*shadow_pte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
1c4f1fd6 1021
bc750ba8 1022 pgprintk("%s: spte %llx access %x write_fault %d"
1c4f1fd6 1023 " user_fault %d gfn %lx\n",
b8688d51 1024 __func__, *shadow_pte, pt_access,
1c4f1fd6
AK
1025 write_fault, user_fault, gfn);
1026
15aaa819 1027 if (is_rmap_pte(*shadow_pte)) {
05da4558
MT
1028 /*
1029 * If we overwrite a PTE page pointer with a 2MB PMD, unlink
1030 * the parent of the now unreachable PTE.
1031 */
1032 if (largepage && !is_large_pte(*shadow_pte)) {
1033 struct kvm_mmu_page *child;
1034 u64 pte = *shadow_pte;
1035
1036 child = page_header(pte & PT64_BASE_ADDR_MASK);
1037 mmu_page_remove_parent_pte(child, shadow_pte);
1038 } else if (host_pfn != page_to_pfn(page)) {
15aaa819
MT
1039 pgprintk("hfn old %lx new %lx\n",
1040 host_pfn, page_to_pfn(page));
1041 rmap_remove(vcpu->kvm, shadow_pte);
05da4558
MT
1042 } else {
1043 if (largepage)
1044 was_rmapped = is_large_pte(*shadow_pte);
1045 else
1046 was_rmapped = 1;
15aaa819 1047 }
15aaa819
MT
1048 }
1049
1c4f1fd6
AK
1050 /*
1051 * We don't set the accessed bit, since we sometimes want to see
1052 * whether the guest actually used the pte (in order to detect
1053 * demand paging).
1054 */
1055 spte = PT_PRESENT_MASK | PT_DIRTY_MASK;
1056 if (!dirty)
1057 pte_access &= ~ACC_WRITE_MASK;
1058 if (!(pte_access & ACC_EXEC_MASK))
1059 spte |= PT64_NX_MASK;
1060
1c4f1fd6
AK
1061 spte |= PT_PRESENT_MASK;
1062 if (pte_access & ACC_USER_MASK)
1063 spte |= PT_USER_MASK;
05da4558
MT
1064 if (largepage)
1065 spte |= PT_PAGE_SIZE_MASK;
1c4f1fd6 1066
1c4f1fd6
AK
1067 spte |= page_to_phys(page);
1068
1069 if ((pte_access & ACC_WRITE_MASK)
1070 || (write_fault && !is_write_protection(vcpu) && !user_fault)) {
1071 struct kvm_mmu_page *shadow;
1072
1073 spte |= PT_WRITABLE_MASK;
1074 if (user_fault) {
1075 mmu_unshadow(vcpu->kvm, gfn);
1076 goto unshadowed;
1077 }
1078
1079 shadow = kvm_mmu_lookup_page(vcpu->kvm, gfn);
05da4558
MT
1080 if (shadow ||
1081 (largepage && has_wrprotected_page(vcpu->kvm, gfn))) {
1c4f1fd6 1082 pgprintk("%s: found shadow page for %lx, marking ro\n",
b8688d51 1083 __func__, gfn);
1c4f1fd6
AK
1084 pte_access &= ~ACC_WRITE_MASK;
1085 if (is_writeble_pte(spte)) {
1086 spte &= ~PT_WRITABLE_MASK;
1087 kvm_x86_ops->tlb_flush(vcpu);
1088 }
1089 if (write_fault)
1090 *ptwrite = 1;
1091 }
1092 }
1093
1094unshadowed:
1095
1096 if (pte_access & ACC_WRITE_MASK)
1097 mark_page_dirty(vcpu->kvm, gfn);
1098
b8688d51 1099 pgprintk("%s: setting spte %llx\n", __func__, spte);
05da4558
MT
1100 pgprintk("instantiating %s PTE (%s) at %d (%llx) addr %llx\n",
1101 (spte&PT_PAGE_SIZE_MASK)? "2MB" : "4kB",
1102 (spte&PT_WRITABLE_MASK)?"RW":"R", gfn, spte, shadow_pte);
1c4f1fd6 1103 set_shadow_pte(shadow_pte, spte);
05da4558
MT
1104 if (!was_rmapped && (spte & PT_PAGE_SIZE_MASK)
1105 && (spte & PT_PRESENT_MASK))
1106 ++vcpu->kvm->stat.lpages;
1107
1c4f1fd6
AK
1108 page_header_update_slot(vcpu->kvm, shadow_pte, gfn);
1109 if (!was_rmapped) {
05da4558 1110 rmap_add(vcpu, shadow_pte, gfn, largepage);
1c4f1fd6
AK
1111 if (!is_rmap_pte(*shadow_pte))
1112 kvm_release_page_clean(page);
75e68e60
IE
1113 } else {
1114 if (was_writeble)
1115 kvm_release_page_dirty(page);
1116 else
1117 kvm_release_page_clean(page);
1c4f1fd6 1118 }
1c4f1fd6 1119 if (!ptwrite || !*ptwrite)
ad312c7c 1120 vcpu->arch.last_pte_updated = shadow_pte;
1c4f1fd6
AK
1121}
1122
6aa8b732
AK
1123static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
1124{
1125}
1126
4d9976bb 1127static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
05da4558
MT
1128 int largepage, gfn_t gfn, struct page *page,
1129 int level)
6aa8b732 1130{
ad312c7c 1131 hpa_t table_addr = vcpu->arch.mmu.root_hpa;
e833240f 1132 int pt_write = 0;
6aa8b732
AK
1133
1134 for (; ; level--) {
1135 u32 index = PT64_INDEX(v, level);
1136 u64 *table;
1137
1138 ASSERT(VALID_PAGE(table_addr));
1139 table = __va(table_addr);
1140
1141 if (level == 1) {
e833240f 1142 mmu_set_spte(vcpu, &table[index], ACC_ALL, ACC_ALL,
05da4558
MT
1143 0, write, 1, &pt_write, 0, gfn, page);
1144 return pt_write;
1145 }
1146
1147 if (largepage && level == 2) {
1148 mmu_set_spte(vcpu, &table[index], ACC_ALL, ACC_ALL,
1149 0, write, 1, &pt_write, 1, gfn, page);
d196e343 1150 return pt_write;
6aa8b732
AK
1151 }
1152
c7addb90 1153 if (table[index] == shadow_trap_nonpresent_pte) {
25c0de2c 1154 struct kvm_mmu_page *new_table;
cea0f0e7 1155 gfn_t pseudo_gfn;
6aa8b732 1156
cea0f0e7
AK
1157 pseudo_gfn = (v & PT64_DIR_BASE_ADDR_MASK)
1158 >> PAGE_SHIFT;
1159 new_table = kvm_mmu_get_page(vcpu, pseudo_gfn,
1160 v, level - 1,
f7d9c7b7 1161 1, ACC_ALL, &table[index]);
25c0de2c 1162 if (!new_table) {
6aa8b732 1163 pgprintk("nonpaging_map: ENOMEM\n");
d7824fff 1164 kvm_release_page_clean(page);
6aa8b732
AK
1165 return -ENOMEM;
1166 }
1167
47ad8e68 1168 table[index] = __pa(new_table->spt) | PT_PRESENT_MASK
25c0de2c 1169 | PT_WRITABLE_MASK | PT_USER_MASK;
6aa8b732
AK
1170 }
1171 table_addr = table[index] & PT64_BASE_ADDR_MASK;
1172 }
1173}
1174
10589a46
MT
1175static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
1176{
1177 int r;
05da4558 1178 int largepage = 0;
10589a46 1179
aaee2c94
MT
1180 struct page *page;
1181
72dc67a6
IE
1182 down_read(&vcpu->kvm->slots_lock);
1183
aaee2c94 1184 down_read(&current->mm->mmap_sem);
05da4558
MT
1185 if (is_largepage_backed(vcpu, gfn & ~(KVM_PAGES_PER_HPAGE-1))) {
1186 gfn &= ~(KVM_PAGES_PER_HPAGE-1);
1187 largepage = 1;
1188 }
1189
aaee2c94 1190 page = gfn_to_page(vcpu->kvm, gfn);
72dc67a6 1191 up_read(&current->mm->mmap_sem);
aaee2c94 1192
d196e343
AK
1193 /* mmio */
1194 if (is_error_page(page)) {
1195 kvm_release_page_clean(page);
1196 up_read(&vcpu->kvm->slots_lock);
1197 return 1;
1198 }
1199
aaee2c94 1200 spin_lock(&vcpu->kvm->mmu_lock);
eb787d10 1201 kvm_mmu_free_some_pages(vcpu);
05da4558
MT
1202 r = __direct_map(vcpu, v, write, largepage, gfn, page,
1203 PT32E_ROOT_LEVEL);
aaee2c94
MT
1204 spin_unlock(&vcpu->kvm->mmu_lock);
1205
72dc67a6 1206 up_read(&vcpu->kvm->slots_lock);
aaee2c94 1207
10589a46
MT
1208 return r;
1209}
1210
1211
c7addb90
AK
1212static void nonpaging_prefetch_page(struct kvm_vcpu *vcpu,
1213 struct kvm_mmu_page *sp)
1214{
1215 int i;
1216
1217 for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
1218 sp->spt[i] = shadow_trap_nonpresent_pte;
1219}
1220
17ac10ad
AK
1221static void mmu_free_roots(struct kvm_vcpu *vcpu)
1222{
1223 int i;
4db35314 1224 struct kvm_mmu_page *sp;
17ac10ad 1225
ad312c7c 1226 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
7b53aa56 1227 return;
aaee2c94 1228 spin_lock(&vcpu->kvm->mmu_lock);
17ac10ad 1229#ifdef CONFIG_X86_64
ad312c7c
ZX
1230 if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
1231 hpa_t root = vcpu->arch.mmu.root_hpa;
17ac10ad 1232
4db35314
AK
1233 sp = page_header(root);
1234 --sp->root_count;
2e53d63a
MT
1235 if (!sp->root_count && sp->role.invalid)
1236 kvm_mmu_zap_page(vcpu->kvm, sp);
ad312c7c 1237 vcpu->arch.mmu.root_hpa = INVALID_PAGE;
aaee2c94 1238 spin_unlock(&vcpu->kvm->mmu_lock);
17ac10ad
AK
1239 return;
1240 }
1241#endif
1242 for (i = 0; i < 4; ++i) {
ad312c7c 1243 hpa_t root = vcpu->arch.mmu.pae_root[i];
17ac10ad 1244
417726a3 1245 if (root) {
417726a3 1246 root &= PT64_BASE_ADDR_MASK;
4db35314
AK
1247 sp = page_header(root);
1248 --sp->root_count;
2e53d63a
MT
1249 if (!sp->root_count && sp->role.invalid)
1250 kvm_mmu_zap_page(vcpu->kvm, sp);
417726a3 1251 }
ad312c7c 1252 vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
17ac10ad 1253 }
aaee2c94 1254 spin_unlock(&vcpu->kvm->mmu_lock);
ad312c7c 1255 vcpu->arch.mmu.root_hpa = INVALID_PAGE;
17ac10ad
AK
1256}
1257
1258static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
1259{
1260 int i;
cea0f0e7 1261 gfn_t root_gfn;
4db35314 1262 struct kvm_mmu_page *sp;
fb72d167 1263 int metaphysical = 0;
3bb65a22 1264
ad312c7c 1265 root_gfn = vcpu->arch.cr3 >> PAGE_SHIFT;
17ac10ad
AK
1266
1267#ifdef CONFIG_X86_64
ad312c7c
ZX
1268 if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
1269 hpa_t root = vcpu->arch.mmu.root_hpa;
17ac10ad
AK
1270
1271 ASSERT(!VALID_PAGE(root));
fb72d167
JR
1272 if (tdp_enabled)
1273 metaphysical = 1;
4db35314 1274 sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
fb72d167
JR
1275 PT64_ROOT_LEVEL, metaphysical,
1276 ACC_ALL, NULL);
4db35314
AK
1277 root = __pa(sp->spt);
1278 ++sp->root_count;
ad312c7c 1279 vcpu->arch.mmu.root_hpa = root;
17ac10ad
AK
1280 return;
1281 }
1282#endif
fb72d167
JR
1283 metaphysical = !is_paging(vcpu);
1284 if (tdp_enabled)
1285 metaphysical = 1;
17ac10ad 1286 for (i = 0; i < 4; ++i) {
ad312c7c 1287 hpa_t root = vcpu->arch.mmu.pae_root[i];
17ac10ad
AK
1288
1289 ASSERT(!VALID_PAGE(root));
ad312c7c
ZX
1290 if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) {
1291 if (!is_present_pte(vcpu->arch.pdptrs[i])) {
1292 vcpu->arch.mmu.pae_root[i] = 0;
417726a3
AK
1293 continue;
1294 }
ad312c7c
ZX
1295 root_gfn = vcpu->arch.pdptrs[i] >> PAGE_SHIFT;
1296 } else if (vcpu->arch.mmu.root_level == 0)
cea0f0e7 1297 root_gfn = 0;
4db35314 1298 sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
fb72d167 1299 PT32_ROOT_LEVEL, metaphysical,
f7d9c7b7 1300 ACC_ALL, NULL);
4db35314
AK
1301 root = __pa(sp->spt);
1302 ++sp->root_count;
ad312c7c 1303 vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
17ac10ad 1304 }
ad312c7c 1305 vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
17ac10ad
AK
1306}
1307
6aa8b732
AK
1308static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
1309{
1310 return vaddr;
1311}
1312
1313static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
3f3e7124 1314 u32 error_code)
6aa8b732 1315{
e833240f 1316 gfn_t gfn;
e2dec939 1317 int r;
6aa8b732 1318
b8688d51 1319 pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code);
e2dec939
AK
1320 r = mmu_topup_memory_caches(vcpu);
1321 if (r)
1322 return r;
714b93da 1323
6aa8b732 1324 ASSERT(vcpu);
ad312c7c 1325 ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));
6aa8b732 1326
e833240f 1327 gfn = gva >> PAGE_SHIFT;
6aa8b732 1328
e833240f
AK
1329 return nonpaging_map(vcpu, gva & PAGE_MASK,
1330 error_code & PFERR_WRITE_MASK, gfn);
6aa8b732
AK
1331}
1332
fb72d167
JR
1333static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
1334 u32 error_code)
1335{
1336 struct page *page;
1337 int r;
05da4558
MT
1338 int largepage = 0;
1339 gfn_t gfn = gpa >> PAGE_SHIFT;
fb72d167
JR
1340
1341 ASSERT(vcpu);
1342 ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));
1343
1344 r = mmu_topup_memory_caches(vcpu);
1345 if (r)
1346 return r;
1347
1348 down_read(&current->mm->mmap_sem);
05da4558
MT
1349 if (is_largepage_backed(vcpu, gfn & ~(KVM_PAGES_PER_HPAGE-1))) {
1350 gfn &= ~(KVM_PAGES_PER_HPAGE-1);
1351 largepage = 1;
1352 }
1353 page = gfn_to_page(vcpu->kvm, gfn);
fb72d167
JR
1354 if (is_error_page(page)) {
1355 kvm_release_page_clean(page);
1356 up_read(&current->mm->mmap_sem);
1357 return 1;
1358 }
1359 spin_lock(&vcpu->kvm->mmu_lock);
1360 kvm_mmu_free_some_pages(vcpu);
1361 r = __direct_map(vcpu, gpa, error_code & PFERR_WRITE_MASK,
05da4558 1362 largepage, gfn, page, TDP_ROOT_LEVEL);
fb72d167
JR
1363 spin_unlock(&vcpu->kvm->mmu_lock);
1364 up_read(&current->mm->mmap_sem);
1365
1366 return r;
1367}
1368
6aa8b732
AK
1369static void nonpaging_free(struct kvm_vcpu *vcpu)
1370{
17ac10ad 1371 mmu_free_roots(vcpu);
6aa8b732
AK
1372}
1373
1374static int nonpaging_init_context(struct kvm_vcpu *vcpu)
1375{
ad312c7c 1376 struct kvm_mmu *context = &vcpu->arch.mmu;
6aa8b732
AK
1377
1378 context->new_cr3 = nonpaging_new_cr3;
1379 context->page_fault = nonpaging_page_fault;
6aa8b732
AK
1380 context->gva_to_gpa = nonpaging_gva_to_gpa;
1381 context->free = nonpaging_free;
c7addb90 1382 context->prefetch_page = nonpaging_prefetch_page;
cea0f0e7 1383 context->root_level = 0;
6aa8b732 1384 context->shadow_root_level = PT32E_ROOT_LEVEL;
17c3ba9d 1385 context->root_hpa = INVALID_PAGE;
6aa8b732
AK
1386 return 0;
1387}
1388
d835dfec 1389void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
6aa8b732 1390{
1165f5fe 1391 ++vcpu->stat.tlb_flush;
cbdd1bea 1392 kvm_x86_ops->tlb_flush(vcpu);
6aa8b732
AK
1393}
1394
1395static void paging_new_cr3(struct kvm_vcpu *vcpu)
1396{
b8688d51 1397 pgprintk("%s: cr3 %lx\n", __func__, vcpu->arch.cr3);
cea0f0e7 1398 mmu_free_roots(vcpu);
6aa8b732
AK
1399}
1400
6aa8b732
AK
1401static void inject_page_fault(struct kvm_vcpu *vcpu,
1402 u64 addr,
1403 u32 err_code)
1404{
c3c91fee 1405 kvm_inject_page_fault(vcpu, addr, err_code);
6aa8b732
AK
1406}
1407
6aa8b732
AK
1408static void paging_free(struct kvm_vcpu *vcpu)
1409{
1410 nonpaging_free(vcpu);
1411}
1412
1413#define PTTYPE 64
1414#include "paging_tmpl.h"
1415#undef PTTYPE
1416
1417#define PTTYPE 32
1418#include "paging_tmpl.h"
1419#undef PTTYPE
1420
17ac10ad 1421static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
6aa8b732 1422{
ad312c7c 1423 struct kvm_mmu *context = &vcpu->arch.mmu;
6aa8b732
AK
1424
1425 ASSERT(is_pae(vcpu));
1426 context->new_cr3 = paging_new_cr3;
1427 context->page_fault = paging64_page_fault;
6aa8b732 1428 context->gva_to_gpa = paging64_gva_to_gpa;
c7addb90 1429 context->prefetch_page = paging64_prefetch_page;
6aa8b732 1430 context->free = paging_free;
17ac10ad
AK
1431 context->root_level = level;
1432 context->shadow_root_level = level;
17c3ba9d 1433 context->root_hpa = INVALID_PAGE;
6aa8b732
AK
1434 return 0;
1435}
1436
17ac10ad
AK
1437static int paging64_init_context(struct kvm_vcpu *vcpu)
1438{
1439 return paging64_init_context_common(vcpu, PT64_ROOT_LEVEL);
1440}
1441
6aa8b732
AK
1442static int paging32_init_context(struct kvm_vcpu *vcpu)
1443{
ad312c7c 1444 struct kvm_mmu *context = &vcpu->arch.mmu;
6aa8b732
AK
1445
1446 context->new_cr3 = paging_new_cr3;
1447 context->page_fault = paging32_page_fault;
6aa8b732
AK
1448 context->gva_to_gpa = paging32_gva_to_gpa;
1449 context->free = paging_free;
c7addb90 1450 context->prefetch_page = paging32_prefetch_page;
6aa8b732
AK
1451 context->root_level = PT32_ROOT_LEVEL;
1452 context->shadow_root_level = PT32E_ROOT_LEVEL;
17c3ba9d 1453 context->root_hpa = INVALID_PAGE;
6aa8b732
AK
1454 return 0;
1455}
1456
1457static int paging32E_init_context(struct kvm_vcpu *vcpu)
1458{
17ac10ad 1459 return paging64_init_context_common(vcpu, PT32E_ROOT_LEVEL);
6aa8b732
AK
1460}
1461
fb72d167
JR
1462static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
1463{
1464 struct kvm_mmu *context = &vcpu->arch.mmu;
1465
1466 context->new_cr3 = nonpaging_new_cr3;
1467 context->page_fault = tdp_page_fault;
1468 context->free = nonpaging_free;
1469 context->prefetch_page = nonpaging_prefetch_page;
1470 context->shadow_root_level = TDP_ROOT_LEVEL;
1471 context->root_hpa = INVALID_PAGE;
1472
1473 if (!is_paging(vcpu)) {
1474 context->gva_to_gpa = nonpaging_gva_to_gpa;
1475 context->root_level = 0;
1476 } else if (is_long_mode(vcpu)) {
1477 context->gva_to_gpa = paging64_gva_to_gpa;
1478 context->root_level = PT64_ROOT_LEVEL;
1479 } else if (is_pae(vcpu)) {
1480 context->gva_to_gpa = paging64_gva_to_gpa;
1481 context->root_level = PT32E_ROOT_LEVEL;
1482 } else {
1483 context->gva_to_gpa = paging32_gva_to_gpa;
1484 context->root_level = PT32_ROOT_LEVEL;
1485 }
1486
1487 return 0;
1488}
1489
1490static int init_kvm_softmmu(struct kvm_vcpu *vcpu)
6aa8b732
AK
1491{
1492 ASSERT(vcpu);
ad312c7c 1493 ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
6aa8b732
AK
1494
1495 if (!is_paging(vcpu))
1496 return nonpaging_init_context(vcpu);
a9058ecd 1497 else if (is_long_mode(vcpu))
6aa8b732
AK
1498 return paging64_init_context(vcpu);
1499 else if (is_pae(vcpu))
1500 return paging32E_init_context(vcpu);
1501 else
1502 return paging32_init_context(vcpu);
1503}
1504
fb72d167
JR
1505static int init_kvm_mmu(struct kvm_vcpu *vcpu)
1506{
1507 if (tdp_enabled)
1508 return init_kvm_tdp_mmu(vcpu);
1509 else
1510 return init_kvm_softmmu(vcpu);
1511}
1512
6aa8b732
AK
1513static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
1514{
1515 ASSERT(vcpu);
ad312c7c
ZX
1516 if (VALID_PAGE(vcpu->arch.mmu.root_hpa)) {
1517 vcpu->arch.mmu.free(vcpu);
1518 vcpu->arch.mmu.root_hpa = INVALID_PAGE;
6aa8b732
AK
1519 }
1520}
1521
1522int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
17c3ba9d
AK
1523{
1524 destroy_kvm_mmu(vcpu);
1525 return init_kvm_mmu(vcpu);
1526}
8668a3c4 1527EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);
17c3ba9d
AK
1528
1529int kvm_mmu_load(struct kvm_vcpu *vcpu)
6aa8b732 1530{
714b93da
AK
1531 int r;
1532
e2dec939 1533 r = mmu_topup_memory_caches(vcpu);
17c3ba9d
AK
1534 if (r)
1535 goto out;
aaee2c94 1536 spin_lock(&vcpu->kvm->mmu_lock);
eb787d10 1537 kvm_mmu_free_some_pages(vcpu);
17c3ba9d 1538 mmu_alloc_roots(vcpu);
aaee2c94 1539 spin_unlock(&vcpu->kvm->mmu_lock);
ad312c7c 1540 kvm_x86_ops->set_cr3(vcpu, vcpu->arch.mmu.root_hpa);
17c3ba9d 1541 kvm_mmu_flush_tlb(vcpu);
714b93da
AK
1542out:
1543 return r;
6aa8b732 1544}
17c3ba9d
AK
1545EXPORT_SYMBOL_GPL(kvm_mmu_load);
1546
1547void kvm_mmu_unload(struct kvm_vcpu *vcpu)
1548{
1549 mmu_free_roots(vcpu);
1550}
6aa8b732 1551
09072daf 1552static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
4db35314 1553 struct kvm_mmu_page *sp,
ac1b714e
AK
1554 u64 *spte)
1555{
1556 u64 pte;
1557 struct kvm_mmu_page *child;
1558
1559 pte = *spte;
c7addb90 1560 if (is_shadow_present_pte(pte)) {
05da4558
MT
1561 if (sp->role.level == PT_PAGE_TABLE_LEVEL ||
1562 is_large_pte(pte))
290fc38d 1563 rmap_remove(vcpu->kvm, spte);
ac1b714e
AK
1564 else {
1565 child = page_header(pte & PT64_BASE_ADDR_MASK);
90cb0529 1566 mmu_page_remove_parent_pte(child, spte);
ac1b714e
AK
1567 }
1568 }
c7addb90 1569 set_shadow_pte(spte, shadow_trap_nonpresent_pte);
05da4558
MT
1570 if (is_large_pte(pte))
1571 --vcpu->kvm->stat.lpages;
ac1b714e
AK
1572}
1573
0028425f 1574static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
4db35314 1575 struct kvm_mmu_page *sp,
0028425f 1576 u64 *spte,
489f1d65 1577 const void *new)
0028425f 1578{
05da4558
MT
1579 if ((sp->role.level != PT_PAGE_TABLE_LEVEL)
1580 && !vcpu->arch.update_pte.largepage) {
4cee5764 1581 ++vcpu->kvm->stat.mmu_pde_zapped;
0028425f 1582 return;
4cee5764 1583 }
0028425f 1584
4cee5764 1585 ++vcpu->kvm->stat.mmu_pte_updated;
4db35314 1586 if (sp->role.glevels == PT32_ROOT_LEVEL)
489f1d65 1587 paging32_update_pte(vcpu, sp, spte, new);
0028425f 1588 else
489f1d65 1589 paging64_update_pte(vcpu, sp, spte, new);
0028425f
AK
1590}
1591
static bool need_remote_flush(u64 old, u64 new)
{
	if (!is_shadow_present_pte(old))
		return false;
	if (!is_shadow_present_pte(new))
		return true;
	if ((old ^ new) & PT64_BASE_ADDR_MASK)
		return true;
	old ^= PT64_NX_MASK;
	new ^= PT64_NX_MASK;
	return (old & ~new & PT64_PERM_MASK) != 0;
}
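
/*
 * Rationale: a remote TLB flush is only needed when the old spte was
 * present and the new one either disappears, points at a different frame,
 * or drops a permission the old one granted.  NX is xor-ed first because,
 * unlike the other PT64_PERM_MASK bits, setting it *removes* a permission.
 */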
1604
1605static void mmu_pte_write_flush_tlb(struct kvm_vcpu *vcpu, u64 old, u64 new)
1606{
1607 if (need_remote_flush(old, new))
1608 kvm_flush_remote_tlbs(vcpu->kvm);
1609 else
1610 kvm_mmu_flush_tlb(vcpu);
1611}
1612
12b7d28f
AK
1613static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
1614{
ad312c7c 1615 u64 *spte = vcpu->arch.last_pte_updated;
12b7d28f
AK
1616
1617 return !!(spte && (*spte & PT_ACCESSED_MASK));
1618}
1619
d7824fff
AK
1620static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
1621 const u8 *new, int bytes)
1622{
1623 gfn_t gfn;
1624 int r;
1625 u64 gpte = 0;
72dc67a6 1626 struct page *page;
d7824fff 1627
05da4558
MT
1628 vcpu->arch.update_pte.largepage = 0;
1629
d7824fff
AK
1630 if (bytes != 4 && bytes != 8)
1631 return;
1632
	/*
	 * Assume that the pte write is on a page table of the same type
	 * as the current vcpu paging mode.  This is nearly always true
	 * (might be false while changing modes).  Note it is verified later
	 * by update_pte().
	 */
1639 if (is_pae(vcpu)) {
1640 /* Handle a 32-bit guest writing two halves of a 64-bit gpte */
1641 if ((bytes == 4) && (gpa % 4 == 0)) {
1642 r = kvm_read_guest(vcpu->kvm, gpa & ~(u64)7, &gpte, 8);
1643 if (r)
1644 return;
1645 memcpy((void *)&gpte + (gpa % 8), new, 4);
1646 } else if ((bytes == 8) && (gpa % 8 == 0)) {
1647 memcpy((void *)&gpte, new, 8);
1648 }
1649 } else {
1650 if ((bytes == 4) && (gpa % 4 == 0))
1651 memcpy((void *)&gpte, new, 4);
1652 }
1653 if (!is_present_pte(gpte))
1654 return;
1655 gfn = (gpte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
72dc67a6 1656
05da4558
MT
1657 down_read(&current->mm->mmap_sem);
1658 if (is_large_pte(gpte) && is_largepage_backed(vcpu, gfn)) {
1659 gfn &= ~(KVM_PAGES_PER_HPAGE-1);
1660 vcpu->arch.update_pte.largepage = 1;
1661 }
72dc67a6 1662 page = gfn_to_page(vcpu->kvm, gfn);
05da4558 1663 up_read(&current->mm->mmap_sem);
72dc67a6 1664
d196e343
AK
1665 if (is_error_page(page)) {
1666 kvm_release_page_clean(page);
1667 return;
1668 }
d7824fff 1669 vcpu->arch.update_pte.gfn = gfn;
e48bb497 1670 vcpu->arch.update_pte.page = page;
d7824fff
AK
1671}
1672
09072daf 1673void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
fe551881 1674 const u8 *new, int bytes)
da4a00f0 1675{
9b7a0325 1676 gfn_t gfn = gpa >> PAGE_SHIFT;
4db35314 1677 struct kvm_mmu_page *sp;
0e7bc4b9 1678 struct hlist_node *node, *n;
9b7a0325
AK
1679 struct hlist_head *bucket;
1680 unsigned index;
489f1d65 1681 u64 entry, gentry;
9b7a0325 1682 u64 *spte;
9b7a0325 1683 unsigned offset = offset_in_page(gpa);
0e7bc4b9 1684 unsigned pte_size;
9b7a0325 1685 unsigned page_offset;
0e7bc4b9 1686 unsigned misaligned;
fce0657f 1687 unsigned quadrant;
9b7a0325 1688 int level;
86a5ba02 1689 int flooded = 0;
ac1b714e 1690 int npte;
489f1d65 1691 int r;
9b7a0325 1692
b8688d51 1693 pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
d7824fff 1694 mmu_guess_page_from_pte_write(vcpu, gpa, new, bytes);
aaee2c94 1695 spin_lock(&vcpu->kvm->mmu_lock);
eb787d10 1696 kvm_mmu_free_some_pages(vcpu);
4cee5764 1697 ++vcpu->kvm->stat.mmu_pte_write;
c7addb90 1698 kvm_mmu_audit(vcpu, "pre pte write");
ad312c7c 1699 if (gfn == vcpu->arch.last_pt_write_gfn
12b7d28f 1700 && !last_updated_pte_accessed(vcpu)) {
ad312c7c
ZX
1701 ++vcpu->arch.last_pt_write_count;
1702 if (vcpu->arch.last_pt_write_count >= 3)
86a5ba02
AK
1703 flooded = 1;
1704 } else {
ad312c7c
ZX
1705 vcpu->arch.last_pt_write_gfn = gfn;
1706 vcpu->arch.last_pt_write_count = 1;
1707 vcpu->arch.last_pte_updated = NULL;
86a5ba02 1708 }
1ae0a13d 1709 index = kvm_page_table_hashfn(gfn);
f05e70ac 1710 bucket = &vcpu->kvm->arch.mmu_page_hash[index];
4db35314
AK
1711 hlist_for_each_entry_safe(sp, node, n, bucket, hash_link) {
1712 if (sp->gfn != gfn || sp->role.metaphysical)
9b7a0325 1713 continue;
4db35314 1714 pte_size = sp->role.glevels == PT32_ROOT_LEVEL ? 4 : 8;
0e7bc4b9 1715 misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
e925c5ba 1716 misaligned |= bytes < 4;
86a5ba02 1717 if (misaligned || flooded) {
			/*
			 * Misaligned accesses are too much trouble to fix
			 * up; also, they usually indicate a page is not used
			 * as a page table.
			 *
			 * If we're seeing too many writes to a page,
			 * it may no longer be a page table, or we may be
			 * forking, in which case it is better to unmap the
			 * page.
			 */
1728 pgprintk("misaligned: gpa %llx bytes %d role %x\n",
4db35314
AK
1729 gpa, bytes, sp->role.word);
1730 kvm_mmu_zap_page(vcpu->kvm, sp);
4cee5764 1731 ++vcpu->kvm->stat.mmu_flooded;
0e7bc4b9
AK
1732 continue;
1733 }
9b7a0325 1734 page_offset = offset;
4db35314 1735 level = sp->role.level;
ac1b714e 1736 npte = 1;
4db35314 1737 if (sp->role.glevels == PT32_ROOT_LEVEL) {
			page_offset <<= 1;	/* 32->64 */
			/*
			 * A 32-bit pde maps 4MB while the shadow pdes map
			 * only 2MB.  So we need to double the offset again
			 * and zap two pdes instead of one.
			 */
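			/*
			 * Concretely: a non-PAE guest pde covers 1024 * 4KB
			 * = 4MB, while each PAE shadow pde covers 512 * 4KB
			 * = 2MB, so one guest pde is shadowed by two pdes
			 * (npte = 2) and the byte offset is doubled a second
			 * time (4-byte -> 8-byte entries was the first).
			 */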
1744 if (level == PT32_ROOT_LEVEL) {
6b8d0f9b 1745 page_offset &= ~7; /* kill rounding error */
ac1b714e
AK
1746 page_offset <<= 1;
1747 npte = 2;
1748 }
fce0657f 1749 quadrant = page_offset >> PAGE_SHIFT;
9b7a0325 1750 page_offset &= ~PAGE_MASK;
4db35314 1751 if (quadrant != sp->role.quadrant)
fce0657f 1752 continue;
9b7a0325 1753 }
4db35314 1754 spte = &sp->spt[page_offset / sizeof(*spte)];
489f1d65
DE
1755 if ((gpa & (pte_size - 1)) || (bytes < pte_size)) {
1756 gentry = 0;
1757 r = kvm_read_guest_atomic(vcpu->kvm,
1758 gpa & ~(u64)(pte_size - 1),
1759 &gentry, pte_size);
1760 new = (const void *)&gentry;
1761 if (r < 0)
1762 new = NULL;
1763 }
ac1b714e 1764 while (npte--) {
79539cec 1765 entry = *spte;
4db35314 1766 mmu_pte_write_zap_pte(vcpu, sp, spte);
489f1d65
DE
1767 if (new)
1768 mmu_pte_write_new_pte(vcpu, sp, spte, new);
79539cec 1769 mmu_pte_write_flush_tlb(vcpu, entry, *spte);
ac1b714e 1770 ++spte;
9b7a0325 1771 }
9b7a0325 1772 }
c7addb90 1773 kvm_mmu_audit(vcpu, "post pte write");
aaee2c94 1774 spin_unlock(&vcpu->kvm->mmu_lock);
d7824fff
AK
1775 if (vcpu->arch.update_pte.page) {
1776 kvm_release_page_clean(vcpu->arch.update_pte.page);
1777 vcpu->arch.update_pte.page = NULL;
1778 }
da4a00f0
AK
1779}
1780
a436036b
AK
1781int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
1782{
10589a46
MT
1783 gpa_t gpa;
1784 int r;
a436036b 1785
72dc67a6 1786 down_read(&vcpu->kvm->slots_lock);
10589a46 1787 gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, gva);
72dc67a6 1788 up_read(&vcpu->kvm->slots_lock);
10589a46 1789
aaee2c94 1790 spin_lock(&vcpu->kvm->mmu_lock);
10589a46 1791 r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
aaee2c94 1792 spin_unlock(&vcpu->kvm->mmu_lock);
10589a46 1793 return r;
a436036b
AK
1794}
1795
22d95b12 1796void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
ebeace86 1797{
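	/*
	 * Zap shadow pages from the tail of the active list until at least
	 * KVM_REFILL_PAGES pages are free again.
	 */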
f05e70ac 1798 while (vcpu->kvm->arch.n_free_mmu_pages < KVM_REFILL_PAGES) {
4db35314 1799 struct kvm_mmu_page *sp;
ebeace86 1800
f05e70ac 1801 sp = container_of(vcpu->kvm->arch.active_mmu_pages.prev,
4db35314
AK
1802 struct kvm_mmu_page, link);
1803 kvm_mmu_zap_page(vcpu->kvm, sp);
4cee5764 1804 ++vcpu->kvm->stat.mmu_recycled;
ebeace86
AK
1805 }
1806}
ebeace86 1807
3067714c
AK
1808int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
1809{
1810 int r;
1811 enum emulation_result er;
1812
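	/*
	 * The mmu fault handler returns a negative value on error, zero if
	 * the fault was fixed in the shadow page tables, and a positive
	 * value if the faulting instruction has to be emulated.
	 */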
ad312c7c 1813 r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code);
3067714c
AK
1814 if (r < 0)
1815 goto out;
1816
1817 if (!r) {
1818 r = 1;
1819 goto out;
1820 }
1821
b733bfb5
AK
1822 r = mmu_topup_memory_caches(vcpu);
1823 if (r)
1824 goto out;
1825
3067714c 1826 er = emulate_instruction(vcpu, vcpu->run, cr2, error_code, 0);
3067714c
AK
1827
1828 switch (er) {
1829 case EMULATE_DONE:
1830 return 1;
1831 case EMULATE_DO_MMIO:
1832 ++vcpu->stat.mmio_exits;
1833 return 0;
1834 case EMULATE_FAIL:
1835 kvm_report_emulation_failure(vcpu, "pagetable");
1836 return 1;
1837 default:
1838 BUG();
1839 }
1840out:
3067714c
AK
1841 return r;
1842}
1843EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
1844
18552672
JR
1845void kvm_enable_tdp(void)
1846{
1847 tdp_enabled = true;
1848}
1849EXPORT_SYMBOL_GPL(kvm_enable_tdp);
1850
6aa8b732
AK
1851static void free_mmu_pages(struct kvm_vcpu *vcpu)
1852{
4db35314 1853 struct kvm_mmu_page *sp;
6aa8b732 1854
f05e70ac
ZX
1855 while (!list_empty(&vcpu->kvm->arch.active_mmu_pages)) {
1856 sp = container_of(vcpu->kvm->arch.active_mmu_pages.next,
4db35314
AK
1857 struct kvm_mmu_page, link);
1858 kvm_mmu_zap_page(vcpu->kvm, sp);
f51234c2 1859 }
ad312c7c 1860 free_page((unsigned long)vcpu->arch.mmu.pae_root);
6aa8b732
AK
1861}
1862
1863static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
1864{
17ac10ad 1865 struct page *page;
6aa8b732
AK
1866 int i;
1867
1868 ASSERT(vcpu);
1869
f05e70ac
ZX
1870 if (vcpu->kvm->arch.n_requested_mmu_pages)
1871 vcpu->kvm->arch.n_free_mmu_pages =
1872 vcpu->kvm->arch.n_requested_mmu_pages;
82ce2c96 1873 else
f05e70ac
ZX
1874 vcpu->kvm->arch.n_free_mmu_pages =
1875 vcpu->kvm->arch.n_alloc_mmu_pages;
17ac10ad
AK
1876 /*
1877 * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
1878 * Therefore we need to allocate shadow page tables in the first
1879 * 4GB of memory, which happens to fit the DMA32 zone.
1880 */
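	/*
	 * The page allocated here backs mmu.pae_root, the four shadow root
	 * entries used while the guest runs with 32-bit or PAE paging.
	 */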
1881 page = alloc_page(GFP_KERNEL | __GFP_DMA32);
1882 if (!page)
1883 goto error_1;
ad312c7c 1884 vcpu->arch.mmu.pae_root = page_address(page);
17ac10ad 1885 for (i = 0; i < 4; ++i)
ad312c7c 1886 vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
17ac10ad 1887
6aa8b732
AK
1888 return 0;
1889
1890error_1:
1891 free_mmu_pages(vcpu);
1892 return -ENOMEM;
1893}
1894
8018c27b 1895int kvm_mmu_create(struct kvm_vcpu *vcpu)
6aa8b732 1896{
6aa8b732 1897 ASSERT(vcpu);
ad312c7c 1898 ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
6aa8b732 1899
8018c27b
IM
1900 return alloc_mmu_pages(vcpu);
1901}
6aa8b732 1902
8018c27b
IM
1903int kvm_mmu_setup(struct kvm_vcpu *vcpu)
1904{
1905 ASSERT(vcpu);
ad312c7c 1906 ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
2c264957 1907
8018c27b 1908 return init_kvm_mmu(vcpu);
6aa8b732
AK
1909}
1910
1911void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
1912{
1913 ASSERT(vcpu);
1914
1915 destroy_kvm_mmu(vcpu);
1916 free_mmu_pages(vcpu);
714b93da 1917 mmu_free_memory_caches(vcpu);
6aa8b732
AK
1918}
1919
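/*
 * Strip the writable bit from every spte belonging to shadow pages that
 * map the given memory slot, e.g. when dirty logging is enabled for it.
 */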
90cb0529 1920void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
6aa8b732 1921{
4db35314 1922 struct kvm_mmu_page *sp;
6aa8b732 1923
f05e70ac 1924 list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link) {
6aa8b732
AK
1925 int i;
1926 u64 *pt;
1927
4db35314 1928 if (!test_bit(slot, &sp->slot_bitmap))
6aa8b732
AK
1929 continue;
1930
4db35314 1931 pt = sp->spt;
6aa8b732
AK
1932 for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
 1933 /* avoid RMW: only touch ptes that are actually writable */
9647c14c 1934 if (pt[i] & PT_WRITABLE_MASK)
6aa8b732 1935 pt[i] &= ~PT_WRITABLE_MASK;
6aa8b732
AK
1936 }
1937}
37a7d8b0 1938
90cb0529 1939void kvm_mmu_zap_all(struct kvm *kvm)
e0fa826f 1940{
4db35314 1941 struct kvm_mmu_page *sp, *node;
e0fa826f 1942
aaee2c94 1943 spin_lock(&kvm->mmu_lock);
f05e70ac 1944 list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link)
4db35314 1945 kvm_mmu_zap_page(kvm, sp);
aaee2c94 1946 spin_unlock(&kvm->mmu_lock);
e0fa826f 1947
90cb0529 1948 kvm_flush_remote_tlbs(kvm);
e0fa826f
DL
1949}
1950
b5a33a75
AK
1951void kvm_mmu_module_exit(void)
1952{
1953 if (pte_chain_cache)
1954 kmem_cache_destroy(pte_chain_cache);
1955 if (rmap_desc_cache)
1956 kmem_cache_destroy(rmap_desc_cache);
d3d25b04
AK
1957 if (mmu_page_header_cache)
1958 kmem_cache_destroy(mmu_page_header_cache);
b5a33a75
AK
1959}
1960
1961int kvm_mmu_module_init(void)
1962{
1963 pte_chain_cache = kmem_cache_create("kvm_pte_chain",
1964 sizeof(struct kvm_pte_chain),
20c2df83 1965 0, 0, NULL);
b5a33a75
AK
1966 if (!pte_chain_cache)
1967 goto nomem;
1968 rmap_desc_cache = kmem_cache_create("kvm_rmap_desc",
1969 sizeof(struct kvm_rmap_desc),
20c2df83 1970 0, 0, NULL);
b5a33a75
AK
1971 if (!rmap_desc_cache)
1972 goto nomem;
1973
d3d25b04
AK
1974 mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
1975 sizeof(struct kvm_mmu_page),
20c2df83 1976 0, 0, NULL);
d3d25b04
AK
1977 if (!mmu_page_header_cache)
1978 goto nomem;
1979
b5a33a75
AK
1980 return 0;
1981
1982nomem:
1983 kvm_mmu_module_exit();
1984 return -ENOMEM;
1985}
1986
3ad82a7e
ZX
1987/*
 1988 * Calculate mmu pages needed for kvm.
1989 */
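/*
 * Worked example, assuming the usual constants (KVM_PERMILLE_MMU_PAGES
 * of 20, KVM_MIN_ALLOC_MMU_PAGES of 64): a 512MB guest has 131072 4K
 * pages and is therefore allowed 131072 * 20 / 1000 = 2621 shadow
 * pages, while a very small guest is rounded up to the 64-page minimum.
 */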
1990unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
1991{
1992 int i;
1993 unsigned int nr_mmu_pages;
1994 unsigned int nr_pages = 0;
1995
1996 for (i = 0; i < kvm->nmemslots; i++)
1997 nr_pages += kvm->memslots[i].npages;
1998
1999 nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
2000 nr_mmu_pages = max(nr_mmu_pages,
2001 (unsigned int) KVM_MIN_ALLOC_MMU_PAGES);
2002
2003 return nr_mmu_pages;
2004}
2005
37a7d8b0
AK
2006#ifdef AUDIT
2007
2008static const char *audit_msg;
2009
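/*
 * Sign-extend bit 47 so the reconstructed guest virtual address is in
 * canonical form on 64-bit hosts.
 */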
2010static gva_t canonicalize(gva_t gva)
2011{
2012#ifdef CONFIG_X86_64
2013 gva = (long long)(gva << 16) >> 16;
2014#endif
2015 return gva;
2016}
2017
2018static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
2019 gva_t va, int level)
2020{
2021 u64 *pt = __va(page_pte & PT64_BASE_ADDR_MASK);
2022 int i;
2023 gva_t va_delta = 1ul << (PAGE_SHIFT + 9 * (level - 1));
2024
2025 for (i = 0; i < PT64_ENT_PER_PAGE; ++i, va += va_delta) {
2026 u64 ent = pt[i];
2027
c7addb90 2028 if (ent == shadow_trap_nonpresent_pte)
37a7d8b0
AK
2029 continue;
2030
2031 va = canonicalize(va);
c7addb90
AK
2032 if (level > 1) {
2033 if (ent == shadow_notrap_nonpresent_pte)
2034 printk(KERN_ERR "audit: (%s) nontrapping pte"
2035 " in nonleaf level: levels %d gva %lx"
2036 " level %d pte %llx\n", audit_msg,
ad312c7c 2037 vcpu->arch.mmu.root_level, va, level, ent);
c7addb90 2038
37a7d8b0 2039 audit_mappings_page(vcpu, ent, va, level - 1);
c7addb90 2040 } else {
ad312c7c 2041 gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, va);
1d28f5f4
AK
2042 struct page *page = gpa_to_page(vcpu, gpa);
2043 hpa_t hpa = page_to_phys(page);
37a7d8b0 2044
c7addb90 2045 if (is_shadow_present_pte(ent)
37a7d8b0 2046 && (ent & PT64_BASE_ADDR_MASK) != hpa)
c7addb90
AK
2047 printk(KERN_ERR "xx audit error: (%s) levels %d"
2048 " gva %lx gpa %llx hpa %llx ent %llx %d\n",
ad312c7c 2049 audit_msg, vcpu->arch.mmu.root_level,
d77c26fc
MD
2050 va, gpa, hpa, ent,
2051 is_shadow_present_pte(ent));
c7addb90
AK
2052 else if (ent == shadow_notrap_nonpresent_pte
2053 && !is_error_hpa(hpa))
2054 printk(KERN_ERR "audit: (%s) notrap shadow,"
2055 " valid guest gva %lx\n", audit_msg, va);
b4231d61 2056 kvm_release_page_clean(page);
c7addb90 2057
37a7d8b0
AK
2058 }
2059 }
2060}
2061
2062static void audit_mappings(struct kvm_vcpu *vcpu)
2063{
1ea252af 2064 unsigned i;
37a7d8b0 2065
ad312c7c
ZX
2066 if (vcpu->arch.mmu.root_level == 4)
2067 audit_mappings_page(vcpu, vcpu->arch.mmu.root_hpa, 0, 4);
37a7d8b0
AK
2068 else
2069 for (i = 0; i < 4; ++i)
ad312c7c 2070 if (vcpu->arch.mmu.pae_root[i] & PT_PRESENT_MASK)
37a7d8b0 2071 audit_mappings_page(vcpu,
ad312c7c 2072 vcpu->arch.mmu.pae_root[i],
37a7d8b0
AK
2073 i << 30,
2074 2);
2075}
2076
2077static int count_rmaps(struct kvm_vcpu *vcpu)
2078{
2079 int nmaps = 0;
2080 int i, j, k;
2081
2082 for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
2083 struct kvm_memory_slot *m = &vcpu->kvm->memslots[i];
2084 struct kvm_rmap_desc *d;
2085
2086 for (j = 0; j < m->npages; ++j) {
290fc38d 2087 unsigned long *rmapp = &m->rmap[j];
37a7d8b0 2088
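			/*
			 * An rmap entry either points at a single spte (low
			 * bit clear) or, with the low bit set, at a chain of
			 * kvm_rmap_desc blocks holding up to RMAP_EXT sptes
			 * each.
			 */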
290fc38d 2089 if (!*rmapp)
37a7d8b0 2090 continue;
290fc38d 2091 if (!(*rmapp & 1)) {
37a7d8b0
AK
2092 ++nmaps;
2093 continue;
2094 }
290fc38d 2095 d = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
37a7d8b0
AK
2096 while (d) {
2097 for (k = 0; k < RMAP_EXT; ++k)
2098 if (d->shadow_ptes[k])
2099 ++nmaps;
2100 else
2101 break;
2102 d = d->more;
2103 }
2104 }
2105 }
2106 return nmaps;
2107}
2108
2109static int count_writable_mappings(struct kvm_vcpu *vcpu)
2110{
2111 int nmaps = 0;
4db35314 2112 struct kvm_mmu_page *sp;
37a7d8b0
AK
2113 int i;
2114
f05e70ac 2115 list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
4db35314 2116 u64 *pt = sp->spt;
37a7d8b0 2117
4db35314 2118 if (sp->role.level != PT_PAGE_TABLE_LEVEL)
37a7d8b0
AK
2119 continue;
2120
2121 for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
2122 u64 ent = pt[i];
2123
2124 if (!(ent & PT_PRESENT_MASK))
2125 continue;
2126 if (!(ent & PT_WRITABLE_MASK))
2127 continue;
2128 ++nmaps;
2129 }
2130 }
2131 return nmaps;
2132}
2133
2134static void audit_rmap(struct kvm_vcpu *vcpu)
2135{
2136 int n_rmap = count_rmaps(vcpu);
2137 int n_actual = count_writable_mappings(vcpu);
2138
2139 if (n_rmap != n_actual)
2140 printk(KERN_ERR "%s: (%s) rmap %d actual %d\n",
b8688d51 2141 __func__, audit_msg, n_rmap, n_actual);
37a7d8b0
AK
2142}
2143
2144static void audit_write_protection(struct kvm_vcpu *vcpu)
2145{
4db35314 2146 struct kvm_mmu_page *sp;
290fc38d
IE
2147 struct kvm_memory_slot *slot;
2148 unsigned long *rmapp;
2149 gfn_t gfn;
37a7d8b0 2150
f05e70ac 2151 list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
4db35314 2152 if (sp->role.metaphysical)
37a7d8b0
AK
2153 continue;
2154
4db35314
AK
2155 slot = gfn_to_memslot(vcpu->kvm, sp->gfn);
2156 gfn = unalias_gfn(vcpu->kvm, sp->gfn);
290fc38d
IE
2157 rmapp = &slot->rmap[gfn - slot->base_gfn];
2158 if (*rmapp)
37a7d8b0
AK
2159 printk(KERN_ERR "%s: (%s) shadow page has writable"
2160 " mappings: gfn %lx role %x\n",
b8688d51 2161 __func__, audit_msg, sp->gfn,
4db35314 2162 sp->role.word);
37a7d8b0
AK
2163 }
2164}
2165
2166static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg)
2167{
2168 int olddbg = dbg;
2169
2170 dbg = 0;
2171 audit_msg = msg;
2172 audit_rmap(vcpu);
2173 audit_write_protection(vcpu);
2174 audit_mappings(vcpu);
2175 dbg = olddbg;
2176}
2177
2178#endif