git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git - blame: arch/x86/kvm/mmu.c
KVM: replace x86 kvm n_free_mmu_pages with n_used_mmu_pages
6aa8b732
AK
1/*
2 * Kernel-based Virtual Machine driver for Linux
3 *
4 * This module enables machines with Intel VT-x extensions to run virtual
5 * machines without emulation or binary translation.
6 *
7 * MMU support
8 *
9 * Copyright (C) 2006 Qumranet, Inc.
221d059d 10 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
6aa8b732
AK
11 *
12 * Authors:
13 * Yaniv Kamay <yaniv@qumranet.com>
14 * Avi Kivity <avi@qumranet.com>
15 *
16 * This work is licensed under the terms of the GNU GPL, version 2. See
17 * the COPYING file in the top-level directory.
18 *
19 */
e495606d 20
1d737c8a 21#include "mmu.h"
836a1b3c 22#include "x86.h"
6de4f3ad 23#include "kvm_cache_regs.h"
e495606d 24
edf88417 25#include <linux/kvm_host.h>
6aa8b732
AK
26#include <linux/types.h>
27#include <linux/string.h>
6aa8b732
AK
28#include <linux/mm.h>
29#include <linux/highmem.h>
30#include <linux/module.h>
448353ca 31#include <linux/swap.h>
05da4558 32#include <linux/hugetlb.h>
2f333bcb 33#include <linux/compiler.h>
bc6678a3 34#include <linux/srcu.h>
5a0e3ad6 35#include <linux/slab.h>
bf998156 36#include <linux/uaccess.h>
6aa8b732 37
e495606d
AK
38#include <asm/page.h>
39#include <asm/cmpxchg.h>
4e542370 40#include <asm/io.h>
13673a90 41#include <asm/vmx.h>
6aa8b732 42
18552672
JR
43/*
44 * Setting this variable to true enables Two-Dimensional-Paging,
45 * where the hardware walks 2 page tables:
46 * 1. the guest-virtual to guest-physical
47 * 2. while doing 1. it walks guest-physical to host-physical
48 * If the hardware supports this, we don't need to do shadow paging.
49 */
2f333bcb 50bool tdp_enabled = false;
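/*
 * Editorial note (not in the original source): "two-dimensional paging"
 * is the hardware-assisted nested paging of modern CPUs, i.e. Intel EPT
 * and AMD NPT.  When the vendor module detects support it enables this
 * flag, and KVM then skips building shadow page tables for the guest.
 */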
18552672 51
37a7d8b0
AK
52#undef MMU_DEBUG
53
54#undef AUDIT
55
56#ifdef AUDIT
57static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg);
58#else
59static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg) {}
60#endif
61
62#ifdef MMU_DEBUG
63
64#define pgprintk(x...) do { if (dbg) printk(x); } while (0)
65#define rmap_printk(x...) do { if (dbg) printk(x); } while (0)
66
67#else
68
69#define pgprintk(x...) do { } while (0)
70#define rmap_printk(x...) do { } while (0)
71
72#endif
73
74#if defined(MMU_DEBUG) || defined(AUDIT)
6ada8cca
AK
75static int dbg = 0;
76module_param(dbg, bool, 0644);
37a7d8b0 77#endif
6aa8b732 78
582801a9
MT
79static int oos_shadow = 1;
80module_param(oos_shadow, bool, 0644);
81
d6c69ee9
YD
82#ifndef MMU_DEBUG
83#define ASSERT(x) do { } while (0)
84#else
6aa8b732
AK
85#define ASSERT(x) \
86 if (!(x)) { \
87 printk(KERN_WARNING "assertion failed %s:%d: %s\n", \
88 __FILE__, __LINE__, #x); \
89 }
d6c69ee9 90#endif
6aa8b732 91
6aa8b732
AK
92#define PT_FIRST_AVAIL_BITS_SHIFT 9
93#define PT64_SECOND_AVAIL_BITS_SHIFT 52
94
6aa8b732
AK
95#define PT64_LEVEL_BITS 9
96
97#define PT64_LEVEL_SHIFT(level) \
d77c26fc 98 (PAGE_SHIFT + (level - 1) * PT64_LEVEL_BITS)
6aa8b732
AK
99
100#define PT64_LEVEL_MASK(level) \
101 (((1ULL << PT64_LEVEL_BITS) - 1) << PT64_LEVEL_SHIFT(level))
102
103#define PT64_INDEX(address, level)\
104 (((address) >> PT64_LEVEL_SHIFT(level)) & ((1 << PT64_LEVEL_BITS) - 1))
105
106
107#define PT32_LEVEL_BITS 10
108
109#define PT32_LEVEL_SHIFT(level) \
d77c26fc 110 (PAGE_SHIFT + (level - 1) * PT32_LEVEL_BITS)
6aa8b732
AK
111
112#define PT32_LEVEL_MASK(level) \
113 (((1ULL << PT32_LEVEL_BITS) - 1) << PT32_LEVEL_SHIFT(level))
e04da980
JR
114#define PT32_LVL_OFFSET_MASK(level) \
115 (PT32_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \
116 * PT32_LEVEL_BITS))) - 1))
6aa8b732
AK
117
118#define PT32_INDEX(address, level)\
119 (((address) >> PT32_LEVEL_SHIFT(level)) & ((1 << PT32_LEVEL_BITS) - 1))
120
121
27aba766 122#define PT64_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
6aa8b732
AK
123#define PT64_DIR_BASE_ADDR_MASK \
124 (PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + PT64_LEVEL_BITS)) - 1))
e04da980
JR
125#define PT64_LVL_ADDR_MASK(level) \
126 (PT64_BASE_ADDR_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
127 * PT64_LEVEL_BITS))) - 1))
128#define PT64_LVL_OFFSET_MASK(level) \
129 (PT64_BASE_ADDR_MASK & ((1ULL << (PAGE_SHIFT + (((level) - 1) \
130 * PT64_LEVEL_BITS))) - 1))
6aa8b732
AK
131
132#define PT32_BASE_ADDR_MASK PAGE_MASK
133#define PT32_DIR_BASE_ADDR_MASK \
134 (PAGE_MASK & ~((1ULL << (PAGE_SHIFT + PT32_LEVEL_BITS)) - 1))
e04da980
JR
135#define PT32_LVL_ADDR_MASK(level) \
136 (PAGE_MASK & ~((1ULL << (PAGE_SHIFT + (((level) - 1) \
137 * PT32_LEVEL_BITS))) - 1))
6aa8b732 138
79539cec
AK
139#define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK \
140 | PT64_NX_MASK)
6aa8b732 141
cd4a4e53
AK
142#define RMAP_EXT 4
143
fe135d2c
AK
144#define ACC_EXEC_MASK 1
145#define ACC_WRITE_MASK PT_WRITABLE_MASK
146#define ACC_USER_MASK PT_USER_MASK
147#define ACC_ALL (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)
148
90bb6fc5
AK
149#include <trace/events/kvm.h>
150
07420171
AK
151#define CREATE_TRACE_POINTS
152#include "mmutrace.h"
153
1403283a
IE
154#define SPTE_HOST_WRITEABLE (1ULL << PT_FIRST_AVAIL_BITS_SHIFT)
155
135f8c2b
AK
156#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
157
cd4a4e53 158struct kvm_rmap_desc {
d555c333 159 u64 *sptes[RMAP_EXT];
cd4a4e53
AK
160 struct kvm_rmap_desc *more;
161};
162
2d11123a
AK
163struct kvm_shadow_walk_iterator {
164 u64 addr;
165 hpa_t shadow_addr;
166 int level;
167 u64 *sptep;
168 unsigned index;
169};
170
171#define for_each_shadow_entry(_vcpu, _addr, _walker) \
172 for (shadow_walk_init(&(_walker), _vcpu, _addr); \
173 shadow_walk_okay(&(_walker)); \
174 shadow_walk_next(&(_walker)))
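/*
 * Illustrative usage sketch (editorial addition, not part of mmu.c):
 *
 *	struct kvm_shadow_walk_iterator it;
 *
 *	for_each_shadow_entry(vcpu, addr, it) {
 *		u64 spte = *it.sptep;
 *		(spte is the shadow pte mapping 'addr' at it.level,
 *		 walked from the root down towards the leaf)
 *	}
 */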
175
1047df1f 176typedef void (*mmu_parent_walk_fn) (struct kvm_mmu_page *sp, u64 *spte);
ad8cfbe3 177
b5a33a75
AK
178static struct kmem_cache *pte_chain_cache;
179static struct kmem_cache *rmap_desc_cache;
d3d25b04 180static struct kmem_cache *mmu_page_header_cache;
b5a33a75 181
c7addb90
AK
182static u64 __read_mostly shadow_trap_nonpresent_pte;
183static u64 __read_mostly shadow_notrap_nonpresent_pte;
7b52345e
SY
184static u64 __read_mostly shadow_base_present_pte;
185static u64 __read_mostly shadow_nx_mask;
186static u64 __read_mostly shadow_x_mask; /* mutual exclusive with nx_mask */
187static u64 __read_mostly shadow_user_mask;
188static u64 __read_mostly shadow_accessed_mask;
189static u64 __read_mostly shadow_dirty_mask;
c7addb90 190
82725b20
DE
191static inline u64 rsvd_bits(int s, int e)
192{
193 return ((1ULL << (e - s + 1)) - 1) << s;
194}
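/*
 * Worked example (editorial addition): rsvd_bits(36, 51) evaluates to
 * ((1ULL << 16) - 1) << 36, i.e. a mask with bits 51:36 set - the
 * physical-address bits that must be zero in a pte on a CPU whose
 * MAXPHYADDR is 36.
 */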
195
c7addb90
AK
196void kvm_mmu_set_nonpresent_ptes(u64 trap_pte, u64 notrap_pte)
197{
198 shadow_trap_nonpresent_pte = trap_pte;
199 shadow_notrap_nonpresent_pte = notrap_pte;
200}
201EXPORT_SYMBOL_GPL(kvm_mmu_set_nonpresent_ptes);
202
7b52345e
SY
203void kvm_mmu_set_base_ptes(u64 base_pte)
204{
205 shadow_base_present_pte = base_pte;
206}
207EXPORT_SYMBOL_GPL(kvm_mmu_set_base_ptes);
208
209void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
4b12f0de 210 u64 dirty_mask, u64 nx_mask, u64 x_mask)
7b52345e
SY
211{
212 shadow_user_mask = user_mask;
213 shadow_accessed_mask = accessed_mask;
214 shadow_dirty_mask = dirty_mask;
215 shadow_nx_mask = nx_mask;
216 shadow_x_mask = x_mask;
217}
218EXPORT_SYMBOL_GPL(kvm_mmu_set_mask_ptes);
219
3dbe1415 220static bool is_write_protection(struct kvm_vcpu *vcpu)
6aa8b732 221{
4d4ec087 222 return kvm_read_cr0_bits(vcpu, X86_CR0_WP);
6aa8b732
AK
223}
224
225static int is_cpuid_PSE36(void)
226{
227 return 1;
228}
229
73b1087e
AK
230static int is_nx(struct kvm_vcpu *vcpu)
231{
f6801dff 232 return vcpu->arch.efer & EFER_NX;
73b1087e
AK
233}
234
c7addb90
AK
235static int is_shadow_present_pte(u64 pte)
236{
c7addb90
AK
237 return pte != shadow_trap_nonpresent_pte
238 && pte != shadow_notrap_nonpresent_pte;
239}
240
05da4558
MT
241static int is_large_pte(u64 pte)
242{
243 return pte & PT_PAGE_SIZE_MASK;
244}
245
8dae4445 246static int is_writable_pte(unsigned long pte)
6aa8b732
AK
247{
248 return pte & PT_WRITABLE_MASK;
249}
250
43a3795a 251static int is_dirty_gpte(unsigned long pte)
e3c5e7ec 252{
439e218a 253 return pte & PT_DIRTY_MASK;
e3c5e7ec
AK
254}
255
43a3795a 256static int is_rmap_spte(u64 pte)
cd4a4e53 257{
4b1a80fa 258 return is_shadow_present_pte(pte);
cd4a4e53
AK
259}
260
776e6633
MT
261static int is_last_spte(u64 pte, int level)
262{
263 if (level == PT_PAGE_TABLE_LEVEL)
264 return 1;
852e3c19 265 if (is_large_pte(pte))
776e6633
MT
266 return 1;
267 return 0;
268}
269
35149e21 270static pfn_t spte_to_pfn(u64 pte)
0b49ea86 271{
35149e21 272 return (pte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
0b49ea86
AK
273}
274
da928521
AK
275static gfn_t pse36_gfn_delta(u32 gpte)
276{
277 int shift = 32 - PT32_DIR_PSE36_SHIFT - PAGE_SHIFT;
278
279 return (gpte & PT32_DIR_PSE36_MASK) << shift;
280}
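/*
 * Editorial note: with PSE-36 a 32-bit guest PDE for a 4MB page carries
 * extra high physical-address bits starting at PDE bit 13 (assuming the
 * usual PT32_DIR_PSE36_SHIFT of 13).  The shift computed here is
 * 32 - 13 - PAGE_SHIFT = 7 for 4K pages, which moves those bits from
 * pte bit 13 up to gfn bit 20, i.e. physical address bit 32.
 */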
281
d555c333 282static void __set_spte(u64 *sptep, u64 spte)
e663ee64 283{
7645e432 284 set_64bit(sptep, spte);
e663ee64
AK
285}
286
a9221dd5
AK
287static u64 __xchg_spte(u64 *sptep, u64 new_spte)
288{
289#ifdef CONFIG_X86_64
290 return xchg(sptep, new_spte);
291#else
292 u64 old_spte;
293
294 do {
295 old_spte = *sptep;
296 } while (cmpxchg64(sptep, old_spte, new_spte) != old_spte);
297
298 return old_spte;
299#endif
300}
301
8672b721
XG
302static bool spte_has_volatile_bits(u64 spte)
303{
304 if (!shadow_accessed_mask)
305 return false;
306
307 if (!is_shadow_present_pte(spte))
308 return false;
309
4132779b
XG
310 if ((spte & shadow_accessed_mask) &&
311 (!is_writable_pte(spte) || (spte & shadow_dirty_mask)))
8672b721
XG
312 return false;
313
314 return true;
315}
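/*
 * Editorial note: "volatile" bits are the accessed/dirty bits that the
 * CPU may set in the spte asynchronously.  When such bits might change
 * under us, the spte has to be updated with an atomic exchange
 * (__xchg_spte) rather than a plain write so a concurrently-set bit is
 * not lost; see update_spte() below.
 */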
316
4132779b
XG
317static bool spte_is_bit_cleared(u64 old_spte, u64 new_spte, u64 bit_mask)
318{
319 return (old_spte & bit_mask) && !(new_spte & bit_mask);
320}
321
b79b93f9
AK
322static void update_spte(u64 *sptep, u64 new_spte)
323{
4132779b
XG
324 u64 mask, old_spte = *sptep;
325
326 WARN_ON(!is_rmap_spte(new_spte));
b79b93f9 327
4132779b
XG
328 new_spte |= old_spte & shadow_dirty_mask;
329
330 mask = shadow_accessed_mask;
331 if (is_writable_pte(old_spte))
332 mask |= shadow_dirty_mask;
333
334 if (!spte_has_volatile_bits(old_spte) || (new_spte & mask) == mask)
b79b93f9 335 __set_spte(sptep, new_spte);
4132779b 336 else
b79b93f9 337 old_spte = __xchg_spte(sptep, new_spte);
4132779b
XG
338
339 if (!shadow_accessed_mask)
340 return;
341
342 if (spte_is_bit_cleared(old_spte, new_spte, shadow_accessed_mask))
343 kvm_set_pfn_accessed(spte_to_pfn(old_spte));
344 if (spte_is_bit_cleared(old_spte, new_spte, shadow_dirty_mask))
345 kvm_set_pfn_dirty(spte_to_pfn(old_spte));
b79b93f9
AK
346}
347
e2dec939 348static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
2e3e5882 349 struct kmem_cache *base_cache, int min)
714b93da
AK
350{
351 void *obj;
352
353 if (cache->nobjs >= min)
e2dec939 354 return 0;
714b93da 355 while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
2e3e5882 356 obj = kmem_cache_zalloc(base_cache, GFP_KERNEL);
714b93da 357 if (!obj)
e2dec939 358 return -ENOMEM;
714b93da
AK
359 cache->objects[cache->nobjs++] = obj;
360 }
e2dec939 361 return 0;
714b93da
AK
362}
363
e8ad9a70
XG
364static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc,
365 struct kmem_cache *cache)
714b93da
AK
366{
367 while (mc->nobjs)
e8ad9a70 368 kmem_cache_free(cache, mc->objects[--mc->nobjs]);
714b93da
AK
369}
370
c1158e63 371static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
2e3e5882 372 int min)
c1158e63
AK
373{
374 struct page *page;
375
376 if (cache->nobjs >= min)
377 return 0;
378 while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
2e3e5882 379 page = alloc_page(GFP_KERNEL);
c1158e63
AK
380 if (!page)
381 return -ENOMEM;
c1158e63
AK
382 cache->objects[cache->nobjs++] = page_address(page);
383 }
384 return 0;
385}
386
387static void mmu_free_memory_cache_page(struct kvm_mmu_memory_cache *mc)
388{
389 while (mc->nobjs)
c4d198d5 390 free_page((unsigned long)mc->objects[--mc->nobjs]);
c1158e63
AK
391}
392
2e3e5882 393static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
714b93da 394{
e2dec939
AK
395 int r;
396
ad312c7c 397 r = mmu_topup_memory_cache(&vcpu->arch.mmu_pte_chain_cache,
2e3e5882 398 pte_chain_cache, 4);
e2dec939
AK
399 if (r)
400 goto out;
ad312c7c 401 r = mmu_topup_memory_cache(&vcpu->arch.mmu_rmap_desc_cache,
c41ef344 402 rmap_desc_cache, 4);
d3d25b04
AK
403 if (r)
404 goto out;
ad312c7c 405 r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8);
d3d25b04
AK
406 if (r)
407 goto out;
ad312c7c 408 r = mmu_topup_memory_cache(&vcpu->arch.mmu_page_header_cache,
2e3e5882 409 mmu_page_header_cache, 4);
e2dec939
AK
410out:
411 return r;
714b93da
AK
412}
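/*
 * Editorial note: these per-vcpu caches are filled here, before mmu_lock
 * (a spinlock) is taken, so the fault path can "allocate" pte chains,
 * rmap descriptors and shadow pages without sleeping;
 * mmu_memory_cache_alloc() below just pops a preallocated object.
 */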
413
414static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
415{
e8ad9a70
XG
416 mmu_free_memory_cache(&vcpu->arch.mmu_pte_chain_cache, pte_chain_cache);
417 mmu_free_memory_cache(&vcpu->arch.mmu_rmap_desc_cache, rmap_desc_cache);
ad312c7c 418 mmu_free_memory_cache_page(&vcpu->arch.mmu_page_cache);
e8ad9a70
XG
419 mmu_free_memory_cache(&vcpu->arch.mmu_page_header_cache,
420 mmu_page_header_cache);
714b93da
AK
421}
422
423static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
424 size_t size)
425{
426 void *p;
427
428 BUG_ON(!mc->nobjs);
429 p = mc->objects[--mc->nobjs];
714b93da
AK
430 return p;
431}
432
714b93da
AK
433static struct kvm_pte_chain *mmu_alloc_pte_chain(struct kvm_vcpu *vcpu)
434{
ad312c7c 435 return mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_chain_cache,
714b93da
AK
436 sizeof(struct kvm_pte_chain));
437}
438
90cb0529 439static void mmu_free_pte_chain(struct kvm_pte_chain *pc)
714b93da 440{
e8ad9a70 441 kmem_cache_free(pte_chain_cache, pc);
714b93da
AK
442}
443
444static struct kvm_rmap_desc *mmu_alloc_rmap_desc(struct kvm_vcpu *vcpu)
445{
ad312c7c 446 return mmu_memory_cache_alloc(&vcpu->arch.mmu_rmap_desc_cache,
714b93da
AK
447 sizeof(struct kvm_rmap_desc));
448}
449
90cb0529 450static void mmu_free_rmap_desc(struct kvm_rmap_desc *rd)
714b93da 451{
e8ad9a70 452 kmem_cache_free(rmap_desc_cache, rd);
714b93da
AK
453}
454
2032a93d
LJ
455static gfn_t kvm_mmu_page_get_gfn(struct kvm_mmu_page *sp, int index)
456{
457 if (!sp->role.direct)
458 return sp->gfns[index];
459
460 return sp->gfn + (index << ((sp->role.level - 1) * PT64_LEVEL_BITS));
461}
462
463static void kvm_mmu_page_set_gfn(struct kvm_mmu_page *sp, int index, gfn_t gfn)
464{
465 if (sp->role.direct)
466 BUG_ON(gfn != kvm_mmu_page_get_gfn(sp, index));
467 else
468 sp->gfns[index] = gfn;
469}
470
05da4558
MT
471/*
472 * Return the pointer to the largepage write count for a given
473 * gfn, handling slots that are not large page aligned.
474 */
d25797b2
JR
475static int *slot_largepage_idx(gfn_t gfn,
476 struct kvm_memory_slot *slot,
477 int level)
05da4558
MT
478{
479 unsigned long idx;
480
82855413
JR
481 idx = (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
482 (slot->base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
d25797b2 483 return &slot->lpage_info[level - 2][idx].write_count;
05da4558
MT
484}
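/*
 * Worked example (editorial addition): for level == PT_DIRECTORY_LEVEL
 * (2MB pages), KVM_HPAGE_GFN_SHIFT(level) is 9, so a slot starting at
 * base_gfn 0x800 and a gfn of 0xa37 give
 * idx = (0xa37 >> 9) - (0x800 >> 9) = 5 - 4 = 1,
 * i.e. the second 2MB region of the slot.
 */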
485
486static void account_shadowed(struct kvm *kvm, gfn_t gfn)
487{
d25797b2 488 struct kvm_memory_slot *slot;
05da4558 489 int *write_count;
d25797b2 490 int i;
05da4558 491
a1f4d395 492 slot = gfn_to_memslot(kvm, gfn);
d25797b2
JR
493 for (i = PT_DIRECTORY_LEVEL;
494 i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
495 write_count = slot_largepage_idx(gfn, slot, i);
496 *write_count += 1;
497 }
05da4558
MT
498}
499
500static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn)
501{
d25797b2 502 struct kvm_memory_slot *slot;
05da4558 503 int *write_count;
d25797b2 504 int i;
05da4558 505
a1f4d395 506 slot = gfn_to_memslot(kvm, gfn);
d25797b2
JR
507 for (i = PT_DIRECTORY_LEVEL;
508 i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
d25797b2
JR
509 write_count = slot_largepage_idx(gfn, slot, i);
510 *write_count -= 1;
511 WARN_ON(*write_count < 0);
512 }
05da4558
MT
513}
514
d25797b2
JR
515static int has_wrprotected_page(struct kvm *kvm,
516 gfn_t gfn,
517 int level)
05da4558 518{
2843099f 519 struct kvm_memory_slot *slot;
05da4558
MT
520 int *largepage_idx;
521
a1f4d395 522 slot = gfn_to_memslot(kvm, gfn);
05da4558 523 if (slot) {
d25797b2 524 largepage_idx = slot_largepage_idx(gfn, slot, level);
05da4558
MT
525 return *largepage_idx;
526 }
527
528 return 1;
529}
530
d25797b2 531static int host_mapping_level(struct kvm *kvm, gfn_t gfn)
05da4558 532{
8f0b1ab6 533 unsigned long page_size;
d25797b2 534 int i, ret = 0;
05da4558 535
8f0b1ab6 536 page_size = kvm_host_page_size(kvm, gfn);
05da4558 537
d25797b2
JR
538 for (i = PT_PAGE_TABLE_LEVEL;
539 i < (PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES); ++i) {
540 if (page_size >= KVM_HPAGE_SIZE(i))
541 ret = i;
542 else
543 break;
544 }
545
4c2155ce 546 return ret;
05da4558
MT
547}
548
d25797b2 549static int mapping_level(struct kvm_vcpu *vcpu, gfn_t large_gfn)
05da4558
MT
550{
551 struct kvm_memory_slot *slot;
878403b7 552 int host_level, level, max_level;
05da4558
MT
553
554 slot = gfn_to_memslot(vcpu->kvm, large_gfn);
555 if (slot && slot->dirty_bitmap)
d25797b2 556 return PT_PAGE_TABLE_LEVEL;
05da4558 557
d25797b2
JR
558 host_level = host_mapping_level(vcpu->kvm, large_gfn);
559
560 if (host_level == PT_PAGE_TABLE_LEVEL)
561 return host_level;
562
878403b7
SY
563 max_level = kvm_x86_ops->get_lpage_level() < host_level ?
564 kvm_x86_ops->get_lpage_level() : host_level;
565
566 for (level = PT_DIRECTORY_LEVEL; level <= max_level; ++level)
d25797b2
JR
567 if (has_wrprotected_page(vcpu->kvm, large_gfn, level))
568 break;
d25797b2
JR
569
570 return level - 1;
05da4558
MT
571}
572
290fc38d
IE
573/*
574 * Take gfn and return the reverse mapping to it.
290fc38d
IE
575 */
576
44ad9944 577static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int level)
290fc38d
IE
578{
579 struct kvm_memory_slot *slot;
05da4558 580 unsigned long idx;
290fc38d
IE
581
582 slot = gfn_to_memslot(kvm, gfn);
44ad9944 583 if (likely(level == PT_PAGE_TABLE_LEVEL))
05da4558
MT
584 return &slot->rmap[gfn - slot->base_gfn];
585
82855413
JR
586 idx = (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
587 (slot->base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
05da4558 588
44ad9944 589 return &slot->lpage_info[level - 2][idx].rmap_pde;
290fc38d
IE
590}
591
cd4a4e53
AK
592/*
593 * Reverse mapping data structures:
594 *
290fc38d
IE
595 * If rmapp bit zero is zero, then rmapp points to the shadow page table entry
596 * that points to page_address(page).
cd4a4e53 597 *
290fc38d
IE
598 * If rmapp bit zero is one, then (rmapp & ~1) points to a struct kvm_rmap_desc
599 * containing more mappings.
53a27b39
MT
600 *
601 * Returns the number of rmap entries before the spte was added or zero if
602 * the spte was not added.
603 *
cd4a4e53 604 */
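/*
 * Illustrative example (editorial addition): after the first mapping of a
 * gfn, *rmapp == (unsigned long)spte (bit zero clear, a single spte).
 * When a second spte maps the same gfn, rmap_add() allocates a
 * kvm_rmap_desc, stores both sptes in desc->sptes[0..1] and sets
 * *rmapp = (unsigned long)desc | 1; further sptes fill the remaining
 * RMAP_EXT slots and then chain through desc->more.
 */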
44ad9944 605static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
cd4a4e53 606{
4db35314 607 struct kvm_mmu_page *sp;
cd4a4e53 608 struct kvm_rmap_desc *desc;
290fc38d 609 unsigned long *rmapp;
53a27b39 610 int i, count = 0;
cd4a4e53 611
43a3795a 612 if (!is_rmap_spte(*spte))
53a27b39 613 return count;
4db35314 614 sp = page_header(__pa(spte));
2032a93d 615 kvm_mmu_page_set_gfn(sp, spte - sp->spt, gfn);
44ad9944 616 rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level);
290fc38d 617 if (!*rmapp) {
cd4a4e53 618 rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte);
290fc38d
IE
619 *rmapp = (unsigned long)spte;
620 } else if (!(*rmapp & 1)) {
cd4a4e53 621 rmap_printk("rmap_add: %p %llx 1->many\n", spte, *spte);
714b93da 622 desc = mmu_alloc_rmap_desc(vcpu);
d555c333
AK
623 desc->sptes[0] = (u64 *)*rmapp;
624 desc->sptes[1] = spte;
290fc38d 625 *rmapp = (unsigned long)desc | 1;
cd4a4e53
AK
626 } else {
627 rmap_printk("rmap_add: %p %llx many->many\n", spte, *spte);
290fc38d 628 desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
d555c333 629 while (desc->sptes[RMAP_EXT-1] && desc->more) {
cd4a4e53 630 desc = desc->more;
53a27b39
MT
631 count += RMAP_EXT;
632 }
d555c333 633 if (desc->sptes[RMAP_EXT-1]) {
714b93da 634 desc->more = mmu_alloc_rmap_desc(vcpu);
cd4a4e53
AK
635 desc = desc->more;
636 }
d555c333 637 for (i = 0; desc->sptes[i]; ++i)
cd4a4e53 638 ;
d555c333 639 desc->sptes[i] = spte;
cd4a4e53 640 }
53a27b39 641 return count;
cd4a4e53
AK
642}
643
290fc38d 644static void rmap_desc_remove_entry(unsigned long *rmapp,
cd4a4e53
AK
645 struct kvm_rmap_desc *desc,
646 int i,
647 struct kvm_rmap_desc *prev_desc)
648{
649 int j;
650
d555c333 651 for (j = RMAP_EXT - 1; !desc->sptes[j] && j > i; --j)
cd4a4e53 652 ;
d555c333
AK
653 desc->sptes[i] = desc->sptes[j];
654 desc->sptes[j] = NULL;
cd4a4e53
AK
655 if (j != 0)
656 return;
657 if (!prev_desc && !desc->more)
d555c333 658 *rmapp = (unsigned long)desc->sptes[0];
cd4a4e53
AK
659 else
660 if (prev_desc)
661 prev_desc->more = desc->more;
662 else
290fc38d 663 *rmapp = (unsigned long)desc->more | 1;
90cb0529 664 mmu_free_rmap_desc(desc);
cd4a4e53
AK
665}
666
290fc38d 667static void rmap_remove(struct kvm *kvm, u64 *spte)
cd4a4e53 668{
cd4a4e53
AK
669 struct kvm_rmap_desc *desc;
670 struct kvm_rmap_desc *prev_desc;
4db35314 671 struct kvm_mmu_page *sp;
2032a93d 672 gfn_t gfn;
290fc38d 673 unsigned long *rmapp;
cd4a4e53
AK
674 int i;
675
4db35314 676 sp = page_header(__pa(spte));
2032a93d
LJ
677 gfn = kvm_mmu_page_get_gfn(sp, spte - sp->spt);
678 rmapp = gfn_to_rmap(kvm, gfn, sp->role.level);
290fc38d 679 if (!*rmapp) {
19ada5c4 680 printk(KERN_ERR "rmap_remove: %p 0->BUG\n", spte);
cd4a4e53 681 BUG();
290fc38d 682 } else if (!(*rmapp & 1)) {
19ada5c4 683 rmap_printk("rmap_remove: %p 1->0\n", spte);
290fc38d 684 if ((u64 *)*rmapp != spte) {
19ada5c4 685 printk(KERN_ERR "rmap_remove: %p 1->BUG\n", spte);
cd4a4e53
AK
686 BUG();
687 }
290fc38d 688 *rmapp = 0;
cd4a4e53 689 } else {
19ada5c4 690 rmap_printk("rmap_remove: %p many->many\n", spte);
290fc38d 691 desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
cd4a4e53
AK
692 prev_desc = NULL;
693 while (desc) {
d555c333
AK
694 for (i = 0; i < RMAP_EXT && desc->sptes[i]; ++i)
695 if (desc->sptes[i] == spte) {
290fc38d 696 rmap_desc_remove_entry(rmapp,
714b93da 697 desc, i,
cd4a4e53
AK
698 prev_desc);
699 return;
700 }
701 prev_desc = desc;
702 desc = desc->more;
703 }
19ada5c4 704 pr_err("rmap_remove: %p many->many\n", spte);
cd4a4e53
AK
705 BUG();
706 }
707}
708
e4b502ea 709static void set_spte_track_bits(u64 *sptep, u64 new_spte)
be38d276 710{
ce061867 711 pfn_t pfn;
9a3aad70
XG
712 u64 old_spte = *sptep;
713
8672b721 714 if (!spte_has_volatile_bits(old_spte))
9a3aad70 715 __set_spte(sptep, new_spte);
8672b721 716 else
9a3aad70 717 old_spte = __xchg_spte(sptep, new_spte);
ce061867 718
a9221dd5 719 if (!is_rmap_spte(old_spte))
ce061867 720 return;
8672b721 721
a9221dd5 722 pfn = spte_to_pfn(old_spte);
daa3db69 723 if (!shadow_accessed_mask || old_spte & shadow_accessed_mask)
ce061867 724 kvm_set_pfn_accessed(pfn);
4132779b 725 if (!shadow_dirty_mask || (old_spte & shadow_dirty_mask))
ce061867 726 kvm_set_pfn_dirty(pfn);
e4b502ea
XG
727}
728
729static void drop_spte(struct kvm *kvm, u64 *sptep, u64 new_spte)
730{
731 set_spte_track_bits(sptep, new_spte);
be38d276 732 rmap_remove(kvm, sptep);
be38d276
AK
733}
734
98348e95 735static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
374cbac0 736{
374cbac0 737 struct kvm_rmap_desc *desc;
98348e95
IE
738 u64 *prev_spte;
739 int i;
740
741 if (!*rmapp)
742 return NULL;
743 else if (!(*rmapp & 1)) {
744 if (!spte)
745 return (u64 *)*rmapp;
746 return NULL;
747 }
748 desc = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
98348e95
IE
749 prev_spte = NULL;
750 while (desc) {
d555c333 751 for (i = 0; i < RMAP_EXT && desc->sptes[i]; ++i) {
98348e95 752 if (prev_spte == spte)
d555c333
AK
753 return desc->sptes[i];
754 prev_spte = desc->sptes[i];
98348e95
IE
755 }
756 desc = desc->more;
757 }
758 return NULL;
759}
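/*
 * Illustrative usage sketch (editorial addition), the pattern used by the
 * callers below:
 *
 *	spte = rmap_next(kvm, rmapp, NULL);
 *	while (spte) {
 *		(inspect or modify *spte)
 *		spte = rmap_next(kvm, rmapp, spte);
 *	}
 */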
760
b1a36821 761static int rmap_write_protect(struct kvm *kvm, u64 gfn)
98348e95 762{
290fc38d 763 unsigned long *rmapp;
374cbac0 764 u64 *spte;
44ad9944 765 int i, write_protected = 0;
374cbac0 766
44ad9944 767 rmapp = gfn_to_rmap(kvm, gfn, PT_PAGE_TABLE_LEVEL);
374cbac0 768
98348e95
IE
769 spte = rmap_next(kvm, rmapp, NULL);
770 while (spte) {
374cbac0 771 BUG_ON(!spte);
374cbac0 772 BUG_ON(!(*spte & PT_PRESENT_MASK));
374cbac0 773 rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte);
8dae4445 774 if (is_writable_pte(*spte)) {
b79b93f9 775 update_spte(spte, *spte & ~PT_WRITABLE_MASK);
caa5b8a5
ED
776 write_protected = 1;
777 }
9647c14c 778 spte = rmap_next(kvm, rmapp, spte);
374cbac0 779 }
855149aa 780
05da4558 781 /* check for huge page mappings */
44ad9944
JR
782 for (i = PT_DIRECTORY_LEVEL;
783 i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
784 rmapp = gfn_to_rmap(kvm, gfn, i);
785 spte = rmap_next(kvm, rmapp, NULL);
786 while (spte) {
787 BUG_ON(!spte);
788 BUG_ON(!(*spte & PT_PRESENT_MASK));
789 BUG_ON((*spte & (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK)) != (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK));
790 pgprintk("rmap_write_protect(large): spte %p %llx %lld\n", spte, *spte, gfn);
8dae4445 791 if (is_writable_pte(*spte)) {
be38d276
AK
792 drop_spte(kvm, spte,
793 shadow_trap_nonpresent_pte);
44ad9944 794 --kvm->stat.lpages;
44ad9944
JR
795 spte = NULL;
796 write_protected = 1;
797 }
798 spte = rmap_next(kvm, rmapp, spte);
05da4558 799 }
05da4558
MT
800 }
801
b1a36821 802 return write_protected;
374cbac0
AK
803}
804
8a8365c5
FD
805static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
806 unsigned long data)
e930bffe
AA
807{
808 u64 *spte;
809 int need_tlb_flush = 0;
810
811 while ((spte = rmap_next(kvm, rmapp, NULL))) {
812 BUG_ON(!(*spte & PT_PRESENT_MASK));
813 rmap_printk("kvm_rmap_unmap_hva: spte %p %llx\n", spte, *spte);
be38d276 814 drop_spte(kvm, spte, shadow_trap_nonpresent_pte);
e930bffe
AA
815 need_tlb_flush = 1;
816 }
817 return need_tlb_flush;
818}
819
8a8365c5
FD
820static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp,
821 unsigned long data)
3da0dd43
IE
822{
823 int need_flush = 0;
e4b502ea 824 u64 *spte, new_spte;
3da0dd43
IE
825 pte_t *ptep = (pte_t *)data;
826 pfn_t new_pfn;
827
828 WARN_ON(pte_huge(*ptep));
829 new_pfn = pte_pfn(*ptep);
830 spte = rmap_next(kvm, rmapp, NULL);
831 while (spte) {
832 BUG_ON(!is_shadow_present_pte(*spte));
833 rmap_printk("kvm_set_pte_rmapp: spte %p %llx\n", spte, *spte);
834 need_flush = 1;
835 if (pte_write(*ptep)) {
be38d276 836 drop_spte(kvm, spte, shadow_trap_nonpresent_pte);
3da0dd43
IE
837 spte = rmap_next(kvm, rmapp, NULL);
838 } else {
839 new_spte = *spte &~ (PT64_BASE_ADDR_MASK);
840 new_spte |= (u64)new_pfn << PAGE_SHIFT;
841
842 new_spte &= ~PT_WRITABLE_MASK;
843 new_spte &= ~SPTE_HOST_WRITEABLE;
b79b93f9 844 new_spte &= ~shadow_accessed_mask;
e4b502ea 845 set_spte_track_bits(spte, new_spte);
3da0dd43
IE
846 spte = rmap_next(kvm, rmapp, spte);
847 }
848 }
849 if (need_flush)
850 kvm_flush_remote_tlbs(kvm);
851
852 return 0;
853}
854
8a8365c5
FD
855static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
856 unsigned long data,
3da0dd43 857 int (*handler)(struct kvm *kvm, unsigned long *rmapp,
8a8365c5 858 unsigned long data))
e930bffe 859{
852e3c19 860 int i, j;
90bb6fc5 861 int ret;
e930bffe 862 int retval = 0;
bc6678a3
MT
863 struct kvm_memslots *slots;
864
90d83dc3 865 slots = kvm_memslots(kvm);
e930bffe 866
46a26bf5
MT
867 for (i = 0; i < slots->nmemslots; i++) {
868 struct kvm_memory_slot *memslot = &slots->memslots[i];
e930bffe
AA
869 unsigned long start = memslot->userspace_addr;
870 unsigned long end;
871
e930bffe
AA
872 end = start + (memslot->npages << PAGE_SHIFT);
873 if (hva >= start && hva < end) {
874 gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT;
852e3c19 875
90bb6fc5 876 ret = handler(kvm, &memslot->rmap[gfn_offset], data);
852e3c19
JR
877
878 for (j = 0; j < KVM_NR_PAGE_SIZES - 1; ++j) {
6e3e243c
AA
879 unsigned long idx;
880 int sh;
881
882 sh = KVM_HPAGE_GFN_SHIFT(PT_DIRECTORY_LEVEL+j);
883 idx = ((memslot->base_gfn+gfn_offset) >> sh) -
884 (memslot->base_gfn >> sh);
90bb6fc5 885 ret |= handler(kvm,
3da0dd43
IE
886 &memslot->lpage_info[j][idx].rmap_pde,
887 data);
852e3c19 888 }
90bb6fc5
AK
889 trace_kvm_age_page(hva, memslot, ret);
890 retval |= ret;
e930bffe
AA
891 }
892 }
893
894 return retval;
895}
896
897int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
898{
3da0dd43
IE
899 return kvm_handle_hva(kvm, hva, 0, kvm_unmap_rmapp);
900}
901
902void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
903{
8a8365c5 904 kvm_handle_hva(kvm, hva, (unsigned long)&pte, kvm_set_pte_rmapp);
e930bffe
AA
905}
906
8a8365c5
FD
907static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
908 unsigned long data)
e930bffe
AA
909{
910 u64 *spte;
911 int young = 0;
912
6316e1c8
RR
913 /*
914 * Emulate the accessed bit for EPT, by checking if this page has
915 * an EPT mapping, and clearing it if it does. On the next access,
916 * a new EPT mapping will be established.
917 * This has some overhead, but not as much as the cost of swapping
918 * out actively used pages or breaking up actively used hugepages.
919 */
534e38b4 920 if (!shadow_accessed_mask)
6316e1c8 921 return kvm_unmap_rmapp(kvm, rmapp, data);
534e38b4 922
e930bffe
AA
923 spte = rmap_next(kvm, rmapp, NULL);
924 while (spte) {
925 int _young;
926 u64 _spte = *spte;
927 BUG_ON(!(_spte & PT_PRESENT_MASK));
928 _young = _spte & PT_ACCESSED_MASK;
929 if (_young) {
930 young = 1;
931 clear_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
932 }
933 spte = rmap_next(kvm, rmapp, spte);
934 }
935 return young;
936}
937
53a27b39
MT
938#define RMAP_RECYCLE_THRESHOLD 1000
939
852e3c19 940static void rmap_recycle(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
53a27b39
MT
941{
942 unsigned long *rmapp;
852e3c19
JR
943 struct kvm_mmu_page *sp;
944
945 sp = page_header(__pa(spte));
53a27b39 946
852e3c19 947 rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level);
53a27b39 948
3da0dd43 949 kvm_unmap_rmapp(vcpu->kvm, rmapp, 0);
53a27b39
MT
950 kvm_flush_remote_tlbs(vcpu->kvm);
951}
952
e930bffe
AA
953int kvm_age_hva(struct kvm *kvm, unsigned long hva)
954{
3da0dd43 955 return kvm_handle_hva(kvm, hva, 0, kvm_age_rmapp);
e930bffe
AA
956}
957
d6c69ee9 958#ifdef MMU_DEBUG
47ad8e68 959static int is_empty_shadow_page(u64 *spt)
6aa8b732 960{
139bdb2d
AK
961 u64 *pos;
962 u64 *end;
963
47ad8e68 964 for (pos = spt, end = pos + PAGE_SIZE / sizeof(u64); pos != end; pos++)
3c915510 965 if (is_shadow_present_pte(*pos)) {
b8688d51 966 printk(KERN_ERR "%s: %p %llx\n", __func__,
139bdb2d 967 pos, *pos);
6aa8b732 968 return 0;
139bdb2d 969 }
6aa8b732
AK
970 return 1;
971}
d6c69ee9 972#endif
6aa8b732 973
4db35314 974static void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp)
260746c0 975{
4db35314 976 ASSERT(is_empty_shadow_page(sp->spt));
7775834a 977 hlist_del(&sp->hash_link);
4db35314
AK
978 list_del(&sp->link);
979 __free_page(virt_to_page(sp->spt));
2032a93d
LJ
980 if (!sp->role.direct)
981 __free_page(virt_to_page(sp->gfns));
e8ad9a70 982 kmem_cache_free(mmu_page_header_cache, sp);
49d5ca26 983 --kvm->arch.n_used_mmu_pages;
260746c0
AK
984}
985
cea0f0e7
AK
986static unsigned kvm_page_table_hashfn(gfn_t gfn)
987{
1ae0a13d 988 return gfn & ((1 << KVM_MMU_HASH_SHIFT) - 1);
cea0f0e7
AK
989}
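/*
 * Editorial note: the hash is simply the low KVM_MMU_HASH_SHIFT bits of
 * the gfn; it indexes the kvm->arch.mmu_page_hash[] buckets walked by the
 * for_each_gfn_sp()/for_each_gfn_indirect_valid_sp() iterators below.
 */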
990
25c0de2c 991static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
2032a93d 992 u64 *parent_pte, int direct)
6aa8b732 993{
4db35314 994 struct kvm_mmu_page *sp;
6aa8b732 995
ad312c7c
ZX
996 sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache, sizeof *sp);
997 sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
2032a93d
LJ
998 if (!direct)
999 sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache,
1000 PAGE_SIZE);
4db35314 1001 set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
f05e70ac 1002 list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
291f26bc 1003 bitmap_zero(sp->slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS);
4db35314
AK
1004 sp->multimapped = 0;
1005 sp->parent_pte = parent_pte;
49d5ca26 1006 ++vcpu->kvm->arch.n_used_mmu_pages;
4db35314 1007 return sp;
6aa8b732
AK
1008}
1009
714b93da 1010static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
4db35314 1011 struct kvm_mmu_page *sp, u64 *parent_pte)
cea0f0e7
AK
1012{
1013 struct kvm_pte_chain *pte_chain;
1014 struct hlist_node *node;
1015 int i;
1016
1017 if (!parent_pte)
1018 return;
4db35314
AK
1019 if (!sp->multimapped) {
1020 u64 *old = sp->parent_pte;
cea0f0e7
AK
1021
1022 if (!old) {
4db35314 1023 sp->parent_pte = parent_pte;
cea0f0e7
AK
1024 return;
1025 }
4db35314 1026 sp->multimapped = 1;
714b93da 1027 pte_chain = mmu_alloc_pte_chain(vcpu);
4db35314
AK
1028 INIT_HLIST_HEAD(&sp->parent_ptes);
1029 hlist_add_head(&pte_chain->link, &sp->parent_ptes);
cea0f0e7
AK
1030 pte_chain->parent_ptes[0] = old;
1031 }
4db35314 1032 hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link) {
cea0f0e7
AK
1033 if (pte_chain->parent_ptes[NR_PTE_CHAIN_ENTRIES-1])
1034 continue;
1035 for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i)
1036 if (!pte_chain->parent_ptes[i]) {
1037 pte_chain->parent_ptes[i] = parent_pte;
1038 return;
1039 }
1040 }
714b93da 1041 pte_chain = mmu_alloc_pte_chain(vcpu);
cea0f0e7 1042 BUG_ON(!pte_chain);
4db35314 1043 hlist_add_head(&pte_chain->link, &sp->parent_ptes);
cea0f0e7
AK
1044 pte_chain->parent_ptes[0] = parent_pte;
1045}
1046
4db35314 1047static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
cea0f0e7
AK
1048 u64 *parent_pte)
1049{
1050 struct kvm_pte_chain *pte_chain;
1051 struct hlist_node *node;
1052 int i;
1053
4db35314
AK
1054 if (!sp->multimapped) {
1055 BUG_ON(sp->parent_pte != parent_pte);
1056 sp->parent_pte = NULL;
cea0f0e7
AK
1057 return;
1058 }
4db35314 1059 hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
cea0f0e7
AK
1060 for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
1061 if (!pte_chain->parent_ptes[i])
1062 break;
1063 if (pte_chain->parent_ptes[i] != parent_pte)
1064 continue;
697fe2e2
AK
1065 while (i + 1 < NR_PTE_CHAIN_ENTRIES
1066 && pte_chain->parent_ptes[i + 1]) {
cea0f0e7
AK
1067 pte_chain->parent_ptes[i]
1068 = pte_chain->parent_ptes[i + 1];
1069 ++i;
1070 }
1071 pte_chain->parent_ptes[i] = NULL;
697fe2e2
AK
1072 if (i == 0) {
1073 hlist_del(&pte_chain->link);
90cb0529 1074 mmu_free_pte_chain(pte_chain);
4db35314
AK
1075 if (hlist_empty(&sp->parent_ptes)) {
1076 sp->multimapped = 0;
1077 sp->parent_pte = NULL;
697fe2e2
AK
1078 }
1079 }
cea0f0e7
AK
1080 return;
1081 }
1082 BUG();
1083}
1084
6b18493d 1085static void mmu_parent_walk(struct kvm_mmu_page *sp, mmu_parent_walk_fn fn)
ad8cfbe3
MT
1086{
1087 struct kvm_pte_chain *pte_chain;
1088 struct hlist_node *node;
1089 struct kvm_mmu_page *parent_sp;
1090 int i;
1091
1092 if (!sp->multimapped && sp->parent_pte) {
1093 parent_sp = page_header(__pa(sp->parent_pte));
1047df1f 1094 fn(parent_sp, sp->parent_pte);
ad8cfbe3
MT
1095 return;
1096 }
1047df1f 1097
ad8cfbe3
MT
1098 hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
1099 for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
1047df1f
XG
1100 u64 *spte = pte_chain->parent_ptes[i];
1101
1102 if (!spte)
ad8cfbe3 1103 break;
1047df1f
XG
1104 parent_sp = page_header(__pa(spte));
1105 fn(parent_sp, spte);
ad8cfbe3
MT
1106 }
1107}
1108
1047df1f
XG
1109static void mark_unsync(struct kvm_mmu_page *sp, u64 *spte);
1110static void kvm_mmu_mark_parents_unsync(struct kvm_mmu_page *sp)
0074ff63 1111{
1047df1f 1112 mmu_parent_walk(sp, mark_unsync);
0074ff63
MT
1113}
1114
1047df1f 1115static void mark_unsync(struct kvm_mmu_page *sp, u64 *spte)
0074ff63 1116{
1047df1f 1117 unsigned int index;
0074ff63 1118
1047df1f
XG
1119 index = spte - sp->spt;
1120 if (__test_and_set_bit(index, sp->unsync_child_bitmap))
0074ff63 1121 return;
1047df1f 1122 if (sp->unsync_children++)
0074ff63 1123 return;
1047df1f 1124 kvm_mmu_mark_parents_unsync(sp);
0074ff63
MT
1125}
1126
d761a501
AK
1127static void nonpaging_prefetch_page(struct kvm_vcpu *vcpu,
1128 struct kvm_mmu_page *sp)
1129{
1130 int i;
1131
1132 for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
1133 sp->spt[i] = shadow_trap_nonpresent_pte;
1134}
1135
e8bc217a 1136static int nonpaging_sync_page(struct kvm_vcpu *vcpu,
be71e061 1137 struct kvm_mmu_page *sp, bool clear_unsync)
e8bc217a
MT
1138{
1139 return 1;
1140}
1141
a7052897
MT
1142static void nonpaging_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
1143{
1144}
1145
60c8aec6
MT
1146#define KVM_PAGE_ARRAY_NR 16
1147
1148struct kvm_mmu_pages {
1149 struct mmu_page_and_offset {
1150 struct kvm_mmu_page *sp;
1151 unsigned int idx;
1152 } page[KVM_PAGE_ARRAY_NR];
1153 unsigned int nr;
1154};
1155
0074ff63
MT
1156#define for_each_unsync_children(bitmap, idx) \
1157 for (idx = find_first_bit(bitmap, 512); \
1158 idx < 512; \
1159 idx = find_next_bit(bitmap, 512, idx+1))
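/*
 * Editorial note: 512 is the number of sptes in one shadow page
 * (PT64_ENT_PER_PAGE), so this walks every set bit in a shadow page's
 * 512-bit unsync_child_bitmap.
 */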
1160
cded19f3
HE
1161static int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp,
1162 int idx)
4731d4c7 1163{
60c8aec6 1164 int i;
4731d4c7 1165
60c8aec6
MT
1166 if (sp->unsync)
1167 for (i=0; i < pvec->nr; i++)
1168 if (pvec->page[i].sp == sp)
1169 return 0;
1170
1171 pvec->page[pvec->nr].sp = sp;
1172 pvec->page[pvec->nr].idx = idx;
1173 pvec->nr++;
1174 return (pvec->nr == KVM_PAGE_ARRAY_NR);
1175}
1176
1177static int __mmu_unsync_walk(struct kvm_mmu_page *sp,
1178 struct kvm_mmu_pages *pvec)
1179{
1180 int i, ret, nr_unsync_leaf = 0;
4731d4c7 1181
0074ff63 1182 for_each_unsync_children(sp->unsync_child_bitmap, i) {
7a8f1a74 1183 struct kvm_mmu_page *child;
4731d4c7
MT
1184 u64 ent = sp->spt[i];
1185
7a8f1a74
XG
1186 if (!is_shadow_present_pte(ent) || is_large_pte(ent))
1187 goto clear_child_bitmap;
1188
1189 child = page_header(ent & PT64_BASE_ADDR_MASK);
1190
1191 if (child->unsync_children) {
1192 if (mmu_pages_add(pvec, child, i))
1193 return -ENOSPC;
1194
1195 ret = __mmu_unsync_walk(child, pvec);
1196 if (!ret)
1197 goto clear_child_bitmap;
1198 else if (ret > 0)
1199 nr_unsync_leaf += ret;
1200 else
1201 return ret;
1202 } else if (child->unsync) {
1203 nr_unsync_leaf++;
1204 if (mmu_pages_add(pvec, child, i))
1205 return -ENOSPC;
1206 } else
1207 goto clear_child_bitmap;
1208
1209 continue;
1210
1211clear_child_bitmap:
1212 __clear_bit(i, sp->unsync_child_bitmap);
1213 sp->unsync_children--;
1214 WARN_ON((int)sp->unsync_children < 0);
4731d4c7
MT
1215 }
1216
4731d4c7 1217
60c8aec6
MT
1218 return nr_unsync_leaf;
1219}
1220
1221static int mmu_unsync_walk(struct kvm_mmu_page *sp,
1222 struct kvm_mmu_pages *pvec)
1223{
1224 if (!sp->unsync_children)
1225 return 0;
1226
1227 mmu_pages_add(pvec, sp, 0);
1228 return __mmu_unsync_walk(sp, pvec);
4731d4c7
MT
1229}
1230
4731d4c7
MT
1231static void kvm_unlink_unsync_page(struct kvm *kvm, struct kvm_mmu_page *sp)
1232{
1233 WARN_ON(!sp->unsync);
5e1b3ddb 1234 trace_kvm_mmu_sync_page(sp);
4731d4c7
MT
1235 sp->unsync = 0;
1236 --kvm->stat.mmu_unsync;
1237}
1238
7775834a
XG
1239static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
1240 struct list_head *invalid_list);
1241static void kvm_mmu_commit_zap_page(struct kvm *kvm,
1242 struct list_head *invalid_list);
4731d4c7 1243
f41d335a
XG
1244#define for_each_gfn_sp(kvm, sp, gfn, pos) \
1245 hlist_for_each_entry(sp, pos, \
7ae680eb
XG
1246 &(kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)], hash_link) \
1247 if ((sp)->gfn != (gfn)) {} else
1248
f41d335a
XG
1249#define for_each_gfn_indirect_valid_sp(kvm, sp, gfn, pos) \
1250 hlist_for_each_entry(sp, pos, \
7ae680eb
XG
1251 &(kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)], hash_link) \
1252 if ((sp)->gfn != (gfn) || (sp)->role.direct || \
1253 (sp)->role.invalid) {} else
1254
f918b443 1255/* @sp->gfn should be write-protected at the call site */
1d9dc7e0 1256static int __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
d98ba053 1257 struct list_head *invalid_list, bool clear_unsync)
4731d4c7 1258{
5b7e0102 1259 if (sp->role.cr4_pae != !!is_pae(vcpu)) {
d98ba053 1260 kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
4731d4c7
MT
1261 return 1;
1262 }
1263
f918b443 1264 if (clear_unsync)
1d9dc7e0 1265 kvm_unlink_unsync_page(vcpu->kvm, sp);
1d9dc7e0 1266
be71e061 1267 if (vcpu->arch.mmu.sync_page(vcpu, sp, clear_unsync)) {
d98ba053 1268 kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
4731d4c7
MT
1269 return 1;
1270 }
1271
1272 kvm_mmu_flush_tlb(vcpu);
4731d4c7
MT
1273 return 0;
1274}
1275
1d9dc7e0
XG
1276static int kvm_sync_page_transient(struct kvm_vcpu *vcpu,
1277 struct kvm_mmu_page *sp)
1278{
d98ba053 1279 LIST_HEAD(invalid_list);
1d9dc7e0
XG
1280 int ret;
1281
d98ba053 1282 ret = __kvm_sync_page(vcpu, sp, &invalid_list, false);
be71e061 1283 if (ret)
d98ba053
XG
1284 kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
1285
1d9dc7e0
XG
1286 return ret;
1287}
1288
d98ba053
XG
1289static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
1290 struct list_head *invalid_list)
1d9dc7e0 1291{
d98ba053 1292 return __kvm_sync_page(vcpu, sp, invalid_list, true);
1d9dc7e0
XG
1293}
1294
9f1a122f
XG
1295/* @gfn should be write-protected at the call site */
1296static void kvm_sync_pages(struct kvm_vcpu *vcpu, gfn_t gfn)
1297{
9f1a122f 1298 struct kvm_mmu_page *s;
f41d335a 1299 struct hlist_node *node;
d98ba053 1300 LIST_HEAD(invalid_list);
9f1a122f
XG
1301 bool flush = false;
1302
f41d335a 1303 for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node) {
7ae680eb 1304 if (!s->unsync)
9f1a122f
XG
1305 continue;
1306
1307 WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL);
1308 if ((s->role.cr4_pae != !!is_pae(vcpu)) ||
be71e061 1309 (vcpu->arch.mmu.sync_page(vcpu, s, true))) {
d98ba053 1310 kvm_mmu_prepare_zap_page(vcpu->kvm, s, &invalid_list);
9f1a122f
XG
1311 continue;
1312 }
1313 kvm_unlink_unsync_page(vcpu->kvm, s);
1314 flush = true;
1315 }
1316
d98ba053 1317 kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
9f1a122f
XG
1318 if (flush)
1319 kvm_mmu_flush_tlb(vcpu);
1320}
1321
60c8aec6
MT
1322struct mmu_page_path {
1323 struct kvm_mmu_page *parent[PT64_ROOT_LEVEL-1];
1324 unsigned int idx[PT64_ROOT_LEVEL-1];
4731d4c7
MT
1325};
1326
60c8aec6
MT
1327#define for_each_sp(pvec, sp, parents, i) \
1328 for (i = mmu_pages_next(&pvec, &parents, -1), \
1329 sp = pvec.page[i].sp; \
1330 i < pvec.nr && ({ sp = pvec.page[i].sp; 1;}); \
1331 i = mmu_pages_next(&pvec, &parents, i))
1332
cded19f3
HE
1333static int mmu_pages_next(struct kvm_mmu_pages *pvec,
1334 struct mmu_page_path *parents,
1335 int i)
60c8aec6
MT
1336{
1337 int n;
1338
1339 for (n = i+1; n < pvec->nr; n++) {
1340 struct kvm_mmu_page *sp = pvec->page[n].sp;
1341
1342 if (sp->role.level == PT_PAGE_TABLE_LEVEL) {
1343 parents->idx[0] = pvec->page[n].idx;
1344 return n;
1345 }
1346
1347 parents->parent[sp->role.level-2] = sp;
1348 parents->idx[sp->role.level-1] = pvec->page[n].idx;
1349 }
1350
1351 return n;
1352}
1353
cded19f3 1354static void mmu_pages_clear_parents(struct mmu_page_path *parents)
4731d4c7 1355{
60c8aec6
MT
1356 struct kvm_mmu_page *sp;
1357 unsigned int level = 0;
1358
1359 do {
1360 unsigned int idx = parents->idx[level];
4731d4c7 1361
60c8aec6
MT
1362 sp = parents->parent[level];
1363 if (!sp)
1364 return;
1365
1366 --sp->unsync_children;
1367 WARN_ON((int)sp->unsync_children < 0);
1368 __clear_bit(idx, sp->unsync_child_bitmap);
1369 level++;
1370 } while (level < PT64_ROOT_LEVEL-1 && !sp->unsync_children);
4731d4c7
MT
1371}
1372
60c8aec6
MT
1373static void kvm_mmu_pages_init(struct kvm_mmu_page *parent,
1374 struct mmu_page_path *parents,
1375 struct kvm_mmu_pages *pvec)
4731d4c7 1376{
60c8aec6
MT
1377 parents->parent[parent->role.level-1] = NULL;
1378 pvec->nr = 0;
1379}
4731d4c7 1380
60c8aec6
MT
1381static void mmu_sync_children(struct kvm_vcpu *vcpu,
1382 struct kvm_mmu_page *parent)
1383{
1384 int i;
1385 struct kvm_mmu_page *sp;
1386 struct mmu_page_path parents;
1387 struct kvm_mmu_pages pages;
d98ba053 1388 LIST_HEAD(invalid_list);
60c8aec6
MT
1389
1390 kvm_mmu_pages_init(parent, &parents, &pages);
1391 while (mmu_unsync_walk(parent, &pages)) {
b1a36821
MT
1392 int protected = 0;
1393
1394 for_each_sp(pages, sp, parents, i)
1395 protected |= rmap_write_protect(vcpu->kvm, sp->gfn);
1396
1397 if (protected)
1398 kvm_flush_remote_tlbs(vcpu->kvm);
1399
60c8aec6 1400 for_each_sp(pages, sp, parents, i) {
d98ba053 1401 kvm_sync_page(vcpu, sp, &invalid_list);
60c8aec6
MT
1402 mmu_pages_clear_parents(&parents);
1403 }
d98ba053 1404 kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
4731d4c7 1405 cond_resched_lock(&vcpu->kvm->mmu_lock);
60c8aec6
MT
1406 kvm_mmu_pages_init(parent, &parents, &pages);
1407 }
4731d4c7
MT
1408}
1409
cea0f0e7
AK
1410static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
1411 gfn_t gfn,
1412 gva_t gaddr,
1413 unsigned level,
f6e2c02b 1414 int direct,
41074d07 1415 unsigned access,
f7d9c7b7 1416 u64 *parent_pte)
cea0f0e7
AK
1417{
1418 union kvm_mmu_page_role role;
cea0f0e7 1419 unsigned quadrant;
9f1a122f 1420 struct kvm_mmu_page *sp;
f41d335a 1421 struct hlist_node *node;
9f1a122f 1422 bool need_sync = false;
cea0f0e7 1423
a770f6f2 1424 role = vcpu->arch.mmu.base_role;
cea0f0e7 1425 role.level = level;
f6e2c02b 1426 role.direct = direct;
84b0c8c6 1427 if (role.direct)
5b7e0102 1428 role.cr4_pae = 0;
41074d07 1429 role.access = access;
b66d8000 1430 if (!tdp_enabled && vcpu->arch.mmu.root_level <= PT32_ROOT_LEVEL) {
cea0f0e7
AK
1431 quadrant = gaddr >> (PAGE_SHIFT + (PT64_PT_BITS * level));
1432 quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
1433 role.quadrant = quadrant;
1434 }
f41d335a 1435 for_each_gfn_sp(vcpu->kvm, sp, gfn, node) {
7ae680eb
XG
1436 if (!need_sync && sp->unsync)
1437 need_sync = true;
4731d4c7 1438
7ae680eb
XG
1439 if (sp->role.word != role.word)
1440 continue;
4731d4c7 1441
7ae680eb
XG
1442 if (sp->unsync && kvm_sync_page_transient(vcpu, sp))
1443 break;
e02aa901 1444
7ae680eb
XG
1445 mmu_page_add_parent_pte(vcpu, sp, parent_pte);
1446 if (sp->unsync_children) {
a8eeb04a 1447 kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
7ae680eb
XG
1448 kvm_mmu_mark_parents_unsync(sp);
1449 } else if (sp->unsync)
1450 kvm_mmu_mark_parents_unsync(sp);
e02aa901 1451
7ae680eb
XG
1452 trace_kvm_mmu_get_page(sp, false);
1453 return sp;
1454 }
dfc5aa00 1455 ++vcpu->kvm->stat.mmu_cache_miss;
2032a93d 1456 sp = kvm_mmu_alloc_page(vcpu, parent_pte, direct);
4db35314
AK
1457 if (!sp)
1458 return sp;
4db35314
AK
1459 sp->gfn = gfn;
1460 sp->role = role;
7ae680eb
XG
1461 hlist_add_head(&sp->hash_link,
1462 &vcpu->kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)]);
f6e2c02b 1463 if (!direct) {
b1a36821
MT
1464 if (rmap_write_protect(vcpu->kvm, gfn))
1465 kvm_flush_remote_tlbs(vcpu->kvm);
9f1a122f
XG
1466 if (level > PT_PAGE_TABLE_LEVEL && need_sync)
1467 kvm_sync_pages(vcpu, gfn);
1468
4731d4c7
MT
1469 account_shadowed(vcpu->kvm, gfn);
1470 }
131d8279
AK
1471 if (shadow_trap_nonpresent_pte != shadow_notrap_nonpresent_pte)
1472 vcpu->arch.mmu.prefetch_page(vcpu, sp);
1473 else
1474 nonpaging_prefetch_page(vcpu, sp);
f691fe1d 1475 trace_kvm_mmu_get_page(sp, true);
4db35314 1476 return sp;
cea0f0e7
AK
1477}
1478
2d11123a
AK
1479static void shadow_walk_init(struct kvm_shadow_walk_iterator *iterator,
1480 struct kvm_vcpu *vcpu, u64 addr)
1481{
1482 iterator->addr = addr;
1483 iterator->shadow_addr = vcpu->arch.mmu.root_hpa;
1484 iterator->level = vcpu->arch.mmu.shadow_root_level;
1485 if (iterator->level == PT32E_ROOT_LEVEL) {
1486 iterator->shadow_addr
1487 = vcpu->arch.mmu.pae_root[(addr >> 30) & 3];
1488 iterator->shadow_addr &= PT64_BASE_ADDR_MASK;
1489 --iterator->level;
1490 if (!iterator->shadow_addr)
1491 iterator->level = 0;
1492 }
1493}
1494
1495static bool shadow_walk_okay(struct kvm_shadow_walk_iterator *iterator)
1496{
1497 if (iterator->level < PT_PAGE_TABLE_LEVEL)
1498 return false;
4d88954d
MT
1499
1500 if (iterator->level == PT_PAGE_TABLE_LEVEL)
1501 if (is_large_pte(*iterator->sptep))
1502 return false;
1503
2d11123a
AK
1504 iterator->index = SHADOW_PT_INDEX(iterator->addr, iterator->level);
1505 iterator->sptep = ((u64 *)__va(iterator->shadow_addr)) + iterator->index;
1506 return true;
1507}
1508
1509static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator)
1510{
1511 iterator->shadow_addr = *iterator->sptep & PT64_BASE_ADDR_MASK;
1512 --iterator->level;
1513}
1514
32ef26a3
AK
1515static void link_shadow_page(u64 *sptep, struct kvm_mmu_page *sp)
1516{
1517 u64 spte;
1518
1519 spte = __pa(sp->spt)
1520 | PT_PRESENT_MASK | PT_ACCESSED_MASK
1521 | PT_WRITABLE_MASK | PT_USER_MASK;
121eee97 1522 __set_spte(sptep, spte);
32ef26a3
AK
1523}
1524
a3aa51cf
AK
1525static void drop_large_spte(struct kvm_vcpu *vcpu, u64 *sptep)
1526{
1527 if (is_large_pte(*sptep)) {
1528 drop_spte(vcpu->kvm, sptep, shadow_trap_nonpresent_pte);
1529 kvm_flush_remote_tlbs(vcpu->kvm);
1530 }
1531}
1532
a357bd22
AK
1533static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep,
1534 unsigned direct_access)
1535{
1536 if (is_shadow_present_pte(*sptep) && !is_large_pte(*sptep)) {
1537 struct kvm_mmu_page *child;
1538
1539 /*
1540 * For the direct sp, if the guest pte's dirty bit
1541 * changed from clean to dirty, it will corrupt the
1542 * sp's access: it would allow writes in a read-only sp,
1543 * so we should update the spte at this point to get
1544 * a new sp with the correct access.
1545 */
1546 child = page_header(*sptep & PT64_BASE_ADDR_MASK);
1547 if (child->role.access == direct_access)
1548 return;
1549
1550 mmu_page_remove_parent_pte(child, sptep);
1551 __set_spte(sptep, shadow_trap_nonpresent_pte);
1552 kvm_flush_remote_tlbs(vcpu->kvm);
1553 }
1554}
1555
90cb0529 1556static void kvm_mmu_page_unlink_children(struct kvm *kvm,
4db35314 1557 struct kvm_mmu_page *sp)
a436036b 1558{
697fe2e2
AK
1559 unsigned i;
1560 u64 *pt;
1561 u64 ent;
1562
4db35314 1563 pt = sp->spt;
697fe2e2 1564
697fe2e2
AK
1565 for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
1566 ent = pt[i];
1567
05da4558 1568 if (is_shadow_present_pte(ent)) {
776e6633 1569 if (!is_last_spte(ent, sp->role.level)) {
05da4558
MT
1570 ent &= PT64_BASE_ADDR_MASK;
1571 mmu_page_remove_parent_pte(page_header(ent),
1572 &pt[i]);
1573 } else {
776e6633
MT
1574 if (is_large_pte(ent))
1575 --kvm->stat.lpages;
be38d276
AK
1576 drop_spte(kvm, &pt[i],
1577 shadow_trap_nonpresent_pte);
05da4558
MT
1578 }
1579 }
c7addb90 1580 pt[i] = shadow_trap_nonpresent_pte;
697fe2e2 1581 }
a436036b
AK
1582}
1583
4db35314 1584static void kvm_mmu_put_page(struct kvm_mmu_page *sp, u64 *parent_pte)
cea0f0e7 1585{
4db35314 1586 mmu_page_remove_parent_pte(sp, parent_pte);
a436036b
AK
1587}
1588
12b7d28f
AK
1589static void kvm_mmu_reset_last_pte_updated(struct kvm *kvm)
1590{
1591 int i;
988a2cae 1592 struct kvm_vcpu *vcpu;
12b7d28f 1593
988a2cae
GN
1594 kvm_for_each_vcpu(i, vcpu, kvm)
1595 vcpu->arch.last_pte_updated = NULL;
12b7d28f
AK
1596}
1597
31aa2b44 1598static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
a436036b
AK
1599{
1600 u64 *parent_pte;
1601
4db35314
AK
1602 while (sp->multimapped || sp->parent_pte) {
1603 if (!sp->multimapped)
1604 parent_pte = sp->parent_pte;
a436036b
AK
1605 else {
1606 struct kvm_pte_chain *chain;
1607
4db35314 1608 chain = container_of(sp->parent_ptes.first,
a436036b
AK
1609 struct kvm_pte_chain, link);
1610 parent_pte = chain->parent_ptes[0];
1611 }
697fe2e2 1612 BUG_ON(!parent_pte);
4db35314 1613 kvm_mmu_put_page(sp, parent_pte);
d555c333 1614 __set_spte(parent_pte, shadow_trap_nonpresent_pte);
a436036b 1615 }
31aa2b44
AK
1616}
1617
60c8aec6 1618static int mmu_zap_unsync_children(struct kvm *kvm,
7775834a
XG
1619 struct kvm_mmu_page *parent,
1620 struct list_head *invalid_list)
4731d4c7 1621{
60c8aec6
MT
1622 int i, zapped = 0;
1623 struct mmu_page_path parents;
1624 struct kvm_mmu_pages pages;
4731d4c7 1625
60c8aec6 1626 if (parent->role.level == PT_PAGE_TABLE_LEVEL)
4731d4c7 1627 return 0;
60c8aec6
MT
1628
1629 kvm_mmu_pages_init(parent, &parents, &pages);
1630 while (mmu_unsync_walk(parent, &pages)) {
1631 struct kvm_mmu_page *sp;
1632
1633 for_each_sp(pages, sp, parents, i) {
7775834a 1634 kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
60c8aec6 1635 mmu_pages_clear_parents(&parents);
77662e00 1636 zapped++;
60c8aec6 1637 }
60c8aec6
MT
1638 kvm_mmu_pages_init(parent, &parents, &pages);
1639 }
1640
1641 return zapped;
4731d4c7
MT
1642}
1643
7775834a
XG
1644static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
1645 struct list_head *invalid_list)
31aa2b44 1646{
4731d4c7 1647 int ret;
f691fe1d 1648
7775834a 1649 trace_kvm_mmu_prepare_zap_page(sp);
31aa2b44 1650 ++kvm->stat.mmu_shadow_zapped;
7775834a 1651 ret = mmu_zap_unsync_children(kvm, sp, invalid_list);
4db35314 1652 kvm_mmu_page_unlink_children(kvm, sp);
31aa2b44 1653 kvm_mmu_unlink_parents(kvm, sp);
f6e2c02b 1654 if (!sp->role.invalid && !sp->role.direct)
5b5c6a5a 1655 unaccount_shadowed(kvm, sp->gfn);
4731d4c7
MT
1656 if (sp->unsync)
1657 kvm_unlink_unsync_page(kvm, sp);
4db35314 1658 if (!sp->root_count) {
54a4f023
GJ
1659 /* Count self */
1660 ret++;
7775834a 1661 list_move(&sp->link, invalid_list);
2e53d63a 1662 } else {
5b5c6a5a 1663 list_move(&sp->link, &kvm->arch.active_mmu_pages);
2e53d63a
MT
1664 kvm_reload_remote_mmus(kvm);
1665 }
7775834a
XG
1666
1667 sp->role.invalid = 1;
12b7d28f 1668 kvm_mmu_reset_last_pte_updated(kvm);
4731d4c7 1669 return ret;
a436036b
AK
1670}
1671
7775834a
XG
1672static void kvm_mmu_commit_zap_page(struct kvm *kvm,
1673 struct list_head *invalid_list)
1674{
1675 struct kvm_mmu_page *sp;
1676
1677 if (list_empty(invalid_list))
1678 return;
1679
1680 kvm_flush_remote_tlbs(kvm);
1681
1682 do {
1683 sp = list_first_entry(invalid_list, struct kvm_mmu_page, link);
1684 WARN_ON(!sp->role.invalid || sp->root_count);
1685 kvm_mmu_free_page(kvm, sp);
1686 } while (!list_empty(invalid_list));
1687
1688}
1689
82ce2c96
IE
1690/*
1691 * Changing the number of mmu pages allocated to the vm
49d5ca26 1692 * Note: if goal_nr_mmu_pages is too small, you will get dead lock
82ce2c96 1693 */
49d5ca26 1694void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int goal_nr_mmu_pages)
82ce2c96 1695{
d98ba053 1696 LIST_HEAD(invalid_list);
82ce2c96
IE
1697 /*
1698 * If we set the number of mmu pages to be smaller be than the
1699 * number of actived pages , we must to free some mmu pages before we
1700 * change the value
1701 */
1702
49d5ca26
DH
1703 if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) {
1704 while (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages &&
77662e00 1705 !list_empty(&kvm->arch.active_mmu_pages)) {
82ce2c96
IE
1706 struct kvm_mmu_page *page;
1707
f05e70ac 1708 page = container_of(kvm->arch.active_mmu_pages.prev,
82ce2c96 1709 struct kvm_mmu_page, link);
49d5ca26 1710 kvm_mmu_prepare_zap_page(kvm, page,
d98ba053 1711 &invalid_list);
82ce2c96 1712 }
d98ba053 1713 kvm_mmu_commit_zap_page(kvm, &invalid_list);
49d5ca26 1714 goal_nr_mmu_pages = kvm->arch.n_used_mmu_pages;
82ce2c96 1715 }
82ce2c96 1716
49d5ca26 1717 kvm->arch.n_max_mmu_pages = goal_nr_mmu_pages;
82ce2c96
IE
1718}
1719
f67a46f4 1720static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
a436036b 1721{
4db35314 1722 struct kvm_mmu_page *sp;
f41d335a 1723 struct hlist_node *node;
d98ba053 1724 LIST_HEAD(invalid_list);
a436036b
AK
1725 int r;
1726
b8688d51 1727 pgprintk("%s: looking for gfn %lx\n", __func__, gfn);
a436036b 1728 r = 0;
f41d335a
XG
1729
1730 for_each_gfn_indirect_valid_sp(kvm, sp, gfn, node) {
7ae680eb
XG
1731 pgprintk("%s: gfn %lx role %x\n", __func__, gfn,
1732 sp->role.word);
1733 r = 1;
f41d335a 1734 kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
7ae680eb 1735 }
d98ba053 1736 kvm_mmu_commit_zap_page(kvm, &invalid_list);
a436036b 1737 return r;
cea0f0e7
AK
1738}
1739
f67a46f4 1740static void mmu_unshadow(struct kvm *kvm, gfn_t gfn)
97a0a01e 1741{
4db35314 1742 struct kvm_mmu_page *sp;
f41d335a 1743 struct hlist_node *node;
d98ba053 1744 LIST_HEAD(invalid_list);
97a0a01e 1745
f41d335a 1746 for_each_gfn_indirect_valid_sp(kvm, sp, gfn, node) {
7ae680eb
XG
1747 pgprintk("%s: zap %lx %x\n",
1748 __func__, gfn, sp->role.word);
f41d335a 1749 kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
97a0a01e 1750 }
d98ba053 1751 kvm_mmu_commit_zap_page(kvm, &invalid_list);
97a0a01e
AK
1752}
1753
38c335f1 1754static void page_header_update_slot(struct kvm *kvm, void *pte, gfn_t gfn)
6aa8b732 1755{
bc6678a3 1756 int slot = memslot_id(kvm, gfn);
4db35314 1757 struct kvm_mmu_page *sp = page_header(__pa(pte));
6aa8b732 1758
291f26bc 1759 __set_bit(slot, sp->slot_bitmap);
6aa8b732
AK
1760}
1761
6844dec6
MT
1762static void mmu_convert_notrap(struct kvm_mmu_page *sp)
1763{
1764 int i;
1765 u64 *pt = sp->spt;
1766
1767 if (shadow_trap_nonpresent_pte == shadow_notrap_nonpresent_pte)
1768 return;
1769
1770 for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
1771 if (pt[i] == shadow_notrap_nonpresent_pte)
d555c333 1772 __set_spte(&pt[i], shadow_trap_nonpresent_pte);
6844dec6
MT
1773 }
1774}
1775
74be52e3
SY
1776/*
1777 * The function is based on mtrr_type_lookup() in
1778 * arch/x86/kernel/cpu/mtrr/generic.c
1779 */
1780static int get_mtrr_type(struct mtrr_state_type *mtrr_state,
1781 u64 start, u64 end)
1782{
1783 int i;
1784 u64 base, mask;
1785 u8 prev_match, curr_match;
1786 int num_var_ranges = KVM_NR_VAR_MTRR;
1787
1788 if (!mtrr_state->enabled)
1789 return 0xFF;
1790
1791 /* Make end inclusive, instead of exclusive */
1792 end--;
1793
1794 /* Look in fixed ranges. Just return the type of the range containing start */
1795 if (mtrr_state->have_fixed && (start < 0x100000)) {
1796 int idx;
1797
1798 if (start < 0x80000) {
1799 idx = 0;
1800 idx += (start >> 16);
1801 return mtrr_state->fixed_ranges[idx];
1802 } else if (start < 0xC0000) {
1803 idx = 1 * 8;
1804 idx += ((start - 0x80000) >> 14);
1805 return mtrr_state->fixed_ranges[idx];
1806 } else if (start < 0x1000000) {
1807 idx = 3 * 8;
1808 idx += ((start - 0xC0000) >> 12);
1809 return mtrr_state->fixed_ranges[idx];
1810 }
1811 }
1812
1813 /*
1814 * Look in variable ranges
1815 * Look for multiple ranges matching this address and pick the type
1816 * as per MTRR precedence
1817 */
1818 if (!(mtrr_state->enabled & 2))
1819 return mtrr_state->def_type;
1820
1821 prev_match = 0xFF;
1822 for (i = 0; i < num_var_ranges; ++i) {
1823 unsigned short start_state, end_state;
1824
1825 if (!(mtrr_state->var_ranges[i].mask_lo & (1 << 11)))
1826 continue;
1827
1828 base = (((u64)mtrr_state->var_ranges[i].base_hi) << 32) +
1829 (mtrr_state->var_ranges[i].base_lo & PAGE_MASK);
1830 mask = (((u64)mtrr_state->var_ranges[i].mask_hi) << 32) +
1831 (mtrr_state->var_ranges[i].mask_lo & PAGE_MASK);
1832
1833 start_state = ((start & mask) == (base & mask));
1834 end_state = ((end & mask) == (base & mask));
1835 if (start_state != end_state)
1836 return 0xFE;
1837
1838 if ((start & mask) != (base & mask))
1839 continue;
1840
1841 curr_match = mtrr_state->var_ranges[i].base_lo & 0xff;
1842 if (prev_match == 0xFF) {
1843 prev_match = curr_match;
1844 continue;
1845 }
1846
1847 if (prev_match == MTRR_TYPE_UNCACHABLE ||
1848 curr_match == MTRR_TYPE_UNCACHABLE)
1849 return MTRR_TYPE_UNCACHABLE;
1850
1851 if ((prev_match == MTRR_TYPE_WRBACK &&
1852 curr_match == MTRR_TYPE_WRTHROUGH) ||
1853 (prev_match == MTRR_TYPE_WRTHROUGH &&
1854 curr_match == MTRR_TYPE_WRBACK)) {
1855 prev_match = MTRR_TYPE_WRTHROUGH;
1856 curr_match = MTRR_TYPE_WRTHROUGH;
1857 }
1858
1859 if (prev_match != curr_match)
1860 return MTRR_TYPE_UNCACHABLE;
1861 }
1862
1863 if (prev_match != 0xFF)
1864 return prev_match;
1865
1866 return mtrr_state->def_type;
1867}
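
A standalone sketch of the fixed-range index arithmetic in the function above, assuming start lies below 0x100000 (the helper name is hypothetical; the shifts simply mirror the three branches of get_mtrr_type()):

static unsigned int mtrr_fixed_range_index(u64 start)
{
	if (start < 0x80000)			/* 0-512K: eight 64K slots */
		return start >> 16;
	if (start < 0xC0000)			/* 512K-768K: sixteen 16K slots */
		return 8 + ((start - 0x80000) >> 14);
	return 24 + ((start - 0xC0000) >> 12);	/* 768K-1M: sixty-four 4K slots */
}
/* e.g. start = 0xA0000 gives 8 + (0x20000 >> 14) = 16 */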
1868
4b12f0de 1869u8 kvm_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
74be52e3
SY
1870{
1871 u8 mtrr;
1872
1873 mtrr = get_mtrr_type(&vcpu->arch.mtrr_state, gfn << PAGE_SHIFT,
1874 (gfn << PAGE_SHIFT) + PAGE_SIZE);
1875 if (mtrr == 0xfe || mtrr == 0xff)
1876 mtrr = MTRR_TYPE_WRBACK;
1877 return mtrr;
1878}
4b12f0de 1879EXPORT_SYMBOL_GPL(kvm_get_guest_memory_type);
74be52e3 1880
9cf5cf5a
XG
1881static void __kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
1882{
1883 trace_kvm_mmu_unsync_page(sp);
1884 ++vcpu->kvm->stat.mmu_unsync;
1885 sp->unsync = 1;
1886
1887 kvm_mmu_mark_parents_unsync(sp);
1888 mmu_convert_notrap(sp);
1889}
1890
1891static void kvm_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn)
4731d4c7 1892{
4731d4c7 1893 struct kvm_mmu_page *s;
f41d335a 1894 struct hlist_node *node;
9cf5cf5a 1895
f41d335a 1896 for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node) {
7ae680eb 1897 if (s->unsync)
4731d4c7 1898 continue;
9cf5cf5a
XG
1899 WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL);
1900 __kvm_unsync_page(vcpu, s);
4731d4c7 1901 }
4731d4c7
MT
1902}
1903
1904static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
1905 bool can_unsync)
1906{
9cf5cf5a 1907 struct kvm_mmu_page *s;
f41d335a 1908 struct hlist_node *node;
9cf5cf5a
XG
1909 bool need_unsync = false;
1910
f41d335a 1911 for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn, node) {
36a2e677
XG
1912 if (!can_unsync)
1913 return 1;
1914
9cf5cf5a 1915 if (s->role.level != PT_PAGE_TABLE_LEVEL)
4731d4c7 1916 return 1;
9cf5cf5a
XG
1917
1918 if (!need_unsync && !s->unsync) {
36a2e677 1919 if (!oos_shadow)
9cf5cf5a
XG
1920 return 1;
1921 need_unsync = true;
1922 }
4731d4c7 1923 }
9cf5cf5a
XG
1924 if (need_unsync)
1925 kvm_unsync_pages(vcpu, gfn);
4731d4c7
MT
1926 return 0;
1927}
1928
d555c333 1929static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
1e73f9dd 1930 unsigned pte_access, int user_fault,
852e3c19 1931 int write_fault, int dirty, int level,
c2d0ee46 1932 gfn_t gfn, pfn_t pfn, bool speculative,
1403283a 1933 bool can_unsync, bool reset_host_protection)
1c4f1fd6
AK
1934{
1935 u64 spte;
1e73f9dd 1936 int ret = 0;
64d4d521 1937
1c4f1fd6
AK
1938 /*
1939 * We don't set the accessed bit, since we sometimes want to see
1940 * whether the guest actually used the pte (in order to detect
1941 * demand paging).
1942 */
4132779b 1943 spte = shadow_base_present_pte;
947da538 1944 if (!speculative)
3201b5d9 1945 spte |= shadow_accessed_mask;
1c4f1fd6
AK
1946 if (!dirty)
1947 pte_access &= ~ACC_WRITE_MASK;
7b52345e
SY
1948 if (pte_access & ACC_EXEC_MASK)
1949 spte |= shadow_x_mask;
1950 else
1951 spte |= shadow_nx_mask;
1c4f1fd6 1952 if (pte_access & ACC_USER_MASK)
7b52345e 1953 spte |= shadow_user_mask;
852e3c19 1954 if (level > PT_PAGE_TABLE_LEVEL)
05da4558 1955 spte |= PT_PAGE_SIZE_MASK;
4b12f0de
SY
1956 if (tdp_enabled)
1957 spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn,
1958 kvm_is_mmio_pfn(pfn));
1c4f1fd6 1959
1403283a
IE
1960 if (reset_host_protection)
1961 spte |= SPTE_HOST_WRITEABLE;
1962
35149e21 1963 spte |= (u64)pfn << PAGE_SHIFT;
1c4f1fd6
AK
1964
1965 if ((pte_access & ACC_WRITE_MASK)
8184dd38
AK
1966 || (!tdp_enabled && write_fault && !is_write_protection(vcpu)
1967 && !user_fault)) {
1c4f1fd6 1968
852e3c19
JR
1969 if (level > PT_PAGE_TABLE_LEVEL &&
1970 has_wrprotected_page(vcpu->kvm, gfn, level)) {
38187c83 1971 ret = 1;
be38d276
AK
1972 drop_spte(vcpu->kvm, sptep, shadow_trap_nonpresent_pte);
1973 goto done;
38187c83
MT
1974 }
1975
1c4f1fd6 1976 spte |= PT_WRITABLE_MASK;
1c4f1fd6 1977
69325a12
AK
1978 if (!tdp_enabled && !(pte_access & ACC_WRITE_MASK))
1979 spte &= ~PT_USER_MASK;
1980
ecc5589f
MT
1981 /*
1982 * Optimization: for pte sync, if spte was writable the hash
1983 * lookup is unnecessary (and expensive). Write protection
1984 * is the responsibility of mmu_get_page / kvm_sync_page.
1985 * The same reasoning applies to dirty page accounting.
1986 */
8dae4445 1987 if (!can_unsync && is_writable_pte(*sptep))
ecc5589f
MT
1988 goto set_pte;
1989
4731d4c7 1990 if (mmu_need_write_protect(vcpu, gfn, can_unsync)) {
1c4f1fd6 1991 pgprintk("%s: found shadow page for %lx, marking ro\n",
b8688d51 1992 __func__, gfn);
1e73f9dd 1993 ret = 1;
1c4f1fd6 1994 pte_access &= ~ACC_WRITE_MASK;
8dae4445 1995 if (is_writable_pte(spte))
1c4f1fd6 1996 spte &= ~PT_WRITABLE_MASK;
1c4f1fd6
AK
1997 }
1998 }
1999
1c4f1fd6
AK
2000 if (pte_access & ACC_WRITE_MASK)
2001 mark_page_dirty(vcpu->kvm, gfn);
2002
38187c83 2003set_pte:
b79b93f9 2004 update_spte(sptep, spte);
be38d276 2005done:
1e73f9dd
MT
2006 return ret;
2007}
2008
d555c333 2009static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
1e73f9dd
MT
2010 unsigned pt_access, unsigned pte_access,
2011 int user_fault, int write_fault, int dirty,
852e3c19 2012 int *ptwrite, int level, gfn_t gfn,
1403283a
IE
2013 pfn_t pfn, bool speculative,
2014 bool reset_host_protection)
1e73f9dd
MT
2015{
2016 int was_rmapped = 0;
53a27b39 2017 int rmap_count;
1e73f9dd
MT
2018
2019 pgprintk("%s: spte %llx access %x write_fault %d"
2020 " user_fault %d gfn %lx\n",
d555c333 2021 __func__, *sptep, pt_access,
1e73f9dd
MT
2022 write_fault, user_fault, gfn);
2023
d555c333 2024 if (is_rmap_spte(*sptep)) {
1e73f9dd
MT
2025 /*
2026 * If we overwrite a PTE page pointer with a 2MB PMD, unlink
2027 * the parent of the now unreachable PTE.
2028 */
852e3c19
JR
2029 if (level > PT_PAGE_TABLE_LEVEL &&
2030 !is_large_pte(*sptep)) {
1e73f9dd 2031 struct kvm_mmu_page *child;
d555c333 2032 u64 pte = *sptep;
1e73f9dd
MT
2033
2034 child = page_header(pte & PT64_BASE_ADDR_MASK);
d555c333 2035 mmu_page_remove_parent_pte(child, sptep);
3be2264b
MT
2036 __set_spte(sptep, shadow_trap_nonpresent_pte);
2037 kvm_flush_remote_tlbs(vcpu->kvm);
d555c333 2038 } else if (pfn != spte_to_pfn(*sptep)) {
1e73f9dd 2039 pgprintk("hfn old %lx new %lx\n",
d555c333 2040 spte_to_pfn(*sptep), pfn);
be38d276 2041 drop_spte(vcpu->kvm, sptep, shadow_trap_nonpresent_pte);
91546356 2042 kvm_flush_remote_tlbs(vcpu->kvm);
6bed6b9e
JR
2043 } else
2044 was_rmapped = 1;
1e73f9dd 2045 }
852e3c19 2046
d555c333 2047 if (set_spte(vcpu, sptep, pte_access, user_fault, write_fault,
1403283a
IE
2048 dirty, level, gfn, pfn, speculative, true,
2049 reset_host_protection)) {
1e73f9dd
MT
2050 if (write_fault)
2051 *ptwrite = 1;
5304efde 2052 kvm_mmu_flush_tlb(vcpu);
a378b4e6 2053 }
1e73f9dd 2054
d555c333 2055 pgprintk("%s: setting spte %llx\n", __func__, *sptep);
1e73f9dd 2056 pgprintk("instantiating %s PTE (%s) at %ld (%llx) addr %p\n",
d555c333 2057 is_large_pte(*sptep)? "2MB" : "4kB",
a205bc19
JR
2058 *sptep & PT_PRESENT_MASK ?"RW":"R", gfn,
2059 *sptep, sptep);
d555c333 2060 if (!was_rmapped && is_large_pte(*sptep))
05da4558
MT
2061 ++vcpu->kvm->stat.lpages;
2062
d555c333 2063 page_header_update_slot(vcpu->kvm, sptep, gfn);
1c4f1fd6 2064 if (!was_rmapped) {
44ad9944 2065 rmap_count = rmap_add(vcpu, sptep, gfn);
53a27b39 2066 if (rmap_count > RMAP_RECYCLE_THRESHOLD)
852e3c19 2067 rmap_recycle(vcpu, sptep, gfn);
1c4f1fd6 2068 }
9ed5520d 2069 kvm_release_pfn_clean(pfn);
1b7fcd32 2070 if (speculative) {
d555c333 2071 vcpu->arch.last_pte_updated = sptep;
1b7fcd32
AK
2072 vcpu->arch.last_pte_gfn = gfn;
2073 }
1c4f1fd6
AK
2074}
2075
6aa8b732
AK
2076static void nonpaging_new_cr3(struct kvm_vcpu *vcpu)
2077{
2078}
2079
9f652d21 2080static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
852e3c19 2081 int level, gfn_t gfn, pfn_t pfn)
140754bc 2082{
9f652d21 2083 struct kvm_shadow_walk_iterator iterator;
140754bc 2084 struct kvm_mmu_page *sp;
9f652d21 2085 int pt_write = 0;
140754bc 2086 gfn_t pseudo_gfn;
6aa8b732 2087
9f652d21 2088 for_each_shadow_entry(vcpu, (u64)gfn << PAGE_SHIFT, iterator) {
852e3c19 2089 if (iterator.level == level) {
9f652d21
AK
2090 mmu_set_spte(vcpu, iterator.sptep, ACC_ALL, ACC_ALL,
2091 0, write, 1, &pt_write,
1403283a 2092 level, gfn, pfn, false, true);
9f652d21
AK
2093 ++vcpu->stat.pf_fixed;
2094 break;
6aa8b732
AK
2095 }
2096
9f652d21 2097 if (*iterator.sptep == shadow_trap_nonpresent_pte) {
c9fa0b3b
LJ
2098 u64 base_addr = iterator.addr;
2099
2100 base_addr &= PT64_LVL_ADDR_MASK(iterator.level);
2101 pseudo_gfn = base_addr >> PAGE_SHIFT;
9f652d21
AK
2102 sp = kvm_mmu_get_page(vcpu, pseudo_gfn, iterator.addr,
2103 iterator.level - 1,
2104 1, ACC_ALL, iterator.sptep);
2105 if (!sp) {
2106 pgprintk("nonpaging_map: ENOMEM\n");
2107 kvm_release_pfn_clean(pfn);
2108 return -ENOMEM;
2109 }
140754bc 2110
d555c333
AK
2111 __set_spte(iterator.sptep,
2112 __pa(sp->spt)
2113 | PT_PRESENT_MASK | PT_WRITABLE_MASK
2114 | shadow_user_mask | shadow_x_mask);
9f652d21
AK
2115 }
2116 }
2117 return pt_write;
6aa8b732
AK
2118}
2119
bf998156
HY
2120static void kvm_send_hwpoison_signal(struct kvm *kvm, gfn_t gfn)
2121{
2122 char buf[1];
2123 void __user *hva;
2124 int r;
2125
2126 /* Touch the page, so that a SIGBUS is delivered if it is hwpoisoned */
2127 hva = (void __user *)gfn_to_hva(kvm, gfn);
2128 r = copy_from_user(buf, hva, 1);
2129}
2130
2131static int kvm_handle_bad_page(struct kvm *kvm, gfn_t gfn, pfn_t pfn)
2132{
2133 kvm_release_pfn_clean(pfn);
2134 if (is_hwpoison_pfn(pfn)) {
2135 kvm_send_hwpoison_signal(kvm, gfn);
2136 return 0;
edba23e5
GN
2137 } else if (is_fault_pfn(pfn))
2138 return -EFAULT;
2139
bf998156
HY
2140 return 1;
2141}
2142
10589a46
MT
2143static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
2144{
2145 int r;
852e3c19 2146 int level;
35149e21 2147 pfn_t pfn;
e930bffe 2148 unsigned long mmu_seq;
aaee2c94 2149
852e3c19
JR
2150 level = mapping_level(vcpu, gfn);
2151
2152 /*
2153 * This path builds a PAE pagetable - so we can map 2MB pages at
2154 * maximum. Therefore check if the level is larger than that.
2155 */
2156 if (level > PT_DIRECTORY_LEVEL)
2157 level = PT_DIRECTORY_LEVEL;
2158
2159 gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);
05da4558 2160
e930bffe 2161 mmu_seq = vcpu->kvm->mmu_notifier_seq;
4c2155ce 2162 smp_rmb();
35149e21 2163 pfn = gfn_to_pfn(vcpu->kvm, gfn);
aaee2c94 2164
d196e343 2165 /* mmio */
bf998156
HY
2166 if (is_error_pfn(pfn))
2167 return kvm_handle_bad_page(vcpu->kvm, gfn, pfn);
d196e343 2168
aaee2c94 2169 spin_lock(&vcpu->kvm->mmu_lock);
e930bffe
AA
2170 if (mmu_notifier_retry(vcpu, mmu_seq))
2171 goto out_unlock;
eb787d10 2172 kvm_mmu_free_some_pages(vcpu);
852e3c19 2173 r = __direct_map(vcpu, v, write, level, gfn, pfn);
aaee2c94
MT
2174 spin_unlock(&vcpu->kvm->mmu_lock);
2175
aaee2c94 2176
10589a46 2177 return r;
e930bffe
AA
2178
2179out_unlock:
2180 spin_unlock(&vcpu->kvm->mmu_lock);
2181 kvm_release_pfn_clean(pfn);
2182 return 0;
10589a46
MT
2183}
2184
2185
17ac10ad
AK
2186static void mmu_free_roots(struct kvm_vcpu *vcpu)
2187{
2188 int i;
4db35314 2189 struct kvm_mmu_page *sp;
d98ba053 2190 LIST_HEAD(invalid_list);
17ac10ad 2191
ad312c7c 2192 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
7b53aa56 2193 return;
aaee2c94 2194 spin_lock(&vcpu->kvm->mmu_lock);
ad312c7c
ZX
2195 if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
2196 hpa_t root = vcpu->arch.mmu.root_hpa;
17ac10ad 2197
4db35314
AK
2198 sp = page_header(root);
2199 --sp->root_count;
d98ba053
XG
2200 if (!sp->root_count && sp->role.invalid) {
2201 kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list);
2202 kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
2203 }
ad312c7c 2204 vcpu->arch.mmu.root_hpa = INVALID_PAGE;
aaee2c94 2205 spin_unlock(&vcpu->kvm->mmu_lock);
17ac10ad
AK
2206 return;
2207 }
17ac10ad 2208 for (i = 0; i < 4; ++i) {
ad312c7c 2209 hpa_t root = vcpu->arch.mmu.pae_root[i];
17ac10ad 2210
417726a3 2211 if (root) {
417726a3 2212 root &= PT64_BASE_ADDR_MASK;
4db35314
AK
2213 sp = page_header(root);
2214 --sp->root_count;
2e53d63a 2215 if (!sp->root_count && sp->role.invalid)
d98ba053
XG
2216 kvm_mmu_prepare_zap_page(vcpu->kvm, sp,
2217 &invalid_list);
417726a3 2218 }
ad312c7c 2219 vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
17ac10ad 2220 }
d98ba053 2221 kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
aaee2c94 2222 spin_unlock(&vcpu->kvm->mmu_lock);
ad312c7c 2223 vcpu->arch.mmu.root_hpa = INVALID_PAGE;
17ac10ad
AK
2224}
2225
8986ecc0
MT
2226static int mmu_check_root(struct kvm_vcpu *vcpu, gfn_t root_gfn)
2227{
2228 int ret = 0;
2229
2230 if (!kvm_is_visible_gfn(vcpu->kvm, root_gfn)) {
a8eeb04a 2231 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
8986ecc0
MT
2232 ret = 1;
2233 }
2234
2235 return ret;
2236}
2237
2238static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
17ac10ad
AK
2239{
2240 int i;
cea0f0e7 2241 gfn_t root_gfn;
4db35314 2242 struct kvm_mmu_page *sp;
f6e2c02b 2243 int direct = 0;
6de4f3ad 2244 u64 pdptr;
3bb65a22 2245
ad312c7c 2246 root_gfn = vcpu->arch.cr3 >> PAGE_SHIFT;
17ac10ad 2247
ad312c7c
ZX
2248 if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
2249 hpa_t root = vcpu->arch.mmu.root_hpa;
17ac10ad
AK
2250
2251 ASSERT(!VALID_PAGE(root));
8986ecc0
MT
2252 if (mmu_check_root(vcpu, root_gfn))
2253 return 1;
5a7388c2
EN
2254 if (tdp_enabled) {
2255 direct = 1;
2256 root_gfn = 0;
2257 }
8facbbff 2258 spin_lock(&vcpu->kvm->mmu_lock);
24955b6c 2259 kvm_mmu_free_some_pages(vcpu);
4db35314 2260 sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
f6e2c02b 2261 PT64_ROOT_LEVEL, direct,
fb72d167 2262 ACC_ALL, NULL);
4db35314
AK
2263 root = __pa(sp->spt);
2264 ++sp->root_count;
8facbbff 2265 spin_unlock(&vcpu->kvm->mmu_lock);
ad312c7c 2266 vcpu->arch.mmu.root_hpa = root;
8986ecc0 2267 return 0;
17ac10ad 2268 }
f6e2c02b 2269 direct = !is_paging(vcpu);
17ac10ad 2270 for (i = 0; i < 4; ++i) {
ad312c7c 2271 hpa_t root = vcpu->arch.mmu.pae_root[i];
17ac10ad
AK
2272
2273 ASSERT(!VALID_PAGE(root));
ad312c7c 2274 if (vcpu->arch.mmu.root_level == PT32E_ROOT_LEVEL) {
6de4f3ad 2275 pdptr = kvm_pdptr_read(vcpu, i);
43a3795a 2276 if (!is_present_gpte(pdptr)) {
ad312c7c 2277 vcpu->arch.mmu.pae_root[i] = 0;
417726a3
AK
2278 continue;
2279 }
6de4f3ad 2280 root_gfn = pdptr >> PAGE_SHIFT;
ad312c7c 2281 } else if (vcpu->arch.mmu.root_level == 0)
cea0f0e7 2282 root_gfn = 0;
8986ecc0
MT
2283 if (mmu_check_root(vcpu, root_gfn))
2284 return 1;
5a7388c2
EN
2285 if (tdp_enabled) {
2286 direct = 1;
2287 root_gfn = i << 30;
2288 }
8facbbff 2289 spin_lock(&vcpu->kvm->mmu_lock);
24955b6c 2290 kvm_mmu_free_some_pages(vcpu);
4db35314 2291 sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
f6e2c02b 2292 PT32_ROOT_LEVEL, direct,
f7d9c7b7 2293 ACC_ALL, NULL);
4db35314
AK
2294 root = __pa(sp->spt);
2295 ++sp->root_count;
8facbbff
AK
2296 spin_unlock(&vcpu->kvm->mmu_lock);
2297
ad312c7c 2298 vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
17ac10ad 2299 }
ad312c7c 2300 vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
8986ecc0 2301 return 0;
17ac10ad
AK
2302}
2303
0ba73cda
MT
2304static void mmu_sync_roots(struct kvm_vcpu *vcpu)
2305{
2306 int i;
2307 struct kvm_mmu_page *sp;
2308
2309 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
2310 return;
2311 if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
2312 hpa_t root = vcpu->arch.mmu.root_hpa;
2313 sp = page_header(root);
2314 mmu_sync_children(vcpu, sp);
2315 return;
2316 }
2317 for (i = 0; i < 4; ++i) {
2318 hpa_t root = vcpu->arch.mmu.pae_root[i];
2319
8986ecc0 2320 if (root && VALID_PAGE(root)) {
0ba73cda
MT
2321 root &= PT64_BASE_ADDR_MASK;
2322 sp = page_header(root);
2323 mmu_sync_children(vcpu, sp);
2324 }
2325 }
2326}
2327
2328void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
2329{
2330 spin_lock(&vcpu->kvm->mmu_lock);
2331 mmu_sync_roots(vcpu);
6cffe8ca 2332 spin_unlock(&vcpu->kvm->mmu_lock);
0ba73cda
MT
2333}
2334
1871c602
GN
2335static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr,
2336 u32 access, u32 *error)
6aa8b732 2337{
1871c602
GN
2338 if (error)
2339 *error = 0;
6aa8b732
AK
2340 return vaddr;
2341}
2342
2343static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
3f3e7124 2344 u32 error_code)
6aa8b732 2345{
e833240f 2346 gfn_t gfn;
e2dec939 2347 int r;
6aa8b732 2348
b8688d51 2349 pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code);
e2dec939
AK
2350 r = mmu_topup_memory_caches(vcpu);
2351 if (r)
2352 return r;
714b93da 2353
6aa8b732 2354 ASSERT(vcpu);
ad312c7c 2355 ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));
6aa8b732 2356
e833240f 2357 gfn = gva >> PAGE_SHIFT;
6aa8b732 2358
e833240f
AK
2359 return nonpaging_map(vcpu, gva & PAGE_MASK,
2360 error_code & PFERR_WRITE_MASK, gfn);
6aa8b732
AK
2361}
2362
fb72d167
JR
2363static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
2364 u32 error_code)
2365{
35149e21 2366 pfn_t pfn;
fb72d167 2367 int r;
852e3c19 2368 int level;
05da4558 2369 gfn_t gfn = gpa >> PAGE_SHIFT;
e930bffe 2370 unsigned long mmu_seq;
fb72d167
JR
2371
2372 ASSERT(vcpu);
2373 ASSERT(VALID_PAGE(vcpu->arch.mmu.root_hpa));
2374
2375 r = mmu_topup_memory_caches(vcpu);
2376 if (r)
2377 return r;
2378
852e3c19
JR
2379 level = mapping_level(vcpu, gfn);
2380
2381 gfn &= ~(KVM_PAGES_PER_HPAGE(level) - 1);
2382
e930bffe 2383 mmu_seq = vcpu->kvm->mmu_notifier_seq;
4c2155ce 2384 smp_rmb();
35149e21 2385 pfn = gfn_to_pfn(vcpu->kvm, gfn);
bf998156
HY
2386 if (is_error_pfn(pfn))
2387 return kvm_handle_bad_page(vcpu->kvm, gfn, pfn);
fb72d167 2388 spin_lock(&vcpu->kvm->mmu_lock);
e930bffe
AA
2389 if (mmu_notifier_retry(vcpu, mmu_seq))
2390 goto out_unlock;
fb72d167
JR
2391 kvm_mmu_free_some_pages(vcpu);
2392 r = __direct_map(vcpu, gpa, error_code & PFERR_WRITE_MASK,
852e3c19 2393 level, gfn, pfn);
fb72d167 2394 spin_unlock(&vcpu->kvm->mmu_lock);
fb72d167
JR
2395
2396 return r;
e930bffe
AA
2397
2398out_unlock:
2399 spin_unlock(&vcpu->kvm->mmu_lock);
2400 kvm_release_pfn_clean(pfn);
2401 return 0;
fb72d167
JR
2402}
2403
6aa8b732
AK
2404static void nonpaging_free(struct kvm_vcpu *vcpu)
2405{
17ac10ad 2406 mmu_free_roots(vcpu);
6aa8b732
AK
2407}
2408
2409static int nonpaging_init_context(struct kvm_vcpu *vcpu)
2410{
ad312c7c 2411 struct kvm_mmu *context = &vcpu->arch.mmu;
6aa8b732
AK
2412
2413 context->new_cr3 = nonpaging_new_cr3;
2414 context->page_fault = nonpaging_page_fault;
6aa8b732
AK
2415 context->gva_to_gpa = nonpaging_gva_to_gpa;
2416 context->free = nonpaging_free;
c7addb90 2417 context->prefetch_page = nonpaging_prefetch_page;
e8bc217a 2418 context->sync_page = nonpaging_sync_page;
a7052897 2419 context->invlpg = nonpaging_invlpg;
cea0f0e7 2420 context->root_level = 0;
6aa8b732 2421 context->shadow_root_level = PT32E_ROOT_LEVEL;
17c3ba9d 2422 context->root_hpa = INVALID_PAGE;
6aa8b732
AK
2423 return 0;
2424}
2425
d835dfec 2426void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu)
6aa8b732 2427{
1165f5fe 2428 ++vcpu->stat.tlb_flush;
a8eeb04a 2429 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
6aa8b732
AK
2430}
2431
2432static void paging_new_cr3(struct kvm_vcpu *vcpu)
2433{
b8688d51 2434 pgprintk("%s: cr3 %lx\n", __func__, vcpu->arch.cr3);
cea0f0e7 2435 mmu_free_roots(vcpu);
6aa8b732
AK
2436}
2437
6aa8b732
AK
2438static void inject_page_fault(struct kvm_vcpu *vcpu,
2439 u64 addr,
2440 u32 err_code)
2441{
c3c91fee 2442 kvm_inject_page_fault(vcpu, addr, err_code);
6aa8b732
AK
2443}
2444
6aa8b732
AK
2445static void paging_free(struct kvm_vcpu *vcpu)
2446{
2447 nonpaging_free(vcpu);
2448}
2449
82725b20
DE
2450static bool is_rsvd_bits_set(struct kvm_vcpu *vcpu, u64 gpte, int level)
2451{
2452 int bit7;
2453
2454 bit7 = (gpte >> 7) & 1;
2455 return (gpte & vcpu->arch.mmu.rsvd_bits_mask[bit7][level-1]) != 0;
2456}
2457
6aa8b732
AK
2458#define PTTYPE 64
2459#include "paging_tmpl.h"
2460#undef PTTYPE
2461
2462#define PTTYPE 32
2463#include "paging_tmpl.h"
2464#undef PTTYPE
2465
82725b20
DE
2466static void reset_rsvds_bits_mask(struct kvm_vcpu *vcpu, int level)
2467{
2468 struct kvm_mmu *context = &vcpu->arch.mmu;
2469 int maxphyaddr = cpuid_maxphyaddr(vcpu);
2470 u64 exb_bit_rsvd = 0;
2471
2472 if (!is_nx(vcpu))
2473 exb_bit_rsvd = rsvd_bits(63, 63);
2474 switch (level) {
2475 case PT32_ROOT_LEVEL:
2476 /* no rsvd bits for 2-level 4K page table entries */
2477 context->rsvd_bits_mask[0][1] = 0;
2478 context->rsvd_bits_mask[0][0] = 0;
f815bce8
XG
2479 context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];
2480
2481 if (!is_pse(vcpu)) {
2482 context->rsvd_bits_mask[1][1] = 0;
2483 break;
2484 }
2485
82725b20
DE
2486 if (is_cpuid_PSE36())
2487 /* 36-bit PSE 4MB page */
2488 context->rsvd_bits_mask[1][1] = rsvd_bits(17, 21);
2489 else
2490 /* 32-bit PSE 4MB page */
2491 context->rsvd_bits_mask[1][1] = rsvd_bits(13, 21);
82725b20
DE
2492 break;
2493 case PT32E_ROOT_LEVEL:
20c466b5
DE
2494 context->rsvd_bits_mask[0][2] =
2495 rsvd_bits(maxphyaddr, 63) |
2496 rsvd_bits(7, 8) | rsvd_bits(1, 2); /* PDPTE */
82725b20 2497 context->rsvd_bits_mask[0][1] = exb_bit_rsvd |
4c26b4cd 2498 rsvd_bits(maxphyaddr, 62); /* PDE */
82725b20
DE
2499 context->rsvd_bits_mask[0][0] = exb_bit_rsvd |
2500 rsvd_bits(maxphyaddr, 62); /* PTE */
2501 context->rsvd_bits_mask[1][1] = exb_bit_rsvd |
2502 rsvd_bits(maxphyaddr, 62) |
2503 rsvd_bits(13, 20); /* large page */
f815bce8 2504 context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];
82725b20
DE
2505 break;
2506 case PT64_ROOT_LEVEL:
2507 context->rsvd_bits_mask[0][3] = exb_bit_rsvd |
2508 rsvd_bits(maxphyaddr, 51) | rsvd_bits(7, 8);
2509 context->rsvd_bits_mask[0][2] = exb_bit_rsvd |
2510 rsvd_bits(maxphyaddr, 51) | rsvd_bits(7, 8);
2511 context->rsvd_bits_mask[0][1] = exb_bit_rsvd |
4c26b4cd 2512 rsvd_bits(maxphyaddr, 51);
82725b20
DE
2513 context->rsvd_bits_mask[0][0] = exb_bit_rsvd |
2514 rsvd_bits(maxphyaddr, 51);
2515 context->rsvd_bits_mask[1][3] = context->rsvd_bits_mask[0][3];
e04da980
JR
2516 context->rsvd_bits_mask[1][2] = exb_bit_rsvd |
2517 rsvd_bits(maxphyaddr, 51) |
2518 rsvd_bits(13, 29);
82725b20 2519 context->rsvd_bits_mask[1][1] = exb_bit_rsvd |
4c26b4cd
SY
2520 rsvd_bits(maxphyaddr, 51) |
2521 rsvd_bits(13, 20); /* large page */
f815bce8 2522 context->rsvd_bits_mask[1][0] = context->rsvd_bits_mask[0][0];
82725b20
DE
2523 break;
2524 }
2525}
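
The masks above are built from rsvd_bits(), a span-of-set-bits helper kept in mmu.h; a sketch of its assumed behaviour with one worked value (the _sketch name is illustrative only):

static inline u64 rsvd_bits_sketch(int s, int e)
{
	return ((1ULL << (e - s + 1)) - 1) << s;	/* ones in bits s..e inclusive */
}
/* e.g. with maxphyaddr = 36, rsvd_bits_sketch(36, 51) = 0x000ffff000000000ULL */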
2526
17ac10ad 2527static int paging64_init_context_common(struct kvm_vcpu *vcpu, int level)
6aa8b732 2528{
ad312c7c 2529 struct kvm_mmu *context = &vcpu->arch.mmu;
6aa8b732
AK
2530
2531 ASSERT(is_pae(vcpu));
2532 context->new_cr3 = paging_new_cr3;
2533 context->page_fault = paging64_page_fault;
6aa8b732 2534 context->gva_to_gpa = paging64_gva_to_gpa;
c7addb90 2535 context->prefetch_page = paging64_prefetch_page;
e8bc217a 2536 context->sync_page = paging64_sync_page;
a7052897 2537 context->invlpg = paging64_invlpg;
6aa8b732 2538 context->free = paging_free;
17ac10ad
AK
2539 context->root_level = level;
2540 context->shadow_root_level = level;
17c3ba9d 2541 context->root_hpa = INVALID_PAGE;
6aa8b732
AK
2542 return 0;
2543}
2544
17ac10ad
AK
2545static int paging64_init_context(struct kvm_vcpu *vcpu)
2546{
82725b20 2547 reset_rsvds_bits_mask(vcpu, PT64_ROOT_LEVEL);
17ac10ad
AK
2548 return paging64_init_context_common(vcpu, PT64_ROOT_LEVEL);
2549}
2550
6aa8b732
AK
2551static int paging32_init_context(struct kvm_vcpu *vcpu)
2552{
ad312c7c 2553 struct kvm_mmu *context = &vcpu->arch.mmu;
6aa8b732 2554
82725b20 2555 reset_rsvds_bits_mask(vcpu, PT32_ROOT_LEVEL);
6aa8b732
AK
2556 context->new_cr3 = paging_new_cr3;
2557 context->page_fault = paging32_page_fault;
6aa8b732
AK
2558 context->gva_to_gpa = paging32_gva_to_gpa;
2559 context->free = paging_free;
c7addb90 2560 context->prefetch_page = paging32_prefetch_page;
e8bc217a 2561 context->sync_page = paging32_sync_page;
a7052897 2562 context->invlpg = paging32_invlpg;
6aa8b732
AK
2563 context->root_level = PT32_ROOT_LEVEL;
2564 context->shadow_root_level = PT32E_ROOT_LEVEL;
17c3ba9d 2565 context->root_hpa = INVALID_PAGE;
6aa8b732
AK
2566 return 0;
2567}
2568
2569static int paging32E_init_context(struct kvm_vcpu *vcpu)
2570{
82725b20 2571 reset_rsvds_bits_mask(vcpu, PT32E_ROOT_LEVEL);
17ac10ad 2572 return paging64_init_context_common(vcpu, PT32E_ROOT_LEVEL);
6aa8b732
AK
2573}
2574
fb72d167
JR
2575static int init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
2576{
2577 struct kvm_mmu *context = &vcpu->arch.mmu;
2578
2579 context->new_cr3 = nonpaging_new_cr3;
2580 context->page_fault = tdp_page_fault;
2581 context->free = nonpaging_free;
2582 context->prefetch_page = nonpaging_prefetch_page;
e8bc217a 2583 context->sync_page = nonpaging_sync_page;
a7052897 2584 context->invlpg = nonpaging_invlpg;
67253af5 2585 context->shadow_root_level = kvm_x86_ops->get_tdp_level();
fb72d167
JR
2586 context->root_hpa = INVALID_PAGE;
2587
2588 if (!is_paging(vcpu)) {
2589 context->gva_to_gpa = nonpaging_gva_to_gpa;
2590 context->root_level = 0;
2591 } else if (is_long_mode(vcpu)) {
82725b20 2592 reset_rsvds_bits_mask(vcpu, PT64_ROOT_LEVEL);
fb72d167
JR
2593 context->gva_to_gpa = paging64_gva_to_gpa;
2594 context->root_level = PT64_ROOT_LEVEL;
2595 } else if (is_pae(vcpu)) {
82725b20 2596 reset_rsvds_bits_mask(vcpu, PT32E_ROOT_LEVEL);
fb72d167
JR
2597 context->gva_to_gpa = paging64_gva_to_gpa;
2598 context->root_level = PT32E_ROOT_LEVEL;
2599 } else {
82725b20 2600 reset_rsvds_bits_mask(vcpu, PT32_ROOT_LEVEL);
fb72d167
JR
2601 context->gva_to_gpa = paging32_gva_to_gpa;
2602 context->root_level = PT32_ROOT_LEVEL;
2603 }
2604
2605 return 0;
2606}
2607
2608static int init_kvm_softmmu(struct kvm_vcpu *vcpu)
6aa8b732 2609{
a770f6f2
AK
2610 int r;
2611
6aa8b732 2612 ASSERT(vcpu);
ad312c7c 2613 ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
6aa8b732
AK
2614
2615 if (!is_paging(vcpu))
a770f6f2 2616 r = nonpaging_init_context(vcpu);
a9058ecd 2617 else if (is_long_mode(vcpu))
a770f6f2 2618 r = paging64_init_context(vcpu);
6aa8b732 2619 else if (is_pae(vcpu))
a770f6f2 2620 r = paging32E_init_context(vcpu);
6aa8b732 2621 else
a770f6f2
AK
2622 r = paging32_init_context(vcpu);
2623
5b7e0102 2624 vcpu->arch.mmu.base_role.cr4_pae = !!is_pae(vcpu);
3dbe1415 2625 vcpu->arch.mmu.base_role.cr0_wp = is_write_protection(vcpu);
a770f6f2
AK
2626
2627 return r;
6aa8b732
AK
2628}
2629
fb72d167
JR
2630static int init_kvm_mmu(struct kvm_vcpu *vcpu)
2631{
35149e21
AL
2632 vcpu->arch.update_pte.pfn = bad_pfn;
2633
fb72d167
JR
2634 if (tdp_enabled)
2635 return init_kvm_tdp_mmu(vcpu);
2636 else
2637 return init_kvm_softmmu(vcpu);
2638}
2639
6aa8b732
AK
2640static void destroy_kvm_mmu(struct kvm_vcpu *vcpu)
2641{
2642 ASSERT(vcpu);
62ad0755
SY
2643 if (VALID_PAGE(vcpu->arch.mmu.root_hpa))
2644 /* mmu.free() should set root_hpa = INVALID_PAGE */
ad312c7c 2645 vcpu->arch.mmu.free(vcpu);
6aa8b732
AK
2646}
2647
2648int kvm_mmu_reset_context(struct kvm_vcpu *vcpu)
17c3ba9d
AK
2649{
2650 destroy_kvm_mmu(vcpu);
2651 return init_kvm_mmu(vcpu);
2652}
8668a3c4 2653EXPORT_SYMBOL_GPL(kvm_mmu_reset_context);
17c3ba9d
AK
2654
2655int kvm_mmu_load(struct kvm_vcpu *vcpu)
6aa8b732 2656{
714b93da
AK
2657 int r;
2658
e2dec939 2659 r = mmu_topup_memory_caches(vcpu);
17c3ba9d
AK
2660 if (r)
2661 goto out;
8986ecc0 2662 r = mmu_alloc_roots(vcpu);
8facbbff 2663 spin_lock(&vcpu->kvm->mmu_lock);
0ba73cda 2664 mmu_sync_roots(vcpu);
aaee2c94 2665 spin_unlock(&vcpu->kvm->mmu_lock);
8986ecc0
MT
2666 if (r)
2667 goto out;
3662cb1c 2668 /* set_cr3() should ensure TLB has been flushed */
ad312c7c 2669 kvm_x86_ops->set_cr3(vcpu, vcpu->arch.mmu.root_hpa);
714b93da
AK
2670out:
2671 return r;
6aa8b732 2672}
17c3ba9d
AK
2673EXPORT_SYMBOL_GPL(kvm_mmu_load);
2674
2675void kvm_mmu_unload(struct kvm_vcpu *vcpu)
2676{
2677 mmu_free_roots(vcpu);
2678}
6aa8b732 2679
09072daf 2680static void mmu_pte_write_zap_pte(struct kvm_vcpu *vcpu,
4db35314 2681 struct kvm_mmu_page *sp,
ac1b714e
AK
2682 u64 *spte)
2683{
2684 u64 pte;
2685 struct kvm_mmu_page *child;
2686
2687 pte = *spte;
c7addb90 2688 if (is_shadow_present_pte(pte)) {
776e6633 2689 if (is_last_spte(pte, sp->role.level))
be38d276 2690 drop_spte(vcpu->kvm, spte, shadow_trap_nonpresent_pte);
ac1b714e
AK
2691 else {
2692 child = page_header(pte & PT64_BASE_ADDR_MASK);
90cb0529 2693 mmu_page_remove_parent_pte(child, spte);
ac1b714e
AK
2694 }
2695 }
d555c333 2696 __set_spte(spte, shadow_trap_nonpresent_pte);
05da4558
MT
2697 if (is_large_pte(pte))
2698 --vcpu->kvm->stat.lpages;
ac1b714e
AK
2699}
2700
0028425f 2701static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
4db35314 2702 struct kvm_mmu_page *sp,
0028425f 2703 u64 *spte,
489f1d65 2704 const void *new)
0028425f 2705{
30945387 2706 if (sp->role.level != PT_PAGE_TABLE_LEVEL) {
7e4e4056
JR
2707 ++vcpu->kvm->stat.mmu_pde_zapped;
2708 return;
30945387 2709 }
0028425f 2710
fa1de2bf
XG
2711 if (is_rsvd_bits_set(vcpu, *(u64 *)new, PT_PAGE_TABLE_LEVEL))
2712 return;
2713
4cee5764 2714 ++vcpu->kvm->stat.mmu_pte_updated;
5b7e0102 2715 if (!sp->role.cr4_pae)
489f1d65 2716 paging32_update_pte(vcpu, sp, spte, new);
0028425f 2717 else
489f1d65 2718 paging64_update_pte(vcpu, sp, spte, new);
0028425f
AK
2719}
2720
79539cec
AK
2721static bool need_remote_flush(u64 old, u64 new)
2722{
2723 if (!is_shadow_present_pte(old))
2724 return false;
2725 if (!is_shadow_present_pte(new))
2726 return true;
2727 if ((old ^ new) & PT64_BASE_ADDR_MASK)
2728 return true;
2729 old ^= PT64_NX_MASK;
2730 new ^= PT64_NX_MASK;
2731 return (old & ~new & PT64_PERM_MASK) != 0;
2732}
2733
0671a8e7
XG
2734static void mmu_pte_write_flush_tlb(struct kvm_vcpu *vcpu, bool zap_page,
2735 bool remote_flush, bool local_flush)
79539cec 2736{
0671a8e7
XG
2737 if (zap_page)
2738 return;
2739
2740 if (remote_flush)
79539cec 2741 kvm_flush_remote_tlbs(vcpu->kvm);
0671a8e7 2742 else if (local_flush)
79539cec
AK
2743 kvm_mmu_flush_tlb(vcpu);
2744}
2745
12b7d28f
AK
2746static bool last_updated_pte_accessed(struct kvm_vcpu *vcpu)
2747{
ad312c7c 2748 u64 *spte = vcpu->arch.last_pte_updated;
12b7d28f 2749
7b52345e 2750 return !!(spte && (*spte & shadow_accessed_mask));
12b7d28f
AK
2751}
2752
d7824fff 2753static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
72016f3a 2754 u64 gpte)
d7824fff
AK
2755{
2756 gfn_t gfn;
35149e21 2757 pfn_t pfn;
d7824fff 2758
43a3795a 2759 if (!is_present_gpte(gpte))
d7824fff
AK
2760 return;
2761 gfn = (gpte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
72dc67a6 2762
e930bffe 2763 vcpu->arch.update_pte.mmu_seq = vcpu->kvm->mmu_notifier_seq;
4c2155ce 2764 smp_rmb();
35149e21 2765 pfn = gfn_to_pfn(vcpu->kvm, gfn);
72dc67a6 2766
35149e21
AL
2767 if (is_error_pfn(pfn)) {
2768 kvm_release_pfn_clean(pfn);
d196e343
AK
2769 return;
2770 }
d7824fff 2771 vcpu->arch.update_pte.gfn = gfn;
35149e21 2772 vcpu->arch.update_pte.pfn = pfn;
d7824fff
AK
2773}
2774
1b7fcd32
AK
2775static void kvm_mmu_access_page(struct kvm_vcpu *vcpu, gfn_t gfn)
2776{
2777 u64 *spte = vcpu->arch.last_pte_updated;
2778
2779 if (spte
2780 && vcpu->arch.last_pte_gfn == gfn
2781 && shadow_accessed_mask
2782 && !(*spte & shadow_accessed_mask)
2783 && is_shadow_present_pte(*spte))
2784 set_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte);
2785}
2786
09072daf 2787void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
ad218f85
MT
2788 const u8 *new, int bytes,
2789 bool guest_initiated)
da4a00f0 2790{
9b7a0325 2791 gfn_t gfn = gpa >> PAGE_SHIFT;
fa1de2bf 2792 union kvm_mmu_page_role mask = { .word = 0 };
4db35314 2793 struct kvm_mmu_page *sp;
f41d335a 2794 struct hlist_node *node;
d98ba053 2795 LIST_HEAD(invalid_list);
489f1d65 2796 u64 entry, gentry;
9b7a0325 2797 u64 *spte;
9b7a0325 2798 unsigned offset = offset_in_page(gpa);
0e7bc4b9 2799 unsigned pte_size;
9b7a0325 2800 unsigned page_offset;
0e7bc4b9 2801 unsigned misaligned;
fce0657f 2802 unsigned quadrant;
9b7a0325 2803 int level;
86a5ba02 2804 int flooded = 0;
ac1b714e 2805 int npte;
489f1d65 2806 int r;
08e850c6 2807 int invlpg_counter;
0671a8e7
XG
2808 bool remote_flush, local_flush, zap_page;
2809
2810 zap_page = remote_flush = local_flush = false;
9b7a0325 2811
b8688d51 2812 pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
72016f3a 2813
08e850c6 2814 invlpg_counter = atomic_read(&vcpu->kvm->arch.invlpg_counter);
72016f3a
AK
2815
2816 /*
2817 * Assume that the pte write is on a page table of the same type
2818 * as the current vcpu's paging mode. This is nearly always true
2819 * (might be false while changing modes). Note it is verified later
2820 * by update_pte().
2821 */
08e850c6 2822 if ((is_pae(vcpu) && bytes == 4) || !new) {
72016f3a 2823 /* Handle a 32-bit guest writing two halves of a 64-bit gpte */
08e850c6
AK
2824 if (is_pae(vcpu)) {
2825 gpa &= ~(gpa_t)7;
2826 bytes = 8;
2827 }
2828 r = kvm_read_guest(vcpu->kvm, gpa, &gentry, min(bytes, 8));
72016f3a
AK
2829 if (r)
2830 gentry = 0;
08e850c6
AK
2831 new = (const u8 *)&gentry;
2832 }
2833
2834 switch (bytes) {
2835 case 4:
2836 gentry = *(const u32 *)new;
2837 break;
2838 case 8:
2839 gentry = *(const u64 *)new;
2840 break;
2841 default:
2842 gentry = 0;
2843 break;
72016f3a
AK
2844 }
2845
2846 mmu_guess_page_from_pte_write(vcpu, gpa, gentry);
aaee2c94 2847 spin_lock(&vcpu->kvm->mmu_lock);
08e850c6
AK
2848 if (atomic_read(&vcpu->kvm->arch.invlpg_counter) != invlpg_counter)
2849 gentry = 0;
1b7fcd32 2850 kvm_mmu_access_page(vcpu, gfn);
eb787d10 2851 kvm_mmu_free_some_pages(vcpu);
4cee5764 2852 ++vcpu->kvm->stat.mmu_pte_write;
c7addb90 2853 kvm_mmu_audit(vcpu, "pre pte write");
ad218f85
MT
2854 if (guest_initiated) {
2855 if (gfn == vcpu->arch.last_pt_write_gfn
2856 && !last_updated_pte_accessed(vcpu)) {
2857 ++vcpu->arch.last_pt_write_count;
2858 if (vcpu->arch.last_pt_write_count >= 3)
2859 flooded = 1;
2860 } else {
2861 vcpu->arch.last_pt_write_gfn = gfn;
2862 vcpu->arch.last_pt_write_count = 1;
2863 vcpu->arch.last_pte_updated = NULL;
2864 }
86a5ba02 2865 }
3246af0e 2866
fa1de2bf 2867 mask.cr0_wp = mask.cr4_pae = mask.nxe = 1;
f41d335a 2868 for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn, node) {
5b7e0102 2869 pte_size = sp->role.cr4_pae ? 8 : 4;
0e7bc4b9 2870 misaligned = (offset ^ (offset + bytes - 1)) & ~(pte_size - 1);
e925c5ba 2871 misaligned |= bytes < 4;
86a5ba02 2872 if (misaligned || flooded) {
0e7bc4b9
AK
2873 /*
2874 * Misaligned accesses are too much trouble to fix
2875 * up; also, they usually indicate a page is not used
2876 * as a page table.
86a5ba02
AK
2877 *
2878 * If we're seeing too many writes to a page,
2879 * it may no longer be a page table, or we may be
2880 * forking, in which case it is better to unmap the
2881 * page.
0e7bc4b9
AK
2882 */
2883 pgprintk("misaligned: gpa %llx bytes %d role %x\n",
4db35314 2884 gpa, bytes, sp->role.word);
0671a8e7 2885 zap_page |= !!kvm_mmu_prepare_zap_page(vcpu->kvm, sp,
f41d335a 2886 &invalid_list);
4cee5764 2887 ++vcpu->kvm->stat.mmu_flooded;
0e7bc4b9
AK
2888 continue;
2889 }
9b7a0325 2890 page_offset = offset;
4db35314 2891 level = sp->role.level;
ac1b714e 2892 npte = 1;
5b7e0102 2893 if (!sp->role.cr4_pae) {
ac1b714e
AK
2894 page_offset <<= 1; /* 32->64 */
2895 /*
2896 * A 32-bit pde maps 4MB while the shadow pdes map
2897 * only 2MB. So we need to double the offset again
2898 * and zap two pdes instead of one.
2899 */
2900 if (level == PT32_ROOT_LEVEL) {
6b8d0f9b 2901 page_offset &= ~7; /* kill rounding error */
ac1b714e
AK
2902 page_offset <<= 1;
2903 npte = 2;
2904 }
fce0657f 2905 quadrant = page_offset >> PAGE_SHIFT;
9b7a0325 2906 page_offset &= ~PAGE_MASK;
4db35314 2907 if (quadrant != sp->role.quadrant)
fce0657f 2908 continue;
9b7a0325 2909 }
0671a8e7 2910 local_flush = true;
4db35314 2911 spte = &sp->spt[page_offset / sizeof(*spte)];
ac1b714e 2912 while (npte--) {
79539cec 2913 entry = *spte;
4db35314 2914 mmu_pte_write_zap_pte(vcpu, sp, spte);
fa1de2bf
XG
2915 if (gentry &&
2916 !((sp->role.word ^ vcpu->arch.mmu.base_role.word)
2917 & mask.word))
72016f3a 2918 mmu_pte_write_new_pte(vcpu, sp, spte, &gentry);
0671a8e7
XG
2919 if (!remote_flush && need_remote_flush(entry, *spte))
2920 remote_flush = true;
ac1b714e 2921 ++spte;
9b7a0325 2922 }
9b7a0325 2923 }
0671a8e7 2924 mmu_pte_write_flush_tlb(vcpu, zap_page, remote_flush, local_flush);
d98ba053 2925 kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
c7addb90 2926 kvm_mmu_audit(vcpu, "post pte write");
aaee2c94 2927 spin_unlock(&vcpu->kvm->mmu_lock);
35149e21
AL
2928 if (!is_error_pfn(vcpu->arch.update_pte.pfn)) {
2929 kvm_release_pfn_clean(vcpu->arch.update_pte.pfn);
2930 vcpu->arch.update_pte.pfn = bad_pfn;
d7824fff 2931 }
da4a00f0
AK
2932}
2933
a436036b
AK
2934int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva)
2935{
10589a46
MT
2936 gpa_t gpa;
2937 int r;
a436036b 2938
60f24784
AK
2939 if (tdp_enabled)
2940 return 0;
2941
1871c602 2942 gpa = kvm_mmu_gva_to_gpa_read(vcpu, gva, NULL);
10589a46 2943
aaee2c94 2944 spin_lock(&vcpu->kvm->mmu_lock);
10589a46 2945 r = kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
aaee2c94 2946 spin_unlock(&vcpu->kvm->mmu_lock);
10589a46 2947 return r;
a436036b 2948}
577bdc49 2949EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt);
a436036b 2950
22d95b12 2951void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
ebeace86 2952{
d98ba053 2953 LIST_HEAD(invalid_list);
103ad25a 2954
e0df7b9f 2955 while (kvm_mmu_available_pages(vcpu->kvm) < KVM_REFILL_PAGES &&
3b80fffe 2956 !list_empty(&vcpu->kvm->arch.active_mmu_pages)) {
4db35314 2957 struct kvm_mmu_page *sp;
ebeace86 2958
f05e70ac 2959 sp = container_of(vcpu->kvm->arch.active_mmu_pages.prev,
4db35314 2960 struct kvm_mmu_page, link);
e0df7b9f 2961 kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list);
4cee5764 2962 ++vcpu->kvm->stat.mmu_recycled;
ebeace86 2963 }
d98ba053 2964 kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
ebeace86 2965}
ebeace86 2966
3067714c
AK
2967int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
2968{
2969 int r;
2970 enum emulation_result er;
2971
ad312c7c 2972 r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code);
3067714c
AK
2973 if (r < 0)
2974 goto out;
2975
2976 if (!r) {
2977 r = 1;
2978 goto out;
2979 }
2980
b733bfb5
AK
2981 r = mmu_topup_memory_caches(vcpu);
2982 if (r)
2983 goto out;
2984
851ba692 2985 er = emulate_instruction(vcpu, cr2, error_code, 0);
3067714c
AK
2986
2987 switch (er) {
2988 case EMULATE_DONE:
2989 return 1;
2990 case EMULATE_DO_MMIO:
2991 ++vcpu->stat.mmio_exits;
6d77dbfc 2992 /* fall through */
3067714c 2993 case EMULATE_FAIL:
3f5d18a9 2994 return 0;
3067714c
AK
2995 default:
2996 BUG();
2997 }
2998out:
3067714c
AK
2999 return r;
3000}
3001EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
3002
a7052897
MT
3003void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
3004{
a7052897 3005 vcpu->arch.mmu.invlpg(vcpu, gva);
a7052897
MT
3006 kvm_mmu_flush_tlb(vcpu);
3007 ++vcpu->stat.invlpg;
3008}
3009EXPORT_SYMBOL_GPL(kvm_mmu_invlpg);
3010
18552672
JR
3011void kvm_enable_tdp(void)
3012{
3013 tdp_enabled = true;
3014}
3015EXPORT_SYMBOL_GPL(kvm_enable_tdp);
3016
5f4cb662
JR
3017void kvm_disable_tdp(void)
3018{
3019 tdp_enabled = false;
3020}
3021EXPORT_SYMBOL_GPL(kvm_disable_tdp);
3022
6aa8b732
AK
3023static void free_mmu_pages(struct kvm_vcpu *vcpu)
3024{
ad312c7c 3025 free_page((unsigned long)vcpu->arch.mmu.pae_root);
6aa8b732
AK
3026}
3027
3028static int alloc_mmu_pages(struct kvm_vcpu *vcpu)
3029{
17ac10ad 3030 struct page *page;
6aa8b732
AK
3031 int i;
3032
3033 ASSERT(vcpu);
3034
17ac10ad
AK
3035 /*
3036 * When emulating 32-bit mode, cr3 is only 32 bits even on x86_64.
3037 * Therefore we need to allocate shadow page tables in the first
3038 * 4GB of memory, which happens to fit the DMA32 zone.
3039 */
3040 page = alloc_page(GFP_KERNEL | __GFP_DMA32);
3041 if (!page)
d7fa6ab2
WY
3042 return -ENOMEM;
3043
ad312c7c 3044 vcpu->arch.mmu.pae_root = page_address(page);
17ac10ad 3045 for (i = 0; i < 4; ++i)
ad312c7c 3046 vcpu->arch.mmu.pae_root[i] = INVALID_PAGE;
17ac10ad 3047
6aa8b732 3048 return 0;
6aa8b732
AK
3049}
3050
8018c27b 3051int kvm_mmu_create(struct kvm_vcpu *vcpu)
6aa8b732 3052{
6aa8b732 3053 ASSERT(vcpu);
ad312c7c 3054 ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
6aa8b732 3055
8018c27b
IM
3056 return alloc_mmu_pages(vcpu);
3057}
6aa8b732 3058
8018c27b
IM
3059int kvm_mmu_setup(struct kvm_vcpu *vcpu)
3060{
3061 ASSERT(vcpu);
ad312c7c 3062 ASSERT(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
2c264957 3063
8018c27b 3064 return init_kvm_mmu(vcpu);
6aa8b732
AK
3065}
3066
3067void kvm_mmu_destroy(struct kvm_vcpu *vcpu)
3068{
3069 ASSERT(vcpu);
3070
3071 destroy_kvm_mmu(vcpu);
3072 free_mmu_pages(vcpu);
714b93da 3073 mmu_free_memory_caches(vcpu);
6aa8b732
AK
3074}
3075
90cb0529 3076void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
6aa8b732 3077{
4db35314 3078 struct kvm_mmu_page *sp;
6aa8b732 3079
f05e70ac 3080 list_for_each_entry(sp, &kvm->arch.active_mmu_pages, link) {
6aa8b732
AK
3081 int i;
3082 u64 *pt;
3083
291f26bc 3084 if (!test_bit(slot, sp->slot_bitmap))
6aa8b732
AK
3085 continue;
3086
4db35314 3087 pt = sp->spt;
6aa8b732
AK
3088 for (i = 0; i < PT64_ENT_PER_PAGE; ++i)
3089 /* avoid RMW */
01c168ac 3090 if (is_writable_pte(pt[i]))
6aa8b732 3091 pt[i] &= ~PT_WRITABLE_MASK;
6aa8b732 3092 }
171d595d 3093 kvm_flush_remote_tlbs(kvm);
6aa8b732 3094}
37a7d8b0 3095
90cb0529 3096void kvm_mmu_zap_all(struct kvm *kvm)
e0fa826f 3097{
4db35314 3098 struct kvm_mmu_page *sp, *node;
d98ba053 3099 LIST_HEAD(invalid_list);
e0fa826f 3100
aaee2c94 3101 spin_lock(&kvm->mmu_lock);
3246af0e 3102restart:
f05e70ac 3103 list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages, link)
d98ba053 3104 if (kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list))
3246af0e
XG
3105 goto restart;
3106
d98ba053 3107 kvm_mmu_commit_zap_page(kvm, &invalid_list);
aaee2c94 3108 spin_unlock(&kvm->mmu_lock);
e0fa826f
DL
3109}
3110
d98ba053
XG
3111static int kvm_mmu_remove_some_alloc_mmu_pages(struct kvm *kvm,
3112 struct list_head *invalid_list)
3ee16c81
IE
3113{
3114 struct kvm_mmu_page *page;
3115
3116 page = container_of(kvm->arch.active_mmu_pages.prev,
3117 struct kvm_mmu_page, link);
d98ba053 3118 return kvm_mmu_prepare_zap_page(kvm, page, invalid_list);
3ee16c81
IE
3119}
3120
7f8275d0 3121static int mmu_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
3ee16c81
IE
3122{
3123 struct kvm *kvm;
3124 struct kvm *kvm_freed = NULL;
3125 int cache_count = 0;
3126
3127 spin_lock(&kvm_lock);
3128
3129 list_for_each_entry(kvm, &vm_list, vm_list) {
d35b8dd9 3130 int npages, idx, freed_pages;
d98ba053 3131 LIST_HEAD(invalid_list);
3ee16c81 3132
f656ce01 3133 idx = srcu_read_lock(&kvm->srcu);
3ee16c81 3134 spin_lock(&kvm->mmu_lock);
39de71ec 3135 npages = kvm->arch.n_max_mmu_pages -
e0df7b9f 3136 kvm_mmu_available_pages(kvm);
3ee16c81
IE
3137 cache_count += npages;
3138 if (!kvm_freed && nr_to_scan > 0 && npages > 0) {
d98ba053
XG
3139 freed_pages = kvm_mmu_remove_some_alloc_mmu_pages(kvm,
3140 &invalid_list);
d35b8dd9 3141 cache_count -= freed_pages;
3ee16c81
IE
3142 kvm_freed = kvm;
3143 }
3144 nr_to_scan--;
3145
d98ba053 3146 kvm_mmu_commit_zap_page(kvm, &invalid_list);
3ee16c81 3147 spin_unlock(&kvm->mmu_lock);
f656ce01 3148 srcu_read_unlock(&kvm->srcu, idx);
3ee16c81
IE
3149 }
3150 if (kvm_freed)
3151 list_move_tail(&kvm_freed->vm_list, &vm_list);
3152
3153 spin_unlock(&kvm_lock);
3154
3155 return cache_count;
3156}
3157
3158static struct shrinker mmu_shrinker = {
3159 .shrink = mmu_shrink,
3160 .seeks = DEFAULT_SEEKS * 10,
3161};
3162
2ddfd20e 3163static void mmu_destroy_caches(void)
b5a33a75
AK
3164{
3165 if (pte_chain_cache)
3166 kmem_cache_destroy(pte_chain_cache);
3167 if (rmap_desc_cache)
3168 kmem_cache_destroy(rmap_desc_cache);
d3d25b04
AK
3169 if (mmu_page_header_cache)
3170 kmem_cache_destroy(mmu_page_header_cache);
b5a33a75
AK
3171}
3172
3ee16c81
IE
3173void kvm_mmu_module_exit(void)
3174{
3175 mmu_destroy_caches();
3176 unregister_shrinker(&mmu_shrinker);
3177}
3178
b5a33a75
AK
3179int kvm_mmu_module_init(void)
3180{
3181 pte_chain_cache = kmem_cache_create("kvm_pte_chain",
3182 sizeof(struct kvm_pte_chain),
20c2df83 3183 0, 0, NULL);
b5a33a75
AK
3184 if (!pte_chain_cache)
3185 goto nomem;
3186 rmap_desc_cache = kmem_cache_create("kvm_rmap_desc",
3187 sizeof(struct kvm_rmap_desc),
20c2df83 3188 0, 0, NULL);
b5a33a75
AK
3189 if (!rmap_desc_cache)
3190 goto nomem;
3191
d3d25b04
AK
3192 mmu_page_header_cache = kmem_cache_create("kvm_mmu_page_header",
3193 sizeof(struct kvm_mmu_page),
20c2df83 3194 0, 0, NULL);
d3d25b04
AK
3195 if (!mmu_page_header_cache)
3196 goto nomem;
3197
3ee16c81
IE
3198 register_shrinker(&mmu_shrinker);
3199
b5a33a75
AK
3200 return 0;
3201
3202nomem:
3ee16c81 3203 mmu_destroy_caches();
b5a33a75
AK
3204 return -ENOMEM;
3205}
3206
3ad82a7e
ZX
3207/*
3208 * Calculate the number of mmu pages needed for kvm.
3209 */
3210unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm)
3211{
3212 int i;
3213 unsigned int nr_mmu_pages;
3214 unsigned int nr_pages = 0;
bc6678a3 3215 struct kvm_memslots *slots;
3ad82a7e 3216
90d83dc3
LJ
3217 slots = kvm_memslots(kvm);
3218
bc6678a3
MT
3219 for (i = 0; i < slots->nmemslots; i++)
3220 nr_pages += slots->memslots[i].npages;
3ad82a7e
ZX
3221
3222 nr_mmu_pages = nr_pages * KVM_PERMILLE_MMU_PAGES / 1000;
3223 nr_mmu_pages = max(nr_mmu_pages,
3224 (unsigned int) KVM_MIN_ALLOC_MMU_PAGES);
3225
3226 return nr_mmu_pages;
3227}
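
Rough arithmetic behind the sizing rule above, assuming the usual constant values (KVM_PERMILLE_MMU_PAGES = 20 and KVM_MIN_ALLOC_MMU_PAGES = 64 at this point in the tree; the guest size below is only an example):

/* 1 GiB of guest memory = 262144 4K pages */
unsigned int nr_pages = 262144;
unsigned int nr_mmu_pages = nr_pages * 20 / 1000;	/* = 5242 shadow pages */
if (nr_mmu_pages < 64)					/* never below the minimum pool */
	nr_mmu_pages = 64;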
3228
2f333bcb
MT
3229static void *pv_mmu_peek_buffer(struct kvm_pv_mmu_op_buffer *buffer,
3230 unsigned len)
3231{
3232 if (len > buffer->len)
3233 return NULL;
3234 return buffer->ptr;
3235}
3236
3237static void *pv_mmu_read_buffer(struct kvm_pv_mmu_op_buffer *buffer,
3238 unsigned len)
3239{
3240 void *ret;
3241
3242 ret = pv_mmu_peek_buffer(buffer, len);
3243 if (!ret)
3244 return ret;
3245 buffer->ptr += len;
3246 buffer->len -= len;
3247 buffer->processed += len;
3248 return ret;
3249}
3250
3251static int kvm_pv_mmu_write(struct kvm_vcpu *vcpu,
3252 gpa_t addr, gpa_t value)
3253{
3254 int bytes = 8;
3255 int r;
3256
3257 if (!is_long_mode(vcpu) && !is_pae(vcpu))
3258 bytes = 4;
3259
3260 r = mmu_topup_memory_caches(vcpu);
3261 if (r)
3262 return r;
3263
3200f405 3264 if (!emulator_write_phys(vcpu, addr, &value, bytes))
2f333bcb
MT
3265 return -EFAULT;
3266
3267 return 1;
3268}
3269
3270static int kvm_pv_mmu_flush_tlb(struct kvm_vcpu *vcpu)
3271{
2390218b 3272 (void)kvm_set_cr3(vcpu, vcpu->arch.cr3);
2f333bcb
MT
3273 return 1;
3274}
3275
3276static int kvm_pv_mmu_release_pt(struct kvm_vcpu *vcpu, gpa_t addr)
3277{
3278 spin_lock(&vcpu->kvm->mmu_lock);
3279 mmu_unshadow(vcpu->kvm, addr >> PAGE_SHIFT);
3280 spin_unlock(&vcpu->kvm->mmu_lock);
3281 return 1;
3282}
3283
3284static int kvm_pv_mmu_op_one(struct kvm_vcpu *vcpu,
3285 struct kvm_pv_mmu_op_buffer *buffer)
3286{
3287 struct kvm_mmu_op_header *header;
3288
3289 header = pv_mmu_peek_buffer(buffer, sizeof *header);
3290 if (!header)
3291 return 0;
3292 switch (header->op) {
3293 case KVM_MMU_OP_WRITE_PTE: {
3294 struct kvm_mmu_op_write_pte *wpte;
3295
3296 wpte = pv_mmu_read_buffer(buffer, sizeof *wpte);
3297 if (!wpte)
3298 return 0;
3299 return kvm_pv_mmu_write(vcpu, wpte->pte_phys,
3300 wpte->pte_val);
3301 }
3302 case KVM_MMU_OP_FLUSH_TLB: {
3303 struct kvm_mmu_op_flush_tlb *ftlb;
3304
3305 ftlb = pv_mmu_read_buffer(buffer, sizeof *ftlb);
3306 if (!ftlb)
3307 return 0;
3308 return kvm_pv_mmu_flush_tlb(vcpu);
3309 }
3310 case KVM_MMU_OP_RELEASE_PT: {
3311 struct kvm_mmu_op_release_pt *rpt;
3312
3313 rpt = pv_mmu_read_buffer(buffer, sizeof *rpt);
3314 if (!rpt)
3315 return 0;
3316 return kvm_pv_mmu_release_pt(vcpu, rpt->pt_phys);
3317 }
3318 default: return 0;
3319 }
3320}
3321
3322int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
3323 gpa_t addr, unsigned long *ret)
3324{
3325 int r;
6ad18fba 3326 struct kvm_pv_mmu_op_buffer *buffer = &vcpu->arch.mmu_op_buffer;
2f333bcb 3327
6ad18fba
DH
3328 buffer->ptr = buffer->buf;
3329 buffer->len = min_t(unsigned long, bytes, sizeof buffer->buf);
3330 buffer->processed = 0;
2f333bcb 3331
6ad18fba 3332 r = kvm_read_guest(vcpu->kvm, addr, buffer->buf, buffer->len);
2f333bcb
MT
3333 if (r)
3334 goto out;
3335
6ad18fba
DH
3336 while (buffer->len) {
3337 r = kvm_pv_mmu_op_one(vcpu, buffer);
2f333bcb
MT
3338 if (r < 0)
3339 goto out;
3340 if (r == 0)
3341 break;
3342 }
3343
3344 r = 1;
3345out:
6ad18fba 3346 *ret = buffer->processed;
2f333bcb
MT
3347 return r;
3348}
3349
94d8b056
MT
3350int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4])
3351{
3352 struct kvm_shadow_walk_iterator iterator;
3353 int nr_sptes = 0;
3354
3355 spin_lock(&vcpu->kvm->mmu_lock);
3356 for_each_shadow_entry(vcpu, addr, iterator) {
3357 sptes[iterator.level-1] = *iterator.sptep;
3358 nr_sptes++;
3359 if (!is_shadow_present_pte(*iterator.sptep))
3360 break;
3361 }
3362 spin_unlock(&vcpu->kvm->mmu_lock);
3363
3364 return nr_sptes;
3365}
3366EXPORT_SYMBOL_GPL(kvm_mmu_get_spte_hierarchy);
3367
37a7d8b0
AK
3368#ifdef AUDIT
3369
3370static const char *audit_msg;
3371
3372static gva_t canonicalize(gva_t gva)
3373{
3374#ifdef CONFIG_X86_64
3375 gva = (long long)(gva << 16) >> 16;
3376#endif
3377 return gva;
3378}
3379
08a3732b 3380
805d32de 3381typedef void (*inspect_spte_fn) (struct kvm *kvm, u64 *sptep);
08a3732b
MT
3382
3383static void __mmu_spte_walk(struct kvm *kvm, struct kvm_mmu_page *sp,
3384 inspect_spte_fn fn)
3385{
3386 int i;
3387
3388 for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
3389 u64 ent = sp->spt[i];
3390
3391 if (is_shadow_present_pte(ent)) {
2920d728 3392 if (!is_last_spte(ent, sp->role.level)) {
08a3732b
MT
3393 struct kvm_mmu_page *child;
3394 child = page_header(ent & PT64_BASE_ADDR_MASK);
3395 __mmu_spte_walk(kvm, child, fn);
2920d728 3396 } else
805d32de 3397 fn(kvm, &sp->spt[i]);
08a3732b
MT
3398 }
3399 }
3400}
3401
3402static void mmu_spte_walk(struct kvm_vcpu *vcpu, inspect_spte_fn fn)
3403{
3404 int i;
3405 struct kvm_mmu_page *sp;
3406
3407 if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
3408 return;
3409 if (vcpu->arch.mmu.shadow_root_level == PT64_ROOT_LEVEL) {
3410 hpa_t root = vcpu->arch.mmu.root_hpa;
3411 sp = page_header(root);
3412 __mmu_spte_walk(vcpu->kvm, sp, fn);
3413 return;
3414 }
3415 for (i = 0; i < 4; ++i) {
3416 hpa_t root = vcpu->arch.mmu.pae_root[i];
3417
3418 if (root && VALID_PAGE(root)) {
3419 root &= PT64_BASE_ADDR_MASK;
3420 sp = page_header(root);
3421 __mmu_spte_walk(vcpu->kvm, sp, fn);
3422 }
3423 }
3424 return;
3425}
3426
37a7d8b0
AK
3427static void audit_mappings_page(struct kvm_vcpu *vcpu, u64 page_pte,
3428 gva_t va, int level)
3429{
3430 u64 *pt = __va(page_pte & PT64_BASE_ADDR_MASK);
3431 int i;
3432 gva_t va_delta = 1ul << (PAGE_SHIFT + 9 * (level - 1));
3433
3434 for (i = 0; i < PT64_ENT_PER_PAGE; ++i, va += va_delta) {
3435 u64 ent = pt[i];
3436
c7addb90 3437 if (ent == shadow_trap_nonpresent_pte)
37a7d8b0
AK
3438 continue;
3439
3440 va = canonicalize(va);
2920d728
MT
3441 if (is_shadow_present_pte(ent) && !is_last_spte(ent, level))
3442 audit_mappings_page(vcpu, ent, va, level - 1);
3443 else {
1871c602 3444 gpa_t gpa = kvm_mmu_gva_to_gpa_read(vcpu, va, NULL);
34382539
JK
3445 gfn_t gfn = gpa >> PAGE_SHIFT;
3446 pfn_t pfn = gfn_to_pfn(vcpu->kvm, gfn);
3447 hpa_t hpa = (hpa_t)pfn << PAGE_SHIFT;
37a7d8b0 3448
2aaf65e8
MT
3449 if (is_error_pfn(pfn)) {
3450 kvm_release_pfn_clean(pfn);
3451 continue;
3452 }
3453
c7addb90 3454 if (is_shadow_present_pte(ent)
37a7d8b0 3455 && (ent & PT64_BASE_ADDR_MASK) != hpa)
c7addb90
AK
3456 printk(KERN_ERR "xx audit error: (%s) levels %d"
3457 " gva %lx gpa %llx hpa %llx ent %llx %d\n",
ad312c7c 3458 audit_msg, vcpu->arch.mmu.root_level,
d77c26fc
MD
3459 va, gpa, hpa, ent,
3460 is_shadow_present_pte(ent));
c7addb90
AK
3461 else if (ent == shadow_notrap_nonpresent_pte
3462 && !is_error_hpa(hpa))
3463 printk(KERN_ERR "audit: (%s) notrap shadow,"
3464 " valid guest gva %lx\n", audit_msg, va);
35149e21 3465 kvm_release_pfn_clean(pfn);
c7addb90 3466
37a7d8b0
AK
3467 }
3468 }
3469}
3470
3471static void audit_mappings(struct kvm_vcpu *vcpu)
3472{
1ea252af 3473 unsigned i;
37a7d8b0 3474
ad312c7c
ZX
3475 if (vcpu->arch.mmu.root_level == 4)
3476 audit_mappings_page(vcpu, vcpu->arch.mmu.root_hpa, 0, 4);
37a7d8b0
AK
3477 else
3478 for (i = 0; i < 4; ++i)
ad312c7c 3479 if (vcpu->arch.mmu.pae_root[i] & PT_PRESENT_MASK)
37a7d8b0 3480 audit_mappings_page(vcpu,
ad312c7c 3481 vcpu->arch.mmu.pae_root[i],
37a7d8b0
AK
3482 i << 30,
3483 2);
3484}
3485
3486static int count_rmaps(struct kvm_vcpu *vcpu)
3487{
805d32de
XG
3488 struct kvm *kvm = vcpu->kvm;
3489 struct kvm_memslots *slots;
37a7d8b0 3490 int nmaps = 0;
bc6678a3 3491 int i, j, k, idx;
37a7d8b0 3492
bc6678a3 3493 idx = srcu_read_lock(&kvm->srcu);
90d83dc3 3494 slots = kvm_memslots(kvm);
37a7d8b0 3495 for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
bc6678a3 3496 struct kvm_memory_slot *m = &slots->memslots[i];
37a7d8b0
AK
3497 struct kvm_rmap_desc *d;
3498
3499 for (j = 0; j < m->npages; ++j) {
290fc38d 3500 unsigned long *rmapp = &m->rmap[j];
37a7d8b0 3501
290fc38d 3502 if (!*rmapp)
37a7d8b0 3503 continue;
290fc38d 3504 if (!(*rmapp & 1)) {
37a7d8b0
AK
3505 ++nmaps;
3506 continue;
3507 }
290fc38d 3508 d = (struct kvm_rmap_desc *)(*rmapp & ~1ul);
37a7d8b0
AK
3509 while (d) {
3510 for (k = 0; k < RMAP_EXT; ++k)
d555c333 3511 if (d->sptes[k])
37a7d8b0
AK
3512 ++nmaps;
3513 else
3514 break;
3515 d = d->more;
3516 }
3517 }
3518 }
bc6678a3 3519 srcu_read_unlock(&kvm->srcu, idx);
37a7d8b0
AK
3520 return nmaps;
3521}
3522
805d32de 3523void inspect_spte_has_rmap(struct kvm *kvm, u64 *sptep)
08a3732b
MT
3524{
3525 unsigned long *rmapp;
3526 struct kvm_mmu_page *rev_sp;
3527 gfn_t gfn;
3528
01c168ac 3529 if (is_writable_pte(*sptep)) {
08a3732b 3530 rev_sp = page_header(__pa(sptep));
2032a93d 3531 gfn = kvm_mmu_page_get_gfn(rev_sp, sptep - rev_sp->spt);
08a3732b
MT
3532
3533 if (!gfn_to_memslot(kvm, gfn)) {
3534 if (!printk_ratelimit())
3535 return;
3536 printk(KERN_ERR "%s: no memslot for gfn %ld\n",
3537 audit_msg, gfn);
3538 printk(KERN_ERR "%s: index %ld of sp (gfn=%lx)\n",
805d32de 3539 audit_msg, (long int)(sptep - rev_sp->spt),
08a3732b
MT
3540 rev_sp->gfn);
3541 dump_stack();
3542 return;
3543 }
3544
2032a93d 3545 rmapp = gfn_to_rmap(kvm, gfn, rev_sp->role.level);
08a3732b
MT
3546 if (!*rmapp) {
3547 if (!printk_ratelimit())
3548 return;
3549 printk(KERN_ERR "%s: no rmap for writable spte %llx\n",
3550 audit_msg, *sptep);
3551 dump_stack();
3552 }
3553 }
3554
3555}
3556
3557void audit_writable_sptes_have_rmaps(struct kvm_vcpu *vcpu)
3558{
3559 mmu_spte_walk(vcpu, inspect_spte_has_rmap);
3560}
3561
3562static void check_writable_mappings_rmap(struct kvm_vcpu *vcpu)
37a7d8b0 3563{
4db35314 3564 struct kvm_mmu_page *sp;
37a7d8b0
AK
3565 int i;
3566
f05e70ac 3567 list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
4db35314 3568 u64 *pt = sp->spt;
37a7d8b0 3569
4db35314 3570 if (sp->role.level != PT_PAGE_TABLE_LEVEL)
37a7d8b0
AK
3571 continue;
3572
3573 for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
3574 u64 ent = pt[i];
3575
3576 if (!(ent & PT_PRESENT_MASK))
3577 continue;
01c168ac 3578 if (!is_writable_pte(ent))
37a7d8b0 3579 continue;
805d32de 3580 inspect_spte_has_rmap(vcpu->kvm, &pt[i]);
37a7d8b0
AK
3581 }
3582 }
08a3732b 3583 return;
37a7d8b0
AK
3584}
3585
3586static void audit_rmap(struct kvm_vcpu *vcpu)
3587{
08a3732b
MT
3588 check_writable_mappings_rmap(vcpu);
3589 count_rmaps(vcpu);
37a7d8b0
AK
3590}
3591
3592static void audit_write_protection(struct kvm_vcpu *vcpu)
3593{
4db35314 3594 struct kvm_mmu_page *sp;
290fc38d
IE
3595 struct kvm_memory_slot *slot;
3596 unsigned long *rmapp;
e58b0f9e 3597 u64 *spte;
290fc38d 3598 gfn_t gfn;
37a7d8b0 3599
f05e70ac 3600 list_for_each_entry(sp, &vcpu->kvm->arch.active_mmu_pages, link) {
f6e2c02b 3601 if (sp->role.direct)
37a7d8b0 3602 continue;
e58b0f9e
MT
3603 if (sp->unsync)
3604 continue;
37a7d8b0 3605
a1f4d395 3606 slot = gfn_to_memslot(vcpu->kvm, sp->gfn);
290fc38d 3607 rmapp = &slot->rmap[gfn - slot->base_gfn];
e58b0f9e
MT
3608
3609 spte = rmap_next(vcpu->kvm, rmapp, NULL);
3610 while (spte) {
01c168ac 3611 if (is_writable_pte(*spte))
e58b0f9e
MT
3612 printk(KERN_ERR "%s: (%s) shadow page has "
3613 "writable mappings: gfn %lx role %x\n",
b8688d51 3614 __func__, audit_msg, sp->gfn,
4db35314 3615 sp->role.word);
e58b0f9e
MT
3616 spte = rmap_next(vcpu->kvm, rmapp, spte);
3617 }
37a7d8b0
AK
3618 }
3619}
3620
3621static void kvm_mmu_audit(struct kvm_vcpu *vcpu, const char *msg)
3622{
3623 int olddbg = dbg;
3624
3625 dbg = 0;
3626 audit_msg = msg;
3627 audit_rmap(vcpu);
3628 audit_write_protection(vcpu);
2aaf65e8
MT
3629 if (strcmp("pre pte write", audit_msg) != 0)
3630 audit_mappings(vcpu);
08a3732b 3631 audit_writable_sptes_have_rmaps(vcpu);
37a7d8b0
AK
3632 dbg = olddbg;
3633}
3634
3635#endif